def main():
    """Spawn Torque jobs on the Garibaldi cluster for GEO dataset accessions.

    Job options come from a config file (--config); accessions come either
    from -d on the command line or from a file given with -f (one accession
    per line).  NOTE: Python 2 only (print statements, ``type=file``).
    """
    parser = ArgumentParser(description="Spawn jobs on Garibaldi using the Torque queue system. Job options specified in config file.")
    parser.add_argument('--config', action='store', dest="cfg_file", help="File with configuration options", default='configs/job.settings.cfg')
    parser.add_argument('--dryrun', action='store_true', default=False, dest="dryrun", help="Create job script in jobs/ but do not launch on cluster.")
    parser.add_argument('-d', nargs='+', action='store', dest="datasets", help="One or more GEO dataset accessions")
    # ``type=file`` makes argparse open the argument as a Python 2 file object.
    parser.add_argument('-f', action='store', dest="datasets_file", help="File listing GEO datasets, one per line", type=file)
    args = parser.parse_args()
    config = SafeConfigParser()
    config.read(args.cfg_file)
    # Command-line accessions take precedence over the file listing.
    if args.datasets:
        accessions = args.datasets
    elif args.datasets_file:
        accessions = [x.strip("\n") for x in args.datasets_file]
        args.datasets_file.close()
    else:
        print "Must specify GEO datasets, either in file or on command line."
        parser.print_usage()
        return
    for i, accn in enumerate(accessions):
        # spawn() builds the job script and (unless --dryrun) submits it.
        job, command = spawn(accn, config, args.dryrun)
        print "%s launched with command: '%s'" % (job, command)
def run(cls):
    """Entry point: dispatch to one of the theme-utility sub-commands.

    Builds one subparser per entry in ``cls.commands``.  Each command target
    (``cls`` itself when the mapping value is None, otherwise the named
    attribute of ``cls``) configures its own subparser via ``setup_parser``
    and handles the parsed arguments in ``main``.  Exits with the target's
    return code, or -1 when no sub-command was selected.
    """
    parser = ArgumentParser(
        description=("The theme development utility. "
                     "Includes three modes: "
                     "one for serving a theme compiled on-the-fly, "
                     "the other for compiling statically a theme "
                     "and the latter to dump URLs to files")
    )
    subparsers = parser.add_subparsers(
        title="Commands",
        description="Available commands (modes of operation)"
    )
    for key, target in cls.commands.items():
        # None means the command is implemented on this class itself;
        # otherwise the value names an attribute of ``cls`` to dispatch to.
        if target is None:
            target = cls
        else:
            target = getattr(cls, target)
        subparser = subparsers.add_parser(
            key,
            description=target.__doc__,
            # First line of the target's docstring becomes the short help.
            help=target.__doc__.splitlines()[0]
        )
        target.setup_parser(subparser)
        subparser.set_defaults(target=target)
    args = parser.parse_args()
    # 'target' is only present when a sub-command was actually chosen.
    if hasattr(args, 'target'):
        sys.exit(args.target.main(args))
    else:
        parser.print_usage()
        sys.exit(-1)
def parse_args():
    """Parse options for posting sample query results to the dashboard.

    Returns the argparse namespace augmented with ``query_id_map``:
    {query_id: num_groups}.  NOTE: Python 2 only (print statements).
    """
    parser = ArgumentParser(
        description="Post sample query results to the result dashboard")
    parser.add_argument('--query-ids', '-q', nargs="+", metavar="QUERY_ID",
                        help=('Query ids to create new results for. Defaults '
                              'to a single random query id'))
    parser.add_argument('--num-groups', '-g', nargs="+", type=int,
                        metavar="NUM_GROUPS",
                        help=('The number of groups for each query id '
                              'specified by -q. If 1, the query will be '
                              'ungrouped. Defaults to 1 each query.'))
    args = parser.parse_args()
    # Default: a single freshly generated query id.
    if not args.query_ids:
        args.query_ids = [str(uuid.uuid4())]
    # Default: one group per query id.
    if not args.num_groups:
        args.num_groups = [1 for _ in args.query_ids]
    for num_group in args.num_groups:
        if num_group < 1:
            raise ValueError("A query can't have less than 1 group!")
    if len(args.num_groups) != len(args.query_ids):
        print ("Length of --num-groups option must match the length of the "
               "--query-ids option!")
        print ""
        parser.print_usage()
        sys.exit()
    # Pair each query id with its group count for convenient lookup.
    args.query_id_map = {
        args.query_ids[i]: args.num_groups[i]
        for i in range(len(args.query_ids))
    }
    return args
def parse_args():
    """Parse and validate options for the feedforward word segmenter.

    Positional arguments select the run mode, corpus and model file; the
    numeric options must all be >= 1.  On invalid input, prints usage and
    the error to stderr, then exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_vocab = 2500
    def_hidden = 100
    def_epoch = 100
    def_context = 3
    p = ArgumentParser(description='Word segmentation using feedforward neural network')
    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('corpus', help='[in] source corpus')
    p.add_argument('model', help='[in/out] model file')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %d)' % def_vocab)
    p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
                   help='hidden layer size (default: %d)' % def_hidden)
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %d)' % def_epoch)
    p.add_argument('--context', default=def_context, metavar='INT', type=int,
                   help='width of context window (default: %d)' % def_context)
    args = p.parse_args()
    # check args
    try:
        if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
        if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
        if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
        if args.context < 1: raise ValueError('you must set --context >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        # six.print_ keeps this working on both Python 2 and 3.
        six.print_(ex, file=sys.stderr)
        sys.exit()
    return args
def main():
    """CLI entry point: fetch and print account information via ``Client``.

    Sub-commands ``account``, ``sources`` and ``organization`` each fetch
    the corresponding data and print it, optionally as indented JSON.
    Exits 1 on API errors (ValueError) or when no sub-command is given.
    """
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest='cmd')
    # All three sub-commands share the same single option.
    for name in ('account', 'sources', 'organization'):
        sub = subs.add_parser(name)
        sub.add_argument('--json', '-j', action="store_true",
                         help="Output as JSON")
    args = parser.parse_args()
    # BUG FIX: on Python 3 subparsers are optional by default, so ``cmd``
    # can be None; bail out early instead of hitting an unbound ``data``
    # (and a missing ``args.json``) below.
    if not args.cmd:
        parser.print_usage()
        sys.exit(1)
    client = Client.from_config()
    try:
        # BUG FIX: chain the branches; the original used three independent
        # ``if`` statements, leaving ``data`` undefined for unknown commands.
        if args.cmd == 'account':
            data = client.get_account_details()
        elif args.cmd == 'sources':
            data = client.get_account_sources()
        else:  # 'organization'
            data = client.get_account_organization()
    except ValueError as e:
        parser.print_usage()
        sys.stderr.write('{}\n'.format(str(e)))
        sys.exit(1)
    if args.json:
        print(json.dumps(data, indent=4))
    else:
        print(data)
def parse_args(arguments=None) -> Namespace:
    """
    Argument parser for the command line invocation of Pyptables

    :param arguments: the arguments to parse; defaults to ``sys.argv[1:]``
        when None (an explicitly passed empty list parses as empty)
    :return: Namespace containing the arguments
    """
    _parser = ArgumentParser(description="A wrapper around iptables")
    _parser.add_argument("--new-config", action="store_true")
    _parser.add_argument("--conf", type=str)
    _parser.add_argument("--dry-run", action="store_true")
    # BUG FIX: distinguish "not supplied" (None) from an explicitly empty
    # list; ``arguments or sys.argv[1:]`` silently substituted the real
    # process argv whenever an empty list was passed (e.g. from tests).
    args = _parser.parse_args(sys.argv[1:] if arguments is None else arguments)
    if args.conf is None:
        args.conf = "/etc/pyptables.conf"
    if os.path.exists(args.conf) and os.path.isfile(args.conf):
        args.conf = os.path.abspath(args.conf)
    elif args.new_config:
        # A missing file is fine when we are about to create a new config.
        pass
    else:
        print("{} does not exists or is not a file".format(args.conf))
        _parser.print_usage()
        exit(1)
    return args
def _get_args():
    """Build the CLI parser; abort unless dsids are piped in on stdin."""
    argument_parser = ArgumentParser(description=__doc__)
    argument_parser.add_argument('xsec_file')
    # stdin attached to a terminal means nothing was piped in.
    if not sys.stdin.isatty():
        return argument_parser.parse_args()
    argument_parser.print_usage()
    exit('ERROR: need to pipe in dsids')
def parse_args():
    """Parse command line arguments and perform sanity checks."""
    parser = ArgumentParser()
    parser.add_argument('-d', '--dest_dir', required=True,
                        help="""The target directory, below which to place extracted symbol files""")
    parser.add_argument('--dump_syms',
                        help='Path to the dump_syms binary from Breakpad')
    # Options controlling how to find input files.
    parser.add_argument('-b', '--build_dir',
                        help="""Path to a directory containing results from a Kudu build, e.g. kudu/build/debug""")
    parser.add_argument('-f', '--binary_files', nargs='+', metavar="FILE",
                        help='List of binary files to process')
    parser.add_argument('-i', '--stdin_files', action='store_true',
                        help="""Read the list of files to process from stdin""")
    parser.add_argument('-r', '--pkg', '--rpm',
                        help="""RPM/DEB file containing the binaries to process, use with -s""")
    parser.add_argument('-s', '--symbol_pkg', '--debuginfo_rpm',
                        help="""RPM/DEB file containing the debug symbols matching the binaries in -r""")
    args = parser.parse_args()
    # Post processing checks
    # Check that either both pkg and debuginfo_rpm/deb are specified, or none.
    if bool(args.pkg) != bool(args.symbol_pkg):
        parser.print_usage()
        die('Either both -r and -s have to be specified, or none')
    # Exactly one of the mutually exclusive input sources must be chosen
    # (-r implies -s via the check above, so counting -r alone suffices).
    input_flags = [args.build_dir, args.binary_files, args.stdin_files, args.pkg]
    if sum(1 for flag in input_flags if flag) != 1:
        die('Specify exactly one way to locate input files (-b/-f/-i/-r,-s)')
    return args
class Parser():
    """Declarative wrapper around ``argparse.ArgumentParser``.

    ``items`` are objects exposing ``add(parser_wrapper)`` that register
    their arguments on ``self.parser``; ``proc`` is the callable (or a
    dotted-name string resolved lazily) invoked with the parsed arguments
    as keyword arguments.  When subparsers were used, the parsed
    ``subcommand`` name selects the matching attribute of ``proc`` to call.
    """

    def __init__(self, *items, proc=None, allow_empty=False, **kwargs):
        self.items = items
        self.proc = proc
        self.kwargs = kwargs            # forwarded verbatim to ArgumentParser()
        self.allow_empty = allow_empty  # run proc even when argv is empty
        self._subparsers = None

    def __call__(self, argv=None):
        """Build the parser, parse ``argv`` and dispatch to ``proc``."""
        self.parser = ArgumentParser(**self.kwargs)
        for item in self.items:
            item.add(self)
        # BUG FIX: distinguish "not supplied" (None) from an explicitly
        # empty argv; the previous ``argv or sys.argv[1:]`` silently
        # substituted the process argv whenever an empty list was passed.
        if argv is None:
            argv = sys.argv[1:]
        if self.allow_empty or argv:
            ns = self.parser.parse_args(argv)
            if self.proc:
                if isinstance(self.proc, str):
                    # NOTE(security): eval() resolves the dotted name; only
                    # trusted, programmer-supplied strings may be used here.
                    self.proc = eval(self.proc)
                args = dict(ns._get_kwargs())
                if self._subparsers:
                    subcommand = args.pop('subcommand', None)
                    if subcommand and hasattr(self.proc, subcommand):
                        self.proc = getattr(self.proc, subcommand)
                self.proc(**args)
        else:
            self.parser.print_usage()

    @property
    def subparsers(self):
        # Created lazily so plain (non-subcommand) parsers stay simple.
        if not self._subparsers:
            self._subparsers = self.parser.add_subparsers(
                dest="subcommand", help="sub commands")
        return self._subparsers
def parse_args():
    """Parse and validate options for the LSTM-RNN word segmenter.

    All numeric hyper-parameters must be >= 1; on invalid input the usage
    and the error are printed to stderr and the process exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_vocab = 2500
    def_embed = 100
    def_hidden = 100
    def_epoch = 20
    p = ArgumentParser(description='Word segmentation using LSTM-RNN')
    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('corpus', help='[in] source corpus')
    p.add_argument('model', help='[in/out] model file')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %d)' % def_vocab)
    p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
                   help='embedding layer size (default: %d)' % def_embed)
    p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
                   help='hidden layer size (default: %d)' % def_hidden)
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %d)' % def_epoch)
    args = p.parse_args()
    # check args
    try:
        if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
        if args.embed < 1: raise ValueError('you must set --embed >= 1')
        if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
        if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()
    return args
def main():
    """Build a gensim Dictionary from a named corpus and save it as text.

    Requires --output and --dataset; supports the 'newsgroups' and 'ndt'
    datasets.  Exits 1 on missing arguments or an unknown dataset name.
    """
    parser = ArgumentParser()
    parser.add_argument('-d', '--dataset')
    parser.add_argument('-p', '--dataset-path', default=default_dataset_path())
    parser.add_argument('-o', '--output')
    opts = parser.parse_args()
    dataset_name = opts.dataset
    dataset_path = opts.dataset_path
    out_fn = opts.output
    if not out_fn:
        logging.error('--output argument required ...')
        parser.print_usage()
        sys.exit(1)
    if not dataset_name:
        logging.error('--dataset argument required ...')
        parser.print_usage()
        sys.exit(1)
    if dataset_name == 'newsgroups':
        corpus = (preprocess_ng(doc)
                  for doc in newsgroups.iterator(
                      download_file(newsgroups.NEWSGROUPS_ARCHIVE_URL,
                                    dataset_path)))
    # BUG FIX: this branch must chain off the previous one; with a bare
    # ``if`` here, 'newsgroups' fell through to the ``else`` below and
    # aborted with "Unknown dataset".
    elif dataset_name == 'ndt':
        dataset = NDTDataset(dataset_path=dataset_path)
        dataset.install()
        corpus = (preprocess_ndt(doc) for doc in dataset)
    else:
        logging.error('Unknown dataset %s ...' % dataset_name)
        sys.exit(1)
    d = Dictionary(corpus)
    d.save_as_text(out_fn, sort_by_word=False)
def main():
    """CLI entry point for controlling APC power-strip outlets.

    Requires at least one actionable flag (--reboot/--on/--off/--debug)
    before attempting to connect.  NOTE: Python 2 only
    (``except ..., e`` / ``raise SystemExit, msg`` syntax).
    """
    parser = ArgumentParser(description='APC Python CLI')
    parser.add_argument('--host', action = 'store', default = APC_DEFAULT_HOST, help = 'Override the host')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose messages')
    parser.add_argument('--quiet', action = 'store_true', help = 'Quiet')
    parser.add_argument('--user', action = 'store', default = APC_DEFAULT_USER, help = 'Override the username')
    parser.add_argument('--password', action = 'store', default = APC_DEFAULT_PASSWORD, help = 'Override the password')
    parser.add_argument('--debug', action = 'store_true', help = 'Debug mode')
    parser.add_argument('--reboot', action = 'store', help = 'Reboot an outlet')
    parser.add_argument('--off', action = 'store', help = 'Turn off an outlet')
    parser.add_argument('--on', action = 'store', help = 'Turn on an outlet')
    args = parser.parse_args()
    # Refuse to connect unless the user asked for an actual operation.
    is_command_specified = (args.reboot or args.debug or args.on or args.off)
    if not is_command_specified:
        parser.print_usage()
        raise SystemExit(1)
    try:
        # APC() opens the (pexpect-driven) connection to the power strip.
        apc = APC(args)
    except pexpect.TIMEOUT, e:
        raise SystemExit, 'ERROR: Timeout connecting to APC'
def _parse_arguments():
    """Build the CLI for the Unifying-receiver tool and parse sys.argv.

    Wires one subparser each for show/config/pair/unpair, stashing the
    handler function in ``args.cmd``, and configures logging from the
    repeatable -d/--debug flag.  Exits 2 with usage when no sub-command
    was given.
    """
    from argparse import ArgumentParser
    arg_parser = ArgumentParser(prog=NAME.lower())
    arg_parser.add_argument('-d', '--debug', action='count', default=0,
                            help='print logging messages, for debugging purposes (may be repeated for extra verbosity)')
    arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
    arg_parser.add_argument('-D', '--hidraw', action='store', dest='hidraw_path', metavar='PATH',
                            help='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2')
    subparsers = arg_parser.add_subparsers(title='commands')
    # -- show: inspect paired devices ------------------------------------
    sp = subparsers.add_parser('show', help='show information about paired devices')
    sp.add_argument('device', nargs='?', default='all',
                    help='device to show information about; may be a device number (1..6), a device serial, '
                         'at least 3 characters of a device\'s name, "receiver", or "all" (the default)')
    sp.add_argument('-v', '--verbose', action='store_true',
                    help='print all available information about the inspected device(s)')
    sp.set_defaults(cmd=show_devices)
    # -- config: read/write device settings ------------------------------
    sp = subparsers.add_parser('config', help='read/write device-specific settings',
                               epilog='Please note that configuration only works on active devices.')
    sp.add_argument('device',
                    help='device to configure; may be a device number (1..6), a device serial, '
                         'or at least 3 characters of a device\'s name')
    sp.add_argument('setting', nargs='?',
                    help='device-specific setting; leave empty to list available settings')
    sp.add_argument('value', nargs='?', help='new value for the setting')
    sp.set_defaults(cmd=config_device)
    # -- pair / unpair ----------------------------------------------------
    sp = subparsers.add_parser('pair', help='pair a new device',
                               epilog='The Logitech Unifying Receiver supports up to 6 paired devices at the same time.')
    sp.set_defaults(cmd=pair_device)
    sp = subparsers.add_parser('unpair', help='unpair a device')
    sp.add_argument('device',
                    help='device to unpair; may be a device number (1..6), a device serial, '
                         'or at least 3 characters of a device\'s name.')
    sp.set_defaults(cmd=unpair_device)
    args = arg_parser.parse_args()
    # Python 3 has an undocumented 'feature' that breaks parsing empty args
    # http://bugs.python.org/issue16308
    if not 'cmd' in args:
        arg_parser.print_usage(sys.stderr)
        sys.stderr.write('%s: error: too few arguments\n' % NAME.lower())
        sys.exit(2)
    if args.debug > 0:
        # Each -d lowers the threshold one level: WARNING -> INFO -> DEBUG.
        log_level = logging.WARNING - 10 * args.debug
        log_format='%(asctime)s,%(msecs)03d %(levelname)8s %(name)s: %(message)s'
        logging.basicConfig(level=max(log_level, logging.DEBUG), format=log_format, datefmt='%H:%M:%S')
    else:
        # Silence the root logger except for errors.
        logging.root.addHandler(logging.NullHandler())
        logging.root.setLevel(logging.ERROR)
    return args
def parse_args():
    """Parse and validate options for the shift-reduce constituent parser.

    Two usage forms ('train'/'test'); all validated numeric
    hyper-parameters must be >= 1.  On invalid input, prints usage plus the
    error to stderr and exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_gpu_device = 0
    def_vocab = 32768
    def_queue = 128
    def_stack = 128
    def_epoch = 20
    def_minibatch = 100
    def_unary_limit = 3
    p = ArgumentParser(
        description='Shift-reduce constituent parser',
        usage=
            '\n %(prog)s train [options] source model'
            '\n %(prog)s test source model'
            '\n %(prog)s -h',
    )
    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('source', help='[in] source corpus'
                   '\n train: PENN-style constituent tree in each row'
                   '\n test: space-separated word list in each row')
    p.add_argument('model', help='[in/out] model parefix')
    p.add_argument('--use-gpu', action='store_true', default=False,
                   help='use GPU calculation')
    p.add_argument('--gpu-device', default=def_gpu_device, metavar='INT', type=int,
                   help='GPU device ID to be used (default: %(default)d)')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %d)' % def_vocab)
    p.add_argument('--queue', default=def_queue, metavar='INT', type=int,
                   help='queue state size (default: %d)' % def_queue)
    p.add_argument('--stack', default=def_stack, metavar='INT', type=int,
                   help='stack state size (default: %d)' % def_stack)
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %d)' % def_epoch)
    p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
                   help='minibatch size (default: %d)' % def_minibatch)
    p.add_argument('--unary-limit', default=def_unary_limit, metavar='INT', type=int,
                   help='maximum length of unary chain (default: %d)' % def_unary_limit)
    args = p.parse_args()
    # check args
    # NOTE(review): --unary-limit is never validated below — confirm whether
    # a lower bound was intended for it as well.
    try:
        if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
        if args.queue < 1: raise ValueError('you must set --queue >= 1')
        if args.stack < 1: raise ValueError('you must set --stack >= 1')
        if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
        if args.minibatch < 1: raise ValueError('you must set --minibatch >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()
    return args
def parse_args():
    """Parse and validate options for the attentional NMT tool.

    Two usage forms ('train'/'test'); all numeric hyper-parameters must be
    >= 1.  On invalid input, prints usage plus the error to stderr and
    exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_gpu_device = 0
    def_vocab = 1000
    def_embed = 100
    def_hidden = 200
    def_epoch = 10
    def_minibatch = 64
    def_generation_limit = 128
    p = ArgumentParser(
        description='Attentional neural machine trainslation',
        usage=
            '\n %(prog)s train [options] source target model'
            '\n %(prog)s test source target model'
            '\n %(prog)s -h',
    )
    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('source', help='[in] source corpus')
    p.add_argument('target', help='[in/out] target corpus')
    p.add_argument('model', help='[in/out] model file')
    p.add_argument('--use-gpu', action='store_true', default=False,
                   help='use GPU calculation')
    p.add_argument('--gpu-device', default=def_gpu_device, metavar='INT', type=int,
                   help='GPU device ID to be used (default: %(default)d)')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %(default)d)')
    p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
                   help='embedding layer size (default: %(default)d)')
    p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
                   help='hidden layer size (default: %(default)d)')
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %(default)d)')
    p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
                   help='minibatch size (default: %(default)d)')
    p.add_argument('--generation-limit', default=def_generation_limit, metavar='INT', type=int,
                   help='maximum number of words to be generated for test input (default: %(default)d)')
    args = p.parse_args()
    # check args
    try:
        if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
        if args.embed < 1: raise ValueError('you must set --embed >= 1')
        if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
        if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
        if args.minibatch < 1: raise ValueError('you must set --minibatch >= 1')
        if args.generation_limit < 1: raise ValueError('you must set --generation-limit >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()
    return args
def app():
    """ Run the compiler """
    parser = ArgumentParser()
    parser.add_argument("-p", action="store_true", dest="profile",
                        help='Profile this run of the program')
    parser.add_argument("--dbg", dest = "debugger", action = "store_true",
                        help="Connect to the pydev debugger at startup")
    parser.add_argument("-d", "--debug", dest = "debug", action = "store_true",
                        help="Enable debug logging")
    parser.add_argument("-c", "--config", dest = "config_file",
                        help="Use this config file")
    subcommands = parser.add_subparsers()
    # Register every command known to the Commander, wiring its arguments.
    for cmd in Commander.get_commands():
        sub_cmd = subcommands.add_parser(cmd)
        sub_cmd.set_defaults(command = cmd)
        arguments = Commander.get_arguments(cmd)
        for argument in arguments:
            # Argument specs come in three shapes:
            #   (name, help)               -> 2 elements
            #   (name, help, dest)         -> 3 elements
            #   (name, help, action, dest) -> 4 elements (action at idx 2)
            if len(argument) == 3:
                sub_cmd.add_argument(argument[0], help = argument[1], dest = argument[2])
            elif len(argument) == 2:
                sub_cmd.add_argument(argument[0], help = argument[1])
            else:
                sub_cmd.add_argument(argument[0], help = argument[1], dest = argument[3], action = argument[2])
    # parse_known_args: anything unrecognised is handed to the command.
    options, other = parser.parse_known_args()
    options.other = other
    if options.debug:
        # TODO: fix logging
        logging.basicConfig(level=logging.DEBUG)
    if options.debugger:
        try:
            import pydevd; pydevd.settrace()
        except:
            # NOTE(review): bare except keeps startup alive when the debugger
            # is unavailable, but it also hides unrelated errors — consider
            # narrowing to ImportError/ConnectionError.
            print("Unable to start pydev debugger")
    Config.load_config(options.config_file)
    config = Config.get()
    # 'command' is only set when a sub-command was actually chosen.
    if not hasattr(options, "command"):
        # show help
        parser.print_usage()
        return
    Commander.run(options.command, options, config)
def parse(def_config):
    """Parse -c/--config and load the i3bar status-line configuration.

    ``def_config`` doubles as the argparse default, so the usage/exit
    branch below is only reachable when ``def_config`` itself is falsy and
    no -c was supplied.  Returns the loaded config, or None if it is falsy.
    """
    parser = ArgumentParser(description='Generate status line output for i3bar')
    parser.add_argument('-c', '--config', help='absolute path to the config file', default=def_config)
    args = parser.parse_args()
    if args.config:
        _config = config.config(args.config)
    else:
        parser.print_usage(file=stderr)
        exit(1)
    return _config if _config else None
def main():
    """thefuck entry point: print the shell alias, fix the previous
    command, or show usage when invoked with no arguments."""
    parser = ArgumentParser(prog="The F**k")
    parser.add_argument(
        "-v", "--version", action="version",
        # Version string is resolved from the installed package metadata.
        version="%(prog)s {}".format(pkg_resources.require("thefuck")[0].version)
    )
    parser.add_argument("-a", "--alias", action="store_true",
                        help="[custom-alias-name] prints alias for current shell")
    parser.add_argument("command", nargs="*",
                        help="command that should be fixed")
    # Only the first CLI token matters for dispatch (sys.argv[1:2]).
    known_args = parser.parse_args(sys.argv[1:2])
    if known_args.alias:
        print_alias(False)
    elif known_args.command:
        fix_command()
    else:
        parser.print_usage()
def get_args():
    """ Check and get arguments. Exit with a message when smth went wrong. """
    description = 'The tool for grabbing Russian translation of What If? articles from Notabenoid.'
    parser = ArgumentParser(description=description)
    parser.add_argument('--all', action='store_true',
                        help='Print list of articles, originals and all translations\' variants. \n' +
                             'By default only last of top rated translations\' variants will be printed.')
    parser.add_argument('cookies_file')
    try:
        args = parser.parse_args()
    except ArgumentError as exc:
        # NOTE(review): parse_args() normally reports errors itself via
        # parser.error() -> SystemExit, so ArgumentError rarely escapes to
        # here; confirm this handler is actually reachable.
        logging.critical(str(exc))
        parser.print_usage(file=sys.stderr)
        exit(ARGS_ERROR_EXIT_CODE)
    return args
def parse_args():
    """Parse and validate options for the feedforward word segmenter.

    All numeric options must be >= 1; on invalid input the usage and the
    error are written to stderr and the process exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_vocab = 2500
    def_hidden = 100
    def_epoch = 100
    def_context = 3
    p = ArgumentParser(description="Word segmentation using feedforward neural network")
    p.add_argument("mode", help="'train' or 'test'")
    p.add_argument("corpus", help="[in] source corpus")
    p.add_argument("model", help="[in/out] model file")
    p.add_argument(
        "--vocab", default=def_vocab, metavar="INT", type=int,
        help="vocabulary size (default: %d)" % def_vocab
    )
    p.add_argument(
        "--hidden", default=def_hidden, metavar="INT", type=int,
        help="hidden layer size (default: %d)" % def_hidden
    )
    p.add_argument(
        "--epoch", default=def_epoch, metavar="INT", type=int,
        help="number of training epoch (default: %d)" % def_epoch
    )
    p.add_argument(
        "--context", default=def_context, metavar="INT", type=int,
        help="width of context window (default: %d)" % def_context,
    )
    args = p.parse_args()
    # check args
    try:
        if args.mode not in ["train", "test"]: raise ValueError("you must set mode = 'train' or 'test'")
        if args.vocab < 1: raise ValueError("you must set --vocab >= 1")
        if args.hidden < 1: raise ValueError("you must set --hidden >= 1")
        if args.epoch < 1: raise ValueError("you must set --epoch >= 1")
        if args.context < 1: raise ValueError("you must set --context >= 1")
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()
    return args
def parse_options(args):
    """Parse options for importing genome track data.

    :param args: raw argument list (without the program name)
    :return: the parsed options namespace

    For the 'txtfile' input format, non-negative --pos_idx and --val_idx
    column indices are mandatory.
    """
    parser = ArgumentParser()
    parser.add_argument("-d", "--dtype", action="store", dest="dtype",
                        choices=("int8", "uint8", "int16", "float32"),
                        default="float32",
                        help="datatype of values to store")
    parser.add_argument("-f", "--format", action="store", dest="format",
                        choices=("fasta", "wiggle", "bedgraph", "xb", "txtfile"),
                        default="wiggle",
                        help="format of input files")
    parser.add_argument("-a", "--assembly", default=None,
                        help='genome assembly to create new '
                             'track for (e.g. hg18)')
    parser.add_argument("-p", "--pos_idx", action="store", dest="pos_idx",
                        type=int, default=-1,
                        help="index of position column in the "
                             "tab- or space-delimited input txtfile")
    parser.add_argument("-v", "--val_idx", action="store", dest="val_idx",
                        type=int, default=-1,
                        help="index of value column in the "
                             "tab- or space-delimited input txtfile")
    parser.add_argument("-s", "--strand", action="store", dest="strand",
                        default="forward", choices=("forward", "reverse"),
                        help="strand of data to import (for xb files only)")
    parser.add_argument("track_name", action="store", nargs=1,
                        help="name of track to store data in")
    parser.add_argument("filename", action="store", nargs="+",
                        help="input file to read data from")
    options = parser.parse_args(args)
    if options.format == "txtfile":
        # The -1 defaults mean "not supplied"; txtfile parsing needs both.
        if options.pos_idx < 0 or options.val_idx < 0:
            # NOTE(review): parser.error() already prints the usage line and
            # exits, so the two calls above it produce duplicate output.
            parser.print_help()
            parser.print_usage()
            parser.error("positive pos_idx and val_idx values must be "
                         "provided in order to parse txtfiles")
    return options
def main():
    """Command-line entry point for the human-style Sudoku solver."""
    parser = ArgumentParser(description='Human-style Sudoku solver')
    parser.add_argument('-g', '--guess', action='store_true',
                        help='allow guessing to solve')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='solve a board without printing anything')
    parser.add_argument('-f', '--file',
                        help='solve each board in a text file and output overall results as tab-separated data')
    parser.add_argument('BOARD', nargs='?', help='a single board to solve')
    opts = parser.parse_args()
    verbose = not opts.quiet
    if opts.BOARD:
        # A single board given directly on the command line.
        solve_board(opts.BOARD, opts.guess, verbose)
    elif opts.file:
        # A batch of boards, one per line in the file.
        solve_boards(opts.file, opts.guess, verbose)
    else:
        parser.print_usage()
def parse_args():
    """Parse options selecting the openSMILE arff output configuration.

    Exits non-zero with usage and an error on stderr unless --config names
    one of the supported presets.
    """
    p = ArgumentParser(description='Choose Make arff output config')
    p.add_argument('-c', '--config', type=str,
                   help='\'OPENSMILE_CONFIG\' or \'OPENSMILE_CONFIG_2010\'')
    p.add_argument('-o', '--output_dir', default='output/', type=str,
                   help='[output dir] you set output dir')
    p.add_argument('-w', '--wav_list', default='wav_list', type=str,
                   help='wav_list you set wav file list')
    args = p.parse_args()
    # check args
    try:
        if args.config not in ['OPENSMILE_CONFIG', 'OPENSMILE_CONFIG_2010']:
            raise ValueError('you must set mode = \'OPENSMILE_CONFIG\' or \'OPENSMILE_CONFIG_2010\'')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        # BUG FIX: send the error text to stderr; the old code printed the
        # exception to stdout and then printed the stderr object itself.
        print(ex, file=sys.stderr)
        sys.exit(1)  # BUG FIX: exit non-zero so callers can detect failure
    return args
def parse_args():
    """Parse and validate options for the encoder-decoder NMT tool.

    All numeric hyper-parameters must be >= 1; on invalid input the usage
    and the error are printed to stderr and the process exits.
    """
    # Defaults for the tunable hyper-parameters.
    def_vocab = 32768
    def_embed = 256
    def_hidden = 512
    def_epoch = 100
    def_minibatch = 64
    def_generation_limit = 256
    p = ArgumentParser(description='Encoder-decoder neural machine trainslation')
    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('source', help='[in] source vectors, one per line')
    p.add_argument('target', help='[in/out] target corpus')
    p.add_argument('model', help='[in/out] model file')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %d)' % def_vocab)
    p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
                   help='embedding layer size (default: %d)' % def_embed)
    p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
                   help='hidden layer size (default: %d)' % def_hidden)
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %d)' % def_epoch)
    p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
                   help='minibatch size (default: %d)' % def_minibatch)
    p.add_argument('--generation-limit', default=def_generation_limit, metavar='INT', type=int,
                   help='maximum number of words to be generated for test input')
    args = p.parse_args()
    # check args
    try:
        if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
        if args.embed < 1: raise ValueError('you must set --embed >= 1')
        if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
        if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
        if args.minibatch < 1: raise ValueError('you must set --minibatch >= 1')
        if args.generation_limit < 1: raise ValueError('you must set --generation-limit >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        # six.print_ keeps this working on both Python 2 and 3.
        six.print_(ex, file=sys.stderr)
        sys.exit()
    return args
def main():
    """Submission helper for oioioi: configure, save settings, or submit.

    Precedence: --save-config writes token/url changes (then falls through);
    --configure starts interactive setup; otherwise a filename triggers a
    submission.  With nothing actionable, prints usage and returns 1.
    """
    parser = ArgumentParser(usage="./submit.py [-ch|-tkusw filename]")
    # --configure and --save-config cannot be combined.
    save_config_and_configure_group = parser.add_mutually_exclusive_group()
    save_config_and_configure_group.add_argument('-c', '--configure',
                                                 action='store_true',
                                                 help="start interactive "
                                                      "configuration")
    save_config_and_configure_group.add_argument('-s', '--save-config',
                                                 action='store_true',
                                                 help="used along with --token "
                                                      "or/and --url, saves " +
                                                      "configuration changes "
                                                      "to configuration file")
    parser.add_argument('-t', '--task', action='store',
                        help="override task short name (if not specified, " +
                             "filename with extension is taken)")
    parser.add_argument('-k', '--token', action='store',
                        help="provide a token for authentication")
    parser.add_argument('-u', '--url', action='store',
                        help="provide connection URL " +
                             "(e.g. http://oioioi.com/c/example-contest/)")
    parser.add_argument('-w', '--webbrowser', action='store_true',
                        help="open the web browser after successful submission")
    parser.add_argument("filename", nargs='?')
    init_config()
    args = parser.parse_args()
    # Command-line token/url override the stored configuration for this run.
    if args.token:
        configuration['token'] = args.token
    if args.url:
        configuration['contest-url'] = args.url
    if args.save_config:
        save_configuration()
    elif args.configure:
        return create_configuration()
    elif args.filename:
        return submit(args.filename, args.task, configuration['token'],
                      configuration['contest-url'], args.webbrowser)
    parser.print_usage()
    return 1
def parse_args():
    """Parse options for the emotion recognition tool.

    Exits non-zero with usage and an error on stderr unless --feature
    names a supported feature set ('IS2009' or 'IS2010').
    """
    p = ArgumentParser(description='Emotion recognition')
    p.add_argument('-r', '--arff_label', type=str,
                   help='you set the arff label')
    p.add_argument('--gpu', '-g', default=-1, type=int,
                   help='GPU ID (negative value indicates CPU)')
    p.add_argument('-f', '--feature', type=str,
                   help='\'IS2009\' or \'IS2010\'')
    args = p.parse_args()
    # check args
    try:
        if args.feature not in ['IS2009', 'IS2010']:
            raise ValueError('you must set mode = \'IS2009\' or \'IS2010\'')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        # BUG FIX: send the error text to stderr; the old code printed the
        # exception to stdout and then printed the stderr object itself.
        print(ex, file=sys.stderr)
        sys.exit(1)  # BUG FIX: exit non-zero so callers can detect failure
    return args
def parse_args():
    """Parse the control-chart options; the data itself arrives on stdin."""
    _, fname = os.path.split(__file__)
    parser = ArgumentParser(usage="cat data | python %s" % fname)
    parser.add_argument("-m", "--center", dest="m", required=True,
                        type=float, help="control chart centerline")
    parser.add_argument("-s", "--stdev", dest="s", required=True,
                        type=float, help="standard deviation")
    # A terminal on stdin means nothing was piped in, so bail with usage.
    if sys.stdin.isatty():
        parser.print_usage()
        print("for more help use --help")
        sys.exit(1)
    return parser.parse_args()
def main():
    """thefuck entry point: print the alias, fix the command, or show usage."""
    parser = ArgumentParser(prog='thefuck')
    version = get_installation_info().version
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s {}'.format(version))
    parser.add_argument('-a', '--alias', action='store_true',
                        help='[custom-alias-name] prints alias for current shell')
    parser.add_argument('command', nargs='*',
                        help='command that should be fixed')
    # Only the first CLI token matters for dispatch.
    args = parser.parse_args(sys.argv[1:2])
    if args.alias:
        print_alias(False)
        return
    if args.command:
        fix_command()
        return
    parser.print_usage()
def parse_args():
    """Parse options for posting sample tasks to the crowd server.

    Returns the namespace augmented with ``types_map``:
    {task_type: (num_tasks, num_assignments)}.
    NOTE: Python 2 only (print statements).
    """
    parser = ArgumentParser(description="Post sample tasks to the crowd server")
    parser.add_argument('--task-types', '-t', nargs="+", metavar="TASK_TYPE",
                        choices=['sa', 'er', 'ft'], default=['sa'],
                        help=('task types for which to create tasks. (defaults '
                              'to just \'sa\''))
    parser.add_argument('--crowds', '-c', nargs="+", metavar="CROWD_NAME",
                        choices=['amt', 'internal'], default=['internal'],
                        help=('crowds on which to create tasks. (defaults to '
                              'just \'internal\''))
    parser.add_argument('--num-tasks', '-n', nargs="+", type=int,
                        metavar="NUM_TASKS",
                        help=('Number of tasks to create (one number for each '
                              'task type given with -t). Defaults to one task '
                              'for each task type.'))
    parser.add_argument('--num-assignments', '-a', nargs="+", type=int,
                        metavar="NUM_ASSIGNMENTS",
                        help=('Number of assignments to require (one number '
                              'for each task type given with -t). Defaults to '
                              'one assignment for each task type.'))
    parser.add_argument('--ssl', action='store_true',
                        help='Send requests to the crowd server over ssl.')
    args = parser.parse_args()
    # Default both per-type counts to one entry of 1 per task type.
    if not args.num_tasks:
        args.num_tasks = [1 for task_type in args.task_types]
    if not args.num_assignments:
        args.num_assignments = [1 for task_type in args.task_types]
    if (len(args.num_tasks) != len(args.task_types) or
            len(args.num_assignments) != len(args.task_types)):
        print ("Length of --num-tasks and --num-assignments options must match "
               "the length of the --task-types option!")
        print ""
        parser.print_usage()
        sys.exit()
    # Pair each task type with its (tasks, assignments) counts.
    args.types_map = {
        args.task_types[i] : (args.num_tasks[i], args.num_assignments[i])
        for i in range(len(args.num_tasks))
    }
    return args
def parse_args():
    """Build the CLI for the LSTM-RNN word-segmentation scorer and return
    the validated argument namespace."""
    parser = ArgumentParser(description='Word segmentation using LSTM-RNN')
    parser.add_argument('ref', help='reference file')
    parser.add_argument('rslt', help='system result')
    args = parser.parse_args()

    # Reject empty path strings; on failure print usage plus the reason
    # to stderr, then bail out.
    checks = ((args.ref, 'you must set ref = reference'),
              (args.rslt, 'you must set rslt = system result'))
    try:
        for value, message in checks:
            if value == "":
                raise ValueError(message)
    except Exception as err:
        parser.print_usage(file=sys.stderr)
        print(err, file=sys.stderr)
        sys.exit()
    return args
def main(args):
    # Top-level CLI for the backup-router sync tool: builds the parser,
    # wires the list/sync/rename subcommands to their handlers, loads the
    # router description from JSON, and dispatches to the chosen handler.
    argparser = ArgumentParser()
    argparser.add_argument(
        'router',
        metavar='ROUTER',
        type=str,
        help='JSON file containing the backup router description')
    argparser.add_argument('--version', action='version',
                           version="%(prog)s 0.5")
    subparsers = argparser.add_subparsers()
    # - - - - list - - - -
    parser_list = subparsers.add_parser('list',
                                        help='List available sync streams')
    parser_list.add_argument(
        '--stream-name-only',
        default=False,
        action='store_true',
        help='Output only the names of the available streams')
    parser_list.set_defaults(func=list_)
    # - - - - sync - - - -
    parser_sync = subparsers.add_parser(
        'sync', help='Sync contents across one or more sync streams')
    parser_sync.add_argument(
        'stream_names',
        metavar='STREAM',
        type=str,
        nargs='+',
        help='Name of stream (or stream:subdirectory) to sync contents across')
    parser_sync.add_argument('--apply',
                             default=False,
                             action='store_true',
                             help='Actually run the rsync command')
    parser_sync.add_argument(
        '--thorough',
        default=False,
        action='store_true',
        help=
        'Ignore the timestamp on all destination files, to ensure content is synced'
    )
    parser_sync.set_defaults(func=sync)
    # - - - - rename - - - -
    parser_rename = subparsers.add_parser(
        'rename',
        help='Rename a subdirectory in both source and dest of sync stream')
    parser_rename.add_argument('stream_name',
                               metavar='STREAM',
                               type=str,
                               help='Name of stream to operate under')
    parser_rename.add_argument('existing_subdir_name',
                               metavar='DIRNAME',
                               type=str,
                               help='Existing subdirectory to be renamed')
    parser_rename.add_argument('new_subdir_name',
                               metavar='DIRNAME',
                               type=str,
                               help='New name for subdirectory')
    parser_rename.set_defaults(func=rename)
    options = argparser.parse_args(args)
    # The router description is plain JSON on disk.
    with open(options.router, 'r') as f:
        router = json.loads(f.read())
    # On Python 3 a missing subcommand leaves `func` unset on the namespace;
    # treat that as a usage error rather than crashing with AttributeError.
    try:
        func = options.func
    except AttributeError:
        argparser.print_usage()
        sys.exit(1)
    func(router, options)
def main(): argp = ArgumentParser( version='1.0', description="ETArch's SMART chat with QoS support", ) helpType = 'NC: Network Control | telephony: Telephony | \ MMconferencing: Multimedia Conferencing | RT: Real-Time Interactive | \ MMstreaming: Multimedia Streaming |\ bVideo: Broadcast Video' argp.add_argument('-i', '--iface', metavar='INTERFACE', type=str, help='interface') argp.add_argument('-e', '--entity', type=str, help='entity title') argp.add_argument('-w', '--workspace', required=True, type=str, help='workspace title') argp.add_argument('--interval', type=int, help='interval in ms') argp.add_argument('-q', '--QoS', action='store_true', help='require Quality of Service aware routing') argp.add_argument('-bw', '--bandwidth', type=int, help='bandwidth allocation required') argp.add_argument('-t', '--type', choices=[ 'NC', 'telephony', 'MMconferencing', 'RT', 'MMstreaming', 'bVideo' ], help=helpType) argp.add_argument('-vs', '--server', metavar='FILE', type=str, help='start a ffmpeg video server on this host') argp.add_argument('-vc', '--client', const='True', nargs='?', help='start a ffmpeg video client on this host') argp.add_argument('-b', '--bitrate', type=int, help='bitrate for the ffmpeg video server (Kbits/s)') args = argp.parse_args() #print vars(args) if args.type and not args.bandwidth: argp.print_usage() print 'chat.py: error: argument -bw/--bandwidth is required' sys.exit(1) if args.bitrate and not args.server: argp.print_usage() print 'chat.py: error: argument -vs/--server is required' sys.exit(1) if args.iface: iface = args.iface else: iface = netifaces.interfaces()[1] if netifaces.interfaces( )[0] == 'lo' else netifaces.interfaces()[0] if args.entity: entity = args.entity else: entity = iface.partition('-')[0] if iface.partition( '-')[0][0] == 'h' else iface.partition('-')[2] e = dts.Entity(iface, entity, True) print 'Entity "{}" registered.'.format(e.title) w = dts.Workspace(iface, args.workspace, args.QoS, args.bandwidth, args.type) try: 
w.attach(e) print 'Attached to workspace "{}".'.format(w.title) except dts.DTSException: # Failed to attach, probably does not exists, # then try to create print 'Failed to attach, trying to create' w.create_on_dts(e) print 'Created workspace "{}" and attached to it.'.format(w.title) if not args.client: def reader_loop(): try: while True: msg = w.recv() sys.stdout.write(msg) except gevent.GreenletExit, KeyboardInterrupt: pass reader = gevent.spawn(reader_loop)
def main():
    # CLI entry point for the slide-compilation helper: configures the
    # source/destination folders and runs whichever actions were requested.
    from argparse import ArgumentParser

    # Parameter
    parser = ArgumentParser()
    parser.add_argument("-s", "--source",
                        help="Directory of Repository",
                        default=".",
                        type=str,
                        metavar="<git-repository>")
    parser.add_argument("-d", "--destination",
                        help="Directory of Destination",
                        default="./slides",
                        type=str,
                        metavar="<destination-folder>")
    # Actions
    parser.add_argument("-W", "--weekly",
                        help="Create a pdf for each week",
                        action='store_true')
    parser.add_argument("-A", "--all",
                        help="Create a pdf with all slides",
                        action='store_true')
    parser.add_argument("-C", "--copy",
                        help="Copy all slides into a single folder",
                        action='store_true')
    parser.add_argument(
        "--compile",
        help=
        "Compile a single file or a single week based on identifiers. Possible values include (1 2, 4 *)",
        type=str,
        metavar="<week> <slide>",
        nargs=2,
        required=False)
    parser.add_argument(
        "--compile-git",
        help="Compile slides that have changes according to git",
        action='store_true')
    parser.add_argument("--compile-all",
                        help="Compile all slides",
                        action='store_true')
    parser.add_argument(
        "--cleanup",
        help="Cleanup all temporary latex files (aux, log, ...)",
        action='store_true')
    parser.add_argument(
        "--handout",
        help=
        "Compile slides in handout-mode (more compact, no animation slides)",
        action='store_true')
    args = parser.parse_args()

    # Module-level configuration consumed by the compile/copy helpers below.
    global GIT_REPO, DST_FOLDER, HANDOUT_MODE
    GIT_REPO = Path(args.source)
    DST_FOLDER = Path(args.destination)
    HANDOUT_MODE = args.handout
    # --handout only affects compilation, so warn when no compile action ran.
    if args.handout and not (args.compile or args.compile_git
                             or args.compile_all):
        print(
            "Warning: --handout flag set, but no compilation initiated. "
            "Use --compile, --compile-all or --compile-git to compile tex to pdf"
        )
    # Track whether any action ran so usage can be shown otherwise.
    _did_smth = False
    if args.compile_all:
        compile_all()
        _did_smth = True
    # --compile-all subsumes the narrower compile actions.
    if args.compile_git and not args.compile_all:
        compile_git()
        _did_smth = True
    if args.compile and not args.compile_all:
        compile_single(*args.compile)
        _did_smth = True
    if args.cleanup:
        cleanup()
        _did_smth = True
    if args.weekly:
        weekly_slides()
        _did_smth = True
    if args.all:
        full_slides()
        _did_smth = True
    if args.copy:
        copy()
        _did_smth = True
    if not _did_smth:
        parser.print_usage()
def main():
    """Command-line front end for submitting solutions.

    Returns ``None`` after a save-config action, the result of ``submit``
    or ``create_configuration`` when those run, or 1 when no action was
    requested.
    """
    parser = ArgumentParser(usage="./submit.py [-ch|-tkusw filename]")
    # --configure and --save-config are mutually exclusive actions.
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument(
        '-c', '--configure', action='store_true',
        help="start interactive configuration")
    exclusive.add_argument(
        '-s', '--save-config', action='store_true',
        help="used along with --token or/and --url, saves configuration "
             "changes to configuration file")
    parser.add_argument(
        '-t', '--task', action='store',
        help="override task short name (if not specified, filename with "
             "extension is taken)")
    parser.add_argument(
        '-k', '--token', action='store',
        help="provide a token for authentication")
    parser.add_argument(
        '-u', '--url', action='store',
        help="provide connection URL "
             "(e.g. http://oioioi.com/c/example-contest/)")
    parser.add_argument(
        '-w', '--webbrowser', action='store_true',
        help="open the web browser after successful submission")
    parser.add_argument("filename", nargs='?')

    init_config()
    options = parser.parse_args()

    # Command-line overrides take precedence over the stored configuration.
    if options.token:
        configuration['token'] = options.token
    if options.url:
        configuration['contest-url'] = options.url

    if options.save_config:
        save_configuration()
    elif options.configure:
        return create_configuration()
    elif options.filename:
        return submit(
            options.filename,
            options.task,
            configuration['token'],
            configuration['contest-url'],
            options.webbrowser,
        )
    parser.print_usage()
    return 1
def main():
    """Dispatch to the word-lookup helper selected by the CLI flags."""
    cli = ArgumentParser()
    # Only one display mode may be chosen at a time.
    mode_group = cli.add_mutually_exclusive_group()
    mode_group.add_argument("-r", "--random", action="store_true",
                            dest="random", help="displays random words")
    mode_group.add_argument("-d", "--defi", action="store_true",
                            dest="definition",
                            help="displays definition of the word",
                            default="True")
    mode_group.add_argument("-e", "--examples", action="store_true",
                            dest="examples", help="displays examples")
    mode_group.add_argument("-t", "--topexample", action="store_true",
                            dest="topexample", help="displays top examples")
    mode_group.add_argument("-s", "--similarwords", action="store_true",
                            dest="similarwords",
                            help="displays similar words")
    mode_group.add_argument("-c", "--compact", action="store_true",
                            dest="compact",
                            help="displays all things about word in compact form")
    mode_group.add_argument("-f", "--file", action="store_true",
                            dest="file",
                            help="displays words in compact form from file")
    cli.add_argument("word", help="Word / count which ever is applicable")
    opts, _extra = cli.parse_known_args()

    lookup = opts.word
    if lookup == '':
        cli.print_usage()
        exit(1)

    if opts.random:
        display_random_words(lookup)
    elif opts.examples:
        display_examples(lookup)
    elif opts.topexample:
        display_top_examples(lookup)
    elif opts.similarwords:
        display_related_words(lookup)
    elif opts.compact:
        display_compact(lookup)
    elif opts.file:
        read_from_file(lookup)
    # `definition` defaults to the truthy string "True", so it acts as the
    # fallback mode and must stay last in the chain.
    elif opts.definition:
        display_definitions(lookup)
    else:
        display_help()
if parts is None: parts = {part: [] for part in csv.columns.difference([THRESH])} for part in parts: part_vals = csv[part] assert len(part_vals) == len(thresholds) parts[part].append(part_vals) return labels, all_thresholds, parts if __name__ == '__main__': args = parser.parse_args() if not args.input: parser.print_usage(stderr) print('error: must specify at least one --input', file=stderr) exit(1) if args.is_poster: matplotlib.rcParams.update({ 'font.family': 'Ubuntu', 'pgf.rcfonts': False, 'xtick.labelsize': '12', 'ytick.labelsize': '12', 'legend.fontsize': '14', 'axes.labelsize': '16', 'axes.titlesize': '18', }) else: matplotlib.rcParams.update({
def main():
    # Command-line front end for the APC power-switch controller: parse
    # options, connect, run exactly one outlet action, then disconnect.
    parser = ArgumentParser(description='APC Python CLI')
    parser.add_argument('--host', action='store', default=APC_DEFAULT_HOST,
                        help='Override the host')
    parser.add_argument('--user', action='store', default=APC_DEFAULT_USER,
                        help='Override the username')
    parser.add_argument('--password', action='store',
                        default=APC_DEFAULT_PASSWORD,
                        help='Override the password')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose messages')
    parser.add_argument('--quiet', action='store_true', help='Quiet')
    parser.add_argument('--debug', action='store_true', help='Debug mode')
    parser.add_argument('--reboot', action='store', metavar='OUTLET',
                        help='Reboot an outlet')
    parser.add_argument('--off', action='store', metavar='OUTLET',
                        help='Turn off an outlet')
    parser.add_argument('--on', action='store', metavar='OUTLET',
                        help='Turn on an outlet')
    parser.add_argument(
        '--cli', action='store', default='',
        help="command line to execute 'ssh {user}@{host}' or 'telnet {host}")
    parser.add_argument(
        '--delay', action='store', default=0,
        help='delay before on/off (-1 to 7200 sec, where -1=Never)')
    parser.add_argument('--duration', action='store', default=5,
                        help='reboot duration (5 to 60 sec)')
    parser.add_argument('--status', action='store_true',
                        help='Status of outlets')
    args = parser.parse_args()
    # At least one action flag is required; bail out with usage otherwise.
    is_command_specified = (args.reboot or args.debug or args.on or args.off
                            or args.status)
    if not is_command_specified:
        parser.print_usage()
        raise SystemExit(1)
    try:
        apc = APC(args.host, args.user, args.password, args.verbose,
                  args.quiet, args.cli)
    except pexpect.TIMEOUT as e:
        raise SystemExit('ERROR: Timeout connecting to APC')
    # --delay/--duration arrive as strings (no type= above); convert here.
    args.delay = int(args.delay)
    args.duration = int(args.duration)
    if args.debug:
        apc.debug()
    else:
        try:
            if args.reboot:
                apc.reboot(args.reboot, args.delay, args.duration)
            elif args.on:
                apc.on(args.on, args.delay)
            elif args.off:
                apc.off(args.off, args.delay)
            elif args.status:
                print(apc.status())
        except pexpect.TIMEOUT as e:
            raise SystemExit('APC failed! Pexpect result:\n%s' % e)
        finally:
            # NOTE(review): disconnect only happens on this branch; the
            # --debug path appears to leave the session open — confirm.
            apc.disconnect()
def main():
    """CLI entry point for managing Azure subscriptions and management groups.

    Builds the ``create-subscription`` / ``create-management-group`` /
    ``associate-management-group`` subcommands, parses the arguments, and
    invokes the handler bound to the chosen subcommand exactly once,
    logging any provisioning failure.
    """
    # -------------------------------------------------------------------------
    # Script argument definitions.
    # -------------------------------------------------------------------------
    parser = ArgumentParser(
        description='Set of commands to manage Azure Subscriptions')
    subparsers = parser.add_subparsers()

    create_subscription_parser = subparsers.add_parser(
        'create-subscription', help='Creates a new subscription')
    create_subscription_parser.add_argument(
        '--offer-type',
        dest='offer_type',
        action='store',
        type=str,
        required=True,
        help="Azure's Subscription Offer Type: i.e. MS-AZR-0017P")
    create_subscription_parser.add_argument(
        '--subscription-name',
        dest='subscription_name',
        action='store',
        type=str,
        required=True,
        help="Azure's Subscription Name")
    create_subscription_parser.add_argument(
        '--billing-enrollment-name',
        dest='billing_enrollment_name',
        action='store',
        type=str,
        required=False,
        help="Billing enrollment guid. If no value is passed, a default "
             "billing enrollment account will be used")
    set_general_arguments(create_subscription_parser)
    create_subscription_parser.set_defaults(func=create_subscription)

    create_management_group_parser = subparsers.add_parser(
        'create-management-group', help='Creates a new management group')
    create_management_group_parser.add_argument(
        '--id',
        dest='id',
        action='store',
        type=str,
        required=True,
        help="Management Group Id")
    create_management_group_parser.add_argument(
        '--subscription-id',
        dest='subscription_id',
        action='store',
        default=None,
        type=str,
        required=False,
        help="Subscription Id, if specified, the subscription gets "
             "associated to the management group")
    create_management_group_parser.add_argument(
        '--subscription-name',
        dest='subscription_name',
        action='store',
        default=None,
        type=str,
        required=False,
        help="Subscription Name, if specified, the subscription gets "
             "associated to the management group")
    set_general_arguments(create_management_group_parser)
    create_management_group_parser.set_defaults(func=create_management_group)

    associate_mgmt_group_parser = subparsers.add_parser(
        'associate-management-group',
        help='Associates a subscription to a management group')
    associate_mgmt_group_parser.add_argument(
        '--subscription-id',
        dest='subscription_id',
        action='store',
        type=str,
        required=True,
        help="Azure's Subscription Id")
    associate_mgmt_group_parser.add_argument(
        '--management-group-id',
        dest='management_group_id',
        action='store',
        type=str,
        required=True,
        help="Azure's Management Group Id")
    associate_mgmt_group_parser.set_defaults(func=associate_mgmt_group)

    # -------------------------------------------------------------------------
    # Process parameter arguments.
    # -------------------------------------------------------------------------
    args = parser.parse_args()

    # With no subcommand the namespace is empty (subparsers are optional on
    # Python 3), so show usage instead of failing on a missing `func`.
    if len(vars(args)) == 0:
        parser.print_usage()
        exit()

    # -------------------------------------------------------------------------
    # Call the function indicated by the invocation command.
    # -------------------------------------------------------------------------
    # BUG FIX: the handler used to be invoked twice — once unguarded and
    # once inside the try block — so every provisioning action ran two
    # times. It is now invoked exactly once, with failures logged.
    try:
        args.func(args)
    except Exception as ex:
        _logger.error(
            'There was an error provisioning the resources: {}'.format(
                str(ex)))
        _logger.error(ex)
def initArgs():
    # Build the CLI for the Keras linear-regression test script and stash
    # the parsed result in module-level globals consumed elsewhere.
    global arguments, scriptBaseName, parser, __version__
    __version__ = "0.0.0.1"
    parser = ArgumentParser(
        description='Test Keras NN Linear Regresssion with dataset from file.',
        formatter_class=ArgumentDefaultsHelpFormatter)
    required = parser.add_argument_group('required arguments')
    required.add_argument("dataFileName",
                          help="data fileName to read data from.")
    # NOTE(review): float types for batch_size and epochs look odd (both are
    # conventionally integers) — confirm this is intended downstream.
    parser.add_argument("-b", "--batch_size",
                        help="batchSize taken from the whole dataSet.",
                        default=-1,
                        type=float)
    parser.add_argument("-e", "--epochs",
                        help="Number of epochs to go through the NN.",
                        default=5,
                        type=float)
    parser.add_argument(
        "-E", "--EarlyStopping",
        help=
        "Number of epochs before stopping once your loss starts to increase (disabled by default).",
        default=-1,
        type=int)
    parser.add_argument(
        "-P", "--PlotMetrics",
        help="Enables the live ploting of the trained model metrics.",
        action='store_true',
        default=False)
    parser.add_argument("-v", "--validation_split",
                        help="Validation split ratio of the whole dataset.",
                        default=0.2,
                        type=float)
    parser.add_argument("-a", "--activationFunction",
                        help="NN Layer activation function.",
                        default="linear",
                        choices=['linear', 'relu', 'sigmoid'])
    parser.add_argument("-l", "--lossFunction",
                        help="NN model loss function.",
                        default="mse",
                        choices=['mse', 'mae', 'rmse'])
    parser.add_argument("-o", "--optimizer",
                        help="NN model optimizer algo.",
                        default="sgd",
                        choices=['sgd', 'rmsprop', 'adam'])
    parser.add_argument("--Lr",
                        help="Set the learning rate of the NN.",
                        default=None,
                        type=float)
    parser.add_argument("-q", "--quiet",
                        help="Be quiet.",
                        action='store_true',
                        default=False)
    parser.add_argument("-u", "--usage",
                        help="Print usage.",
                        action='store_true',
                        default=False)
    parser.add_argument("--mdu",
                        help="Print usage in Markdown code blocks.",
                        action='store_true',
                        default=False)
    parser.add_argument("--mdh",
                        help="Print help in markdown code blocks.",
                        action='store_true',
                        default=False)
    parser.add_argument("--md",
                        help="Print output in markdown code blocks.",
                        action='store_true',
                        default=False)
    scriptBaseName = parser.prog
    arguments = parser.parse_args()
    # Usage/help shortcuts exit immediately, optionally wrapped in Markdown
    # <pre><code> blocks via the project's Print helper.
    if arguments.usage:
        parser.print_usage()
        exit()
    if arguments.mdu:
        Print("<pre><code>")
        parser.print_usage()
        Print("</code></pre>")
        exit()
    if arguments.mdh:
        Print("<pre><code>")
        parser.print_help()
        Print("</code></pre>")
        exit()
    # if arguments.lossFunction.lower() == 'mse' and arguments.epochs < 10 : arguments.epochs = 15
    # --md opens a Markdown block here; presumably closed later by the
    # caller after normal output — TODO confirm.
    if arguments.md:
        Print("<pre><code>")
    return arguments
args = p.parse_args() logging.basicConfig() log = logging.getLogger() # log.setLevel(logging.INFO) with open(args.config, 'rb') as infile: config = yaml.load(infile, Loader=yaml.FullLoader) if args.address is not None: override_addresses(config, args.address) if 'memories' not in config and args.memory_config == None: print( "Memory Configuration must be in config file or provided using -m") p.print_usage() quit(-1) if args.memory_config: # Use memory configuration from mem_config base_dir = os.path.split(args.memory_config)[0] with open(args.memory_config, 'rb') as infile: mem_config = yaml.load(infile, Loader=yaml.FullLoader) if 'options' in mem_config: config['options'] = mem_config['options'] config['memories'] = mem_config['memories'] config['peripherals'] = mem_config['peripherals'] else: base_dir = os.path.split(args.config)[0] emulate_binary(config,
def main(argv=None):
    '''Command line options.

    Dispatches on the first CLI token (analyze/copy/help/version) and
    returns a process exit code: 0 on success, 1 on usage errors, 2 on
    unexpected exceptions.
    '''
    if argv is None:
        argv = sys.argv
    else:
        # NOTE(review): extending sys.argv with the caller-supplied argv is
        # odd but preserved; the rest of the function indexes `argv` itself.
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    program_shortdesc = __doc__.split("\n")[1]
    program_license = '''%s

  Created by Alexander Hanl (Aalmann) on %s (last update %s).
  Copyright 2018. All rights reserved.

  Licensed under MIT License

USAGE
''' % (program_shortdesc, str(__date__), str(__updated__))

    # With no arguments, behave as if --help was requested.
    if len(argv) <= 1:
        argv.append("--help")
    try:
        command = argv[1]
        if command == "analyze":
            # Setup argument parser
            parser = ArgumentParser(
                description=program_license,
                formatter_class=RawDescriptionHelpFormatter,
                prog="FileSort analyze")
            parser.add_argument(
                "-r", "--recursive",
                dest="recurse",
                action="store_true",
                help="recurse into subfolders [default: %(default)s]")
            parser.add_argument(dest="directory",
                                help="paths to directory with files",
                                metavar="directory",
                                nargs='?')
            # Process arguments
            args = parser.parse_args(argv[2:])
            if not args.directory:
                parser.print_help()
                logging.error("directory not set.")
                print("\nERROR: directory not set")
                parser.print_usage()
                return 1
            logging.info("Using directory %s" % (args.directory))
            directory = args.directory
            recurse = args.recurse
            analyzer = Analyzer(directory, recurse)
            analyzer.analyze()
        elif command == "copy":
            # Setup argument parser
            parser = ArgumentParser(
                description=program_license,
                formatter_class=RawDescriptionHelpFormatter,
                prog="FileSort copy")
            parser.add_argument(dest="directory",
                                help="paths to directory with files",
                                metavar="directory",
                                nargs='?')
            # Process arguments
            args = parser.parse_args(argv[2:])
            if not args.directory:
                parser.print_help()
                logging.error("directory not set.")
                print("\nERROR: directory not set")
                parser.print_usage()
                return 1
            logging.info("Using directory %s" % (args.directory))
            directory = args.directory
            copier = Copy(directory)
            copier.copy()
        elif command in ["--help", "-h", "help"]:
            # Build the command overview shown alongside the license text.
            commd_help = "\nThe following commands are available:"
            commd_help += "\n    help       print this help message"
            commd_help += "\n    analyze    Analyze a directory and create " \
                          "a mapping file for copy or move command."
            commd_help += "\n    copy       Copies the files listed in " \
                          "mapping file to the analyzed destinations"
            commd_help += "\n    move       Moves the files listed in " \
                          "mapping file to the analyzed destination"
            commd_help += "\n    revert     Tries to reverts the changes " \
                          "from move command."
            parser = ArgumentParser(
                description=program_license + commd_help,
                formatter_class=RawDescriptionHelpFormatter,
                prog="FileSort")
            parser.add_argument("command",
                                help="Command to be executed",
                                nargs="?")
            parser.add_argument('-v', '--version', action='version',
                                version=program_version_message)
            # Process arguments
            args = parser.parse_args()
        elif command == "-v" or command == "--version":
            parser = ArgumentParser(
                description=program_license,
                formatter_class=RawDescriptionHelpFormatter,
                prog="FileSort")
            parser.add_argument('-v', '--version', action='version',
                                version=program_version_message)
            # Process arguments
            args = parser.parse_args()
        # NOTE(review): an unrecognized command falls through silently and
        # returns 0 — confirm whether it should be a usage error instead.
        return 0
    except KeyboardInterrupt:
        # handle keyboard interrupt
        return 0
    except Exception as exc:
        indent = len(program_name) * " "
        # BUG FIX: generic exceptions have no `.msg` attribute, so the old
        # handler (`exc.msg`) itself raised AttributeError; use str(exc).
        sys.stderr.write(program_name + ": " + repr(exc) + "\n" +
                         str(exc) + "\n")
        import traceback
        sys.stderr.write(traceback.format_exc())
        sys.stderr.write(indent + "  for help use --help")
        return 2
def main(): """ Main function """ if __name__ == "__main__": parser = ArgumentParser() subparsers = parser.add_subparsers(help="commands") parser.add_argument('--version', action='version', version=VERSION) # A display command display_parser = subparsers.add_parser("display", help="Display certificate.") display_parser.add_argument("--input", "-i", action="store", help="Certificate path.") display_parser.add_argument("--input-fqdn", "-u", action="store", help="Certificate FQDN.") display_parser.add_argument("--extensions", "-e", action="store_true", default=False, help="Display extensions and signature.") display_parser.add_argument("--port", "-p", action="store", default=443, help="Change HTTPs port.") # An verify command verify_parser = subparsers.add_parser( "verify", help="verifiy couple CA, CERTIFICATE") verify_parser.add_argument("--input", "-i", action="store", help="Certificate path.") verify_parser.add_argument("--ca", action="store", help="CA path.") verify_parser.add_argument("--input-fqdn", "-f", action="store", help="Certificate FQDN.") verify_parser.add_argument("--port", "-p", action="store", help="Change HTTPs port.", default=443) args = parser.parse_args() if len(sys.argv) < 2: parser.print_usage() elif sys.argv[1] == "display": if args.input is not None: display(args.input, extensions=args.extensions) elif args.input_fqdn is not None: display(args.input_fqdn, fqdn=True, extensions=args.extensions, port=args.port) else: display_parser.print_usage() elif sys.argv[1] == "verify": if args.input is not None and args.ca is not None: verify(args.input, args.ca, port=args.port) elif args.input_fqdn is not None and args.ca is not None: verify(args.input_fqdn, args.ca, fqdn=True, port=args.port) else: verify_parser.print_usage()
help="Imagery type. Valid options are mixed, " + "jpeg, png, or source.", choices=["mixed", "jpeg", "png", "source"], default="source") PARSER.add_argument("-q", metavar="quality", type=int, default=75, help="Quality for jpeg images, 0-100. Default is 75", choices=list(range(100))) PARSER.add_argument("-a", dest="append", action="store_true", default=False, help="Append tile set to existing geopackage") PARSER.add_argument("-T", dest="threading", action="store_false", default=True, help="Disable multiprocessing.") ARG_LIST = PARSER.parse_args() if not exists(ARG_LIST.source_folder) or exists(ARG_LIST.output_file): PARSER.print_usage() print("Ensure that TMS directory exists and out file does not.") exit(1) if ARG_LIST.q is not None and ARG_LIST.imagery == 'png': PARSER.print_usage() print("-q cannot be used with png") exit(1) main(ARG_LIST)
def main(): parser = ArgumentParser() parser.add_argument( "-t", dest="target_hosts", required=True, help="Set a target range of addresses to target. Ex 10.11.1.1-255") parser.add_argument( "-o", dest="output_directory", required=True, help="Set the output directory. Ex /root/Documents/labs/") parser.add_argument("-w", dest="wordlist", required=False, help="Set the wordlist to use for generated" " commands. Ex /usr/share/wordlist.txt") parser.add_argument("--pingsweep", dest="ping_sweep", action="store_true", help="Write a new target.txt by performing" " a ping sweep and discovering live hosts.", default=False) parser.add_argument("--dns", dest="find_dns_servers", action="store_true", help="Find DNS servers from a list of targets.", default=False) parser.add_argument("--services", dest="perform_service_scan", action="store_true", help="Perform service scan over targets.", default=False) parser.add_argument("--hostnames", dest="hostname_scan", action="store_true", help="Attempt to discover target hostnames" " and write to 0-name.txt and hostnames.txt.", default=False) parser.add_argument("--snmp", dest="perform_snmp_walk", action="store_true", help="Perform service scan over targets.", default=False) parser.add_argument( "--quick", dest="quick", action="store_true", required=False, help="Move to the next target after performing a quick scan " "and writing first-round recommendations.", default=False) parser.add_argument("--quiet", dest="quiet", action="store_true", help="Supress banner and headers to limit" " to comma dilimeted results only.", default=False) parser.add_argument("--exec", dest="follow", action="store_true", help="Execute shell comamnds from recommendations" " as they are discovered. 
" "Likely to lead to very long execute times" " depending on the wordlist being used.", default=False) parser.add_argument("--simple_exec", dest="quickfollow", action="store_true", help="Execute non-brute forcing shell" " comamnds only as they are discovered.", default=False) arguments = parser.parse_args() if len(sys.argv) == 1: print_banner() parser.error("No arguments given.") parser.print_usage() sys.exit() if arguments.output_directory.endswith('/' or '\\'): arguments.output_directory = arguments.output_directory[:-1] if arguments.target_hosts.endswith('/' or '\\'): arguments.target_hosts = arguments.target_hosts[:-1] print(is_json(CONFIG))
def parse_args():
    """Parse and validate CLI options for the neural MST dependency parser.

    Returns the argparse namespace; on an invalid combination prints usage
    and the validation error to stderr and exits.
    """
    def_gpu = -1
    def_vocab = 8192
    def_embed = 512
    def_hidden = 256
    def_epoch = 50

    p = ArgumentParser(
        description='Neural MST dependency parser',
        usage=
        '\n %(prog)s --mode=train --train=FILE --dev=FILE --model=FILE [options]'
        '\n %(prog)s --mode=test --model=FILE [options]'
        '\n %(prog)s -h'
    )
    p.add_argument('--mode', default=None, metavar='STR', type=str,
                   help='\'train\' or \'test\'')
    p.add_argument('--train', default=None, metavar='STR', type=str,
                   help='[in] training corpus with CoNLL dependency')
    p.add_argument('--dev', default=None, metavar='STR', type=str,
                   help='[in] development corpus with CoNLL dependency')
    p.add_argument('--model', default=None, metavar='STR', type=str,
                   help='[in/out] model file prefix')
    p.add_argument('--gpu', default=def_gpu, metavar='INT', type=int,
                   help='GPU ID if use a GPU, negative value if use CPU (default: %(default)d)')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %(default)d)')
    p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
                   help='embedding size (default: %(default)d)')
    p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
                   help='hidden size (default: %(default)d)')
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='maximum training epoch (default: %(default)d)')
    args = p.parse_args()

    # check args: any failure prints usage plus the reason, then exits.
    # BUG FIX: the error messages previously read "you msut give ...".
    try:
        if args.mode == 'train':
            if not args.train:
                raise ValueError('you must give a string for --train')
            if not args.dev:
                raise ValueError('you must give a string for --dev')
            if not args.model:
                raise ValueError('you must give a string for --model')
            if not args.vocab > 0:
                raise ValueError('you must set --vocab > 0')
            if not args.embed > 0:
                raise ValueError('you must set --embed > 0')
            if not args.hidden > 0:
                raise ValueError('you must set --hidden > 0')
            if not args.epoch > 0:
                raise ValueError('you must set --epoch > 0')
        elif args.mode == 'test':
            if not args.model:
                raise ValueError('you must give a string for --model')
        else:
            raise ValueError('you must set mode = \'train\' or \'test\'')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()
    return args
def parse_args():
    """Parse and validate command-line options for the shift-reduce
    constituent parser.

    Returns:
        argparse.Namespace: validated options (mode, source, model plus
        network/training hyper-parameters).

    On invalid input, prints usage and the error to stderr, then exits.
    """
    # Default hyper-parameters.
    def_gpu_device = 0
    def_vocab = 32768
    def_embed = 1024
    def_char_embed = 128
    def_queue = 512
    def_stack = 512
    def_srstate = 512
    def_epoch = 50
    def_minibatch = 100
    def_unary_limit = 3

    p = ArgumentParser(
        description='Shift-reduce constituent parser',
        usage=
            '\n %(prog)s train [options] source model'
            '\n %(prog)s test source model'
            '\n %(prog)s -h',
    )

    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('source',
                   help='[in] source corpus'
                        '\n train: PENN-style constituent tree in each row'
                        '\n test: space-separated word list in each row')
    # Fixed typo in help text: 'parefix' -> 'prefix'.
    p.add_argument('model', help='[in/out] model prefix')
    p.add_argument('--use-gpu', action='store_true', default=False,
                   help='use GPU calculation')
    p.add_argument('--gpu-device', default=def_gpu_device, metavar='INT', type=int,
                   help='GPU device ID to be used (default: %(default)d)')
    p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
                   help='vocabulary size (default: %(default)d)')
    p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
                   help='embedding size (default: %(default)d)')
    p.add_argument('--char-embed', default=def_char_embed, metavar='INT', type=int,
                   help='character-based embedding size (default: %(default)d)')
    p.add_argument('--queue', default=def_queue, metavar='INT', type=int,
                   help='queue state size (default: %(default)d)')
    p.add_argument('--stack', default=def_stack, metavar='INT', type=int,
                   help='stack state size (default: %(default)d)')
    p.add_argument('--srstate', default=def_srstate, metavar='INT', type=int,
                   help='shift-reduce state size (default: %(default)d)')
    p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
                   help='number of training epoch (default: %(default)d)')
    p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
                   help='minibatch size (default: %(default)d)')
    p.add_argument('--unary-limit', default=def_unary_limit, metavar='INT', type=int,
                   help='maximum length of unary chain (default: %(default)d)')

    args = p.parse_args()

    # Validate the parsed arguments; all sizes must be positive.
    try:
        if args.mode not in ['train', 'test']:
            raise ValueError('you must set mode = \'train\' or \'test\'')
        if args.vocab < 1:
            raise ValueError('you must set --vocab >= 1')
        if args.embed < 1:
            raise ValueError('you must set --embed >= 1')
        if args.char_embed < 1:
            raise ValueError('you must set --char-embed >= 1')
        if args.queue < 1:
            raise ValueError('you must set --queue >= 1')
        if args.stack < 1:
            raise ValueError('you must set --stack >= 1')
        if args.srstate < 1:
            raise ValueError('you must set --srstate >= 1')
        if args.epoch < 1:
            raise ValueError('you must set --epoch >= 1')
        if args.minibatch < 1:
            raise ValueError('you must set --minibatch >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()

    return args
def parse_args():
    """Parse and validate command-line options for DRAW training.

    Returns:
        argparse.Namespace: validated options (save_path, optional
        model_path to resume from, and training/model hyper-parameters).

    On invalid input, prints usage and the error to stderr, then exits.
    """
    # Default hyper-parameters.
    vae_H_enc_def = 256
    vae_z_dim_def = 100
    vae_H_dec_def = 256
    vae_Readpatch_def = 2
    vae_Writepatch_def = 5
    maxepoch_def = 20
    batchsize_def = 100
    lr_def = 0.001
    wdecay_def = 0.0
    gpu_def = -1
    times_def = 64
    alpha_def = 1.0

    p = ArgumentParser(
        description='DRAW Deep Recurrent Attentive Writer',
        usage=
            '\n e.g.) python train_draw_noattention.py save_path [option... -g 0 -T 32 etc.]'
            '\n %(prog)s -h',
    )

    p.add_argument('save_path', help='[out] model file')
    p.add_argument('-M', '--model_path', nargs='?', default=None, metavar=None, type=str,
                   help='model_path(to continue saved model)')
    p.add_argument('-I', '--maxepoch', default=maxepoch_def, metavar='INT', type=int,
                   help='the number of training epoch (default: %d)' % maxepoch_def)
    p.add_argument('-B', '--batchsize', default=batchsize_def, metavar='INT', type=int,
                   help='minibatch size (default: %d)' % batchsize_def)
    p.add_argument('-L', '--lr', default=lr_def, metavar='FLOAT', type=float,
                   help='learning rate (default: %f)' % lr_def)
    p.add_argument('-W', '--weightdecay', default=wdecay_def, metavar='FLOAT', type=float,
                   help='weight decay (default: %f)' % wdecay_def)
    p.add_argument('-g', '--gpu', default=gpu_def, metavar='INT', type=int,
                   help='gpu mode (-1:CPU 0:GPU) (default: %d)' % gpu_def)
    p.add_argument('-Ve', '--vae_enc', default=vae_H_enc_def, metavar='INT', type=int,
                   help='VAE encoder hidden size (default: %d)' % vae_H_enc_def)
    p.add_argument('-Vz', '--vae_z', default=vae_z_dim_def, metavar='INT', type=int,
                   help='VAE latent z dimension size (default: %d)' % vae_z_dim_def)
    p.add_argument('-Vd', '--vae_dec', default=vae_H_dec_def, metavar='INT', type=int,
                   help='VAE decoder hidden size (default: %d)' % vae_H_dec_def)
    p.add_argument('-Pr', '--Read_patch', default=vae_Readpatch_def, metavar='INT', type=int,
                   help='Read attention window size (default: %d)' % vae_Readpatch_def)
    p.add_argument('-Pw', '--Write_patch', default=vae_Writepatch_def, metavar='INT', type=int,
                   help='Write attention window size (default: %d)' % vae_Writepatch_def)
    p.add_argument('-T', '--times', default=times_def, metavar='INT', type=int,
                   help='DRAW sequence length (default: %d)' % times_def)
    p.add_argument('-A', '--alpha', default=alpha_def, metavar='FLOAT', type=float,
                   help='VAE KL coefficient (default: %f)' % alpha_def)

    args = p.parse_args()

    # Validate the parsed arguments.  The error messages now name the
    # option that actually failed (previously --maxepoch reported
    # '--epoch', --Read_patch reported '--vae_enc' and --Write_patch
    # reported '--vae_dec').
    try:
        if (args.maxepoch < 1):
            raise ValueError('you must set --maxepoch >= 1')
        if (args.batchsize < 1):
            raise ValueError('you must set --batchsize >= 1')
        if (args.lr < 0):
            raise ValueError('you must set --lr >= 0')
        if (args.weightdecay < 0):
            raise ValueError('you must set --weightdecay >= 0')
        if (args.vae_enc < 1):
            raise ValueError('you must set --vae_enc >= 1')
        if (args.vae_dec < 1):
            raise ValueError('you must set --vae_dec >= 1')
        if (args.vae_z < 1):
            raise ValueError('you must set --vae_z >= 1')
        if (args.times < 2):
            raise ValueError('you must set --times >= 2')
        if (args.alpha < 0):
            raise ValueError('you must set --alpha >= 0')
        if (args.Read_patch < 1):
            raise ValueError('you must set --Read_patch >= 1')
        if (args.Write_patch < 1):
            raise ValueError('you must set --Write_patch >= 1')
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()

    return args
class Runner:
    """Main crawler API.

    Usage:
        `./run crawl [OPTIONS...]`  -- crawl the website and store the
        articles in the attached database.
        `./run search [OPTIONS...] [EXPRESSION]` -- search for a given
        article with the given search expression.
    """

    def __init__(self):
        # setup the parse and its initial values
        self._parser = ArgumentParser(
            prog=os.path.basename(__file__),
            description='Utility for parsing and searching articles on the '
                        '\"theguardian.com.au\" website')
        self._configure_parser()
        # No sub-command given at all: show usage and abort the process
        # immediately (before parse_args would be reached).
        if len(sys.argv) < 2:
            self._parser.print_usage()
            exit(1)
        # Settings forwarded to the spider via scrapy settings (see crawl()).
        self._custom_guardian_config = {}
        # Parsed CLI options as a plain dict; populated in run().
        self._args = {}

    def run(self):
        """Parse the command line and dispatch to the chosen sub-command."""
        # Parse command line args
        parsed_args = self._parser.parse_args()
        self._args = vars(parsed_args)
        # configure the program according to user args
        self._process_arguments()
        # default function according to sub_parser is stored in the func
        # attribute on the Namespace (set via set_defaults in
        # _configure_parser).
        parsed_args.func()
        self._custom_guardian_config = {}  # type: dict

    def search(self):
        """Search the database for the terms given as the `query` argument
        and interactively display a chosen result."""
        # initialize the database (import has the side effect of setting it up)
        # noinspection PyUnresolvedReferences
        import scrapytest.db
        log.debug("searching for articles with: '{}'".format(
            self._args['query']))
        searcher = ArticleSearcher(self._args)
        results = searcher.run()  # type: list[Article]
        result_arr = []
        index = 0
        print("Displaying results from highest rank to lowest:\n")
        for result in results:
            index += 1
            print("{:2d}) {}".format(index, result.title))
            result_arr.append(result)
        # Ask the user for a selection until a valid input is given.
        if index > 0:
            while True:
                try:
                    number = int(
                        input("\nplease choose a number between 1 and {}: ".
                              format(index)))
                    # Out-of-range selections are funneled into the same
                    # ValueError path as non-numeric input.
                    if number < 1 or number > index:
                        raise ValueError
                    else:
                        # Leave some room for viewing clarity then print the article.
                        print("\n")
                        print_article(result_arr[number - 1])
                        break
                except ValueError:
                    print("invalid choice!")
        else:
            print("search yielded no results")

    def crawl(self):
        """Crawl through the site and either save the results to a database
        or text files."""
        # setup settings (imports deferred so scrapy is only required for
        # the crawl sub-command)
        from scrapy.settings import Settings
        from scrapytest.spiders import GuardianNewsSpider
        settings = Settings()
        settings.set("USER_AGENT", config['crawler_user_agent'])
        settings.set("LOG_LEVEL", self._args['log_level'])
        settings.set('custom_guardian_config', self._custom_guardian_config)
        crawler = CrawlerProcess(settings=settings)
        crawler.crawl(GuardianNewsSpider)
        crawler.start()
        # NOTE(review): CrawlerProcess.start() blocks until crawling is
        # finished; join() here is presumably a harmless extra wait — confirm.
        crawler.join()

    def _process_arguments(self):
        """Process and save the effects of command line arguments."""
        # log level
        logging.basicConfig(filename=self._args['log_file'],
                            level=self._args['log_level'])
        self._custom_guardian_config['LOG_LEVEL'] = self._args['log_level']
        # max_depth is only defined for some sub-commands, hence the check.
        if 'max_depth' in self._args:
            self._custom_guardian_config.update(
                {'max_depth': self._args['max_depth']})

    def _configure_parser(self):
        """Add the command line options to the argument parser."""
        # All parsers have logging: shared options live on a parent parser.
        self._log_parser = ArgumentParser(add_help=False)
        self._log_parser.add_argument(
            '-l', '--log-level',
            help="set level of log output",
            dest='log_level',
            action='store',
            type=str.upper,
            default='INFO',
            choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'])
        self._log_parser.add_argument('-lf', '--log-file',
                                      help="set the file for storing log data",
                                      dest='log_file',
                                      action='store',
                                      type=str,
                                      default='./scrapytest.log')
        self._subparsers = self._parser.add_subparsers()
        # setup search api
        self._search_parser = self._subparsers.add_parser(
            'search',
            help='Search for articles in the database',
            parents=[self._log_parser])  # type: ArgumentParser
        self._search_parser.set_defaults(func=self.search)
        # TODO: Implement functionality
        self._search_parser.add_argument(
            'query',
            help="search query for finding an article",
            nargs='+',
            action='store',
            type=str,
            metavar="QUERY")
        # TODO: add case sensitive search
        # setup crawl api
        self._crawl_parser = self._subparsers.add_parser(
            'crawl',
            help='Crawl through \"theguardian.com.au\" and optionally '
                 'store data',
            parents=[self._log_parser])  # type: ArgumentParser
        self._crawl_parser.set_defaults(func=self.crawl)
        self._crawl_parser.add_argument(
            '--section',
            help="section to search under",
            dest='section',
            nargs='+',
            choices=config['guardian_spider']['collection_paths'])
        # TODO: Implement functionality
        self._crawl_parser.add_argument(
            '--no-save',
            help="do not save results to the database",
            dest='save_crawl_results',
            action='store_false',
            default=True)
        # TODO: Implement functionality
        self._crawl_parser.add_argument('-p', '--print-json',
                                        help="print results in json format",
                                        action='store_true',
                                        dest='print_json')
class Parser(object):
    """Argument parser that can handle arguments with our special
    placeholder, separating our own options from those belonging to the
    command being fixed.
    """

    def __init__(self):
        self._parser = ArgumentParser(prog='thefuck', add_help=False)
        self._add_arguments()

    def _add_arguments(self):
        """Registers every supported option on the underlying parser."""
        add = self._parser.add_argument
        add('-v', '--version',
            action='store_true',
            help="show program's version number and exit")
        add('-a', '--alias',
            nargs='?',
            const=get_alias(),
            help='[custom-alias-name] prints alias for current shell')
        add('--enable-experimental-instant-mode',
            action='store_true',
            help='enable experimental instant mode, use on your own risk')
        add('-h', '--help',
            action='store_true',
            help='show this help message and exit')
        self._add_conflicting_arguments()
        add('-d', '--debug',
            action='store_true',
            help='enable debug output')
        add('--force-command', action='store', help=SUPPRESS)
        add('command',
            nargs='*',
            help='command that should be fixed')

    def _add_conflicting_arguments(self):
        """It's too dangerous to use `-y` and `-r` together."""
        exclusive = self._parser.add_mutually_exclusive_group()
        exclusive.add_argument('-y', '--yes',
                               action='store_true',
                               help='execute fixed command without confirmation')
        exclusive.add_argument('-r', '--repeat',
                               action='store_true',
                               help='repeat on failure')

    def _prepare_arguments(self, argv):
        """Prepares arguments by:

        - removing placeholder and moving arguments after it to beginning,
          we need this to distinguish arguments from `command` with ours;
        - adding `--` before `command`, so our parse would ignore arguments
          of `command`.
        """
        if ARGUMENT_PLACEHOLDER in argv:
            split_at = argv.index(ARGUMENT_PLACEHOLDER)
            return argv[split_at + 1:] + ['--'] + argv[:split_at]
        if argv and not argv[0].startswith('-') and argv[0] != '--':
            return ['--'] + argv
        return argv

    def parse(self, argv):
        """Parses `argv` (including the program name at index 0)."""
        prepared = self._prepare_arguments(argv[1:])
        return self._parser.parse_args(prepared)

    def print_usage(self):
        self._parser.print_usage(sys.stderr)

    def print_help(self):
        self._parser.print_help(sys.stderr)
def which(args, stdin=None, stdout=None, stderr=None):
    """Check whether each argument is a xonsh alias, then whether it is an
    executable on ``$PATH``, and finally return an error code equal to the
    number of misses.

    If the ``-a`` flag is passed, report both the xonsh alias match and the
    ``which`` match for every argument.

    Args:
        args: raw command-line arguments (list of str).
        stdin: unused; kept for the xonsh alias call signature.
        stdout/stderr: streams for normal output / usage and failures.

    Returns:
        0 on full success, -1 when called with no arguments, otherwise the
        number of names that could not be resolved.
    """
    desc = "Parses arguments to which wrapper"
    parser = ArgumentParser('which', description=desc)
    parser.add_argument('args', type=str, nargs='+',
                        help='The executables or aliases to search for')
    parser.add_argument('-a', '--all', action='store_true', dest='all',
                        help='Show all matches in $PATH and xonsh.aliases')
    parser.add_argument('-s', '--skip-alias', action='store_true',
                        help='Do not search in xonsh.aliases', dest='skip')
    parser.add_argument('-V', '--version', action='version',
                        version='{}'.format(_which.__version__),
                        help='Display the version of the python which module '
                             'used by xonsh')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                        help='Print out how matches were located and show '
                             'near misses on stderr')
    parser.add_argument('-p', '--plain', action='store_true', dest='plain',
                        help='Do not display alias expansions or location of '
                             'where binaries are found. This is the '
                             'default behavior, but the option can be used to '
                             'override the --verbose option')
    parser.add_argument('--very-small-rocks', action=AWitchAWitch)
    if ON_WINDOWS:
        parser.add_argument('-e', '--exts', nargs='*', type=str,
                            help='Specify a list of extensions to use instead '
                                 'of the standard list for this system. This can '
                                 'effectively be used as an optimization to, for '
                                 'example, avoid stat\'s of "foo.vbs" when '
                                 'searching for "foo" and you know it is not a '
                                 'VisualBasic script but ".vbs" is on PATHEXT. '
                                 'This option is only supported on Windows',
                            dest='exts')
    if len(args) == 0:
        parser.print_usage(file=stderr)
        return -1
    pargs = parser.parse_args(args)

    # --all implies verbose output for every match.
    if pargs.all:
        pargs.verbose = True

    if ON_WINDOWS:
        if pargs.exts:
            exts = pargs.exts
        else:
            exts = builtins.__xonsh_env__.get('PATHEXT',
                                              ['.COM', '.EXE', '.BAT'])
    else:
        exts = None

    failures = []
    for arg in pargs.args:
        nmatches = 0
        # skip alias check if user asks to skip
        if (arg in builtins.aliases and not pargs.skip):
            if pargs.plain or not pargs.verbose:
                if isinstance(builtins.aliases[arg], list):
                    print(' '.join(builtins.aliases[arg]), file=stdout)
                else:
                    print(arg, file=stdout)
            else:
                print("aliases['{}'] = {}".format(arg, builtins.aliases[arg]),
                      file=stdout)
            nmatches += 1
            if not pargs.all:
                continue
        # which.whichgen gives the nicest 'verbose' output if PATH is taken
        # from os.environ so we temporarily override it with
        # __xonsh_env__['PATH']
        original_os_path = os.environ['PATH']
        os.environ['PATH'] = builtins.__xonsh_env__.detype()['PATH']
        matches = _which.whichgen(arg, exts=exts, verbose=pargs.verbose)
        os.environ['PATH'] = original_os_path
        for abs_name, from_where in matches:
            if ON_WINDOWS:
                # Use list dir to get correct case for the filename
                # i.e. windows is case insensitive but case preserving
                p, f = os.path.split(abs_name)
                f = next(s.name for s in scandir(p)
                         if s.name.lower() == f.lower())
                abs_name = os.path.join(p, f)
                if builtins.__xonsh_env__.get('FORCE_POSIX_PATHS', False):
                    # Fixed: str.replace returns a new string; the previous
                    # code discarded the result, so the option had no effect.
                    abs_name = abs_name.replace(os.sep, os.altsep)
            if pargs.plain or not pargs.verbose:
                print(abs_name, file=stdout)
            else:
                print('{} ({})'.format(abs_name, from_where), file=stdout)
            nmatches += 1
            if not pargs.all:
                break
        if not nmatches:
            failures.append(arg)
    if len(failures) == 0:
        return 0
    else:
        print('{} not in $PATH'.format(', '.join(failures)),
              file=stderr, end='')
        if not pargs.skip:
            print(' or xonsh.builtins.aliases', file=stderr, end='')
        print('', end='\n')
        return len(failures)
def parse_args():
    """Parse command-line options for the vehicle-detection pipeline.

    Exactly one of -t/--train or -v/--videoin is required.  After parsing,
    the function derives `args.train` (bool) and normalizes `trainsize`
    ('all' -> -1, otherwise int).  When loading a classifier without an
    explicit scaler, the scaler filename is derived from the classifier
    filename by replacing its extension with '_scaler.pkl'.

    Returns:
        argparse.Namespace: the processed options.

    Exits (after printing usage) when -v is given without -c.
    """
    parser = ArgumentParser()
    ex_group = parser.add_mutually_exclusive_group(required=True)
    ex_group.add_argument(
        '-t', '--train', dest='trainsize',
        help="""Include the training step and use TRAIN value as the number
        of _notcar_ samples to use for training on. Specify 'all' to train
        on all available data. If not including training, must specify a
        classifier to load from disk (a previously pickled one) using the
        `-c` switch. The number of car samples loaded for training will be
        porportional to the number of notcar samples specified here.""")
    ex_group.add_argument('-v', '--videoin', dest='input_video',
                          help="""The input video file""")
    parser.add_argument(
        '-0', '--t0', dest='video_start',
        help="""T0 -- time in seconds to start the video from""")
    parser.add_argument('-1', '--t1', dest='video_end',
                        help="""T1 -- time in seconds to end the video at""")
    parser.add_argument(
        '-s', '--save', dest='save_file',
        help="""Where to save the SVM model back to disk (if training)""")
    parser.add_argument('-o', '--videoout', dest='output_video',
                        help="""The video output file""")
    parser.add_argument(
        '-c', '--classifier', dest='clf_fname',
        help="""Where to find a previously pickled SVM file to use if not
        training""")
    # Fixed typo in help text: 'StandadrScaler' -> 'StandardScaler'.
    parser.add_argument(
        '-d', '--stdscaler', dest='scaler_fname',
        help="""Where to find a previously pickled StandardScaler for the
        SVM""")
    parser.add_argument(
        '-g', '--debug', dest='debug', action='store_true',
        help="""Debug mode - things like include all rectangles to output
        video and possibly print more logs.""")

    args = parser.parse_args()

    if args.trainsize is None:
        # Video mode: a previously trained classifier is mandatory.
        args.train = False
        if args.clf_fname is None:
            parser.print_usage()
            sys.exit()
        if args.scaler_fname is None:
            # Derive the scaler file from the classifier file by swapping
            # the extension.  Fixed: joining with '.' (not '') preserves
            # interior dots, e.g. 'my.model.pkl' -> 'my.model_scaler.pkl'
            # (previously 'mymodel_scaler.pkl').
            args.scaler_fname = '.'.join(
                args.clf_fname.split('.')[:-1]) + '_scaler.pkl'
    else:
        args.train = True
        if args.trainsize == 'all':
            args.trainsize = -1  # sentinel: use every available sample
        else:
            args.trainsize = int(args.trainsize)
        if args.save_file is None:
            print('!!! WARNING !!! Trained model will not be stored to disk.')

    return args
def main():
    """Main routine for Noriben: parses arguments, then either re-analyzes
    an existing PML/CSV capture or launches a fresh Procmon capture
    session, saving a text report and a CSV timeline.

    Arguments:
        None (options are read from the command line)
    Results:
        None (writes report/timeline files; exits on fatal errors)
    """
    # Module-level state mutated by the selected options.
    global generalize_paths
    global timeout_seconds
    global yara_folder
    global use_pmc
    global debug

    print('--===[ Noriben v%s ]===--' % __VERSION__)
    print('--===[ @bbaskin ]===--\r\n')

    parser = ArgumentParser()
    parser.add_argument(
        '-c', '--csv',
        help='Re-analyze an existing Noriben CSV file [input file]',
        required=False)
    parser.add_argument(
        '-p', '--pml',
        help='Re-analyze an existing Noriben PML file [input file]',
        required=False)
    parser.add_argument(
        '-f', '--filter',
        help='Specify alternate Procmon Filter PMC [input file]',
        required=False)
    parser.add_argument('-t', '--timeout',
                        help='Number of seconds to collect activity',
                        required=False, type=int)
    parser.add_argument('--output', help='Folder to store output files',
                        required=False)
    parser.add_argument('--yara', help='Folder containing YARA rules',
                        required=False)
    parser.add_argument(
        '--generalize',
        dest='generalize_paths',
        default=False,
        action='store_true',
        help='Generalize file paths to their environment variables. Default: %s'
             % generalize_paths,
        required=False)
    parser.add_argument('--cmd', help='Command line to execute (in quotes)',
                        required=False)
    parser.add_argument('-d', dest='debug', action='store_true',
                        help='Enable debug tracebacks', required=False)
    args = parser.parse_args()

    # Accumulators filled by parse_csv(): human-readable report lines and
    # timeline CSV rows.
    report = list()
    timeline = list()

    if args.debug:
        debug = True

    # Check to see if string generalization is wanted
    if args.generalize_paths:
        generalize_paths = True
    if generalize_paths:
        generalize_vars_init()

    # Check for a valid filter file; fall back to the bundled default PMC
    # when the user-supplied one does not exist.
    if args.filter:
        if file_exists(args.filter):
            pmc_file = args.filter
        else:
            pmc_file = 'ProcmonConfiguration.PMC'
    else:
        pmc_file = 'ProcmonConfiguration.PMC'
    if not file_exists(pmc_file):
        use_pmc = False
        print('[!] Filter file %s not found. Continuing without filters.'
              % pmc_file)
    else:
        use_pmc = True
        print('[*] Using filter file: %s' % pmc_file)

    # Find a valid procmon executable.
    procmonexe = check_procmon()
    if not procmonexe:
        print('[!] Unable to find Procmon (%s) in path.' % procmon)
        sys.exit(1)

    # Check to see if specified output folder exists. If not, make it.
    # This only works one path deep. In future, may make it recursive.
    if args.output:
        output_dir = args.output
        if not os.path.exists(output_dir):
            try:
                os.mkdir(output_dir)
            except WindowsError:
                print('[!] Unable to create directory: %s' % output_dir)
                sys.exit(1)
    else:
        output_dir = ''

    # Check to see if specified YARA folder exists
    if args.yara:
        yara_folder = args.yara
        # Ensure a trailing backslash so later concatenation forms paths.
        if not yara_folder[-1] == '\\':
            yara_folder += '\\'
        if not os.path.exists(yara_folder):
            print('[!] YARA rule path not found: %s' % yara_folder)
            yara_folder = ''

    # Check if user-specified to rescan a PML
    if args.pml:
        if file_exists(args.pml):
            # Reparse an existing PML: convert to CSV, parse, save outputs.
            csv_file = output_dir + os.path.splitext(args.pml)[0] + '.csv'
            txt_file = output_dir + os.path.splitext(args.pml)[0] + '.txt'
            timeline_file = output_dir + os.path.splitext(
                args.pml)[0] + '_timeline.csv'
            process_PML_to_CSV(procmonexe, args.pml, pmc_file, csv_file)
            if not file_exists(csv_file):
                print('[!] Error detected. Could not create CSV file: %s'
                      % csv_file)
                sys.exit(1)
            parse_csv(csv_file, report, timeline)
            print('[*] Saving report to: %s' % txt_file)
            codecs.open(txt_file, 'w', 'utf-8').write('\r\n'.join(report))
            print('[*] Saving timeline to: %s' % timeline_file)
            codecs.open(timeline_file, 'w', 'utf-8').write(
                '\r\n'.join(timeline))
            open_file_with_assoc(txt_file)
            sys.exit()
        else:
            print('[!] PML file does not exist: %s\n' % args.pml)
            parser.print_usage()
            sys.exit(1)

    # Check if user-specified to rescan a CSV
    if args.csv:
        if file_exists(args.csv):
            # Reparse an existing CSV: no PML conversion step needed.
            txt_file = os.path.splitext(args.csv)[0] + '.txt'
            timeline_file = os.path.splitext(args.csv)[0] + '_timeline.csv'
            parse_csv(args.csv, report, timeline)
            print('[*] Saving report to: %s' % txt_file)
            codecs.open(txt_file, 'w', 'utf-8').write('\r\n'.join(report))
            print('[*] Saving timeline to: %s' % timeline_file)
            codecs.open(timeline_file, 'w', 'utf-8').write(
                '\r\n'.join(timeline))
            open_file_with_assoc(txt_file)
            sys.exit()
        else:
            parser.print_usage()
            sys.exit(1)

    if args.timeout:
        timeout_seconds = args.timeout

    if args.cmd:
        exe_cmdline = args.cmd
    else:
        exe_cmdline = ''

    # Start main data collection and processing
    print('[*] Using procmon EXE: %s' % procmonexe)
    session_id = get_session_name()
    pml_file = output_dir + 'Noriben_%s.pml' % session_id
    csv_file = output_dir + 'Noriben_%s.csv' % session_id
    txt_file = output_dir + 'Noriben_%s.txt' % session_id
    timeline_file = output_dir + 'Noriben_%s_timeline.csv' % session_id
    print('[*] Procmon session saved to: %s' % pml_file)

    print('[*] Launching Procmon ...')
    launch_procmon_capture(procmonexe, pml_file, pmc_file)

    if exe_cmdline:
        print('[*] Launching command line: %s' % exe_cmdline)
        subprocess.Popen(exe_cmdline)
    else:
        print('[*] Procmon is running. Run your executable now.')

    if timeout_seconds:
        print('[*] Running for %d seconds. Press Ctrl-C to stop logging early.'
              % timeout_seconds)
        # Print a small progress indicator, for those REALLY long sleeps.
        try:
            for i in range(timeout_seconds):
                progress = (100 / timeout_seconds) * i
                sys.stdout.write('\r%d%% complete' % progress)
                sys.stdout.flush()
                sleep(1)
        except KeyboardInterrupt:
            pass
    else:
        print('[*] When runtime is complete, press CTRL+C to stop logging.')
        try:
            while True:
                sleep(10)
        except KeyboardInterrupt:
            pass

    print('\n[*] Termination of Procmon commencing... please wait')
    terminate_procmon(procmonexe)
    print('[*] Procmon terminated')

    if not file_exists(pml_file):
        print('[!] Error creating PML file!')
        sys.exit(1)

    # PML created, now convert it to a CSV for parsing
    process_PML_to_CSV(procmonexe, pml_file, pmc_file, csv_file)
    if not file_exists(csv_file):
        print('[!] Error detected. Could not create CSV file: %s' % csv_file)
        sys.exit(1)

    # Process CSV file, results in 'report' and 'timeline' output lists
    parse_csv(csv_file, report, timeline)
    print('[*] Saving report to: %s' % txt_file)
    codecs.open(txt_file, 'w', 'utf-8').write('\r\n'.join(report))
    print('[*] Saving timeline to: %s' % timeline_file)
    codecs.open(timeline_file, 'w', 'utf-8').write('\r\n'.join(timeline))
    open_file_with_assoc(txt_file)
def get_parser():
    """Build the argument parser for this script.

    Two sub-commands select the run mode (stored in ``run_type``):
      * ``point`` -- create CLM datasets for a single point (lat/lon).
      * ``reg``   -- create CLM datasets for a rectangular region.

    Returns:
        argparse.ArgumentParser: the fully configured parser.
    """
    parser = ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Show the full help text instead of the short usage line on errors.
    parser.print_usage = parser.print_help

    subparsers = parser.add_subparsers(
        help='Two possible ways to run this script, either:', dest='run_type')
    pt_parser = subparsers.add_parser('point',
                                      help='Run script for a single point.')
    rg_parser = subparsers.add_parser('reg', help='Run script for a region.')

    # ---- single-point options ----
    pt_parser.add_argument(
        '--lat',
        help='Single point latitude. [default: %(default)s]',
        action="store", dest="plat", required=False, type=plat_type,
        default=42.5)
    pt_parser.add_argument(
        '--lon',
        help='Single point longitude. [default: %(default)s]',
        action="store", dest="plon", required=False, type=plon_type,
        default=287.8)
    pt_parser.add_argument('--site',
                           help='Site name or tag. [default: %(default)s]',
                           action="store", dest="site_name", required=False,
                           type=str, default='')
    pt_parser.add_argument(
        '--create_domain',
        help='Flag for creating CLM domain file at single point. [default: %(default)s]',
        action="store", dest="create_domain", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    pt_parser.add_argument(
        '--create_surface',
        help='Flag for creating surface data file at single point. [default: %(default)s]',
        action="store", dest="create_surfdata", type=str2bool, nargs='?',
        const=True, required=False, default=True)
    pt_parser.add_argument(
        '--create_landuse',
        help='Flag for creating landuse data file at single point. [default: %(default)s]',
        action="store", dest="create_landuse", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    pt_parser.add_argument(
        '--create_datm',
        help='Flag for creating DATM forcing data at single point. [default: %(default)s]',
        action="store", dest="create_datm", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    pt_parser.add_argument(
        '--datm_syr',
        help='Start year for creating DATM forcing at single point. [default: %(default)s]',
        action="store", dest="datm_syr", required=False, type=int,
        default=1901)
    pt_parser.add_argument(
        '--datm_eyr',
        help='End year for creating DATM forcing at single point. [default: %(default)s]',
        action="store", dest="datm_eyr", required=False, type=int,
        default=2014)
    pt_parser.add_argument(
        '--crop',
        help='Create datasets using the extensive list of prognostic crop types. [default: %(default)s]',
        action="store_true", dest="crop_flag", default=False)
    pt_parser.add_argument('--dompft',
                           help='Dominant PFT type . [default: %(default)s] ',
                           action="store", dest="dom_pft", type=int, default=7)
    pt_parser.add_argument(
        '--no-unisnow',
        help='Turn off the flag for create uniform snowpack. [default: %(default)s]',
        action="store_false", dest="uni_snow", default=True)
    pt_parser.add_argument(
        '--no-overwrite_single_pft',
        help='Turn off the flag for making the whole grid 100%% single PFT. [default: %(default)s]',
        action="store_false", dest="overwrite_single_pft", default=True)
    # Fixed: these two previously used type=bool, for which any non-empty
    # string (including 'False') parses as True; use str2bool like the
    # other boolean flags above.
    pt_parser.add_argument(
        '--zero_nonveg',
        help='Set all non-vegetation landunits to zero. [default: %(default)s]',
        action="store", dest="zero_nonveg", type=str2bool, nargs='?',
        const=True, default=True)
    pt_parser.add_argument(
        '--no_saturation_excess',
        help='Turn off the flag for saturation excess. [default: %(default)s]',
        action="store", dest="no_saturation_excess", type=str2bool, nargs='?',
        const=True, default=True)
    pt_parser.add_argument(
        '--outdir',
        help='Output directory. [default: %(default)s]',
        action="store", dest="out_dir", type=str,
        default="/glade/scratch/" + myname + "/single_point/")

    # ---- regional options ----
    rg_parser.add_argument(
        '--lat1',
        help='Region start latitude. [default: %(default)s]',
        action="store", dest="lat1", required=False, type=plat_type,
        default=-40)
    rg_parser.add_argument('--lat2',
                           help='Region end latitude. [default: %(default)s]',
                           action="store", dest="lat2", required=False,
                           type=plat_type, default=15)
    rg_parser.add_argument(
        '--lon1',
        help='Region start longitude. [default: %(default)s]',
        action="store", dest="lon1", required=False, type=plon_type,
        default=275.)
    rg_parser.add_argument('--lon2',
                           help='Region end longitude. [default: %(default)s]',
                           action="store", dest="lon2", required=False,
                           type=plon_type, default=330.)
    rg_parser.add_argument('--reg',
                           help='Region name or tag. [default: %(default)s]',
                           action="store", dest="reg_name", required=False,
                           type=str, default='')
    rg_parser.add_argument(
        '--create_domain',
        help='Flag for creating CLM domain file for a region. [default: %(default)s]',
        action="store", dest="create_domain", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    rg_parser.add_argument(
        '--create_surface',
        help='Flag for creating surface data file for a region. [default: %(default)s]',
        action="store", dest="create_surfdata", type=str2bool, nargs='?',
        const=True, required=False, default=True)
    rg_parser.add_argument(
        '--create_landuse',
        help='Flag for creating landuse data file for a region. [default: %(default)s]',
        action="store", dest="create_landuse", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    rg_parser.add_argument(
        '--create_datm',
        help='Flag for creating DATM forcing data for a region. [default: %(default)s]',
        action="store", dest="create_datm", type=str2bool, nargs='?',
        const=True, required=False, default=False)
    rg_parser.add_argument(
        '--datm_syr',
        help='Start year for creating DATM forcing for a region. [default: %(default)s]',
        action="store", dest="datm_syr", required=False, type=int,
        default=1901)
    rg_parser.add_argument(
        '--datm_eyr',
        help='End year for creating DATM forcing for a region. [default: %(default)s]',
        action="store", dest="datm_eyr", required=False, type=int,
        default=2014)
    rg_parser.add_argument(
        '--crop',
        help='Create datasets using the extensive list of prognostic crop types. [default: %(default)s]',
        action="store_true", dest="crop_flag", default=False)
    rg_parser.add_argument('--dompft',
                           help='Dominant PFT type . [default: %(default)s] ',
                           action="store", dest="dom_pft", type=int, default=7)
    rg_parser.add_argument(
        '--outdir',
        help='Output directory. [default: %(default)s]',
        action="store", dest="out_dir", type=str,
        default="/glade/scratch/" + myname + "/regional/")

    return parser
# Remaining xlsx2csv options; `parser` is either an argparse or an optparse
# parser depending on the `argparser` feature flag checked below.
parser.add_argument("--skipemptycolumns", dest="skip_trailing_columns", default=False,
                    action="store_true", help="skip trailing empty columns")
parser.add_argument("-p", "--sheetdelimiter", dest="sheetdelimiter", default="--------",
                    help="sheet delimiter used to separate sheets, pass '' if you do not need delimiter, or 'x07' "
                         "or '\\f' for form feed (default: '--------')")
parser.add_argument("-q", "--quoting", dest="quoting", default="minimal",
                    help="quoting - fields quoting in csv, 'none' 'minimal' 'nonnumeric' or 'all' (default: minimal)")
parser.add_argument("-s", "--sheet", dest="sheetid", default=1, type=inttype,
                    help="sheet number to convert")

if argparser:
    # argparse path: positionals were registered on the parser elsewhere.
    options = parser.parse_args()
else:
    # optparse path: parse_args returns (options, positional_args) and the
    # positionals must be validated and attached by hand.
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_usage()
        sys.stderr.write("error: too few arguments" + os.linesep)
        sys.exit(1)
    options.infile = args[0]
    # Old-style conditional: args[1] if present else None.
    options.outfile = len(args) > 1 and args[1] or None

# Normalize the field delimiter: single characters pass through; 'tab' and
# 'comma' are named aliases; 'xNN' denotes a character by decimal code
# (NOTE(review): int() here parses base 10, despite the hex-looking 'x'
# prefix -- confirm intended).
if len(options.delimiter) == 1:
    pass
elif options.delimiter == 'tab' or options.delimiter == '\\t':
    options.delimiter = '\t'
elif options.delimiter == 'comma':
    options.delimiter = ','
elif options.delimiter[0] == 'x':
    options.delimiter = chr(int(options.delimiter[1:]))
else:
    sys.stderr.write("error: invalid delimiter\n")
def main():
    """Wire zmq IO servers (and optionally a host ethernet server) into a
    virtual ethernet hub, then interactively read interrupt numbers from
    stdin and fire them until the user hits Ctrl-C.

    Side effects: binds zmq ports, starts server threads, blocks on stdin.
    """
    from argparse import ArgumentParser
    p = ArgumentParser()
    p.add_argument('-r', '--rx_ports', nargs='+', default=[5556, 5558],
                   help='Port numbers to receive zmq messages for IO on')
    # NOTE(review): "lenght" / "Interace" / "Recieving" typos below are in
    # user-facing help text; left byte-identical here, fix separately.
    p.add_argument('-t', '--tx_ports', nargs='+', default=[5555, 5557],
                   help='Port numbers to send IO messages via zmq, lenght must match --rx_ports')
    p.add_argument('-i', '--interface', required=False, default=None,
                   help='Ethernet Interace to echo data on')
    p.add_argument('-p', '--enable_host_rx', required=False, default=False,
                   action='store_true',
                   help='Enable Recieving data from host interface, requires -i')
    args = p.parse_args()

    # rx/tx ports are paired one-to-one below, so the lists must line up.
    if len(args.rx_ports) != len(args.tx_ports):
        print("Number of rx_ports and number of tx_ports must match")
        p.print_usage()
        # BUG FIX: quit() is injected by the `site` module and may be absent
        # (python -S, frozen apps); raise SystemExit directly — same effect.
        raise SystemExit(-1)

    logging.basicConfig()
    # BUG FIX: this assignment was commented out while `log` is still used
    # below, which raised NameError at runtime.
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)

    hub = ViruatalEthHub()
    if args.interface is not None:
        host_eth = HostEthernetServer(args.interface, args.enable_host_rx)
        hub.add_server(host_eth)
        host_eth.start()

    interrupter = None
    for idx, rx_port in enumerate(args.rx_ports):
        print(idx)
        server = IOServer(rx_port, args.tx_ports[idx])
        hub.add_server(server)
        if idx == 0:
            # The first IO server doubles as the interrupt target.
            interrupter = SendInterrupt(server)
        server.start()

    time.sleep(2)  # give the server threads a moment to come up
    try:
        while True:
            intr = input("ISR Num:")
            try:
                intr = int(intr)
            except ValueError:
                # BUG FIX: a non-numeric entry used to crash the loop with an
                # uncaught ValueError; re-prompt instead.
                continue
            interrupter.trigger_interrupt(intr)
    except KeyboardInterrupt:
        pass

    log.info("Shutting Down")
    hub.shutdown()
def _parse_args_and_run_subcommand(argv):
    """Build the full anaconda-project CLI, parse ``argv`` and dispatch.

    Each subparser registers its handler via ``set_defaults(main=...)``;
    after parsing, ``args.main(args)`` runs the chosen subcommand and its
    return value becomes this function's return value (process exit code).
    ``argv`` is expected to be the full ``sys.argv``-style list (argv[0] is
    the program name and is skipped when parsing).
    """
    parser = ArgumentParser(prog="anaconda-project",
                            description="Actions on projects (runnable projects).")
    subparsers = parser.add_subparsers(help="Sub-commands")

    parser.add_argument('-v', '--version', action='version', version=version)
    parser.add_argument('--verbose', action='store_true', default=False,
                        help="show verbose debugging details")

    # Shared option builders — many subcommands take the same flags.
    def add_directory_arg(preset):
        # --directory: where anaconda-project.yml lives.
        preset.add_argument('--directory',
                            metavar='PROJECT_DIR',
                            default='.',
                            help="Project directory containing anaconda-project.yml (defaults to current directory)")

    def add_env_spec_arg(preset):
        # --env-spec: select one env spec by name (None = project default).
        preset.add_argument('--env-spec',
                            metavar='ENVIRONMENT_SPEC_NAME',
                            default=None,
                            action='store',
                            help="An environment spec name from anaconda-project.yml")

    def add_prepare_args(preset, include_command=True):
        # Options common to commands that "prepare" the project first.
        add_directory_arg(preset)
        add_env_spec_arg(preset)
        all_supported_modes = list(_all_ui_modes)
        # we don't support "ask about every single thing" mode yet.
        all_supported_modes.remove(UI_MODE_TEXT_ASK_QUESTIONS)
        preset.add_argument('--mode',
                            metavar='MODE',
                            default=UI_MODE_TEXT_DEVELOPMENT_DEFAULTS_OR_ASK,
                            choices=_all_ui_modes,
                            action='store',
                            help="One of " + ", ".join(_all_ui_modes))
        if include_command:
            preset.add_argument('--command',
                                metavar='COMMAND_NAME',
                                default=None,
                                action='store',
                                help="A command name from anaconda-project.yml (env spec for this command will be used)")

    def add_env_spec_name_arg(preset, required):
        # -n/--name: positional-style env spec selector for env-spec commands.
        preset.add_argument('-n', '--name',
                            metavar='ENVIRONMENT_SPEC_NAME',
                            required=required,
                            action='store',
                            help="Name of the environment spec from anaconda-project.yml")

    # --- project lifecycle subcommands ---
    preset = subparsers.add_parser('init',
                                   help="Initialize a directory with default project configuration")
    add_directory_arg(preset)
    preset.add_argument('--with-anaconda-package',
                        action='store_true',
                        help="Add the 'anaconda' metapackage to the packages list.",
                        default=None)
    preset.add_argument('--empty-environment',
                        action='store_true',
                        help="[DEPRECATED] Do not add the default package set to the environment.",
                        default=None)
    preset.add_argument('-y', '--yes',
                        action='store_true',
                        help="Assume yes to all confirmation prompts",
                        default=None)
    preset.set_defaults(main=init.main)

    preset = subparsers.add_parser('run',
                                   help="Run the project, setting up requirements first")
    add_prepare_args(preset, include_command=False)
    preset.add_argument('command',
                        metavar='COMMAND_NAME',
                        default=None,
                        nargs='?',
                        help="A command name from anaconda-project.yml")
    # REMAINDER: everything after the command name is passed through verbatim.
    preset.add_argument('extra_args_for_command',
                        metavar='EXTRA_ARGS_FOR_COMMAND',
                        default=None,
                        nargs=REMAINDER)
    preset.set_defaults(main=run.main)

    preset = subparsers.add_parser('prepare',
                                   help="Set up the project requirements, but does not run the project")
    preset.add_argument('--all', action='store_true',
                        help="Prepare all environments", default=None)
    preset.add_argument('--refresh', action='store_true',
                        help='Remove and recreate the environment', default=None)
    add_prepare_args(preset)
    preset.set_defaults(main=prepare.main)

    preset = subparsers.add_parser('clean',
                                   help="Removes generated state (stops services, deletes environment files, etc)")
    add_directory_arg(preset)
    preset.set_defaults(main=clean.main)

    # 'activate' is hidden while in beta test mode.
    if not anaconda_project._beta_test_mode:
        preset = subparsers.add_parser('activate',
                                       help="Set up the project and output shell export commands reflecting the setup")
        add_prepare_args(preset)
        preset.set_defaults(main=activate.main)

    # --- archive / transfer subcommands ---
    preset = subparsers.add_parser('archive',
                                   help="Create a .zip, .tar.gz, or .tar.bz2 archive with project files in it")
    add_directory_arg(preset)
    preset.add_argument('filename', metavar='ARCHIVE_FILENAME')
    preset.add_argument('--pack-envs',
                        action='store_true',
                        help='Experimental: Package env_specs into the archive'
                        ' using conda-pack')
    preset.set_defaults(main=archive.main)

    preset = subparsers.add_parser('unarchive',
                                   help="Unpack a .zip, .tar.gz, or .tar.bz2 archive with project files in it")
    preset.add_argument('filename', metavar='ARCHIVE_FILENAME')
    # NOTE: here 'directory' is positional and optional (may stay None),
    # unlike the --directory option most other subcommands use.
    preset.add_argument('directory',
                        metavar='DESTINATION_DIRECTORY',
                        default=None,
                        nargs='?')
    preset.set_defaults(main=unarchive.main)

    preset = subparsers.add_parser('upload',
                                   help="Upload the project to Anaconda Cloud")
    add_directory_arg(preset)
    preset.add_argument('-p', '--private',
                        action='store_true',
                        help="Upload a private project",
                        default=None)
    preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use')
    preset.add_argument('-t', '--token',
                        metavar='TOKEN',
                        help='Auth token or a path to a file containing a token')
    preset.add_argument('-u', '--user',
                        metavar='USERNAME',
                        help='User account, defaults to the current user')
    preset.add_argument('--suffix',
                        metavar='SUFFIX',
                        help='Project archive suffix (.tar.gz, .tar.bz2, .zip)',
                        default='.tar.bz2',
                        choices=['.tar.gz', '.tar.bz2', '.zip'])
    preset.set_defaults(main=upload.main)

    preset = subparsers.add_parser('download',
                                   help="Download the project from Anaconda Cloud")
    add_directory_arg(preset)
    preset.add_argument('project',
                        help='The project to download as <username>/<projectname>. If <projectname>' +
                        'has spaces inclose everything in quotes "<username>/<project name>".' +
                        'If specified as <projectname> then the logged-in username is used.')
    preset.add_argument('--no-unpack',
                        action='store_true',
                        help='Do not unpack the project archive.')
    preset.add_argument('--parent_dir',
                        default=None,
                        help='Download archive to specific directory, otherwise downloaded to current working directory.')
    preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use')
    preset.add_argument('-t', '--token',
                        metavar='TOKEN',
                        help='Auth token or a path to a file containing a token')
    preset.add_argument('-u', '--user',
                        metavar='USERNAME',
                        help='User account, defaults to the current user')
    preset.set_defaults(main=download.main)

    preset = subparsers.add_parser('dockerize',
                                   help="Build a docker image of the Anaconda Project.")
    add_directory_arg(preset)
    preset.add_argument('-t', '--tag',
                        default=None,
                        help='Tag of the output docker image in the format name:tag. '
                        'Default: "<project-name>:latest", where <project-name> is taken from '
                        'the name tag in the anaconda-project.yml file.')
    preset.add_argument('--command',
                        default='default',
                        help='Select the command to run. If unspecified the "default" command is run.\nThe default command '
                        'is defined as either the command named "default" (if any) or (otherwise) '
                        'the first command specified in the anaconda-project.yml file.')
    preset.add_argument('--builder-image',
                        default='{}:latest'.format(DEFAULT_BUILDER_IMAGE),
                        help='The s2i builder image')
    preset.add_argument('build_args',
                        default=None,
                        nargs="*",
                        help='Optional arguments for the s2i build command. '
                        'See the output of "s2i build --help" for the available arguments. '
                        'It is recommended to include a -- separator before supplying these arguments.')
    preset.set_defaults(main=dockerize.main)

    # --- variable subcommands ---
    preset = subparsers.add_parser('add-variable',
                                   help="Add a required environment variable to the project")
    add_env_spec_arg(preset)
    preset.add_argument('vars_to_add', metavar='VARS_TO_ADD', default=None, nargs=REMAINDER)
    preset.add_argument('--default',
                        metavar='DEFAULT_VALUE',
                        default=None,
                        help='Default value if environment variable is unset')
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_add)

    preset = subparsers.add_parser('remove-variable',
                                   help="Remove an environment variable from the project")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.add_argument('vars_to_remove', metavar='VARS_TO_REMOVE', default=None, nargs=REMAINDER)
    preset.set_defaults(main=variable_commands.main_remove)

    preset = subparsers.add_parser('list-variables',
                                   help="List all variables on the project")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_list)

    preset = subparsers.add_parser('set-variable',
                                   help="Set an environment variable value in anaconda-project-local.yml")
    add_env_spec_arg(preset)
    preset.add_argument('vars_and_values', metavar='VARS_AND_VALUES', default=None, nargs=REMAINDER)
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_set)

    preset = subparsers.add_parser('unset-variable',
                                   help="Unset an environment variable value from anaconda-project-local.yml")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.add_argument('vars_to_unset', metavar='VARS_TO_UNSET', default=None, nargs=REMAINDER)
    preset.set_defaults(main=variable_commands.main_unset)

    # --- download subcommands ---
    preset = subparsers.add_parser('add-download',
                                   help="Add a URL to be downloaded before running commands")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None)
    preset.add_argument('download_url', metavar='DOWNLOAD_URL', default=None)
    preset.add_argument('--filename',
                        help="The name to give the file/folder after downloading it",
                        default=None)
    preset.add_argument('--hash-algorithm',
                        help="Defines which hash algorithm to use",
                        default=None,
                        choices=_hash_algorithms)
    preset.add_argument('--hash-value',
                        help="The expected checksum hash of the downloaded file",
                        default=None)
    preset.set_defaults(main=download_commands.main_add)

    preset = subparsers.add_parser('remove-download',
                                   help="Remove a download from the project and from the filesystem")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None)
    preset.set_defaults(main=download_commands.main_remove)

    preset = subparsers.add_parser('list-downloads',
                                   help="List all downloads on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=download_commands.main_list)

    # --- service subcommands ---
    # Valid service types come from the requirements registry at parse time.
    service_types = RequirementsRegistry().list_service_types()
    service_choices = list(map(lambda s: s.name, service_types))

    def add_service_variable_name(preset):
        preset.add_argument('--variable', metavar='ENV_VAR_FOR_SERVICE_ADDRESS', default=None)

    preset = subparsers.add_parser('add-service',
                                   help="Add a service to be available before running commands")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_service_variable_name(preset)
    preset.add_argument('service_type', metavar='SERVICE_TYPE', default=None, choices=service_choices)
    preset.set_defaults(main=service_commands.main_add)

    preset = subparsers.add_parser('remove-service',
                                   help="Remove a service from the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('variable', metavar='SERVICE_REFERENCE', default=None)
    preset.set_defaults(main=service_commands.main_remove)

    preset = subparsers.add_parser('list-services',
                                   help="List services present in the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=service_commands.main_list)

    # --- environment spec / package subcommands ---
    def add_package_args(preset):
        preset.add_argument('--pip', action='store_true',
                            help='Install the requested packages using pip.')
        preset.add_argument('-c', '--channel',
                            metavar='CHANNEL',
                            action='append',
                            help='Channel to search for packages')
        preset.add_argument('packages', metavar='PACKAGES', default=None, nargs=REMAINDER)

    preset = subparsers.add_parser('add-env-spec',
                                   help="Add a new environment spec to the project")
    add_directory_arg(preset)
    add_package_args(preset)
    add_env_spec_name_arg(preset, required=True)
    preset.set_defaults(main=environment_commands.main_add)

    preset = subparsers.add_parser('remove-env-spec',
                                   help="Remove an environment spec from the project")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=True)
    preset.set_defaults(main=environment_commands.main_remove)

    preset = subparsers.add_parser('list-env-specs',
                                   help="List all environment specs for the project")
    add_directory_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_env_specs)

    preset = subparsers.add_parser('export-env-spec',
                                   help="Save an environment spec as a conda environment file")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.add_argument('filename', metavar='ENVIRONMENT_FILE')
    preset.set_defaults(main=environment_commands.main_export)

    preset = subparsers.add_parser('lock',
                                   help="Lock all packages at their current versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_lock)

    preset = subparsers.add_parser('unlock',
                                   help="Remove locked package versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_unlock)

    preset = subparsers.add_parser('update',
                                   help="Update all packages to their latest versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_update)

    preset = subparsers.add_parser('add-packages',
                                   help="Add packages to one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_package_args(preset)
    preset.set_defaults(main=environment_commands.main_add_packages)

    preset = subparsers.add_parser('remove-packages',
                                   help="Remove packages from one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('--pip', action='store_true',
                        help='Uninstall the requested packages using pip.')
    preset.add_argument('packages', metavar='PACKAGE_NAME', default=None, nargs='+')
    preset.set_defaults(main=environment_commands.main_remove_packages)

    preset = subparsers.add_parser('list-packages',
                                   help="List packages for an environment on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_packages)

    # --- platform subcommands ---
    def add_platforms_list(preset):
        preset.add_argument('platforms', metavar='PLATFORM_NAME', default=None, nargs='+')

    preset = subparsers.add_parser('add-platforms',
                                   help="Add platforms to one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_platforms_list(preset)
    preset.set_defaults(main=environment_commands.main_add_platforms)

    preset = subparsers.add_parser('remove-platforms',
                                   help="Remove platforms from one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_platforms_list(preset)
    preset.set_defaults(main=environment_commands.main_remove_platforms)

    preset = subparsers.add_parser('list-platforms',
                                   help="List platforms for an environment on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_platforms)

    # --- command subcommands ---
    def add_command_name_arg(preset):
        preset.add_argument('name', metavar="NAME",
                            help="Command name used to invoke it")

    preset = subparsers.add_parser('add-command',
                                   help="Add a new command to the project")
    add_directory_arg(preset)
    command_choices = list(ALL_COMMAND_TYPES) + ['ask']
    # conda_app_entry is sort of silly and may go away
    command_choices.remove("conda_app_entry")
    preset.add_argument('--type', action="store", choices=command_choices,
                        help="Command type to add")
    add_command_name_arg(preset)
    add_env_spec_arg(preset)
    # --supports-http-options / --no-supports-http-options form a tri-state:
    # set_defaults(..., supports_http_options=None) below means "unspecified".
    preset.add_argument('--supports-http-options',
                        dest='supports_http_options',
                        action="store_true",
                        help="The command supports project's HTTP server options")
    preset.add_argument('--no-supports-http-options',
                        dest='supports_http_options',
                        action="store_false",
                        help=" The command does not support project's HTTP server options")
    preset.add_argument('command', metavar="COMMAND",
                        help="Command line or app filename to add")
    preset.set_defaults(main=command_commands.main, supports_http_options=None)

    preset = subparsers.add_parser('remove-command',
                                   help="Remove a command from the project")
    add_directory_arg(preset)
    add_command_name_arg(preset)
    preset.set_defaults(main=command_commands.main_remove)

    preset = subparsers.add_parser('list-default-command',
                                   help="List only the default command on the project")
    add_directory_arg(preset)
    preset.set_defaults(main=command_commands.main_default)

    preset = subparsers.add_parser('list-commands',
                                   help="List the commands on the project")
    add_directory_arg(preset)
    preset.set_defaults(main=command_commands.main_list)

    # argparse doesn't do this for us for whatever reason
    if len(argv) < 2:
        print("Must specify a subcommand.", file=sys.stderr)
        parser.print_usage(file=sys.stderr)
        return 2  # argparse exits with 2 on bad args, copy that

    try:
        args = parser.parse_args(argv[1:])
    except SystemExit as e:
        # argparse calls sys.exit(); convert that into a return code so the
        # caller controls process exit.
        return e.code

    if args.verbose:
        logger = (logging.getLoggerClass())(name="anaconda_project_verbose")
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(stream=sys.stderr)
        logger.addHandler(handler)
        push_verbose_logger(logger)

    try:
        # '--directory' is used for most subcommands; for unarchive,
        # args.directory is positional and may be None
        if 'directory' in args and args.directory is not None:
            args.directory = os.path.realpath(os.path.abspath(args.directory))
        return args.main(args)
    finally:
        if args.verbose:
            pop_verbose_logger()
def get_arguments():
    """Parse and validate command-line arguments for the NMT script.

    Returns:
        argparse.Namespace holding mode, model prefix, and hyperparameter
        settings; the effective configuration is echoed to stdout first.

    Exits the process (usage + error on stderr, nonzero status) when any
    validation check fails.
    """
    # Default hyperparameter values (overridable from the command line).
    src_vocab = 4000
    trg_vocab = 5000
    embed_size = 512
    hidden_size = 512
    epoch = 30
    minibatch_size = 64
    generation_limit = 32
    dropout = 0.5

    parser = ArgumentParser()
    parser.add_argument("mode", help="'train', 'resume', or 'test'")
    parser.add_argument("model", help='model file prefix')
    parser.add_argument("--gpu", default=-1, metavar='INT', type=int,
                        help='GPU device ID to be used (default: %(default)d [use CPU])')
    parser.add_argument("--src-vocab", default=src_vocab, metavar='INT', type=int,
                        help="source vocabulary size (default: %(default)d)")
    parser.add_argument("--trg-vocab", default=trg_vocab, metavar='INT', type=int,
                        help="target vocabulary size (default: %(default)d)")
    parser.add_argument("--embed", default=embed_size, metavar='INT', type=int,
                        help="embedding layer size (default: %(default)d)")
    parser.add_argument("--hidden", default=hidden_size, metavar='INT', type=int,
                        help="hidden layer size (default: %(default)d)")
    parser.add_argument("--epoch", default=epoch, metavar='INT', type=int,
                        help="number of training epoch (default: %(default)d)")
    parser.add_argument("--minibatch", default=minibatch_size, metavar="INT", type=int,
                        help="minibatch size (default: %(default)d)")
    parser.add_argument("--generation-limit", default=generation_limit, metavar="INT", type=int,
                        help="maximum number of words to be generated for test input (default: %(default)d)")
    # BUG FIX: this option previously referenced the undefined names
    # `dropout_rate` and `flaot`, raising NameError before parsing could run.
    parser.add_argument("--dropout", default=dropout, metavar="FLOAT", type=float,
                        help="dropout rate")

    args = parser.parse_args()

    try:
        if args.mode not in ("train", "resume", "test"):
            raise ValueError("you must set mode = 'train', 'resume', or 'test'")
        # NOTE(review): this rejects the documented default of --gpu
        # (-1 = use CPU); confirm whether CPU runs should be disallowed.
        if args.gpu < 0:
            raise ValueError("you must set --gpu >= 0")
        if args.src_vocab < 1:
            raise ValueError("you must set --src-vocab >= 1")
        if args.trg_vocab < 1:
            raise ValueError("you must set --trg-vocab >= 1")
        if args.embed < 1:
            raise ValueError("you must set --embed >= 1")
        if args.hidden < 1:
            raise ValueError("you must set --hidden >= 1")
        if args.epoch < 1:
            raise ValueError("you must set --epoch >= 1")
        if args.minibatch < 1:
            raise ValueError("you must set --minibatch >= 1")
        if args.generation_limit < 1:
            raise ValueError("you must set --generation-limit >= 1")
        if args.dropout < 0 or args.dropout > 1:
            raise ValueError("you must set --dropout in [0, 1]")
    except Exception as ex:
        parser.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        # BUG FIX: sys.exit() with no argument exits with status 0, hiding
        # the validation failure from callers; exit 1 instead.
        sys.exit(1)

    # Echo the effective configuration.
    for (key, val) in vars(args).items():
        print("%s: %s" % (key, val))

    return args
def main():
    """Entry point for the PassiveTotal CLI.

    Builds one subparser per API area (pdns/whois/ssl/attribute/action/osint),
    dispatches the parsed subcommand to the matching call_* helper, then
    formats the response via write_output() and prints each chunk.
    """
    parser = ArgumentParser(description="PassiveTotal Command Line Client")
    # dest='cmd' makes the chosen subcommand name available as args.cmd.
    subs = parser.add_subparsers(dest='cmd')

    # Passive DNS queries.
    pdns = subs.add_parser('pdns', help="Query passive DNS data")
    pdns.add_argument('--query', '-q', required=True,
                      help="Query for a domain, IP address or wildcard")
    pdns.add_argument('--sources', type=str, default=None,
                      help="CSV string of passive DNS sources", nargs='+')
    pdns.add_argument('--end', '-e', default=None, type=valid_date,
                      help="Filter records up to this end date (YYYY-MM-DD)")
    pdns.add_argument('--start', '-s', default=None, type=valid_date,
                      help="Filter records from this start date (YYYY-MM-DD)")
    pdns.add_argument('--timeout', '-t', default=3,
                      help="Timeout to use for passive DNS source queries")
    # NOTE(review): "resolutons" typo lives in user-facing help text.
    pdns.add_argument('--unique', action="store_true",
                      help="Use this to only get back unique resolutons")
    pdns.add_argument('--format',
                      choices=['json', 'text', 'csv', 'stix', 'table', 'xml'],
                      help="Format of the output from the query")

    # WHOIS queries.
    whois = subs.add_parser('whois', help="Query WHOIS data")
    whois.add_argument('--query', '-q', required=True,
                       help="Query for a domain or IP address")
    whois.add_argument('--field', '-f', type=str, default=None,
                       help="Run a specific query against a WHOIS field")
    whois.add_argument('--compact', action="store_true",
                       help="Show WHOIS record in a compact way")
    whois.add_argument('--format',
                       choices=['json', 'text', 'csv', 'stix', 'table', 'xml'],
                       help="Format of the output from the query")

    # SSL certificate queries.
    ssl = subs.add_parser('ssl', help="Query SSL certificate data")
    ssl.add_argument('--query', '-q', required=True,
                     help="Query for an IP address or SHA-1")
    ssl.add_argument('--field', '-f', type=str, default=None,
                     help="Run a specific query against a certificate field")
    ssl.add_argument('--type', '-t', choices=['search', 'history'],
                     help="Perform a plain search or get history")
    ssl.add_argument('--compact', action="store_true",
                     help="Show SSL record in a compact way")
    ssl.add_argument('--format',
                     choices=['json', 'text', 'csv', 'stix', 'table', 'xml'],
                     help="Format of the output from the query")

    # Host attribute queries (trackers / components).
    attribute = subs.add_parser('attribute', help="Query host attribute data")
    attribute.add_argument('--query', '-q', required=True,
                           help="Query for a domain or IP address")
    attribute.add_argument('--type', '-t', choices=['tracker', 'component'],
                           help="Query tracker data or component data",
                           required=True)
    attribute.add_argument('--format',
                           choices=['json', 'csv', 'table', 'xml'],
                           help="Format of the output from the query")

    # Read/write analyst feedback (tags, classification, flags).
    action = subs.add_parser('action', help="Query and input feedback")
    action.add_argument('--query', '-q', required=True,
                        help="Domain, IP address, Email, SSL certificate")
    action.add_argument('--metadata', action="store_true",
                        help="Get metadata associated with a query")
    action.add_argument('--tags', type=str, default=None,
                        help="Tag values to use in conjunction with an action")
    action.add_argument('--add-tags', action="store_true", help="Add tag values")
    action.add_argument('--remove-tags', action="store_true", help="Remove tag values")
    action.add_argument('--set-tags', action="store_true", help="Set tag values")
    action.add_argument('--classification',
                        choices=['malicious', 'non-malicious', 'suspicious', 'unknown'],
                        help="Classification to apply to the query")
    action.add_argument('--monitor', choices=['true', 'false'],
                        help="Read or write a monitor value")
    action.add_argument('--sinkhole', choices=['true', 'false'],
                        help="Read or write a sinkhole value")
    action.add_argument('--dynamic-dns', choices=['true', 'false'],
                        help="Read or write a dynamic DNS value")
    action.add_argument('--ever-compromised', choices=['true', 'false'],
                        help="Read or write a compromised value")
    action.add_argument('--json', '-j', action="store_true", help="Output as JSON")

    # Open-source intelligence queries.
    osint = subs.add_parser('osint', help="Query OSINT data")
    osint.add_argument('--query', '-q', required=True,
                       help="Query for a domain or IP address")
    osint.add_argument('--format',
                       choices=['json', 'text', 'csv', 'stix', 'table', 'xml'],
                       help="Format of the output from the query")

    # parse_known_args: unrecognized extras are tolerated (and ignored).
    args, unknown = parser.parse_known_args()

    data = None
    try:
        # Dispatch to the API helper matching the chosen subcommand.
        if args.cmd == 'pdns':
            data = call_dns(args)
        elif args.cmd == 'whois':
            data = call_whois(args)
        elif args.cmd == 'ssl':
            data = call_ssl(args)
        elif args.cmd == 'action':
            data = call_actions(args)
        elif args.cmd == 'attribute':
            data = call_attribute(args)
        elif args.cmd == 'osint':
            data = call_osint(args)
        else:
            # No subcommand given.
            parser.print_usage()
            sys.exit(1)
    except ValueError as e:
        parser.print_usage()
        sys.stderr.write('{}\n'.format(str(e)))
        sys.exit(1)

    output = write_output(data, args)
    for item in output:
        print(item + "\n")
def main(argv=None):  # IGNORE:C0111
    """Command line options.

    Parses duply-notify options, copies them into the module-level
    ``globals`` settings object, and then either runs a dbus test or
    hands the remaining command line to run_me(). Returns a process
    exit code (0 on success, 1 on usage error/interrupt, 2 on failure).
    """
    if argv is not None:
        # Extra args are appended to (not replacing) the real sys.argv.
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # Second line of the calling module's docstring serves as the short description.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s Created by Dmitry Nezhevenko on %s. Copyright 2016 Dmitry Nezhevenko. All rights reserved. Licensed under the GPLv2+ Distributed on an "AS IS" basis without warranties or conditions of any kind, either express or implied. USAGE ''' % (program_shortdesc, str(__date__))

    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)

        # notification itself
        icon_opts = parser.add_argument_group('notification', 'notification look')
        icon_opts.add_argument('-t', '--title', action='store', default=None,
                               help='notification title (useful for duply with pre-backup script')
        icon_opts.add_argument('-n', '--name', action='store', default='duply',
                               help='application name for notification (default: duply)')
        icon_opts.add_argument('-i', '--icon', action='store', default='ark',
                               help='notification icon (default: ark)')

        # dbus stuff
        dbus_opts = parser.add_argument_group('dbus', 'how to connect to DBus daemon')
        # --dbus-env and --dbus-user are alternative ways to find the session
        # bus, so they are mutually exclusive.
        dbus_excl = dbus_opts.add_mutually_exclusive_group()
        dbus_excl.add_argument('--dbus-env', dest='dbus_env', action='store',
                               help='file to read dbus environment variables'
                               ' (if current session has no DBUS_SESSION_BUS_ADDRESS).')
        dbus_excl.add_argument('--dbus-user', dest='dbus_user', action='store',
                               help='guess environment for user (from /home/$USER/.dbus/session-bus)')
        dbus_opts.add_argument('--dbus-test', '--test-dbus', dest='test_dbus',
                               action='store_true',
                               help="just try to show notification without backup. Useful for dbus testing")

        # debug
        debug_opts = parser.add_argument_group('debug', 'debugging stuff')
        debug_opts.add_argument("-v", "--verbose", dest="verbose",
                                action="store_true", help="set verbosity")
        debug_opts.add_argument('--debug-log', dest='debug_log', action='store',
                                default=None,
                                help='save duplicity machine-readable log to file')
        debug_opts.add_argument('--replay-log', dest='replay_log', action='store',
                                default=None,
                                help='parse provided log file instead of running duplicity')
        debug_opts.add_argument('--replay-speed', type=float, dest='replay_speed',
                                action='store', default=1.0,
                                help='replay speed (2 will replay 2 times faster)')

        parser.add_argument('cmd', nargs='*', action='store',
                            help='duplicity/duply command line')

        # Process arguments
        args = parser.parse_args()
        verbose = args.verbose

        # Nothing to do: no wrapped command, no replay file, no dbus test.
        if len(args.cmd) == 0 and not args.replay_log and not args.test_dbus:
            parser.print_usage()
            return 1

        if verbose:
            print("Verbose mode on")
            globals.verbose = True
            logging.basicConfig(level=logging.DEBUG)

        # Copy parsed options into the module-level settings object
        # (`globals` here is a project module, not the builtin).
        globals.notification_title = args.title
        globals.notification_app_name = args.name
        globals.notification_icon = args.icon
        globals.dbus_user = args.dbus_user
        globals.dbus_env = args.dbus_env
        globals.save_duply_log_file_name = args.debug_log
        globals.replay_log_file_name = args.replay_log
        globals.replay_log_speed = args.replay_speed

        if args.test_dbus:
            return test_dbus()

        return run_me(args.cmd)
    except KeyboardInterrupt:
        return 1
    except Exception as e:
        # Top-level CLI boundary: re-raise in debug/verbose, otherwise report.
        if DEBUG or globals.verbose:
            raise
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + " for help use --help\n")
        return 2
def parse_args():
    """Parse command-line options for posting sample tasks to the crowd server.

    Returns:
        argparse.Namespace augmented with ``types_map``, a dict mapping each
        task type to a ``(num_tasks, num_assignments)`` tuple.

    Exits with status 1 (usage + error on stderr) when the lengths of
    --num-tasks / --num-assignments do not match --task-types.
    """
    parser = ArgumentParser(
        description="Post sample tasks to the crowd server")
    parser.add_argument('--task-types', '-t', nargs="+", metavar="TASK_TYPE",
                        choices=['sa', 'er', 'ft'], default=['sa'],
                        help=('task types for which to create tasks. (defaults '
                              'to just \'sa\''))
    parser.add_argument('--crowds', '-c', nargs="+", metavar="CROWD_NAME",
                        choices=['amt', 'internal'], default=['internal'],
                        help=('crowds on which to create tasks. (defaults to '
                              'just \'internal\''))
    parser.add_argument('--num-tasks', '-n', nargs="+", type=int,
                        metavar="NUM_TASKS",
                        help=('Number of tasks to create (one number for each '
                              'task type given with -t). Defaults to one task '
                              'for each task type.'))
    parser.add_argument('--num-assignments', '-a', nargs="+", type=int,
                        metavar="NUM_ASSIGNMENTS",
                        help=('Number of assignments to require (one number '
                              'for each task type given with -t). Defaults to '
                              'one assignment for each task type.'))
    parser.add_argument('--ssl', action='store_true',
                        help='Send requests to the crowd server over ssl.')
    args = parser.parse_args()

    # Default both count lists to one entry per task type.
    if not args.num_tasks:
        args.num_tasks = [1 for task_type in args.task_types]
    if not args.num_assignments:
        args.num_assignments = [1 for task_type in args.task_types]

    if (len(args.num_tasks) != len(args.task_types) or
            len(args.num_assignments) != len(args.task_types)):
        # BUG FIX: the error used to go to stdout (via a py2-only `print ""`)
        # and the process exited with status 0; report on stderr and exit
        # nonzero so callers can detect the failure.
        sys.stderr.write("Length of --num-tasks and --num-assignments options "
                         "must match the length of the --task-types option!\n\n")
        parser.print_usage()
        sys.exit(1)

    # task type -> (number of tasks, assignments required per task)
    args.types_map = {
        args.task_types[i]: (args.num_tasks[i], args.num_assignments[i])
        for i in range(len(args.num_tasks))
    }
    return args