def main():
    parser = ArgumentParser()
    parser.add_argument("--engine", type=str, default="sqlite:///accounts.db")
    args = parser.parse_args()
    db = DB(args.engine)

    # Reuse the name "parser" for the SAX parser once argument parsing is done.
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, 0)

    print("reading OK table...")
    handler = OKHandler(db)
    parser.setContentHandler(handler)
    parser.parse("./OK.xml")

    print("reading accounts table...")
    handler = AccountsHandler(db)
    parser.setContentHandler(handler)
    parser.parse("./Accounts.xml")
def main():
    parser = ArgumentParser(description=__doc__)
    parser.add_argument("log_file", type=str,
                        help="The log file to be processed")
    parser.add_argument("--interval", type=int, default=1,
                        help="The duration of an interval, in minutes")
    parser.add_argument("--start", type=str, default="1970-01-01T00:00",
                        help="The start time for considering log entries")
    parser.add_argument("--end", type=str, default="2099-01-01T00:00",
                        help="The end time for considering log entries")
    parser.add_argument("--success", type=str, default="2xx",
                        help="The patterns of the status codes that will be "
                             "considered successful")
    args = parser.parse_args()

    success_codes = []
    for pattern in args.success.split(','):
        success_codes += expand(pattern)

    parser = LogParser(args.log_file, args.start, args.end, args.interval,
                       success_codes)
    parser.parse()
    parser.print_stats()
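# The snippet above relies on a helper named expand() that is not shown here.
# A minimal sketch of what such a helper might look like, assuming patterns
# use 'x' as a wildcard digit (e.g. "2xx" -> 200..299, "404" -> [404]):
import itertools

def expand(pattern):
    """Expand a status-code pattern like '2xx' into a list of int codes."""
    digits = [range(10) if ch in 'xX' else [int(ch)] for ch in pattern]
    return [int(''.join(str(d) for d in combo))
            for combo in itertools.product(*digits)]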
def parseArgs():
    '''
    Process command line args using argparse or, if it is not available,
    fall back to optparse in a backwards-compatible way.

    Returns tuple of (args, remnants) where args is an object with attributes
    corresponding to named arguments and remnants is a list of remaining
    unnamed positional arguments.
    '''
    try:
        # Alias a common interface onto argparse; fall back to the
        # deprecated optparse below if argparse is not available.
        from argparse import ArgumentParser as Parser
        Parser.add = Parser.add_argument
        Parser.add_group = Parser.add_argument_group
        Parser.parse = Parser.parse_known_args
    except ImportError as ex:
        from optparse import OptionParser as Parser
        Parser.add = Parser.add_option
        Parser.add_group = Parser.add_option_group
        Parser.parse = Parser.parse_args

    d = "Runs localhost web application wsgi service on given host address and port. "
    d += "\nDefault host:port is 0.0.0.0:8080."
    d += "\n(0.0.0.0 is any interface on localhost)"
    p = Parser(description=d)
    p.add('-l', '--level', action='store', default='info',
          choices=aiding.LOGGING_LEVELS.keys(),
          help="Logging level.")
    p.add('-s', '--server', action='store', default='paste',
          help="Web application WSGI server type.")
    p.add('-a', '--host', action='store', default='0.0.0.0',
          help="Web application WSGI server ip host address.")
    p.add('-p', '--port', action='store', default='8080',
          help="Web application WSGI server ip port.")
    p.add('-b', '--base', action='store', default='',
          help="Base Url path prefix for client side web application.")
    p.add('-x', '--cors', action='store_true', default=False,
          help="Enable CORS Cross Origin Resource Sharing on server.")
    p.add('-t', '--tls', action='store_true', default=False,
          help="Use TLS/SSL (https).")
    p.add('-c', '--cert', action='store', default='/etc/pki/tls/certs/localhost.crt',
          help="File path to tls/ssl cacert certificate file.")
    p.add('-k', '--key', action='store', default='/etc/pki/tls/certs/localhost.key',
          help="File path to tls/ssl private key file.")
    p.add('-e', '--pem', action='store', default='/etc/pki/tls/certs/localhost.pem',
          help="File path to tls/ssl pem file with both cert and key.")
    p.add('-g', '--gen', action='store_true', default=False,
          help=("Generate web app load file. Default is 'app/main.html'"
                " or if provided the file specified by -f option."))
    p.add('-f', '--load', action='store', default='app/main.html',
          help="Filepath to save generated web app load file upon -g option.")
    p.add('-C', '--coffee', action='store_true', default=False,
          help="Upon -g option generate to load coffeescript.")
    p.add('-d', '--devel', action='store_true', default=False,
          help="Development mode.")

    return p.parse()
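# Usage sketch for the shim above: whether argparse or optparse ends up being
# used, the call site unpacks the same two-tuple, since parse_known_args and
# optparse's parse_args both return (options, leftovers):
#
#     (args, remnants) = parseArgs()
#     level = args.level      # named options become attributes
#     extras = remnants       # remaining unnamed positional arguments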
def main(): usage = """usage: %(prog)s [options] /path/to/dump.rdb Example : %(prog)s --command json -k "user.*" /var/redis/6379/dump.rdb""" parser = ArgumentParser(prog='rdb', usage=usage) parser.add_argument("-c", "--command", dest="command", required=True, help="Command to execute. Valid commands are json, diff, justkeys, justkeyvals, memory and protocol", metavar="CMD") parser.add_argument("-f", "--file", dest="output", help="Output file", metavar="FILE") parser.add_argument("-n", "--db", dest="dbs", action="append", help="Database Number. Multiple databases can be provided. If not specified, all databases will be included.") parser.add_argument("-k", "--key", dest="keys", default=None, help="Keys to export. This can be a regular expression") parser.add_argument("-o", "--not-key", dest="not_keys", default=None, help="Keys Not to export. This can be a regular expression") parser.add_argument("-t", "--type", dest="types", action="append", help="""Data types to include. Possible values are string, hash, set, sortedset, list. Multiple typees can be provided. If not specified, all data types will be returned""") parser.add_argument("-b", "--bytes", dest="bytes", default=None, help="Limit memory output to keys greater to or equal to this value (in bytes)") parser.add_argument("-l", "--largest", dest="largest", default=None, help="Limit memory output to only the top N keys (by size)") parser.add_argument("-e", "--escape", dest="escape", choices=ESCAPE_CHOICES, help="Escape strings to encoding: %s (default), %s, %s, or %s." % tuple(ESCAPE_CHOICES)) expire_group = parser.add_mutually_exclusive_group(required=False) expire_group.add_argument("-x", "--no-expire", dest="no_expire", default=False, action='store_true', help="With protocol command, remove expiry from all keys") expire_group.add_argument("-a", "--amend-expire", dest="amend_expire", default=0, type=int, metavar='N', help="With protocol command, add N seconds to key expiry time") parser.add_argument("dump_file", nargs=1, help="RDB Dump file to process") options = parser.parse_args() filters = {} if options.dbs: filters['dbs'] = [] for x in options.dbs: try: filters['dbs'].append(int(x)) except ValueError: raise Exception('Invalid database number %s' %x) if options.keys: filters['keys'] = options.keys if options.not_keys: filters['not_keys'] = options.not_keys if options.types: filters['types'] = [] for x in options.types: if not x in VALID_TYPES: raise Exception('Invalid type provided - %s. Expected one of %s' % (x, (", ".join(VALID_TYPES)))) else: filters['types'].append(x) out_file_obj = None try: if options.output: out_file_obj = open(options.output, "wb") else: # Prefer not to depend on Python stdout implementation for writing binary. out_file_obj = os.fdopen(sys.stdout.fileno(), 'wb') try: callback = { 'diff': lambda f: DiffCallback(f, string_escape=options.escape), 'json': lambda f: JSONCallback(f, string_escape=options.escape), 'justkeys': lambda f: KeysOnlyCallback(f, string_escape=options.escape), 'justkeyvals': lambda f: KeyValsOnlyCallback(f, string_escape=options.escape), 'memory': lambda f: MemoryCallback(PrintAllKeys(f, options.bytes, options.largest), 64, string_escape=options.escape), 'protocol': lambda f: ProtocolCallback(f, string_escape=options.escape, emit_expire=not options.no_expire, amend_expire=options.amend_expire ) }[options.command](out_file_obj) except: raise Exception('Invalid Command %s' % options.command) if not PYTHON_LZF_INSTALLED: eprint("WARNING: python-lzf package NOT detected. 
" + "Parsing dump file will be very slow unless you install it. " + "To install, run the following command:") eprint("") eprint("pip install python-lzf") eprint("") parser = RdbParser(callback, filters=filters) parser.parse(options.dump_file[0]) finally: if options.output and out_file_obj is not None: out_file_obj.close()
#!/usr/bin/env python3
import logging
from logging import config

import settings

from argparse import ArgumentParser
from argparse import FileType

from parser import Parser
from tokenizer import Tokenizer

logging.config.dictConfig(settings.LOGGING)

parser = ArgumentParser(description="Compile Kaleidoscope files.")
parser.add_argument('source', metavar='source', type=FileType('r'),
                    help="The input source file to compile.")
args = parser.parse_args()

logging.info('Compiling "{}"'.format(args.source.name))

tokenizer = Tokenizer(args.source)
parser = Parser(tokenizer)
ir = parser.parse()
for item in ir:
    print(item, end=' ')
"""Vega compiler""" from argparse import ArgumentParser from argparse import FileType from vega.front_end.parser import Parser from vega.front_end.exception import BaseError if __name__ == "__main__": parser = ArgumentParser(description="Compile") parser.add_argument('code', type=FileType('r')) args = parser.parse_args() code = args.code parser = Parser(code) try: parser.parse() except BaseError as e: print(e.message)
            line = self.next()
            if line.startswith("def "):
                self.push_front(line)
                self.fix_fun(out)
            elif line.startswith("class "):
                self.push_front(line)
                self.fix_class(out)
            else:
                m = self.SIMPLE_ASSIGNMENT_RE.match(line)
                if m:
                    self.push_front(line)
                    self.fix_assignment(out, m)
                else:
                    out.append(line)

        with open(out_path, "w") as o:
            o.write("\n".join(out))

# --------------------------------------------------------------------------
patches = load_patches(args)

collecter = collect_pydoc_t(args.interface)
collected = collecter.collect()

parser = wrapper_utils.cpp_wrapper_file_parser_t(args)
cpp_wrapper_functions = parser.parse(args.cpp_wrapper)

fixer = idaapi_fixer_t(collected, patches, cpp_wrapper_functions)
fixer.fix_file(args)

with open(args.epydoc_injections, "w") as fout:
    for key in sorted(fixer.epydoc_injections.keys()):
        fout.write("\n\nida_%s.%s\n" % (args.module, key))
        fout.write("\n".join(fixer.epydoc_injections[key]))
nml_name = re.search("(.*).xml", args.namelist).group(1)
nml_tmp = nml_name + "-tmp.xml"
nml_target = nml_name + ".xml"
if args.output_namelist is not None:
    nml_target = args.output_namelist

# Add models to <MODELS>-tags
if args.model_entry is not None and args.variable is None:
    models_start = '<MODELS>' + '\n'
    models_end = '</MODELS>' + '\n'

    namelist = library_nml.addModels()
    parser = xml.sax.make_parser()
    parser.setContentHandler(namelist)
    parser.parse(args.namelist)
    nml_sections = namelist.namelist_sections

    shutil.copy(nml_name + '.xml', nml_name + '-backup.xml')
    ftmp = open(nml_tmp, "w")
    ftmp.write(nml_sections['header'].encode('utf8'))
    ftmp.write(models_start.encode('utf8'))
    # Parenthesize the concatenation so .encode() applies to the whole string,
    # not just the trailing newline (operator precedence bug in the original).
    ftmp.write((nml_sections['MODELS'] + '\n' + '<model> ' + args.model_entry +
                '</model>' + '\n').encode('utf8'))
    ftmp.write(models_end.encode('utf8'))
    ftmp.close()
    shutil.move(nml_tmp, nml_target)
# The opening of this connect() call was truncated in the original excerpt;
# it is reconstructed here from the identical call at the end of the snippet.
conn = psycopg2.connect(database=s_databasename, user=s_username,
                        password=s_password, host=s_host, port=n_port)
cur = conn.cursor()
cur.execute("select * from books.get_bank_account_id (%s, %s)",
            (s_bank_account_number, s_bank_account_friendly_name))
row = cur.fetchone()
n_bank_account_id = row[0]
conn.commit()

cur.execute("select * from load.prepare_ofx (%s)", [n_bank_account_id])
conn.commit()

parser = OFXTree()
with open(s_ofxfile, 'rb') as f:  # N.B. need to open file in binary mode
    parser.parse(f)
ofx = parser.convert()
stmts = ofx.statements
txs = stmts[0].transactions
acct = stmts[0].account
s_bankid = acct.bankid
s_acctid = acct.acctid
s_accttype = acct.accttype
s_branchid = acct.branchid

conn = psycopg2.connect(database=s_databasename, user=s_username,
                        password=s_password, host=s_host, port=n_port)
    inverse_table[index] = n
    index += 1

### Retrieving sequences
seq_dict = {}
if BIOPY_AVAIL:
    for record in SeqIO.parse(args.contig_file, "fasta"):
        ID = record.id  # was record.seq; the key should be the record id
        seq = str(record.seq)
        seq_dict[ID] = seq
else:
    parser = FAParser(args.contig_file)
    parser.open()
    for id, seq in parser.parse():
        seq_dict[id] = seq

### Then k-merize and insert into a different list using indexes
num_contigs = len(seq_dict.keys())
seq_ids = list(seq_dict.keys())  # was seq_dict[keys]; "keys" was undefined
index_set = [[None] for _ in xrange(len(seq_ids))]
for i, val in enumerate(seq_ids):
    seq = seq_dict[val]
    for j in xrange(len(seq) - ksize):
        kmer = seq[j:j + ksize]
        idx = index_table[kmer]
        index_set[i].append(idx)
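# The snippet above references an index_table (k-mer -> integer index) and its
# inverse_table, both built before the excerpt begins. A minimal sketch of how
# such tables might be constructed; the names mirror the excerpt, while
# all_kmers is a hypothetical iterable of every k-mer seen:
index_table = {}
inverse_table = {}
index = 0
for n in all_kmers:
    index_table[n] = index    # k-mer -> dense integer index
    inverse_table[index] = n  # integer index -> k-mer
    index += 1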
from parser import BGChatParser

if __name__ == '__main__':
    from argparse import ArgumentParser

    # Help strings translated from Russian; redundant unicode() wrappers around
    # u'' literals dropped.
    parser = ArgumentParser(
        description=u'Parser for the BestGothic.com chat into a cozy little SQLite database. '
                    u'You can inspect the contents of the SQLite database any way you like, '
                    u'for example with Sqliteman (http://sourceforge.net/projects/sqliteman/)',
        epilog=u'There is no point in setting an interval below 30 seconds for periodic parsing. '
               u'There are almost never more than 20 messages per 30 seconds, '
               u'and the BestGothic hosters may ban you for constant requests.'
    )
    parser.add_argument('-t', action='store_true', default=False,
                        help=u'Include text only. '
                             u'Any HTML entity (e.g. a smiley) will be ignored. '
                             u'Disabled by default.')
    parser.add_argument('-p', action='store_true',
                        help=u'Enables periodic parsing mode.')
    parser.add_argument('-i', type=int, default=30,
                        help=u'Interval for parsing the chat, in seconds. Defaults to 30.')
    parser.add_argument('db', type=unicode, nargs='?', default=u'bgchat.db',
                        help=u'Path to the SQLite database file into which you want to parse the chat.')
    args = parser.parse_args()

    parser = BGChatParser(html=not args.t, interval=args.i,
                          db_options={'name': args.db})
    if args.p:
        parser.run()
    else:
        parser.parse()
from weightless.io import Reactor
from weightless.core import compose, be
from argparse import ArgumentParser
# Observable, ObservableHttpServer, BasicHttpHandler, JsonList and JsonDict
# are imported elsewhere in the original source (not shown in this excerpt).


class RequestEcho(object):
    def handleRequest(self, *args, **kwargs):
        yield "HTTP/1.0 200 OK\r\n"
        yield "\r\n"
        yield JsonList(args).pretty_print()
        yield JsonDict(kwargs).pretty_print()


def dna(reactor, port, **kwargs):
    return (Observable(),
        (ObservableHttpServer(reactor, bindAddress='0.0.0.0', port=port),
            (BasicHttpHandler(),
                (RequestEcho(),)
            )
        )
    )


if __name__ == '__main__':
    parser = ArgumentParser()
    # argparse expects the type callable itself, not the string 'int'
    parser.add_argument('--port', type=int, default=6060)
    # was parser.parse(), which does not exist on ArgumentParser
    args = parser.parse_args()

    reactor = Reactor()
    server = be(dna(reactor=reactor, **vars(args)))
    list(compose(server.once.observer_init()))
    reactor.loop()
# Set up the parse map
# (test_regex for the "def" pattern is defined above this excerpt, not shown)
regex_table = (
    ("def", "(def)[\t ]+", ParseMap.IGNORE),
    ("func_name", "[a-zA-Z_][a-zA-Z0-9_]+", ParseMap.LITERAL),
    ("endl", "[\t ]*:[\t ]*\n", ParseMap.IGNORE),
)
parser = ParseMap(regex_table)
parser.assert_match(test_regex)

passing_tests = {"def foo:\n": "foo",
                 "def f2oo:\n": "f2oo",
                 "def foo3 : \n": "foo3"}
failing_tests = ("def 1foo:\n", "def\tfoo:", "def:")

for test in passing_tests:
    result = parser.parse(test)
    if result["func_name"] != passing_tests[test]:
        raise Exception
for test in failing_tests:
    try:
        result = parser.parse(test)
        raise Exception
    except ParseMapError:
        continue

# Run a test against input that must be transformed with a function
test_regex = "(sum)[\t ]+[0-9]+[\t ]*(and)[\t ]*[0-9]+"
parser = ParseMap(
    (
        ("sum", "(sum)[\t ]+", ParseMap.IGNORE),
# The opening try: was truncated in the original excerpt; the except clauses
# below require it.
try:
    with open(args.config) as handle:
        config = json.load(handle)
except FileNotFoundError as err:
    pr.print(f'Config file {args.config} not found.', time=True)
    raise err
except json.JSONDecodeError as err:
    pr.print(f'Config file {args.config} is not valid JSON.', time=True)
    raise err
except KeyError as err:
    pr.print(f'Config file {args.config} is not a valid config file.', time=True)
    raise err

database = config['database']
encoding = config['encoding']
database['password'] = getpass(
    f'SQL password for {database["user"]}@localhost: ')

parser = AgentsParser(database, encoding)

if not config['resume']:
    for table in database['tables'].keys():
        parser.database.create_table(table)

options = ('silent', 'bin_size', 'resume')
params = {key: config[key] for key in options if key in config}

parser.parse(config['sourcepath'], **params)

if config['create_idxs']:
    parser.create_idxs()
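# A sketch of the config file shape the snippet above expects, inferred from
# the keys it reads; every value below is an illustrative placeholder:
#
# {
#     "database": {
#         "user": "agents",
#         "tables": {"agents": "..."}
#     },
#     "encoding": "utf-8",
#     "resume": false,
#     "create_idxs": true,
#     "sourcepath": "data/agents.csv",
#     "silent": false,
#     "bin_size": 100000
# }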