def main():
    """Fetch and print the timeline of each Twitter screen name on the command line."""
    opts, args = parse_args(doc=__doc__, minargc=1)
    for screen_name in args:
        try:
            fetch_user_timeline(screen_name)
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt. logging.exception keeps the traceback.
            logging.exception("Could not fetch %s's timeline.", screen_name)
def main():
    """Import each module named on the command line and profile its entry function."""
    options, args = parse_args(doc=__doc__, minargc=1)
    for module_name in args:
        module = __import__(module_name)
        # Guard clause: skip modules that do not expose the entry point.
        if not hasattr(module, RUN_FUNCTION):
            logging.error("No '%s' is found in %s.", RUN_FUNCTION, module_name)
            continue
        run_profiler(getattr(module, RUN_FUNCTION))
def main():
    """Load a YAML config file given as <file> <section> on the command line.

    Logs an error and returns early when the file is not valid YAML.
    """
    opts, args = parse_args(doc=__doc__, postfook=postfook)
    fname, section = args
    try:
        # "with" closes the file; the original leaked the handle.
        with open(fname) as f:
            # NOTE(review): yaml.load can execute arbitrary constructors on
            # untrusted input — consider yaml.safe_load if the file can come
            # from outside. Left as-is to preserve behavior.
            config = yaml.load(f)
    except yaml.scanner.ScannerError:
        logging.error("%s is invalid YAML format file.", fname)
        return
def main():
    """Create, populate, and dump a demo table in a SQLite database.

    Uses opts.output as the database file, or an in-memory database when unset.
    """
    opts, _ = parse_args(doc=__doc__, maxargc=0)
    fname = opts.output or ':memory:'
    database = sqlite3.connect(fname)
    try:
        cur = database.cursor()
        try:
            createtable(cur)
            addrecords(cur)
            showrecords(cur)
            database.commit()
        finally:
            cur.close()
    finally:
        # Was missing: close the connection so the file handle is released
        # even when one of the table operations raises.
        database.close()
def main():
    """Convert each XML input file to CSV, optionally writing to opts.output.

    When opts.output is unset, writer stays None (Xml2Csv decides the sink).
    """
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    writer = None
    if opts.output:
        writer = codecs.open(opts.output, 'w', opts.enc_out)
    try:
        processor = Xml2Csv(writer)
        for fname in files:
            processor.process_file(fname)
    finally:
        # Close even if processing raises — the original leaked on error.
        if writer:
            writer.close()
def main(): opts, args = parse_args(doc=__doc__, prefook=prefook) port = opts.port from twisted.internet import reactor from twisted.web import server reactor.listenTCP(port, server.Site(SiteHome())) try: print "Test server running on %d port..." % port reactor.run() except KeyboardInterrupt: reactor.stop()
def main(): opts, files = parse_args(doc=__doc__, postfook=check_file_path) if opts.filename: logging.info("Read template file: %s" % (opts.filename,)) tpl = open(opts.filename).read() else: tpl = DEFAULT_TEMPLATE template = jinja2.Template(tpl) for fname in files: logging.info("Start to process: %s" % (fname,)) params = {"texts": [l.strip() for l in open(fname) if l]} print template.render(params)
def main(): opts, files = parse_args(doc=__doc__, maxargc=1, postfook=check_file_path) def render(doc): title = doc["title"] or "" url = doc["url"] or "" abstract = doc["abstract"] or "" print """<div class="article"> <a href="%s">%s</a><p>%s</p></div>""" % (url, title, abstract) target_file = files[0] parser = WikipediaAbstractParser() parser.add_callback(render) xml.sax.parse(target_file, parser)
def main():
    """Render each Jinja2 template file into the output directory."""
    opts, files = parse_args(doc=__doc__, minargc=1)
    # NOTE(review): DEFAUT_BUILD_DIR is spelled this way upstream.
    outdir = opts.output or DEFAUT_BUILD_DIR
    if not os.path.exists(outdir):
        os.mkdir(outdir)
        logging.info("Created %s", outdir)
    loader = jinja2.FileSystemLoader(opts.basedir)
    env = jinja2.Environment(loader=loader)
    for fname in files:
        logging.info("Start to process: %s", fname)
        rendered = env.get_template(fname).render()
        out_path = os.path.join(outdir, fname)
        with codecs.open(out_path, "wb", encoding=opts.enc_out) as sink:
            sink.write(rendered)
        logging.info("Wrote to %s", out_path)
def main(): # This is a Confuluence table style. header = Template("""\ ||Attribute Name ||Field Name ||Field Type ||Indexed ||Stored ||Multi Valued |""") body = Template("""| Edit on your own | $name | $type | \ $indexed | $stored | $multiValued |""") footer = Template("") class Processor(object): # Output style to organize table layout. style = {"header":None, "body":None, "footer":None} count = 0 def leading(self): if self.style["header"]: print self.style["header"].substitute({}) def trailing(self): if self.style["footer"]: print self.style["footer"].substitute({}) def process(self, doc): self.count += 1 # TODO: Use 1st, 2nd, 3rd, and so forth. logging.debug("Processing %dth item." % (self.count,)) if not doc.has_key("multiValued"): doc["multiValued"] = "" if self.style["body"]: print self.style["body"].safe_substitute(doc) opts, files = parse_args(doc=__doc__, postfook=check_file_path) processor = Processor() processor.style["header"] = header processor.style["body"] = body parser = SolrSchemaParser() parser.add_callback(processor.process) processor.leading() for fname in files: xml.sax.parse(fname, parser) processor.trailing()
def main(): opts, files = parse_args(doc=__doc__, postfook=check_file_path) def get_database_info(info): host = info["host"] or "127.0.0.1" port = info["port"] or 3306 database = info["database"] or "test" username = info["username"] or "" password = info["password"] or "" return host, port, database, username, password def process(database): cur = database.cursor() d61.createtable(cur) d61.addrecords(cur) d61.showrecords(cur) database.commit() cur.close() for fname in files: logging.info("start to process: %s", fname) try: info = json.load(open(fname)) except: logging.error("%s is invalid JSON format.", fname) continue host, port, database, username, password = get_database_info(info) logging.debug("connect \"%s\" of %s@%s:%d" % (database, username, host, port)) try: conn = MySQLdb.connect( host=host, port=port, db=database, user=username, passwd=password) except Exception, e: logging.error("could not connect MySQL. %s" % (e,)) continue process(conn)
def main():
    """Convert every YAML file on the command line to JSON."""
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    for path in files:
        yaml2json(path)
def main():
    """Print the first CSV input file as an HTML table in the selected style."""
    opts, files = parse_args(doc=__doc__, prefook=prefook, postfook=postfook)
    source = open(files[0], "r")
    print_html(csv.reader(source), style=opts.style)
def main():
    """Report the image size of each file given on the command line."""
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    for path in files:
        showimagesize(path)
def main():
    """List file types for each directory argument, or the cwd when none given."""
    opts, args = parse_args(doc=__doc__)
    # Plain loop instead of a side-effect comprehension whose list was discarded.
    for directory in (args or [os.getcwd()]):
        filetypes(directory)
def main():
    """Search tweets for the single keyword given on the command line."""
    opts, args = parse_args(doc=__doc__, minargc=1, maxargc=1)
    search_tweets(args[0])
def main():
    """Feed every input file through one HTMLProcessor bound to opts.output."""
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    processor = HTMLProcessor(opts.output)
    for path in files:
        processor.process_file(path)
def main():
    """Scaffold a new project named by the first command-line argument."""
    opts, projects = parse_args(doc=__doc__, postfook=postfook)
    project_starter(projects[0])
def main():
    """Publish each reStructuredText input file."""
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    for path in files:
        publish_restructured_text(path)
def main():
    """Create a thumbnail for each input image file."""
    opts, files = parse_args(doc=__doc__, postfook=check_file_path)
    for path in files:
        createthumbnail(path)
def main():
    """Remove empty directories under each argument, or under the cwd when none."""
    opts, dirs = parse_args(doc=__doc__)
    # Plain loop instead of a side-effect comprehension whose list was discarded.
    for directory in (dirs or [os.getcwd()]):
        removeemptydir(directory)
def main():
    """Print the first CSV input file as HTML or JSON, chosen by opts.outtype."""
    opts, files = parse_args(doc=__doc__, prefook=prefook, postfook=postfook)
    # Dispatch table keyed by output type.
    dispatch = {"html": print_html, "json": print_json}
    rows = csv.reader(open(files[0], "r"))
    dispatch[opts.outtype](rows)
def main(): opts, files = parse_args(doc=__doc__, postfook=check_file_path) images = [getimageinfo(f) for f in files] print json.dumps(images)