def __init__(self, baseurl=None, config=None, username=None, password=None, profile=None):
    config = Config(config, profile=profile)
    if not baseurl:
        baseurl = config.getconfig("api").baseurl
        if not baseurl:
            raise Exception('Server requires baseurl')
    elif type(baseurl) is not str:
        # Handle unparsed URLs as baseurls
        baseurl = unparseurl(baseurl)
    if not urlparse(baseurl).hostname:
        raise Exception('Bad baseurl arg', baseurl)
    if baseurl.endswith('/api/'):
        pass  # already normalized
    elif baseurl.endswith('/'):
        baseurl = baseurl + 'api/'
    else:
        baseurl = baseurl + '/api/'
    if not username:
        username = config.getconfig(baseurl).username
    if type(username) is not str:
        raise Exception('bad username', username)
    if not password:
        password = config.getconfig(baseurl).password
    if type(password) is not str:
        # Don't pass the password arg to try and keep it out of
        # error messages which anyone might see
        raise Exception('bad password')
    self.baseurl = baseurl
    self.username = username
    self.password = password
    self.credentials = (username, password)
    self.endpoints = {}
    self.__cookies = None
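# A minimal usage sketch for the constructor above, assuming the enclosing
# class is called Server (a hypothetical name) and that the URL and
# credentials below are placeholders; the real class name and config layout
# come from the surrounding project.
server = Server(baseurl='https://example.org',   # normalized to https://example.org/api/
                username='alice',
                password='s3cret')

# With no arguments, the baseurl is looked up in the "api" config section and
# the credentials in the config section keyed by that baseurl.
server = Server(profile='staging')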
def __init__(self, config=getconfig()):
    self.config = config
################################################################
#                                                              #
#            Multi-browser, multi-platform testing             #
#                                                              #
################################################################
import time
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import config
import threading

# Parameterize the script by host and browser
for host, browser in config.getconfig().items():
    print(host)
    print(browser)
    driver = webdriver.Remote(
        command_executor=host,
        desired_capabilities={
            'platform': 'ANY',
            'browserName': browser,
            'version': '',
            'javascriptEnabled': True
        }
    )
    driver.get('http://www.baidu.com')
    driver.maximize_window()
    driver.implicitly_wait(30)
    driver.find_element_by_id('kw').send_keys('selenium grid2')
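# The script above imports threading but exercises each (host, browser)
# combination sequentially. A minimal sketch of running the combinations in
# parallel, one thread per remote node, is shown below; run_case is a
# hypothetical helper, not part of the original script.
import threading

from selenium import webdriver

import config


def run_case(host, browser):
    # Drive one remote node for one browser, repeating the same Baidu search.
    driver = webdriver.Remote(
        command_executor=host,
        desired_capabilities={
            'platform': 'ANY',
            'browserName': browser,
            'version': '',
            'javascriptEnabled': True,
        },
    )
    try:
        driver.get('http://www.baidu.com')
        driver.maximize_window()
        driver.implicitly_wait(30)
        driver.find_element_by_id('kw').send_keys('selenium grid2')
    finally:
        driver.quit()


threads = [threading.Thread(target=run_case, args=(host, browser))
           for host, browser in config.getconfig().items()]
for t in threads:
    t.start()
for t in threads:
    t.join()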
def __init__(self, notifier, mediamanager, config=getconfig()):
    self.notifier = notifier
    self.mediamanager = mediamanager
    self.dictionaries = dictionary.PinyinDictionary.loadall()
    self.config = config
def __init__(self, notifier, mediamanager, config=getconfig()):
    self.notifier = notifier
    self.mediamanager = mediamanager
    self.config = config
        for (key, value) in theform.items():
            if hasattr(value, 'file'):
                del theform[key]

        theform['submitted_timestamp'] = t.isoformat()
        crashid = makeuuid(t)
        dumpdir = os.path.join(config.minidump_storage_path,
                               str(t.year), t.strftime('%m-%d'), crashid)
        queueitempath = os.path.join(config.processor_queue_path, crashid)

        try:
            self.writefiles(dumpdir, dumpmap, theform)
            os.symlink(dumpdir, queueitempath)
        except:
            shutil.rmtree(dumpdir, ignore_errors=True)
            raise

        return "CrashID=bp-%s" % crashid


if __name__ == '__main__':
    config = getconfig()
    app = Collector(config)
    cherryconfig = {'global': {'server.socket_host': config.collector_addr,
                               'server.socket_port': config.collector_port,
                               'engine.autoreload.on': False,
                               'log.screen': False,
                               'log.access_file': config.collector_access_log,
                               'log.error_file': config.collector_error_log}}
    cherrypy.quickstart(app, config=cherryconfig)
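# A minimal sketch of the configuration attributes the collector fragment
# above reads (storage paths, bind address/port, log files). The values are
# illustrative assumptions for local testing, not the project's real
# getconfig() output.
from types import SimpleNamespace

example_config = SimpleNamespace(
    minidump_storage_path='/var/crashes/minidumps',
    processor_queue_path='/var/crashes/queue',
    collector_addr='127.0.0.1',
    collector_port=8080,
    collector_access_log='/var/log/collector/access.log',
    collector_error_log='/var/log/collector/error.log',
)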
if o == '-c':
    # alternate configuration file
    configfile = a
if o == '-s':
    silent = True
if o == '-h':
    usage()

if len(cmds) == 0 \
   or cmds[0] not in ('info', 'run', 'dryrun', 'undo', 'dryundo', 'change'):
    usage()

if 'info' in cmds:
    silent = True

# load the configuration
conf = config.getconfig(configfile, silent=silent)

# build the task list
todo = prep.todolist(conf, silent=silent)

if cmds[0] == 'run':
    jobs.dothejob(todo, action='do', silent=silent)
if cmds[0] == 'dryrun':
    jobs.dothejob(todo, action='do', dryrun=True, silent=silent)
if cmds[0] == 'undo':
    jobs.dothejob(todo, action='undo', silent=silent)
                os.unlink(linkpath)
                continue

            try:
                self.process(dumpdir)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print("[%s] Error while processing dump '%s'. Skipping.: %s"
                      % (time.asctime(), dumpdir, e))
                traceback.print_exc(6)
                continue

            os.unlink(linkpath)

    def loop(self):
        lasttime = 0
        while True:
            if time.time() < lasttime + self.config.processor_wakeinterval:
                time.sleep(lasttime + self.config.processor_wakeinterval - time.time())
            lasttime = time.time()
            try:
                self.searchandprocess()
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print("[%s] Continuing after exception: %s" % (time.asctime(), e))


if __name__ == '__main__':
    from config import getconfig
    Processor(getconfig()).loop()
        yield date
        date += timedelta(days=1)


def getjstime(d):
    return calendar.timegm(d.timetuple()) * 1000


startdate = datetime.strptime(opts.startdate, '%Y-%m-%d').date()
enddate = datetime.strptime(opts.enddate, '%Y-%m-%d').date()

w = csv.writer(sys.stdout, dialect='excel-tab')
w.writerow(('date', 'channel', 'c'))

j = {}

for date in daterange(startdate, enddate):
    infile = os.path.join(getconfig().minidump_storage_path,
                          str(date.year), date.strftime('%m-%d'),
                          'daily-summary.csv')
    if not os.path.exists(infile):
        continue
    fd = open(infile)
    r = csv.reader(fd, dialect='excel-tab')
    cdata = {}
    for channel, duration, count in r:
        count = int(count)
        if channel not in cdata:
            cdata[channel] = count
        else:
            cdata[channel] += count  # accumulate per-channel counts
#!/usr/bin/env python
from __future__ import print_function

import os, markdown, shutil
from bs4 import BeautifulSoup
from gittools import githead, gitorigin, githistory
from config import getconfig

# getconfig is from config.py - feed it the ini_file string and it will return
# config with named keys - eg: config["your_config_parameter"]
ini_file = "general.ini"
config = getconfig(ini_file)
# keep a reference so we can reload the config file and still have access to
# the original values if they're absent from the other ini files
backup_config = config

md_items = os.listdir(config["document_folder"])
for md_item in md_items:
    if md_item.lower().endswith('.md'):
        input_md = md_item
        md = markdown.Markdown(output_format="html5")
        input_md = config["document_folder"] + input_md
        loc_input_md = input_md
        input_md = open(input_md, 'r').read()
        input_md = md.convert(input_md)
        # strip newlines after closing tags so we can just ignore the extra
        # sibling nodes that appear after them
        sanitise_md = str(input_md).replace(">\n", ">")
        soup = BeautifulSoup(sanitise_md, 'html5lib')
        for html_elem in soup.find_all('h1'):
            title_entity = html_elem.text