def prologue(self):
    message.message.verbosity(message.INT_DEBUG)
    message.warning('a warning %(c)d', c=666)
    import os, signal
    message.debug(str(os.getpid()))
    # deliberately crash the process to exercise fatal signal handling
    os.kill(os.getpid(), signal.SIGSEGV)

def __init__(self, obj=None, func=None, name=None, cb_filter=None, reason=vpi.cbValueChange, **kwargs):
    # stash any remaining keyword arguments as attributes
    for attr, val in kwargs.items():
        setattr(self, attr, val)
    self.funcs = set()
    self.__iadd__(func)
    self.cnt = 0
    self.filtered = 0
    self.excepted = 0
    self.name = name or 'none given'
    self.cb_filter = cb_filter or self.cb_filter
    self.obj = obj
    self.callback = vpi.s_cb_data()
    if obj:
        self.callback.obj = obj.handle
        self.callback.value = obj.vpi_value
    self.callback.reason = reason
    self.callback.script(self.cb_fn)
    self.cb = vpi.vpi_register_cb(self.callback)
    self.vpi_chk_error = vpiChkError()
    message.debug('registered callback "%(name)s" for %(reason)s', reason=self.cb_type(), name=self.name)
    self.callbacks.append(self)

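# Hedged usage sketch for the callback class above: registering a handler for
# value changes on a signal. `top.sig`, `on_change` and the class name
# `callback` are hypothetical; only the constructor arguments visible in
# __init__ (obj, func, name, cb_filter, reason) and the vpi.cbValueChange
# default are assumed.
#
#   def on_change(cb_data):
#       message.debug('signal changed')
#
#   cb = callback(obj=top.sig, func=on_change, name='sig monitor')
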
def increment(self):
    message.debug('increment with table %(table)s, log_id %(log_id)s', table=self.temp, log_id=self.log_id)
    self.db.execute('DROP TEMPORARY TABLE IF EXISTS ' + self.itemp + ';')
    # select only new or status-changed rows into the incremental table
    self.db.execute(
        'CREATE TEMPORARY TABLE ' + self.itemp +
        ' SELECT log.*, current.log_id IS NULL AS new FROM log LEFT JOIN ' + self.temp +
        ' AS current USING (log_id) WHERE (log.root = %s OR log.log_id = %s) AND (current.log_id IS NULL OR log.status != IFNULL(current.status, -1));',
        (self.log_id, self.log_id))
    self.db.execute('REPLACE INTO ' + self.temp + ' SELECT * FROM ' + self.itemp + ';')
    self.db.commit()
    return self.itemp

def __init__(self, description='none given', test=None, root=None, parent=None, level=message.ERROR, queue='threaded', **kwargs):
    self.commit_level = level
    self.abv = activityBlockVersion(**kwargs)
    # init default filter
    self.filter_fn = self.filter
    self.root = root or mdbDefault().root
    self.parent = parent or mdbDefault().parent
    # add to list of instances
    mdb.instances.append(self)
    # pseudo singleton
    if mdb.instance is None:
        try:
            _queue = getattr(self.queue, queue)
        except AttributeError:
            message.fatal('No mdb queue type %(queue)s', queue=queue)
        message.information('Using queue %(queue)s', queue=queue)
        mdb.instance = _queue(self.abv, self.root, self.parent, description, test)
    # install callback
    message.emit_cbs.add('mdb emit', 1, self.add, None)
    message.debug('hello ...')

def increment(self, log_id):
    if self.cvg.HAS_UPDATE:
        # e.g. MySQL can use a join in an UPDATE
        rows = self.cvg.execute(
            'UPDATE ' + self.covg + ''' AS status
                 JOIN hits ON (status.bucket_id = hits.bucket_id AND hits.log_id = %s)
               SET
                 status.hits = CASE status.goal WHEN -1 THEN 0 WHEN 0 THEN 0 ELSE MIN(status.goal, status.hits + hits.hits) END,
                 status.rhits = CASE status.goal WHEN -1 THEN 0 WHEN 0 THEN 0 ELSE MIN(status.goal + status.max_hits, status.rhits + MIN(hits.hits, status.goal)) END,
                 status.total_hits = status.total_hits + hits.hits,
                 status.max_hits = MIN(status.goal, MAX(max_hits, hits.hits)),
                 status.tests = status.tests + 1;''',
            (log_id, ))
    else:
        # but we need to resort to REPLACE INTO for e.g. sqlite
        rows = self.cvg.execute(
            'REPLACE INTO ' + self.covg + ''' SELECT
                 status.bucket_id,
                 status.goal,
                 CASE status.goal WHEN -1 THEN 0 WHEN 0 THEN 0 ELSE MIN(status.goal, status.hits + hits.hits) END AS hits,
                 CASE status.goal WHEN -1 THEN 0 WHEN 0 THEN 0 ELSE MIN(status.goal + status.max_hits, status.rhits + MIN(hits.hits, status.goal)) END AS rhits,
                 status.total_hits + hits.hits AS total_hits,
                 MIN(status.goal, MAX(max_hits, hits.hits)) AS max_hits,
                 status.tests + 1
               FROM ''' + self.covg + ' AS status JOIN hits USING (bucket_id) WHERE hits.log_id = %s;',
            (log_id, ))
    message.debug('update %(rows)d rows', rows=rows)
    return rows

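# Hedged pure-Python restatement of the saturating arithmetic in the SQL
# above, to make the CASE arms explicit. `_accumulate_hits` is a hypothetical
# helper for illustration; reading goals of -1 and 0 as 'never scores' is
# inferred from the CASE arms, not stated elsewhere.
def _accumulate_hits(goal, hits, new_hits):
    if goal in (-1, 0):
        return 0  # CASE ... WHEN -1 THEN 0 WHEN 0 THEN 0
    return min(goal, hits + new_hits)  # saturate at the goal
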
def __del__(self):
    message.debug('Finish incremental with table %(table)s, log_id %(log_id)s', table=self.temp, log_id=self.log_id)
    self.db.execute('DROP TEMPORARY TABLE IF EXISTS ' + self.temp + ',' + self.itemp + ';')
    self.db.close()

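# Hedged lifecycle sketch for the incremental class above (__init__,
# increment, __del__); `incremental` and `regression_running` are
# hypothetical names.
#
#   inc = incremental(log_id=42)    # snapshot into the temporary table
#   while regression_running():
#       changed = inc.increment()   # table of new or status-changed rows
#   del inc                         # __del__ drops both temporary tables
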
def result(self, log_id, buckets):
    with mdb.connection().row_cursor() as db:
        message.debug('retrieving %(log_id)s bucket coverage', log_id=log_id)
        db.execute('SELECT hits.log_id, log.test, log.description, SUM(hits.hits) AS hits FROM hits NATURAL JOIN log WHERE log.root = %(log_id)s AND bucket_id IN (%(buckets)s) GROUP BY hits.log_id ORDER BY hits DESC;' % locals())
        return db.fetchall()

def result(self, log_id):
    with mdb.connection().row_cursor() as db:
        message.debug('retrieving %(log_id)s coverage information', log_id=log_id)
        db.execute(
            'SELECT %(log_id)s AS log_id, '
            '(SELECT log_id FROM goal WHERE log_id = %(log_id)s LIMIT 1) AS goal, '
            '(SELECT log_id FROM hits WHERE log_id = %(log_id)s LIMIT 1) AS coverage, '
            '(SELECT goal_id FROM master WHERE log_id = %(log_id)s LIMIT 1) AS master, '
            '(SELECT goal.log_id FROM goal JOIN log ON (log.root = goal.log_id) WHERE log.log_id = %(log_id)s LIMIT 1) AS root;'
            % locals())
        return db.fetchone()

def run(self):
    message.debug('Timer thread is ' + threading.current_thread().name)
    while not self.finished.is_set():
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            # don't fire one last time when cancelled mid-wait
            self.function(*self.args, **self.kwargs)
    message.debug('is finished ' + threading.current_thread().name)

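# Hedged sketch of the thread object this run() appears to belong to,
# assuming the usual threading.Timer layout (a 'finished' Event plus
# interval/function/args/kwargs); the class name `repeatingTimer` is
# hypothetical.
import threading

class repeatingTimer(threading.Thread):
    def __init__(self, interval, function, *args, **kwargs):
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.finished = threading.Event()

    def cancel(self):
        # makes run() fall out of its loop without firing again
        self.finished.set()
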
def simulation_fatal(self):
    'Wrapper for fatal epilogue'
    message.debug('Fatal - End of Simulation')
    try:
        self.fatal()
    except:
        exc = sys.exc_info()
        message.error('fatal epilogue failed because ' + str(exc[0]))
        self.traceback(exc[2])

def __init__(self, xml):
    try:
        self.xml = libxml2.parseFile(xml)
    except:
        message.fatal('unable to read regression file %(xml)s because %(excpt)s', xml=xml, excpt=sys.exc_info()[1])
        return
    # tag every element with a unique node id
    for idx, node in enumerate(self.xml.xpathEval('//*')):
        try:
            node.setProp('nid', 'id-' + str(idx))
        except:
            message.debug('setProp failed for %(tag)s', tag=node.name)

def __init__(self, log_id):
    self.log_id = log_id
    self.db = mdb.connection().row_cursor()
    self.db.execute(
        'CREATE TEMPORARY TABLE ' + self.temp +
        ' (PRIMARY KEY (log_id)) SELECT log.*, 0 AS new FROM log WHERE log.root = %s OR log.log_id = %s;',
        (log_id, log_id))
    message.debug('Start new incremental with table %(table)s, log_id %(log_id)s', table=self.temp, log_id=log_id)

def enqueue(cmd):
    'just execute here'
    message.debug('enqueue %(cmd)s', cmd=cmd)
    result = subprocess.Popen(
        cmd.split(' '),
        env=dict(os.environ, MDB='root=' + str(self.mdb.get_root()) + ',parent=' + str(self.mdb.log_id))).wait()
    if result > 0:
        message.warning('process %(cmd)s returned non zero %(result)d', cmd=cmd, result=result)

def result(self, log_id, offset, size):
    '''
    log_id : regression id
    offset : first bucket index
    size   : number of buckets
    '''
    with mdb.connection().row_cursor() as db:
        message.debug('calculating %(log_id)s coverage heat map [%(offset)s+:%(size)s]', log_id=log_id, offset=offset, size=size)
        db.execute(
            'SELECT bucket_id, SUM(hits) AS hits, count(hits) AS tests, ' + db.split('test') +
            ' AS testname FROM hits JOIN log USING (log_id) WHERE log.root = %s AND hits.bucket_id >= %s AND hits.bucket_id < %s GROUP BY bucket_id, testname ORDER BY bucket_id ASC, hits DESC;',
            (log_id, offset, offset + size))
        testnames = self.compress()
        data = list(index.groupby(db, lambda row: row.bucket_id, keyfact=lambda s: list(s._grouper(s.tgtkey)), grpfact=testnames))
        return dict(testnames=testnames.tests(), data=data)

def __init__(self, log_ids=[], test_ids=[], xml=None, threshold=0, robust=False, previous=None):
    'log_ids is a list of regression roots'
    self.log_ids = log_ids
    s_log_ids = ','.join(map(str, log_ids))
    self.tests = mdb.connection().row_cursor()
    if log_ids:
        # create table of individual runs, but not root node as this may have already summarised coverage
        self.tests.execute('CREATE TEMPORARY TABLE ' + self.invs + ' AS SELECT l1.*, goal_id AS master FROM log AS l0 JOIN log AS l1 ON (l0.log_id = l1.root) LEFT OUTER JOIN master ON (l1.log_id = master.log_id) WHERE l1.root IN (' + s_log_ids + ');')
        self.tests.execute('SELECT count(*) AS children FROM ' + self.invs)
        children = self.tests.fetchone().children
        if children:
            message.information('%(log_ids)s %(has)s %(children)d children', log_ids=s_log_ids, children=children, has='have' if len(log_ids) > 1 else 'has')
    # append individual runs as given by test_ids
    if xml:
        xml_ids = xml.xml.xpath('/optimize/test/log_id/text()')
    else:
        xml_ids = []
    if test_ids or xml_ids:
        s_test_ids = ','.join(map(str, test_ids + xml_ids))
        create = ('INSERT INTO ' + self.invs) if log_ids else ('CREATE TEMPORARY TABLE ' + self.invs + ' AS')
        self.tests.execute(create + ' SELECT log.*, IFNULL(goal_id, goal.log_id) AS master FROM log LEFT OUTER JOIN master ON (log.log_id = master.log_id) LEFT OUTER JOIN goal ON (log.log_id = goal.log_id) WHERE log.log_id IN (' + s_test_ids + ') GROUP BY log_id;')
    self.tests.execute('SELECT count(*) AS tests FROM ' + self.invs)
    tests = self.tests.fetchone().tests
    if tests < 1:
        message.fatal('no tests')
    message.information('starting with %(count)d tests in table %(table)s', count=tests, table=self.invs)
    # check congruency
    self.cvg = mdb.connection().row_cursor()
    rows = self.cvg.execute("SELECT md5_self AS md5, 'md5_self' AS type, invs.master, invs.root FROM point JOIN " + self.invs + " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
    md5 = self.cvg.fetchall()
    if not md5:
        message.fatal('no master')
    elif len(md5) > 1:
        message.fatal('md5 of multiple masters do not match')
    else:
        message.debug('md5 query returns %(rows)d', rows=rows)
    self.master = mdb.accessor(md5=md5[0])
    self.cvg.execute("SELECT DISTINCT(md5_axes) AS md5, 'md5_axes' AS type, invs.master, invs.root FROM point JOIN " + self.invs + " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
    md5 = self.cvg.fetchall()
    if len(md5) > 1:
        message.fatal('md5 of multiple axis masters do not match')
    self.master.axes = md5[0]
    # create status table, collating goal & hits
    self.cvg.execute('CREATE TEMPORARY TABLE ' + self.covg + ' (bucket_id INTEGER NOT NULL PRIMARY KEY, goal INTEGER, hits INTEGER, total_hits INTEGER, rhits INTEGER, max_hits INTEGER, tests INTEGER);')
    try:
        self.threshold = float(threshold)
    except (TypeError, ValueError):
        self.threshold = 0.0
        message.warning('cannot convert threshold value given "%(arg)s" to float because %(exception)s, using %(threshold)2.1f', arg=threshold, exception=sys.exc_info()[0], threshold=self.threshold)
    self.robust = robust
    self.previous = previous

def enqueue(self, cmd):
    'just execute here'
    message.debug('enqueue %(cmd)s', cmd=cmd)
    result = subprocess.Popen(
        cmd.split(' '),
        env=dict(os.environ,
                 MDB='root=' + str(mdb_conn.get_root()) + ',parent=' + str(mdb_conn.log_id),
                 PYTHONPATH=os.environ['PYTHONPATH'] + ':../python')).wait()
    if result > 0:
        message.warning('process %(cmd)s returned non zero %(result)d', cmd=cmd, result=result)

def __init__(self, name, description=None, parent=None, root=False, id=None):
    if parent is None:
        if root:
            # this is the new root node
            self.root = self.get_root(self)
        else:
            # default is root node
            self.root = self.get_root()
        _parent = self.root
        message.debug("Hierarchy '%(name)s' given no parent id, defaulting to root", name=name)
    else:
        try:
            # it might be an integer reference
            _parent = self.all_nodes[int(parent)]
            message.debug('Parent id given as integer %(parent)d', parent=int(parent))
        except (TypeError, ValueError, KeyError):
            # must be hierarchy object
            assert isinstance(parent, (hierarchy, coverpoint))
            _parent = parent
        self.root = _parent.root
    self.name = name
    self.description = description or 'None given'
    self.children = list()
    if root:
        self.parent = None
        self.rootMixin.mixin(self, id)
    else:
        _parent.add_child(self)
    # assign unique id
    self.id = id or self.root.get_id()
    # store hashed by this id
    self.all_nodes[self.id] = self
    if self.is_root:
        # root node
        messages.CVG_40(name=self.name, id=self.id)
    self.MESSAGE(name=name, id=self.id, type=self.__class__.__name__, parent=self.get_parent_id())

def end_of_simulation(self, run_epilogue=True):
    'Wrapper for epilogue'
    message.debug('End of Simulation')
    if run_epilogue:
        try:
            self.epilogue()
        except:
            exc = sys.exc_info()
            message.error('epilogue failed because ' + str(exc[0]))
            self.traceback(exc[2])
        # remove fatal callback
        message.terminate_cbs.rm(self.name)
    else:
        message.note('Not running epilogue due to early terminate')
    # tidy up
    mdb.finalize_all()
    # coverage
    if self.coverage:
        database.insert.write(self.coverage, self.mdb.log_id, database.upload.RESULT)
    # remove callbacks
    verilog.callback.remove_all()

def result(self, log_id):
    with mdb.connection().row_cursor() as db:
        message.debug('retrieving %(log_id)s messages', log_id=log_id)
        db.execute('SELECT * FROM message WHERE log_id = %(log_id)s;' % locals())
        return db.fetchall()

def __init__(self, reference=False):
    self.reference = reference
    self.data = list()
    message.debug("sqlite insert created")

def finalize(cls):
    message.debug('Finalize')
    if cls.instance:
        cls.instance.finalize()

def finalize(self, *args):
    'set test status & clean up'
    if self.timer.running():
        message.debug('... bye')
        self.timer.cancel()
    super(mdb.queue.threaded, self).finalize(*args)

def debug(self, indent='', pfix='-', verbose=True):
    # note: the conditional covers the whole expression, so a non-verbose
    # call emits an empty debug record
    message.debug((indent + self.SYMBOL + ' ' + self.name + ' ' + self.coverage().description()) if verbose else '')
    for child in self.children:
        child.debug(indent=indent + pfix, pfix=pfix, verbose=verbose)

if __name__ == '__main__':
    # bottle options
    server = re.match(r'^((?P<host>[^:]+):)?(?P<port>[0-9]+)$', options.http)
    bottle_opts = server.groupdict()

    # intercept log messages and redirect to our logger
    def wsgi_log(self, format, *args):
        severity = message.warning if args[-2] == '404' else message.debug
        severity(format.strip() % args)
    from wsgiref.simple_server import WSGIRequestHandler
    WSGIRequestHandler.log_message = wsgi_log

    # location of static data
    static = os.path.join(options.root, 'static')
    message.debug('Using %(path)s for static data', path=static)

    @bottle.get('/static/<filename:path>')
    def server_static(filename):
        return bottle.static_file(filename, root=static)

    @bottle.route('/')
    @bottle.route('/index.html')
    def index_html():
        return bottle.static_file('/index.html', root=static)

    if options.gevent:
        import gevent
        from gevent import monkey
        monkey.patch_all()