def open(self, prefix, port, rate=None):
    if self.isOpen():
        error("Already opened!")
    else:
        if rate is None:
            self.rate = self.default
        else:
            self.rate = rate
        self.prefix = prefix
        self.name = port
        portname = prefix + port
        try:
            self.port = serial.Serial(portname, self.rate,
                                      timeout=.01,  # time to accumulate characters: 10 ms @ 115200, that's up to 115.2 chars
                                      parity=self.parity, stopbits=self.stopbits,
                                      xonxoff=0, rtscts=0,  # hw flow control
                                      bytesize=self.bytesize)
            note('opened %s at %d' % (port, self.rate))
            self.start()  # run serial in thread
            self.opened.emit()
        except Exception:
            if self.port:
                self.port.close()
                self.port = None
            # error('open port failed for '+prefix+port)
            raise Exception('open port failed for ' + prefix + port)
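# Hedged usage sketch (assumption, not from the original source): opening a
# pyserial port directly, mirroring the keyword arguments open() passes above.
# The device name, baud rate, parity, stop bits and byte size here are placeholders.
import serial
try:
    port = serial.Serial('/dev/ttyUSB0', 115200, timeout=0.01,
                         parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                         xonxoff=0, rtscts=0, bytesize=serial.EIGHTBITS)
except serial.SerialException as exc:
    print 'open port failed:', exc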
def close(self):
    table = 'goal' if self.reference else 'hits'
    if len(self.data) == 0:
        message.note('No data to upload into table "%(table)s", skipping', table=table)
        return
    message.information('starting data upload to table "%(table)s" via insert', table=table)
    with mdb.mdb.cursor() as cursor:
        rows = cursor.executemany('INSERT INTO ' + table + ' (log_id, bucket_id, ' + table + ') VALUES (%s, %s, %s);', self.data)
        warnings = cursor.warning_count()
        if warnings:
            message.warning('upload to db via insert with %(warnings)d warnings', warnings=warnings)
        if rows is None:
            message.warning('upload to db via insert "%(table)s" returned None', table=table)
        else:
            message.information('upload to db via insert added %(rows)d rows of %(data)d to "%(table)s"', rows=int(rows), data=len(self.data), table=table)
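# Hedged, self-contained sketch (assumption, not from the source) of the
# executemany() bulk-insert pattern used in close() above, with sqlite3 standing
# in for the mdb/MySQL cursor (note sqlite uses '?' placeholders rather than '%s').
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE hits (log_id INTEGER, bucket_id INTEGER, hits INTEGER)')
data = [(1, 10, 3), (1, 11, 0), (2, 10, 7)]
cursor = db.executemany('INSERT INTO hits (log_id, bucket_id, hits) VALUES (?, ?, ?)', data)
print cursor.rowcount  # rows inserted, analogous to the `rows` value checked above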
def run(self):
    xml = self.xmlDump()
    cnt = 0
    for incr in self:
        if cnt % 20 == 0:
            message.information(' log_id : rows : hits : coverage')
        cnt += 1
        message.information(' %(log_id)6d : %(rows)6d : %(hits)6d : %(cvg)s',
                            log_id=incr.log.log_id, rows=incr.updates, hits=incr.hits,
                            cvg=incr.status.coverage.description())
        if incr.hits:
            # this test contributed to overall coverage
            xml.add(incr)
        if incr.status.metric().is_hit():
            message.note('all coverage hit')
            break
    message.information('coverage : ' + self.status().coverage.description())
    if self.robust:
        message.information('robust : ' + self.status().robust.description())
    message.information('tests : %(count)d', count=int(xml.xml.xpath('count(/optimize/test/log_id)')))
    # now regenerate hierarchy and report coverage on point basis
    xml.append(self.hierarchy().xml())
    return xml
def connect(self, *args, **kwargs):
    try:
        self.db = kwargs['db']
    except KeyError:
        self.db = self.default_db
    try:
        instance = MySQLdb.connect(host=self.default_host, port=self.default_port,
                                   db=self.default_db, user=self.default_user,
                                   passwd=self.default_passwd)
        instance.autocommit(False)
    except:
        message.warning('Unable to connect to mysql db %(db)s at %(host)s:%(port)d because %(exc)s',
                        db=self.db, host=self.default_host, port=self.default_port,
                        exc=sys.exc_info()[0])
        return
    message.note("Connected to mysql db %(db)s at %(host)s:%(port)d for %(thread)s",
                 db=self.default_db, host=self.default_host, port=self.default_port, thread=self.id())
    # this should be keyed on db too - but we don't use multiple databases currently
    self.instance[self.id()] = instance
def __iter__(self):
    self.reset()
    switched = False
    current = self.status()
    testlist = self.testlist()
    while testlist:
        log = testlist.pop(0)
        updates = self.increment(log.log_id)
        status = self.status()
        yield mdb.accessor(log=log, last=current, updates=updates, status=status,
                           hits=status.metric().hits - current.metric().hits)
        current = status
        # calculate incremental coverage of remaining tests
        with mdb.connection().row_cursor() as db:
            db.execute('DELETE FROM ' + self.invs + ' WHERE log_id = %s;', (log.log_id,))
            if status.metric().coverage() > self.threshold:
                if not switched:
                    switched = True
                    message.note('Switching to incremental selection at %(threshold)0.2f', threshold=self.threshold)
                # switch to incremental as coverage closes
                minexpr = '(status.goal+status.max_hits)-status.rhits' if self.robust else 'status.goal-status.hits'
                if self.robust:
                    db.execute('SELECT invs.*, IFNULL(SUM(MIN((status.goal+status.max_hits)-status.rhits, hits.hits)), 0) AS hits FROM ' + self.invs + ' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN ' + self.covg + ' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < (status.goal+status.max_hits)) GROUP BY log_id ORDER BY hits DESC;')
                else:
                    db.execute('SELECT invs.*, IFNULL(SUM(MIN(status.goal-status.hits, hits.hits)), 0) AS hits FROM ' + self.invs + ' AS invs LEFT OUTER JOIN hits USING (log_id) JOIN ' + self.covg + ' AS status ON (hits.bucket_id = status.bucket_id AND status.goal > 0 AND status.hits < status.goal) GROUP BY log_id ORDER BY hits DESC;')
                testlist = db.fetchall()
def reset(self):
    if self.previous and self.robust:
        # incorporate previous max_hits into coverage accumulator used by robust generator
        self.cvg.execute('REPLACE INTO ' + self.covg + ' SELECT goal.bucket_id, goal.goal, 0 AS hits, 0 AS total_hits, 0 AS rhits, previous.max_hits AS max_hits, 0 AS tests FROM goal JOIN ' + self.previous.covg + ' AS previous USING (bucket_id) WHERE goal.log_id=%s;', (self.get_master(),))
        self.cvg.execute('SELECT SUM(goal) as goal, SUM(goal+max_hits) as robust from ' + self.covg + ';')
        message.note('Incorporating previous max_hits into coverage accumulator. goal %(goal)d is now %(robust)d', **self.cvg.fetchone())
    else:
        self.cvg.execute('REPLACE INTO ' + self.covg + ' SELECT bucket_id, goal, 0 AS hits, 0 AS total_hits, 0 AS rhits, 0 AS max_hits, 0 AS tests FROM goal WHERE log_id=%s;', (self.get_master(),))
def time(self, **args):
    elapsed = time.time()
    self.doit(**args)
    elapsed = time.time() - elapsed
    message.note('page %(page)s served in %(time)0.2fs', page=bottle.request.fullpath, time=elapsed)
    return elapsed
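# Hedged sketch (assumption, not from the source): the same elapsed-time pattern
# as time() above, written as a standalone decorator so it can be run without the
# surrounding class or the bottle/message modules.
import time

def timed(fn):
    def wrapper(*args, **kwargs):
        start = time.time()
        result = fn(*args, **kwargs)
        print '%s took %0.2fs' % (fn.__name__, time.time() - start)
        return result
    return wrapper

@timed
def example_page():  # hypothetical stand-in for doit()
    time.sleep(0.1)

example_page()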
def hush_creation(cls, hush=True):
    level = message.IGNORE if hush else message.INFORMATION
    if hush:
        message.note('reducing coverage point creation verbosity')
    for msg in [messages.CVG_1, messages.CVG_2, messages.CVG_40, messages.CVG_41, messages.CVG_42]:
        msg.level = level
def remove(self):
    message.note('callback "%(name)s" called %(cnt)d times, filtered %(filtered)d, exceptions raised %(excepted)d',
                 cnt=self.cnt, filtered=self.filtered, excepted=self.excepted, name=self.name)
    # e.g. Icarus can return a null pointer for unsupported reasons,
    # so test for a non NULL/None object prior to remove/free
    if self.cb:
        vpi.vpi_remove_cb(self.cb)
        self.vpi_chk_error = vpiChkError()
        vpi.vpi_free_object(self.cb)
    self.callbacks.remove(self)
def pdb(cls, traceback=None):
    message.note('entering pdb command line')
    try:
        import pdb
        if traceback:
            pdb.post_mortem(traceback)
        else:
            pdb.set_trace()
    except:
        pass
    message.note('leaving pdb command line')
def closePort(self):
    if self.isOpen():
        port = self.port
        self.port = None
        try:
            port.flush()
            port.close()
        except:
            pass
        note('closed %s' % self.name)
    else:
        self.port = None
def run(self):
    # perhaps open, read and close are all in this thread
    while self.port:
        try:
            c = self.port.read(1)  # figure out why it doesn't block!!!
            c += self.port.read(self.port.inWaiting())  # get rest of chars
            self.inputs += len(c)
            if c:
                self.source.emit(c)
        except IOError:
            self.closePort()
            note('Alert: device removed while open ')
        except Exception:
            self.closePort()
def __init__(self, name=None, activity=None, block=None, test=None, db=None):
    self.epilogue_cb = epilogue(self.end_of_simulation)
    self.name = name or self.name
    self.test = test or self.test
    self.is_success = None
    self.coverage = None
    activity = activity or self.activity
    block = block or self.block
    message.terminate_cbs.add(self.name, 10, self.terminate, self.check_success)
    try:
        mdb.connection.set_default_db(db=self.get_db())
        self.mdb = mdb.mdb(self.name, activity=activity, block=block, test=self.test)
    except:
        message.note('Not using mdb because ' + str(sys.exc_info()))
    self.START()
    try:
        self.prologue()
    except:
        exc = sys.exc_info()
        message.error('prologue failed because ' + str(exc[0]))
        self.traceback(exc[2])
    # self.coverage *may* be assigned to root node in prologue,
    # if not check for one and use last one created if it exists
    if self.coverage is None and coverage.hierarchy.populated():
        self.coverage = coverage.hierarchy.last_root
    if self.coverage:
        if getattr(self, 'master_id', False):
            database.insert.set_master(self.mdb.log_id, self.master_id)
            if getattr(self, 'master_chk', False):
                # create the hierarchy from master id and verify congruent
                pass
        else:
            database.insert.write(self.coverage, self.mdb.log_id, database.upload.REFERENCE)
    # is verilog library synthetic?
    if verilog.vpiInfo().product == 'Python':
        self.end_of_simulation()
def iteration(ordering, iter_cnt=1, xml=None):
    # use current optimization group if this is not first iteration
    order = ordering[0]
    message.note('Iteration %(iter_cnt)d uses "%(order)s"', **locals())
    if xml:
        opt = database.optimize.options[order](xml=xml, **optimize_opts)
    else:
        opt = database.optimize.options[order](regressions, tests, **optimize_opts)
    run = opt.run()
    optimize_opts['previous'] = opt
    if len(ordering) > 1:
        return iteration(ordering[1:], iter_cnt + 1, run)
    # always return last optimization run
    return opt, run
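# Hedged, standalone sketch (assumption, not from the source) of the recursive
# chaining used by iteration() above: each pass feeds its result into the next
# entry of the ordering list, and the final pass's result is returned.
def chain(ordering, seed=None, iter_cnt=1):
    head, rest = ordering[0], ordering[1:]
    result = '%s(iteration %d, seed=%s)' % (head, iter_cnt, seed)  # stands in for opt.run()
    if rest:
        return chain(rest, result, iter_cnt + 1)
    return head, result

print chain(['coarse_order', 'incremental_order'])  # option names are hypothetical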
def end_of_simulation(self, run_epilogue=True):
    'Wrapper for epilogue'
    message.debug('End of Simulation')
    if run_epilogue:
        try:
            self.epilogue()
        except:
            exc = sys.exc_info()
            message.error('epilogue failed because ' + str(exc[0]))
            self.traceback(exc[2])
        # remove fatal callback
        message.terminate_cbs.rm(self.name)
    else:
        message.note('Not running epilogue due to early terminate')
    # tidy up
    mdb.finalize_all()
    # coverage
    if self.coverage:
        database.insert.write(self.coverage, self.mdb.log_id, database.upload.RESULT)
    # remove callbacks
    verilog.callback.remove_all()
def bottle_log(msg):
    message.note(msg.strip())
def prologue(self):
    message.note('Creating 1000 signal instances')
    instances = [duv.single_bit for i in range(0, 1000)]
    for idx, inst in enumerate(instances):
        message.information('%(idx)d is %(val)d', idx=idx, val=int(inst))
def setRate(self, rate):
    if self.rate != rate:
        note('Baudrate changed to %d' % rate)
        self.rate = rate
        if self.isOpen():
            self.port.baudrate = rate
def prologue(self):
    # hush debug messages to database
    self.mdb.filter_fn = lambda cb_id, level, filename: level < message.INFORMATION
    duv.mem[0] = 69
    duv.mem[69] = 666
    totin = 0
    message.note('begin initialize')
    for r in duv.mem:
        r.set_value(verilog.vpiInt(r.index))
        totin += r.index
    message.note('end initialize at %(idx)d, sum is %(tot)d', idx=r.index, tot=totin)
    message.note('begin read')
    totout = reduce(lambda a, b: int(a) + int(b), duv.mem)
    message.note('end read, sum is %(tot)d', tot=totout)
    duv.mem[666] = 69
    message.note('size of [%(lhs)d:%(rhs)d] is %(size)d', lhs=duv.mem.lhs, rhs=duv.mem.rhs, size=duv.mem.size)
    message.note('size of mem[0] [%(lhs)d:%(rhs)d] is %(size)d', lhs=duv.mem[0].lhs, rhs=duv.mem[0].rhs, size=duv.mem[0].size)
def write(cls, msg):
    message.note(msg)
def execute(self):
    message.note('Reset == %(rst)d', rst=self.obj)
def epilogue(self):
    if self.rstCallback.cnt == 2:
        message.note('Reset callbacks observed = %(cnt)d', cnt=self.rstCallback.cnt)
        self.success()
    else:
        message.error('Reset callbacks observed = %(cnt)d', cnt=self.rstCallback.cnt)
def prologue(self):
    message.error('a int_debug %(c)d', c=69)
    message.note('a note')
def execute(self):
    message.note('vpi cbEndOfSimulation')
def prologue(self):
    message.int_debug('a int_debug %(c)d', c=69)
    message.note('a note')
def prologue(self):
    message.message.verbosity(message.INT_DEBUG)
    ignore()
    message.warning('a warning %(c)d', c=666)
    message.note('a note')
def terminate(*args):
    message.note('terminate ' + str(args))
    print 'terminate ' + str(args[0])
    print args[0].name
def finalize():
    message.note('finalize')
def prologue(self):
    message.message.verbosity(message.INT_DEBUG)
    message.information('simulator is %(product)s', product=verilog.vpiInfo().product)
    message.note('a note')
def prologue(self):
    message.fatal('a fatal %(c)d', c=69)
    message.note('a note')
def prologue(self):
    message.message.verbosity(message.INT_DEBUG)
    message.warning('a warning %(c)d', c=666)
    message.note('a note')
    self.pdb()
# Copyright (c) 2012, 2013 Rich Porter - see LICENSE for further details

import message

message.message.instance.verbosity(0)
message.control[message.NOTE].echo = 0
message.control.DEBUG.echo = 1
message.control.FATAL.threshold = -1

def fn0(*args):
    print "fn0", args[1].tv_nsec, args

def fn1(*args):
    print "fn1", args

message.emit_cbs.add('defaultx', 2, fn0)
message.terminate_cbs.add('bob', 1, fn1)

try:
    message.emit_cbs.add('bob', 1, True)
except message.CallbackError as cberr:
    message.note('expected exception : ' + str(cberr))

def terminate(*args):
    message.note('terminate ' + str(args))
    print 'terminate ' + str(args[0])
    print args[0].name

message.terminate_cbs.add('python', 0, terminate)

message.internal('whoops')