def testConstructor():
    """A bare TestingLogger wraps no logger and knows all six level names;
    TestingLogger(bl) stores bl without sending it anything."""
    tl = TestingLogger()
    bl = BogusLogger()
    assert None == tl.logger
    assert 6 == len(tl.levelcode)
    expected = {0: 'NOTSET', 10: 'DEBUG', 20: 'INFO', 30: 'WARNING', 40: 'ERROR', 50: 'FATAL'}
    for c in expected.keys():
        # bug fix: the failure message used an undefined name 'lc', which would
        # raise NameError instead of a useful AssertionError on mismatch
        assert expected[c] == tl.levelcode[c], 'But at %s expected %s got %s' % (c, expected[c], tl.levelcode[c])
    tl = TestingLogger(bl)
    assert bl is tl.logger
    # constructing the wrapper must not log anything to the wrapped logger
    assert None == bl.item
def setUp(self):
    """Open a database connection built from the test config and (re)create
    the test schema.

    connectionData0 holds (host, dbname, user, password); connectionDataL is
    the same tuple with a TestingLogger appended, for code paths that accept
    a logger argument."""
    self.logger = TestingLogger()
    self.connectionData0 = (config.databaseHost,config.databaseName,config.databaseUserName,config.databasePassword)
    self.connectionDataL = (config.databaseHost,config.databaseName,config.databaseUserName,config.databasePassword,self.logger)
    # DSN string for psycopg2, built from the logger-free tuple
    self.dsn = "host=%s dbname=%s user=%s password=%s" % self.connectionData0
    self.connection = psycopg2.connect(self.dsn)
    createDB(self.connection)
def testInfo():
    """info() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.info("info")
    wrapped.info("info")
    assert bogus.item == (logging.INFO, 'info', ())
    assert plain.levels[0] == logging.INFO
    assert wrapped.levels[0] == logging.INFO
    assert plain.buffer[0] == 'info'
    assert wrapped.buffer[0] == 'info'
def testFatal():
    """fatal() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.fatal("fatal")
    wrapped.fatal("fatal")
    assert bogus.item == (logging.FATAL, 'fatal', ())
    assert plain.levels[0] == logging.FATAL
    assert wrapped.levels[0] == logging.FATAL
    assert plain.buffer[0] == 'fatal'
    assert wrapped.buffer[0] == 'fatal'
def testCritical():
    """critical() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.critical("critical")
    wrapped.critical("critical")
    assert bogus.item == (logging.CRITICAL, 'critical', ())
    assert plain.levels[0] == logging.CRITICAL
    assert wrapped.levels[0] == logging.CRITICAL
    assert plain.buffer[0] == 'critical'
    assert wrapped.buffer[0] == 'critical'
def testError():
    """error() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.error("error")
    wrapped.error("error")
    assert bogus.item == (logging.ERROR, 'error', ())
    assert plain.levels[0] == logging.ERROR
    assert wrapped.levels[0] == logging.ERROR
    assert plain.buffer[0] == 'error'
    assert wrapped.buffer[0] == 'error'
def testWarn():
    """warn() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.warn("warn")
    wrapped.warn("warn")
    assert bogus.item == (logging.WARN, 'warn', ())
    assert plain.levels[0] == logging.WARN
    assert wrapped.levels[0] == logging.WARN
    assert plain.buffer[0] == 'warn'
    assert wrapped.buffer[0] == 'warn'
def testWarning():
    """warning() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.warning("warning")
    wrapped.warning("warning")
    assert bogus.item == (logging.WARNING, 'warning', ())
    assert plain.levels[0] == logging.WARNING
    assert wrapped.levels[0] == logging.WARNING
    assert plain.buffer[0] == 'warning'
    assert wrapped.buffer[0] == 'warning'
def testDebug():
    """debug() records level and message locally and forwards to a wrapped logger."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    plain.debug("bug")
    wrapped.debug("bug")
    assert bogus.item == (logging.DEBUG, 'bug', ())
    assert plain.levels[0] == logging.DEBUG
    assert wrapped.levels[0] == logging.DEBUG
    assert plain.buffer[0] == 'bug'
    assert wrapped.buffer[0] == 'bug'
def testReportExceptionAndContinue(self):
    """reportExceptionAndContinue() emits four log lines per call (caught
    error, its value, a 'trace back follows:' header, and the traceback),
    at ERROR by default or at any caller-supplied loggingLevel, and emits
    nothing at all when ignoreFunction suppresses the report."""
    logger = TestingLogger()
    util.reportExceptionAndContinue(logger)
    assert (4 == len(logger.levels))
    assert ([logging.ERROR, logging.ERROR, logging.ERROR, logging.ERROR] == logger.levels)
    #print logger.buffer
    assert ("Caught Error: None" == logger.buffer[0])
    assert ('None' == logger.buffer[1] ), "expected 'None' but got %s" % logger.buffer[1]
    assert ("trace back follows:" in logger.buffer[2])
    logger.clear()
    # a custom logging level is used verbatim, even a nonsensical one
    util.reportExceptionAndContinue(logger, loggingLevel=-39)
    assert (4 == len(logger.levels))
    assert ([-39, -39, -39, -39] == logger.levels)
    assert ("Caught Error: None" == logger.buffer[0])
    assert ('None' == logger.buffer[1])
    assert ("trace back follows:" in logger.buffer[2])
    logger.clear()
    # ignoreFunction returning True must suppress all logging
    util.reportExceptionAndContinue(logger, ignoreFunction=ignoreAlways)
    assert ([] == logger.levels)
    try:
        raise TestingException("test message")
    except TestingException, e:
        util.reportExceptionAndContinue(logger, loggingLevel=-12)
def testReportExceptionAndContinue(self):
    """Variant of the reportExceptionAndContinue() test for the newer format:
    three log lines per call, the first prefixed with the thread name
    ('MainThread'), the last always at INFO regardless of loggingLevel."""
    logger = TestingLogger()
    util.reportExceptionAndContinue(logger)
    assert(3 == len(logger.levels))
    assert([logging.ERROR, logging.ERROR, logging.INFO] == logger.levels)
    assert("MainThread Caught Error: None" == logger.buffer[0])
    assert('' == logger.buffer[1])
    assert("trace back follows:" in logger.buffer[2])
    logger.clear()
    # a custom logging level applies to the first two lines only
    util.reportExceptionAndContinue(logger, loggingLevel=-39)
    assert(3 == len(logger.levels))
    assert([-39, -39, logging.INFO] == logger.levels)
    assert("MainThread Caught Error: None" == logger.buffer[0])
    assert('' == logger.buffer[1])
    assert("trace back follows:" in logger.buffer[2])
    logger.clear()
    # ignoreFunction returning True must suppress all logging
    util.reportExceptionAndContinue(logger, ignoreFunction = ignoreAlways)
    assert([] == logger.levels)
    try:
        raise TestingException("test message")
    except TestingException, e:
        util.reportExceptionAndContinue(logger, loggingLevel=-12)
def testReportExceptionAndAbort(self):
    """reportExceptionAndAbort() must log five CRITICAL lines, ending with
    'cannot continue - quitting', and then raise SystemExit."""
    logger = TestingLogger()
    try:
        util.reportExceptionAndAbort(logger)
        # reaching here means SystemExit was not raised - fail the test
        assert (False)
    except SystemExit, e:
        assert (True)
    assert (5 == len(logger.levels))
    assert ([ logging.CRITICAL, logging.CRITICAL, logging.CRITICAL, logging.CRITICAL, logging.CRITICAL ] == logger.levels)
    assert ("cannot continue - quitting" == logger.buffer[4])
def testReportExceptionAndContinue(self):
    """reportExceptionAndContinue() emits four log lines per call (caught
    error, its value, a 'trace back follows:' header, and the traceback),
    at ERROR by default or at any caller-supplied loggingLevel, and emits
    nothing when ignoreFunction suppresses the report."""
    logger = TestingLogger()
    util.reportExceptionAndContinue(logger)
    assert(4 == len(logger.levels))
    assert([logging.ERROR, logging.ERROR, logging.ERROR, logging.ERROR] == logger.levels)
    #print logger.buffer
    assert("Caught Error: None" == logger.buffer[0])
    assert('None' == logger.buffer[1]), "expected 'None' but got %s" % logger.buffer[1]
    assert("trace back follows:" in logger.buffer[2])
    logger.clear()
    # a custom logging level is used verbatim, even a nonsensical one
    util.reportExceptionAndContinue(logger, loggingLevel=-39)
    assert(4 == len(logger.levels))
    assert([-39, -39, -39, -39] == logger.levels)
    assert("Caught Error: None" == logger.buffer[0])
    assert('None' == logger.buffer[1])
    assert("trace back follows:" in logger.buffer[2])
    logger.clear()
    # ignoreFunction returning True must suppress all logging
    util.reportExceptionAndContinue(logger, ignoreFunction = ignoreAlways)
    assert([] == logger.levels)
    try:
        raise TestingException("test message")
    except TestingException, e:
        util.reportExceptionAndContinue(logger, loggingLevel=-12)
def setUp(self):
    """Build the ftpscraper test configuration: expand %(testDir)s into every
    config value that accepts %-formatting, wrap the shared file logger in a
    TestingLogger, and prepare a small secondary config holding the path of
    the persistent-state pickle."""
    global me
    self.config = cfgManager.newConfiguration(
        configurationModule=testConfig, applicationName='Testing ftpscraper')
    myDir = os.path.split(__file__)[0]
    if not myDir:
        myDir = '.'
    replDict = {'testDir': '%s' % myDir}
    for i in self.config:
        try:
            self.config[i] = self.config.get(i) % (replDict)
        except:
            # not every config value is a %-format string; leave those as-is
            pass
    self.logger = TestingLogger(me.fileLogger)
    self.testConfig = cfgManager.Config([
        ('t', 'testPath', True, './TEST-BUILDS', ''),
        ('f', 'testFileName', True, 'lastrun.pickle', '')
    ])
    self.testConfig["persistentDataPathname"] = os.path.join(
        self.testConfig.testPath, self.testConfig.testFileName)
def testWarn():
    """warn() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.warn("warn")
    assert bogus.item == (logging.WARN, 'warn', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.WARN
        assert lgr.buffer[0] == 'warn'
def testCritical():
    """critical() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.critical("critical")
    assert bogus.item == (logging.CRITICAL, 'critical', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.CRITICAL
        assert lgr.buffer[0] == 'critical'
def testError():
    """error() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.error("error")
    assert bogus.item == (logging.ERROR, 'error', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.ERROR
        assert lgr.buffer[0] == 'error'
def testInfo():
    """info() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.info("info")
    assert bogus.item == (logging.INFO, 'info', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.INFO
        assert lgr.buffer[0] == 'info'
def testDebug():
    """debug() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.debug("bug")
    assert bogus.item == (logging.DEBUG, 'bug', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.DEBUG
        assert lgr.buffer[0] == 'bug'
def testWarning():
    """warning() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.warning("warning")
    assert bogus.item == (logging.WARNING, 'warning', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.WARNING
        assert lgr.buffer[0] == 'warning'
def testFatal():
    """fatal() is captured by the logger itself and relayed to any wrapped logger."""
    bogus = BogusLogger()
    direct = TestingLogger()
    relaying = TestingLogger(bogus)
    for lgr in (direct, relaying):
        lgr.fatal("fatal")
    assert bogus.item == (logging.FATAL, 'fatal', ())
    for lgr in (direct, relaying):
        assert lgr.levels[0] == logging.FATAL
        assert lgr.buffer[0] == 'fatal'
def setUp(self):
    """Build the builds-test configuration: expand %(testDir)s into every
    config value that accepts %-formatting, wrap the shared file logger in a
    TestingLogger, and prepare a small secondary config holding the path of
    the persistent-state pickle."""
    global me
    self.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing builds')
    myDir = os.path.split(__file__)[0]
    if not myDir:
        myDir = '.'
    replDict = {'testDir':'%s'%myDir}
    for i in self.config:
        try:
            self.config[i] = self.config.get(i)%(replDict)
        except:
            # not every config value is a %-format string; leave those as-is
            pass
    self.logger = TestingLogger(me.fileLogger)
    self.testConfig = configurationManager.Config([('t','testPath', True, './TEST-BUILDS', ''),
                                                   ('f','testFileName', True, 'lastrun.pickle', ''),
                                                   ])
    self.testConfig["persistentDataPathname"] = os.path.join(self.testConfig.testPath, self.testConfig.testFileName)
def setUp(self):
    """Build the MTBF test configuration, wrap the shared file logger in a
    TestingLogger, connect to the test database, and (re)create the schema.
    Also seed the product/OS name lists used by fillMtbfTables."""
    global me
    self.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing MTBF')
    myDir = os.path.split(__file__)[0]
    if not myDir:
        myDir = '.'
    replDict = {'testDir':'%s'%myDir}
    for i in self.config:
        try:
            self.config[i] = self.config.get(i)%(replDict)
        except:
            # not every config value is a %-format string; leave those as-is
            pass
    self.logger = TestingLogger(me.fileLogger)
    self.connection = psycopg2.connect(me.dsn)
    cursor = self.connection.cursor()
    self.testDB = TestDB()
    # drop-then-create for a clean slate every test
    self.testDB.removeDB(self.config,self.logger)
    self.testDB.createDB(self.config,self.logger)
    self.prods = ['zorro','vogel','lizz',]
    self.oss = ['OSX','LOX','WOX',]
    self.productDimData = [] # filled in by fillMtbfTables
def testLog():
    """log() stores the %-formatted message locally but forwards raw args."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    # no-argument form: message stored verbatim
    for lvl in (0, 10, 20, 30, 40, 50):
        plain.log(lvl, 'message')
        wrapped.log(lvl, 'message')
        assert plain.buffer[-1] == 'message'
        assert plain.levels[-1] == lvl
        assert bogus.item == (lvl, 'message', ())
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    # argument form: local buffer gets the interpolated text, the wrapped
    # logger gets the unformatted template plus args
    for lvl in (0, 10, 20, 30, 40, 50):
        plain.log(lvl, 'message %s %s', 'one', 'two')
        wrapped.log(lvl, 'message %s %s', 'one', 'two')
        assert plain.buffer[-1] == 'message one two'
        assert bogus.item == (lvl, 'message %s %s', ('one', 'two'))
def testLog():
    """log() stores the %-formatted message locally but forwards raw args."""
    bogus = BogusLogger()
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    levels = [10 * n for n in range(6)]
    # no-argument form: message stored verbatim
    for lvl in levels:
        plain.log(lvl, 'message')
        wrapped.log(lvl, 'message')
        assert plain.buffer[-1] == 'message'
        assert plain.levels[-1] == lvl
        assert bogus.item == (lvl, 'message', ())
    plain = TestingLogger()
    wrapped = TestingLogger(bogus)
    # argument form: local buffer gets the interpolated text, the wrapped
    # logger gets the unformatted template plus args
    for lvl in levels:
        plain.log(lvl, 'message %s %s', 'one', 'two')
        wrapped.log(lvl, 'message %s %s', 'one', 'two')
        assert plain.buffer[-1] == 'message one two'
        assert bogus.item == (lvl, 'message %s %s', ('one', 'two'))
def testClear():
    """clear() empties the logger, its level list, and its message buffer."""
    tl = TestingLogger()
    tl.clear()
    assert len(tl) == 0
    assert len(tl.levels) == 0
    assert len(tl.buffer) == 0
    # log once at every level, via every convenience method
    for emit in (tl.debug, tl.info, tl.warning, tl.warn, tl.error, tl.critical, tl.fatal):
        emit('woo')
    assert len(tl) == 7
    assert len(tl.levels) == 7
    assert len(tl.buffer) == 7
    tl.clear()
    assert len(tl) == 0
    assert len(tl.levels) == 0
    assert len(tl.buffer) == 0
def testLenFunction():
    """len(logger) grows by exactly one per logged message, at every level."""
    tl = TestingLogger()
    assert len(tl) == 0
    emitters = (tl.debug, tl.info, tl.warning, tl.warn, tl.error, tl.critical, tl.fatal)
    for count, emit in enumerate(emitters):
        emit('woo')
        assert len(tl) == count + 1
def testStrFunction():
    """str(logger) renders 'LEVELNAME (num): message' lines joined by newlines."""
    tl = TestingLogger()
    assert str(tl) == ''
    # (method, message, rendered line) triples; note warn renders as WARNING
    # and critical renders as FATAL because they share numeric levels
    calls = [
        (tl.debug, 'debug', 'DEBUG (10): debug'),
        (tl.info, 'info', 'INFO (20): info'),
        (tl.warn, 'warn', 'WARNING (30): warn'),
        (tl.warning, 'warning', 'WARNING (30): warning'),
        (tl.error, 'error', 'ERROR (40): error'),
        (tl.critical, 'critical', 'FATAL (50): critical'),
        (tl.fatal, 'fatal', 'FATAL (50): fatal'),
    ]
    expLines = []
    for emit, message, rendered in calls:
        emit(message)
        expLines.append(rendered)
    assert str(tl) == "\n".join(expLines)
class TestFtpScraper(unittest.TestCase):
    """Tests for ftpscraper's HTML link scraping and info-file parsing,
    using DummyObjectWithExpectations to fake urllib2 responses."""

    def setUp(self):
        """Expand %(testDir)s into the config, wrap the shared file logger,
        and prepare a small config for the persistent pickle path."""
        global me
        self.config = cfgManager.newConfiguration(
            configurationModule=testConfig,
            applicationName='Testing ftpscraper')
        myDir = os.path.split(__file__)[0]
        if not myDir:
            myDir = '.'
        replDict = {'testDir': '%s' % myDir}
        for i in self.config:
            try:
                self.config[i] = self.config.get(i) % (replDict)
            except:
                # not every config value is a %-format string
                pass
        self.logger = TestingLogger(me.fileLogger)
        self.testConfig = cfgManager.Config([('t', 'testPath', True, './TEST-BUILDS', ''),
                                             ('f', 'testFileName', True, 'lastrun.pickle', '')])
        self.testConfig["persistentDataPathname"] = os.path.join(
            self.testConfig.testPath, self.testConfig.testFileName)

    def tearDown(self):
        self.logger.clear()

    def test_getLinks(self):
        """getLinks() must filter anchor targets by startswith / endswith."""
        self.config.products = ('PRODUCT1', 'PRODUCT2')
        self.config.base_url = 'http://www.example.com/'
        fake_response_url = "%s%s" % (self.config.base_url,
                                      self.config.products[0])
        fake_response_contents = """
blahblahblahblahblah
<a href="product1-v1.en-US.p1.txt">product1-v1.en-US.p1.txt</a>
<a href="product1-v1.en-US.p1.zip">product1-v1.en-US.p1.zip</a>
<a href="product2-v2.en-US.p2.txt">product2-v2.en-US.p2.txt</a>
<a href="product2-v2.en-US.p2.zip">product2-v2.en-US.p2.zip</a>
blahblahblahblahblah
"""
        # fake response/urllib pair scripted with the exact expected calls
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url,), {}, fakeResponse)
        actual = ftpscraper.getLinks('http://www.example.com/PRODUCT1',
                                     startswith='product1',
                                     urllib=fakeUrllib2)
        expected = ['product1-v1.en-US.p1.txt', 'product1-v1.en-US.p1.zip']
        assert actual == expected, "expected %s, got %s" % (expected, actual)
        # the dummies are single-use; rebuild them for the endswith case
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url,), {}, fakeResponse)
        expected = ['product1-v1.en-US.p1.zip', 'product2-v2.en-US.p2.zip']
        actual = ftpscraper.getLinks('http://www.example.com/PRODUCT1',
                                     endswith='.zip',
                                     urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)

    def test_parseInfoFile(self):
        """parseInfoFile() must handle both the nightly two-line format
        (buildID then rev URL) and the release key=value format."""
        self.config.products = ('PRODUCT1', 'PRODUCT2')
        self.config.base_url = 'http://www.example.com/'
        fake_response_url = "%s%s" % (self.config.base_url,
                                      self.config.products[0])
        fake_response_contents = """
20111011042016
http://hg.mozilla.org/releases/mozilla-aurora/rev/327f5fdae663
"""
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url,), {}, fakeResponse)
        rev = 'http://hg.mozilla.org/releases/mozilla-aurora/rev/327f5fdae663'
        expected = { 'buildID': '20111011042016', 'rev': rev }
        actual = ftpscraper.parseInfoFile('http://www.example.com/PRODUCT1',
                                          nightly=True,
                                          urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)
        # release-style info file: key=value pairs, no rev line
        fake_response_contents = """
buildID=20110705195857
"""
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url,), {}, fakeResponse)
        expected = {'buildID': '20110705195857'}
        actual = ftpscraper.parseInfoFile('http://www.example.com/PRODUCT1',
                                          nightly=False,
                                          urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)
class TestMtbf(unittest.TestCase):
    """Tests for the mtbf module: fact calculation (regression-checked),
    product selection per processing day, and the small helpers."""

    def setUp(self):
        """Expand %(testDir)s into the config, wrap the shared file logger,
        connect to the test database, and rebuild the schema from scratch."""
        global me
        self.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing MTBF')
        myDir = os.path.split(__file__)[0]
        if not myDir:
            myDir = '.'
        replDict = {'testDir':'%s'%myDir}
        for i in self.config:
            try:
                self.config[i] = self.config.get(i)%(replDict)
            except:
                # not every config value is a %-format string
                pass
        self.logger = TestingLogger(me.fileLogger)
        self.connection = psycopg2.connect(me.dsn)
        cursor = self.connection.cursor()
        self.testDB = TestDB()
        self.testDB.removeDB(self.config,self.logger)
        self.testDB.createDB(self.config,self.logger)
        self.prods = ['zorro','vogel','lizz',]
        self.oss = ['OSX','LOX','WOX',]
        self.productDimData = [] # filled in by fillMtbfTables

    def tearDown(self):
        self.testDB.removeDB(self.config,self.logger)
        self.logger.clear()

    def fillMtbfTables(self,cursor):
        """
        Need some data to test with. Here's where we make it out of whole cloth...
        """
        # (id),product,version,os,release : what product is it, by id
        self.productDimData = [ [self.prods[p],'%s.1.%s'%(p,r), self.oss[o], 'beta-%s'%r] for p in range(2) for o in range(2) for r in range(1,4) ]
        cursor.executemany('INSERT into productdims (product,version,os_name,release) values (%s,%s,%s,%s)',self.productDimData)
        cursor.connection.commit()
        cursor.execute("SELECT id, product,version, os_name, release from productdims")
        productDimData = cursor.fetchall()
        cursor.connection.commit()
        self.baseDate = dt.date(2008,1,1)
        # per-version (start, end) interest windows, staggered 10 days apart
        self.intervals = {
            '0.1.1':(self.baseDate ,self.baseDate+dt.timedelta(days=30)),
            '0.1.2':(self.baseDate + dt.timedelta(days=10),self.baseDate + dt.timedelta(days=40)),
            '0.1.3':(self.baseDate + dt.timedelta(days=20),self.baseDate + dt.timedelta(days=50)),
            '1.1.1':(self.baseDate + dt.timedelta(days=10),self.baseDate + dt.timedelta(days=40)),
            '1.1.2':(self.baseDate + dt.timedelta(days=20),self.baseDate + dt.timedelta(days=50)),
            '1.1.3':(self.baseDate + dt.timedelta(days=30),self.baseDate + dt.timedelta(days=60)),
            }
        # processing days are located at and beyond the extremes of the full range, and
        # at some interior points, midway between each pair of interior points
        # layout is: (a date, the day-offset from baseDate, the expected resulting [ids])
        PDindexes = [-1,0,5,10,15,25,35,45,55,60,61]
        productsInProcessingDay = [
            [], # -1,
            [1,4],# 0,
            [1,4],# 5,
            [1,2,4,5,7,10],# 10,
            [1,2,4,5,7,10],# 15,
            [1,2,3,4,5,6,7,8,10,11],# 25,
            [2,3,5,6,7,8,9,10,11,12],# 35,
            [3,6,8,9,11,12],# 45,
            [9,12],# 55,
            [9,12],# 60,
            [],# 61,
            ]
        self.processingDays = [ (self.baseDate+dt.timedelta(days=PDindexes[x]),PDindexes[x],productsInProcessingDay[x]) for x in range(len(PDindexes))]
        # (id), productdims_id, start_dt, end_dt : Date-interval when product is interesting
        configData =[ (x[0],self.intervals[x[2]][0],self.intervals[x[2]][1] ) for x in productDimData ]
        cursor.executemany('insert into mtbfconfig (productdims_id,start_dt,end_dt) values(%s,%s,%s)',configData)
        cursor.connection.commit()
        self.expectedFacts = {
            # key is offset from baseDate
            # value is array of (productDims_id,day,avg_seconds,report_count,count(distinct(user))
            # This data WAS NOT CALCULATED BY HAND: The test was run once with prints in place
            # and that output was encoded here. As of 2009-Feb, count of unique users is always 0
            -1: [],
            0: [
                (1, dt.date(2008,1,1), 5, 6, 0),
                (4, dt.date(2008,1,1), 20, 6, 0),
                ],
            5: [
                (1, dt.date(2008,1,6), 5, 6, 0),
                (4, dt.date(2008,1,6), 20, 6, 0),
                ],
            10: [
                (1, dt.date(2008,1,11), 5, 6, 0),
                (2, dt.date(2008,1,11), 10, 6, 0),
                (4, dt.date(2008,1,11), 20, 6, 0),
                (5, dt.date(2008,1,11), 25, 6, 0),
                (7, dt.date(2008,1,11), 35, 6, 0),
                (10, dt.date(2008,1,11), 50, 6, 0),
                ],
            15: [
                (1, dt.date(2008,1,16), 5, 6, 0),
                (2, dt.date(2008,1,16), 10, 6, 0),
                (4, dt.date(2008,1,16), 20, 6, 0),
                (5, dt.date(2008,1,16), 25, 6, 0),
                (7, dt.date(2008,1,16), 35, 6, 0),
                (10, dt.date(2008,1,16), 50, 6, 0),
                ],
            25: [
                (1, dt.date(2008,1,26), 5, 6, 0),
                (2, dt.date(2008,1,26), 10, 6, 0),
                (3, dt.date(2008,1,26), 15, 6, 0),
                (4, dt.date(2008,1,26), 20, 6, 0),
                (5, dt.date(2008,1,26), 25, 6, 0),
                (6, dt.date(2008,1,26), 30, 6, 0),
                (7, dt.date(2008,1,26), 35, 6, 0),
                (8, dt.date(2008,1,26), 40, 6, 0),
                (10, dt.date(2008,1,26), 50, 6, 0),
                (11, dt.date(2008,1,26), 55, 6, 0),
                ],
            35: [
                (2, dt.date(2008,2,5), 10, 6, 0),
                (3, dt.date(2008,2,5), 15, 6, 0),
                (5, dt.date(2008,2,5), 25, 6, 0),
                (6, dt.date(2008,2,5), 30, 6, 0),
                (7, dt.date(2008,2,5), 35, 6, 0),
                (8, dt.date(2008,2,5), 40, 6, 0),
                (9, dt.date(2008,2,5), 45, 6, 0),
                (10, dt.date(2008,2,5), 50, 6, 0),
                (11, dt.date(2008,2,5), 55, 6, 0),
                (12, dt.date(2008,2,5), 60, 6, 0),
                ],
            45: [
                (3, dt.date(2008,2,15), 15, 6, 0),
                (6, dt.date(2008,2,15), 30, 6, 0),
                (8, dt.date(2008,2,15), 40, 6, 0),
                (9, dt.date(2008,2,15), 45, 6, 0),
                (11, dt.date(2008,2,15), 55, 6, 0),
                (12, dt.date(2008,2,15), 60, 6, 0),
                ],
            55: [
                (9, dt.date(2008,2,25), 45, 6, 0),
                (12, dt.date(2008,2,25), 60, 6, 0),
                ],
            60: [
                (9, dt.date(2008,3,1), 45, 6, 0),
                (12, dt.date(2008,3,1), 60, 6, 0),
                ],
            61: [],
            }

    def fillReports(self,cursor):
        """fill enough data to test mtbf behavior:
          - AVG(uptime); COUNT(date_processed); COUNT(DISTINCT(user_id))
        """
        self.fillMtbfTables(cursor) # prime the pump
        sql = 'insert into reports (uuid, uptime, date_processed,product,version,os_name) values(%s,%s,%s,%s,%s,%s)'
        processTimes = ['00:00:00','05:00:00','10:00:00','15:00:00','20:00:00','23:59:59']
        uptimes = [5*x for x in range(1,15)]
        data = []
        uuidGen = dbtestutil.moreUuid()
        uptimeIndex = 0
        # one report per product x processing day x time-of-day; each product
        # gets a single fixed uptime so AVG(uptime) is predictable
        for product in self.productDimData:
            uptime = uptimes[uptimeIndex%len(uptimes)]
            uptimeIndex += 1
            for d,off,ig in self.processingDays:
                for pt in processTimes:
                    dp = "%s %s"%(d.isoformat(),pt)
                    tup = (uuidGen.next(), uptime,dp,product[0],product[1],product[2])
                    data.append(tup)
        cursor.executemany(sql,data)
        cursor.connection.commit()

    # ========================================================================== #
    def testCalculateMtbf(self):
        """
        testCalculateMtbf(self): slow(1)
          check that we get the expected data. This is NOT hand-calculated, just a regression check
        """
        cursor = self.connection.cursor()
        self.fillReports(cursor)
        sql = 'select productdims_id,day,avg_seconds,report_count,unique_users from mtbffacts WHERE day = %s'
        self.connection.commit()
        for pd in self.processingDays:
            self.config.processingDay = pd[0].isoformat()
            mtbf.calculateMtbf(self.config, self.logger)
            cursor.execute(sql,(pd[0].isoformat(),))
            data = cursor.fetchall()
            self.connection.commit()
            # compare as sets: row order is not part of the contract
            expected = set(self.expectedFacts[pd[1]])
            got = set(data)
            assert expected==got, 'Expected: %s\nGot: %s'%(expected,got)
        #end of loop through processingDay

    # ========================================================================== #
    def testGetProductsToUpdate(self):
        """
        testGetProductsToUpdate(self):
          check that we get the appropriate list of products when:
           - we have none (on either end of the range)
           - we have only one or several
          check that we correctly log when there are no products in range
        """
        cursor = self.connection.cursor()
        self.fillMtbfTables(cursor)
        for d in self.processingDays:
            self.config.processingDay = d[0].isoformat()
            self.logger.clear()
            products = mtbf.getProductsToUpdate(cursor,self.config,self.logger)
            self.connection.commit()
            if d[1] in (-1,61):
                # be sure that when appropriate we log a warning about no configured products
                assert 2 == len(self.logger)
                assert logging.WARNING == self.logger.levels[1]
                assert 'Currently there are no MTBF products configured' == self.logger.buffer[1]
            else:
                # ignore the expected logging: It could change. Catch unexpected logging calls
                assert len(d[2])+2 == len(self.logger)
                INFO = 0
                DBG = 0
                oth = 0
                for i in self.logger.levels:
                    if logging.INFO == i:
                        INFO += 1
                    elif logging.DEBUG == i:
                        DBG += 1
                    else:
                        oth += 1
                # Don't care about DBG or INFO counts, except we expect no other
                #assert len(d[2])+1 == INFO, 'expected %d, but %s\n%s'%(len(d[2])+1,INFO,str(self.logger))
                #assert 1 == DBG
                assert 0 == oth
            pids = set([x.dimensionId for x in products])
            expected = set(d[2])
            # This is the meat of the test
            assert expected == pids, 'Expected %s, got %s'%(expected,pids)

    # ========================================================================== #
    def testGetWhereClauseFor(self):
        """
        testGetWhereClauseFor(self):
          check that we correctly handle the 'ALL' product
          check that we correctly order and truncate version,product,os_name
        """
        class P:
            # minimal stand-in with just the attributes the helper reads
            pass
        p = P()
        p.product = 'ALL'
        assert '' == mtbf.getWhereClauseFor(p)
        p.product = 'product'
        # missing version / os_name attributes must raise AttributeError
        assert_raises(AttributeError,mtbf.getWhereClauseFor,p)
        p.version = 'version'
        assert_raises(AttributeError,mtbf.getWhereClauseFor,p)
        p.os_name='os_name'
        expected = " AND version = 'version' AND product = 'product' AND substr(os_name, 1, 3) = 'os_name' "
        assert expected == mtbf.getWhereClauseFor(p), 'but "%s"'%(mtbf.getWhereClauseFor(p))

    # ========================================================================== #
    def testClassProductDimension(self):
        """
        testClassProductDimension(self):
          check that we handle config with correct or greater items in config
        """
        # fewer than five elements must raise IndexError
        assert_raises(IndexError,mtbf.ProductDimension,[])
        assert_raises(IndexError,mtbf.ProductDimension,[1])
        assert_raises(IndexError,mtbf.ProductDimension,[1,2])
        assert_raises(IndexError,mtbf.ProductDimension,[1,2,3])
        self.logger.clear()
        assert_raises(IndexError,mtbf.ProductDimension,[1,2,3,4])
        # extra trailing elements are tolerated and ignored
        config = [999,'umlaut',3.14,'OX','lemme go','toomuch']
        pd = mtbf.ProductDimension(config,self.logger)
        assert 999 == pd.dimensionId
        assert 'umlaut' == pd.product
        assert 3.14 == pd.version
        assert 'OX' == pd.os_name
        assert 'lemme go' == pd.release
        assert 5 == len(pd.__dict__), "Assure nobody adds another dimension element without updating tests"
        assert 0 == len(self.logger),'but logger:\n%s'%self.logger
class TestBugzilla(unittest.TestCase):
    """Tests for the bugzilla CSV importer: signature extraction from the
    cf_crash_signature column and upsert behavior in bugs/bug_associations."""

    def setUp(self):
        """Expand %(testDir)s into the config, wrap the shared file logger,
        and prepare a small config for the persistent pickle path."""
        global me
        self.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing bugzilla')
        myDir = os.path.split(__file__)[0]
        if not myDir:
            myDir = '.'
        replDict = {'testDir':'%s'%myDir}
        for i in self.config:
            try:
                self.config[i] = self.config.get(i)%(replDict)
            except:
                # not every config value is a %-format string
                pass
        self.logger = TestingLogger(me.fileLogger)
        self.connection = me.database.connection()
        #self.connection = psycopg2.connect(me.dsn)
        self.testConfig = configurationManager.Config([('t','testPath', True, './TEST-BUGZILLA', ''),
                                                       ('f','testFileName', True, 'lastrun.pickle', ''),
                                                       ('', 'daysIntoPast', True, 0),])
        self.testConfig["persistentDataPathname"] = os.path.join(self.testConfig.testPath, self.testConfig.testFileName)

    def tearDown(self):
        self.logger.clear()

    def test_bugzilla_iterator(self):
        """Feed hand-built CSV rows covering: no signature, one signature,
        several, duplicates, a malformed (unclosed) bracket, an empty field,
        the no-space '[@sig]' form, and signatures split by embedded newlines."""
        csv = ['bug_id,"bug_status","resolution","short_desc","cf_crash_signature"\n',
               '1,"RESOLVED",,"this is a comment","This sig, while bogus, has a ] bracket"',
               '2,"CLOSED","WONTFIX","comments are not too important","single [@ BogusClass::bogus_sig (const char**) ] signature"',
               '3,"ASSIGNED",,"this is a comment. [@ nanojit::LIns::isTramp()]","[@ js3250.dll@0x6cb96] [@ valid.sig@0x333333]"',
               '4,"CLOSED","RESOLVED","two sigs enter, one sig leaves","[@ layers::Push@0x123456] [@ layers::Push@0x123456]"',
               '5,"ASSIGNED","INCOMPLETE",,"[@ MWSBAR.DLL@0x2589f] and a broken one [@ sadTrombone.DLL@0xb4s455"',
               '6,"ASSIGNED",,"empty crash sigs should not throw errors",""',
               '7,"CLOSED",,"gt 525355 gt","[@gfx::font(nsTArray<nsRefPtr<FontEntry> > const&)]"',
               '8,"CLOSED","RESOLVED","newlines in sigs","[@ legitimate(sig)] \n junk \n [@ another::legitimate(sig) ]"' ]
        correct = [ (1, "RESOLVED", "", "this is a comment", set([])),
                    (2, "CLOSED", "WONTFIX", "comments are not too important", set(["BogusClass::bogus_sig (const char**)"])),
                    (3, "ASSIGNED", "", "this is a comment. [@ nanojit::LIns::isTramp()]", set(["js3250.dll@0x6cb96", "valid.sig@0x333333"])),
                    (4, "CLOSED", "RESOLVED", "two sigs enter, one sig leaves", set(["layers::Push@0x123456"])),
                    (5, "ASSIGNED", "INCOMPLETE", "", set(["MWSBAR.DLL@0x2589f"])),
                    (6, "ASSIGNED", "", "empty crash sigs should not throw errors", set([])),
                    (7, "CLOSED", "", "gt 525355 gt", set(["gfx::font(nsTArray<nsRefPtr<FontEntry> > const&)"])),
                    (8, "CLOSED", "RESOLVED", "newlines in sigs", set(['another::legitimate(sig)', 'legitimate(sig)'])) ]
        for expected, actual in zip(bug.bugzilla_iterator(csv, iter), correct):
            assert expected == actual, "expected %s, got %s" % (str(expected), str(actual))

    def test_signature_is_found(self):
        """signature_is_found() must be True for a stored signature only."""
        global me
        assert bug.signature_is_found("js3250.dll@0x6cb96", me.cur)
        assert not bug.signature_is_found("sir_not_appearing_in_this_film", me.cur)
        me.cur.connection.rollback()

    def verify_tables(self, correct):
        """Assert the bugs and bug_associations tables exactly match the
        rows given in correct['bugs'] and correct['bug_associations']."""
        global me
        # bug_status
        count = 0
        for expected, actual in zip(psy.execute(me.cur, "select id, status, resolution, short_desc from bugs order by 1"), correct["bugs"]):
            count += 1
            assert expected == actual, "expected %s, got %s" % (str(expected), str(actual))
        assert len(correct["bugs"]) == count, "expected %d entries in bugs but found %d" % (len(correct["bugs"]), count)
        #bug_associations
        count = 0
        for expected, actual in zip(psy.execute(me.cur, "select signature, bug_id from bug_associations order by 1, 2"), correct["bug_associations"]):
            count += 1
            assert expected == actual, "expected %s, got %s" % (str(expected), str(actual))
        assert len(correct["bug_associations"]) == count, "expected %d entries in bug_associations but found %d" % (len(correct["bug_associations"]), count)

    def test_insert_or_update_bug_in_database(self):
        """Exercise insert, association-removal, and update paths of
        insert_or_update_bug_in_database(), verifying table contents after
        each pass.  Cases covered (status/resolution/signature list):"""
        #bugId, statusFromBugzilla, resolutionFromBugzilla, signatureListFromBugzilla
        #new * * empty
        #new * * 1 new
        #new * * 2 new
        #old * * empty
        #old new new
        global me
        def true(x, y):
            # predicate stub: accept every signature
            return True
        def hasYES(x, y):
            # predicate stub: accept only strings containing "YES"
            return "YES" in x
        me.cur = me.conn.cursor()
        #me.cur = me.conn.cursor(cursor_factory=psy.LoggingCursor)
        #me.cur.setLogger(me.fileLogger)
        psy.execute(me.cur, "delete from bug_status")
        me.cur.connection.commit()
        # test initial insert
        sample1 = [ (2,"CLOSED","WONTFIX","a short desc",set(["aaaa"])),
                    (3,"NEW","","a short desc",set([])),
                    (343324,"ASSIGNED","","a short desc",set(["bbbb","cccc"])),
                    (343325,"CLOSED","RESOLVED","a short desc",set(["dddd"])), ]
        correct1 = { "bugs": [(2, "CLOSED", "WONTFIX", "a short desc"),
                              (343324,"ASSIGNED","", "a short desc"),
                              (343325,"CLOSED", "RESOLVED","a short desc")],
                     "bug_associations": [("aaaa", 2),
                                          ("bbbb", 343324),
                                          ("cccc", 343324),
                                          ("dddd", 343325)] }
        for bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla in sample1:
            bug.insert_or_update_bug_in_database(bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla, me.cur, true)
        self.verify_tables(correct1)
        #test removing existing associations
        sample2 = [ (2,"CLOSED","WONTFIX","a short desc",set([])),
                    (343324,"ASSIGNED","","a short desc",set(["bbbb"])), ]
        correct2 = { "bugs": [(343324,"ASSIGNED","","a short desc"),
                              (343325,"CLOSED", "RESOLVED","a short desc")],
                     "bug_associations": [("bbbb", 343324),
                                          ("dddd", 343325)] }
        for bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla in sample2:
            bug.insert_or_update_bug_in_database(bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla, me.cur, true)
        self.verify_tables(correct2)
        #test updating existing associations
        sample2 = [(343324,"CLOSED","RESOLVED","a short desc",set(["bbbb"])), ]
        correct2 = { "bugs": [(343324,"CLOSED","RESOLVED","a short desc"),
                              (343325,"CLOSED", "RESOLVED","a short desc")],
                     "bug_associations": [("bbbb", 343324),
                                          ("dddd", 343325)] }
        for bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla in sample2:
            bug.insert_or_update_bug_in_database(bugId, statusFromBugzilla, resolutionFromBugzilla, shortDescFromBugzilla, signatureListFromBugzilla, me.cur, true)
        self.verify_tables(correct2)

    def test_get_and_set_last_run_date(self):
        """Prepare an empty persistence directory for the last-run pickle."""
        try:
            os.makedirs(self.testConfig.testPath)
        except OSError, x:
            # an already-existing directory is fine; anything else is not
            if errno.EEXIST == x.errno:
                pass
            else:
                raise
        try:
            os.unlink(self.testConfig.persistentDataPathname)
        except OSError, x:
            # no leftover pickle to remove
            pass
class TestBuilds(unittest.TestCase): def setUp(self): global me self.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing builds') myDir = os.path.split(__file__)[0] if not myDir: myDir = '.' replDict = {'testDir':'%s'%myDir} for i in self.config: try: self.config[i] = self.config.get(i)%(replDict) except: pass self.logger = TestingLogger(me.fileLogger) self.testConfig = configurationManager.Config([('t','testPath', True, './TEST-BUILDS', ''), ('f','testFileName', True, 'lastrun.pickle', ''), ]) self.testConfig["persistentDataPathname"] = os.path.join(self.testConfig.testPath, self.testConfig.testFileName) def tearDown(self): self.logger.clear() def do_nightlyBuildExists(self, d, correct): me.cur = me.conn.cursor(cursor_factory=psy.LoggingCursor) me.cur.setLogger(me.fileLogger) actual = builds.nightlyBuildExists(me.cur, d[0], d[1], d[2], d[3]) assert actual == correct, "expected %s, got %s " % (correct, actual) def do_releaseBuildExists(self, d, correct): me.cur = me.conn.cursor(cursor_factory=psy.LoggingCursor) me.cur.setLogger(me.fileLogger) actual = builds.releaseBuildExists(me.cur, d[0], d[1], d[2], d[3], d[4], d[5]) assert actual == correct, "expected %s, got %s " % (correct, actual) def test_buildExists(self): d = ( "failfailfail", "VERSIONAME1", "PLATFORMNAME1", "1" ) self.do_nightlyBuildExists(d, False) d = ( "PRODUCTNAME1", "VERSIONAME1", "PLATFORMNAME1", "1" ) self.do_nightlyBuildExists(d, True) d = ( "failfailfail", "VERSIONAME1", "1", "BUILDTYPE1", "PLATFORMNAME1", "1" ) self.do_releaseBuildExists(d, False) r = ( "PRODUCTNAME1", "VERSIONAME1", "1", "BUILDTYPE1", "PLATFORMNAME1", "1" ) self.do_releaseBuildExists(r, True) def test_fetchBuild(self): fake_response_contents_1 = '11111' fake_response_contents_2 = '22222' fake_response_contents = '%s %s' % (fake_response_contents_1, fake_response_contents_2) fake_urllib2_url = 'http://www.example.com/' self.config.base_url = fake_urllib2_url fakeResponse = 
exp.DummyObjectWithExpectations() fakeResponse.code = 200 fakeResponse.expect('read', (), {}, fake_response_contents) fakeResponse.expect('close', (), {}) fakeUrllib2 = exp.DummyObjectWithExpectations() fakeUrllib2.expect('urlopen', (fake_urllib2_url,), {}, fakeResponse) try: actual = builds.fetchBuild(fake_urllib2_url, fakeUrllib2) assert actual[0] == fake_response_contents_1, "expected %s, got %s " % (fake_response_contents_1, actual) assert actual[1] == fake_response_contents_2, "expected %s, got %s " % (fake_response_contents_2, actual) except Exception, x: print "Exception in test_fetchBuild() ... Error: ",type(x),x socorro.lib.util.reportExceptionAndAbort(me.fileLogger)
class TestFtpScraper(unittest.TestCase): def setUp(self): global me self.config = cfgManager.newConfiguration( configurationModule=testConfig, applicationName='Testing ftpscraper') myDir = os.path.split(__file__)[0] if not myDir: myDir = '.' replDict = {'testDir': '%s' % myDir} for i in self.config: try: self.config[i] = self.config.get(i) % (replDict) except: pass self.logger = TestingLogger(me.fileLogger) self.testConfig = cfgManager.Config([('t', 'testPath', True, './TEST-BUILDS', ''), ('f', 'testFileName', True, 'lastrun.pickle', '')]) self.testConfig["persistentDataPathname"] = os.path.join( self.testConfig.testPath, self.testConfig.testFileName) def tearDown(self): self.logger.clear() def do_build_exists(self, d, correct): me.cur = me.conn.cursor(cursor_factory=psy.LoggingCursor) me.cur.setLogger(me.fileLogger) actual = buildutil.build_exists( me.cur, d[0], d[1], d[2], d[3], d[4], d[5], d[6]) assert actual == correct, "expected %s, got %s " % (correct, actual) def test_build_exists(self): d = ("failfailfail", "VERSIONAME1", "PLATFORMNAME1", "1", "BUILDTYPE1", "1", "REPO1") self.do_build_exists(d, False) r = ("PRODUCTNAME1", "VERSIONAME1", "PLATFORMNAME1", "1", "BUILDTYPE1", "1", "REPO1") self.do_build_exists(r, True) def test_insert_build(self): me.cur = me.conn.cursor(cursor_factory=psy.LoggingCursor) me.cur.setLogger(me.fileLogger) sql = """DELETE FROM releases_raw WHERE product_name = 'PRODUCTNAME5'""" me.cur.execute(sql) me.cur.connection.commit() try: buildutil.insert_build(me.cur, 'PRODUCTNAME5', 'VERSIONAME5', 'PLATFORMNAME5', '5', 'BUILDTYPE5', '5', 'REPO5') actual = buildutil.build_exists(me.cur, 'PRODUCTNAME5', 'VERSIONAME5', 'PLATFORMNAME5', '5', 'BUILDTYPE5', '5', 'REPO5') assert actual == 1, "expected 1, got %s" % (actual) except Exception, x: print "Exception in do_insert_build() ... Error: ", type(x), x socorro.lib.util.reportExceptionAndAbort(me.fileLogger) finally:
class TestFtpScraper(unittest.TestCase):
    """Tests for the ftpscraper module: scraping anchor links out of an HTML
    directory listing (getLinks) and parsing build info files
    (parseInfoFile), with DummyObjectWithExpectations standing in for urllib2.

    NOTE(review): relies on the module-global ``me`` (fileLogger) being
    initialized elsewhere in this file -- confirm setup order.
    """
    def setUp(self):
        # Build a fresh configuration and expand '%(testDir)s'-style
        # placeholders against this test file's directory.
        global me
        self.config = cfgManager.newConfiguration(
            configurationModule=testConfig, applicationName='Testing ftpscraper')
        myDir = os.path.split(__file__)[0]
        if not myDir:
            myDir = '.'
        replDict = {'testDir': '%s' % myDir}
        for i in self.config:
            try:
                # Values that do not support interpolation raise and are
                # deliberately left unchanged.
                self.config[i] = self.config.get(i) % (replDict)
            except:
                pass
        self.logger = TestingLogger(me.fileLogger)
        # Side configuration describing where persistent test data lives.
        self.testConfig = cfgManager.Config([
            ('t', 'testPath', True, './TEST-BUILDS', ''),
            ('f', 'testFileName', True, 'lastrun.pickle', '')
        ])
        self.testConfig["persistentDataPathname"] = os.path.join(
            self.testConfig.testPath, self.testConfig.testFileName)

    def tearDown(self):
        # Drop anything accumulated in the testing logger between tests.
        self.logger.clear()

    def test_getLinks(self):
        self.config.products = ('PRODUCT1', 'PRODUCT2')
        self.config.base_url = 'http://www.example.com/'
        fake_response_url = "%s%s" % (self.config.base_url,
                                      self.config.products[0])
        # NOTE(review): the exact internal whitespace of this fixture was
        # lost when the file was reflowed; getLinks appears to match on the
        # href values, so line layout should not matter -- confirm.
        fake_response_contents = """
        blahblahblahblahblah
        <a href="product1-v1.en-US.p1.txt">product1-v1.en-US.p1.txt</a>
        <a href="product1-v1.en-US.p1.zip">product1-v1.en-US.p1.zip</a>
        <a href="product2-v2.en-US.p2.txt">product2-v2.en-US.p2.txt</a>
        <a href="product2-v2.en-US.p2.zip">product2-v2.en-US.p2.zip</a>
        blahblahblahblahblah
        """
        # Canned HTTP response; NOTE(review): DummyObjectWithExpectations
        # appears to verify call order -- keep expect() calls in sequence.
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url, ), {}, fakeResponse)
        # Filtering by prefix keeps only the product1 links.
        actual = ftpscraper.getLinks('http://www.example.com/PRODUCT1',
                                     startswith='product1',
                                     urllib=fakeUrllib2)
        expected = ['product1-v1.en-US.p1.txt', 'product1-v1.en-US.p1.zip']
        assert actual == expected, "expected %s, got %s" % (expected, actual)
        # Second pass with fresh mocks: filtering by suffix keeps .zip links.
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url, ), {}, fakeResponse)
        expected = ['product1-v1.en-US.p1.zip', 'product2-v2.en-US.p2.zip']
        actual = ftpscraper.getLinks('http://www.example.com/PRODUCT1',
                                     endswith='.zip',
                                     urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)

    def test_parseInfoFile(self):
        self.config.products = ('PRODUCT1', 'PRODUCT2')
        self.config.base_url = 'http://www.example.com/'
        fake_response_url = "%s%s" % (self.config.base_url,
                                      self.config.products[0])
        # Nightly info files carry a build id plus an hg revision URL.
        # NOTE(review): fixture whitespace reconstructed after reflow;
        # parseInfoFile presumably splits on whitespace -- confirm.
        fake_response_contents = """
        20111011042016
        http://hg.mozilla.org/releases/mozilla-aurora/rev/327f5fdae663
        """
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url, ), {}, fakeResponse)
        rev = 'http://hg.mozilla.org/releases/mozilla-aurora/rev/327f5fdae663'
        expected = {'buildID': '20111011042016', 'rev': rev}
        actual = ftpscraper.parseInfoFile('http://www.example.com/PRODUCT1',
                                          nightly=True,
                                          urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)
        # Non-nightly (release) info files carry key=value pairs instead.
        fake_response_contents = """
        buildID=20110705195857
        """
        fakeResponse = exp.DummyObjectWithExpectations()
        fakeResponse.code = 200
        fakeResponse.expect('read', (), {}, fake_response_contents)
        fakeResponse.expect('close', (), {})
        fakeUrllib2 = exp.DummyObjectWithExpectations()
        fakeUrllib2.expect('urlopen', (fake_response_url, ), {}, fakeResponse)
        expected = {'buildID': '20110705195857'}
        actual = ftpscraper.parseInfoFile('http://www.example.com/PRODUCT1',
                                          nightly=False,
                                          urllib=fakeUrllib2)
        assert actual == expected, "expected %s, got %s" % (expected, actual)