def checkOneClass(aClass,aType):
    """
    Verify create()/drop() behavior for a single schema table class.

    Creates the table, asserts that exactly the expected set of new tables
    appears (per the hardCodedSchemaClasses map), then drops it and asserts it
    is gone.  All dependent tables are dropped in a finally block.
    NOTE(review): parameter aType is unused in this body -- presumably kept
    for a shared test-harness signature; confirm.
    """
    global me
    connection = psycopg2.connect(me.dsn)
    cursor = connection.cursor()
    table = aClass(logger = me.logger)
    # instantiate aClass and everything it depends on, in setup order
    expectedList = []
    expectedTableClasses = schema.getOrderedSetupList([aClass])
    for t in expectedTableClasses:
        expectedList.append(t(logger = me.logger))
    try:
        schema.teardownDatabase(me.config,me.logger)
        # start from a clean slate: nothing matching this class's name prefix
        matchingTables = [x for x in socorro_psg.tablesMatchingPattern(table.name+'%',cursor) if not x.endswith('_id_seq')]
        assert [] == matchingTables ,'For class %s saw %s'%(table.name,matchingTables)
        # call create
        before = set(socorro_psg.tablesMatchingPattern('%',cursor))
        # ignore postgres-internal toast tables and auto-created sequences
        ignore = [x for x in before if (x.startswith('pg_toast') or x.endswith('id_seq'))]
        before -= set(ignore)
        table.create(cursor)
        connection.commit()
        after = set(socorro_psg.tablesMatchingPattern('%',cursor))
        ignore = [x for x in after if (x.startswith('pg_toast') or x.endswith('id_seq'))]
        after -= set(ignore)
        # hard-coded expectation: exactly these tables should be new
        expectedDiff = hardCodedSchemaClasses[aClass][1]
        assert expectedDiff == after - before, 'for %s: after-before=\n got:%s\nwanted:%s'%(table.name,after-before,expectedDiff)
        # call drop
        table.drop(cursor)
        connection.commit()
        afterDrop = set(socorro_psg.tablesMatchingPattern('%',cursor))
        assert not table.name in afterDrop
    finally:
        # best-effort cleanup of every table this test may have created
        cursor.execute("DROP TABLE IF EXISTS %s CASCADE"%(','.join([x.name for x in expectedList])))
        connection.commit()
        connection.close()
def testPartitionInsert(self):
    """
    TestPartitionedTable.testPartitionInsert():
    - check that we automagically create the needed partition on insert
    """
    global me
    tz = UTC
    cursor = self.connection.cursor()
    me.logger.debug("DEBUG before createDB")
    # test in this order, because other things depend on reports
    insertRows = [
      # uuid, client_crash_date, date_processed, install_age,last_crash,uptime,user_comments, app_notes, distributor, distributor_version,productdims_id,urldims_id
      #[schema.CrashReportsTable,['0bba61c5-dfc3-43e7-dead-8afd20071025',dt.datetime(2007,12,25,5,4,3,21,tz),dt.datetime(2007,12,25,5,4,3,33,tz),10000,100,110,"","","","",1,1]],
      [schema.ReportsTable, ['0bba61c5-dfc3-43e7-dead-8afd20071025',dt.datetime(2007,12,25,5,4,3,21,tz),dt.datetime(2007,12,25,5,4,3,33,tz),'Firefox','1.0b4', '200403041354','http://www.a.com', 10000, 100, 110, "", dt.datetime(2004,3,4,13,54,tzinfo=tz),"", "", "", "", "",None,None,None,'bogus_hangid',None,'some_chonnel']],
      [schema.ExtensionsTable,[1,dt.datetime(2007,12,25,5,4,3,33,tz),1,'extensionid','version']],
      [schema.FramesTable,[1,2,dt.datetime(2007,12,25,5,4,3,33,tz),'somesignature']],
      #[schema.DumpsTable,[1,dt.datetime(2007,12,25,5,4,3,33,tz),"data"]],
      ]
    # call insert, expecting auto-creation of partitions
    me.dsn = "host=%s dbname=%s user=%s password=%s" % (me.config.databaseHost,me.config.databaseName, me.config.databaseUserName,me.config.databasePassword)
    me.testDB.createDB(me.config,me.logger)
    dbtestutil.fillDimsTables(cursor)
    # snapshot of tables before any insert; toast tables are postgres noise
    before = set([x for x in socorro_psg.tablesMatchingPattern('%',cursor) if not 'pg_toast' in x])
    for t in insertRows:
        obj = t[0](logger=me.logger)
        # the insert should trigger on-demand creation of the date partition
        obj.insert(cursor,t[1],self.altConnectionCursor,date_processed=dt.datetime(2007,12,25,5,4,3,33,tz))
        self.connection.commit()
        current = set([x for x in socorro_psg.tablesMatchingPattern('%',cursor) if not 'pg_toast' in x])
        diff = current - before
        # exactly one new table: this table's partition for week of 2007-12-24
        assert set(['%s_20071224'%obj.name]) == diff,'Expected set([%s_20071224]), got %s'%(obj.name,diff)
        before = current
def testPartitionInsert(self):
    """
    TestPartitionedTable.testPartitionInsert():
    - check that we automagically create the needed partition on insert
    """
    global me
    tz = dtutil.UTC()
    # test in this order, because other things depend on reports
    # each entry: [table class, one row of column values for insert()]
    insertRows = [
      [schema.ReportsTable,['0bba61c5-dfc3-43e7-dead-8afd20071025',dt.datetime(2007,12,25,5,4,3,21,tz),dt.datetime(2007,12,25,5,4,3,33),'product','version','build','url',3000,0,22,'email',dt.date(2007,12,1),None,"","","",""]],
      [schema.ExtensionsTable,[1,dt.datetime(2007,12,25,5,4,3,33),1,'extensionid','version']],
      [schema.FramesTable,[1,2,dt.datetime(2007,12,25,5,4,3,33),'somesignature']],
      [schema.DumpsTable,[1,dt.datetime(2007,12,25,5,4,3,33),"data"]],
      ]
    # call insert, expecting auto-creation of partitions
    cursor = self.connection.cursor()
    me.dsn = "host=%s dbname=%s user=%s password=%s" % (me.config.databaseHost,me.config.databaseName, me.config.databaseUserName,me.config.databasePassword)
    schema.setupDatabase(me.config,me.logger)
    # snapshot of tables before any insert; toast tables are postgres noise
    before = set([x for x in socorro_psg.tablesMatchingPattern('%',cursor) if not 'pg_toast' in x])
    for t in insertRows:
        obj = t[0](logger=me.logger)
        # NOTE(review): date_processed is naive here while client_crash_date is
        # tz-aware -- confirm that mix is intentional
        obj.insert(cursor,t[1],self.altConnectionCursor,date_processed=dt.datetime(2007,12,25,5,4,3,33))
        self.connection.commit()
        current = set([x for x in socorro_psg.tablesMatchingPattern('%',cursor) if not 'pg_toast' in x])
        diff = current - before
        # exactly one new table: this table's partition for week of 2007-12-24
        assert set(['%s_20071224'%obj.name]) == diff,'Expected set([%s_20071224]), got %s'%(obj.name,diff)
        before = current
def testCreatePartitions_one(self):
    """ TestPartitionedTable.testCreatePartitions_one(): - assure that we create the expected partition(s) for a PartitionedTable that has no dependencies """
    global me
    cursor = self.connection.cursor()
    # make sure no leftovers from a previous run confuse the baseline
    cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
    self.connection.commit()
    testPt = TPT(logger=me.logger)
    try:
        tptSet0 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        assert set() == tptSet0, 'Assure we start with clean slate'
        testPt.create(cursor)
        self.connection.commit()
        tptSet1 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        # two partition keys (0 and 1) should yield exactly two partitions
        testPt.createPartitions(cursor, iter(range(2)))
        self.connection.commit()
        tptSet2 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        assert set([
            'tpt_0',
            'tpt_1',
        ]) == tptSet2 - tptSet1, 'Got tptSet2: %s minus tptSet1: %s' % (
            tptSet2, tptSet1)
    finally:
        # always clean up, even when an assertion above fails
        cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
        self.connection.commit()
def checkOneClass(aClass,aType):
    """
    Verify create()/drop() behavior for a single schema table class.

    Creates the table, checks the before/after table-name difference against
    the hard-coded expectation (minus 'release_enum') via assertSameTableDiff,
    then drops it and checks it is gone.  Dependent tables are dropped in the
    finally block.
    NOTE(review): parameter aType is unused in this body -- presumably kept
    for a shared test-harness signature; confirm.
    """
    global me
    connection = psycopg2.connect(me.dsn)
    cursor = connection.cursor()
    table = aClass(logger = me.logger)
    # instantiate aClass and everything it depends on, in setup order
    expectedList = []
    expectedTableClasses = schema.getOrderedSetupList([aClass])
    for t in expectedTableClasses:
        expectedList.append(t(logger = me.logger))
    try:
        schema.teardownDatabase(me.config,me.logger)
        # start from a clean slate: nothing matching this class's name prefix
        matchingTables = [x for x in socorro_psg.tablesMatchingPattern(table.name+'%',cursor) if not x.endswith('_id_seq')]
        assert [] == matchingTables ,'For class %s saw %s'%(table.name,matchingTables)
        # call create
        before = set(socorro_psg.tablesMatchingPattern('%',cursor))
        print 'creating: ', table.name
        table.create(cursor)
        connection.commit()
        after = set(socorro_psg.tablesMatchingPattern('%',cursor))
        # 'release_enum' is excluded from the expected diff here; the helper
        # compares expected against (after - before)
        expected = me.hardCodedSchemaClasses[aClass][1] - set(['release_enum'])
        assertSameTableDiff(table.name,expected,before,after)
        # call drop
        table.drop(cursor)
        connection.commit()
        afterDrop = set(socorro_psg.tablesMatchingPattern('%',cursor))
        assert not table.name in afterDrop
    finally:
        # best-effort cleanup of every table this test may have created
        cursor.execute("DROP TABLE IF EXISTS %s CASCADE"%(','.join([x.name for x in expectedList])))
        connection.commit()
        connection.close()
def printDbTablenames(tag, aCursor): """Debugging utility""" all = socorro_psg.tablesMatchingPattern("%", aCursor) some = [x for x in all if (x == "server_status" or not "_" in x)] some = [ x for x in some if ( not x in [ "triggers", "views", "sequences", "tables", "domains", "parameters", "routines", "schemata", "attributes", "columns", ] ) ] some.sort() print tag, ", ".join(some)
def printDbTablenames(tag,aCursor): """Debugging utility""" all = socorro_psg.tablesMatchingPattern('%',aCursor) some = [x for x in all if (x == 'server_status' or not '_' in x)] some = [x for x in some if (not x in ['triggers','views','sequences','tables','domains','parameters','routines','schemata','attributes','columns'])] some.sort() print tag,', '.join(some)
def testModuleCreatePartitions():
    """
    testModuleCreatePartitions():
    Verify that schema.createPartitions() creates exactly the weekly
    partitions implied by the configured start/end dates, and only for the
    classes listed in databaseObjectClassListForWeeklyPartitions.
    """
    global me
    connection = psycopg2.connect(me.dsn)
    try:
        cursor = connection.cursor()
        me.testDB.removeDB(me.config,me.logger)
        me.testDB.createDB(me.config,me.logger)
        # a one-day range inside the week starting Monday 2007-12-31
        me.config.startDate = dt.date(2008,1,1)
        me.config.endDate = dt.date(2008,1,1)
        reportSet = set(socorro_psg.tablesMatchingPattern('reports%',cursor))
        extensionSet = set(socorro_psg.tablesMatchingPattern('extensions%',cursor))
        frameSet0 = set(socorro_psg.tablesMatchingPattern('frames%',cursor))
        schema.databaseObjectClassListForWeeklyPartitions = [schema.ExtensionsTable]
        schema.createPartitions(me.config,me.logger)
        # BUG FIX: the diff previously queried 'report%' while the baseline
        # used 'reports%'; use the same pattern on both sides so the set
        # difference is meaningful
        moreReportSet = set(socorro_psg.tablesMatchingPattern('reports%',cursor))-reportSet
        moreExtensionSet = set(socorro_psg.tablesMatchingPattern('extensions%',cursor))-extensionSet
        assert set(['reports_20071231']) == moreReportSet, 'but got %s' % moreReportSet
        assert set(['extensions_20071231']) == moreExtensionSet, 'but got %s' % moreExtensionSet
        # frames was not in the partition list, so nothing new should appear
        frameSet = set(socorro_psg.tablesMatchingPattern('frames%',cursor))
        assert frameSet0 == frameSet
        schema.databaseObjectClassListForWeeklyPartitions = [schema.FramesTable]
        schema.createPartitions(me.config,me.logger)
        moreFrameSet = set(socorro_psg.tablesMatchingPattern('frames%',cursor))-frameSet
        assert set(['frames_20071231']) == moreFrameSet, 'but got %s' % moreFrameSet
    finally:
        connection.close()
def removePriorityTables(self,config,logger):
    """Drop every priority_job_* table, if any exist; otherwise log that
    there was nothing to do."""
    dbCon,dbCur = db_schema.connectToDatabase(config,logger)
    priorityTableNames = db_postgresql.tablesMatchingPattern('priority_job_%%',dbCur)
    if not priorityTableNames:
        logger.info("There were no priority_job tables to close")
        return
    # drop them all in a single statement, then commit
    joinedNames = ", ".join(priorityTableNames)
    dbCur.execute("DROP TABLE IF EXISTS %s CASCADE;"%joinedNames)
    dbCon.commit()
def testTablesMatchingPattern(self):
    """Create the known test tables, then verify that tablesMatchingPattern()
    returns exactly the expected names for each pattern in testTablePatterns."""
    cursor = self.connection.cursor()
    createSql = "CREATE TABLE %s (id integer)"
    # postgresql allows empty tables, but it makes me itch...
    for tn in testTableNames:
        cursor.execute(createSql%tn)
    self.connection.commit()
    # testTablePatterns maps each LIKE pattern to the table names it matches
    for pat in testTablePatterns:
        result = postg.tablesMatchingPattern(pat,cursor)
        expected = testTablePatterns[pat]
        # order is irrelevant, so compare as sets
        assert set(expected)==set(result), "for %s: expected:%s, result:%s"%(pat,expected,result)
    self.connection.commit()
def removePriorityTables(self, config, logger):
    """Drop every priority_job_* table, if any exist; otherwise log that
    there was nothing to do."""
    dbCon, dbCur = db_schema.connectToDatabase(config, logger)
    priorityTableNames = db_postgresql.tablesMatchingPattern(
        'priority_job_%%', dbCur)
    if not priorityTableNames:
        logger.info("There were no priority_job tables to close")
        return
    # drop them all in a single statement, then commit
    joinedNames = ", ".join(priorityTableNames)
    dbCur.execute("DROP TABLE IF EXISTS %s CASCADE;" % joinedNames)
    dbCon.commit()
def printDbTablenames(tag, aCursor): """Debugging utility""" all = socorro_psg.tablesMatchingPattern('%', aCursor) some = [x for x in all if (x == 'server_status' or not '_' in x)] some = [ x for x in some if (not x in [ 'triggers', 'views', 'sequences', 'tables', 'domains', 'parameters', 'routines', 'schemata', 'attributes', 'columns' ]) ] some.sort() print tag, ', '.join(some)
def testCreatePartitions_depend(self):
    """ TestPartitionedTable.testCreatePartitions_depend(): - assure that we create the expected partition(s) for a PartitionedTable that has dependencies """
    global me
    cursor = self.connection.cursor()
    # make sure no leftovers from a previous run confuse the baseline
    cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
    self.connection.commit()
    testPt = ThreePT(logger = me.logger)
    try:
        tptSet0 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        # NOTE(review): baseline here uses 'report%' while the later snapshots
        # use 'reports%' -- confirm the broader baseline pattern is intended
        reportSet0 = set(socorro_psg.tablesMatchingPattern('report%',cursor))
        assert set() == tptSet0
        assert set() == reportSet0
        testPt.create(cursor)
        schema.ReportsTable(me.logger).create(cursor)
        self.connection.commit()
        tptSet1 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        reportSet1 = set(socorro_psg.tablesMatchingPattern('reports%',cursor))
        # make ThreePT partitioning depend on ReportsTable partitioning
        schema.databaseDependenciesForPartition[ThreePT] = [schema.ReportsTable]
        testPt.createPartitions(cursor,iter([(dt.date(2008,1,1),dt.date(2008,1,1)),(dt.date(2008,2,2),dt.date(2008,2,9))]))
        self.connection.commit()
        tptSet2 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        reportSet2 = set(socorro_psg.tablesMatchingPattern('reports%',cursor))
        assert set(['tpt3_2008_1_1','tpt3_2008_2_2']) == tptSet2 - tptSet1, "But %s"%(tptSet2-tptSet1)
        # the dependent reports partitions are created alongside the tpt3 ones
        assert set(['reports_20080101', 'reports_20080202']) == reportSet2 - reportSet1, "But %s"%(reportSet2-reportSet1)
    finally:
        # always clean up, even when an assertion above fails
        cursor.execute("DROP TABLE IF EXISTS tpt, tpt3, reports CASCADE")
        self.connection.commit()
def testTablesMatchingPattern(self):
    """Create the known test tables, then verify that tablesMatchingPattern()
    returns exactly the expected names for each pattern in testTablePatterns."""
    cursor = self.connection.cursor()
    createSql = "CREATE TABLE %s (id integer)"
    # postgresql allows empty tables, but it makes me itch...
    for tn in testTableNames:
        cursor.execute(createSql % tn)
    self.connection.commit()
    # testTablePatterns maps each LIKE pattern to the table names it matches
    for pat in testTablePatterns:
        result = postg.tablesMatchingPattern(pat, cursor)
        expected = testTablePatterns[pat]
        # order is irrelevant, so compare as sets
        assert set(expected) == set(
            result), "for %s: expected:%s, result:%s" % (pat, expected, result)
    self.connection.commit()
def testCreatePartitions_one(self):
    """ TestPartitionedTable.testCreatePartitions_one(): - assure that we create the expected partition(s) for a PartitionedTable that has no dependencies """
    global me
    cursor = self.connection.cursor()
    # make sure no leftovers from a previous run confuse the baseline
    cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
    self.connection.commit()
    testPt = TPT(logger=me.logger)
    try:
        tptSet0 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        assert set() == tptSet0, 'Assure we start with clean slate'
        testPt.create(cursor)
        self.connection.commit()
        tptSet1 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        # two partition keys (0 and 1) should yield exactly two partitions
        testPt.createPartitions(cursor,iter(range(2)))
        self.connection.commit()
        tptSet2 = set(socorro_psg.tablesMatchingPattern('tpt%',cursor))
        assert set(['tpt_0', 'tpt_1',]) == tptSet2 - tptSet1,'Got tptSet2: %s minus tptSet1: %s'%(tptSet2,tptSet1)
    finally:
        # always clean up, even when an assertion above fails
        cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
        self.connection.commit()
def checkOneClass(aClass, aType):
    """
    Verify create()/drop() behavior for a single schema table class.

    Creates the table, checks the before/after table-name difference against
    the hard-coded expectation (minus 'release_enum') via assertSameTableDiff,
    then drops it and checks it is gone.  Dependent tables are dropped in the
    finally block.
    NOTE(review): parameter aType is unused in this body -- presumably kept
    for a shared test-harness signature; confirm.
    """
    global me
    connection = psycopg2.connect(me.dsn)
    cursor = connection.cursor()
    table = aClass(logger=me.logger)
    # instantiate aClass and everything it depends on, in setup order
    expectedList = []
    expectedTableClasses = schema.getOrderedSetupList([aClass])
    for t in expectedTableClasses:
        expectedList.append(t(logger=me.logger))
    try:
        schema.teardownDatabase(me.config, me.logger)
        # start from a clean slate: nothing matching this class's name prefix
        matchingTables = [
            x
            for x in socorro_psg.tablesMatchingPattern(table.name + '%', cursor)
            if not x.endswith('_id_seq')
        ]
        assert [] == matchingTables, 'For class %s saw %s' % (table.name,
                                                              matchingTables)
        # call create
        before = set(socorro_psg.tablesMatchingPattern('%', cursor))
        print 'creating: ', table.name
        table.create(cursor)
        connection.commit()
        after = set(socorro_psg.tablesMatchingPattern('%', cursor))
        # 'release_enum' is excluded from the expected diff here; the helper
        # compares expected against (after - before)
        expected = me.hardCodedSchemaClasses[aClass][1] - set(['release_enum'])
        assertSameTableDiff(table.name, expected, before, after)
        # call drop
        table.drop(cursor)
        connection.commit()
        afterDrop = set(socorro_psg.tablesMatchingPattern('%', cursor))
        assert not table.name in afterDrop
    finally:
        # best-effort cleanup of every table this test may have created
        cursor.execute("DROP TABLE IF EXISTS %s CASCADE" %
                       (','.join([x.name for x in expectedList])))
        connection.commit()
        connection.close()
def testFillProcessorTable_NoMap():
    """ testDbtestutil:testFillProcessorTable_NoMap(): - check correct behavior for presence or absence of parameter 'stamp' - check correct number of entries created - check correct number of priority_job_X tables created """
    global me
    cursor = me.connection.cursor()
    ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
    dsql = "DELETE FROM processors"
    dropSql = "DROP TABLE IF EXISTS %s"
    # mix of explicit timestamps and None (None means "use current time")
    stamps = [
        None, None,
        dt.datetime(2008, 1, 2, 3, 4, 5, 666, tzinfo=UTC),
        dt.datetime(2009, 1, 2, 3, tzinfo=UTC), None,
        dt.datetime(2010, 12, 11, 10, 9, 8, 777, tzinfo=UTC)
    ]
    try:
        for i in range(len(stamps)):
            before = utc_now()
            time.sleep(.01)  # guarantee strict before < now-stamp < after
            dbtu.fillProcessorTable(cursor, i, stamp=stamps[i])
            time.sleep(.01)
            after = utc_now()
            cursor.execute(ssql)
            data = cursor.fetchall()
            # i processors requested => i rows present
            assert i == len(data)
            for d in data:
                if stamps[i]:
                    # explicit stamp must land in both datetime columns
                    assert stamps[i] == d[2]
                    assert stamps[i] == d[3]
                else:
                    # default stamp: a "now" between our before/after probes
                    assert before < d[2] and d[2] < after
                    assert d[2] == d[3]
            # one priority_jobs_<id> table expected per processor
            priJobsTables = db_postgresql.tablesMatchingPattern(
                "priority_jobs_%", cursor)
            assert i == len(priJobsTables)
            # reset state for the next loop iteration
            cursor.execute(dsql)
            if priJobsTables:
                cursor.execute(dropSql % (','.join(priJobsTables)))
            me.connection.commit()
    finally:
        # restore a pristine processors table and empty jobs table
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        cursor.execute('DELETE FROM jobs')
        me.connection.commit()
def testFillProcessorTable_NoMap():
    """ testDbtestutil:testFillProcessorTable_NoMap(): - check correct behavior for presence or absence of parameter 'stamp' - check correct number of entries created - check correct number of priority_job_X tables created """
    global me
    cursor = me.connection.cursor()
    ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
    dsql = "DELETE FROM processors"
    dropSql = "DROP TABLE IF EXISTS %s"
    # mix of explicit timestamps and None (None means "use current time")
    stamps = [None,None,dt.datetime(2008,1,2,3,4,5,666,tzinfo=UTC),dt.datetime(2009,1,2,3,tzinfo=UTC), None, dt.datetime(2010,12,11,10,9,8,777,tzinfo=UTC)]
    try:
        for i in range(len(stamps)):
            before = utc_now()
            time.sleep(.01)  # guarantee strict before < now-stamp < after
            dbtu.fillProcessorTable(cursor,i,stamp=stamps[i])
            time.sleep(.01)
            after = utc_now()
            cursor.execute(ssql)
            data = cursor.fetchall()
            # i processors requested => i rows present
            assert i == len(data)
            for d in data:
                if stamps[i]:
                    # explicit stamp must land in both datetime columns
                    assert stamps[i] == d[2]
                    assert stamps[i] == d[3]
                else:
                    # default stamp: a "now" between our before/after probes
                    assert before < d[2] and d[2] < after
                    assert d[2] == d[3]
            # one priority_jobs_<id> table expected per processor
            priJobsTables = db_postgresql.tablesMatchingPattern("priority_jobs_%",cursor)
            assert i == len(priJobsTables)
            # reset state for the next loop iteration
            cursor.execute(dsql)
            if priJobsTables:
                cursor.execute(dropSql%(','.join(priJobsTables)))
            me.connection.commit()
    finally:
        # restore a pristine processors table and empty jobs table
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        cursor.execute('DELETE FROM jobs')
        me.connection.commit()
def testModuleCreatePartitions():
    """ testModuleCreatePartitions():
    Verify that schema.createPartitions() creates exactly the weekly
    partitions implied by the configured start/end dates, and only for the
    classes listed in databaseObjectClassListForWeeklyPartitions.
    """
    global me
    connection = psycopg2.connect(me.dsn)
    try:
        cursor = connection.cursor()
        me.testDB.removeDB(me.config, me.logger)
        me.testDB.createDB(me.config, me.logger)
        # a one-day range inside the week starting Monday 2007-12-31
        me.config['startDate'] = dt.date(2008, 1, 1)
        me.config['endDate'] = dt.date(2008, 1, 1)
        reportSet = set(socorro_psg.tablesMatchingPattern('reports%', cursor))
        extensionSet = set(
            socorro_psg.tablesMatchingPattern('extensions%', cursor))
        frameSet0 = set(socorro_psg.tablesMatchingPattern('frames%', cursor))
        schema.databaseObjectClassListForWeeklyPartitions = [
            schema.ExtensionsTable
        ]
        schema.createPartitions(me.config, me.logger)
        moreReportSet = set(
            socorro_psg.tablesMatchingPattern('reports%', cursor)) - reportSet
        moreExtensionSet = set(
            socorro_psg.tablesMatchingPattern('extensions%',
                                              cursor)) - extensionSet
        assert set(['reports_20071231'
                    ]) == moreReportSet, 'but got %s' % moreReportSet
        assert set(['extensions_20071231'
                    ]) == moreExtensionSet, 'but got %s' % moreExtensionSet
        # frames was not in the partition list, so nothing new should appear
        frameSet = set(socorro_psg.tablesMatchingPattern('frames%', cursor))
        assert frameSet0 == frameSet
        schema.databaseObjectClassListForWeeklyPartitions = [
            schema.FramesTable
        ]
        schema.createPartitions(me.config, me.logger)
        moreFrameSet = set(socorro_psg.tablesMatchingPattern(
            'frames%', cursor)) - frameSet
        assert set(['frames_20071231'
                    ]) == moreFrameSet, 'but got %s' % moreFrameSet
    finally:
        connection.close()
def testCreatePartitions_depend(self):
    """ TestPartitionedTable.testCreatePartitions_depend(): - assure that we create the expected partition(s) for a PartitionedTable that has dependencies """
    global me
    cursor = self.connection.cursor()
    # make sure no leftovers from a previous run confuse the baseline
    cursor.execute("DROP TABLE IF EXISTS tpt, tpt3 CASCADE")
    self.connection.commit()
    testPt = ThreePT(logger=me.logger)
    try:
        tptSet0 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        # NOTE(review): baseline here uses 'report%' while the later snapshots
        # use 'reports%' -- confirm the broader baseline pattern is intended
        reportSet0 = set(
            socorro_psg.tablesMatchingPattern('report%', cursor))
        assert set() == tptSet0
        assert set() == reportSet0
        testPt.create(cursor)
        schema.ReportsTable(me.logger).create(cursor)
        self.connection.commit()
        tptSet1 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        reportSet1 = set(
            socorro_psg.tablesMatchingPattern('reports%', cursor))
        # make ThreePT partitioning depend on ReportsTable partitioning
        schema.databaseDependenciesForPartition[ThreePT] = [
            schema.ReportsTable
        ]
        testPt.createPartitions(
            cursor,
            iter([(dt.date(2008, 1, 1), dt.date(2008, 1, 1)),
                  (dt.date(2008, 2, 2), dt.date(2008, 2, 9))]))
        self.connection.commit()
        tptSet2 = set(socorro_psg.tablesMatchingPattern('tpt%', cursor))
        reportSet2 = set(
            socorro_psg.tablesMatchingPattern('reports%', cursor))
        assert set(['tpt3_2008_1_1', 'tpt3_2008_2_2'
                    ]) == tptSet2 - tptSet1, "But %s" % (tptSet2 - tptSet1)
        # the dependent reports partitions are created alongside the tpt3 ones
        assert set([
            'reports_20080101', 'reports_20080202'
        ]) == reportSet2 - reportSet1, "But %s" % (reportSet2 - reportSet1)
    finally:
        # always clean up, even when an assertion above fails
        cursor.execute("DROP TABLE IF EXISTS tpt, tpt3, reports CASCADE")
        self.connection.commit()
def testPartitionInsert(self):
    """ TestPartitionedTable.testPartitionInsert(): - check that we automagically create the needed partition on insert """
    global me
    tz = UTC
    cursor = self.connection.cursor()
    me.logger.debug("DEBUG before createDB")
    # test in this order, because other things depend on reports
    insertRows = [
        #[schema.CrashReportsTable,['0bba61c5-dfc3-43e7-dead-8afd20071025',dt.datetime(2007,12,25,5,4,3,21,tz),dt.datetime(2007,12,25,5,4,3,33,tz),10000,100,110,"","","","",1,1]],
        #"uuid", "client_crash_date", "date_processed", "product", "version", "build", "url", "install_age", "last_crash", "uptime", "email", "user_id", "user_comments", "app_notes", "distributor", "distributor_version", "topmost_filenames", "addons_checked", "flash_version", "hangid", "process_type", "release_channel"
        [
            schema.ReportsTable,
            [
                '0bba61c5-dfc3-43e7-dead-8afd20071025',
                dt.datetime(2007, 12, 25, 5, 4, 3, 21, tz),
                dt.datetime(2007, 12, 25, 5, 4, 3, 33, tz), 'Firefox',
                '1.0b4', '200403041354', 'http://www.a.com', 10000, 100, 110,
                "", "", "", "", "", "", None, None, None, 'bogus_hangid',
                None, 'some_chonnel'
            ]
        ],
        [
            schema.ExtensionsTable,
            [
                1,
                dt.datetime(2007, 12, 25, 5, 4, 3, 33, tz), 1, 'extensionid',
                'version'
            ]
        ],
        [
            schema.FramesTable,
            [
                1, 2,
                dt.datetime(2007, 12, 25, 5, 4, 3, 33, tz), 'somesignature'
            ]
        ],
        #[schema.DumpsTable,[1,dt.datetime(2007,12,25,5,4,3,33,tz),"data"]],
    ]
    # call insert, expecting auto-creation of partitions
    me.dsn = "host=%s dbname=%s user=%s password=%s" % (
        me.config.databaseHost, me.config.databaseName,
        me.config.databaseUserName, me.config.databasePassword)
    me.testDB.createDB(me.config, me.logger)
    # snapshot of tables before any insert; toast tables are postgres noise
    before = set([
        x for x in socorro_psg.tablesMatchingPattern('%', cursor)
        if not 'pg_toast' in x
    ])
    for t in insertRows:
        obj = t[0](logger=me.logger)
        # the insert should trigger on-demand creation of the date partition
        obj.insert(cursor,
                   t[1],
                   self.altConnectionCursor,
                   date_processed=dt.datetime(2007, 12, 25, 5, 4, 3, 33, tz))
        self.connection.commit()
        current = set([
            x for x in socorro_psg.tablesMatchingPattern('%', cursor)
            if not 'pg_toast' in x
        ])
        diff = current - before
        # exactly one new table: this table's partition for week of 2007-12-24
        assert set([
            '%s_20071224' % obj.name
        ]) == diff, 'Expected set([%s_20071224]), got %s' % (obj.name, diff)
        before = current
def getOldPartitionList(databaseCursor, tableName):
    """Return the alphabetically sorted names of existing partition tables
    whose names match '<tableName>_part%%' (after %-substitution)."""
    pattern = "%s_part%%%%" % tableName
    matches = socorro_pg.tablesMatchingPattern(pattern, databaseCursor)
    return sorted(matches)