def testJobScheduleIter_StartUnbalanced(self):
   """
   testJobScheduleIter_StartUnbalanced(self):
   Assure that an unbalanced start eventually produces balanced result
   """
   numProcessors = 5
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   self.connection.commit()
   m = monitor.Monitor(me.config)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     dbtestutil.addSomeJobs(dbCur,dict([(1+x,1+x) for x in range(numProcessors)]),logger=me.logger)
     iter = m.jobSchedulerIter(dbCur)
     num = 0
     hits = dict(((1+x,0) for x in range (numProcessors)))
     for id in iter:
       num += 1
       hits[int(id)] += 1
       me.logger.debug('HIT on %d: %d'%(id,hits[id]))
       if num >= 3*numProcessors: break
     for i in range(numProcessors):
       assert hits[i+1] == 5 - i, 'Expected num hits to be count down sequence from 5 to 1, but at idx %d, got %d'%(i+1,hits[i+1])
       me.logger.debug('ONE: At index %d, got count %d'%(i+1, hits[i+1]))
   finally:
     m.databaseConnectionPool.cleanup()
 def testUnbalancedJobSchedulerIter(self):
   """
   testUnbalancedJobSchedulerIter(self):
   With an unbalanced load on the processors, each processor still gets the same number of hits
   """
   global me
   numProcessors = 5
   loopCount = 3
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   self.connection.commit()
   m = monitor.Monitor(me.config)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     dbtestutil.addSomeJobs(dbCur,{1:12},logger=me.logger)
     iter = m.unbalancedJobSchedulerIter(dbCur)
     num = 0
     hits = dict(((1+x,0) for x in range (numProcessors)))
     for id in iter:
       num += 1
       hits[int(id)] += 1
       if num >= loopCount*numProcessors: break
     for i in range(numProcessors):
       assert hits[i+1] == loopCount, 'expected %d for processor %d, but got %d'%(loopCount,i+1,hits[i+1])
   finally:
     m.databaseConnectionPool.cleanup()
Beispiel #3
0
def testFillProcessorTable_WithMap():
  """testDbtestutil:testFillProcessorTable_WithMap():
  - check that othr params ignored for non-empty map
  - check that mapped data is used correctly (id is ignored, mapped stamp is lastseendatetime)
  """
  global me
  cursor = me.connection.cursor()
  ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
  dsql = "DELETE FROM processors"
  dropSql = "DROP TABLE IF EXISTS %s"
  tmap = {12:dt.datetime(2008,3,4,5,6,12,tzinfo=UTC),37:dt.datetime(2009,5,6,7,8,37,tzinfo=UTC)}
  try:
    dbtu.fillProcessorTable(cursor,7,stamp=dt.datetime(2009,4,5,6,7,tzinfo=UTC),processorMap=tmap)
    cursor.execute(ssql)
    data = cursor.fetchall()
    me.connection.commit()
    assert 2 == len(data)
    expectSet = set([dt.datetime(2008,3,4,5,6,12,tzinfo=UTC),dt.datetime(2009,5,6,7,8,37,tzinfo=UTC)])
    gotSet = set()
    for d in data:
      assert dt.datetime(2009,4,5,6,7,tzinfo=UTC) == d[2]
      gotSet.add(d[3])
      assert d[0] in [1,2]
    assert expectSet == gotSet
  finally:
    pt = db_schema.ProcessorsTable(logger)
    pt.drop(cursor)
    pt.create(cursor)
    cursor.execute('DELETE FROM jobs')
    me.connection.commit()
 def testJobSchedulerIterGood(self):
   """
   testJobSchedulerIterGood(self):
   Plain vanilla test of the balanced job scheduler.
   """
   global me
   numProcessors = 15
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   m = monitor.Monitor(me.config)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
    iter = m.jobSchedulerIter(dbCur)
    dbCon.commit()
    num = 0
    hits = dict(((1+x,0) for x in range (numProcessors)))
    for id in iter:
      num += 1
      hits[int(id)] += 1
      if num >= numProcessors: break
    for i in range(numProcessors):
      assert hits[i+1] == 1, 'At index %d, got count %d'%(i+1, hits[i+1])
    for id in iter:
      num += 1
      hits[int(id)] += 1
      if num >= 3*numProcessors: break
   finally:
     m.databaseConnectionPool.cleanup()
   for i in range(numProcessors):
     assert hits[i+1] == 3, 'At index %d, got count %d'%(i+1, hits[i+1])
Beispiel #5
0
def testAddSomeJobs():
  global me
  cursor = me.connection.cursor()
  cursor.execute("SELECT id from processors")
  me.connection.commit()
  jdMap = {1:1,2:2,3:3,4:0}
  xdMap = {1:set(),2:set(),3:set(),4:set()}
  gdMap = {1:set(),2:set(),3:set(),4:set()}
  data = _makeJobDetails(jdMap)
  for d in data:
    xdMap[d[2]].add(d)
  try:
    dbtu.fillProcessorTable(cursor,3,logger=logger)
    cursor.execute("SELECT id from processors")
    me.connection.commit()
    addedJobs = dbtu.addSomeJobs(cursor,jdMap)
    me.connection.commit()
    assert data == addedJobs
    cursor.execute("SELECT pathname,uuid,owner FROM jobs ORDER BY OWNER ASC")
    me.connection.commit()
    data2 = cursor.fetchall()
    assert len(data) == len(data2)
    for d in data2:
      gdMap[d[2]].add(d)
    assert xdMap == gdMap
  finally:
    pt = db_schema.ProcessorsTable(logger)
    pt.drop(cursor)
    pt.create(cursor)
    cursor.execute("DELETE from jobs")
    me.connection.commit()
Beispiel #6
0
def testSetPriority_Jobs():
  global me
  cursor = me.connection.cursor()
  try:
    dbtu.fillProcessorTable(cursor,3,stamp=dt.datetime(2008,3,4,5,6,7,tzinfo=UTC))
    cursor.execute("SELECT id FROM processors")
    me.connection.commit()
    counts = dict((x[0],x[0]) for x in cursor.fetchall())
    dbtu.addSomeJobs(cursor,counts,logger)
    cursor.execute("SELECT id FROM jobs")
    me.connection.commit()
    jobIds = [x[0] for x in cursor.fetchall()]
    half = len(jobIds)/2
    expectPri = jobIds[:half]
    expectNon = jobIds[half:]
    dbtu.setPriority(cursor,expectPri)
    cursor.execute("SELECT id FROM jobs WHERE priority > 0 ORDER BY id")
    gotPri = [x[0] for x in cursor.fetchall()]
    cursor.execute("SELECT id FROM jobs WHERE priority = 0 ORDER BY id")
    gotNon = [x[0] for x in cursor.fetchall()]
    me.connection.commit()
    assert expectPri == gotPri
    assert expectNon == gotNon
  finally:
    jt = db_schema.JobsTable(logger)
    jt.drop(cursor)
    jt.create(cursor)
    pt = db_schema.ProcessorsTable(logger)
    pt.drop(cursor)
    pt.create(cursor)
    me.connection.commit()
Beispiel #7
0
def testAddSomeJobs():
    global me
    cursor = me.connection.cursor()
    cursor.execute("SELECT id from processors")
    me.connection.commit()
    jdMap = {1: 1, 2: 2, 3: 3, 4: 0}
    xdMap = {1: set(), 2: set(), 3: set(), 4: set()}
    gdMap = {1: set(), 2: set(), 3: set(), 4: set()}
    data = _makeJobDetails(jdMap)
    for d in data:
        xdMap[d[2]].add(d)
    try:
        dbtu.fillProcessorTable(cursor, 3, logger=logger)
        cursor.execute("SELECT id from processors")
        me.connection.commit()
        addedJobs = dbtu.addSomeJobs(cursor, jdMap)
        me.connection.commit()
        assert data == addedJobs
        cursor.execute(
            "SELECT pathname,uuid,owner FROM jobs ORDER BY OWNER ASC")
        me.connection.commit()
        data2 = cursor.fetchall()
        assert len(data) == len(data2)
        for d in data2:
            gdMap[d[2]].add(d)
        assert xdMap == gdMap
    finally:
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        cursor.execute("DELETE from jobs")
        me.connection.commit()
 def testCleanUpCompletedAndFailedJobs_WithSaves(self):
   """
   testCleanUpCompletedAndFailedJobs_WithSaves(self):
   The default config asks for successful and failed jobs to be saved
   """
   global me
   cursor = self.connection.cursor()
   dbtestutil.fillProcessorTable(cursor,4)
   m = monitor.Monitor(me.config)
   createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
   runInOtherProcess(m.standardJobAllocationLoop, stopCondition=(lambda : self.jobsAllocated() == 14))
   started = dbtestutil.datetimeNow(cursor)
   self.connection.commit()
   completed = started + dt.timedelta(microseconds=100)
   idTimesAndSuccessSeq = [
     [started,completed,True,1],
     [started,completed,True,3],
     [started,completed,True,5],
     [started,completed,True,11],
     [started,None,False,2],
     [started,None,False,4],
     [started,None,False,8],
     [started,None,False,12],
     ]
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     jobdata = self.setJobSuccess(dbCur,idTimesAndSuccessSeq)
     m.cleanUpCompletedAndFailedJobs()
   finally:
     m.databaseConnectionPool.cleanup()
   successSave = set()
   failSave = set()
   expectSuccessSave = set()
   expectFailSave = set()
   remainBehind = set()
   for dir, dirs, files in os.walk(me.config.storageRoot):
     remainBehind.update(os.path.splitext(x)[0] for x in files)
   for d in idTimesAndSuccessSeq:
     if d[2]:
       expectSuccessSave.add(d[3])
     else:
       expectFailSave.add(d[3])
   for dir,dirs,files in os.walk(me.config.saveSuccessfulMinidumpsTo):
     successSave.update((os.path.splitext(x)[0] for x in files))
   for dir,dirs,files in os.walk(me.config.saveFailedMinidumpsTo):
     failSave.update((os.path.splitext(x)[0] for x in files))
   for x in jobdata:
     if None == x[2]:
       assert not x[1] in failSave and not x[1] in successSave, "if we didn't set success state for %s, then it wasn't copied"%(x[1])
       assert x[1] in remainBehind, "if we didn't set success state for %s, then it should remain behind"%(x[1])
       assert not x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])
     elif True == x[2]:
       assert  not x[1] in failSave and x[1] in successSave, "if we set success for %s, it is copied to %s"%(x[1],me.config.saveSussessfulMinidumpsTo)
       assert not x[0] in expectFailSave and x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])
       assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
     elif False == x[2]:
       assert  x[1] in failSave and not x[1] in successSave, "if we set failure for %s, it is copied to %s"%(x[1],me.config.saveFailedMinidumpsTo)
       assert  x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])
       assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
 def testQueuePriorityJob(self):
   """
   testQueuePriorityJob(self):
   queuePriorityJob does:
     removes job uuid from priorityjobs table (if possible)
     add uuid to priority_jobs_NNN table for NNN the processor id
     add uuid, id, etc to jobs table with priority > 0
   """
   global me
   m = monitor.Monitor(me.config)
   numProcessors = 4
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   data = dbtestutil.makeJobDetails({1:2,2:2,3:3,4:3})
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     procIdGenerator = m.jobSchedulerIter(dbCur)
     insertSql = "INSERT into priorityjobs (uuid) VALUES (%s);"
     uuidToId = {}
     for tup in data:
       uuidToId[tup[1]] = tup[2]
     uuids = uuidToId.keys()
     for uuid in uuids:
       if uuidToId[uuid]%2:
         dbCur.execute(insertSql,[uuid])
     dbCon.commit()
     countSql = "SELECT count(*) from %s;"
     dbCur.execute(countSql%('priorityjobs'))
     priorityJobCount = dbCur.fetchone()[0]
     dbCur.execute(countSql%('jobs'))
     jobCount = dbCur.fetchone()[0]
     eachPriorityJobCount = {}
     for uuid in uuids:
       procId = m.queuePriorityJob(dbCur,uuid, procIdGenerator)
       dbCur.execute('SELECT count(*) from jobs where jobs.priority > 0')
       assert dbCur.fetchone()[0] == 1 + jobCount, 'Expect that each queuePriority will increase jobs table by one'
       jobCount += 1
       try:
         eachPriorityJobCount[procId] += 1
       except KeyError:
         eachPriorityJobCount[procId] = 1
       if uuidToId[uuid]%2:
         dbCur.execute(countSql%('priorityjobs'))
         curCount = dbCur.fetchone()[0]
         assert curCount == priorityJobCount -1, 'Expected to remove one job from priorityjobs for %s'%uuid
         priorityJobCount -= 1
     for id in eachPriorityJobCount.keys():
       dbCur.execute(countSql%('priority_jobs_%s'%id))
       count = dbCur.fetchone()[0]
       assert eachPriorityJobCount[id] == count, 'Expected that the count %s added to id %s matches %s found'%(eachPriorityJobCount[id],id,count)
   finally:
     m.databaseConnectionPool.cleanup()
 def testUnbalancedJobSchedulerIter_AllOldProcs(self):
   """
   testUnbalancedJobSchedulerIter_AllOldProcs(self):
   With only processors that are too old, we will get a system exit
   """
   global me
   m = monitor.Monitor(me.config)
   cur = self.connection.cursor()
   try:
     stamp = dbtestutil.datetimeNow(cur) - dt.timedelta(minutes=10)
     dbtestutil.fillProcessorTable(cur, 5, stamp=stamp)
     iter = m.unbalancedJobSchedulerIter(cur)
     assert_raises(SystemExit, iter.next)
   finally:
     self.connection.commit()    
 def testCleanUpDeadProcessors_AllDead(self):
   """
   testCleanUpDeadProcessors_AllDead(self):
   As of 2009-01-xx, Monitor.cleanUpDeadProcessors(...) does nothing except write to a log file
   ... and fail if there are no live processors
   """
   global me
   m = monitor.Monitor(me.config)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     now = dbtestutil.datetimeNow(dbCur)
     then = now - dt.timedelta(minutes=10)
     dbtestutil.fillProcessorTable(dbCur, None, processorMap = {1:then,2:then,3:then,4:then,5:then })
     assert_raises(SystemExit,m.cleanUpDeadProcessors, dbCur)
   finally:
     m.databaseConnectionPool.cleanup()
Beispiel #12
0
def testFillProcessorTable_NoMap():
    """ testDbtestutil:testFillProcessorTable_NoMap():
  - check correct behavior for presence or absence of parameter 'stamp'
  - check correct number of entries created
  - check correct number of priority_job_X tables created
  """
    global me
    cursor = me.connection.cursor()
    ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
    dsql = "DELETE FROM processors"
    dropSql = "DROP TABLE IF EXISTS %s"
    stamps = [
        None, None,
        dt.datetime(2008, 1, 2, 3, 4, 5, 666, tzinfo=UTC),
        dt.datetime(2009, 1, 2, 3, tzinfo=UTC), None,
        dt.datetime(2010, 12, 11, 10, 9, 8, 777, tzinfo=UTC)
    ]
    try:
        for i in range(len(stamps)):
            before = utc_now()
            time.sleep(.01)
            dbtu.fillProcessorTable(cursor, i, stamp=stamps[i])
            time.sleep(.01)
            after = utc_now()
            cursor.execute(ssql)
            data = cursor.fetchall()
            assert i == len(data)
            for d in data:
                if stamps[i]:
                    assert stamps[i] == d[2]
                    assert stamps[i] == d[3]
                else:
                    assert before < d[2] and d[2] < after
                    assert d[2] == d[3]
            priJobsTables = db_postgresql.tablesMatchingPattern(
                "priority_jobs_%", cursor)
            assert i == len(priJobsTables)
            cursor.execute(dsql)
            if priJobsTables:
                cursor.execute(dropSql % (','.join(priJobsTables)))
            me.connection.commit()
    finally:
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        cursor.execute('DELETE FROM jobs')
        me.connection.commit()
 def testLookForPriorityJobsInJsonDumpStorage(self):
   """
   testLookForPriorityJobsInJsonDumpStorage(self):
     assure that we can find each uuid in standard and deferred storage
     assure that we do not find any bogus uuid
     assure that found uuids are added to jobs table with priority 1, and priority_jobs_NNN table for processor id NNN
   """
   global me
   m = monitor.Monitor(me.config)
   createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
   createJDS.createTestSet(createJDS.jsonMoreData,jsonKwargs={'logger':me.logger},rootDir=me.config.deferredStorageRoot)
   normUuids = createJDS.jsonFileData.keys()
   defUuids =  createJDS.jsonMoreData.keys()
   allUuids = []
   allUuids.extend(normUuids)
   allUuids.extend(defUuids)
   badUuid = '0bad0bad-0bad-6666-9999-0bad20001025'
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     numProcessors = 5
     dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
     self.markLog()
     m.lookForPriorityJobsInJsonDumpStorage(dbCur,allUuids)
     assert [] == allUuids, 'Expect that all the uuids were found and removed from the looked for "set"'
     m.lookForPriorityJobsInJsonDumpStorage(dbCur,(badUuid,))
     self.markLog()
     seg = self.extractLogSegment()
     getIdAndPrioritySql = "SELECT owner,priority from jobs WHERE uuid = %s"
     getCountSql = "SELECT count(*) from %s"
     idCounts = dict( ( (x,0) for x in range(1,numProcessors+1) ) )
     allUuids.extend(normUuids)
     allUuids.extend(defUuids)
     for uuid in allUuids:
       dbCur.execute(getIdAndPrioritySql,(uuid,))
       procid,pri = dbCur.fetchone()
       assert 1 == pri, 'Expected priority of 1 for %s, but got %s'%(uuid,pri)
       idCounts[procid] += 1
     dbCur.execute(getIdAndPrioritySql,(badUuid,))
     assert not dbCur.fetchone(), "Expect to get None entries in jobs table for badUuid"
     for id,expectCount in idCounts.items():
       dbCur.execute(getCountSql%('priority_jobs_%s'%id))
       seenCount = dbCur.fetchone()[0]
       assert expectCount == seenCount, 'Expected %s, got %s as count in priority_jobs_%s'%(expectCount,seenCount,id)
   finally:
     m.databaseConnectionPool.cleanup()
 def testQueueJob(self):
   """
   testQueueJob(self):
     make sure jobs table starts empty
     make sure returned values reflect database values
     make sure assigned processors are correctly reflected
     make sure duplicate uuid is caught, reported, and work continues
   """
   global me
   m = monitor.Monitor(me.config)
   sql = 'SELECT pathname,uuid,owner from jobs;'
   numProcessors = 4
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     procIdGenerator = m.jobSchedulerIter(dbCur)
     dbCur.execute(sql)
     beforeJobsData = dbCur.fetchall()
     assert 0 == len(beforeJobsData), 'There should be no queued jobs before we start our run'
     expectedHits = dict(((1+x,0) for x in range (numProcessors)))
     mapper = {}
     hits = dict(((1+x,0) for x in range (numProcessors)))
     for uuid,data in createJDS.jsonFileData.items():
       procId = m.queueJob(dbCur,uuid,procIdGenerator)
       expectedHits[procId] += 1;
       mapper[uuid] = procId
     dbCur.execute(sql)
     afterJobsData = dbCur.fetchall()
     for row in afterJobsData:
       hits[row[2]] += 1
       #me.logger.debug("ASSERT %s == %s for index %s"%(mapper.get(row[1],'WHAT?'), row[2], row[1]))
       assert mapper[row[1]] == row[2], 'Expected %s from %s but got %s'%(mapper.get(row[1],"WOW"),row[1],row[2])
     for key in expectedHits.keys():
       #me.logger.debug("ASSERTING %s == %s for index %s"%(expectedHits.get(key,'BAD KEY'),hits.get(key,'EVIL KEY'),key))
       assert expectedHits[key] == hits[key], "Expected count of %s for %s, but got %s"%(expectedHits[key],key,hits[key])
     self.markLog()
     dupUuid = createJDS.jsonFileData.keys()[0]
     try:
       procId = m.queueJob(dbCur,dupUuid,procIdGenerator)
       assert False, "Expected that IntegrityError would be raised queue-ing %s  but it wasn't"%(dupUuid)
     except psycopg2.IntegrityError:
       pass
     except Exception,x:
       assert False, "Expected that only IntegrityError would be raised, but got %s: %s"%(type(x),x)
     self.markLog()
Beispiel #15
0
def testSetPriority_PriorityJobs():
    global me
    cursor = me.connection.cursor()
    try:
        dbtu.fillProcessorTable(cursor,
                                3,
                                stamp=dt.datetime(2008,
                                                  3,
                                                  4,
                                                  5,
                                                  6,
                                                  7,
                                                  tzinfo=UTC))
        cursor.execute("SELECT id FROM processors")
        counts = dict((x[0], x[0]) for x in cursor.fetchall())
        dbtu.addSomeJobs(cursor, counts, logger)
        cursor.execute("SELECT id,uuid FROM jobs")
        me.connection.commit()
        data = cursor.fetchall()
        jobIds = [x[0] for x in data]
        jobUuids = [x[1] for x in data]
        half = len(jobIds) / 2
        expect1Pri = jobIds[:half]
        expect2Pri = jobIds[half:]
        expect1Uuid = sorted(jobUuids[:half])
        expect2Uuid = sorted(jobUuids[half:])
        dbtu.setPriority(cursor, expect1Pri, 'priority_jobs_1')
        dbtu.setPriority(cursor, expect2Pri, 'priority_jobs_2')
        sql = "SELECT uuid from %s ORDER BY uuid"
        cursor.execute(sql % 'priority_jobs_1')
        got1Uuid = [x[0] for x in cursor.fetchall()]
        cursor.execute(sql % 'priority_jobs_2')
        got2Uuid = [x[0] for x in cursor.fetchall()]
        me.connection.commit()
        assert expect1Uuid == got1Uuid
        assert expect2Uuid == got2Uuid
    finally:
        jt = db_schema.JobsTable(logger)
        jt.drop(cursor)
        jt.create(cursor)
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        me.connection.commit()
Beispiel #16
0
def testFillProcessorTable_NoMap():
  """ testDbtestutil:testFillProcessorTable_NoMap():
  - check correct behavior for presence or absence of parameter 'stamp'
  - check correct number of entries created
  - check correct number of priority_job_X tables created
  """
  global me
  cursor = me.connection.cursor()
  ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
  dsql = "DELETE FROM processors"
  dropSql = "DROP TABLE IF EXISTS %s"
  stamps = [None,None,dt.datetime(2008,1,2,3,4,5,666,tzinfo=UTC),dt.datetime(2009,1,2,3,tzinfo=UTC), None, dt.datetime(2010,12,11,10,9,8,777,tzinfo=UTC)]
  try:
    for i in range(len(stamps)):
      before = utc_now()
      time.sleep(.01)
      dbtu.fillProcessorTable(cursor,i,stamp=stamps[i])
      time.sleep(.01)
      after =  utc_now()
      cursor.execute(ssql)
      data = cursor.fetchall()
      assert i == len(data)
      for d in data:
        if stamps[i]:
          assert stamps[i] == d[2]
          assert stamps[i] == d[3]
        else:
          assert before < d[2] and d[2] < after
          assert d[2] == d[3]
      priJobsTables = db_postgresql.tablesMatchingPattern("priority_jobs_%",cursor)
      assert i == len(priJobsTables)
      cursor.execute(dsql)
      if priJobsTables:
        cursor.execute(dropSql%(','.join(priJobsTables)))
      me.connection.commit()
  finally:
    pt = db_schema.ProcessorsTable(logger)
    pt.drop(cursor)
    pt.create(cursor)
    cursor.execute('DELETE FROM jobs')
    me.connection.commit()
Beispiel #17
0
def testFillProcessorTable_WithMap():
    """testDbtestutil:testFillProcessorTable_WithMap():
  - check that othr params ignored for non-empty map
  - check that mapped data is used correctly (id is ignored, mapped stamp is lastseendatetime)
  """
    global me
    cursor = me.connection.cursor()
    ssql = "SELECT id,name,startdatetime,lastseendatetime FROM processors"
    dsql = "DELETE FROM processors"
    dropSql = "DROP TABLE IF EXISTS %s"
    tmap = {
        12: dt.datetime(2008, 3, 4, 5, 6, 12, tzinfo=UTC),
        37: dt.datetime(2009, 5, 6, 7, 8, 37, tzinfo=UTC)
    }
    try:
        dbtu.fillProcessorTable(cursor,
                                7,
                                stamp=dt.datetime(2009, 4, 5, 6, 7,
                                                  tzinfo=UTC),
                                processorMap=tmap)
        cursor.execute(ssql)
        data = cursor.fetchall()
        me.connection.commit()
        assert 2 == len(data)
        expectSet = set([
            dt.datetime(2008, 3, 4, 5, 6, 12, tzinfo=UTC),
            dt.datetime(2009, 5, 6, 7, 8, 37, tzinfo=UTC)
        ])
        gotSet = set()
        for d in data:
            assert dt.datetime(2009, 4, 5, 6, 7, tzinfo=UTC) == d[2]
            gotSet.add(d[3])
            assert d[0] in [1, 2]
        assert expectSet == gotSet
    finally:
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        cursor.execute('DELETE FROM jobs')
        me.connection.commit()
Beispiel #18
0
def testSetPriority_Jobs():
    global me
    cursor = me.connection.cursor()
    try:
        dbtu.fillProcessorTable(cursor,
                                3,
                                stamp=dt.datetime(2008,
                                                  3,
                                                  4,
                                                  5,
                                                  6,
                                                  7,
                                                  tzinfo=UTC))
        cursor.execute("SELECT id FROM processors")
        me.connection.commit()
        counts = dict((x[0], x[0]) for x in cursor.fetchall())
        dbtu.addSomeJobs(cursor, counts, logger)
        cursor.execute("SELECT id FROM jobs")
        me.connection.commit()
        jobIds = [x[0] for x in cursor.fetchall()]
        half = len(jobIds) / 2
        expectPri = jobIds[:half]
        expectNon = jobIds[half:]
        dbtu.setPriority(cursor, expectPri)
        cursor.execute("SELECT id FROM jobs WHERE priority > 0 ORDER BY id")
        gotPri = [x[0] for x in cursor.fetchall()]
        cursor.execute("SELECT id FROM jobs WHERE priority = 0 ORDER BY id")
        gotNon = [x[0] for x in cursor.fetchall()]
        me.connection.commit()
        assert expectPri == gotPri
        assert expectNon == gotNon
    finally:
        jt = db_schema.JobsTable(logger)
        jt.drop(cursor)
        jt.create(cursor)
        pt = db_schema.ProcessorsTable(logger)
        pt.drop(cursor)
        pt.create(cursor)
        me.connection.commit()
Beispiel #19
0
def testSetPriority_PriorityJobs():
  global me
  cursor = me.connection.cursor()
  try:
    dbtu.fillProcessorTable(cursor,3,stamp=dt.datetime(2008,3,4,5,6,7,tzinfo=UTC))
    cursor.execute("SELECT id FROM processors")
    counts = dict((x[0],x[0]) for x in cursor.fetchall())
    dbtu.addSomeJobs(cursor,counts,logger)
    cursor.execute("SELECT id,uuid FROM jobs")
    me.connection.commit()
    data = cursor.fetchall()
    jobIds = [x[0] for x in data]
    jobUuids = [x[1] for x in data]
    half = len(jobIds)/2
    expect1Pri = jobIds[:half]
    expect2Pri = jobIds[half:]
    expect1Uuid = sorted(jobUuids[:half])
    expect2Uuid = sorted(jobUuids[half:])
    dbtu.setPriority(cursor,expect1Pri,'priority_jobs_1')
    dbtu.setPriority(cursor,expect2Pri,'priority_jobs_2')
    sql = "SELECT uuid from %s ORDER BY uuid"
    cursor.execute(sql%'priority_jobs_1')
    got1Uuid = [x[0] for x in cursor.fetchall()]
    cursor.execute(sql%'priority_jobs_2')
    got2Uuid = [x[0] for x in cursor.fetchall()]
    me.connection.commit()
    assert expect1Uuid == got1Uuid
    assert expect2Uuid == got2Uuid
  finally:
    jt = db_schema.JobsTable(logger)
    jt.drop(cursor)
    jt.create(cursor)
    pt = db_schema.ProcessorsTable(logger)
    pt.drop(cursor)
    pt.create(cursor)
    me.connection.commit()
 def testUnbalancedJobSchedulerIter_SomeOldProcs(self):
   """
   testUnbalancedJobSchedulerIter_SomeOldProcs(self):
   With some processors that are too old, we will get only the young ones in some order
   """
   global me
   m = monitor.Monitor(me.config)
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     now = dbtestutil.datetimeNow(dbCur)
     then = now - dt.timedelta(minutes=10)
     dbtestutil.fillProcessorTable(dbCur, None, processorMap = {1:then,2:then,3:now,4:then,5:then })
     iter = m.unbalancedJobSchedulerIter(dbCur)
     hits = dict(((x,0) for x in range (1,6)))
     num = 0;
     for id in iter:
       num += 1
       hits[int(id)] += 1
       if num > 3: break
     for i in (1,2,4,5):
       assert hits[i] == 0, 'Expected that no old processors would be used in the iterator'
     assert hits[3] == 4, 'Expected that all the iterations would choose the one live processor'
   finally:
     m.databaseConnectionPool.cleanup()
 def testLookForPriorityJobsAlreadyInQueue(self):
   """
   testLookForPriorityJobsAlreadyInQueue(self):
     Check that we erase jobs from priorityjobs table if they are there
     Check that we increase by one the priority in jobs table
     Check that we add job (only) to appropriate priority_jobs_NNN table
     Check that attempting same uuid again raises IntegrityError
   """
   global me
   numProcessors = 5
   dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
   m = monitor.Monitor(me.config)
   data = dbtestutil.makeJobDetails({1:2,2:2,3:3,4:3,5:2})
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     procIdGenerator = m.jobSchedulerIter(dbCur)
     insertSql = "INSERT into priorityjobs (uuid) VALUES (%s);"
     updateSql = "UPDATE jobs set priority = 1 where uuid = %s;"
     allUuids = [x[1] for x in data]
     priorityJobUuids = [];
     missingUuids = []
     uuidToProcId = {}
     for counter in range(len(allUuids)):
       uuid = allUuids[counter]
       if 0 == counter % 3: # add to jobs and priorityjobs table
         uuidToProcId[uuid] = m.queueJob(dbCur,data[counter][1],procIdGenerator)
         priorityJobUuids.append((uuid,))
       elif 1 == counter % 3: # add to jobs table only
         uuidToProcId[uuid] = m.queueJob(dbCur,data[counter][1],procIdGenerator)
       else: # 2== counter %3 # don't add anywhere
         missingUuids.append(uuid)
     dbCur.executemany(insertSql,priorityJobUuids)
     dbCon.commit()
     for uuid in priorityJobUuids:
       dbCur.execute(updateSql,(uuid,))
     self.markLog()
     m.lookForPriorityJobsAlreadyInQueue(dbCur,allUuids)
     self.markLog()
     seg = self.extractLogSegment()
     for line in seg:
       date,tyme,level,dash,thr,ddash,msg = line.split(None,6)
       assert thr == 'MainThread','Expected only MainThread log lines, got[%s]'%(line)
       uuid = msg.split()[2]
       assert not uuid in missingUuids, 'Found %s that should not be in missingUuids'%(uuid)
       assert uuid in uuidToProcId.keys(), 'Found %s that should be in uuidToProcId'%(uuid)
     countSql = "SELECT count(*) from %s;"
     dbCur.execute(countSql%('priorityjobs'))
     priCount = dbCur.fetchone()[0]
     assert 0 == priCount, 'Expect that all the priority jobs are removed, but found %s'%(priCount)
     countSql = "SELECT count(*) from priority_jobs_%s WHERE uuid = %%s;"
     for uuid,procid in uuidToProcId.items():
       dbCur.execute(countSql%(procid),(uuid,))
       priCount = dbCur.fetchone()[0]
       assert priCount == 1, 'Expect to find %s in priority_jobs_%s exactly once'%(uuid,procid)
       for badid in range(1,numProcessors+1):
         if badid == procid: continue
         dbCur.execute(countSql%(badid),(uuid,))
         badCount = dbCur.fetchone()[0]
         assert 0 == badCount, 'Expect to find %s ONLY in other priority_jobs_NNN, found it in priority_jobs_%s'%(uuid,badid)
     for uuid,procid in uuidToProcId.items():
       try:
         m.lookForPriorityJobsAlreadyInQueue(dbCur,(uuid,))
         assert False, 'Expected line above would raise IntegrityError or InternalError'
       except psycopg2.IntegrityError,x:
         dbCon.rollback()
       except:
         assert False, 'Expected only IntegrityError from the try block'
 def testCleanUpCompletedAndFailedJobs_WithoutSaves(self):
   """
   testCleanUpCompletedAndFailedJobs_WithoutSaves(self):
   First, dynamically set config to not save successful or failed jobs. They are still removed from the file system
   """
   global me
   cc = copy.copy(me.config)
   cursor = self.connection.cursor()
   dbtestutil.fillProcessorTable(cursor,4)
   for conf in ['saveSuccessfulMinidumpsTo','saveFailedMinidumpsTo']:
     cc[conf] = ''
   m = monitor.Monitor(cc)
   createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
   runInOtherProcess(m.standardJobAllocationLoop, stopCondition=(lambda : self.jobsAllocated() == 14))
   started = dbtestutil.datetimeNow(cursor)
   self.connection.commit()
   completed = started + dt.timedelta(microseconds=100)
   idTimesAndSuccessSeq = [
     [started,completed,True,1],
     [started,completed,True,3],
     [started,completed,True,5],
     [started,completed,True,11],
     [started,None,False,2],
     [started,None,False,4],
     [started,None,False,8],
     [started,None,False,12],
     ]
   dbCon,dbCur = m.getDatabaseConnectionPair()
   try:
     jobdata = self.setJobSuccess(dbCur,idTimesAndSuccessSeq)
     m.cleanUpCompletedAndFailedJobs()
   finally:
     m.databaseConnectionPool.cleanup()
   successSave = set()
   failSave = set()
   expectSuccessSave = set()
   expectFailSave = set()
   for d in idTimesAndSuccessSeq:
     if d[2]:
       expectSuccessSave.add(d[3])
     else:
       expectFailSave.add(d[3])
   for dir,dirs,files in os.walk(me.config.saveSuccessfulMinidumpsTo):
     successSave.update((os.path.splitext(x)[0] for x in files))
   for dir,dirs,files in os.walk(me.config.saveFailedMinidumpsTo):
     failSave.update((os.path.splitext(x)[0] for x in files))
   remainBehind = set()
   for dir, dirs, files in os.walk(me.config.storageRoot):
     remainBehind.update(os.path.splitext(x)[0] for x in files)
   assert len(successSave) == 0, "We expect not to save any successful jobs with this setting"
   assert len(failSave) == 0, "We expect not to save any failed jobs with this setting"
   for x in jobdata:
     if None ==  x[2]:
       assert  not x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])
       assert x[1] in remainBehind, "if we didn't set success state for %s, then it should remain behind"%(x[1])
     elif True ==  x[2]:
       assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
       assert not x[0] in expectFailSave and x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])
     elif False == x[2]:
       assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
       assert x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectatations for id=%s"%(x[0])