Example 1
    def handleEvent(self, payload):
        """
        The payload for a cleanup handler is a job id.
        """
        # Assumes module-level imports of os, logging and tarfile, plus the
        # JobState and Job APIs used below.
        if self.failureArchive is None:
            logging.error("No Failure Archive set: Cannot Archive Job:\n %s" % payload)
            return
        try:
            logging.debug(">FailureCleanupHandler< archiving " + \
                          "information for jobspec: " + str(payload))
            try:
                os.makedirs(self.failureArchive)
            except OSError:
                # archive directory already exists
                pass
            cacheDirLocation = JobState.general(str(payload))['CacheDirLocation']
            logging.debug(">FailureCleanupHandler< archiving and removing directory: " + cacheDirLocation)
            # NOTE: check what this does when it is repeated (e.g. after a crash)
            tar = tarfile.open(self.failureArchive + '/' + str(payload) + '.tar.gz', 'w:gz')
            short_root = cacheDirLocation.split('/')[-1]
            tar.add(cacheDirLocation, short_root)
            tar.close()
            try:
                # remove the job cache directory tree bottom-up
                for root, dirs, files in os.walk(cacheDirLocation, topdown=False):
                    for name in files:
                        os.remove(os.path.join(root, name))
                    for name in dirs:
                        os.rmdir(os.path.join(root, name))
                os.rmdir(cacheDirLocation)
            except Exception, ex:
                logging.debug(">FailureCleanupHandler< WARNING job cleanup: " + str(ex))
            JobState.cleanout(str(payload))
            Job.remove(str(payload))
            logging.debug(">FailureCleanupHandler< archive completed for jobspecID: " + str(payload))
        except StandardError, ex:
            # NOTE: the snippet ends before the outer try block is closed;
            # this except clause is an assumed minimal completion.
            logging.error(">FailureCleanupHandler< cleanup failed for %s: %s" % (payload, str(ex)))
Example 2
    def testA(self):
        """change state test"""
        Session.set_database(dbConfig)
        Session.connect()
        Session.start_transaction()
        try:
            for i in [1, 2]:
                JobState.cleanout("jobClassID" + str(i))
        except StandardError, ex:
            msg = "Failed State Change TestA:\n"
            msg += str(ex)
            self.fail(msg)
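A sketch of the unittest scaffolding such a test method would typically live in (the class name is an assumption; Session, dbConfig and JobState are the objects used above and would be imported from the project):

    import unittest

    class JobStateTests(unittest.TestCase):    # hypothetical class name
        pass    # testA from above would be defined here

    if __name__ == '__main__':
        unittest.main()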
Example 3
        # kill job
        self.killJob(jobSpecId, erase=True)

        # set the number of executions equal to the maximum number of
        # allowed retries, so the job will not be resubmitted (or submitted
        # at all, if it has not been submitted yet)
        try:
            JobState.doNotAllowMoreSubmissions([jobSpecId])
        except ProdAgentException, ex:
            msg = "Updating max racers fields failed for job %s\n" % jobSpecId
            msg += str(ex)
            logging.error(msg)
            raise

        # remove all entries
        JobState.cleanout(jobSpecId)

    def eraseWorkflow(self, workflowSpecId):
        """

        Arguments:

          workflowSpecId -- the workflow id.

        Return:

          none

        """

        logging.info("BossLiteKiller.eraseWorkflow(%s)" % workflowSpecId)