Example #1
0
 def delete(self, jobStoreID):
     """
     Remove the job identified by jobStoreID from this job store.

     The jobStoreID is the relative path to the directory containing the
     job, so removing that directory deletes the job. Deleting a job that
     does not exist is a no-op.
     """
     if not self.exists(jobStoreID):
         return
     # First drop the job-associated files flagged for cleanup; these may
     # or may not live under the job's own directory.
     robust_rmtree(self._getJobFilesCleanupDir(jobStoreID))
     # Then remove the job's directory itself.
     robust_rmtree(self._getJobDirFromId(jobStoreID))
Example #2
0
def destroy_all_process_names():
    """
    Remove every process name file we have registered, because our process
    is going away.

    We let all our FDs get closed by the process death.

    We assume there is nobody else using the system during exit to race with.
    """

    global current_process_name_for

    # One name file was registered per work directory; remove each of them.
    for work_dir in current_process_name_for:
        file_name = current_process_name_for[work_dir]
        robust_rmtree(os.path.join(work_dir, file_name))
Example #3
0
    def _removeDeadJobs(cls, nodeInfo: str, batchSystemShutdown: bool = False) -> None:
        """
        Look at the state of all jobs registered in the individual job state files, and handle them
        (clean up the disk).

        A job is considered dead when the process name recorded in its state
        file no longer exists. Exactly one surviving process wins a
        non-blocking flock race on the job's directory and performs cleanup.

        :param str nodeInfo: The location of the workflow directory on the node.
        :param bool batchSystemShutdown: Is the batch system in the process of shutting down?
        :return: None
        """

        for jobState in cls._getAllJobStates(nodeInfo):
            if not process_name_exists(nodeInfo, jobState['jobProcessName']):
                # The recorded process is gone. We need to have a race to pick
                # someone to clean up; an exclusive lock on the job directory
                # decides the single winner.

                try:
                    # Open the directory so we have an FD to lock.
                    dirFD = os.open(jobState['jobDir'], os.O_RDONLY)
                except FileNotFoundError:
                    # The cleanup has happened and we can't contest for it
                    continue

                try:
                    # Try and lock it without blocking; failure just means
                    # someone else got there first.
                    fcntl.lockf(dirFD, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except IOError as e:
                    # We lost the race. Someone else is alive and has it locked.
                    # Release our FD and move on to the next job.
                    os.close(dirFD)
                else:
                    # We got it
                    logger.warning(
                        'Detected that job (%s) prematurely terminated.  Fixing the '
                        'state of the job on disk.', jobState['jobName'])

                    try:
                        if not batchSystemShutdown:
                            logger.debug(
                                "Deleting the stale working directory.")
                            # Delete the old work directory if it still exists.  Do this only during
                            # the life of the program and don't do it during the batch system
                            # cleanup. Leave that to the batch system cleanup code.
                            robust_rmtree(jobState['jobDir'])
                    finally:
                        # Always release the lock and close the FD, even if
                        # the cleanup itself raised.
                        fcntl.lockf(dirFD, fcntl.LOCK_UN)
                        os.close(dirFD)
Example #4
0
 def destroy(self):
     """Delete the whole job store directory, if it is present on disk."""
     if not os.path.exists(self.jobStoreDir):
         return
     robust_rmtree(self.jobStoreDir)