Example #1
 def cancel_all_jobs(self):
     """
     cancel all executing jobs for all volumes.
     """
     with lock_timeout_log(self.lock):
         for volname in list(self.q):
             self._cancel_jobs(volname)
Example #2
 def queue_job(self, volname):
     """
     queue a volume for asynchronous job execution.
     """
     log.info("queuing job for volume '{0}'".format(volname))
     with lock_timeout_log(self.lock):
         if volname not in self.q:
             self.q.append(volname)
             self.jobs[volname] = []
         self.cv.notifyAll()
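
queue_job() only enqueues the volume name and notifies; the consumer side does not appear on this page. As a rough, self-contained sketch (not the module's actual JobThread code), a worker could wait on the same condition variable and pop queued volume names like this:

import threading
import time

lock = threading.Lock()
cv = threading.Condition(lock)   # same lock/condition pairing the examples rely on
q = []                           # queued volume names, as in queue_job()

def worker():
    # hypothetical consumer loop; the real logic lives in JobThread.run()
    with lock:
        while True:
            while not q:
                cv.wait()        # sleeps until queue_job() calls notifyAll()
            volname = q.pop(0)
            print("picked up volume '{0}'".format(volname))
            # ... a real worker would release the lock while running the job ...

threading.Thread(target=worker, daemon=True).start()
with lock:
    q.append("vol1")
    cv.notify_all()
time.sleep(0.2)                  # give the worker a moment to run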
Example #3
    def cancel_job(self, volname, job):
        """
        override base class `cancel_job`. interpret @job as (clone, group) tuple.
        """
        clonename = job[0]
        groupname = job[1]
        track_idx = None

        try:
            with open_volume(self.fs_client, volname) as fs_handle:
                with open_group(fs_handle, self.vc.volspec, groupname) as group:
                    with open_subvol(self.fs_client.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
                        status = clone_subvolume.status
                        clone_state = SubvolumeStates.from_value(status['state'])
                        if not self.is_clone_cancelable(clone_state):
                            raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
                        track_idx = self.get_clone_tracking_index(fs_handle, clone_subvolume)
                        if not track_idx:
                            log.warning("cannot lookup clone tracking index for {0}".format(clone_subvolume.base_path))
                            raise VolumeException(-errno.EINVAL, "error canceling clone")
                        clone_job = (track_idx, clone_subvolume.base_path)
                        with lock_timeout_log(self.lock):
                            # read the tracked jobs under the lock to avoid racing with job threads
                            jobs = [j[0] for j in self.jobs[volname]]
                            if SubvolumeOpSm.is_init_state(SubvolumeTypes.TYPE_CLONE, clone_state) and clone_job not in jobs:
                                log.debug("cancelling pending job {0}".format(clone_job))
                                # clone has not started yet -- cancel right away.
                                self._cancel_pending_clone(fs_handle, clone_subvolume, clonename, groupname, status, track_idx)
                                return
            # cancelling an on-going clone would persist "canceled" state in subvolume metadata.
            # to persist the new state, async cloner accesses the volume in exclusive mode.
            # accessing the volume in exclusive mode here would lead to deadlock.
            assert track_idx is not None
            with lock_timeout_log(self.lock):
                with open_volume_lockless(self.fs_client, volname) as fs_handle:
                    with open_group(fs_handle, self.vc.volspec, groupname) as group:
                        with open_subvol(self.fs_client.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
                            if not self._cancel_job(volname, (track_idx, clone_subvolume.base_path)):
                                raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
        except (IndexException, MetadataMgrException) as e:
            log.error("error cancelling clone {0}: ({1})".format(job, e))
            raise VolumeException(-errno.EINVAL, "error canceling clone")
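
Every example on this page guards shared state with lock_timeout_log, which is not shown here. A plausible minimal reconstruction, assuming it is a context manager that retries acquisition with a timeout and logs while blocked (the interval and message are guesses, not the module's actual code):

import logging
from contextlib import contextmanager

log = logging.getLogger(__name__)

@contextmanager
def lock_timeout_log(lock, timeout=5):
    # keep retrying so a stuck lock shows up in the log instead of
    # hanging silently; threading.Lock.acquire() accepts a timeout
    while not lock.acquire(timeout=timeout):
        log.warning("lock not acquired for {0} seconds -- possible deadlock".format(timeout))
    try:
        yield
    finally:
        lock.release()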
Example #4
 def run(self):
     log.debug("tick thread {} starting".format(self.name))
     with lock_timeout_log(self.lock):
         while not self.stopping.is_set():
             c = len(self.threads)
             if c > self.nr_concurrent_jobs:
                 # Decrease concurrency: notify threads which are waiting for a job to terminate.
                 log.debug("waking threads to terminate due to job reduction")
                 self.cv.notifyAll()
             elif c < self.nr_concurrent_jobs:
                 # Increase concurrency: create more threads.
                 log.debug("creating new threads to job increase")
                 for i in range(c, self.nr_concurrent_jobs):
                     self.threads.append(JobThread(self, self.vc, name="{0}.{1}.{2}".format(self.name_pfx, time.time(), i)))
                     self.threads[-1].start()
             self.cv.wait(timeout=5)
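
Note that the tick loop above only creates threads; shrinking happens cooperatively, with notifyAll() waking workers so each can notice the pool is oversized and exit on its own. A self-contained sketch of that resize protocol (names modeled on the snippet, but the worker-side exit check is an assumption):

import threading

class Pool:
    def __init__(self, size):
        self.lock = threading.Lock()
        self.cv = threading.Condition(self.lock)
        self.nr_concurrent_jobs = size
        self.threads = []

    def worker(self):
        me = threading.current_thread()
        with self.lock:
            while True:
                if len(self.threads) > self.nr_concurrent_jobs:
                    # excess thread: deregister and exit to complete the shrink
                    self.threads.remove(me)
                    return
                self.cv.wait()   # woken by tick() on every resize

    def tick(self):
        # analogous to run() above: reconcile thread count with the target
        with self.lock:
            c = len(self.threads)
            if c > self.nr_concurrent_jobs:
                self.cv.notify_all()
            elif c < self.nr_concurrent_jobs:
                for _ in range(c, self.nr_concurrent_jobs):
                    t = threading.Thread(target=self.worker, daemon=True)
                    self.threads.append(t)
                    t.start()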
Example #5
 def cancel_jobs(self, volname):
     """
     cancel all executing jobs for a given volume.
     """
     with lock_timeout_log(self.lock):
         self._cancel_jobs(volname)
Example #6
 def cancel_job(self, volname, job):
     with lock_timeout_log(self.lock):
         return self._cancel_job(volname, job)
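
Examples #5 and #6 illustrate a convention used throughout: the public method acquires self.lock, while the underscore variant assumes the caller already holds it, so already-locked paths (such as cancel_job in Example #3) can call it without deadlocking on a non-reentrant lock. A stripped-down illustration of the pattern (the job bookkeeping here is hypothetical):

import threading

class JobManager:
    def __init__(self):
        self.lock = threading.Lock()
        self.jobs = {}   # volname -> list of jobs

    def cancel_job(self, volname, job):
        # public entry point: acquires the lock itself
        with self.lock:
            return self._cancel_job(volname, job)

    def _cancel_job(self, volname, job):
        # lock must already be held by the caller
        entries = self.jobs.get(volname, [])
        if job in entries:
            entries.remove(job)
            return True
        return False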