def _create_locks(self, job):
    """Build the full list of StateLocks a Job must hold.

    Read locks are taken on every dependency the dep cache reports for
    the job; for StateChangeJobs, additional read locks cover the
    dependencies of both the old and the new state, and one write lock
    covers the object whose state is being changed. Any extra locks the
    job returns from Job.create_locks are appended last.
    """
    from itertools import chain

    # Read lock on each direct dependency of the job.
    locks = [
        StateLock(job=job, locked_item=dep.stateful_object, write=False)
        for dep in self._dep_cache.get(job).all()
    ]

    if isinstance(job, StateChangeJob):
        subject = job.get_stateful_object()
        old_state = job.old_state
        new_state = job.state_transition.new_state

        # Read-lock what is depended on by BOTH the old and the new
        # state: e.g. taking a mount unmounted->mounted must lock the
        # new state's requirement of lnet_up, while mounted->unmounted
        # must lock the old state's (so nobody stops lnet while we are
        # still running).
        both_states = chain(
            self._dep_cache.get(subject, old_state).all(),
            self._dep_cache.get(subject, new_state).all(),
        )
        locks.extend(
            StateLock(job=job, locked_item=d.stateful_object, write=False)
            for d in both_states
        )

        # Write-lock the object whose state this job transitions.
        locks.append(
            StateLock(
                job=job,
                locked_item=subject,
                begin_state=old_state,
                end_state=new_state,
                write=True,
            ))

    # Let the job contribute any extra locks of its own.
    locks.extend(job.create_locks())
    return locks
def create_locks(self):
    """Return a read lock on the filesystem named by ``self.fsname``."""
    filesystem = ManagedFilesystem.objects.get(name=self.fsname)
    return [StateLock(job=self, locked_item=filesystem, write=False)]
def create_locks(self):
    """Return the inherited locks plus a write lock on the target.

    The lock's begin and end states are both "mounted" — the target's
    state is unchanged across the job, but other state changes on it
    are excluded while this job holds the lock.
    """
    target_lock = StateLock(
        job=self,
        locked_item=self.target,
        begin_state="mounted",
        end_state="mounted",
        write=True,
    )
    return super(MigrateTargetJob, self).create_locks() + [target_lock]
def create_locks(self):
    """Return a write lock on this job's corosync configuration."""
    lock = StateLock(
        job=self,
        locked_item=self.corosync_configuration,
        write=True,
    )
    return [lock]