def refresh_lock(lock, relatedobjects=None):
    """Extend the lease on a held lock.

    Raises exceptions.LockException if the lock's TTL has already
    lapsed, since refreshing it would no longer be safe.
    """
    if not lock.is_acquired():
        raise exceptions.LockException(
            'The lock on %s has expired.' % lock.path)

    lock.refresh()
    logutil.info(relatedobjects, 'Refreshed lock %s' % lock.name)
def refresh_lock(lock, log_ctx=LOG):
    """Renew the lease on a lock, logging and raising if it expired."""
    if lock.is_acquired():
        # Still held, so it is safe to extend the lease.
        lock.refresh()
        log_ctx.withField('lock', lock.name).debug('Refreshed lock')
        return

    log_ctx.withField('lock', lock.name).info(
        'Attempt to refresh an expired lock')
    raise exceptions.LockException(
        'The lock on %s has expired.' % lock.path)
def __exit__(self, _exception_type, _exception_value, _traceback):
    """Release the lock on context exit.

    On failure, dump the set of currently held locks into the log to
    aid debugging, then raise exceptions.LockException.
    """
    released = self.release()
    if released:
        return

    held = list(get_all(LOCK_PREFIX, None))
    self.log_ctx.withFields({
        'locks': held,
        'key': self.name,
    }).error('Cannot release lock')
    raise exceptions.LockException('Cannot release lock: %s' % self.name)
def refresh_lock(lock, log_ctx=LOG):
    """Renew a lock lease.

    Refuses to operate while a read-only cache is in use, and raises
    exceptions.LockException if the lock has already expired.
    """
    if read_only_cache():
        raise exceptions.ForbiddenWhileUsingReadOnlyCache(
            'You cannot hold locks while using a read only cache')

    if lock.is_acquired():
        lock.refresh()
        log_ctx.with_field('lock', lock.name).debug('Refreshed lock')
    else:
        log_ctx.with_field(
            'lock', lock.name).info('Attempt to refresh an expired lock')
        raise exceptions.LockException(
            'The lock on %s has expired.' % lock.path)
def __enter__(self):
    """Acquire the lock, polling until success or self.timeout elapses.

    Emits db events and log lines when acquisition is slower than the
    configured SLOW_LOCK_THRESHOLD, and raises exceptions.LockException
    if the lock cannot be acquired before the timeout.
    """
    start_time = time.time()
    slow_warned = False
    threshold = int(config.parsed.get('SLOW_LOCK_THRESHOLD'))

    while time.time() - start_time < self.timeout:
        if self.acquire():
            duration = time.time() - start_time
            # Report slow-but-successful acquisitions here, on the
            # success path only. Previously this lived in a finally
            # block and therefore also ran after a timed-out attempt,
            # recording a misleading 'acquired' event and "was slow"
            # log line for acquisitions that in fact failed.
            if duration > threshold:
                db.add_event(self.objecttype, self.objectname,
                             'lock', 'acquired', None,
                             'Waited %d seconds for lock' % duration)
                logutil.info(
                    self.relatedobjects,
                    'Acquiring a lock on %s was slow: %.02f seconds'
                    % (self.path, duration))
            return self

        duration = time.time() - start_time
        if duration > threshold and not slow_warned:
            # Warn once (and record an event) when waiting longer than
            # the configured threshold, identifying the current holder.
            db.add_event(self.objecttype, self.objectname,
                         'lock', 'acquire', None,
                         'Waiting for lock more than threshold')
            node, pid = self.get_holder()
            logutil.info(
                self.relatedobjects,
                'Waiting for lock on %s: %.02f seconds, threshold '
                '%d seconds. Holder is pid %s on %s.'
                % (self.path, duration, threshold, pid, node))
            slow_warned = True
        time.sleep(1)

    # Timed out: record the failure, identify the holder, and raise.
    duration = time.time() - start_time
    db.add_event(self.objecttype, self.objectname,
                 'lock', 'failed', None,
                 'Failed to acquire lock after %.02f seconds' % duration)
    node, pid = self.get_holder()
    logutil.info(
        self.relatedobjects,
        'Failed to acquire lock %s after %.02f seconds. Holder is pid %s on %s.'
        % (self.path, duration, pid, node))
    raise exceptions.LockException(
        'Cannot acquire lock %s, timed out after %.02f seconds'
        % (self.name, duration))
def __enter__(self):
    """Acquire the lock, polling until success or self.timeout elapses.

    Emits db events and structured log lines when acquisition is slower
    than the configured SLOW_LOCK_THRESHOLD, and raises
    exceptions.LockException if the lock cannot be acquired in time.
    """
    start_time = time.time()
    slow_warned = False
    threshold = int(config.SLOW_LOCK_THRESHOLD)

    while time.time() - start_time < self.timeout:
        res = self.acquire()
        if res:
            # Successful acquisition. Record it as an event (and log it)
            # only if it took longer than the slow-lock threshold.
            duration = time.time() - start_time
            if duration > threshold:
                db.add_event(self.objecttype, self.objectname,
                             'lock', 'acquired', None,
                             'Waited %d seconds for lock' % duration)
                self.log_ctx.with_field(
                    'duration', duration).info('Acquiring a lock was slow')
            return self

        duration = time.time() - start_time
        if (duration > threshold and not slow_warned):
            # Warn only once per acquisition attempt, identifying the
            # current holder to aid debugging of contended locks.
            db.add_event(self.objecttype, self.objectname,
                         'lock', 'acquire', None,
                         'Waiting for lock more than threshold')
            node, pid = self.get_holder()
            self.log_ctx.with_fields({
                'duration': duration,
                'threshold': threshold,
                'holder-pid': pid,
                'holder-node': node,
                'requesting-op': self.operation,
            }).info('Waiting for lock')
            slow_warned = True
        time.sleep(1)

    # Timed out: record the failure with holder details, then raise.
    duration = time.time() - start_time
    db.add_event(self.objecttype, self.objectname,
                 'lock', 'failed', None,
                 'Failed to acquire lock after %.02f seconds' % duration)
    node, pid = self.get_holder()
    self.log_ctx.with_fields({
        'duration': duration,
        'holder-pid': pid,
        'holder-node': node,
        'requesting-op': self.operation,
    }).info('Failed to acquire lock')
    raise exceptions.LockException(
        'Cannot acquire lock %s, timed out after %.02f seconds'
        % (self.name, self.timeout))
def get_object_lock(obj, ttl=60, timeout=ETCD_ATTEMPT_TIMEOUT,
                    relatedobjects=None, log_ctx=LOG, op=None):
    """Acquire a lock named after an object's unique label.

    Raises exceptions.LockException if the object cannot supply both a
    type and a name for the lock path.
    """
    obj_type, obj_name = obj.unique_label()
    if not obj_type or not obj_name:
        raise exceptions.LockException(
            'Could not derive lock name from %s' % obj)

    return get_lock(obj_type, None, obj_name, ttl=ttl, timeout=timeout,
                    relatedobjects=relatedobjects, log_ctx=log_ctx, op=op)
def get(self, locks, related_object):
    """Wrap some lock retries around the get."""
    # NOTE(mikal): this deliberately retries the lock for a long time
    # because the other option is failing instance start and fetching
    # an image can take an extremely long time. This still means that
    # for very large images you should probably pre-cache before
    # attempting a start.
    last_exc = None
    for _attempt in range(30):
        db.refresh_locks(locks)
        try:
            return self._get(locks, related_object)
        except exceptions.LockException as e:
            last_exc = e
            time.sleep(10)

    raise exceptions.LockException(
        'Failed to acquire image fetch lock after retries: %s' % last_exc)
def __exit__(self, _exception_type, _exception_value, _traceback):
    """Release the lock when leaving the context manager.

    Raises exceptions.LockException if the release fails.
    """
    released = self.release()
    if not released:
        raise exceptions.LockException(
            'Cannot release lock: %s' % self.name)