def test_sign_url_gcs(self):
    """ CREDENTIAL: Sign a URL for Google Cloud Storage """
    # Unknown services/operations and missing or empty URLs must be rejected.
    for service, operation, url in (('fake-service', 'read', 'http://dummy'),
                                    ('gcs', 'transmogrify', 'http://dummy'),
                                    ('gcs', 'read', None),
                                    ('gcs', 'read', '')):
        assert_raises(UnsupportedOperation, get_signed_url, service, operation, url)

    # Expected signed URL per operation, for the fixed test service account
    # and an unlimited lifetime (Expires=0).
    expected = {
        'read': ('https://storage.googleapis.com/directory/file?GoogleAccessId=rucio-test@rucio-test'
                 '.iam.gserviceaccount.com&Expires=0&Signature=u9cBWowYX22sAyApH5YySD9h0m%2FbIPLHLgY'
                 '0Db%2BQ4a0wICQ2PZzUfTuHXQF8dUbMJG04VH90U5EMzYg3qSUGyfnp6Jptnvgivf7iSHepJsYhyAYSBGs'
                 'bvTOqf%2BXMQHR5VTh06G8WriZPV2OgSJ61c8qY7k8h0ju4bwcdDMFD2CT933KsnYSVatLN3EfORonLLZv'
                 'Ydgf0WCQjUcVKRv8zY65HJS6ZKoCjhOqNBJNlpI6uR54MhmLN2CJWch1MnLIdO6bKfDup%2Bzkt8e9Xe9S'
                 '8pTeva5cN8ZFlMkeCz7JvNkVJb1KPhI1XHPWyfuPUa2ALHh9wAD2yFSOU3cDiORFE6A%3D%3D'),
        'write': ('https://storage.googleapis.com/directory/file?GoogleAccessId=rucio-test@rucio-test'
                  '.iam.gserviceaccount.com&Expires=0&Signature=Gn%2FL0%2FjGkBIdpHZ9bKw7tvqRCdslC11gt'
                  'jbLk5AG2jA4Ywd6mTvOinUB%2BZxHY2I3XzEuMfyMnFj0vfXSemN6XmmcQkiQBhl6P3zr0GrOuO4y0xjKT'
                  'am1MijMKLKFS9pZ6BBYrFgwKcYUcGJmVpq0Fo%2Bl5pLovBKhJbi3RE0YbGTCDA5UEM6WuWLMcQiY8smfK'
                  '6EH9bW5tAEs70vOwNNPPUm%2FbcNKnR4z6jqThXw2mn375L02SRPx1qQ853sZKHng6O4ydm%2BSW8i7rb1'
                  '%2BnqImWDOdvmcLIZzc6x9l6b7ETOqSL2OqOCStpBHPzpQU0spgJS96IB09uGRQum1Ej2ui5g%3D%3D'),
        'delete': ('https://storage.googleapis.com/directory/file?GoogleAccessId=rucio-test@rucio-test'
                   '.iam.gserviceaccount.com&Expires=0&Signature=FVDNroX1epdTCv%2BC74o%2B8uWyvJXrqiIWg'
                   'kdcedaOoryhRMjuv%2FVdKecnhViY%2BGOP%2B0CoI1uFOHBz%2B%2Bm10U9A3i%2B1v7AZRN5L6nbbS%2'
                   'BJTk4oiSBMJ3FpNT9knbOVd4aSPdiBwfTybwpkWSzEb8cKQsqzrGZk4hVffipMOKkxj7UgMe%2F0DiwqyF'
                   'o3NZsey12b9TG2xPVCZ5mJdIvJY0E5KiqEGXVCVChEhecZEyP0cUxjs8xM%2BxhOJ%2BioPQzRsFwVKtVv'
                   'LXestniEGBMY8SY4UuthQVO1Kmq2hg30KcsgXpLzAFheK1tz0GunqPU7%2BYACZMuHj1Hp%2BTnvKNxVuJ'
                   '5MT5g%3D%3D'),
    }
    for operation, signed_url in expected.items():
        assert_equal(get_signed_url('gcs', operation, 'http://storage/directory/file', lifetime=None), signed_url)
def get_signed_url(account, appid, ip, rse, service, operation, url, lifetime):
    """
    Get a signed URL for a particular service and operation.

    The signed URL will be valid for 1 hour.

    :param account: Account identifier as a string.
    :param appid: The application identifier as a string.
    :param ip: IP address of the client as a string.
    :param rse: The name of the RSE to which the URL points.
    :param service: The service to authorise, currently only 'gcs'.
    :param operation: The operation to sign, either 'read', 'write', or 'delete'.
    :param url: The URL to sign.
    :param lifetime: Lifetime in seconds.
    :returns: Signed URL as a variable-length string.
    :raises AccessDenied: if the issuer account lacks the 'get_signed_url' permission.
    """
    # NOTE: appid and ip are part of the gateway signature but are not used
    # in the body below.
    kwargs = {'account': InternalAccount(account)}
    if not permission.has_permission(issuer=account, action='get_signed_url', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not get signed URL for rse=%s, service=%s, operation=%s, url=%s, lifetime=%s' % (account, rse, service, operation, url, lifetime))

    # look up RSE ID for name
    rse_id = get_rse_id(rse)

    # Delegate the actual signing to the core credential layer.
    return credential.get_signed_url(rse_id, service, operation, url, lifetime)
def get_signed_url(account, appid, ip, service, operation, url, lifetime, vo='def'):
    """
    Get a signed URL for a particular service and operation.

    The signed URL will be valid for 1 hour.

    :param account: Account identifier as a string.
    :param appid: The application identifier as a string.
    :param ip: IP address of the client as a string.
    :param service: The service to authorise, currently only 'gcs'.
    :param operation: The operation to sign, either 'read', 'write', or 'delete'.
    :param url: The URL to sign.
    :param lifetime: Lifetime in seconds.
    :param vo: The vo to act on.
    :returns: Signed URL as a variable-length string.
    :raises AccessDenied: if the issuer account lacks the 'get_signed_url' permission.
    """
    # NOTE: appid and ip are part of the gateway signature but are not used
    # in the body below.
    kwargs = {'account': account}
    if not permission.has_permission(
            issuer=account, vo=vo, action='get_signed_url', kwargs=kwargs):
        raise exception.AccessDenied(
            'Account %s can not get signed URL for service=%s, operation=%s, url=%s, lifetime=%s'
            % (account, service, operation, url, lifetime))

    # Delegate the actual signing to the core credential layer.
    return credential.get_signed_url(service, operation, url, lifetime)
def get_signed_url_server(rse, service, op, url, vo='def'):
    """Resolve the RSE name to its id and return a signed URL for *op* on *url*."""
    # Imported lazily, matching the original implementation.
    from rucio.core.credential import get_signed_url
    from rucio.core.rse import get_rse_id

    resolved_rse_id = get_rse_id(rse=rse, vo=vo)
    return get_signed_url(resolved_rse_id, service, op, url)
def delete_from_storage(replicas, prot, rse_info, staging_areas, auto_exclude_threshold, logger=logging.log):
    """
    Physically delete the given replicas from the storage of one RSE.

    :param replicas: List of replica dicts; each must provide 'scope', 'name',
                     'bytes' and 'pfn'.
    :param prot: Instantiated protocol object used for the deletions.
    :param rse_info: RSE settings dict providing at least 'rse', 'id' and 'sign_url'.
    :param staging_areas: Collection of RSE ids that are staging areas; replicas
                          there are only removed from the catalogue, not deleted.
    :param auto_exclude_threshold: Number of NOACCESS failures after which the
                                   RSE is temporarily excluded from deletion.
    :param logger: logging.log-compatible callable for all messages.
    :returns: List of {'scope', 'name'} dicts for replicas whose catalogue
              entries can now be removed by the caller.
    """
    deleted_files = []
    rse_name = rse_info['rse']
    rse_id = rse_info['id']
    noaccess_attempts = 0
    # For the 'globus' scheme, deletions are collected and issued in one bulk call.
    pfns_to_bulk_delete = []
    try:
        prot.connect()
        for replica in replicas:
            # Physical deletion
            try:
                # Base payload for the 'deletion-*' messages emitted below.
                deletion_dict = {'scope': replica['scope'].external,
                                 'name': replica['name'],
                                 'rse': rse_name,
                                 'file-size': replica['bytes'],
                                 'bytes': replica['bytes'],
                                 'url': replica['pfn'],
                                 'protocol': prot.attributes['scheme']}
                if replica['scope'].vo != 'def':
                    deletion_dict['vo'] = replica['scope'].vo
                logger(logging.DEBUG, 'Deletion ATTEMPT of %s:%s as %s on %s', replica['scope'], replica['name'], replica['pfn'], rse_name)
                start = time.time()
                # For STAGING RSEs, no physical deletion
                if rse_id in staging_areas:
                    logger(logging.WARNING, 'Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion', replica['scope'], replica['name'], replica['pfn'], rse_name)
                    deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                    continue

                if replica['pfn']:
                    pfn = replica['pfn']
                    # sign the URL if necessary
                    if prot.attributes['scheme'] == 'https' and rse_info['sign_url'] is not None:
                        pfn = get_signed_url(rse_id, rse_info['sign_url'], 'delete', pfn)
                    if prot.attributes['scheme'] == 'globus':
                        # Deferred: deleted in one bulk_delete call after the loop.
                        pfns_to_bulk_delete.append(replica['pfn'])
                    else:
                        prot.delete(pfn)
                else:
                    # No PFN could be resolved; only the catalogue entry is cleaned up.
                    logger(logging.WARNING, 'Deletion UNAVAILABLE of %s:%s as %s on %s', replica['scope'], replica['name'], replica['pfn'], rse_name)
                monitor.record_timer('daemons.reaper.delete.{scheme}.{rse}', (time.time() - start) * 1000, labels={'scheme': prot.attributes['scheme'], 'rse': rse_name})
                duration = time.time() - start
                deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                deletion_dict['duration'] = duration
                add_message('deletion-done', deletion_dict)
                logger(logging.INFO, 'Deletion SUCCESS of %s:%s as %s on %s in %.2f seconds', replica['scope'], replica['name'], replica['pfn'], rse_name, duration)
            except SourceNotFound:
                # Already gone from storage: treat as deleted so the catalogue is cleaned up.
                duration = time.time() - start
                err_msg = 'Deletion NOTFOUND of %s:%s as %s on %s in %.2f seconds' % (replica['scope'], replica['name'], replica['pfn'], rse_name, duration)
                logger(logging.WARNING, '%s', err_msg)
                deletion_dict['reason'] = 'File Not Found'
                deletion_dict['duration'] = duration
                add_message('deletion-not-found', deletion_dict)
                deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
            except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                duration = time.time() - start
                logger(logging.WARNING, 'Deletion NOACCESS of %s:%s as %s on %s: %s in %.2f', replica['scope'], replica['name'], replica['pfn'], rse_name, str(error), duration)
                deletion_dict['reason'] = str(error)
                deletion_dict['duration'] = duration
                add_message('deletion-failed', deletion_dict)
                # Repeated NOACCESS failures temporarily exclude the whole RSE.
                noaccess_attempts += 1
                if noaccess_attempts >= auto_exclude_threshold:
                    logger(logging.INFO, 'Too many (%d) NOACCESS attempts for %s. RSE will be temporarly excluded.', noaccess_attempts, rse_name)
                    REGION.set('temporary_exclude_%s' % rse_id, True)
                    labels = {'rse': rse_name}
                    EXCLUDED_RSE_GAUGE.labels(**labels).set(1)
                    break
            except Exception as error:
                # Unexpected failure for this replica: report and continue with the next one.
                duration = time.time() - start
                logger(logging.CRITICAL, 'Deletion CRITICAL of %s:%s as %s on %s in %.2f seconds : %s', replica['scope'], replica['name'], replica['pfn'], rse_name, duration, str(traceback.format_exc()))
                deletion_dict['reason'] = str(error)
                deletion_dict['duration'] = duration
                add_message('deletion-failed', deletion_dict)

        if pfns_to_bulk_delete and prot.attributes['scheme'] == 'globus':
            logger(logging.DEBUG, 'Attempting bulk delete on RSE %s for scheme %s', rse_name, prot.attributes['scheme'])
            prot.bulk_delete(pfns_to_bulk_delete)

    except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
        # Connection-level failure: report every replica as failed and exclude the RSE.
        for replica in replicas:
            logger(logging.WARNING, 'Deletion NOACCESS of %s:%s as %s on %s: %s', replica['scope'], replica['name'], replica['pfn'], rse_name, str(error))
            payload = {'scope': replica['scope'].external, 'name': replica['name'], 'rse': rse_name, 'file-size': replica['bytes'], 'bytes': replica['bytes'], 'url': replica['pfn'], 'reason': str(error), 'protocol': prot.attributes['scheme']}
            if replica['scope'].vo != 'def':
                payload['vo'] = replica['scope'].vo
            add_message('deletion-failed', payload)
        logger(logging.INFO, 'Cannot connect to %s. RSE will be temporarly excluded.', rse_name)
        REGION.set('temporary_exclude_%s' % rse_id, True)
        labels = {'rse': rse_name}
        EXCLUDED_RSE_GAUGE.labels(**labels).set(1)
    finally:
        prot.close()
    return deleted_files
def reaper(rses, worker_number=0, child_number=0, total_children=1, chunk_size=100, once=False, greedy=False, scheme=None, delay_seconds=0):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param child_number: The child number.
    :param total_children: The total number of children created per worker.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param scheme: Force the reaper to use a particular protocol, e.g., mock.
    :param delay_seconds: Only consider replicas last updated more than this many seconds ago.
    """
    logging.info('Starting Reaper: Worker %(worker_number)s, '
                 'child %(child_number)s will work on RSEs: ' % locals() + ', '.join([rse['rse'] for rse in rses]))

    pid = os.getpid()
    thread = threading.current_thread()
    hostname = socket.gethostname()
    executable = ' '.join(sys.argv)
    # Generate a hash just for the subset of RSEs, so heartbeats of children
    # working on different subsets do not collide.
    rse_names = [rse['rse'] for rse in rses]
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rse_names)).encode()).hexdigest()
    sanity_check(executable=None, hostname=hostname)

    # Per-RSE backoff: maps RSE id -> datetime before which the RSE is skipped.
    nothing_to_do = {}

    while not GRACEFUL_STOP.is_set():
        try:
            # heartbeat
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
            checkpoint_time = datetime.datetime.now()
            # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))

            max_deleting_rate = 0
            for rse in sort_rses(rses):
                try:
                    # Refresh the heartbeat at most once per minute while iterating RSEs.
                    if checkpoint_time + datetime.timedelta(minutes=1) < datetime.datetime.now():
                        heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
                        # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))
                        checkpoint_time = datetime.datetime.now()

                    # Skip RSEs still inside their backoff window.
                    if rse['id'] in nothing_to_do and nothing_to_do[rse['id']] > datetime.datetime.now():
                        continue
                    logging.info('Reaper %s-%s: Running on RSE %s %s', worker_number, child_number, rse['rse'], nothing_to_do.get(rse['id']))

                    rse_info = rsemgr.get_rse_info(rse_id=rse['id'])
                    rse_protocol = rse_core.get_rse_protocols(rse_id=rse['id'])
                    if not rse_protocol['availability_delete']:
                        logging.info('Reaper %s-%s: RSE %s is not available for deletion', worker_number, child_number, rse_info['rse'])
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        continue

                    # Temporary hack to force gfal for deletion
                    for protocol in rse_info['protocols']:
                        if protocol['impl'] == 'rucio.rse.protocols.srm.Default' or protocol['impl'] == 'rucio.rse.protocols.gsiftp.Default':
                            protocol['impl'] = 'rucio.rse.protocols.gfal.Default'

                    needed_free_space, max_being_deleted_files = None, 100
                    needed_free_space_per_child = None
                    if not greedy:
                        # Non-greedy mode only deletes what is needed to reach the free-space target.
                        max_being_deleted_files, needed_free_space, used, free = __check_rse_usage(rse_id=rse['id'])
                        logging.info('Reaper %(worker_number)s-%(child_number)s: Space usage for RSE %(rse)s - max_being_deleted_files: %(max_being_deleted_files)s, needed_free_space: %(needed_free_space)s, used: %(used)s, free: %(free)s' % locals())
                        if needed_free_space <= 0:
                            needed_free_space, needed_free_space_per_child = 0, 0
                            logging.info('Reaper %s-%s: free space is above minimum limit for %s', worker_number, child_number, rse['rse'])
                        else:
                            if total_children and total_children > 0:
                                needed_free_space_per_child = needed_free_space / float(total_children)

                    start = time.time()
                    with monitor.record_timer_block('reaper.list_unlocked_replicas'):
                        replicas = list_unlocked_replicas(rse_id=rse['id'],
                                                          bytes=needed_free_space_per_child,
                                                          limit=max_being_deleted_files,
                                                          worker_number=child_number,
                                                          total_workers=total_children,
                                                          delay_seconds=delay_seconds)
                    logging.debug('Reaper %s-%s: list_unlocked_replicas on %s for %s bytes in %s seconds: %s replicas', worker_number, child_number, rse['rse'], needed_free_space_per_child, time.time() - start, len(replicas))

                    if not replicas:
                        # Nothing to delete here: back off for 30 minutes.
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        logging.info('Reaper %s-%s: No replicas to delete %s. The next check will occur at %s', worker_number, child_number, rse['rse'], nothing_to_do[rse['id']])
                        continue

                    prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
                    for files in chunks(replicas, chunk_size):
                        logging.debug('Reaper %s-%s: Running on : %s', worker_number, child_number, str(files))
                        try:
                            # Mark the whole chunk as BEING_DELETED before touching storage.
                            update_replicas_states(replicas=[dict(list(replica.items()) + [('state', ReplicaState.BEING_DELETED), ('rse_id', rse['id'])]) for replica in files], nowait=True)

                            # Resolve the PFN for each replica; unresolved ones get pfn=None
                            # and are skipped by the physical deletion below.
                            for replica in files:
                                try:
                                    replica['pfn'] = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
                                                                               lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
                                                                               operation='delete',
                                                                               scheme=scheme).values())[0])
                                except (ReplicaUnAvailable, ReplicaNotFound) as error:
                                    err_msg = 'Failed to get pfn UNAVAILABLE replica %s:%s on %s with error %s' % (replica['scope'], replica['name'], rse['rse'], str(error))
                                    logging.warning('Reaper %s-%s: %s', worker_number, child_number, err_msg)
                                    replica['pfn'] = None

                            monitor.record_counter(counters='reaper.deletion.being_deleted', delta=len(files))

                            try:
                                deleted_files = []
                                prot.connect()
                                for replica in files:
                                    try:
                                        # Base payload for the 'deletion-*' messages of this replica.
                                        deletion_dict = {'scope': replica['scope'].external,
                                                         'name': replica['name'],
                                                         'rse': rse_info['rse'],
                                                         'rse_id': rse_info['id'],
                                                         'file-size': replica['bytes'],
                                                         'bytes': replica['bytes'],
                                                         'url': replica['pfn'],
                                                         'protocol': prot.attributes['scheme']}
                                        if replica['scope'].vo != 'def':
                                            deletion_dict['vo'] = replica['scope'].vo
                                        logging.info('Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        start = time.time()
                                        if rse['staging_area'] or rse['rse'].endswith("STAGING"):
                                            # Staging areas: clean the catalogue only, no physical deletion.
                                            logging.warning('Reaper %s-%s: Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        else:
                                            if replica['pfn']:
                                                pfn = replica['pfn']
                                                # sign the URL if necessary
                                                if prot.attributes['scheme'] == 'https' and rse_info['sign_url'] is not None:
                                                    pfn = get_signed_url(rse['id'], rse_info['sign_url'], 'delete', pfn)
                                                prot.delete(pfn)
                                            else:
                                                logging.warning('Reaper %s-%s: Deletion UNAVAILABLE of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                            monitor.record_timer('daemons.reaper.delete.%s.%s' % (prot.attributes['scheme'], rse['rse']), (time.time() - start) * 1000)
                                        duration = time.time() - start
                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                                        deletion_dict['duration'] = duration
                                        add_message('deletion-done', deletion_dict)
                                        logging.info('Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], duration)
                                    except SourceNotFound:
                                        # Already gone from storage: treat as deleted so the catalogue is cleaned up.
                                        err_msg = 'Deletion NOTFOUND of %s:%s as %s on %s' % (replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        logging.warning(err_msg)
                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                                        if replica['state'] == ReplicaState.AVAILABLE:
                                            deletion_dict['reason'] = str(err_msg)
                                            add_message('deletion-failed', deletion_dict)
                                    except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                        logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                        deletion_dict['reason'] = str(error)
                                        add_message('deletion-failed', deletion_dict)
                                    except Exception as error:
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                                        deletion_dict['reason'] = str(error)
                                        add_message('deletion-failed', deletion_dict)
                                    except:
                                        # NOTE: deliberate best-effort catch-all so one replica cannot
                                        # kill the whole chunk; only non-Exception BaseExceptions reach here.
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                            except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                # Connection-level failure: report every replica in the chunk and
                                # stop working on this RSE.
                                for replica in files:
                                    logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                    payload = {'scope': replica['scope'].external,
                                               'name': replica['name'],
                                               'rse': rse_info['rse'],
                                               'rse_id': rse_info['id'],
                                               'file-size': replica['bytes'],
                                               'bytes': replica['bytes'],
                                               'url': replica['pfn'],
                                               'reason': str(error),
                                               'protocol': prot.attributes['scheme']}
                                    if replica['scope'].vo != 'def':
                                        # FIX: was "deletion_dict['vo'] = ...", which mutated the wrong
                                        # dict (stale, or unbound if connect() failed) instead of the
                                        # payload being sent; matches delete_from_storage's handling.
                                        payload['vo'] = replica['scope'].vo
                                    add_message('deletion-failed', payload)
                                break
                            finally:
                                prot.close()

                            start = time.time()
                            with monitor.record_timer_block('reaper.delete_replicas'):
                                delete_replicas(rse_id=rse['id'], files=deleted_files)
                            logging.debug('Reaper %s-%s: delete_replicas successes %s %s %s', worker_number, child_number, rse['rse'], len(deleted_files), time.time() - start)
                            monitor.record_counter(counters='reaper.deletion.done', delta=len(deleted_files))

                        except DatabaseException as error:
                            logging.warning('Reaper %s-%s: DatabaseException %s', worker_number, child_number, str(error))
                        except UnsupportedOperation as error:
                            logging.warning('Reaper %s-%s: UnsupportedOperation %s', worker_number, child_number, str(error))
                        except:
                            # Best-effort: log and carry on with the next chunk.
                            logging.critical(traceback.format_exc())

                except RSENotFound as error:
                    logging.warning('Reaper %s-%s: RSE not found %s', worker_number, child_number, str(error))
                except:
                    # Best-effort: log and carry on with the next RSE.
                    logging.critical(traceback.format_exc())

            if once:
                break

            time.sleep(1)
        except DatabaseException as error:
            logging.warning('Reaper: %s', str(error))
        except:
            # Keep the daemon alive on unexpected top-level failures.
            logging.critical(traceback.format_exc())

    die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
    logging.info('Graceful stop requested')
    logging.info('Graceful stop done')
    return
def delete_from_storage(replicas, prot, rse_info, staging_areas, prepend_str):
    """
    Physically delete the given replicas from the storage of one RSE.

    :param replicas: List of replica dicts; each must provide 'scope', 'name',
                     'bytes', 'pfn' and 'state'.
    :param prot: Instantiated protocol object used for the deletions.
    :param rse_info: RSE settings dict providing at least 'rse', 'id' and 'sign_url'.
    :param staging_areas: Collection of RSE ids that are staging areas; replicas
                          there are only removed from the catalogue, not deleted.
    :param prepend_str: Prefix added to every log message.
    :returns: List of {'scope', 'name'} dicts for replicas whose catalogue
              entries can now be removed by the caller.
    """
    deleted_files = []
    rse_name = rse_info['rse']
    rse_id = rse_info['id']
    try:
        prot.connect()
        for replica in replicas:
            # Physical deletion
            try:
                # Base payload for the 'deletion-*' messages emitted below.
                deletion_dict = {'scope': replica['scope'].external,
                                 'name': replica['name'],
                                 'rse': rse_name,
                                 'file-size': replica['bytes'],
                                 'bytes': replica['bytes'],
                                 'url': replica['pfn'],
                                 'protocol': prot.attributes['scheme']}
                logging.info('%s Deletion ATTEMPT of %s:%s as %s on %s', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name)
                start = time.time()
                # For STAGING RSEs, no physical deletion
                if rse_id in staging_areas:
                    logging.warning('%s Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name)
                    deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                    continue

                if replica['pfn']:
                    pfn = replica['pfn']
                    # sign the URL if necessary
                    # NOTE(review): this variant calls get_signed_url(sign_url, 'delete', pfn)
                    # without an RSE id, unlike the other call sites in this file — confirm
                    # against the signature actually imported here.
                    if prot.attributes['scheme'] == 'https' and rse_info['sign_url'] is not None:
                        pfn = get_signed_url(rse_info['sign_url'], 'delete', pfn)
                    prot.delete(pfn)
                else:
                    # No PFN could be resolved; only the catalogue entry is cleaned up.
                    logging.warning('%s Deletion UNAVAILABLE of %s:%s as %s on %s', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name)
                monitor.record_timer('daemons.reaper.delete.%s.%s' % (prot.attributes['scheme'], rse_name), (time.time() - start) * 1000)
                duration = time.time() - start
                deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                deletion_dict['duration'] = duration
                add_message('deletion-done', deletion_dict)
                logging.info('%s Deletion SUCCESS of %s:%s as %s on %s in %s seconds', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name, duration)
            except SourceNotFound:
                # Already gone from storage: treat as deleted so the catalogue is cleaned up.
                err_msg = '%s Deletion NOTFOUND of %s:%s as %s on %s' % (prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name)
                logging.warning(err_msg)
                deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                if replica['state'] == ReplicaState.AVAILABLE:
                    deletion_dict['reason'] = str(err_msg)
                    add_message('deletion-failed', deletion_dict)
            except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                logging.warning('%s Deletion NOACCESS of %s:%s as %s on %s: %s', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name, str(error))
                deletion_dict['reason'] = str(error)
                add_message('deletion-failed', deletion_dict)
            except Exception as error:
                # Unexpected failure for this replica: report and continue with the next one.
                logging.critical('%s Deletion CRITICAL of %s:%s as %s on %s: %s', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name, str(traceback.format_exc()))
                deletion_dict['reason'] = str(error)
                add_message('deletion-failed', deletion_dict)
    except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
        # Connection-level failure: report every replica as failed.
        for replica in replicas:
            logging.warning('%s Deletion NOACCESS of %s:%s as %s on %s: %s', prepend_str, replica['scope'], replica['name'], replica['pfn'], rse_name, str(error))
            add_message('deletion-failed', {'scope': replica['scope'].external,
                                            'name': replica['name'],
                                            'rse': rse_name,
                                            'file-size': replica['bytes'],
                                            'bytes': replica['bytes'],
                                            'url': replica['pfn'],
                                            'reason': str(error),
                                            'protocol': prot.attributes['scheme']})
    finally:
        prot.close()
    return deleted_files