# NB: these method snippets assume the enclosing module's usual imports, along the
# lines of the following (exact module paths are an assumption, not shown here):
#   import time
#   from dynamo.dataformat import DatasetReplica, BlockReplica, Site, Group, OperationalError

def schedule_deletions(self, replica_list, operation_id, comments = ''): #override
    LOG.info('Ignoring deletion schedule of %d replicas (operation %d)', len(replica_list), operation_id)

    result = []

    for replica, block_replicas in replica_list:
        clone_replica = DatasetReplica(replica.dataset, replica.site)
        clone_replica.copy(replica)

        if block_replicas is None:
            result.append((clone_replica, None))
        else:
            clone_block_replicas = []

            for block_replica in block_replicas:
                clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
                clone_block_replica.copy(block_replica)
                clone_block_replica.last_update = int(time.time())

                clone_block_replicas.append(clone_block_replica)

            result.append((clone_replica, clone_block_replicas))

    return result
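# A minimal sketch of how a caller might consume the value returned by the deletion
# interface above (the consumer function itself is hypothetical, not part of the
# source): each element is a (DatasetReplica clone, detail) pair, where detail is
# None for a dataset-level deletion and a list of BlockReplica clones otherwise.
def summarize_deletions(result):
    for clone_replica, block_replicas in result:
        if block_replicas is None:
            LOG.debug('Dataset-level deletion: %s', clone_replica.dataset.name)
        else:
            LOG.debug('Block-level deletion: %s (%d blocks)', clone_replica.dataset.name, len(block_replicas))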
def schedule_copies(self, replica_list, operation_id, comments = ''): #override
    sites = set(r.site for r in replica_list)
    if len(sites) != 1:
        raise OperationalError('schedule_copies should be called with a list of replicas at a single site.')

    LOG.info('Scheduling copy of %d replicas to %s using RLFSM (operation %d)', len(replica_list), list(sites)[0], operation_id)

    result = []

    for replica in replica_list:
        # Function spec is to return clones (so that if a specific block fails to copy, we can return a dataset replica without the block)
        clone_replica = DatasetReplica(replica.dataset, replica.site)
        clone_replica.copy(replica)
        result.append(clone_replica)

        for block_replica in replica.block_replicas:
            LOG.debug('Subscribing files for %s', str(block_replica))

            if block_replica.file_ids is None:
                # Nothing to subscribe for this replica; skip it rather than abort.
                # (A bare return here would silently discard the clones built so far.)
                LOG.debug('No file to subscribe for %s', str(block_replica))
                continue

            all_files = block_replica.block.files
            missing_files = all_files - block_replica.files()

            for lfile in missing_files:
                self.rlfsm.subscribe_file(block_replica.site, lfile)

            clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
            clone_block_replica.copy(block_replica)
            clone_block_replica.last_update = int(time.time())
            clone_replica.block_replicas.add(clone_block_replica)

    # no external dependency - everything is a success
    return result
def schedule_deletions(self, replica_list, operation_id, comments = ''): #override
    sites = set(r.site for r, b in replica_list)
    if len(sites) != 1:
        raise OperationalError('schedule_deletions should be called with a list of replicas at a single site.')

    site = list(sites)[0]

    LOG.info('Scheduling deletion of %d replicas from %s using RLFSM (operation %d)', len(replica_list), site.name, operation_id)

    clones = []

    for dataset_replica, block_replicas in replica_list:
        if block_replicas is None:
            to_delete = dataset_replica.block_replicas
        else:
            to_delete = block_replicas

        for block_replica in to_delete:
            for lfile in block_replica.files():
                self.rlfsm.desubscribe_file(block_replica.site, lfile)

        # No external dependency -> all operations are successful

        clone_replica = DatasetReplica(dataset_replica.dataset, dataset_replica.site)
        clone_replica.copy(dataset_replica)

        if block_replicas is None:
            clones.append((clone_replica, None))
        else:
            clones.append((clone_replica, []))
            for block_replica in block_replicas:
                clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
                clone_block_replica.copy(block_replica)
                clone_block_replica.last_update = int(time.time())
                clones[-1][1].append(clone_block_replica)

    return clones
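# The only RLFSM calls these two methods rely on are subscribe_file (in
# schedule_copies above) and desubscribe_file (in schedule_deletions), both taking
# (site, file) as seen at the call sites. A hypothetical dry-run stand-in with the
# same interface, e.g. for exercising the scheduling logic without a real RLFSM:
class DryRunRLFSM(object):
    def subscribe_file(self, site, lfile):
        # record-only: a real RLFSM would queue a transfer of lfile to site
        LOG.debug('Would subscribe %s at %s', str(lfile), site.name)

    def desubscribe_file(self, site, lfile):
        # record-only: a real RLFSM would queue a deletion of lfile at site
        LOG.debug('Would desubscribe %s at %s', str(lfile), site.name)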
def schedule_copies(self, replica_list, operation_id, comments = ''): #override
    LOG.info('Ignoring copy schedule of %d replicas (operation %d)', len(replica_list), operation_id)

    result = []

    for replica in replica_list:
        clone_replica = DatasetReplica(replica.dataset, replica.site)
        clone_replica.copy(replica)
        result.append(clone_replica)

        for block_replica in replica.block_replicas:
            clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
            clone_block_replica.copy(block_replica)
            clone_block_replica.last_update = int(time.time())
            clone_replica.block_replicas.add(clone_block_replica)

    return result
def schedule_copies(self, replica_list, operation_id, comments = ''): #override
    sites = set(r.site for r in replica_list)
    if len(sites) != 1:
        raise OperationalError('schedule_copies should be called with a list of replicas at a single site.')

    LOG.info('Scheduling copy of %d replicas to %s using RLFSM (operation %d)', len(replica_list), list(sites)[0], operation_id)

    result = []

    for replica in replica_list:
        # Function spec is to return clones (so that if a specific block fails to copy, we can return a dataset replica without the block)
        clone_replica = DatasetReplica(replica.dataset, replica.site)
        clone_replica.copy(replica)
        result.append(clone_replica)

        for block_replica in replica.block_replicas:
            LOG.debug('Subscribing files for %s', str(block_replica))

            if block_replica.file_ids is None:
                # Nothing to subscribe for this replica; skip it rather than abort.
                LOG.debug('No file to subscribe for %s', str(block_replica))
                continue

            all_files = block_replica.block.files
            missing_files = all_files - block_replica.files()

            for lfile in missing_files:
                self.rlfsm.subscribe_file(block_replica.site, lfile)

            clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
            clone_block_replica.copy(block_replica)
            clone_block_replica.last_update = int(time.time())
            clone_replica.block_replicas.add(clone_block_replica)

    if not self._read_only:
        for clone_replica in result:
            if clone_replica.growing:
                self.mysql.query('INSERT INTO `phedex_transfer_reservations` (`operation_id`, `item`, `site`, `group`) VALUES (%s, %s, %s, %s)', operation_id, clone_replica.dataset.name, clone_replica.site.name, clone_replica.group.name)
            else:
                for block_replica in clone_replica.block_replicas:
                    self.mysql.query('INSERT INTO `phedex_transfer_reservations` (`operation_id`, `item`, `site`, `group`) VALUES (%s, %s, %s, %s)', operation_id, block_replica.block.full_name(), clone_replica.site.name, block_replica.group.name)

    # no external dependency - everything is a success
    return result
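# The INSERT above implies a bookkeeping table with at least the four columns it
# writes. A sketch of a compatible schema (an assumption; the project's actual DDL
# is not shown in this section):
#
#   CREATE TABLE `phedex_transfer_reservations` (
#     `operation_id` INT UNSIGNED NOT NULL,
#     `item` VARCHAR(512) NOT NULL,
#     `site` VARCHAR(64) NOT NULL,
#     `group` VARCHAR(64) NOT NULL,
#     KEY `operation` (`operation_id`)
#   );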
def schedule_deletions(self, replica_list, operation_id, comments = ''): #override
    sites = set(r.site for r, b in replica_list)
    if len(sites) != 1:
        raise OperationalError('schedule_deletions should be called with a list of replicas at a single site.')

    site = list(sites)[0]

    LOG.info('Scheduling deletion of %d replicas from %s using RLFSM (operation %d)', len(replica_list), site.name, operation_id)

    clones = []

    for dataset_replica, block_replicas in replica_list:
        if block_replicas is None:
            to_delete = dataset_replica.block_replicas
        else:
            to_delete = block_replicas

        for block_replica in to_delete:
            for lfile in block_replica.files():
                self.rlfsm.desubscribe_file(block_replica.site, lfile)

        # No external dependency -> all operations are successful

        clone_replica = DatasetReplica(dataset_replica.dataset, dataset_replica.site)
        clone_replica.copy(dataset_replica)

        if block_replicas is None:
            clones.append((clone_replica, None))
        else:
            clones.append((clone_replica, []))
            for block_replica in block_replicas:
                clone_block_replica = BlockReplica(block_replica.block, block_replica.site, block_replica.group)
                clone_block_replica.copy(block_replica)
                clone_block_replica.last_update = int(time.time())
                clones[-1][1].append(clone_block_replica)

    if not self._read_only:
        for clone_replica, block_replicas in clones:
            if block_replicas is None:
                self.mysql.query('INSERT INTO `phedex_deletion_reservations` (`operation_id`, `item`, `site`) VALUES (%s, %s, %s)', operation_id, clone_replica.dataset.name, clone_replica.site.name)
            else:
                for block_replica in block_replicas:
                    self.mysql.query('INSERT INTO `phedex_deletion_reservations` (`operation_id`, `item`, `site`) VALUES (%s, %s, %s)', operation_id, block_replica.block.full_name(), clone_replica.site.name)

    return clones
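# As with the transfer reservations, the deletion bookkeeping INSERT implies a
# three-column table. A compatible schema sketch (again an assumption):
#
#   CREATE TABLE `phedex_deletion_reservations` (
#     `operation_id` INT UNSIGNED NOT NULL,
#     `item` VARCHAR(512) NOT NULL,
#     `site` VARCHAR(64) NOT NULL,
#     KEY `operation` (`operation_id`)
#   );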
def schedule_deletions(self, replica_list, operation_id, comments = ''): #override
    sites = set(r.site for r, b in replica_list)
    if len(sites) != 1:
        raise OperationalError('schedule_deletions should be called with a list of replicas at a single site.')

    site = list(sites)[0]

    if site.storage_type == Site.TYPE_MSS and not self.allow_tape_deletion:
        LOG.warning('Deletion from MSS not allowed by configuration.')
        return []

    if self.allow_tape_deletion and self.auto_approval:
        LOG.warning('You cannot have auto-approved tape deletions. Set auto-approval to False.')
        return []

    # execute the deletions in two steps: one for dataset-level and one for block-level
    datasets = []
    blocks = []

    # maps used later for cloning
    # getting ugly here.. should come up with a better way of making clones
    replica_map = {}
    block_replica_map = {}

    for dataset_replica, block_replicas in replica_list:
        if block_replicas is None:
            datasets.append(dataset_replica.dataset)
        else:
            blocks.extend(br.block for br in block_replicas)

            replica_map[dataset_replica.dataset] = dataset_replica
            block_replica_map.update((br.block, br) for br in block_replicas)

    success = []

    deleted_datasets = self._run_deletion_request(operation_id, site, 'dataset', datasets, comments)

    for dataset in deleted_datasets:
        replica = DatasetReplica(dataset, site, growing = False, group = Group.null_group)
        success.append((replica, None))

    tmp_map = dict((dataset, []) for dataset in replica_map.iterkeys())

    deleted_blocks = self._run_deletion_request(operation_id, site, 'block', blocks, comments)

    for block in deleted_blocks:
        tmp_map[block.dataset].append(block)

    # block_list, not blocks, to avoid shadowing the request list built above
    for dataset, block_list in tmp_map.iteritems():
        replica = DatasetReplica(dataset, site)
        replica.copy(replica_map[dataset])

        success.append((replica, []))

        for block in block_list:
            block_replica = BlockReplica(block, site, Group.null_group)
            block_replica.copy(block_replica_map[block])
            block_replica.last_update = int(time.time())

            success[-1][1].append(block_replica)

    return success
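# _run_deletion_request is not shown in this section. From its two call sites it
# takes the operation id, the target site, the item level ('dataset' or 'block'),
# the list of items to delete, and the request comment, and returns the subset of
# items whose deletion the external system accepted. A hypothetical stand-in
# honoring that contract:
def _run_deletion_request(self, operation_id, site, level, items, comments):
    # accept everything; a real implementation would file a PhEDEx deletion
    # request and return only the approved items
    return list(items)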