class TransferRequestList(WebModule):
    """
    Listing of a single transfer request: map a PhEDEx request id to the
    internal operation id and return the matching copy request in PhEDEx style.
    """

    def __init__(self, config):
        WebModule.__init__(self, config)

        self.copy_manager = CopyRequestManager()
        self.copy_manager.set_read_only()
        self.mysql_hist = HistoryDatabase(config.get('history', None))

    def run(self, caller, request, inventory):
        if 'request' not in request:
            return {'request': []}

        req_id = int(request['request'])

        # req_id is cast to int above, so the string interpolation is injection-safe
        sql_line = 'select operation_id from phedex_requests as pr where pr.id = ' + str(req_id)
        LOG.info(sql_line)

        dbRequests = self.mysql_hist.db.query(sql_line)
        for line in dbRequests:
            req_id = int(line)
            break

        req_hash = self.copy_manager.get_requests(request_id = req_id)
        if req_id not in req_hash:
            return {'request': []}

        req_obj = req_hash[req_id]
        request_by = {'name': req_obj.user, 'username': req_obj.user, 'dn': req_obj.user_dn}

        destinations = []
        sites = req_obj.find_sites(inventory)
        for site_obj in sites:
            node_json = []
            node_json.append({'se': site_obj.host, 'name': site_obj.name, 'id': site_obj.id,
                              'decided_by': {'time_decided': req_obj.last_request, 'decision': 'y', 'dn': req_obj.user_dn}})
            destinations.append(node_json)

        datasets = req_obj.find_items(inventory)
        all_bytes = 0
        all_files = 0
        dset_part = []
        for dset_name in datasets:
            dset_obj = datasets[dset_name]
            dset_part.append({'bytes': dset_obj.size, 'files': dset_obj.num_files, 'name': dset_name, 'id': dset_obj.id})
            all_bytes += dset_obj.size
            all_files += dset_obj.num_files

        data_part = {'bytes': all_bytes, 'files': all_files, 'time_create': req_obj.first_request,
                     'group': req_obj.group, 'dbs': {'dataset': dset_part}}

        # time_start is None so that it serializes to JSON null, not the string 'null'
        return {'request': [{'priority': 'low', 'time_start': None, 'move': 'n', 'id': req_id,
                             'data': data_part, 'requested_by': request_by, 'destinations': destinations}]}
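# Illustrative only: the approximate JSON shape that TransferRequestList.run()
# returns for one matched request. Every value below (ids, user, DN, site,
# dataset name, sizes, timestamps) is a made-up placeholder; the key layout
# mirrors the code above and is modeled on, but not verified against, the
# PhEDEx transferrequests schema.
EXAMPLE_TRANSFER_REQUEST_RESPONSE = {
    'request': [{
        'priority': 'low',
        'time_start': None,
        'move': 'n',
        'id': 42,
        'data': {
            'bytes': 2000000000,
            'files': 20,
            'time_create': 1500000000,
            'group': 'AnalysisOps',
            'dbs': {'dataset': [{'bytes': 2000000000, 'files': 20,
                                 'name': '/Example/Dataset/TIER', 'id': 1}]}
        },
        'requested_by': {'name': 'jdoe', 'username': 'jdoe', 'dn': '/DC=ch/CN=jdoe'},
        'destinations': [[{'se': 'se01.example.org', 'name': 'T2_XX_Example', 'id': 7,
                           'decided_by': {'time_decided': 1500000000,
                                          'decision': 'y', 'dn': '/DC=ch/CN=jdoe'}}]]
    }]
}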
class CopyRequestsHandler(BaseHandler):
    """Process direct transfer requests made to the registry."""

    def __init__(self, config):
        BaseHandler.__init__(self, 'DirectRequests')

        registry_config = Configuration(config.registry)
        registry_config['reuse_connection'] = True # need to work with table locks

        self.request_manager = CopyRequestManager(config.get('manager', None))

        # maximum size that can be requested
        self.max_size = config.max_size * 1.e+12

        # convert block-level requests to dataset-level if requested size is greater than
        # dataset size * block_request_max
        self.block_request_max = config.block_request_max

        # list of group names from which ownership of blocks can be taken away
        self.overwritten_groups = config.get('overwritten_groups', [])

        self.activated_requests = []

    def set_read_only(self, value = True): #override
        self._read_only = value
        self.request_manager.set_read_only(value)

    def get_requests(self, inventory, policy): #override
        """
        1. Request all active transfers in new state (these were not queued in the last cycle)
        2. Find all transfer requests with status new.
        3. Decide whether to accept the request. Set status accordingly.
        4. Find the destinations if wildcard was used.
        """

        partition = inventory.partitions[policy.partition_name]

        overwritten_groups = [inventory.groups[name] for name in self.overwritten_groups]

        self.activated_requests = []

        # full list of blocks to be proposed to Dealer
        blocks_to_propose = {} # {site: {dataset: set of blocks}}

        now = int(time.time())

        # Re-request new actions within activated requests

        self.request_manager.lock()

        active_requests = self.request_manager.get_requests(statuses = [Request.ST_ACTIVATED])

        activation_list = []

        for request in active_requests.itervalues():
            updated = False
            to_be_activated = False

            for action in request.actions:
                if action.status != RequestAction.ST_NEW:
                    continue

                try:
                    site = inventory.sites[action.site]
                except KeyError:
                    action.status = RequestAction.ST_FAILED
                    action.last_update = now
                    updated = True
                    continue

                try:
                    dataset_name, block_name = Block.from_full_name(action.item)
                except ObjectError:
                    # action.item is (supposed to be) a dataset name
                    try:
                        dataset = inventory.datasets[action.item]
                    except KeyError:
                        action.status = RequestAction.ST_FAILED
                        action.last_update = now
                        updated = True
                        continue

                    existing_replica = site.find_dataset_replica(dataset)
                    if existing_replica is not None:
                        if existing_replica.is_complete():
                            action.status = RequestAction.ST_COMPLETED
                        else:
                            # it was queued by someone
                            action.status = RequestAction.ST_QUEUED

                        action.last_update = now
                        updated = True
                    else:
                        activation_list.append((dataset, site))
                        to_be_activated = True

                else:
                    # action.item is a block name
                    try:
                        dataset = inventory.datasets[dataset_name]
                    except KeyError:
                        action.status = RequestAction.ST_FAILED
                        action.last_update = now
                        updated = True
                        continue

                    block = dataset.find_block(block_name)

                    if block is None:
                        action.status = RequestAction.ST_FAILED
                        action.last_update = now
                        updated = True
                        continue

                    existing_replica = block.find_replica(site)

                    if existing_replica is not None:
                        if existing_replica.is_complete():
                            action.status = RequestAction.ST_COMPLETED
                        else:
                            action.status = RequestAction.ST_QUEUED

                        action.last_update = now
                        updated = True
                    else:
                        activation_list.append((block, site))
                        to_be_activated = True

            if updated:
                self.request_manager.update_request(request)

            if to_be_activated:
                self.activated_requests.append(request)

        self.request_manager.unlock()

        for item, site in activation_list:
            try:
                site_blocks = blocks_to_propose[site]
            except KeyError:
                site_blocks = blocks_to_propose[site] = {}

            if type(item) is Dataset:
                site_blocks[item] = set(item.blocks)
            else:
                dataset = item.dataset
                try:
                    blocks = site_blocks[dataset]
                except KeyError:
                    blocks = site_blocks[dataset] = set()

                blocks.add(item)

        ## deal with new requests

        self.request_manager.lock()

        new_requests = self.request_manager.get_requests(statuses = [Request.ST_NEW])

        def reject(request, reason):
            request.status = Request.ST_REJECTED
            request.reject_reason = reason
            self.request_manager.update_request(request)

        for request in new_requests.itervalues():
            try:
                group = inventory.groups[request.group]
            except KeyError:
                reject(request, 'Invalid group name %s' % request.group)
                continue

            invalid_items = []
            datasets = request.find_items(inventory, invalid_items)
            sites = filter(lambda s: s in policy.target_sites, request.find_sites(inventory))

            if len(invalid_items) != 0:
                reject(request, 'Invalid item names: [%s]' % ','.join(invalid_items))
                continue

            if len(sites) == 0:
                reject(request, 'Target sites not available for transfers')
                continue

            # convert to DealerRequests

            proto_dealer_requests = []

            # process the items list
            for dataset, blocks in datasets.iteritems():
                if blocks is None:
                    if dataset.size > self.max_size:
                        reject(request, 'Dataset %s is too large (>%.0f TB)' % (dataset.name, self.max_size * 1.e-12))
                        break

                    item = dataset
                else:
                    total_size = sum(b.size for b in blocks)
                    if total_size > self.max_size:
                        reject(request, 'Request size for %s too large (>%.0f TB)' % (dataset.name, self.max_size * 1.e-12))
                        break

                    if total_size > float(dataset.size) * self.block_request_max:
                        # if the total size of requested blocks is large enough, just copy the dataset
                        # covers the case where we actually have the full list of blocks (if block_request_max is less than 1)
                        item = dataset
                    else:
                        item = list(blocks)

                proto_dealer_requests.append(DealerRequest(item, group = group))

            if request.status == Request.ST_REJECTED:
                continue

            # list of (item, site) to be activated (not necessarily proposed to dealer - there can be another request for the same item-site)
            activation_list = []
            # list of dealer proposals
            new_dealer_requests = []

            # find destinations (request.n times) for each item
            for proto_request in proto_dealer_requests:
                # try to make a dealer request for all requests, except when there is a full copy of the item

                if request.n == 0:
                    # make one copy at each site
                    for destination in sites:
                        dealer_request = DealerRequest(proto_request.item(), destination = destination)

                        if dealer_request.item_already_exists() == 2:
                            # nothing to do for this one
                            continue

                        rejection_reason = policy.check_destination(dealer_request, partition)
                        if rejection_reason is not None:
                            reject(request, 'Cannot copy %s to %s' % (dealer_request.item_name(), destination.name))
                            break

                        new_dealer_requests.append(dealer_request)

                    if request.status == Request.ST_REJECTED:
                        break

                else:
                    # total of n copies
                    candidate_sites = []
                    num_new = request.n

                    # bring sites where the item already exists first (may want to just "flip" the ownership)
                    sites_and_existence = []
                    for destination in sites:
                        exists = proto_request.item_already_exists(destination) # 0, 1, or 2
                        if exists != 0:
                            sites_and_existence.insert(0, (destination, exists))
                        else:
                            sites_and_existence.append((destination, exists))

                    for destination, exists in sites_and_existence:
                        if num_new == 0:
                            break

                        dealer_request = DealerRequest(proto_request.item(), destination = destination)

                        # copies proposed by other requests -> just activate
                        try:
                            proposed_blocks = blocks_to_propose[destination][dealer_request.dataset]
                        except KeyError:
                            pass
                        else:
                            if dealer_request.blocks is not None:
                                if set(dealer_request.blocks) <= proposed_blocks:
                                    num_new -= 1
                                    for block in dealer_request.blocks:
                                        activation_list.append((block.full_name(), dealer_request.destination.name, now))
                                    continue
                            else:
                                if dealer_request.dataset.blocks == proposed_blocks:
                                    num_new -= 1
                                    activation_list.append((dealer_request.item_name(), dealer_request.destination.name, now))
                                    continue

                        # if the item already exists, it's a complete copy - don't activate, don't propose
                        if exists == 2:
                            num_new -= 1
                        elif exists == 1:
                            # if the current group can be overwritten, make a request
                            # otherwise skip
                            single_owner = dealer_request.item_owned_by() # None if owned by multiple groups
                            if single_owner is not None and single_owner in overwritten_groups:
                                new_dealer_requests.append(dealer_request)
                                num_new -= 1
                        else:
                            candidate_sites.append(destination)

                    for icopy in range(num_new):
                        dealer_request = DealerRequest(proto_request.item())

                        # pick a destination randomly (weighted by available space)
                        policy.find_destination_for(dealer_request, partition, candidates = candidate_sites)

                        if dealer_request.destination is None:
                            # if any item cannot be assigned all num_new destinations, reject the request
                            reject(request, 'Destination %d for %s not available' % (icopy, dealer_request.item_name()))
                            break

                        candidate_sites.remove(dealer_request.destination)
                        new_dealer_requests.append(dealer_request)

                # if request.n == 0, else

                if request.status == Request.ST_REJECTED:
                    break

            # for each item in request

            if request.status == Request.ST_REJECTED:
                continue

            if len(new_dealer_requests) == 0 and len(activation_list) == 0:
                # nothing to do
                request.status = Request.ST_COMPLETED
                self.request_manager.update_request(request)
                continue

            # finally add to the returned requests
            for dealer_request in new_dealer_requests:
                try:
                    site_blocks = blocks_to_propose[dealer_request.destination]
                except KeyError:
                    site_blocks = blocks_to_propose[dealer_request.destination] = {}

                if dealer_request.blocks is not None:
                    try:
                        blocks = site_blocks[dealer_request.dataset]
                    except KeyError:
                        blocks = site_blocks[dealer_request.dataset] = set()

                    blocks.update(dealer_request.blocks)

                    for block in dealer_request.blocks:
                        activation_list.append((block.full_name(), dealer_request.destination.name, now))
                else:
                    site_blocks[dealer_request.dataset] = set(dealer_request.dataset.blocks)
                    activation_list.append((dealer_request.item_name(), dealer_request.destination.name, now))

            # create actions and set request status to ACTIVATED
            request.activate(activation_list)
            self.request_manager.update_request(request)
            self.activated_requests.append(request)

        self.request_manager.unlock()

        # throw away all the DealerRequest objects we've been using and form the final proposal
        dealer_requests = []
        for site, block_list in blocks_to_propose.iteritems():
            for dataset, blocks in block_list.iteritems():
                if blocks == dataset.blocks:
                    dealer_requests.append(DealerRequest(dataset, destination = site))
                else:
                    dealer_requests.append(DealerRequest(list(blocks), destination = site))

        return dealer_requests

    def postprocess(self, cycle_number, copy_list): # override
        """
        Create active copy entries for accepted copies.
        """

        for request in self.activated_requests:
            updated = False

            for action in request.actions:
                try:
                    dataset_name, block_name = Block.from_full_name(action.item)
                except ObjectError:
                    dataset_name = action.item
                    block_name = None

                for replica in copy_list:
                    if replica.site.name != action.site:
                        continue

                    if replica.growing:
                        # full dataset copy - dataset and block requests are both queued
                        if dataset_name == replica.dataset.name:
                            action.status = RequestAction.ST_QUEUED
                    else:
                        # match block-by-block
                        if block_name is None:
                            # dataset request
                            continue

                        for block_replica in replica.block_replicas:
                            if block_name == block_replica.block.real_name():
                                action.status = RequestAction.ST_QUEUED
                                break

                    if action.status == RequestAction.ST_QUEUED:
                        updated = True
                        # action got queued - no need to check other replicas
                        break

            if updated:
                self.request_manager.update_request(request)
class RequestList(WebModule):
    """
    Combined listing of copy and deletion requests in PhEDEx style.
    """

    def __init__(self, config):
        WebModule.__init__(self, config)

        self.copy_manager = CopyRequestManager()
        self.copy_manager.set_read_only()
        self.dele_manager = DeletionRequestManager()
        self.dele_manager.set_read_only()
        self.mysql_hist = HistoryDatabase(config.get('history', None))

    def pro_requests(self, erequests, request, inventory):
        response = []

        for reqid, req_obj in erequests.iteritems():
            sql_line = 'select * from phedex_requests as pr where pr.operation_id = ' + str(reqid)

            # default when no 'decision' filter is passed; requests with a
            # matching phedex_requests entry are reported as approved
            decision = 'approved'
            if 'decision' in request:
                approved = 1
                if request['decision'] == 'pending':
                    approved = 0
                    decision = 'pending'
                sql_line += ' and approved = ' + str(approved)

            dbRequests = self.mysql_hist.db.query(sql_line)
            if len(dbRequests) < 1:
                continue

            phedex_id = None
            req_type = None
            for line in dbRequests:
                phedex_id = int(line[0])
                req_type = line[1]
                # translate internal operation names to PhEDEx request types
                if req_type == 'copy':
                    req_type = 'xfer'
                if req_type == 'deletion':
                    req_type = 'delete'
                break

            nodelines = []
            for site_name in req_obj.sites:
                if site_name not in inventory.sites:
                    continue
                site_obj = inventory.sites[site_name]
                nodelines.append({'id': site_obj.id, 'name': site_obj.name, 'se': site_obj.host,
                                  'decision': decision, 'decided_by': req_obj.user,
                                  'time_decided': req_obj.first_request})

            response.append({'id': phedex_id, 'type': req_type, 'approval': decision,
                             'requested_by': req_obj.user, 'time_create': req_obj.first_request,
                             'node': nodelines})

        return response

    def run(self, caller, request, inventory):
        site_names = None
        data_names = None

        if 'node' in request:
            site_names = []
            nodepat = re.compile(fnmatch.translate(request['node']))
            for site in inventory.sites:
                if nodepat.match(site):
                    site_names.append(site)
            if len(site_names) < 1:
                site_names = None

        if 'dataset' in request:
            data_names = []
            dset_name = request['dataset']
            if '*' in dset_name:
                pattern = re.compile(fnmatch.translate(dset_name))
                for thename in inventory.datasets.iterkeys():
                    if pattern.match(thename):
                        data_names.append(thename)
            elif dset_name in inventory.datasets:
                data_names.append(dset_name)
            if len(data_names) < 1:
                data_names = None

        cpquests = self.copy_manager.get_requests(sites = site_names, items = data_names)
        dequests = self.dele_manager.get_requests(sites = site_names, items = data_names)

        copy_response = self.pro_requests(cpquests, request, inventory)
        dele_response = self.pro_requests(dequests, request, inventory)

        return {'request': copy_response + dele_response}
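# A small self-contained demonstration (made-up site names) of the wildcard
# matching used in RequestList.run(): fnmatch.translate() converts a
# PhEDEx-style glob such as 'T2_*' into a regular expression, which is then
# matched against every name in the inventory.
import fnmatch
import re

site_names = ['T1_US_FNAL', 'T2_CH_CERN', 'T2_US_MIT']
nodepat = re.compile(fnmatch.translate('T2_*'))
matched = [name for name in site_names if nodepat.match(name)]
assert matched == ['T2_CH_CERN', 'T2_US_MIT']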
def __init__(self, config):
    WebModule.__init__(self, config)
    ParseInputMixin.__init__(self, config)

    self.manager = CopyRequestManager()