def get_site(self, name): #override
    """Construct a Site object from the named entry of the configuration.

    @param name  Site name; must be a key of self.config.

    @return Site built from the configuration entry (host, storage type, backend).

    @raise RuntimeError  If the site is not present in the configuration.
    """
    try:
        site_config = self.config[name]
    except KeyError:
        # Bug fix: the message was raised with an unfilled '%s' placeholder.
        raise RuntimeError('Site %s not in configuration' % name)

    storage_type = Site.storage_type_val(site_config.storage_type)
    backend = site_config.backend

    return Site(name, host = site_config.host, storage_type = storage_type, backend = backend)
def get_site(self, name, inventory): #override
    """Construct a Site object from the configuration, carrying over state
    from an existing inventory entry of the same name.

    @param name       Site name; must be a key of self.config.
    @param inventory  Inventory whose .sites may hold a previous Site object.

    @return Site built from the configuration; x509proxy is copied from the
            existing inventory entry when one exists.

    @raise RuntimeError  If the site is not present in the configuration.
    """
    try:
        site_config = self.config[name]
    except KeyError:
        # Bug fix: the message was raised with an unfilled '%s' placeholder.
        raise RuntimeError('Site %s not in configuration' % name)

    storage_type = Site.storage_type_val(site_config.storage_type)
    backend = site_config.backend

    site_obj = Site(name, host = site_config.host, storage_type = storage_type, backend = backend)

    if name in inventory.sites:
        # Preserve the proxy setting, which is not part of the configuration.
        old_site_obj = inventory.sites[name]
        site_obj.x509proxy = old_site_obj.x509proxy

    return site_obj
def run(self, caller, request, inventory):
    """Produce a PhEDEx-style 'nodes' listing from the inventory.

    request['node'] (optional) restricts the listing to a single site name,
    or to a wildcard pattern when it contains '*'; without it every site in
    the inventory is listed.

    @return {'node': [{'kind', 'se', 'technology', 'name', 'id'}, ...]}
    """
    node_array = []
    site_objs = []

    if 'node' in request:
        node_name = request['node']

        if '*' in node_name:
            pattern = re.compile(fnmatch.translate(node_name))
            for site_name in inventory.sites:
                if pattern.match(site_name):
                    site_objs.append(inventory.sites[site_name])
        else:
            try:
                site_objs.append(inventory.sites[node_name])
            except KeyError:
                pass
    else:
        for site_name in inventory.sites:
            # Bug fix: this looked up inventory.sites[node_name], but
            # node_name is undefined on this branch ('node' not in request).
            site_objs.append(inventory.sites[site_name])

    for site_obj in site_objs:
        node_array.append({
            'kind': Site.storage_type_name(site_obj.storage_type),
            'se': site_obj.host,
            'technology': "",  # not tracked in the inventory
            'name': site_obj.name,
            'id': site_obj.id
        })

    return {'node': node_array}
def get_site_status(self, site_name): #override
    """Return the Site status value for the named configuration entry.

    @param site_name  Site name; must be a key of self.config.

    @return Numeric status value from Site.status_val.

    @raise RuntimeError  If the site is not present in the configuration.
    """
    try:
        site_config = self.config[site_name]
    except KeyError:
        # Bug fix: the message was raised with an unfilled '%s' placeholder.
        raise RuntimeError('Site %s not in configuration' % site_name)

    return Site.status_val(site_config.status)
def maker_deletions(block, block_entry):
    """Build one BlockReplica (owned by the null group) per node listed
    under the 'deletion' key of block_entry.

    @param block        Block the replicas belong to.
    @param block_entry  Dict with a 'deletion' list of {'node': ...} entries.

    @return List of BlockReplica objects.
    """
    return [
        BlockReplica(block, Site(entry['node']), Group.null_group)
        for entry in block_entry['deletion']
    ]
def _load_sites(self, inventory, site_names, id_site_map):
    """Load sites (and their partition quotas) from the database into the inventory.

    @param inventory    Inventory object; .sites is filled, .partitions is read.
    @param site_names   If not None, restrict the load to these site names.
    @param id_site_map  Dict filled with {site id: Site} for later loaders to use.
    """
    # '+0' converts the ENUM columns (storage_type, status) to their numeric index.
    sql = 'SELECT s.`id`, s.`name`, s.`host`, s.`storage_type`+0, s.`backend`, s.`storage`, s.`cpu`, `status`+0 FROM `sites` AS s'

    if site_names is not None:
        # first dump the site ids into a temporary table, then constrain the original table
        self._mysql.query(
            'CREATE TABLE `sites_load_tmp` (`id` int(11) unsigned NOT NULL, PRIMARY KEY (`id`))'
        )
        sqlbase = 'INSERT INTO `sites_load_tmp` SELECT `id` FROM `sites`'
        self._mysql.execute_many(sqlbase, 'name', site_names)

        sql += ' INNER JOIN `sites_load_tmp` AS t ON t.`id` = s.`id`'
        # NOTE(review): `sites_load_tmp` is not dropped in this method —
        # presumably the caller cleans it up; confirm before reuse.

    for site_id, name, host, storage_type, backend, storage, cpu, status in self._mysql.xquery(
            sql):
        site = Site(name, host=host, storage_type=storage_type, backend=backend, storage=storage, cpu=cpu, status=status)
        inventory.sites[name] = site
        id_site_map[site_id] = site
        # Give every site a SitePartition entry for each known partition;
        # quotas are filled in below.
        for partition in inventory.partitions.itervalues():
            site.partitions[partition] = SitePartition(site, partition)

    # Load site quotas
    sql = 'SELECT q.`site_id`, p.`name`, q.`storage` FROM `quotas` AS q INNER JOIN `partitions` AS p ON p.`id` = q.`partition_id`'
    if site_names is not None:
        sql += ' INNER JOIN `sites_load_tmp` AS t ON t.`id` = q.`site_id`'

    for site_id, partition_name, storage in self._mysql.xquery(sql):
        try:
            site = id_site_map[site_id]
        except KeyError:
            # Quota row for a site that was filtered out above - skip it.
            continue

        partition = inventory.partitions[partition_name]
        # Factor 1e12 suggests the DB stores quotas in TB and set_quota
        # takes bytes — TODO confirm against SitePartition.set_quota.
        site.partitions[partition].set_quota(storage * 1.e+12)
def get_site(self, name): #override
    """Fetch the named node from PhEDEx and wrap it in a Site object.

    Returns None when the site is excluded by configuration or unknown
    to PhEDEx.
    """
    if not self.check_allowed_site(name):
        LOG.info('get_site(%s) %s is excluded by configuration.', name, name)
        return None

    LOG.info('get_site(%s) Fetching information of %s from PhEDEx', name, name)

    # General site info
    nodes = self._phedex.make_request('nodes', ['node=' + name])
    if len(nodes) == 0:
        return None

    node_info = nodes[0]

    return Site(name, host=node_info['se'], storage_type=Site.storage_type_val(node_info['kind']))
def maker_deletions(block, block_entry, site_check=None):
    """Build one BlockReplica (owned by the null group) per node listed
    under the 'deletion' key of block_entry, skipping nodes rejected by
    site_check when a checker is given.
    """
    return [
        BlockReplica(block, Site(entry['node']), Group.null_group)
        for entry in block_entry['deletion']
        if not site_check or site_check(entry['node'])
    ]
def get_site_list(self, inventory): #override
    """Return Site objects for every allowed PhEDEx node, carrying over
    backend and x509proxy from any existing inventory entry of the same name.
    """
    LOG.info('get_site_list Fetching the list of nodes from PhEDEx')

    sites = []
    for node in self._phedex.make_request('nodes'):
        name = node['name']
        if not self.check_allowed_site(name):
            continue

        site = Site(name, host=node['se'], storage_type=Site.storage_type_val(node['kind']))

        if name in inventory.sites:
            # Preserve attributes PhEDEx does not know about.
            existing = inventory.sites[name]
            site.backend = existing.backend
            site.x509proxy = existing.x509proxy

        sites.append(site)

    return sites
def maker_blockreplicas(block, block_entry):
    """Create one BlockReplica per entry of block_entry['replica'].

    Completeness is inferred by comparing the reported byte count to the
    block size; custodiality and last-update come straight from the entry.
    """
    return [
        BlockReplica(
            block,
            Site(entry['node']),
            Group(entry['group']),
            is_complete=(entry['bytes'] == block.size),
            is_custodial=(entry['custodial'] == 'y'),
            size=entry['bytes'],
            last_update=int(entry['time_update'])
        )
        for entry in block_entry['replica']
    ]
class InventoryStatCategories(object):
    """
    Just a holder for available data categorization.
    Specify (category_name, (category_title, target, mapping)) where target is either
    Dataset, Site, or Group and mapping is a function that takes an instance of the
    target class and returns a value to be used for categorization.
    Categories can be made specific to the Dynamo instance using _customize.customize_stats.
    """

    # Ordered mapping: category key -> (display title, target class, classifier).
    # Insertion order is preserved so the display order stays stable.
    categories = collections.OrderedDict([
        # Dataset-based categorizations
        ('data_type', ('Dataset type', Dataset, lambda ds: Dataset.data_type_name(ds.data_type))),
        ('dataset_status', ('Dataset status', Dataset, lambda ds: Dataset.status_name(ds.status))),
        ('dataset_software_version', ('Dataset software version', Dataset, lambda ds: ds.software_version)),
        ('dataset', ('Dataset name', Dataset, lambda ds: ds.name)),
        # Site-based categorizations
        ('site', ('Site name', Site, lambda st: st.name)),
        ('site_status', ('Site status', Site, lambda st: Site.status_name(st.status))),
        # Group-based categorization
        ('group', ('Group name', Group, lambda gr: gr.name))
    ])
def run(self, caller, request, inventory):
    """Produce a PhEDEx-style 'nodes' listing from the inventory.

    request['node'] (optional) restricts the listing to a single site name,
    or to a wildcard pattern when it contains '*'; without it every site in
    the inventory is listed.

    @return {'node': [{'kind', 'se', 'technology', 'name', 'id'}, ...]}
    """
    node_array = []
    site_objs = []

    if 'node' in request:
        node_name = request['node']

        if '*' in node_name:
            pattern = re.compile(fnmatch.translate(node_name))
            for site_name in inventory.sites:
                if pattern.match(site_name):
                    site_objs.append(inventory.sites[site_name])
        else:
            try:
                site_objs.append(inventory.sites[node_name])
            except KeyError:
                pass
    else:
        for site_name in inventory.sites:
            # Bug fix: this looked up inventory.sites[node_name], but
            # node_name is undefined on this branch ('node' not in request).
            site_objs.append(inventory.sites[site_name])

    for siteObj in site_objs:
        node_array.append({
            'kind': Site.storage_type_name(siteObj.storage_type),
            'se': siteObj.host,
            'technology': "",  # not tracked in the inventory
            'name': siteObj.name,
            'id': siteObj.id
        })

    return {'node': node_array}
def run(self, caller, request, inventory):
    """Report storage usage per site and partition.

    'site' and 'partition' request fields (a list or a comma-separated
    string, '*' wildcards allowed) restrict which sites and partitions
    appear in the report.

    @return List of JSON-serializable dicts, one per selected site.
    """

    def select(field, container):
        # Resolve a request field against a name -> object mapping;
        # absent field selects everything.
        if field not in request:
            return set(container.itervalues())

        value = request[field]
        names = value if type(value) is list else value.split(',')

        chosen = set()
        for spec in names:
            if '*' in spec:
                regex = re.compile(fnmatch.translate(spec))
                for key in container.iterkeys():
                    if regex.match(key):
                        chosen.add(container[key])
            else:
                try:
                    chosen.add(container[spec])
                except KeyError:
                    pass
        return chosen

    # collect information from the inventory and registry according to the requests
    sites = select('site', inventory.sites)
    partitions = select('partition', inventory.partitions)

    response = []

    for site in sorted(sites, key=lambda s: s.name):
        data = {
            'name': site.name,
            'host': site.host,
            'storage_type': Site.storage_type_name(site.storage_type),
            'status': Site.status_name(site.status),
            'partitions': []
        }

        quota_sum = 0.
        used_sum = 0.
        projected_sum = 0.

        # Totals run over all basic partitions with positive quota; the
        # partition filter only controls the 'partitions' list content.
        for partition in sorted(inventory.partitions.itervalues(), key=lambda p: p.name):
            site_partition = site.partitions[partition]
            quota = site_partition.quota

            is_basic = partition.subpartitions is None
            used = site_partition.occupancy_fraction() * quota
            projected = site_partition.occupancy_fraction(physical=False) * quota

            if is_basic and quota > 0.:
                quota_sum += quota
                used_sum += used
                projected_sum += projected

            if partition in partitions:
                data['partitions'].append({
                    'name': partition.name,
                    'type': 'basic' if is_basic else 'composite',
                    'quota': quota * 1.e-12,
                    'usage': used * 1.e-12,
                    'projected_usage': projected * 1.e-12
                })

        data['total_quota'] = quota_sum * 1.e-12
        data['total_usage'] = used_sum * 1.e-12
        data['total_projected_usage'] = projected_sum * 1.e-12

        response.append(data)

    # return any JSONizable python object (maybe should be limited to a list)
    return response
def maker_blockreplicas(block, block_entry, site_check=None):
    """Return a list of block replicas using blockreplicas data or a combination of blockreplicas and filereplicas calls."""
    # Caches so each Site / Group object is created once per call.
    sites = {}
    invalid_sites = set()
    groups = {}
    # One replica per site name; also used to merge in per-file data below.
    block_replicas = {}

    for replica_entry in block_entry['replica']:
        site_name = replica_entry['node']
        try:
            site = sites[site_name]
        except KeyError:
            if site_check:
                # Remember rejected sites to avoid re-running site_check.
                if site_name in invalid_sites:
                    continue
                if not site_check(site_name):
                    invalid_sites.add(site_name)
                    continue

            site = sites[site_name] = Site(site_name)

        group_name = replica_entry['group']
        try:
            group = groups[group_name]
        except KeyError:
            group = groups[group_name] = Group(group_name)

        try:
            time_update = int(replica_entry['time_update'])
        except TypeError:
            # time_update was None
            time_update = 0

        block_replica = BlockReplica(
            block, site, group,
            is_custodial=(replica_entry['custodial'] == 'y'),
            last_update=time_update)

        block_replicas[site_name] = block_replica

        if replica_entry['complete'] == 'n':
            # Incomplete replica: reset size and collect its files below.
            # temporarily make this a list
            block_replica.file_ids = []
            block_replica.size = 0
            LOG.info("Incomplete %s" % str(block_replica))

    # Second pass (filereplicas data): fill in files/sizes of incomplete replicas.
    if 'file' in block_entry:
        for file_entry in block_entry['file']:
            for replica_entry in file_entry['replica']:
                site_name = replica_entry['node']
                try:
                    block_replica = block_replicas[site_name]
                except KeyError:
                    continue

                if block_replica.file_ids is None:
                    # Complete replica - file list not tracked explicitly.
                    continue

                # add LFN instead of file id
                block_replica.file_ids.append(file_entry['name'])

                file_size = file_entry['bytes']
                if file_size is not None:
                    block_replica.size += file_size

                try:
                    time_create = int(replica_entry['time_create'])
                except TypeError:
                    pass
                else:
                    # Replica is at least as new as its newest file.
                    if time_create > block_replica.last_update:
                        block_replica.last_update = time_create

    # Freeze the temporary file lists into tuples.
    for block_replica in block_replicas.itervalues():
        if block_replica.file_ids is not None:
            block_replica.file_ids = tuple(block_replica.file_ids)

    return block_replicas.values()
def run(self, caller, request, inventory):
    """Compile per-site storage information.

    The optional 'site' and 'partition' request fields (list or
    comma-separated string; '*' acts as a wildcard) narrow down which
    sites are reported and which partitions are itemized.

    @return List of JSON-serializable dicts, one per selected site.
    """

    def expand(field, pool):
        # Translate a request field into the set of matching objects from
        # pool (a name -> object mapping); missing field means "all".
        if field not in request:
            return set(pool.itervalues())

        raw = request[field]
        spec_list = raw if type(raw) is list else raw.split(',')

        found = set()
        for spec in spec_list:
            if '*' not in spec:
                try:
                    found.add(pool[spec])
                except KeyError:
                    pass
            else:
                matcher = re.compile(fnmatch.translate(spec))
                for pool_name in pool.iterkeys():
                    if matcher.match(pool_name):
                        found.add(pool[pool_name])
        return found

    # collect information from the inventory and registry according to the requests
    sites = expand('site', inventory.sites)
    partitions = expand('partition', inventory.partitions)

    response = []

    for site in sorted(sites, key=lambda s: s.name):
        site_data = {
            'name': site.name,
            'host': site.host,
            'storage_type': Site.storage_type_name(site.storage_type),
            'status': Site.status_name(site.status),
            'partitions': []
        }

        total_quota = 0.
        total_used = 0.
        total_projected = 0.

        # Totals accumulate over every basic partition with a positive
        # quota; the filter above only affects the itemized list.
        for partition in sorted(inventory.partitions.itervalues(), key=lambda p: p.name):
            sp = site.partitions[partition]
            quota = sp.quota

            part_type = 'basic' if partition.subpartitions is None else 'composite'
            used = sp.occupancy_fraction() * quota
            projected = sp.occupancy_fraction(physical=False) * quota

            if part_type == 'basic' and quota > 0.:
                total_quota += quota
                total_used += used
                total_projected += projected

            if partition in partitions:
                site_data['partitions'].append({
                    'name': partition.name,
                    'type': part_type,
                    'quota': quota * 1.e-12,
                    'usage': used * 1.e-12,
                    'projected_usage': projected * 1.e-12
                })

        site_data['total_quota'] = total_quota * 1.e-12
        site_data['total_usage'] = total_used * 1.e-12
        site_data['total_projected_usage'] = total_projected * 1.e-12

        response.append(site_data)

    # return any JSONizable python object (maybe should be limited to a list)
    return response