def match_se_to_site(se, sites):
    """Map a GlueSE entry to its owning GlueSite entry.

    Successful lookups are memoized in the module-level _se_to_site_cache
    so repeated joins for the same SE entry are avoided.

    Parameters:
        se:    BDII GlueSE entry to match.
        sites: list of BDII GlueSite entries to join against.

    Returns:
        The matching site entry, or None when no match can be made.
    """
    global _se_to_site_cache
    if se in _se_to_site_cache:
        return _se_to_site_cache[se]
    try:
        site = join_FK(se, sites, "SiteUniqueID")
    except Exception:
        # Best effort: a failed join is logged and reported as "no site".
        log.warn("Unable to match SE to a site:\n%s" % se)
        return None
    # BUG FIX: the original never cached the successful result and fell
    # off the end of the function, implicitly returning None even when
    # the join succeeded.
    _se_to_site_cache[se] = site
    return site
def do_se_info(cp):
    """Build Gratia storage records for the raw ("SE") space of every
    GlueSE entry published in the BDII.

    For each SE that joins to a GlueSite and reports a non-zero size, one
    StorageElement description and one raw StorageElementRecord
    (total/free/used) are appended under the (probeName, se_name) key.

    Parameters:
        cp: ConfigParser-style object passed through to read_bdii.

    Returns:
        dict mapping (probeName, se_name) -> list of Gratia record objects.
    """
    site_entries = read_bdii(cp, "(objectClass=GlueSite)")
    se_entries = read_bdii(cp, "(objectClass=GlueSE)")
    time_now = time.time()
    gratia_info = {}
    for entry in se_entries:
        try:
            site = join_FK(entry, site_entries, "SiteUniqueID")
        except ValueError:
            log.warn("Unable to match SE:\n%s" % entry)
            continue
        try:
            # site_name is not used below, but the lookup doubles as a
            # check that the joined site actually publishes a SiteName.
            site_name = site.glue['SiteName']
            total = int(entry.glue['SESizeTotal'])
            free = int(entry.glue['SESizeFree'])
            se_name = entry.glue['SEName']
        except (KeyError, ValueError):
            # Narrowed from a bare "except:": only missing attributes or
            # unparseable integers mean "skip this entry".
            log.warn("Unable to parse attributes:\n%s" % entry)
            continue
        # Skip SEs that publish no capacity information at all.
        if total == 0 and free == 0:
            continue
        unique_id = entry.glue['SEUniqueID']
        probeName = 'gip_storage:%s:%s' % (unique_id, hostname)
        Gratia.Config.setMeterName(probeName)
        Gratia.Config.setSiteName(se_name)
        se = StorageElement.StorageElement()
        space_unique_id = "%s:%s:%s" % (unique_id, "SE", se_name)
        se.UniqueID(space_unique_id)
        se.SE(se_name)
        se.Name(se_name)
        se.SpaceType("SE")
        se.Timestamp(time_now)
        se.Implementation(entry.glue['SEImplementationName'])
        se.Version(entry.glue['SEImplementationVersion'])
        se.Status(entry.glue['SEStatus'])
        se_list = gratia_info.setdefault((probeName, se_name), [])
        se_list.append(se)
        ser = StorageElementRecord.StorageElementRecord()
        ser.UniqueID(space_unique_id)
        ser.MeasurementType("raw")
        ser.StorageType("disk")
        ser.Timestamp(time_now)
        # Published sizes are in GB; Gratia expects bytes (1 GB = 1000**3).
        ser.TotalSpace(total*1000**3)
        ser.FreeSpace(free*1000**3)
        ser.UsedSpace((total-free)*1000**3)
        se_list.append(ser)
    # BUG FIX: the original built gratia_info and then discarded it.
    return gratia_info
def create_site_dict(ce_entries, cp):
    """Determine site ownership of CEs.

    Joins each CE to its GlueCluster and that cluster to its GlueSite,
    recording which site each hosting cluster belongs to.

    Parameters:
        ce_entries: iterable of GlueCE BDII entries.
        cp:         ConfigParser-style object passed through to read_bdii.

    Returns:
        dict mapping CEHostingCluster -> SiteName.
    """
    # Query BDII for the cluster and site entries.
    cluster_entries = read_bdii(cp, query="(objectClass=GlueCluster)",
        multi=True)
    site_entries = read_bdii(cp, query="(objectClass=GlueSite)")
    ownership = {}
    # Determine the site's advertised ownership.
    for ce in ce_entries:
        try:
            # First join the CE to the cluster, then the cluster to the
            # site.
            cluster = join_FK(ce, cluster_entries, "ClusterUniqueID")
            site = join_FK(cluster, site_entries, "SiteUniqueID")
            ownership[ce.glue["CEHostingCluster"]] = site.glue["SiteName"]
        except Exception as e:
            # Best effort: a CE that cannot be joined is skipped.  The
            # original printed the exception (plus a hard-coded debug
            # print for red.unl.edu) to stdout; log it instead.
            log.warn("Unable to determine site for CE: %s" % e)
    # BUG FIX: the original built the ownership dict and then discarded it.
    return ownership
def do_cms_se_info(cp):
    """Build Gratia storage records for every GlueSA (storage area)
    advertised as accessible to the CMS VO.

    BUG FIX: this function was defined twice back-to-back (identical up
    to whitespace); the second definition silently shadowed the first.
    The duplicate has been removed.

    For each CMS-accessible SA that joins to its SE, one StorageElement
    description and one logical StorageElementRecord (total/free/used)
    are appended under the (probeName, se_name) key.

    Parameters:
        cp: ConfigParser-style object passed through to read_bdii.

    Returns:
        dict mapping (probeName, se_name) -> list of Gratia record objects.
    """
    site_entries = read_bdii(cp, "(objectClass=GlueSite)")
    se_entries = read_bdii(cp, "(objectClass=GlueSE)")
    sa_entries = read_bdii(cp, "(objectClass=GlueSA)", multi=True)
    time_now = time.time()
    gratia_info = {}
    for sa in sa_entries:
        # Only consider storage areas whose ACBRs mention CMS.
        if 'SAAccessControlBaseRule' not in sa.glue:
            continue
        supports_cms = False
        for acbr in sa.glue['SAAccessControlBaseRule']:
            if 'cms' in acbr.lower():
                supports_cms = True
                break
        if not supports_cms:
            continue
        try:
            total = int(sa.glue['SATotalOnlineSize'][0])
            free = int(sa.glue['SAFreeOnlineSize'][0])
            used = int(sa.glue['SAUsedOnlineSize'][0])
        except (KeyError, IndexError, ValueError):
            # Narrowed from a bare "except:": only missing attributes or
            # unparseable integers mean "skip this entry".
            log.warn("Unable to parse attributes:\n%s" % sa)
            continue
        try:
            se = join_FK(sa, se_entries, "SEUniqueID",
                join_fk_name="ChunkKey")
        except Exception:
            log.warn("Unable to match SA to SE:\n%s" % sa)
            continue
        site = match_se_to_site(se, site_entries)
        if not site:
            log.warn("Unable to match SE %s to site." % \
                se.glue['SEUniqueID'])
            continue
        se_unique_id = se.glue['SEUniqueID']
        sa_name = sa.glue['SAName'][0]
        probeName = 'gip_storage:%s' % se_unique_id
        se_name = se.glue['SEName']
        gse = StorageElement.StorageElement()
        space_unique_id = "%s:%s:%s" % (se_unique_id, "GlueStorageArea",
            sa_name)
        gse.ParentID("%s:%s:%s" % (se_unique_id, "SE", se_name))
        gse.UniqueID(space_unique_id)
        gse.SE(se_name)
        gse.Name(sa_name)
        gse.SpaceType("GlueStorageArea")
        gse.Timestamp(time_now)
        gse.Implementation(se.glue['SEImplementationName'])
        gse.Version(se.glue['SEImplementationVersion'])
        gse.VO("cms")
        gse.Status(se.glue['SEStatus'])
        se_list = gratia_info.setdefault((probeName, se_name), [])
        se_list.append(gse)
        ser = StorageElementRecord.StorageElementRecord()
        ser.UniqueID(space_unique_id)
        ser.MeasurementType("logical")
        ser.StorageType("disk")
        ser.Timestamp(time_now)
        # Published sizes are in GB; Gratia expects bytes (1 GB = 1000**3).
        ser.TotalSpace(total*1000**3)
        ser.FreeSpace(free*1000**3)
        ser.UsedSpace(used*1000**3)
        se_list.append(ser)
    # BUG FIX: the original built gratia_info and then discarded it.
    return gratia_info
def ownership_info(ce_entries, cp): """ Determine ownership of clusters from the sites's SiteSponsor attribute. """ # Query BDII for the cluster and site entries cluster_entries = read_bdii(cp, query="(objectClass=GlueCluster)", multi=True) site_entries = read_bdii(cp, query="(objectClass=GlueSite)") ownership = {} # Determine the site's advertised ownership. for ce in ce_entries: # First, we join the CE to the cluster: try: cluster = join_FK(ce, cluster_entries, "ClusterUniqueID") except: print "Unable to find cluster for CE; skipping\n%s" % ce continue #print cluster # Then, join the cluster to the site: try: site = join_FK(cluster, site_entries, "SiteUniqueID") except: continue #print site try: ownership[ce] = site.glue["SiteSponsor"] #print site.glue["SiteName"], site.glue["SiteSponsor"] except: ownership[ce] = "unknown" # Refine ownership; turn string into list of tuples and make sure # that everything adds up to 100%. # This is a bit awkward. I should consider turning list of tuples # into a dictionary. refined_ownership = {} for ce, val in ownership.items(): val = val.lower() val = val.replace('"', '') val = val.replace("'", '') refined = [] ctr = 0 #print val, ownership_re.findall(val) for entry in ownership_re.findall(val): info = entry.split(':') vo = correct_vo(info[0], cp) if len(info) == 1: refined.append((vo, 100)) ctr += 100 else: try: amt = info[1] amt = amt.replace("'", "") refined.append((vo, int(amt))) ctr += int(amt) except: print entry raise if ctr < 100: new_refined = [] has_unknown = False for entry in refined: if entry[0] == "unknown": new_refined.append(("unknown", entry[1]+(100-ctr))) has_unknown = True else: new_refined.append(entry) if not has_unknown: new_refined.append(("unknown", 100-ctr)) refined = new_refined refined_ownership[ce.glue["CEInfoHostName"]] = refined return refined_ownership