# Standard-library imports used by the snippets below.
import os
import sys
import time
import types
import optparse
import datetime

# Project-specific helpers and globals referenced here (config_file, read_bdii,
# join_FK, create_count_dict, correct_sc_info, correct_count, correct_vo,
# get_cpu_normalizations, match_se_to_site, pretty_ownership, ownership_re,
# insert_se_info, log, hostname, XmlConfig, Gratia, StorageElement,
# StorageElementRecord, ...) are assumed to come from the GIP / Gratia probe
# libraries on the original system and are not reproduced here.

def do_se_info(cp):
    site_entries = read_bdii(cp, "(objectClass=GlueSite)")
    se_entries = read_bdii(cp, "(objectClass=GlueSE)")
    #conn = getGipDBConn(cp)
    #curs = conn.cursor()
    today = datetime.date.today()
    time_now = time.time()

    gratia_info = {}
    for entry in se_entries:
        try:
            site = join_FK(entry, site_entries, "SiteUniqueID")
        except ValueError, ve:
            log.warn("Unable to match SE:\n%s" % entry)
            continue
        try:
            site_name = site.glue['SiteName']
            total = int(entry.glue['SESizeTotal'])
            free = int(entry.glue['SESizeFree'])
            se_name = entry.glue['SEName']
        except:
            log.warn("Unable to parse attributes:\n%s" % entry)
            continue
        #curs.execute(insert_se_info, {'date': today, 'site': site_name, 'se': \
        #    se_name, 'total': total, 'free': free})

        if total == 0 and free == 0:
            continue

        unique_id = entry.glue['SEUniqueID']
        probeName = 'gip_storage:%s:%s' % (unique_id, hostname)
        Gratia.Config.setMeterName(probeName)
        Gratia.Config.setSiteName(se_name)
        se = StorageElement.StorageElement()
        space_unique_id = "%s:%s:%s" % (unique_id, "SE", se_name)
        se.UniqueID(space_unique_id)
        se.SE(se_name)
        se.Name(se_name)
        se.SpaceType("SE")
        se.Timestamp(time_now)
        se.Implementation(entry.glue['SEImplementationName'])
        se.Version(entry.glue['SEImplementationVersion'])
        se.Status(entry.glue['SEStatus'])
        se_list = gratia_info.setdefault((probeName, se_name), [])
        se_list.append(se)

        ser = StorageElementRecord.StorageElementRecord()
        ser.UniqueID(space_unique_id)
        ser.MeasurementType("raw")
        ser.StorageType("disk")
        ser.Timestamp(time_now)
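        # GLUE SE sizes are reported in GB; convert to bytes (decimal GB).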
        ser.TotalSpace(total*1000**3)
        ser.FreeSpace(free*1000**3)
        ser.UsedSpace((total-free)*1000**3)
        se_list.append(ser)
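
# Hedged sketch, not part of the original source: one way the accumulated
# gratia_info map might be flushed to the Gratia collector.  The helper name
# send_gratia_info is hypothetical; it assumes Gratia.Send() accepts the
# StorageElement / StorageElementRecord objects built above.
def send_gratia_info(gratia_info):
    for (probe_name, site_name), records in gratia_info.items():
        # Mirror the per-entry meter/site configuration done in do_se_info.
        Gratia.Config.setMeterName(probe_name)
        Gratia.Config.setSiteName(site_name)
        for record in records:
            Gratia.Send(record)
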
Example #2
def main():

    cp = config_file()
    # Read the CE entries from the BDII.
    entries = read_bdii(cp, query="(objectClass=GlueCE)")
    cluster_info = create_count_dict(entries)
    sc_info = sub_cluster_info(cluster_info.keys(), cp)
    specint = get_cpu_normalizations()
    for key, val in specint.items():
        if isinstance(val, types.TupleType):
            specint[key] = val[0]
    ksi2k_info = {}
    site_dict = create_site_dict(entries, cp)
    sites = cp.get("site_normalization", "sites").split(",")
    sites = [i.strip() for i in sites]
    for cluster, cpu in cluster_info.items():
        correct_sc_info(cluster, cpu, sc_info, specint)
        ksi2k = 0
        sc_cores = 0
        for sc in sc_info[cluster]:
            ksi2k += sc.glue["KSI2K"]
            sc_cores += int(sc.glue["SubClusterLogicalCPUs"])
        try:
            site = site_dict[cluster]
        except:
            print "Problem with %s" % cluster
            continue
        if site in sites and sc_cores:
            # Guard against clusters with no matching sub-clusters.
            print site, (ksi2k * 1000) / sc_cores
Example #4
def create_site_dict(ce_entries, cp):
    """
    Determine site ownership of CEs.
    """
    # Query BDII for the cluster and site entries
    cluster_entries = read_bdii(cp, query="(objectClass=GlueCluster)", multi=True)
    site_entries = read_bdii(cp, query="(objectClass=GlueSite)")
    ownership = {}

    # Determine the site's advertised ownership.
    for ce in ce_entries:
        try:
            # First, we join the CE to the cluster:
            cluster = join_FK(ce, cluster_entries, "ClusterUniqueID")
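            # Debug output for one specific hosting cluster.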
            if ce.glue['CEHostingCluster'] == 'red.unl.edu':
                print cluster
            # Then, join the cluster to the site:
            site = join_FK(cluster, site_entries, "SiteUniqueID")
            ownership[ce.glue["CEHostingCluster"]] = site.glue["SiteName"]
        except Exception, e:
            print e
    return ownership

def do_site_info(cp):
    ce_entries = read_bdii(cp, "(objectClass=GlueCE)")
    cluster_entries = read_bdii(cp, "(objectClass=GlueCluster)", multi=True)
    site_entries = read_bdii(cp, "(objectClass=GlueSite)")
    ce_map = {}
    ce_map2 = {}
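    # Map each CE, both by hosting hostname and by CE unique ID, to the
    # cluster unique ID in its ForeignKey attribute.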
    for ce in ce_entries:
        try:
            cluster = ce.glue['ForeignKey'].split('=')[1]
        except:
            continue
        ce_map[ce.glue['CEHostingCluster']] = cluster
        ce_map2[ce.glue['CEUniqueID']] = cluster
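    # Map each cluster name to its site unique ID via the GlueSiteUniqueID
    # foreign key.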
    cluster_map = {}
    for cluster in cluster_entries:
        try:
            site = None
            for key in cluster.glue['ForeignKey']:
                kind, name = key.split('=', 1)
                if kind != 'GlueSiteUniqueID':
                    continue
                site = name
            if not site:
                continue
        except:
            continue
        cluster_map[cluster.glue['ClusterName'][0]] = site
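    # Map site unique IDs to human-readable site names.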
    site_map = {}
    for site in site_entries:
        site_map[site.glue['SiteUniqueID']] = site.glue['SiteName']
    #conn = getGipDBConn(cp)
    #curs = conn.cursor()
    #for ce, cluster in ce_map.items():
    #    my_cluster = cluster_map.get(cluster, None)
    #    my_site = site_map.get(my_cluster, None)
    #    if my_site:
    #        curs.execute(insert_site_info, {'site': my_site, 'cename': ce})
    #conn.commit()
    return ce_map2, cluster_map, site_map
Example #6
def sub_cluster_info(ce_list, cp):
    """
    Given a list of CE names (not LDAP entries), return a dictionary where
    the key is the CE name and the value is a list of SubClusters associated
    with that CE.
    """
    sc_entries = read_bdii(cp, query="(objectClass=GlueSubCluster)")
    sc_info = {}
    sc_total = {}
    for ce in ce_list:
        my_sc = sc_info.get(ce, [])
        sc_info[ce] = my_sc
        for sc in sc_entries:
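            # Skip sub-clusters advertising the 999999 placeholder CPU count.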
            if "999999" in sc.glue['SubClusterLogicalCPUs']:
                continue
            desired_ck = "GlueClusterUniqueID=%s" % ce
            if "ChunkKey" in sc.glue and sc.glue["ChunkKey"] == desired_ck:
                my_sc.append(sc)
    return sc_info
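
# Hedged usage sketch, not part of the original source; the helper name
# print_cluster_cores is hypothetical.  It totals the logical CPUs reported
# for each cluster returned by sub_cluster_info().
def print_cluster_cores(sc_info):
    for cluster, subclusters in sc_info.items():
        cores = sum(int(sc.glue["SubClusterLogicalCPUs"]) for sc in subclusters)
        print cluster, cores
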
def main():

    # Load up the config file.
    cp = config_file()

    # Load the DB
    filename = os.path.expandvars("$HOME/dbinfo/DBParam.xml")
    if not os.path.exists(filename):
        filename = os.path.expandvars("$DBPARAM_LOCATION")
        if not os.path.exists(filename):
            filename = '/etc/DBParam.xml'
    x = XmlConfig(file=filename)
    conn = x.globals['GIPConnMan'].get_connection(None).get_connection()
    curs = conn.cursor()

    # Read the CE entries from the BDII.    
    entries = read_bdii(cp, query="(&(objectClass=GlueCE))")
    
    cluster_info = create_count_dict(entries)

    # Map from the cluster hostname to the unique ID
    id_to_hostname = {}
    for entry in entries:
        fk = entry.glue['ForeignKey']
        info = fk.split("=", 1)
        if len(info) != 2:
            continue
        if info[0] != "GlueClusterUniqueID":
            print >> sys.stderr, "Entry has unknown cluster FK: %s" % entry
            continue
        id = info[1]
        id_to_hostname[id] = entry.glue['CEHostingCluster']

    sc_info = sub_cluster_info(id_to_hostname.keys(), cp)

    # For each unique cluster ID, map to one of the cluster hostnames
    new_sc_info = {}
    for id, info in sc_info.items():
        if id not in id_to_hostname:
            print >> sys.stderr, "ID %s has no matching cluster hostname." % id
            continue
        new_sc_info[id_to_hostname[id]] = info
    sc_info = new_sc_info

    specint = get_cpu_normalizations()

    now = datetime.datetime.now()

    curs.execute("DELETE FROM cpu_score");
    for cpu, score in specint.items():
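        # Some normalization entries are tuples; keep only the leading score.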
        if isinstance(score, types.TupleType):
            score = score[0]
            #specint[cpu] = score
        curs.execute("INSERT INTO cpu_score VALUES (%s, %s, %s)", (cpu, \
            int(score), int(0)));

    site_ownership = create_site_dict(entries, cp)
    ownership = ownership_info(entries, cp)

    # Initialize the Probe's configuration
    ProbeConfig = '/etc/osg-storage-report/ProbeConfig'
    try:        
        Gratia.Initialize(ProbeConfig)
    except Exception, e:
        print e
        raise
def do_cms_se_info(cp):
    site_entries = read_bdii(cp, "(objectClass=GlueSite)")
    se_entries = read_bdii(cp, "(objectClass=GlueSE)")
    sa_entries = read_bdii(cp, "(objectClass=GlueSA)", multi=True)
    today = datetime.date.today()
    time_now = time.time()

    gratia_info = {}
    for sa in sa_entries:
        if 'SAAccessControlBaseRule' not in sa.glue:
            continue
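        # Only keep storage areas whose access-control rules mention the CMS VO.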
        supports_cms = False
        for acbr in sa.glue['SAAccessControlBaseRule']:
            if 'cms' in acbr.lower():
                supports_cms = True
                break
        if not supports_cms:
            continue
        try:
            total = int(sa.glue['SATotalOnlineSize'][0])
            free = int(sa.glue['SAFreeOnlineSize'][0])
            used = int(sa.glue['SAUsedOnlineSize'][0])
        except:
            log.warn("Unable to parse attributes:\n%s" % sa)
            continue

        try:
            se = join_FK(sa, se_entries, "SEUniqueID", join_fk_name="ChunkKey")
        except Exception, ve:
            log.warn("Unable to match SA to SE:\n%s" % sa)
            continue
        site = match_se_to_site(se, site_entries)
        if not site:
            log.warn("Unable to match SE %s to site." % \
                se.glue['SEUniqueID'])
            continue
        print sa.glue['ChunkKey'][0]
        se_unique_id = se.glue['SEUniqueID']
        sa_name = sa.glue['SAName'][0]
        probeName = 'gip_storage:%s' % se_unique_id
        se_name = se.glue['SEName']
        gse = StorageElement.StorageElement()
        space_unique_id = "%s:%s:%s" % (se_unique_id, "GlueStorageArea",
            sa_name)
        gse.ParentID("%s:%s:%s" % (se_unique_id, "SE", se_name))
        gse.UniqueID(space_unique_id)
        gse.SE(se_name)
        gse.Name(sa_name)
        gse.SpaceType("GlueStorageArea")
        gse.Timestamp(time_now)
        gse.Implementation(se.glue['SEImplementationName'])
        gse.Version(se.glue['SEImplementationVersion'])
        gse.VO("cms")
        gse.Status(se.glue['SEStatus'])
        se_list = gratia_info.setdefault((probeName, se_name), [])
        se_list.append(gse)

        ser = StorageElementRecord.StorageElementRecord()
        ser.UniqueID(space_unique_id)
        ser.MeasurementType("logical")
        ser.StorageType("disk")
        ser.Timestamp(time_now)
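        # GLUE SA online sizes are reported in GB; convert to bytes (decimal GB).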
        ser.TotalSpace(total*1000**3)
        ser.FreeSpace(free*1000**3)
        ser.UsedSpace(used*1000**3)
        se_list.append(ser)
Example #11
def ownership_info(ce_entries, cp):
    """
    Determine ownership of clusters from the site's SiteSponsor attribute.
    """
    # Query BDII for the cluster and site entries
    cluster_entries = read_bdii(cp, query="(objectClass=GlueCluster)", multi=True)
    site_entries = read_bdii(cp, query="(objectClass=GlueSite)")
    ownership = {}

    # Determine the site's advertised ownership.
    for ce in ce_entries:
        # First, we join the CE to the cluster:
        try:
            cluster = join_FK(ce, cluster_entries, "ClusterUniqueID")
        except:
            print "Unable to find cluster for CE; skipping\n%s" % ce
            continue
        #print cluster
        # Then, join the cluster to the site:
        try:
            site = join_FK(cluster, site_entries, "SiteUniqueID")
        except:
            continue
        #print site
        try:
            ownership[ce] = site.glue["SiteSponsor"]
            #print site.glue["SiteName"], site.glue["SiteSponsor"]
        except:
            ownership[ce] = "unknown"
                    
    # Refine ownership; turn string into list of tuples and make sure
    # that everything adds up to 100%.
    # This is a bit awkward.  I should consider turning list of tuples
    # into a dictionary.
    refined_ownership = {}
    for ce, val in ownership.items():
        val = val.lower()
        val = val.replace('"', '')
        val = val.replace("'", '')
        refined = []
        ctr = 0
        #print val, ownership_re.findall(val)
        for entry in ownership_re.findall(val):
            info = entry.split(':')
            vo = correct_vo(info[0], cp)
            if len(info) == 1:
                refined.append((vo, 100))
                ctr += 100
            else:
                try:
                    amt = info[1]
                    amt = amt.replace("'", "")
                    refined.append((vo, int(amt)))
                    ctr += int(amt)
                except:
                    print entry
                    raise
        if ctr < 100:
            new_refined = []
            has_unknown = False
            for entry in refined:
                if entry[0] == "unknown":
                    new_refined.append(("unknown", entry[1]+(100-ctr)))
                    has_unknown = True
                else:
                    new_refined.append(entry)
            if not has_unknown:
                new_refined.append(("unknown", 100-ctr))
            refined = new_refined
        refined_ownership[ce.glue["CEInfoHostName"]] = refined
    return refined_ownership
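
# Illustrative sketch, not part of the original source.  Assuming ownership_re
# extracts "vo" or "vo:percent" tokens and correct_vo leaves the names
# unchanged, a SiteSponsor value of 'cms:50,atlas:30' is refined to
#     [('cms', 50), ('atlas', 30), ('unknown', 20)]
# with the ('unknown', 20) entry appended so the shares sum to 100%.
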
def main():
    # Determine any filters we should apply
    parser = optparse.OptionParser(add_help_option=False)
    parser.add_option("-e", "--endpoint")
    parser.add_option("-h", "--help")
    parser.add_option("-c", "--config")
    (options, args) = parser.parse_args()
    if len(args) >= 1:
        ce_glob = args[0]
    else:
        ce_glob = "*"

    # Load up the config file.
    cp = config_file()

    # Read the CE entries from the BDII.    
    entries = read_bdii(cp, 
        query="(&(objectClass=GlueCE)(GlueCEInfoHostName=%s))" % ce_glob)
    cluster_info = create_count_dict(entries)
    sc_info = sub_cluster_info(cluster_info.keys(), cp)
    specint = get_cpu_normalizations()
    for key, val in specint.items():
        if isinstance(val, types.TupleType):
            specint[key] = val[0]
    correction = eval(cp.get("cpu_count", "correction"))
    duplicate = eval(cp.get("cpu_count", "duplicate"))
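    # Both "correction" and "duplicate" come from eval() of config options, so
    # they are expected to be Python dict literals keyed by cluster name.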
    msi2k_ctr = 0.0
    ksi2k_info = {}
    ownership = ownership_info(entries, cp)
    gk_ctr = 0
    add_missing = cp.getboolean("cpu_count", "add_missing")
    do_not_add_missing = cp.get("cpu_count", "do_not_add_missing").split(',')
    do_not_add_missing = [i.strip() for i in do_not_add_missing]
    for cluster, cpu in cluster_info.items():
        print "* Cluster: ", cluster
        my_sc_cores = 0
        ksi2k_ctr = 0
        correct_sc_info(cluster, cpu, sc_info, specint)
        
        # Print out SC info.
        if len(sc_info[cluster]) > 0:
            print " - Sub-clusters:"
        for sc in sc_info[cluster]:
            ksi2k = sc.glue["KSI2K"]
            msi2k_ctr += ksi2k / 1000.0
            my_sc_cores += int(sc.glue["SubClusterLogicalCPUs"])
            print "   - %(SubClusterUniqueID)s, CPU Model:" \
                " %(HostProcessorModel)s, Cores: %(SubClusterLogicalCPUs)s," \
                " KSI2K: %(KSI2K)s" % sc.glue
            ksi2k_ctr += ksi2k
        
        # Do any KSI2K/CPU adjustments necessary.
        if my_sc_cores == 0:
            avg_ksi2k = 1.3
        else:
            avg_ksi2k = ksi2k_ctr / float(my_sc_cores)
        if my_sc_cores > cpu: # CE advertises fewer CPUs than its sub-clusters; use the sub-cluster sum.
            cpu = my_sc_cores
            cluster_info[cluster] = cpu
        elif my_sc_cores < cpu and add_missing and (cluster not in \
                do_not_add_missing):
            # Sub-clusters report fewer cores than the CE; add the average
            # kSI2K for the missing cores.
            addl_ksi2k = avg_ksi2k * (cpu - my_sc_cores)
            print " - Additional kSI2K for %s: %i" % (cluster, addl_ksi2k)
            ksi2k_ctr += addl_ksi2k
            msi2k_ctr += addl_ksi2k / 1000.0
        ksi2k_info[cluster] = ksi2k_ctr

        # Print out any correction factors or duplicate clusters
        if cluster in correction:
            print " - Correction factor: %s" % correction[cluster]
        if cluster in duplicate:
            print " - Duplicates of this cluster: %s" % duplicate[cluster]
        try:
            print " - Ownership:", pretty_ownership(ownership[cluster])
        except:
            pass
        print " - Core count:", cpu
        print " - KSI2K: %.1f" % ksi2k_ctr
        gk_ctr += 1

    print "----------------------------"
    core_count, msi2k_count, vo_info = correct_count(cluster_info, ksi2k_info, 
        ownership, correction, duplicate)
    print "----------------------------"
    print "OSG cores sum:", core_count
    print "OSG MSI2K: %.2f" % msi2k_count
    print "OSG gatekeepers count:", gk_ctr
    print "----------------------------"
    other_cores = 0
    other_msi2k = 0
    other_vos = []
    print_vos = [i.strip() for i in cp.get("cpu_count", "print_vos").split(',')]
    for vo, info in vo_info.items():
        if vo not in print_vos:
            other_cores += info[0]
            other_msi2k += info[1]
            other_vos.append(vo)
            continue
        print "%s cores sum: %i" % (vo, info[0])
        print "%s MSI2K: %.2f" % (vo, info[1])

    print "Other cores sum: %i" % other_cores
    print "Other MSI2K: %.2f" % other_msi2k
    print "Other VOs:", other_vos