Example #1
from rhapi import DEFAULT_URL, RhApi


def get_data(q):
    api = RhApi(DEFAULT_URL, debug=False)

    # named query parameters (run class selection) passed along with the query
    p = {"class": "Collisions18"}
    qid = api.qid(q)  # register the query and obtain its query id (unused here)

    return api.csv(q, p)
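A minimal usage sketch (editor's illustration, not part of the original snippet); the query string and the :class placeholder follow the runreg_tracker examples further down this page:

if __name__ == "__main__":
    # hypothetical query; get_data() binds {"class": "Collisions18"} to :class
    csv_text = get_data("select r.runnumber, r.lhcfill from runreg_tracker.runs r "
                        "where r.run_class_name like :class")
    print(csv_text)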
Example #2
import sys

from rhapi import RhApi


def query_runregistry(query):
    """
    query run registry's tables using resthub API
    """
    try:
        api = RhApi("http://vocms00170:2113", debug=options.verbose)
        rr_data = api.json_all(query, inline_clobs=True)
    except Exception as ex:
        print("Error while using RestHub API: %s" % (ex))
        sys.exit(-1)

    return rr_data
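A hypothetical call (the module-level `options` object with a `verbose` flag comes from the surrounding script and is assumed to exist here):

query = ("select r.runnumber, r.lhcfill from runreg_tracker.runs r "
         "where r.run_class_name = 'Collisions18'")
rows = query_runregistry(query)
print("%d rows returned" % len(rows))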
Example #3
    def get_list_of_lumis(self, query):
        """
        get list of lumis for runs with specified query
        """

        try:
            api = RhApi(self.restHubURL, debug=self.verbose)
            rr_data = api.json_all(query, inline_clobs=True)
        except Exception as ex:
            print("Error while using RestHub API: %s" % (ex))
            sys.exit(-1)

        return rr_data
Example #4
    def get_list_of_runs(self, query):
        """
        return list of runs from restHub API
        """

        try:
            api = RhApi(self.restHubURL, debug=self.verbose)
            rr_data = api.json_all(query, inline_clobs=True)
        except Exception as ex:
            print("Error while using RestHub API: %s" % (ex))
            sys.exit(-1)

        # convert resthub format to useful list of runs
        # 0th value is runnumber as specified in __query

        list_of_runs = [el[0] for el in rr_data]
        return list_of_runs
Example #5
import json

from rhapi import DEFAULT_URL, RhApi


def main(argv=None):

    lst = []

    input_file = open('json_DCSONLY.txt', 'r')
    json_outfile = "Run_LHCFill_RunDuration.json"

    data = json.load(input_file)

    api = RhApi(DEFAULT_URL, debug=False)

    for key in data.keys():
        #print "Run = ",key
        # keys of json_DCSONLY.txt are run numbers; the first 6 characters are the run number
        q = ("select r.lhcfill from runreg_tracker.runs r "
             "where r.runnumber between " + key[:6] + " and " + key[:6])
        p = {"class": "Collisions17"}
        #print "RR Query = ",q
        lhcfill_init = api.json(q, p)[u'data']
        lhcfill_middle = lhcfill_init[0]

        q1 = ("select r.duration from runreg_tracker.runs r "
              "where r.runnumber between " + key[:6] + " and " + key[:6])
        p1 = {"class": "Collisions17"}
        #print "RR Query = ",q
        dur_init = api.json(q1, p1)[u'data']
        dur_middle = dur_init[0]

        print(key, " ", lhcfill_middle[0], "  ", dur_middle[0])

        d = {}
        d['run'] = key
        d['lhcfill'] = lhcfill_middle[0]
        d['rundur'] = dur_middle[0]
        lst.append(d)

    lst = sorted(lst, key=lambda x: x['run'])

    obj = {}
    obj[json_outfile] = lst
    print(json.dumps(obj, indent=2))

    outfile = open(json_outfile, 'w')
    json.dump(obj, outfile, indent=4)
Example #6
    def writeJSON(self):
        from rhapi import DEFAULT_URL, RhApi
        api = RhApi(DEFAULT_URL, debug=False)

        p = {"class": self.group}
        results = api.json_all(self.query, p)
        #print results
        json_ = {}
        for entry in results:
            try:
                json_[str(entry[0])].append([entry[1], entry[2]])
            except KeyError:
                json_[str(entry[0])] = [[entry[1], entry[2]]]

        for key in json_.keys():
            json_[key] = self.merge_intervals(json_[key])
        with open(self.jsonfile, 'w') as f:
            f.write(json.dumps(json_, sort_keys=True))
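merge_intervals() is not part of this snippet; a plausible sketch, assuming each entry is a [lumi_start, lumi_end] pair and overlapping or adjacent ranges should be collapsed:

    def merge_intervals(self, intervals):
        # assumed behaviour: sort the [start, end] lumi ranges and merge any
        # range that overlaps or touches the previous one
        merged = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1] + 1:
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged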
Example #7
import json
from optparse import OptionParser

from rhapi import DEFAULT_URL, RhApi


def main(argv=None):
    parser = OptionParser()
    parser.add_option("-c",
                      "--cosmics",
                      dest="cosmics",
                      action="store_true",
                      default=False,
                      help="cosmic runs")
    (option, args) = parser.parse_args()

    lst = []
    if option.cosmics:
        input_file = open('json_DCSONLY_cosmics.txt', 'r')
        json_outfile = "Run_LHCFill_RunDuration_Cosmics.json"
    else:
        input_file = open('json_DCSONLY.txt', 'r')
        json_outfile = "Run_LHCFill_RunDuration.json"

    data = json.load(input_file)

    api = RhApi(DEFAULT_URL, debug=False)
    print("Getting run duration info........")
    for key in data.keys():
        #print "Run = ",key
        q = ("select r.lhcfill from runreg_tracker.runs r "
             "where r.runnumber between " + key[:6] + " and " + key[:6])
        if option.cosmics:
            p = {"class": "Cosmics18CRUZET || Cosmics18"}
        else:
            p = {"class": "Collisions18"}

        # print "RR Query = ", q
        lhcfill_init = api.json(q, p)[u'data']
        lhcfill_middle = lhcfill_init[0]

        q1 = ("select r.duration from runreg_tracker.runs r "
              "where r.runnumber between " + key[:6] + " and " + key[:6])
        if option.cosmics:
            p1 = {"class": "Cosmics18CRUZET || Cosmics18"}
        else:
            p1 = {"class": "Collisions18"}
        # print "RR Query = ", q
        dur_init = api.json(q1, p1)[u'data']
        dur_middle = dur_init[0]

        # print key, " ", lhcfill_middle[0], "  ", dur_middle[0]

        d = {}
        d['run'] = key
        d['lhcfill'] = lhcfill_middle[0]
        d['rundur'] = dur_middle[0]
        lst.append(d)

    lst = sorted(lst, key=lambda x: x['run'])

    obj = {}
    obj[json_outfile] = lst
    #      print  json.dumps(obj,indent=2)

    outfile = open(json_outfile, 'w')
    json.dump(obj, outfile, indent=4)
    print(".....done! Output on {0}".format(json_outfile))
Example #8
# Check the runs infos from RunRegistry for Collisions in ExpressStream
# Output list saved in runlist.txt

import sys
from rhapi import RhApi, DEFAULT_URL

api = RhApi(DEFAULT_URL, debug=False)

#print api.folders()
#print api.tables('runreg_global')
#print api.tables('runreg_tracker')


def get_runs_in_fill(fill):
    q = "select r.runnumber, r.run_class_name, r.lhcfill, r.triggers, r.starttime, r.stoptime from runreg_tracker.runs r where r.run_class_name like :class and r.lhcfill = :fill order by r.runnumber asc"
    p = {"class": "Collisions%", "fill": str(fill)}
    qid = api.qid(q)
    #print api.query(qid)
    output = api.json(q, p)['data']

    runs = {}
    for run in output:
        runs[int(run[0])] = {
            'CLASSNAME': run[1],
            'FILL': run[2],
            'TRIGGERS': run[3],
            'STARTTIME': run[4],
            'STOPTIME': run[5]
        }
    return runs
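A hedged sketch of how the result could be dumped to runlist.txt, as the header comment describes (the fill number is only illustrative):

if __name__ == "__main__":
    runs = get_runs_in_fill(6371)  # hypothetical fill number
    with open("runlist.txt", "w") as out:
        for run in sorted(runs):
            out.write("%d %s\n" % (run, runs[run]['CLASSNAME']))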
Example #9
File: RC.py  Project: valdasraps/certTools
            "workspace": "TRACKER",
            "DB_column": "RDA_CMP_PIXEL"
        },
        "STRIP": {
            "workspace": "TRACKER",
            "DB_column": "RDA_CMP_STRIP"
        },
        "TRACK": {
            "workspace": "TRACKER",
            "DB_column": "RDA_CMP_TRACKING"
        },
    }

    new_url = "http://vocms00170:2113"
    dev_url = "http://vocms0185/rhapi"
    api = RhApi(new_url, debug=False)

    sorted_list_of_POGS = [
        'CSC', 'CTPPS', 'DT', 'ECAL', 'ES', 'HCAL', 'HLT', 'L1tmu', 'L1tcalo',
        'RPC', 'PIX', 'STRIP', 'TRACK'
    ]

    # get the list of runs, their bfield, and the number of events
    runlist = get_bfield_events("GLOBAL", "Online", options.group,
                                options.days, options.rr_files)

    for pog in sorted_list_of_POGS:
        logging.info("Checking %s workspace for Express runs" % (pog))
        columns = ['rda_wor_name', 'run_number', 'rda_state']
        if pog != "CTPPS":
            db_column = map_DB_to_column[pog.upper()]['DB_column'].lower()
Example #10
    options = parser.parse_args()

    if options.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO

    logging.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S', level=log_level)

    logging.debug("verbose:%s" % (options.verbose))
    runs_data = {}

    new_url = "http://vocms00170:2113"
    dev_url = "http://vocms0185/rhapi"
    api = RhApi(dev_url, debug=False)

    checkRR_sync()

    runs_data["online"] = sorted(getRR2("Online", options.group).keys())
    runs_data["offline"] = getRR2("PromptReco/", options.group, columns=["rda_state"])
    runs_data["lowbfield"] = getRR2("Online", options.group, special="bfield")
    runs_data["specialHLT"] = getRR2("Online", options.group, hlt_key="/cdaq/special",
            special="hlt")

    __runs_completed = []
    __runs_signoff = []
    __runs_open = []
    __short_run = []
    __short_run_online = []
    __short_run_offline = []
Example #11
import pandas as pd
from rhapi import RhApi

DEFAULT_URL = "http://vocms00170:2113"
api = RhApi(DEFAULT_URL, debug=False)

MAX_FILL = 6291
MIN_FILL = 6018

for fill in range(MIN_FILL, MAX_FILL + 1):
    query = ("select * from wbm.runs r where r.lhcfill = %s" % fill)
    response = api.csv(query)

    with open("tmp.csv", "w") as file:
        file.write(response)
    df = pd.read_csv("tmp.csv")

    lhcFills = {}
    for _, row in df.iterrows():
        with open("./data/lhc_fills.csv", "a+") as file:
            file.write("%s, %s \n" % (row["RUNNUMBER"], fill))
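Since api.csv() returns the CSV payload as text (it is written straight to tmp.csv above), the temporary file could be skipped by parsing the response in memory; a minimal sketch:

import io

import pandas as pd
from rhapi import RhApi

api = RhApi("http://vocms00170:2113", debug=False)
response = api.csv("select * from wbm.runs r where r.lhcfill = %s" % 6018)

# parse the CSV text directly instead of round-tripping through tmp.csv
df = pd.read_csv(io.StringIO(response))
print(df["RUNNUMBER"].tolist())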
Example #12
File: sql.py  Project: zorache/AutoDQM
def retrieve(max_run=320008,
             min_run=316766,
             folder="runreg_csc",
             table="datasets",
             ref_runs=[]):

    api = RhApi(DEFAULT_URL, debug=False)

    # Get column names and name of run column
    col_table = api.table(folder=folder, table=table)["columns"]
    cols = []
    r_num = ""
    r_num_i = 0
    for col in col_table:
        col_name = str(col["name"])
        if _get_data_col(col_name, table): cols.append(col_name)
        if not r_num and _get_run_col(col_name):
            cols.append(col_name)
            r_num = col_name
            r_num_i = cols.index(col_name)

    if not r_num:
        r_num = cols[0]

    # Form query
    c = ",".join("r." + x for x in cols)
    q = "select {0} from {1}.{2} r where r.{3}>=:minrun and r.{3}<=:maxrun order by r.{3}".format(
        c, folder, table, r_num)
    if ref_runs:
        p = {"maxrun": max(ref_runs), "minrun": min(ref_runs)}
    else:
        p = {"maxrun": str(max_run), "minrun": str(min_run)}
    qid = api.qid(q)

    # Allow for fetch() to update a pre-existing dict
    data = {}
    if isinstance(ref_runs, dict):
        dqm = ref_runs
        ref_runs = ref_runs.keys()
    skipped = 0
    it = 0
    while True:
        runs = []
        raw_data = api.json(q, p)["data"]

        for i in range(0, len(raw_data)):
            run = str(raw_data[i][r_num_i])
            # Only fetch runs relevant to AutoDQM
            if run not in ref_runs: continue
            # Get source of data (Global, Express, or PromptReco)
            if "RDA_NAME" in cols:
                rda_name = raw_data[i][cols.index("RDA_NAME")].lower()
            runs.append(run)
            # Only make a new entry for new runs
            if run not in data:
                data[run] = {}
            for j in range(0, len(raw_data[i])):
                if j == r_num_i: continue
                # Handle <folder>.datasets
                if table != "runs":
                    if "is_good" not in data[run]: pass
                    elif not data[run]["is_good"]: continue
                    else:
                        # The "BAD" tag gets priority
                        if raw_data[i][j] == "BAD":
                            data[run]["is_good"] = False
                        # Skip "NONSET" tags if marked as "GOOD" elsewhere
                        continue
                    if "RDA_NAME" in data[run]:
                        # Update source
                        data[run]["RDA_NAME"] = rda_name
                    # Update status
                    if raw_data[i][j] == "GOOD": data[run]["is_good"] = True
                    elif raw_data[i][j] == "BAD": data[run]["is_good"] = False
                    else: continue
                # Handle <folder>.runs
                else:
                    data[run][cols[j]] = raw_data[i][j]

            if table != "runs":
                # handle cases where all statuses are NOTSET
                if "is_good" not in data[run]: data[run]["is_good"] = False

        if (len(raw_data) < 1 or max(runs) == p["minrun"]
                or p["minrun"] >= p["maxrun"]):
            break
        p["minrun"] = max(runs)
        it += 1

    if table == "runs" and ref_runs:
        refs = {"ref_data": [], "ref_cands": []}
        for run in data:
            if run == max(ref_runs): continue
            refs["ref_data"].append(
                dict(ref.get_wbm_data(max(ref_runs), run, data), **dqm[run]))
        refs["ref_cands"] = ref.get_ref_cands(refs["ref_data"])
        return refs
    elif data:
        return data
    else:
        return None
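A hypothetical call of retrieve() (the helpers _get_data_col, _get_run_col, DEFAULT_URL and the ref module are assumed to be defined elsewhere in sql.py):

# hypothetical usage: fetch CSC dataset flags for two AutoDQM runs of interest
csc_data = retrieve(folder="runreg_csc", table="datasets",
                    ref_runs=["316766", "320008"])
if csc_data:
    good = [run for run, info in csc_data.items() if info.get("is_good")]
    print("%d good runs out of %d" % (len(good), len(csc_data)))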
Example #13
import json
import sys
from os import chdir, getcwd

test_source_dir = getcwd()
chdir(".")
main_source_dir = getcwd()

sys.path.insert(0, main_source_dir)
from rhapi import RhApi, CLIClient
chdir(test_source_dir)

# App URL
URL = "http://runregistry.web.cern.ch/runregistry/:8112"
SCHEMA_NAME = u'store'
TABLE = u'customer'
COUNT = 1
RESOURCES_PATH = './resources/'
RHAPI_PATH = main_source_dir + '/rhapi.py'
api = RhApi(URL, debug=False)
clicl = CLIClient()


class Files(object):
    def saveToFile(self, query_data, file_name):
        if isinstance(query_data, (dict, list)):
            with open(file_name, 'w') as f:
                json.dump(query_data, f)
        else:
            with open(file_name, 'w+') as output_file:
                output_file.write(query_data)

    def loadFromFile(self, file_name, t):
Example #14
        __run_alias, options.min, __run_alias, options.max)

    if options.infile:
        print("Opening file %s which contains the run list" % (options.infile))
        with open(options.infile, "r") as inputfile:
            for run in inputfile:
                run = run.strip()  # drop the trailing newline read from the file
                print(run)
                __query += "OR %s.RUNNUMBER = %s " % (__run_alias, run)

    print("Using run class name: %s" % (options.data_type))
    __query += "AND %s.RUN_CLASS_NAME = '%s' " % (__run_alias,
                                                  options.data_type)
    __query += "AND %s.RDA_NAME like '%s' " % (__dataset_alias, "%Online%")

    # do a join
    __query += "AND %s.RUNNUMBER = %s.RUN_NUMBER" % (__run_alias,
                                                     __dataset_alias)

    try:
        api = RhApi("http://vocms00170:2113", debug=True)
        rr_data = api.json(__query, inline_clobs=True)
    except Exception as ex:
        print("Error while using RestHub API: %s" % (ex))
        sys.exit(-1)

    # print rr_data
    print("RUN_NUMBER\tEVENTS\tBFIELD\tHLTKEYDESCRIPTION")
    rr_data["data"].sort(key=lambda x: x[0])
    for el in rr_data["data"]:
        print("%s\t%s\t%s\t%s" % (el[0], el[1] if el[1] else "", el[2], el[3]))