def getRR(first=272000, last=326000, tkr_IN=True, tkr_strip_status='GOOD',
          tkr_pix_status='GOOD', name='UL', cl='Collisions'):
    runs = runregistry.get_datasets(
        filter={
            'run_number': {
                'and': [{'>': first}, {'<': last}]
            },
            'tracker_included': tkr_IN,
            'tracker-strip': tkr_strip_status,
            # 'tracker-pixel': tkr_pix_status,
            'class': {'like': '%{}%'.format(cl)},
            'dataset_name': {'like': '%{}%'.format(name)},
        })
    return runs
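# Hedged usage sketch for getRR above (an illustration, not part of the
# original source); it assumes the `runregistry` client is already imported
# by the module and that each returned dataset dictionary carries
# 'run_number' and 'name' keys, as in the other snippets in this collection.
def print_ul_collisions(first=315000, last=316000):
    for dataset in getRR(first=first, last=last, name='UL', cl='Collisions'):
        print(dataset['run_number'], dataset['name'])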
def get_runs_by_list(self, list_of_run_numbers):
    """
    Get the list of run dictionaries from the Tracker workspace in the Run Registry.

    Example:
    >>> client = TrackerRunRegistryClient()
    >>> runs = client.get_runs_by_list(["323423"])
    >>> runs[0]["state"]
    'COMPLETED'

    :param list_of_run_numbers: list of run numbers
    :return: list of dataset dictionaries matching the given run numbers
    """
    if not list_of_run_numbers:
        return []
    runs = runregistry.get_datasets(
        filter={
            'run_number': {'or': list_of_run_numbers},
            'dataset_name': {'notlike': '%online%'}
        })
    return runs
def retrieve_dataset(run_number):
    datasets = runregistry.get_datasets(
        filter={'run_number': {'=': run_number}})
    for dataset in datasets:
        # Skip online datasets and datasets whose reconstruction has already
        # been certified for this run.
        if "online" not in dataset["name"]:
            if not TrackerCertification.objects.filter(
                    runreconstruction__run__run_number=run_number,
                    runreconstruction__reconstruction=get_reco_from_dataset(
                        dataset["name"])).exists():
                return dataset["name"]
    if len(datasets) == 0:
        raise Exception("No available datasets for run {}".format(run_number))
    else:
        raise Exception("Run {} has been fully certified".format(run_number))
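# Hedged usage sketch (an illustration, not part of the original source):
# retrieve_dataset above raises both when no dataset exists and when every
# dataset of the run is already certified, so a caller would typically guard
# the call.
def next_dataset_to_certify(run_number):
    try:
        return retrieve_dataset(run_number)
    except Exception as error:
        print(error)  # e.g. "Run 323423 has been fully certified"
        return None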
def get_dataset_name(run_id, keyword=express_2017_settings.KEYWORD_FOR_PULLED_LABEL):
    if run_id not in dataset_name_cache:
        # Only query the Run Registry on a cache miss.
        dataset_informations = rr.get_datasets(filter={'run_number': run_id})
        try:
            dataset_name_cache[run_id] = tuple(
                map(lambda x: x['name'],
                    filter(lambda x: keyword in x['name'],
                           dataset_informations)))[0]
            print(run_id, dataset_name_cache[run_id])
        except IndexError:
            print("!! There is no dataset_name for selected keyword '{}' !!".format(
                keyword))
            exit()
    return dataset_name_cache[run_id]
def retrieve_dataset_by_reco(run_number, reco):
    datasets = runregistry.get_datasets(
        filter={'run_number': {'=': run_number}})
    for dataset in datasets:
        name = dataset["name"]
        if reco == "rereco":
            # Plain ReReco: exclude the ultra-legacy (UL) reprocessing.
            if "rereco" in name.lower() and "UL" not in name:
                return name
        elif reco == "rerecoul":
            # Ultra-legacy ReReco: require both "rereco" and "UL" in the name.
            if "rereco" in name.lower() and "UL" in name:
                return name
        elif reco in name.lower():
            return name
    raise Exception("Could not find reconstruction {} for run {}".format(
        reco, run_number))
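# Hedged sketch (an illustration, not part of the original source): collect
# the dataset name for each reconstruction type of a run with
# retrieve_dataset_by_reco above; the reco labels mirror the ones handled in
# the surrounding snippets.
def datasets_by_reco(run_number):
    names = {}
    for reco in ("express", "prompt", "rereco", "rerecoul"):
        try:
            names[reco] = retrieve_dataset_by_reco(run_number, reco)
        except Exception:
            names[reco] = ""  # no dataset of this type for the run
    return names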
def get_dataset_name(recon_name="PromptReco", run_id=None):
    dataset_name_cache_key = "%s___%s" % (recon_name, run_id)
    if dataset_name_cache_key in dataset_name_cache:
        # print("Yay, found %s" % (dataset_name_cache_key))
        return dataset_name_cache[dataset_name_cache_key]
    if recon_name in regex_cache:
        r = regex_cache[recon_name]
    else:
        r = re.compile(".*{}".format(recon_name))
        regex_cache[recon_name] = r
    dataset_informations = rr.get_datasets(filter={'run_number': int(run_id)})
    dataset_names = list(map(lambda x: x['name'], dataset_informations))
    prompt_reco_dataset = list(filter(r.match, dataset_names))
    if len(prompt_reco_dataset) > 1:
        raise PromptRecoDatasetNotUniqueError
    dataset_name_cache[dataset_name_cache_key] = prompt_reco_dataset[0]
    return prompt_reco_dataset[0]
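# The two get_dataset_name variants above rely on module-level caches and on
# the Run Registry client imported as `rr`. A minimal sketch of how those
# globals might be set up (an assumption, they are not shown in these excerpts):
import re
import runregistry as rr

dataset_name_cache = {}  # run id or "<recon_name>___<run_id>" -> dataset name
regex_cache = {}         # reconstruction name -> compiled regular expression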
def get_datasets_of_runs(runs, user):
    for run in runs:
        datasets = runregistry.get_datasets(
            filter={
                'run_number': {'=': run["run_number"]},
                'global_state': {'=': 'OPEN'},
            })
        today = timezone.now().strftime("%Y-%m-%d")
        run_check = OpenRuns.objects.filter(run_number=run["run_number"])
        if not run_check.exists():
            dataset_express = ""
            dataset_prompt = ""
            dataset_rereco = ""
            dataset_rereco_ul = ""
            state_express = ""
            state_prompt = ""
            state_rereco = ""
            state_rereco_ul = ""
            for dataset in datasets:
                name = dataset["name"]
                # Fall back to "SIGNOFF" when the global state is missing.
                state = dataset["dataset_attributes"].get("global_state", "SIGNOFF")
                if "express" in name.lower():
                    dataset_express = name
                    state_express = state
                elif "prompt" in name.lower():
                    dataset_prompt = name
                    state_prompt = state
                elif "rereco" in name.lower() and "UL" in name:
                    dataset_rereco_ul = name
                    state_rereco_ul = state
                elif "rereco" in name.lower():
                    dataset_rereco = name
                    state_rereco = state
            if dataset_express or dataset_prompt or dataset_rereco or dataset_rereco_ul:
                OpenRuns.objects.create(run_number=run["run_number"],
                                        dataset_express=dataset_express,
                                        user=user,
                                        state_express=state_express,
                                        date_retrieved=today)
                OpenRuns.objects.filter(run_number=run["run_number"]).update(
                    dataset_prompt=dataset_prompt,
                    state_prompt=state_prompt,
                    dataset_rereco=dataset_rereco,
                    state_rereco=state_rereco,
                    dataset_rereco_ul=dataset_rereco_ul,
                    state_rereco_ul=state_rereco_ul)
        else:
            run_check.update(date_retrieved=today)
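# Hedged usage sketch (an illustration, not part of the original source):
# get_datasets_of_runs above only needs an iterable of dictionaries with a
# 'run_number' key plus the Django user opening the runs, so a view could
# drive it like this; `open_runs_for_user` is a hypothetical helper name.
def open_runs_for_user(request, run_numbers):
    runs = [{"run_number": number} for number in run_numbers]
    get_datasets_of_runs(runs, request.user)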
for run in sorted_runs:
    run_num = run['run_number']
    dataset_str = ''
    ntriggers = run['oms_attributes']['l1_triggers_counter']
    ls_good = 0
    ls_tot = 0
    pixel_flag = 'NOTSET'
    strip_flag = 'NOTSET'
    tracking_flag = 'NOTSET'
    datasets = runregistry.get_datasets(
        filter={
            'run_number': run_num,
            'class': {'like': '%Collisions%'},
            'dataset_name': {'like': '%' + stream + '%'}
        })
    ndat = len(datasets)
    if ndat != 1:
        if ndat == 0:
            print('>> No dataset found for the run ' + str(run_num) +
                  ' !! Skipping it.')
        if ndat > 1:
            print('>> Several datasets found for the run ' + str(run_num) +
                  ' !! Skipping it.')
            print('>> The list is : ')
            for dataset in datasets:
                print('>> ', dataset['name'])
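# The loop above assumes `sorted_runs` and `stream` are defined earlier in
# the script. A minimal sketch of how they might be prepared with the same
# runregistry client (an assumption, not part of the original excerpt):
import runregistry

stream = 'Express'
runs = runregistry.get_runs(
    filter={'run_number': {'and': [{'>': 315000}, {'<': 316000}]}})
sorted_runs = sorted(runs, key=lambda run: run['run_number'])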