def main(): """Main function""" # get the options optmgr = MyOptionParser() opts = optmgr.get_opt() # build the result from user input result = Result(unicode(opts.path)) result.description = unicode(opts.desc) result.author = unicode(opts.author) result.creation_time = opts.datetime # connect to the MySQL database using default credentials dbstore = DbStore() # unless the source is set, prompt the user and present a list to make a choice if opts.inputSamples is None: inputSamples = prompt_samples(dbstore) else: inputSamples = parse_samples(opts.inputSamples) # create and store the relations samples = dbstore.find(Sample,Sample.sample_id.is_in(inputSamples)) if samples.is_empty(): dbstore.add(result) else: for sample in samples: sample.results.add(result) print result if confirm(prompt="Insert into the database?", resp=True): dbstore.commit()
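# `prompt_samples` and `parse_samples` are helpers defined elsewhere in this
# script. As an illustration only (the real signature and behavior may differ),
# a minimal parse_samples could turn a comma-separated option string into a
# list of sample ids:
#
#   def parse_samples(inputSamples):
#       # e.g. "12,15,42" -> [12, 15, 42]
#       return [int(x) for x in inputSamples.split(',')]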
def main(): """Main function""" # get the options optmgr = MyOptionParser() opts = optmgr.get_opt() # connect to the MySQL database using default credentials dbstore = DbStore() # check that the LHCO exists and obtain the dataset id check = dbstore.find(Sample,Sample.sample_id==opts.lhco_id) if check.is_empty() or check.one().sampletype != "LHCO": raise IndexError("No LHCO with such index: %d"%opts.lhco_id) opts.dataset = findDataset(check.one()) if opts.dataset is None: raise RuntimeError("Impossible to get the dataset id.") # check that the process exists check = dbstore.find(MadWeight,MadWeight.process_id==opts.process) if check.is_empty(): raise IndexError("No process with such index: %d"%opts.process) # create the MW run object mw_run = MadWeightRun(opts.process,opts.lhco_id) mw_run.systematics = unicode(opts.syst) mw_run.user_comment = unicode(opts.comment) mw_run.version = opts.version if mw_run.version is None: check = dbstore.find(MadWeightRun,(MadWeightRun.madweight_process==mw_run.madweight_process) & (MadWeightRun.lhco_sample_id==mw_run.lhco_sample_id)) if not check.is_empty(): mw_run.version = check.order_by(MadWeightRun.version).last().version + 1 else: mw_run.version = 1 else: check = dbstore.find(MadWeightRun,(MadWeightRun.madweight_process==mw_run.madweight_process) & (MadWeightRun.lhco_sample_id==mw_run.lhco_sample_id) & (MadWeightRun.version==mw_run.version)) if not check.is_empty(): raise RuntimeError("There is already one such MadWeight run with the same version number:\n%s\n"%str(check.one())) # read the file inputfile = open(opts.filepath) count = 0 for line in inputfile: data = line.rstrip('\n').split('\t') # get the event run_number = int(data[0].split('.')[0]) event_number = int(data[0].split('.')[1]) event_query = dbstore.find(Event, (Event.event_number==event_number) & (Event.run_number==run_number) & (Event.dataset_id==opts.dataset)) if event_query.is_empty(): event = Event(event_number,run_number,opts.dataset) else: event = event_query.one() # create the weight weight = Weight() weight.event = event weight.mw_run = mw_run weight.value = float(data[1]) weight.uncertainty = float(data[2]) dbstore.add(weight) count += 1 # confirm and commit print mw_run print "Adding weights to %d events."%count if confirm(prompt="Insert into the database?", resp=True): dbstore.commit()
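# For reference, the parsing loop above expects `opts.filepath` to hold one
# weight per line, tab-separated, with the event identifier encoded as
# "run.event". A hypothetical two-line input would look like:
#
#   1.1042<TAB>1.23e-12<TAB>4.5e-14
#   1.1043<TAB>9.87e-13<TAB>3.2e-14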
def add_sample(NAME, localpath, type, dataset_nevents, nselected, AnaUrl, FWUrl, dataset_id):
    # Large part of this imported from SAMADhi add_sample.py
    sample = Sample(unicode(NAME), unicode(localpath), unicode(type), dataset_nevents)
    sample.nevents = nselected
    sample.normalization = 1.0
    sample.luminosity = 40028954.499 / 1e6  # FIXME: figure out the fix for data whenever the tools will stabilize and be on cvmfs
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    # sample.user_comment =
    sample.source_dataset_id = dataset_id
    # sample.source_sample_id = None
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    # sample.creation_time =
    # connect to the MySQL database using default credentials
    dbstore = DbStore()
    # check that the source dataset exists
    if dbstore.find(Dataset, Dataset.dataset_id == sample.source_dataset_id).is_empty():
        raise IndexError("No dataset with such index: %d" % sample.source_dataset_id)
    # check that there is no existing entry
    checkExisting = dbstore.find(Sample, Sample.name == sample.name)
    if checkExisting.is_empty():
        print sample
        if confirm(prompt="Insert into the database?", resp=True):
            dbstore.add(sample)
            # compute the luminosity, if possible
            if sample.luminosity is None:
                dbstore.flush()
                sample.luminosity = sample.getLuminosity()
    else:
        existing = checkExisting.one()
        prompt = "Replace existing "
        prompt += str(existing)
        prompt += "\nby new "
        prompt += str(sample)
        prompt += "\n?"
        if confirm(prompt, resp=False):
            existing.replaceBy(sample)
            if existing.luminosity is None:
                dbstore.flush()
                existing.luminosity = existing.getLuminosity()
    # commit
    dbstore.commit()
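# A sketch of how this function might be called (all values hypothetical; the
# actual sample names, paths, repository URLs and ids depend on the local
# SAMADhi instance):
#
#   add_sample("TTJets_madgraph_v1", "/storage/user/samples/TTJets", "MC",
#              1000000, 950000,
#              "AnaRepo@abc1234", "FWRepo@def5678",
#              dataset_id=42)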
def add_sample(NAME, localpath, type, nevents, nselected, AnaUrl, FWUrl, dataset_id, sumw,
               has_job_processed_everything, dataset_nevents, files, processed_lumi=None):
    dbstore = DbStore()
    sample = None
    # check that the source dataset exists
    if dbstore.find(Dataset, Dataset.dataset_id == dataset_id).is_empty():
        raise IndexError("No dataset with such index: %d" % dataset_id)
    # check whether there is an existing entry
    update = False
    checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
    if checkExisting.is_empty():
        sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
    else:
        update = True
        sample = checkExisting.one()
        sample.removeFiles(dbstore)
    sample.nevents_processed = nevents
    sample.nevents = nselected
    sample.normalization = 1
    sample.event_weight_sum = sumw
    # sample.luminosity = 40028954.499 / 1e6  # FIXME: figure out the fix for data whenever the tools will stabilize and be on cvmfs
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    if not has_job_processed_everything:
        sample.user_comment = unicode("Sample was not fully processed, only " + str(nevents) + "/" + str(dataset_nevents) + " events were processed")
    else:
        sample.user_comment = u""
    sample.source_dataset_id = dataset_id
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    if processed_lumi:
        # convert to JSON
        import json
        processed_lumi = json.dumps(processed_lumi, separators=(',', ':'))
        sample.processed_lumi = unicode(processed_lumi)
    else:
        sample.processed_lumi = None
    for f in files:
        sample.files.add(f)
    if not update:
        dbstore.add(sample)
        if sample.luminosity is None:
            sample.luminosity = sample.getLuminosity()
        print sample
        if confirm(prompt="Insert into the database?", resp=True):
            dbstore.commit()
            return
    else:
        sample.luminosity = sample.getLuminosity()
        prompt = "A sample with the same name already exists in the database. Replace by:\n"
        prompt += str(sample)
        prompt += "\n?"
        if confirm(prompt, resp=False):
            dbstore.commit()
            return
    # rollback
    dbstore.rollback()
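# A hypothetical call, assuming `my_files` is an iterable of File objects and
# `processed_lumi` a dict in CMS compact-lumi format (all values illustrative):
#
#   add_sample("SingleMuon_Run2015D", "/storage/data/SingleMuon", "DATA",
#              nevents=500000, nselected=480000,
#              AnaUrl="AnaRepo@abc1234", FWUrl="FWRepo@def5678",
#              dataset_id=42, sumw=500000.0,
#              has_job_processed_everything=False, dataset_nevents=510000,
#              files=my_files, processed_lumi={"256677": [[1, 100]]})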
def add_merged_sample(NAME, type, AnaUrl, FWUrl, samples, comment):
    # samples is a list of dicts, each containing three keys: 'process', 'dataset_id', 'sample_id'
    dbstore = DbStore()
    sample = None
    # check that the source dataset exists
    # Skip: should exist, the check has been done before calling this function
    # check whether there is an existing entry
    update = False
    localpath = ''
    nevents = 0
    checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
    if checkExisting.is_empty():
        sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
    else:
        update = True
        sample = checkExisting.one()
        sample.removeFiles(dbstore)
    # collecting contents
    sample.nevents_processed = 0
    sample.nevents = 0
    sample.normalization = 1
    sample.event_weight_sum = 0
    extras_event_weight_sum = {}
    dataset_nevents = 0
    processed_lumi = LumiList()
    for i, s in enumerate(samples):
        if i == 0:
            sample.source_dataset_id = s['dataset_id']
            sample.source_sample_id = s['sample_id']
        results = dbstore.find(Sample, Sample.sample_id == s['sample_id'])
        # Should exist, the check has been done before calling this function
        sample.nevents_processed += results[0].nevents_processed
        sample.nevents += results[0].nevents
        sample.event_weight_sum += results[0].event_weight_sum
        extra_sumw = results[0].extras_event_weight_sum
        if extra_sumw is not None:
            extra_sumw = json.loads(extra_sumw)
            for key in extra_sumw:
                try:
                    extras_event_weight_sum[key] += extra_sumw[key]
                except KeyError:
                    extras_event_weight_sum[key] = extra_sumw[key]
        tmp_processed_lumi = results[0].processed_lumi
        if tmp_processed_lumi is not None:
            tmp_processed_lumi = json.loads(tmp_processed_lumi)
            processed_lumi = processed_lumi | LumiList(compactList=tmp_processed_lumi)
        # Get info from the file table
        results = dbstore.find(File, File.sample_id == s['sample_id'])
        for lfn, pfn, event_weight_sum, file_extras_event_weight_sum, nevents in list(results.values(File.lfn, File.pfn, File.event_weight_sum, File.extras_event_weight_sum, File.nevents)):
            f = File(lfn, pfn, event_weight_sum, file_extras_event_weight_sum, nevents)
            sample.files.add(f)
        # Get info from the parent datasets
        results = dbstore.find(Dataset, Dataset.dataset_id == s['dataset_id'])
        dataset_nevents += results[0].nevents
    if len(extras_event_weight_sum) > 0:
        sample.extras_event_weight_sum = unicode(json.dumps(extras_event_weight_sum))
    if len(processed_lumi.getCompactList()) > 0:
        sample.processed_lumi = unicode(json.dumps(processed_lumi.getCompactList()))
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    if sample.nevents_processed != dataset_nevents:
        sample.user_comment = unicode("Sample was not fully processed, only " + str(sample.nevents_processed) + "/" + str(dataset_nevents) + " events were processed. " + comment)
    else:
        sample.user_comment = unicode(comment)
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    if not update:
        dbstore.add(sample)
        if sample.luminosity is None:
            sample.luminosity = sample.getLuminosity()
        print sample
        dbstore.commit()
        return
    else:
        sample.luminosity = sample.getLuminosity()
        print("Sample updated")
        print(sample)
        dbstore.commit()
        return
    # rollback
    dbstore.rollback()
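# A hypothetical call: `samples` is a list of dicts as described above (ids
# illustrative; the referenced samples and datasets must already exist in the
# database, since this function skips those checks):
#
#   add_merged_sample("TTJets_merged", "MC",
#                     "AnaRepo@abc1234", "FWRepo@def5678",
#                     [{'process': 'TTJets', 'dataset_id': 42, 'sample_id': 7},
#                      {'process': 'TTJets', 'dataset_id': 43, 'sample_id': 8}],
#                     "merged extension datasets")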
def add_sample(NAME, localpath, type, nevents, nselected, AnaUrl, FWUrl, dataset_id, sumw,
               extras_sumw, has_job_processed_everything, dataset_nevents, files, processed_lumi=None):
    dbstore = DbStore()
    sample = None
    # check that the source dataset exists
    if dbstore.find(Dataset, Dataset.dataset_id == dataset_id).is_empty():
        raise IndexError("No dataset with such index: %d" % dataset_id)
    # check whether there is an existing entry
    update = False
    checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
    if checkExisting.is_empty():
        sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
    else:
        update = True
        sample = checkExisting.one()
        sample.removeFiles(dbstore)
    sample.nevents_processed = nevents
    sample.nevents = nselected
    sample.normalization = 1
    sample.event_weight_sum = sumw
    sample.extras_event_weight_sum = unicode(json.dumps(extras_sumw, separators=(',', ':')))
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    if not has_job_processed_everything:
        sample.user_comment = unicode("Sample was not fully processed, only " + str(nevents) + "/" + str(dataset_nevents) + " events were processed")
    else:
        sample.user_comment = u""
    sample.source_dataset_id = dataset_id
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    if processed_lumi:
        # convert to JSON
        processed_lumi = json.dumps(processed_lumi, separators=(',', ':'))
        sample.processed_lumi = unicode(processed_lumi)
    else:
        sample.processed_lumi = None
    for f in files:
        sample.files.add(f)
    if not update:
        dbstore.add(sample)
        if sample.luminosity is None:
            sample.luminosity = sample.getLuminosity()
        print sample
        dbstore.commit()
        return
    else:
        sample.luminosity = sample.getLuminosity()
        print("Sample updated")
        print(sample)
        dbstore.commit()
        return
    # rollback (unreachable here: both branches above commit and return)
    dbstore.rollback()
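# Same calling pattern as the interactive variant above, with the per-sample
# extra event-weight sums passed explicitly (all values hypothetical):
#
#   add_sample("TTJets_madgraph_v2", "/storage/user/samples/TTJets", "MC",
#              nevents=1000000, nselected=950000,
#              AnaUrl="AnaRepo@abc1234", FWUrl="FWRepo@def5678",
#              dataset_id=42, sumw=1.2e6,
#              extras_sumw={"scale_up": 1.3e6, "scale_down": 1.1e6},
#              has_job_processed_everything=True, dataset_nevents=1000000,
#              files=my_files)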
def add_sample(NAME, localpath, type, nevents, nselected, AnaUrl, FWUrl, dataset_id, sumw,
               has_job_processed_everything, dataset_nevents, files, processed_lumi=None):
    dbstore = DbStore()
    sample = None
    # check that the source dataset exists
    if dbstore.find(Dataset, Dataset.dataset_id == dataset_id).is_empty():
        raise IndexError("No dataset with such index: %d" % dataset_id)
    # check whether there is an existing entry
    update = False
    checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
    if checkExisting.is_empty():
        sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
    else:
        update = True
        sample = checkExisting.one()
        sample.removeFiles(dbstore)
    sample.nevents_processed = nevents
    sample.nevents = nselected
    sample.normalization = 1
    sample.event_weight_sum = sumw
    # sample.luminosity = 40028954.499 / 1e6  # FIXME: figure out the fix for data whenever the tools will stabilize and be on cvmfs
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    if not has_job_processed_everything:
        sample.user_comment = unicode("Sample was not fully processed, only " + str(nevents) + "/" + str(dataset_nevents) + " events were processed")
    else:
        sample.user_comment = u""
    sample.source_dataset_id = dataset_id
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    if processed_lumi:
        # convert to JSON
        import json
        processed_lumi = json.dumps(processed_lumi, separators=(',', ':'))
        sample.processed_lumi = unicode(processed_lumi)
    else:
        sample.processed_lumi = None
    for f in files:
        sample.files.add(f)
    if not update:
        dbstore.add(sample)
        if sample.luminosity is None:
            sample.luminosity = sample.getLuminosity()
        print sample
        if confirm(prompt="Insert into the database?", resp=True):
            dbstore.commit()
            return
    else:
        sample.luminosity = sample.getLuminosity()
        prompt = "A sample with the same name already exists in the database. Replace by:\n"
        prompt += str(sample)
        prompt += "\n?"
        if confirm(prompt, resp=False):
            dbstore.commit()
            return
    # rollback
    dbstore.rollback()
def add_sample(NAME, localpath, type, nevents, nselected, AnaUrl, FWUrl, dataset_id, sumw,
               extras_sumw, has_job_processed_everything, dataset_nevents, files, processed_lumi=None):
    dbstore = DbStore()
    sample = None
    # check that the source dataset exists
    if dbstore.find(Dataset, Dataset.dataset_id == dataset_id).is_empty():
        raise IndexError("No dataset with such index: %d" % dataset_id)
    # check whether there is an existing entry
    update = False
    checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
    if checkExisting.is_empty():
        sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
    else:
        update = True
        sample = checkExisting.one()
        sample.removeFiles(dbstore)
    sample.nevents_processed = nevents
    sample.nevents = nselected
    sample.normalization = 1
    sample.event_weight_sum = sumw
    sample.extras_event_weight_sum = unicode(json.dumps(extras_sumw, separators=(',', ':')))
    sample.code_version = unicode(AnaUrl + ' ' + FWUrl)  # NB: limited to 255 characters, but so far so good
    if not has_job_processed_everything:
        sample.user_comment = unicode("Sample was not fully processed, only " + str(nevents) + "/" + str(dataset_nevents) + " events were processed")
    else:
        sample.user_comment = u""
    sample.source_dataset_id = dataset_id
    sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
    if processed_lumi:
        # convert to JSON
        processed_lumi = json.dumps(processed_lumi, separators=(',', ':'))
        sample.processed_lumi = unicode(processed_lumi)
    else:
        sample.processed_lumi = None
    for f in files:
        sample.files.add(f)
    if not update:
        dbstore.add(sample)
        if sample.luminosity is None:
            sample.luminosity = sample.getLuminosity()
        print sample
        dbstore.commit()
        return
    else:
        sample.luminosity = sample.getLuminosity()
        print("Sample updated")
        print(sample)
        dbstore.commit()
        return
    # rollback (unreachable here: both branches above commit and return)
    dbstore.rollback()
def main(): """Main function""" # get the options optmgr = MyOptionParser() opts = optmgr.get_opt() # build the sample from user input sample = Sample(unicode(opts.name), unicode(opts.path), unicode(opts.sampletype), opts.nevents_processed) sample.nevents = opts.nevents sample.normalization = opts.normalization sample.luminosity = opts.luminosity sample.code_version = unicode(opts.code_version) sample.user_comment = unicode(opts.user_comment) sample.source_dataset_id = opts.source_dataset_id sample.source_sample_id = opts.source_sample_id sample.author = unicode(opts.author) sample.creation_time = opts.datetime # connect to the MySQL database using default credentials dbstore = DbStore() # unless the source is set, prompt the user and present a list to make a choice if sample.source_dataset_id is None: prompt_dataset(sample,dbstore) if sample.source_sample_id is None: prompt_sample(sample,dbstore) # check that source sample and dataset exist if sample.source_dataset_id is not None: checkExisting = dbstore.find(Dataset,Dataset.dataset_id==sample.source_dataset_id) if checkExisting.is_empty(): raise IndexError("No dataset with such index: %d"%sample.source_dataset_id) if sample.source_sample_id is not None: checkExisting = dbstore.find(Sample,Sample.sample_id==sample.source_sample_id) if checkExisting.is_empty(): raise IndexError("No sample with such index: %d"%sample.source_sample_id) # if opts.nevents is not set, take #events from source sample (if set) or from source dataset (if set) in that order if sample.nevents_processed is None and sample.source_sample_id is not None: sample.nevents_processed = dbstore.find(Sample,Sample.sample_id==sample.source_sample_id).one().nevents_processed if sample.nevents_processed is None and sample.source_dataset_id is not None: sample.nevents_processed = dbstore.find(Dataset,Dataset.dataset_id==sample.source_dataset_id).one().nevents if sample.nevents_processed is None: print "Warning: Number of processed events not given, and no way to guess it." # check that there is no existing entry checkExisting = dbstore.find(Sample,Sample.name==sample.name) if checkExisting.is_empty(): print sample if confirm(prompt="Insert into the database?", resp=True): dbstore.add(sample) # compute the luminosity, if possible if sample.luminosity is None: dbstore.flush() sample.luminosity = sample.getLuminosity() else: existing = checkExisting.one() prompt = "Replace existing " prompt += str(existing) prompt += "\nby new " prompt += str(sample) prompt += "\n?" if confirm(prompt, resp=False): existing.replaceBy(sample) if existing.luminosity is None: dbstore.flush() existing.luminosity = existing.getLuminosity() # commit dbstore.commit()
def main(): """Main function""" # get the options optmgr = DASOptionParser() opts = optmgr.get_opt() host = opts.host debug = opts.verbose sample = opts.sample query1 = "dataset="+sample+" | grep dataset.name, dataset.nevents, dataset.size, dataset.tag, dataset.datatype, dataset.creation_time" query2 = "release dataset="+sample+" | grep release.name" idx = opts.idx thr = opts.threshold ckey = opts.ckey cert = opts.cert das_h = opts.das_headers # perform the DAS queries jsondict1 = get_data(host, query1, idx, 1, debug, thr, ckey, cert, das_h) jsondict2 = get_data(host, query2, idx, 1, debug, thr, ckey, cert, das_h) # check the result if len(jsondict1)>1: print "Error: more than one element in jsondict1..." tmp = [{u'dataset' : [{}]},] for i in range(0,len(jsondict1[0]["dataset"])): if jsondict1[0]["dataset"][i]["name"]==sample: for key in jsondict1[0]["dataset"][i]: tmp[0]["dataset"][0][key] = jsondict1[0]["dataset"][i][key] if not "tag" in tmp[0]["dataset"][0]: print "global tag not found: looks to be always the case now, value will be 'None'" tmp[0]["dataset"][0][u'tag']=None print "****das query:", tmp jsondict1 = tmp if not(isinstance(jsondict1, list) and len(jsondict1)==1 and isinstance(jsondict1[0], dict) and isinstance(jsondict1[0]["dataset"],list) and len(jsondict1[0]["dataset"])==1 and isinstance(jsondict1[0]["dataset"][0],dict) and isinstance(jsondict2, list) and len(jsondict2)==1 and isinstance(jsondict2[0], dict) and isinstance(jsondict2[0]["release"],list) and len(jsondict2[0]["release"])==1 and isinstance(jsondict2[0]["release"][0],dict)): raise RuntimeError("Incorrect response from DAS:\n"+str(jsondict1)+"\n"+str(jsondict2)) # prepare the summary json object jsondict1[0]["dataset"][0][u"release"] = jsondict2[0]["release"][0]["name"] jsondict1[0]["dataset"][0].update({ u"process":unicode(opts.process), u"xsection":opts.xsection, u"energy":opts.energy, u"comment":unicode(opts.comment) }) # convert the jsondict into a Dataset dataset = asDataset(jsondict1[0]["dataset"][0]) # connect to the MySQL database using default credentials dbstore = DbStore() # check that there is no existing entry checkExisting = dbstore.find(Dataset,Dataset.name==dataset.name) if checkExisting.is_empty(): print dataset if confirm(prompt="Insert into the database?", resp=True): dbstore.add(dataset) else: existing = checkExisting.one() prompt = "Replace existing entry:\n" prompt += str(existing) prompt += "\nby new entry:\n" prompt += str(dataset) prompt += "\n?" if confirm(prompt, resp=False): existing.replaceBy(dataset) # commit dbstore.commit()
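# For reference, the consistency checks above assume the two DAS queries
# reduce to one-element lists of roughly the following shape (keys taken from
# the code above, values purely hypothetical):
#
#   jsondict1 = [{u'dataset': [{u'name': u'/TTJets/SomeEra/AODSIM',
#                               u'nevents': 1000000, u'size': 1234567890,
#                               u'tag': None, u'datatype': u'mc',
#                               u'creation_time': u'2015-01-01 00:00:00'}]}]
#   jsondict2 = [{u'release': [{u'name': u'CMSSW_7_4_15'}]}]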
def main(): """Main function""" # get the options optmgr = MyOptionParser() opts = optmgr.get_opt() # build the configuration from user input madweightCfg = MadWeight(unicode(opts.name)) for card in cards: setattr(madweightCfg, card, unicode(open(opts.path+"/Cards/"+card+".dat","r").read())) # get the transfert functions madweightCfg.transfer_fctVersion = unicode(open('%s/Source/MadWeight/transfer_function/Transfer_FctVersion.txt'%opts.path,"r").read().strip('\n')) theCfg = madweightCfg.transfer_fctVersion.split(':')[0] if not os.path.exists("%s/Source/MadWeight/transfer_function/data/TF_%s.dat"%(opts.path,theCfg)): raise RuntimeError("Could not find the transfert functions TF_%s.dat"%theCfg) madweightCfg.transfer_function = unicode(open("%s/Source/MadWeight/transfer_function/data/TF_%s.dat"%(opts.path,theCfg),"r").read()) # find the generate line(s) theCfg = filter(lambda x:x.startswith("generate"),map(lambda x:x.lstrip(' \t'),madweightCfg.proc_card_mg5.splitlines())) if len(theCfg)!=1: raise RuntimeError("Could not find a unique generate statement in proc_card_mg5.dat") madweightCfg.diagram = theCfg[0][8:].lstrip(' \t') # find the ISR correction parameter theCfg = filter(lambda x:x.startswith("isr"),map(lambda x:x.lstrip(' \t'),madweightCfg.MadWeight_card.splitlines())) if len(theCfg)!=1: raise RuntimeError("Could not find a unique isr statement in MadWeight_card.dat") madweightCfg.isr=int(theCfg[0].split(None,2)[1]) # find the NWA configuration parameter theCfg = filter(lambda x:x.startswith("nwa"),map(lambda x:x.lstrip(' \t'),madweightCfg.MadWeight_card.splitlines())) if len(theCfg)!=1: raise RuntimeError("Could not find a unique nwa statement in MadWeight_card.dat") nwa = theCfg[0].split(None,2)[1] if nwa=='F': madweightCfg.nwa=False elif nwa=='T': madweightCfg.nwa=True else: raise RuntimeError("Unrecognized value for the nwa parameter in MadWeight_card.dat: %s"%nwa) # find the beam energy and store cm energy in TeV theCfg = filter(lambda x:"ebeam1" in x,madweightCfg.run_card.splitlines()) try: madweightCfg.cm_energy = float(theCfg[0].split()[0])*0.002 except: print "Cannot find the beam energy in the run card" raise # find and add the Higgs weight (can be null, so no error if missing) theCfg = filter(lambda x:x.startswith("DECAY"),map(lambda x:x.lstrip(' \t'),madweightCfg.param_card_1.splitlines())) for cfg in theCfg: fields = cfg.split() if fields[1]=="25": madweightCfg.higgs_width = float(fields[2]) # connect to the MySQL database using default credentials dbstore = DbStore() # check that there is no existing entry checkExisting = dbstore.find(MadWeight,MadWeight.name==madweightCfg.name) if checkExisting.is_empty(): print madweightCfg if confirm(prompt="Insert into the database?", resp=True): dbstore.add(madweightCfg) else: existing = checkExisting.one() prompt = "Replace existing " prompt += str(existing) prompt += "\nby new " prompt += str(madweightCfg) prompt += "\n?" if confirm(prompt, resp=False): existing.replaceBy(madweightCfg) # commit dbstore.commit()
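# For reference, the card parsing above expects lines of roughly this shape
# (illustrative excerpts, not taken from a real card set):
#
#   proc_card_mg5.dat:   generate p p > t t~
#   MadWeight_card.dat:  isr 1
#                        nwa F
#   run_card.dat:        6500 = ebeam1 ! beam 1 total energy in GeV
#
# With an ebeam1 of 6500 GeV, cm_energy = 6500 * 0.002 = 13.0 TeV.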