def answer(flow_id, question_id, answer_id):
    flow = graph.flow(flow_id)
    question = graph.question(flow_id, question_id)
    answer = first([answer for answer in question.answers if answer["id"] == answer_id])
    rewards = [rel.end for rel in answer.relationships.outgoing(["Reward"])]
    next_question = first([rel.end for rel in answer.relationships.outgoing(["Next"])])
    return render_template(
        "answer.html",
        flow=flow,
        question=question,
        answer=answer,
        rewards=rewards,
        next_question=next_question
    )
def main():
    parser = optparse.OptionParser()
    parser.add_option("-n", "--histo-name")
    parser.add_option("-o", "--output-dir", default='./')
    parser.add_option("-s", "--suffix")
    (options, args) = parser.parse_args()
    histoName = options.histo_name
    outputDir = options.output_dir
    suffix = options.suffix
    inputFileNames = args
    print '\n'.join(inputFileNames)
    sampleForFilename = dict((f, guessSampleFromFilename(f, samples)) for f in inputFileNames)
    assert all(s for f, s in sampleForFilename.iteritems()), "cannot identify sample for some inputs %s" % pformat(sampleForFilename)
    filenames = collections.defaultdict(list)
    for fn, s in sampleForFilename.iteritems():
        filenames[s].append(fn)
    assert all(len(fnames) == 1 for s, fnames in filenames.iteritems()), "expected one file per sample, got %s" % pformat(filenames)
    filenames = dict((s, first(fn)) for s, fn in filenames.iteritems())
    files = dict((s, r.TFile.Open(fn)) for s, fn in filenames.iteritems())
    if histoName:
        print "plotting %s" % histoName
        canvasname = histoName + ("_%s" % suffix if suffix else '')
        plotHisto(files, histoName, canvasname, outputDir)
    else:
        printHistoNames(first(files))
def weightedAverage(histosEff={}, histosWeight={}, histoName='', histoTitle='', verbose=False):
    getBinIndices, getBinContents, getBinning = rootUtils.getBinIndices, rootUtils.getBinContents, rootUtils.getBinning
    assert sorted(histosEff.keys()) == sorted(histosWeight.keys()), \
        "effs and weights must have the same keys:\n\teffs %s\n\tweights %s" % (str(histosEff.keys()), str(histosWeight.keys()))
    hout = first(histosEff).Clone(histoName if histoName else 'weighted_average_eff')
    hout.SetTitle(histoTitle)
    hout.Reset()

    def validateWeights(h):
        bins, values = getBinIndices(h), getBinContents(h)
        allValid = all(v >= 0.0 and v <= 1.0 for v in values)
        if not allValid:
            if verbose:
                print "warning '%s' weights not in [0,1] : [%s]" % (h.GetName(), ', '.join(("%.3f" % v for v in values)))
                print "setting them to 0.0 or 1.0"
            for b, v in zip(bins, values):
                h.SetBinContent(b, 0.0 if v < 0.0 else 1.0 if v > 1.0 else v)
        return allValid

    [validateWeights(hw) for hw in histosWeight.values()]
    bins, binning = getBinIndices(first(histosEff)), getBinning(first(histosEff))
    for h in histosWeight.values() + histosEff.values():
        if getBinning(h) != binning:
            print "warning: %s has binning %s, expecting %s" % (h.GetName(), str(getBinning(h)), str(binning))
    groups = sorted(histosEff.keys())
    epsilon = 1.0e-3
    binWeightNormalizations = [sum(histosWeight[g].GetBinContent(b) for g in groups) for b in bins]
    weightsAreNormalized = all(abs(1.0 - norm) < epsilon for norm in binWeightNormalizations)
    if not weightsAreNormalized:
        print "warning, compositions are not normalized : [%s]" % ', '.join(("%.3f" % v for v in binWeightNormalizations))
    print '-- ', histoName, '-- '
    for g in groups:
        bws, bcs = getBinContents(histosWeight[g]), getBinContents(histosEff[g])
        print "adding %18s : %s" % (g, ' : '.join("%.4f*%.4f" % (bw, bc) for bw, bc in zip(bws, bcs)))
        histoEff, histoWeight = histosEff[g], histosWeight[g]
        histoEff.Multiply(histoWeight)
        hout.Add(histoEff)
    print "tot weight : %s" % ' '.join(("%.4f" % v for v in (sum(histosWeight[g].GetBinContent(b) for g in groups) for b in bins)))
    print "weighted avg : %s" % ' '.join(("%.4f" % v for v in getBinContents(hout)))
    return hout
def buildWeightedHisto(histos={}, fractions={}, histoName='', histoTitle=''):
    "was getFinalRate"
    hout = first(histos).Clone(histoName if histoName else 'final_rate')  # should pick a better default
    hout.SetTitle(histoTitle)
    hout.Reset()
    flatFraction = type(first(fractions)) is float
    if flatFraction:
        print "averaging flat ", histoName
        print 'keys -> ', histos.keys()
        for b in getBinIndices(hout):
            tot, err2 = binWeightedSum(histos, fractions, b)
            hout.SetBinContent(b, tot)
            hout.SetBinError(b, sqrt(err2))
    else:
        bH, bF = getBinning(first(histos)), getBinning(first(fractions))
        assert bH == bF, "different binning %s: %s, %s: %s" % (first(histos).GetName(), bH, first(fractions).GetName(), bF)
        weightedHistos = dict((p, h.Clone(h.GetName() + '_weighted_for_' + histoName)) for p, h in histos.iteritems())  # preserve originals
        print "averaging 2d ", histoName
        for b in getBinIndices(hout):
            print "bin %d (w=%.1f): %.3f = %s" % (b,
                                                  sum(fractions[p].GetBinContent(b) for p in fractions.keys()),
                                                  sum(fractions[p].GetBinContent(b) * weightedHistos[p].GetBinContent(b) for p in fractions.keys()),
                                                  '+'.join("%.2f*%.2f" % (fractions[p].GetBinContent(b), weightedHistos[p].GetBinContent(b)) for p in fractions.keys()))
        for p, h in weightedHistos.iteritems():
            h.Multiply(fractions[p])
            hout.Add(h)
    return hout
def plotStackedHistos(histosFlavorSlice={}, canvasName='', outputDir='./', frameTitle='stack', colors={}):
    "Plot the input histos used to compute the fractions"
    histos = histosFlavorSlice
    can = r.TCanvas(canvasName, canvasName, 800, 600)
    can.cd()
    stack = r.THStack('stack_' + canvasName, '')
    leg = topRightLegend(can, 0.275, 0.475, shift=-0.025)
    leg.SetBorderSize(0)
    colors = SampleUtils.colors if not colors else colors
    procs = sorted(histos.keys())
    for p in procs:
        h = histos[p]
        h.SetFillColor(colors[p])
        h.SetLineColor(h.GetFillColor())
        h.SetDrawOption('bar')
        stack.Add(h)
    for s in procs[::-1]:
        leg.AddEntry(histos[s], s, 'F')  # stack goes b-t, legend goes t-b
    stack.Draw('hist e')
    leg.Draw('same')
    tex = r.TLatex()
    tex.SetNDC(True)
    tex.SetTextFont(first(histos).GetTitleFont())
    tex.SetTextSize(first(histos).GetTitleSize())
    tex.DrawLatex(0.1, 0.925, frameTitle.split(';')[0])
    can.Update()  # force stack to create canMaster
    canMaster = stack.GetHistogram()
    canMaster.SetTitle(frameTitle)
    canMaster.Draw('axis same')
    can._graphical_objects = [stack, canMaster, leg, tex] + [h for h in stack.GetStack()]
    can.Update()
    for ext in ['png', 'eps']:
        outFilename = outputDir + '/' + canvasName + '.' + ext
        rmIfExists(outFilename)
        can.SaveAs(outFilename)
def latest_obs_and_forecast(site_id):
    result = memcache.get(site_id, "site_latest")
    if result:
        return result
    site = Site.get_by_key_name(site_id)
    if site is None:
        return None
    obs = ObservationTimestep.find_latest_by_site(site, limit=6)
    result = None
    if len(obs) > 0:
        forecasts = ForecastTimestep.find_by_site_closest_by_date(site, first(obs).observation_datetime, limit=50)
        closest_forecast = first(forecasts)
        if closest_forecast:
            matching_obs = first(filter(lambda o: o.observation_datetime == closest_forecast.forecast_datetime, obs))
            matching_forecasts = ifilter(lambda f: f.forecast_datetime == closest_forecast.forecast_datetime, forecasts)
            if matching_obs:
                # finally have both... a single obs report and multiple forecasts
                obs_dict = to_dict_excl_sites(matching_obs)
                obs_dict['best_forecast'] = map(to_dict_excl_sites, make_five_day_list(matching_forecasts))
                result = {
                    'site': site.to_dict(),
                    'observation': obs_dict
                }
                memcache.set(site_id, result, 60 * 60, namespace='site_latest')
    return result
def plotFractions(fractDict={}, outplotdir='./', prefix=''):
    """ input : fractDict[sr][lep_type][sample] = float """
    outplotdir = outplotdir if outplotdir.endswith('/') else outplotdir + '/'

    def isInterestingRegion(r):
        return any(k in r for k in ['CR8', 'WHSS', 'SSInc', 'SsEwk'])

    regions = [r for r in selectionRegions() if isInterestingRegion(r)]
    leptypes = sorted(first(fractDict).keys())
    samples = sorted(first(first(fractDict)).keys())
    ind = np.arange(len(regions))
    width = 0.5
    colors = dict(zip(samples, ['b', 'g', 'r', 'c', 'm', 'y']))
    for lt in leptypes:
        fracPerSample = dict((s, np.array([fractDict[r][lt][s] for r in regions])) for s in samples)
        below = np.zeros(len(regions))
        plots = []
        fig, ax = plt.subplots()
        for s, frac in fracPerSample.iteritems():
            plots.append(plt.bar(ind, frac, width, color=colors[s], bottom=below))
            below = below + frac
        plt.ylabel('fractions')
        plt.title(prefix + ' ' + lt + ' compositions')
        plt.xticks(ind + width / 2., regions)
        plt.ylim((0.0, 1.0))
        plt.grid(True)
        plt.yticks(np.arange(0.0, 1.0, 0.2))
        labels = {'heavyflavor': 'bb/cc', 'diboson': 'VV', 'ttbar': 'tt'}
        labels = [labels[s] if s in labels else s for s in samples]
        leg = plt.legend([p[0] for p in plots], labels, bbox_to_anchor=(1.135, 1.05))
        leg.get_frame().set_alpha(0.5)
        fig.autofmt_xdate(bottom=0.25, rotation=90, ha='center')
        fig.savefig(outplotdir + prefix + '_' + lt + '.png')
def fetchSfHistos(inputSfFiles=[], lepton='', verbose=False):
    from compute_fake_el_scale_factor import histoname_sf_vs_eta
    fileNames = inputSfFiles
    histos = dict()
    print "fetchSfHistos: fileNames ", fileNames
    if not (type(fileNames) == list and len(fileNames) in [1, 2]):
        print "fetchSfHistos expects one or two files (hflf+conv), got %s" % str(inputSfFiles)
        print "returning ", histos
        return histos
    if verbose:
        print "retrieving scale factors from %s" % inputSfFiles
    fname_hflf = first(filter(lambda _: 'hflf' in _, fileNames))
    fname_conv = first(filter(lambda _: 'conv' in _, fileNames))
    file_hflf = r.TFile.Open(fname_hflf) if fname_hflf else None
    file_conv = r.TFile.Open(fname_conv) if fname_conv else None
    hname = histoname_sf_vs_eta(lepton)
    histo_hflf = file_hflf.Get(hname) if file_hflf else None
    histo_conv = file_conv.Get(hname) if file_conv else None
    if histo_hflf:
        histos['hflf'] = composeEtaHistosAs2dPtEta(input1Dhisto=histo_hflf, outhistoname=hname + '_hflf')
    if histo_conv:
        histos['conv'] = composeEtaHistosAs2dPtEta(input1Dhisto=histo_conv, outhistoname=hname + '_conv')
    for f in [file_hflf, file_conv]:
        if f:
            f.Close()
    return histos
def done(self, reels, display, line):
    S = [r.symbol() for r in reels]
    won = bool(len(set(S)) == 1)
    amount = symbols[first(S)] if won else 0
    if won and display:
        print(winmsg % symbols[first(S)])
    return line, amount
def frac2str(frac):
    flatFraction = type(first(frac)) is float
    return ('\n'.join([''.join("%12s" % s for s in fakeProcesses()),
                       ''.join("%12s" % ("%.3f" % frac[s]) for s in fakeProcesses())])
            if flatFraction else
            '\n'.join([''.join("%12s" % s for s in fakeProcesses())]
                      + [''.join("%12s" % ("%.3f" % frac[s].GetBinContent(b)) for s in fakeProcesses())
                         for b in getBinIndices(first(frac))]))
def plotHistos(bkgHistos, sigHistos, plotdir):
    llnjs = first(sigHistos).keys()
    vars = first(first(sigHistos)).keys()
    for llnj in llnjs:
        for var in vars:
            plotVar(dict((s, bkgHistos[s][llnj][var]) for s in bkgHistos.keys()),
                    dict((s, sigHistos[s][llnj][var]) for s in sigHistos.keys()),
                    llnj + '_' + var,
                    plotdir)
def plotPerSourceEff(histosPerVar={}, outputDir='', lepton='', region='', sample='', verbose=False, zoomIn=True):
    "plot efficiency for each source (and 'anysource') as a function of each var; expect histos[var][source][loose,tight]"
    variables = histosPerVar.keys()
    sources = [s for s in first(histosPerVar).keys() if s != 'real']  # only fake eff really need a scale factor
    colors = colorsLineSources
    mkdirIfNeeded(outputDir)
    for var in filter(lambda x: x in ['pt1', 'eta1'], histosPerVar.keys()):
        histosPerSource = dict((s, histosPerVar[var][s]) for s in sources)
        canvasBasename = region + '_efficiency_' + lepton + '_' + var + ("_%s" % sample if sample else '')
        missingSources = [s for s, h in histosPerSource.iteritems() if not h['loose'] or not h['tight']]
        if missingSources:
            if verbose:
                print "skip %s, missing histos for %s" % (var, str(missingSources))
            continue
        anySourceLoose = summedHisto([h['loose'] for h in histosPerSource.values()])
        anySourceTight = summedHisto([h['tight'] for h in histosPerSource.values()])
        anySourceLoose.SetName(histoNamePerSource(var, 'any', 'loose', region))
        anySourceTight.SetName(histoNamePerSource(var, 'any', 'tight', region))
        histosPerSource['any'] = {'loose': anySourceLoose, 'tight': anySourceTight}
        emptyBkg = anySourceLoose.Integral() == 0 or anySourceTight.Integral() == 0
        if emptyBkg:
            if verbose:
                print "empty backgrounds, skip %s" % canvasBasename
            continue

        def computeEfficiencies(histosPerSource={}):
            sources = histosPerSource.keys()
            num = dict((s, histosPerSource[s]['tight']) for s in sources)
            den = dict((s, histosPerSource[s]['loose']) for s in sources)
            eff = dict((s, h.Clone(h.GetName().replace('tight', 'tight_over_loose'))) for s, h in num.iteritems())
            [eff[s].Divide(den[s]) for s in sources]
            return eff

        effs = computeEfficiencies(histosPerSource)
        can = r.TCanvas('c_' + canvasBasename, canvasBasename, 800, 600)
        can.cd()
        pm = first(effs)  # pad master
        pm.SetStats(False)
        pm.Draw('axis')
        can.Update()
        for s, h in effs.iteritems():
            h.SetMarkerColor(colors[s] if s in colors else r.kBlack)
            h.SetLineColor(h.GetMarkerColor())
            h.SetLineWidth(2 * h.GetLineWidth())
            h.SetMarkerStyle(markersSources[s] if s in markersSources else r.kDot)
            h.Draw('ep same')
            h.SetDirectory(0)
        # pprint.pprint(effs)
        yMin, yMax = getMinMax(effs.values())
        pm.SetMinimum(0.0)
        pm.SetMaximum(0.25 if yMax < 0.5 and zoomIn else 1.1)
        can.Update()
        topRightLabel(can, canvasBasename, xpos=0.125, align=13)
        drawLegendWithDictKeys(can, effs, opt='lp')
        can.RedrawAxis()
        can._histos = effs
        can.Update()
        outFname = os.path.join(outputDir, canvasBasename + '.png')
        utils.rmIfExists(outFname)
        can.SaveAs(outFname)
def plotStackedHistos(histosPerGroup={}, outputDir='', region='', verbose=False):
    groups = histosPerGroup.keys()
    variables = first(histosPerGroup).keys()
    leptonTypes = first(first(histosPerGroup)).keys()
    colors = getGroupColor()
    mkdirIfNeeded(outputDir)
    histosPerName = dict([(region + '_' + var + '_' + lt,  # one canvas for each histo, so key with histoname w/out group
                           dict([(g, histosPerGroup[g][var][lt]) for g in groups]))
                          for var in variables for lt in leptonTypes])
    for histoname, histosPerGroup in histosPerName.iteritems():
        missingGroups = [g for g, h in histosPerGroup.iteritems() if not h]
        if missingGroups:
            if verbose:
                print "skip %s, missing histos for %s" % (histoname, str(missingGroups))
            continue
        bkgHistos = dict([(g, h) for g, h in histosPerGroup.iteritems() if g not in ['data', 'signal']])
        totBkg = summedHisto(bkgHistos.values())
        err_band = None  # buildErrBandGraph(totBkg, computeStatErr2(totBkg))
        emptyBkg = totBkg.Integral() == 0
        if emptyBkg:
            if verbose:
                print "empty backgrounds, skip %s" % histoname
            continue
        can = r.TCanvas('c_' + histoname, histoname, 800, 600)
        can.cd()
        pm = totBkg  # pad master
        pm.SetStats(False)
        pm.Draw('axis')
        can.Update()  # necessary to fool root's dumb object ownership
        stack = r.THStack('stack_' + histoname, '')
        can.Update()
        r.SetOwnership(stack, False)
        for s, h in bkgHistos.iteritems():
            h.SetFillColor(colors[s] if s in colors else r.kOrange)
            h.SetDrawOption('bar')
            h.SetDirectory(0)
            stack.Add(h)
        stack.Draw('hist same')
        # err_band.Draw('E2 same')
        data = histosPerGroup['data']
        if data and data.GetEntries():
            data.SetMarkerStyle(r.kFullDotLarge)
            data.Draw('p same')
        # yMin, yMax = getMinMax([h for h in [totBkg, data, err_band] if h])  # fixme with err_band
        yMin, yMax = 0.0, data.GetMaximum()
        pm.SetMinimum(0.0)
        pm.SetMaximum(1.1 * yMax)
        can.Update()
        topRightLabel(can, histoname, xpos=0.125, align=13)
        # drawLegendWithDictKeys(can, dictSum(bkgHistos, {'stat err': err_band}), opt='f')
        drawLegendWithDictKeys(can, bkgHistos, opt='f')
        can.RedrawAxis()
        can._stack = stack
        can._histos = [h for h in stack.GetHists()] + [data]
        can.Update()
        outFname = os.path.join(outputDir, histoname + '.png')
        utils.rmIfExists(outFname)
        can.SaveAs(outFname)
def comp_sum(supposition_results, ar, rcdl, set_totaller, **kwargs):
    if isinstance(ar, str):
        add_remove_names = ('added', 'removed')
        first_drill_down = first(lambda el: el[0] == ar[0], add_remove_names)
        ar = lambda sr: getattr(sr, first_drill_down)
    if isinstance(rcdl, str):
        sets = ('reachability_set', 'coverage_set', 'dissimilarity_set', 'liability_set')
        set_names = [first(lambda el: el[0] == letter, sets) for letter in rcdl]
        rcdl = lambda profile: [getattr(profile, sn) for sn in set_names]
    assert isinstance(supposition_results, SuppositionResults)
    return sum(sum((set_totaller(_set, source, **kwargs) for _set in rcdl(ar(change))))
               for (source, change) in supposition_results.iteritems())
def upstreamCommit(self):
    """The most recent commit this branch shares with its upstream.

    `git log` and `git reflog` are used to detect rebases on the upstream
    branch, in similar fashion to `git pull`.
    """
    if self.upstream is None:
        return None
    commitHashes = set(c.hash for c in self.allCommits)
    firstUpstreamReference = first(h.hash for h in self.upstream._refLog if h.hash in commitHashes)
    upstreamCommitHashes = set(c.hash for c in self.upstream.allCommits)
    return first(c for c in self.allCommits
                 if c.hash in upstreamCommitHashes or c.hash == firstUpstreamReference)
def svn_info(path):
    # get tag or last rev
    cmd = 'svn info ' + path
    out = getCommandOutput(cmd)
    url_origin = first([l for l in out['stdout'].splitlines() if 'URL: ' in l])
    url_origin = url_origin.replace('/', ' ').split() if url_origin else []
    tag = url_origin[url_origin.index('tags') + 1] if 'tags' in url_origin else None
    last_rev = first(first([l for l in out['stdout'].splitlines() if 'Last Changed Rev:' in l]).split()[::-1])
    # get list of modified files
    cmd = 'svn status ' + path
    out = getCommandOutput(cmd)
    modified_files = [l for l in out['stdout'].splitlines() if l.startswith('M ')]
    return "{0} {1}".format(tag if tag else last_rev,
                            ('\n'.join([''] + modified_files) if modified_files else ''))
def compose2Dcompositions(inputSfFile=None, templateHistoName="%(proc)s_%(etabin)s", processes=[]):
    "take two 1D fractions histograms for one eta slice each, and compose them in a 2D fractions histogram"
    etaBins = ['etaC', 'etaF']
    histos1d = dict((e, dict((p, inputSfFile.Get(templateHistoName % {'proc': p, 'etabin': e})) for p in processes))
                    for e in etaBins)
    assert all(v for ve in histos1d.values() for v in ve.values()), "missing compositions: %s" % histos1d
    h1dC, h1dF = first(histos1d['etaC']), first(histos1d['etaF'])
    nX, xMin, xMax = h1dC.GetNbinsX(), h1dC.GetXaxis().GetBinLowEdge(1), h1dC.GetXaxis().GetBinUpEdge(h1dC.GetNbinsX())
    histos2d = dict((p, r.TH2F(histos1d['etaC'][p].GetName().replace('_etaC_', '_vs_eta_'), '',
                               nX, xMin, xMax, 2, 0.0, 2.0))
                    for p in histos1d['etaC'].keys())
    for p in processes:
        for iEta, eta in zip(range(1, 1 + len(etaBins)), etaBins):
            hEta, hEtaPt = histos1d[eta][p], histos2d[p]
            for iPt in range(1, 1 + nX):
                hEtaPt.SetBinContent(iPt, iEta, hEta.GetBinContent(iPt))
                hEtaPt.SetBinError(iPt, iEta, hEta.GetBinError(iPt))
    return histos2d
def ai_move(self, player):
    """Randomly choose between returning the move closest to completing a tile or a random move."""
    tiles = [t for t in self if self.valid_move(player, t)]

    def to_max(t):
        return t.maxnum - t.num

    tiles.sort(key=to_max)
    return rndchoice([first(tiles), rndchoice(tiles)])
def closest(self, lst1, lst2):
    dist_list = []
    for r in lst1:
        for r2 in lst2:
            dist_list.append((self.dist(r, r2), r, r2))
    close = first(sorted(dist_list))
    return close[1], close[2]
def add(self, vehicle: Vehicle):
    try:
        spot = first(spot for spot in self._parking_spots if spot.empty_space >= vehicle.size)
    except ValueError:
        raise OutOfSpaceError('No empty parking spot')
    spot.vehicles.append(vehicle)
def get_book_markdown(item_id):
    items = item_index["id"].get(item_id)
    if items is None:
        flask.abort(
            http.client.NOT_FOUND,
            "Item with id {item_id} was not found".format(item_id=item_id)
        )
    item = utils.first(items)
    transcription = item.get("transcription")
    if transcription is None:
        flask.abort(
            http.client.NOT_FOUND,
            "Transcription for item {item_id} is not available".format(
                item_id=item_id
            )
        )
    markdown_file = os.path.join(
        config.parser.markdown_dir,
        transcription
    )
    return flask.render_template(
        "markdown.html",
        markdown_data=markdown_cache.get(markdown_file),
        item=item
    )
def run(self):
    inp = TextInput()
    mfiles = [fn for fn in os.listdir(tut_dir) if not fn.startswith('.')]
    choices = [(first(f.split('.')), f) for f in mfiles]
    while True:
        Tutorial(inp.menu(choices)).play()
def __init__(self, fn, tpl):
    self.commands = []
    self.name = first(fn.split('.'))
    self.tpl = tpl
    with open(pjoin(tut_dir, fn), encoding="utf-8") as fp:
        self.sections = re.split(cmdpat, fp.read())
def check_end(self):
    pchars = set(sf.char for sf in stars + fleets if sf.char != neutral_char)
    if len(pchars) == 1:
        board.draw()
        print(nl, self.winmsg % first(pchars))
        sys.exit()
def test_messages_match(self):
    """Tests that all the localizations have the same translation keys, in the same order."""
    has_mismatches = False
    messages = collections.defaultdict(list)
    for locale, catalog in self.catalogs.items():
        for message in catalog:
            messages[locale].append(message.id)
    first_locale = utils.first(messages.keys())
    first_message_list = messages.pop(first_locale)
    for locale, message_list in messages.items():
        self.assertEqual(
            len(first_message_list),
            len(message_list)
        )
        self.assertEqual(
            set(first_message_list),
            set(message_list)
        )
        for index, message in enumerate(message_list):
            if message != first_message_list[index]:
                logging.debug(
                    "Mismatch at position {index}. {first_locale} has {first_message}, {locale} has {message}".format(
                        index=index,
                        first_locale=first_locale,
                        first_message=first_message_list[index],
                        locale=locale,
                        message=message
                    )
                )
                has_mismatches = True
    self.assertFalse(has_mismatches)
def get_book_markdown(book_id, index):
    items = item_index["id"].get(book_id, None)
    if items is None:
        flask.abort(
            http.client.NOT_FOUND,
            "Book with id {id} was not found".format(id=book_id)
        )
    item = utils.first(items)
    index -= 1  # incoming index is 1-based
    transcription_url = item.get("transcription_url")
    transcription_filename = item.get("transcription_filename")
    if (
        (transcription_url is None) or
        (transcription_filename is None) or
        (index < 0) or
        (index >= len(transcription_url)) or
        (index >= len(transcription_filename))
    ):
        flask.abort(
            http.client.NOT_FOUND,
            "Markdown transcription for book with id {id} is not available".format(
                id=book_id
            )
        )
    markdown_file = os.path.join(
        config.parser.markdown_dir,
        transcription_filename[index]
    )
    return flask.render_template(
        "markdown.html",
        markdown_data=markdown_cache.get(markdown_file),
        item=item
    )
def distribute_and_over_or(s):
    """Given a sentence s consisting of conjunctions and disjunctions
    of literals, return an equivalent sentence in CNF.
    >>> distribute_and_over_or((A & B) | C)
    ((A | C) & (B | C))
    """
    s = expr(s)
    if s.op == '|':
        s = associate('|', s.args)
        if s.op != '|':
            return distribute_and_over_or(s)
        if len(s.args) == 0:
            return False
        if len(s.args) == 1:
            return distribute_and_over_or(s.args[0])
        conj = first(arg for arg in s.args if arg.op == '&')
        if not conj:
            return s
        others = [a for a in s.args if a is not conj]
        rest = associate('|', others)
        return associate('&', [distribute_and_over_or(c | rest)
                               for c in conj.args])
    elif s.op == '&':
        return associate('&', list(map(distribute_and_over_or, s.args)))
    else:
        return s
def get_plugin(name, config=None):
    plugin = utils.first(x for x in PluginBase.plugins if x.get_name() == name)
    if plugin is None:
        print PluginBase.plugins
        print "Not found"
        return None
    return plugin(config)
def normalizeHistos(histos):
    "Normalize the input histos so that in each bin the sum of the different processes amounts to 1.0"
    basename = guessBaseHistoname([h.GetName() for h in histos.values() if h])
    tot = first(histos).Clone(basename + '_tot')
    tot.Reset()
    for h in histos.values():
        tot.Add(h)
    for b in getBinIndices(tot):
        tot.SetBinError(b, 0.0)  # norm is a constraint, without error
    for h in histos.values():
        h.Divide(tot)
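# Illustration only: the per-bin normalization that normalizeHistos performs on
# ROOT histograms, sketched on plain lists with hypothetical numbers (the names
# below are not from the original code). Each list holds one process's bin
# contents; after normalization every bin sums to 1.0 across processes.
histos_example = {'procA': [2.0, 1.0], 'procB': [2.0, 3.0]}
tot_example = [sum(col) for col in zip(*histos_example.values())]  # per-bin totals: [4.0, 4.0]
normalized_example = dict((p, [v / t if t else 0.0 for v, t in zip(vals, tot_example)])
                          for p, vals in histos_example.items())
assert all(abs(sum(col) - 1.0) < 1e-9 for col in zip(*normalized_example.values()))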
def get_random_move(self):
    """Return location of best move."""
    def by_corner_score(loc):
        return board.is_corner(loc), -len(board.get_captured(self, loc))

    moves = board.get_valid_moves(self)
    shuffle(moves)
    return first(sorted(moves, key=by_corner_score))
def test_first__empty() -> None:
    assert first([], "empty") == "empty"
def first_unassigned_variable(assignment, csp):
    "The default variable order."
    return first([var for var in csp.variables if var not in assignment])
def test_first() -> None:
    assert first([1, 2, 3], None) == 1
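# The two tests above pin down the behaviour of the two-argument first() they
# exercise: return the first element of an iterable, or the supplied default
# when it is empty. A minimal sketch consistent with just these tests follows;
# the name first_sketch is hypothetical, and the real helpers used by the other
# snippets above differ (some take a single argument, one takes a predicate).
def first_sketch(iterable, default=None):
    """Return the first item of `iterable`, or `default` if it is empty."""
    for item in iterable:
        return item
    return default

assert first_sketch([1, 2, 3], None) == 1
assert first_sketch([], "empty") == "empty"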