Example #1
   def json(self):
      self._compute_()
      jret = []
      for idx in xrange(1, self.nbins+1):
         jbin = {
            'median' : self.median_value(idx),
            'one_sigma' : {'range' : self.one_sigma_range(idx)},
            'two_sigma' : {'range' : self.two_sigma_range(idx)},
            'label' : self.median_.GetXaxis().GetBinLabel(idx),
            'low_edge' : self.median_.GetXaxis().GetBinLowEdge(idx),
            'up_edge' : self.median_.GetXaxis().GetBinUpEdge(idx),
            }

         jbin['one_sigma']['val'] = max(
            abs(i - jbin['median']) 
            for i in jbin['one_sigma']['range']
            )
         jbin['two_sigma']['val'] = max(
            abs(i - jbin['median']) 
            for i in jbin['two_sigma']['range']
            )

         if jbin['median']:
            jbin['one_sigma']['relative'] = jbin['one_sigma']['val']/jbin['median']
            jbin['two_sigma']['relative'] = jbin['two_sigma']['val']/jbin['median']
         else:
            jbin['one_sigma']['relative'] = 0
            jbin['two_sigma']['relative'] = 0
         jret.append(jbin)
      return prettyjson.dumps(jret)
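
For reference, each entry in the serialized list has the shape below (a sketch; all numeric values are illustrative, the real ones come from the median/sigma histograms):

   {
      'median' : 5.0,   # illustrative value
      'one_sigma' : {'range' : (4.1, 5.9), 'val' : 0.9, 'relative' : 0.18},
      'two_sigma' : {'range' : (3.2, 7.3), 'val' : 2.3, 'relative' : 0.46},
      'label' : 'bin1',
      'low_edge' : 0.0,
      'up_edge' : 10.0,
      }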
Example #2
   def dump(self, fname):
      json = {
         'events' : self.evts,
         'lumimap' : {}
         }

      for run, lumis in self.run_map.iteritems():
         json['lumimap'][str(run)] = self.collapse(lumis)

      with open(fname, 'w') as output:
         output.write(prettyjson.dumps(json))
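
The resulting file maps run numbers (as strings) to whatever self.collapse returns; assuming collapse() merges consecutive lumis into [first, last] ranges (not shown here), a plausible output is:

   {
      'events' : 124530,   # illustrative count
      'lumimap' : {'283270' : [[1, 10], [12, 15]]}   # assumed collapsed format
      }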
Example #3
    def save_card(self, name):
        if not self.card:
            raise RuntimeError('There is no card to save!')
        self.card.normalize_signals()
        self.card.save(name, self.outputdir)
        self.card = None

        binning_file = '%s/%s.binning.json' % (self.outputdir, name)
        with open(binning_file, 'w') as json:
            json.write(prettyjson.dumps(self.binning))
        self.binning = {}
        logging.info('binning saved in %s' % binning_file)
Example #4
    def save(self, filename, directory=''):
        "save(self, name, directory='') saves the datacard and the shape file"
        ##
        # Write datacard file
        ##
        txt_name = os.path.join(directory, '%s.txt' % filename)
        with open(txt_name, 'w') as txt:
            for cmt in self.comments:
                txt.write('## %s \n' % cmt)
            separator = '-' * 40 + '\n'
            ncategories = len(self.categories)
            sample_category = self.categories.values()[0]
            has_data = 'data_obs' in sample_category
            samples = sample_category.keys()
            samples = filter(lambda x: not self.shape_sys_naming.match(x),
                             samples)
            #set_trace()
            nsamples = len(samples)
            if has_data:
                nsamples -= 1
            #HEADER
            txt.write('imax    %i     number of categories \n' % ncategories)
            txt.write('jmax    %i     number of samples minus one \n' %
                      (nsamples - 1))
            txt.write('kmax    *     number of nuisance parameters \n')
            #WHERE TO FIND THE SHAPES
            txt.write(separator)
            txt.write(
                'shapes * * %s.root $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC \n'
                % filename)
            #DATA COUNT IN EACH CATEGORY (FIXME: ASSUMES YOU HAVE DATA)
            txt.write(separator)
            max_cat_name = max(max(len(i) for i in self.categories), 11) + 4
            format = (''.join(['%-', str(max_cat_name), 's'
                               ])) * (ncategories + 1) + '\n'
            txt.write(format % tuple(['bin'] + self.categories.keys()))
            txt.write(format % tuple(['observation'] + [
                '%-7.1f' % i.data_obs.Integral()
                for i in self.categories.itervalues()
            ]))
            #SAMPLES TABLE
            mcsamples = []
            bkg_idx, sig_idx = 1, 0
            for sample in samples:
                if 'data_obs' == sample:
                    continue
                if any(j.match(sample) for j in self.signals):
                    mcsamples.append((sample, sig_idx))
                    sig_idx -= 1
                else:
                    mcsamples.append((sample, bkg_idx))
                    bkg_idx += 1
            mcsamples = dict(mcsamples)

            #WRITE RATE FOR EACH SAMPLE/CATEGORY
            columns = ['header:%-30s']
            bin_line = ['bin']
            proc_num_line = ['process']
            proc_name_line = ['process']
            rate_line = ['rate']
            rates = {}
            for category, info in self.categories.iteritems():
                rates[category] = {}
                category_yield = 0
                for sample, shape in info.iteritems():
                    if sample not in mcsamples: continue
                    width = max(len(category), len(sample), 7) + 4
                    format = ''.join(['%-', str(width), 's'])
                    columns.append('%s_%s:%s' % (category, sample, format))
                    bin_line.append(category)
                    proc_num_line.append(mcsamples[sample])
                    proc_name_line.append(sample)
                    rate = shape.Integral()
                    #keep the first 6 significant digits
                    if rate <= 0:
                        logging.error('Sample %s in category %s has a negative'
                                      ' number of expected events! (%f) \n'
                                      'Clamping to zero' %
                                      (sample, category, rate))
                        shape.Reset()
                        rate = 0.
                        #raise ValueError(
                        #   'Sample %s in category %s has a negative'
                        #   ' number of expected events! (%f)' % (sample, category, rate)
                        #   )
                    rates[category][sample] = rate
                    category_yield += rate
                    mag = max(int(math.log10(abs(rate))),
                              0) if rate != 0 else 0
                    float_format = '%.' + str(max(5 - mag, 0)) + 'f'
                    rate_line.append(float_format % rate)
                if category_yield == 0:
                    logging.warning(
                        "Category %s does not have any expected event!" %
                        category)

            #SYSTEMATICS TABLE
            sys_table = Table(*columns, show_title=False, show_header=False)
            sys_table.add_separator()
            sys_table.add_line(*bin_line)
            sys_table.add_line(*proc_num_line)
            sys_table.add_line(*proc_name_line)
            sys_table.add_line(*rate_line)
            sys_table.add_separator()
            param_sys = []
            line = None
            for sys_name, syst in self.systematics.iteritems():
                if syst.type == 'param':
                    param_sys.append((sys_name, syst))
                    continue
                line = sys_table.new_line()
                line.header = '%s %s' % (sys_name, syst.type)
                logging.debug('Adding systematic %s' % sys_name)
                for category, info in self.categories.iteritems():
                    for sample, _ in info.iteritems():
                        if sample not in mcsamples: continue
                        if rates[category][sample] < 10.**-5:
                            line['%s_%s' % (category, sample)] = '-'
                        else:
                            line['%s_%s' % (category, sample)] = syst.effect(
                                category, sample)
            if line is not None: del line
            sys_table.add_separator()
            txt.write('%s\n' % sys_table)
            for sys_name, syst in param_sys:
                txt.write('%s  param %f %f\n' % (sys_name, syst.val, syst.unc))
        logging.info('Written file %s' % txt_name)
        ##
        # Write shape file
        ##
        shape_name = os.path.join(directory, '%s.root' % filename)
        with io.root_open(shape_name, 'recreate') as out:
            for name, cat in self.categories.iteritems():
                out.mkdir(name).cd()
                for sample, shape in cat.iteritems():
                    shape.SetName(sample)
                    shape.SetTitle(sample)
                    shape.Write()
        logging.info('Written file %s' % shape_name)
        ##
        # if we normalized the signal to outsource the yields to a json file
        # dump it too
        ##
        if self.yields:
            json_name = os.path.join(directory, '%s.json' % filename)
            with open(json_name, 'w') as txt:
                txt.write(prettyjson.dumps(self.yields))
Example #5
#! /bin/env python
__doc__ = 'simple script to read a json file and dump part of it into a new json file'

import URAnalysis.Utilities.prettyjson as prettyjson
from argparse import ArgumentParser

parser = ArgumentParser(description=__doc__)
parser.add_argument('input', metavar='input.json', type=str, help='input json')
parser.add_argument('output',
                    metavar='output.json',
                    type=str,
                    help='Json output file name')
parser.add_argument(
    'toget',
    metavar='this:that',
    type=str,
    help='colon-separated list of things to get, meaning json[this][that]')
#TODO: further syntax may be implemented to support list slicing and regex matching

args = parser.parse_args()
json = prettyjson.loads(open(args.input).read())
chain = args.toget.split(':')
to_get = json
for i in chain:
    to_get = to_get[i]

with open(args.output, 'w') as out:
    out.write(prettyjson.dumps(to_get))
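
The colon-chain descent above is easy to check in isolation (hypothetical data, not from the script):

    data = {'this': {'that': [1, 2, 3]}}
    node = data
    for key in 'this:that'.split(':'):  # same split applied to args.toget
        node = node[key]
    assert node == [1, 2, 3]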
Example #6
	elif das_name.startswith('TO BE UPDATED -- '):
		das_name = das_name.replace('TO BE UPDATED -- ', '')
	_, prim_dset, prod, tier = tuple(das_name.split('/'))

	if fnmatch(prod, args.newProd):
		print "%s already matches! Skipping..." % sample['name']
		continue

	query = 'dataset dataset=%s status=%s' % \
		 ('/'.join(['',prim_dset, args.newProd, tier]), 'VALID' if args.valid else '*')
	newsamples = das.query(query, True)
	print 'Available options'
	for info in enumerate(newsamples):
		print '[%d] %s' % info
	print '[%d] None of the above' % len(newsamples)
	idx = raw_input('Which should I pick? ')
	while isinstance(idx, str):
		try:
			idx = int(idx)
		except ValueError:
			idx = raw_input('%s is not a number! Please repeat: ' % idx)
			
	if idx == len(newsamples):
		print "OK, I'm skipping the sample and putting a 'TO BE UPDATED -- ' before the old sample"
		sample['DBSName'] = 'TO BE UPDATED -- %s' % das_name
	else:
		sample['DBSName'] = str(newsamples[idx])

with open(args.json, 'w') as out:
	out.write(prettyjson.dumps(json))
Example #7
def run_unfolder(itoy = 0, outdir = opts.dir, tau = opts.tau):
    
    styles = {
        'scan_overlay' : {
            'markerstyle':[0, 29], 'linecolor':[1,1], 
            'markercolor':[1,2], 'drawstyle':['ALP', 'P'],
            'markersize':[0,3]
            },
        'data_overlay' : {
            'linestyle' : [1,0], 'markerstyle':[0,21], 
            'linecolor' : [2,1], 'markercolor':[2,1],
            'drawstyle' : ['hist', 'p'], 'legendstyle' : ['l', 'p']
            },
        'dots' : {
            'markerstyle' : 20, 'markersize' : 2,
            'linestyle' : 0, 'drawstyle' : 'P'
            },
        'line' : {
            'linestyle':1, 'markerstyle':0
            },
        }
    plotter = BasePlotter(
        outdir, defaults = {
            'clone' : False,
            'show_title' : True,
            }
        )
    
    #canvas = plotting.Canvas(name='adsf', title='asdf')
    if "toy" in opts.fit_file:
        data_file_basedir = 'toy_' + str(itoy)
        data_file_dir = data_file_basedir + '/' + opts.var
    else:
        data_file_dir = opts.var
    xaxislabel = set_pretty_label(opts.var)
    scale = 1.
    if opts.no_area_constraint:
        area_constraint='None'
    else:
        area_constraint='Area'
    myunfolding = URUnfolding(regmode = opts.reg_mode, constraint = area_constraint)

    ## Migration matrix preprocessing
    ## remove oflow bins
    var_dir = getattr(resp_file, opts.var)
    migration_matrix = var_dir.migration_matrix
    for bin in migration_matrix: 
        if bin.overflow:
            bin.value = 0 
            bin.error = 0
    myunfolding.matrix = migration_matrix
    thruth_unscaled = var_dir.thruth_unscaled
    reco_unscaled = var_dir.reco_unscaled
    project_reco = 'X' if myunfolding.orientation == 'Vertical' else 'Y'
    project_gen = 'Y' if myunfolding.orientation == 'Vertical' else 'X'
    reco_project = rootpy.asrootpy(
        getattr(migration_matrix, 'Projection%s' % project_reco)()
        )
    gen_project = rootpy.asrootpy(
        getattr(migration_matrix, 'Projection%s' % project_gen)()
        )
    if gen_project.Integral() < thruth_unscaled.Integral():
        eff_correction = ROOT.TGraphAsymmErrors(gen_project, thruth_unscaled)
    elif gen_project.Integral() == thruth_unscaled.Integral():
        eff_correction = None
    else:
        log.warning(
            'Efficiency correction: The visible part of the migration matrix'
            ' has a larger integral than the full one! (%.3f vs. %.3f).\n'
            'It might be a rounding error, but please check!'\
                % (gen_project.Integral(), thruth_unscaled.Integral())
            )
        eff_correction = None

    if reco_project.Integral() < reco_unscaled.Integral():
        purity_correction = ROOT.TGraphAsymmErrors(reco_project, reco_unscaled)
    elif reco_project.Integral() == reco_unscaled.Integral():
        purity_correction = None
    else:
        log.warning(
            'Purity correction: The visible part of the migration matrix'
            ' has a larger integral than the full one! (%.3f vs. %.3f).\n'
            'It might be a rounding error, but please check!'\
                % (reco_project.Integral(), reco_unscaled.Integral())
            )
        purity_correction = None


    #flush graphs into histograms (easier to handle)
    eff_hist = gen_project.Clone()
    eff_hist.reset()
    eff_hist.name = 'eff_hist'
    if eff_correction:
        for idx in range(eff_correction.GetN()):
            eff_hist[idx+1].value = eff_correction.GetY()[idx]
            eff_hist[idx+1].error = max(
                eff_correction.GetEYhigh()[idx],
                eff_correction.GetEYlow()[idx]
                )
    else:
        for b in eff_hist:
            b.value = 1.
            b.error = 0.

    purity_hist = reco_project.Clone()
    purity_hist.reset()
    purity_hist.name = 'purity_hist'
    if purity_correction:
        for idx in range(purity_correction.GetN()):
            purity_hist[idx+1].value = purity_correction.GetY()[idx]
            purity_hist[idx+1].error = max(
                purity_correction.GetEYhigh()[idx],
                purity_correction.GetEYlow()[idx]
                )
    else:
        for bin in purity_hist:
            bin.value = 1.
            bin.error = 0.

    #Get measured histogram
    measured = None
    if opts.use_reco_truth:
        log.warning("Using the MC reco distribution for the unfolding!")
        measured = getattr(resp_file, opts.var).reco_distribution
    else:
        measured = getattr(data_file, data_file_dir).tt_right

    measured_no_correction = measured.Clone()
    measured_no_correction.name = 'measured_no_correction'
    measured.name = 'measured'
    measured.multiply(purity_hist)
    myunfolding.measured = measured

    #get gen-level distribution
    gen_distro = getattr(resp_file, opts.var).true_distribution.Clone()
    full_true  = gen_distro.Clone()
    full_true.name = 'complete_true_distro'
    gen_distro.multiply(eff_hist)
    gen_distro.name = 'true_distribution'    
    myunfolding.truth = gen_distro
    
    if opts.cov_matrix != 'none':
        if 'toy' in opts.fit_file:
            input_cov_matrix = make_cov_matrix(
                getattr(data_file, data_file_basedir).correlation_matrix,
                getattr(data_file, data_file_dir).tt_right
                )
            input_corr_matrix = make_corr_matrix(
                getattr(data_file, data_file_basedir).correlation_matrix,
                getattr(data_file, data_file_dir).tt_right
                )
        else:
            input_cov_matrix = make_cov_matrix(
                data_file.correlation_matrix,
                getattr(data_file, data_file_dir).tt_right
                )
            input_corr_matrix = make_corr_matrix(
                data_file.correlation_matrix,
                getattr(data_file, data_file_dir).tt_right
                )
        input_cov_matrix.name = 'input_cov_matrix'
        input_corr_matrix.name = 'input_corr_matrix'
        myunfolding.cov_matrix = input_cov_matrix
    myunfolding.InitUnfolder()
    hdata = myunfolding.measured # Duplicate. Remove!

    #plot correlation matrix (only defined when a covariance matrix was requested)
    if opts.cov_matrix != 'none':
        plotter.pad.cd()
        input_corr_matrix.SetStats(False)
        input_corr_matrix.Draw('colz')
        plotter.pad.SetLogz(True)
        plotter.save('correlation_matrix.png')

    #optimize
    best_taus = {}
    if tau >= 0:
        best_taus['External'] = tau
    else:
        t_min, t_max = eval(opts.tau_range)
        best_l, l_curve, graph_x, graph_y  = myunfolding.DoScanLcurve(100, t_min, t_max)
        best_taus['L_curve'] = best_l
        l_curve.SetName('lcurve')
        l_curve.name = 'lcurve'
        graph_x.name = 'l_scan_x'
        graph_y.name = 'l_scan_y'
        l_tau = math.log10(best_l)
        points = [(graph_x.GetX()[i], graph_x.GetY()[i], graph_y.GetY()[i]) 
                  for i in xrange(graph_x.GetN())]
        best = [(x,y) for i, x, y in points if l_tau == i]
        graph_best = plotting.Graph(1)
        graph_best.SetPoint(0, *best[0])
        plotter.reset()
        plotter.overlay(
            [l_curve, graph_best], **styles['scan_overlay']
            )
        plotter.canvas.name = 'L_curve'
    
        info = plotter.make_text_box('#tau = %.5f' % best_l, 'NE')
        #ROOT.TPaveText(0.65,1-canvas.GetTopMargin(),1-canvas.GetRightMargin(),0.999, "brNDC")
        info.Draw()
        plotter.canvas.Update()
        plotter.set_subdir('L_curve')
        plotter.save()

        modes = ['RhoMax', 'RhoSquareAvg', 'RhoAvg']
        for mode in modes:
            plotter.set_subdir(mode)
            best_tau, tau_curve, index_best = myunfolding.DoScanTau(100, t_min, t_max, mode)
            best_taus[mode] = best_tau
            tau_curve.SetName('%s_scan' % mode)
            tau_curve.SetMarkerStyle(1)
            points = [(tau_curve.GetX()[i], tau_curve.GetY()[i])
                      for i in xrange(tau_curve.GetN())]
            best = [points[index_best]] 

            graph_best = plotting.Graph(1)
            graph_best.SetPoint(0, *best[0])
            plotter.overlay(
                [tau_curve, graph_best], **styles['scan_overlay']
                )
            plotter.canvas.name = 'c'+tau_curve.GetName()

            info = plotter.make_text_box('#tau = %.5f' % best_tau, 'NE') 
            #ROOT.TPaveText(0.65,1-canvas.GetTopMargin(),1-canvas.GetRightMargin(),0.999, "brNDC")
            info.Draw()
            plotter.save('Tau_curve')

        #force running without regularization
        best_taus['NoReg'] = 0
        for name, best_tau in best_taus.iteritems():
            log.info('best tau option for %s: %.3f' % (name, best_tau))

        if opts.runHandmade:
            #hand-made tau scan
            plotter.set_subdir('Handmade')
            unc_scan, bias_scan = myunfolding.scan_tau(
                200, 10**-6, 50, os.path.join(outdir, 'Handmade', 'scan_info.root'))

            bias_scan.name = 'Handmade'
            bias_scan.title = 'Avg. Bias - Handmade'
            
            plotter.plot(bias_scan, logx=True, logy=True, **styles['dots'])
            plotter.save('bias_scan')

            unc_scan.name = 'Handmade'
            unc_scan.title = 'Avg. Unc. - Handmade'
            plotter.plot(unc_scan, logx=True, logy=True, **styles['dots'])
            plotter.save('unc_scan')
        
            bias_points = [(bias_scan.GetX()[i], bias_scan.GetY()[i])
                           for i in xrange(bias_scan.GetN())]
            unc_points = [(unc_scan.GetX()[i], unc_scan.GetY()[i])
                           for i in xrange(unc_scan.GetN())]
            fom_scan = plotting.Graph(unc_scan.GetN())
            for idx, info in enumerate(zip(bias_points, unc_points)):
                binfo, uinfo = info
                tau, bias = binfo
                _, unc = uinfo
                fom_scan.SetPoint(idx, tau, quad(bias, unc))
            fom_scan.name = 'Handmade'
            fom_scan.title = 'Figure of merit - Handmade'
            plotter.plot(fom_scan, logx=True, logy=True, **styles['dots'])
            plotter.save('fom_scan')

    to_save = []
    outfile = rootpy.io.root_open(os.path.join(outdir, opts.out),'recreate')
    for name, best_tau in best_taus.iteritems():
        plotter.set_subdir(name)
        method_dir = outfile.mkdir(name)
        myunfolding.tau = best_tau

        hdata_unfolded = myunfolding.unfolded
        #apply phase space efficiency corrections
        hdata_unfolded_ps_corrected = hdata_unfolded.Clone()
        hdata_unfolded_ps_corrected.Divide(eff_hist)

        hdata_refolded = myunfolding.refolded
        #apply purity corrections
        hdata_refolded_wpurity = hdata_refolded.Clone()

        error_matrix = myunfolding.ematrix_total

        hcorrelations = myunfolding.rhoI_total
        hbias = myunfolding.bias
        #canvas = overlay(myunfolding.truth, hdata_unfolded)
        myunfolding.truth.xaxis.title = xaxislabel
        hdata_unfolded.xaxis.title = xaxislabel
        n_neg_bins = 0
        for ibin in range(1,hdata_unfolded.GetNbinsX()+1):
            if hdata_unfolded.GetBinContent(ibin) < 0:
                n_neg_bins = n_neg_bins + 1
        hn_neg_bins = plotting.Hist(
            2,-1, 1, name = 'nneg_bins', 
            title = 'Negative bins in ' + hdata_unfolded.GetName()+ ';Bin sign; N_{bins}'
            )
        hn_neg_bins.SetBinContent(1,n_neg_bins)
        hn_neg_bins.SetBinContent(2,hdata_unfolded.GetNbinsX()-n_neg_bins)
        plotter.plot(
            hn_neg_bins, writeTo='unfolding_bins_sign', **styles['line']
            )

        leg = LegendDefinition(
            title=name,
            labels=['Truth','Unfolded'],
            position='ne'
            )
        sumofpulls = 0
        sumofratios = 0
        for ibin in range(1,myunfolding.truth.GetNbinsX()+1):
            binContent1 = myunfolding.truth.GetBinContent(ibin)
            binContent2 = hdata_unfolded.GetBinContent(ibin)
            binError1 = myunfolding.truth.GetBinError(ibin)
            binError2 = hdata_unfolded.GetBinError(ibin)
            error = sqrt(binError1*binError1 + binError2*binError2)
            if error != 0:
                pull = (binContent2-binContent1)/error
            else:
                pull = 9999
            if binContent1 != 0:
                ratio = binContent2/binContent1
            else:
                ratio = 9999
            sumofpulls = sumofpulls + pull
            sumofratios = sumofratios + ratio
        sumofpulls = sumofpulls / myunfolding.truth.GetNbinsX()
        sumofratios = sumofratios / myunfolding.truth.GetNbinsX()
        
        hsum_of_pulls = plotting.Hist(
            1, 0, 1, name = 'sum_of_pulls_' + hdata_unfolded.GetName(), 
            title = 'Sum of pulls wrt truth for ' + hdata_unfolded.GetName()+ ';None; #Sigma(pulls) / N_{bins}'
            )
        hsum_of_pulls[1].value = sumofpulls
        plotter.plot(hsum_of_pulls, writeTo='unfolding_sum_of_pulls', **styles['line'])
        
        hsum_of_ratios = plotting.Hist(
            1, 0, 1, name = 'sum_of_ratios_' + hdata_unfolded.GetName(), 
            title = 'Sum of ratios wrt truth for ' + hdata_unfolded.GetName()+ ';None; #Sigma(ratios) / N_{bins}'
            )
        hsum_of_ratios[1].value = sumofratios
        plotter.plot(hsum_of_ratios, writeTo='unfolding_sum_of_ratios', **styles['line'])

        
        plotter.overlay_and_compare(
            [myunfolding.truth], hdata_unfolded, 
            legend_def=leg,
            writeTo='unfolding_pull', **styles['data_overlay']
            )
        plotter.overlay_and_compare(
            [myunfolding.truth], hdata_unfolded, 
            legend_def=leg, method='ratio',
            writeTo='unfolding_ratio', **styles['data_overlay']
            )

        plotter.overlay_and_compare(
            [full_true], hdata_unfolded_ps_corrected, 
            legend_def=leg,
            writeTo='unfolding_pull', **styles['data_overlay']
            )
        plotter.overlay_and_compare(
            [full_true], hdata_unfolded_ps_corrected, 
            legend_def=leg, method='ratio',
            writeTo='unfolding_ratio', **styles['data_overlay']
            )
    
        nbins = myunfolding.measured.GetNbinsX()
        input_distro = getattr(resp_file, opts.var).prefit_distribution
        leg = LegendDefinition(title=name, position='ne')
        myunfolding.measured.xaxis.title = xaxislabel
        hdata_refolded.xaxis.title = xaxislabel
        myunfolding.measured.drawstyle = 'e1'

        style = {'linestyle':[1, 0], 'markerstyle':[20, 20],
                 'markercolor':[2,4], 'linecolor':[2,4],
                 'drawstyle' : ['hist', 'e1'], 'legendstyle' : ['l', 'p'],
                 'title' : ['Refolded', 'Reco']
                 }
        plotter.overlay_and_compare(
            [hdata_refolded], myunfolding.measured,
            legend_def=leg,
            writeTo='refolded_pull', **style
            )
        plotter.overlay_and_compare(
            [hdata_refolded], myunfolding.measured, 
            legend_def=leg, method='ratio',
            writeTo='refolded_ratio', **style
            )
        
        style = {'linestyle':[1,0,0], 'markerstyle':[20,21,21],
                 'markercolor':[2,4,1], 'linecolor':[2,4,1],
                 'drawstyle' : ['hist', 'e1', 'e1'], 'legendstyle' : ['l', 'p', 'p'],
                 'title' : ['Refolded', 'Reco', 'Input']
                 }
        measured_no_correction.drawstyle = 'e1'
        plotter.overlay_and_compare(
            [hdata_refolded_wpurity, measured_no_correction], input_distro, 
            legend_def=leg,
            writeTo='refolded_wpurity_pull', **style
            )
        plotter.overlay_and_compare(
            [hdata_refolded_wpurity, measured_no_correction], input_distro, 
            legend_def=leg, method='ratio',
            writeTo='refolded_wpurity_ratio', **style
            )

        method_dir.WriteTObject(hdata_unfolded, 'hdata_unfolded')
        method_dir.WriteTObject(hdata_unfolded_ps_corrected, 'hdata_unfolded_ps_corrected')
        method_dir.WriteTObject(hdata_refolded, 'hdata_refolded')
        method_dir.WriteTObject(hdata_refolded_wpurity, 'hdata_refolded_wpurity')
        method_dir.WriteTObject(error_matrix, 'error_matrix')
        method_dir.WriteTObject(hbias, 'bias')
        method_dir.WriteTObject(hn_neg_bins, 'hn_neg_bins')
        method_dir.WriteTObject(hsum_of_pulls, 'hsum_of_pulls')
        method_dir.WriteTObject(hsum_of_ratios, 'hsum_of_ratios')


    htruth = myunfolding.truth
    hmatrix = myunfolding.matrix
    hmeasured = myunfolding.measured

    #with rootpy.io.root_open(os.path.join(outdir, opts.out),'recreate') as outfile:
    outfile.cd()
    to_save.extend([
        measured_no_correction,
        eff_hist,
        purity_hist,
        full_true,
        myunfolding.truth,     ## 4
        myunfolding.measured,  ## 5
        myunfolding.matrix,])  ## 6
    if opts.tau < 0:
        to_save.extend([
                l_curve,               ## 9
                tau_curve,             ## 10
                graph_x,
                graph_y
                ])

    if opts.cov_matrix != 'none':
        to_save.extend([input_cov_matrix])
        to_save.extend([input_corr_matrix])

    for j in to_save:
        log.debug('Saving %s as %s' % (j.name, j.GetName()))
        j.Write()
    getattr(resp_file, opts.var).reco_distribution.Write()
    getattr(resp_file, opts.var).prefit_distribution.Write()
    json = ROOT.TText(0., 0., prettyjson.dumps(best_taus))
    outfile.WriteTObject(json, 'best_taus')
    myunfolding.write_to(outfile, 'urunfolder')
    outfile.Close()
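
A hypothetical invocation, relying on the defaults bound from opts above: a negative tau triggers the L-curve/rho scans, while a non-negative value is used as-is ('External'):

    run_unfolder(itoy=0, tau=-1)    # scan for the best tau
    run_unfolder(itoy=0, tau=0.01)  # force an externally chosen tau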
Example #8
parser = ArgumentParser(description=__doc__)
parser.add_argument('output', type=str)
parser.add_argument('inputs', type=str, nargs='+')
args = parser.parse_args()

if len(args.inputs) == 1:
   shutil.copy(args.inputs[0], args.output)
else:
   nevts = 0
   nweighted = 0
   sumw = None
   partial = args.output.replace('.json', '.part.json')
   tmp = args.output.replace('.json', '.tmp.json')
   tmp2= args.output.replace('.json', '.tmp2.json')
   with open(partial, 'w') as p:
      p.write(prettyjson.dumps({}))

   for jin in args.inputs:
      jmap = prettyjson.loads(open(jin).read())
      nevts += jmap['events']
      nweighted += jmap['weightedEvents']
      if 'sum_weights' in jmap:
         if sumw is None:
            sumw = [i for i in jmap['sum_weights']]
         else:
            if len(jmap['sum_weights']) != len(sumw):
               raise ValueError(
                  'I got a vector of size %d and'
                  ' I was expecting it %d long' % (len(jmap['sum_weights']), len(sumw)))
            for i in range(len(sumw)):
               sumw[i] += jmap['sum_weights'][i]
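
The element-wise accumulation at the end works like this in isolation (toy values, assumed semantics):

   sumw = [1.0, 2.0]
   incoming = [0.5, 0.5]
   for i in range(len(sumw)):
      sumw[i] += incoming[i]
   # sumw is now [1.5, 2.5]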
Example #9
    def save(self,
             filename,
             png=True,
             pdf=True,
             dotc=False,
             dotroot=False,
             json=False,
             verbose=False):
        ''' Save the current canvas contents to [filename] '''
        self.pad.Draw()
        self.canvas.Update()
        if not os.path.exists(self.outputdir):
            os.makedirs(self.outputdir)
        if verbose:
            print 'saving ' + os.path.join(self.outputdir, filename) + '.png'
        if png:
            self.canvas.SaveAs(os.path.join(self.outputdir, filename) + '.png')
        if pdf:
            self.canvas.SaveAs(os.path.join(self.outputdir, filename) + '.pdf')
        if dotc:
            self.canvas.SaveAs(os.path.join(self.outputdir, filename) + '.C')
        if json:
            jdict = {}
            for obj in self.keep:
                if isinstance(obj, ROOT.TH1):
                    jdict[obj.GetTitle()] = [
                        obj.GetBinContent(1),
                        obj.GetBinError(1)
                    ]
                if isinstance(obj, ROOT.THStack):
                    jdict['hist_stack'] = {}
                    for i in obj.GetHists():
                        jdict['hist_stack'][i.GetTitle()] = [
                            i.GetBinContent(1),
                            i.GetBinError(1)
                        ]
            with open(os.path.join(self.outputdir, filename) + '.json',
                      'w') as jout:
                jout.write(prettyjson.dumps(jdict))
        if dotroot:
            outfile = ROOT.TFile.Open(
                os.path.join(self.outputdir, filename) + '.root', 'recreate')
            outfile.cd()
            self.canvas.Write()
            for obj in self.keep:
                obj.Write()
            #self.keep = []
            self.reset()
            outfile.Close()
            #self.canvas = plotting.Canvas(name='adsf', title='asdf')
            #self.canvas.cd()
            #self.pad    = plotting.Pad(0., 0., 1., 1.) #ful-size pad
            #self.pad.cd()

        if self.keep and self.lower_pad:
            #pass
            self.reset()
        else:
            # Reset keeps
            self.keep = []
        # Reset logx/y
        self.canvas.SetLogx(False)
        self.canvas.SetLogy(False)
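
A hypothetical call, assuming a configured plotter instance (flag names taken from the signature above):

    plotter.save('m_tt', png=True, pdf=False, json=True)  # writes m_tt.png and m_tt.json into self.outputdir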
Example #10
def run_module(**kwargs):
    args = Struct(**kwargs)
    if not args.name:
        args.name = args.var
    results = [prettyjson.loads(open(i).read())[-1] for i in args.results]
    #set_trace()
    results.sort(key=lambda x: x['median'])

    nevts_graph = plotting.Graph(len(results))
    upbound_graph = plotting.Graph(len(results))
    max_unc = 0.
    bound_range = args.vrange  #results[-1]['up_edge'] - results[0]['up_edge']
    step = results[1]['up_edge'] - results[0]['up_edge']
    #bound_range += step
    bound_min = results[0]['up_edge'] - step

    for idx, info in enumerate(results):
        nevts_graph.SetPoint(idx, info['median'],
                             info["one_sigma"]["relative"])
        upbound_graph.SetPoint(idx, info['up_edge'],
                               info["one_sigma"]["relative"])
        if info["one_sigma"]["relative"] > max_unc:
            max_unc = info["one_sigma"]["relative"]

    canvas = plotting.Canvas()
    nevts_graph.Draw('APL')
    nevts_graph.GetXaxis().SetTitle('average number of events')
    nevts_graph.GetYaxis().SetTitle('relative fit uncertainty')
    canvas.SaveAs(os.path.join(args.outdir, 'nevts_%s.png' % args.name))

    tf1 = Plotter.parse_formula(
        'scale / (x - shift) + offset',
        'scale[1,0,10000],shift[%.2f,%.2f,%.2f],offset[0, 0, 1]' %
        (bound_min, bound_min - 2 * step, bound_min + step))
    # ROOT.TF1('ret', '[0]/(x - [1])', -3, 1000)
    tf1.SetRange(0, 1000)
    tf1.SetLineColor(ROOT.EColor.kAzure)
    tf1.SetLineWidth(3)
    result = upbound_graph.Fit(tf1, 'MES')  #WL

    scale = tf1.GetParameter('scale')
    shift = tf1.GetParameter('shift')
    offset = tf1.GetParameter('offset')

    upbound_graph.Draw('APL')
    upbound_graph.GetXaxis().SetTitle('upper bin edge')
    upbound_graph.GetYaxis().SetTitle('relative fit uncertainty')
    if args.fullrange:
        upbound_graph.GetYaxis().SetRangeUser(offset, max_unc * 1.2)
    upbound_graph.GetXaxis().SetRangeUser(shift, (shift + bound_range) * 1.2)

    delta = lambda x, y: ((x - shift) / bound_range)**2 + ((y - offset) /
                                                           (max_unc - offset))
    points = ((bound_min + step) + i * (bound_range / 100.)
              for i in xrange(100))
    math_best_x = min(points, key=lambda x: delta(x, tf1.Eval(x)))
    math_best_y = tf1.Eval(math_best_x)

    ## math_best_x = math.sqrt(scale*bound_range*(max_unc-offset))+shift
    ## math_best_y = tf1.Eval(math_best_x) #max_unc*math.sqrt(scale)+offset
    best = min(results, key=lambda x: abs(x['up_edge'] - math_best_x))

    upbound_best = plotting.Graph(1)
    upbound_best.SetPoint(0, best['up_edge'], best["one_sigma"]["relative"])
    upbound_best.markerstyle = 29
    upbound_best.markersize = 3
    upbound_best.markercolor = 2
    upbound_best.Draw('P same')

    print math_best_x, math_best_y
    print best['up_edge'], best["one_sigma"]["relative"]

    math_best = plotting.Graph(1)
    math_best.SetPoint(0, math_best_x, math_best_y)
    math_best.markerstyle = 29
    math_best.markersize = 3
    math_best.markercolor = ROOT.EColor.kAzure
    math_best.Draw('P same')

    canvas.SaveAs(os.path.join(args.outdir, 'upbound_%s.png' % args.name))
    json = {'best': best['up_edge'], 'unc': best["one_sigma"]["relative"]}
    with open(os.path.join(args.outdir, '%s.json' % args.name), 'w') as jfile:
        jfile.write(prettyjson.dumps(json))
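
run_module is driven by keyword arguments; a hypothetical invocation (argument names inferred from the attribute accesses above, values illustrative):

    run_module(
        var='m_tt', name='',  # name falls back to var when empty
        results=['scan_0.json', 'scan_1.json'],
        vrange=100., outdir='plots', fullrange=False)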
Example #11
                         title='%s efficiencies' % qtype)
    effs[qtype] = {}
    for wpoint, jmap in jmaps.iteritems():
        line = table.new_line()
        line.WP = wpoint
        isThere = ("lead_%sEff" % qtype) in wpoints[wpoint]
        line.clead = 100 * wpoints[wpoint]["lead_%sEff" %
                                           qtype] if isThere else 0.
        line.csub = 100 * wpoints[wpoint]["sub_%sEff" %
                                          qtype] if isThere else 0.

        leff = compute_eff(jmap, 'leading', qtype)
        seff = compute_eff(jmap, 'subleading', qtype)
        teff = (leff + seff) / 2.

        line.lead = leff * 100
        line.sub = seff * 100
        line.avg = teff * 100
        line.diff = 100 * (leff - seff) / teff
        effs[qtype][wpoint] = {
            'leading': leff,
            'subleading': seff,
            'average': teff
        }
    del line
    print '\n\n'
    print table

with open('plots/%s/btageff/mc_effs.json' % jobid, 'w') as out:
    out.write(prettyjson.dumps(effs))
Example #12
				vals.append(
					(tdir, plotter.get_yields(50))
					)
			plotter.save(var)
			if 'discriminant' in var:
				plotter.mc_samples = plotter.generic_mcs

	jmap = {}
	for tdir, sams in vals:
		for sam, lo, hi in sams:
			if sam not in jmap:
				jmap[sam] = {}
			jmap[sam]['%s/%s' % (tdir, 'hi')] = hi
			jmap[sam]['%s/%s' % (tdir, 'lo')] = lo
	with open('yields.json', 'w') as out:
		out.write(prettyjson.dumps(jmap))

if args.shapes or args.all:
	for peak, dname in [('*', 'shapes'), ('Peak', 'shapes_peak'), ('Int', 'shapes_interference')]:
		plotter.set_subdir(dname)
		for mass in [400, 500, 600, 750]:
			histos = []
			for width, color in zip([5, 10, 25, 50], ['#f9a505', '#2aa198', '#0055ff', '#6666b3']):
				htt_view = plotter.get_view('HtoTT_M%d_%dpc_%s' % (mass, width, peak))
				histos.append(
					sum(
						htt_view.Get('%s/nosys/tight/MTHigh/m_tt' % i) \
							for i in ['right', 'matchable', 'unmatchable', 'noslep']
						)
					)
				histos[-1].Rebin(2)