示例#1
0
def test_smoothing(calibration):
    """Compare peak fits with and without smoothing of the spectra.

    For every entry of the module-level ``data`` dict two Experiment
    objects are built from the same file; one of them is smoothed.
    Fitted peak position, width and chi^2/ndf of both are written side
    by side into out/test_smoothing.tex.
    """
    print("##### SMOOTHING TEST #####")

    with LatexTable("out/test_smoothing.tex") as peaktable:
        # Two three-column groups: results without / with smoothing
        # ("ohne/mit Glaettung" are the German column-group headers).
        peaktable.header(" ",
                         r'\multicolumn{3}{|c|}{ohne Gl\"attung}',
                         r'\multicolumn{3}{|c|}{mit Gl\"attung}',
                         align=["c"] * 7,
                         lineafter=0)
        peaktable.row("Probe", "Höhe", "Breite", r'$\chi^2/\textrm{ndf}$',
                      "Höhe", "Breite", r'$\chi^2/\textrm{ndf}$')
        peaktable.hline()
        for filename, meta in data.items():
            peaktable.hline(1)

            # Two identical experiments; experiment2 gets smoothed below.
            experiment = Experiment("data/" + filename,
                                    calibration=calibration,
                                    title=meta["title"])
            experiment2 = Experiment("data/" + filename,
                                     calibration=calibration,
                                     title=meta["title"])
            # Subtract the empty-chamber background spectrum from both.
            experiment.subtract_empty("data/G20_Leer.mca", 0.5)
            experiment2.subtract_empty("data/G20_Leer.mca", 0.5)
            experiment2.smooth(0.1)
            # NOTE(review): `lines` appears unused in the visible code —
            # possibly a leftover or used further down (view may be cut).
            lines = []
            # Fit each expected peak (sorted by position) in both spectra.
            for i, (mu0, sigma0) in enumerate(
                    sorted(meta["peaks"], key=lambda peak: peak[0]), 1):
                fit = experiment.find_peak(mu0, sigma0, plot=plot)
                fit2 = experiment2.find_peak(mu0, sigma0, plot=plot)

                peaktable.row(
                    # (title, rowspan) — presumably rendered as one cell
                    # spanning all peak rows of this sample; confirm the
                    # LatexTable API.
                    (meta["title"], len(meta["peaks"])),
                    _n(fit.mu, precision=3),
                    _n(fit.sigma, precision=3),
                    _n(fit.chisqndf),
                    _n(fit2.mu, precision=3),
                    _n(fit2.sigma, precision=3),
                    _n(fit2.chisqndf),
                )
示例#2
0
def main():

    parser = argparse.ArgumentParser(description='get cutflow')
    
    parser.add_argument('-r', dest='regions', help='Region')
    parser.add_argument('-s', dest='samples', help='Samples separated with ,')
    parser.add_argument('-i', dest='files', help='Files separated with ,')
    parser.add_argument('-l', dest='luminosity', help='Luminosity [fb-1]')
    parser.add_argument('-p', dest='percentage', action='store_true', help='Show percentage')
    parser.add_argument('--latex', action='store_true', default=False, help='use LatexTable instead PrettyTable')
    parser.add_argument('--sel', dest='selection', help='Selection')
    parser.add_argument('--pre', dest='preselection', action='store_true', help='add preselection cutflow')

    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)
        
    args = parser.parse_args()
    
    ## samples
    if args.samples is None and args.files is None:
        samples = [
            'data', 
            'photonjet', 
            'wgamma', 
            'zllgamma', 
            'znunugamma', 
            'ttbar', 
            'ttbarg', 
            'wjets', 
            'zjets', 
            'diboson', 
            'efake', 
            'jfake'
            ]
        

    if args.regions is None:
        args.regions = 'Sel'

    do_scale = True
    if args.luminosity == "0":
        do_scale = False

    for region in args.regions.split(','):

        #if args.samples is not None:
        #    files = [ os.path.join(MiniDir, '%s.t146_mini.root' % sample) for sample in args.samples.split(',') ]

        #if args.files is not None:
        #    files = args.files.split(',')
        
        try:
            selection = getattr(regions_, region)
        except:
            selection = args.selection

        flows = OrderedDict()

        if args.samples is not None:
            for sample in args.samples.split(','):

                cutflow = get_cutflow(sample, selection=selection, lumi=args.luminosity, preselection=args.preselection, scale=do_scale)

                cuts = [ cutflow.GetXaxis().GetBinLabel(b+1) for b in xrange(cutflow.GetNbinsX()) ]
                flows[sample] = [ cutflow.GetBinContent(b+1) for b in xrange(cutflow.GetNbinsX()) ]
    
        if args.files is not None:
            for fname in args.files.split(','):

                cutflow = get_cutflow(fname, selection=selection, preselection=args.preselection, scale=do_scale)
    
                cuts = [ cutflow.GetXaxis().GetBinLabel(b+1) for b in xrange(cutflow.GetNbinsX()) ]
                flows[os.path.basename(fname)] = [ cutflow.GetBinContent(b+1) for b in xrange(cutflow.GetNbinsX()) ]
        
        if args.latex:
            table = LatexTable()
        else:
            table = PrettyTable()

        table.add_column(region, cuts)
    
        for sample, flow in flows.iteritems():
            
            total = float(flow[0])

            if args.percentage:
                table.add_column(sample, ['%.2f (%d%%)' % (n, 100*n/total) for n in flow])
            else:
                table.add_column(sample, ['%.2f' % n for n in flow])
    
        print table
示例#3
0
def main():

    parser = argparse.ArgumentParser(description='get cutflow')

    parser.add_argument('-r', dest='regions', help='Region')
    parser.add_argument('-s', dest='samples', help='Samples separated with ,')
    parser.add_argument('-i', dest='files', help='Files separated with ,')
    parser.add_argument('-l', dest='luminosity', help='Luminosity [fb-1]')
    parser.add_argument('-p',
                        dest='percentage',
                        action='store_true',
                        help='Show percentage')
    parser.add_argument('-v', dest='version', help='Mini version')
    parser.add_argument('--latex',
                        action='store_true',
                        default=False,
                        help='use LatexTable instead PrettyTable')
    parser.add_argument('--sel', dest='selection', help='Selection')
    parser.add_argument('--pre',
                        dest='preselection',
                        action='store_true',
                        help='add preselection cutflow')

    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)

    args = parser.parse_args()

    ## samples
    if args.samples is None and args.files is None:
        sys.exit(1)

    if args.regions is None:
        args.regions = 'Sel'

    do_scale = True
    if args.luminosity == "0":
        do_scale = False

    for region in args.regions.split(','):

        try:
            selection = getattr(regions_, region)
        except:
            selection = args.selection

        flows = OrderedDict()

        if args.samples is not None:
            for sample in args.samples.split(','):

                cutflow = get_cutflow(sample,
                                      selection=selection,
                                      lumi=args.luminosity,
                                      preselection=args.preselection,
                                      scale=do_scale,
                                      version=args.version)

                cuts = [
                    cutflow.GetXaxis().GetBinLabel(b + 1)
                    for b in xrange(cutflow.GetNbinsX())
                ]
                flows[sample] = [
                    cutflow.GetBinContent(b + 1)
                    for b in xrange(cutflow.GetNbinsX())
                ]

        if args.files is not None:
            for fname in args.files.split(','):

                cutflow = get_cutflow(fname,
                                      selection=selection,
                                      preselection=args.preselection,
                                      scale=do_scale,
                                      version=args.version)

                cuts = [
                    cutflow.GetXaxis().GetBinLabel(b + 1)
                    for b in xrange(cutflow.GetNbinsX())
                ]
                flows[os.path.basename(fname)] = [
                    cutflow.GetBinContent(b + 1)
                    for b in xrange(cutflow.GetNbinsX())
                ]

        if args.latex:
            table = LatexTable()
        else:
            table = PrettyTable()

        table.add_column(region, cuts)

        for sample, flow in flows.iteritems():

            total = float(flow[0])

            if args.percentage:
                table.add_column(sample, [
                    '%.2f (%d%%)' % (n, int(round(100 * (n / total))))
                    for n in flow
                ])
            else:
                table.add_column(sample, ['%.2f' % n for n in flow])

        print table
示例#4
0
def main():

    parser = argparse.ArgumentParser(description='get yields')

    parser.add_argument('-r',
                        dest='regions',
                        default='',
                        help='Regions separated with ,')
    parser.add_argument('-s', dest='samples', help='Samples separated with ,')
    parser.add_argument('-i', dest='files', help='Files separated with ,')
    parser.add_argument('-l', dest='lumi', help='Luminosity to scale')
    parser.add_argument('-v', dest='version', help='force mini version')
    parser.add_argument('--sel', dest='selection')
    parser.add_argument('--prw', action='store_true', help='apply prw weights')
    parser.add_argument('--data', help='Include data: data|data15|data16')
    parser.add_argument('--unblind', action='store_true', help='Unblind data')

    # backgrounds
    parser.add_argument('--mc', action='store_true', help='Use MC backgrounds')
    parser.add_argument('--muq', help='Normalization factor for gam+jet')
    parser.add_argument('--muw', help='Normalization factor for W gamma')
    parser.add_argument('--mut', help='Normalization factor for ttbar gamma')

    # signal
    parser.add_argument('--signal', action='store_true', help='Include signal')
    parser.add_argument('--m3', default='1400', help='M3')

    # others
    parser.add_argument('--latex',
                        action='store_true',
                        help='use LatexTable instead PrettyTable')
    parser.add_argument('--nw', action='store_true')

    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)

    args = parser.parse_args()

    ## samples
    bkgs = analysis.backgrounds_mc

    dd_scale = 1.
    if args.data is None and args.samples is None and args.lumi is not None:
        bkgs.append('jfake')
        bkgs.append('efake')

        dd_scale = float(args.lumi) * 1000. / analysis.lumi_data

    elif args.data == 'data15':
        args.lumi = 'data15'
        bkgs.append('jfake15')
        bkgs.append('efake15')
    elif args.data == 'data16':
        args.lumi = 'data16'
        bkgs.append('jfake16')
        bkgs.append('efake16')
    elif args.data == 'data':
        args.lumi = 'data'
        bkgs.append('jfake')
        bkgs.append('efake')

    if args.mc:
        bkgs = [
            'photonjet',
            'multijet',
            'wgamma',
            'zgamma',
            'ttbar',
            'ttbarg',
            'vjets',
        ]

    signal = []
    for (m3, mu) in sorted(grid_m3_mu.iterkeys()):
        if int(args.m3) == m3:
            signal.append('GGM_M3_mu_%d_%d' % (m3, mu))

    if args.prw:
        get_events = partial(miniutils.get_events,
                             lumi=args.lumi,
                             version=args.version,
                             prw=True)
    elif args.nw:
        get_events = partial(miniutils.get_events,
                             lumi=args.lumi,
                             version=args.version,
                             scale=False)
    else:
        get_events = partial(miniutils.get_events,
                             lumi=args.lumi,
                             version=args.version)

    if args.regions:
        regions = args.regions.split(',')

    samples = args.samples.split(',') if args.samples is not None else []

    if args.selection is not None:
        regions = []
        regions.append(args.selection)
    if args.latex:
        table = LatexTable()
    else:
        table = PrettyTable()

    if samples:
        table.add_column('', [s for s in samples])
    elif args.data and args.signal:
        table.add_column('', [
            'Data',
        ] + bkgs + ['Total bkg'] + signal)
    elif args.data:
        table.add_column('', [
            'Data',
        ] + bkgs + ['Total bkg'])
    elif args.signal:
        table.add_column('', bkgs + ['Total bkg'] + signal)
    else:
        table.add_column('', bkgs + ['Total bkg'])

    for region in regions:

        if not region:
            continue

        try:
            selection = getattr(regions_, region)
        except:
            selection = region

        cols = OrderedDict()

        if samples:

            for sample in samples:
                evts = get_events(sample, region=region, selection=selection)
                cols[sample] = evts

        else:

            # Data
            if args.data is None:
                pass
            elif 'SR' in region and not args.unblind:
                cols['data'] = '-1'
            else:
                cols['data'] = get_events(args.data,
                                          region=region,
                                          selection=selection)

            # Bkgs
            total_bkg = Value(0)
            for sample in bkgs:

                evts = get_events(sample, region=region, selection=selection)

                if args.data is None and 'fake' in sample:
                    evts *= dd_scale

                if not region.startswith('CR'):
                    if args.muq is not None and sample == 'photonjet':
                        evts *= float(args.muq)
                    if args.muw is not None and sample == 'wgamma':
                        evts *= float(args.muw)
                    if args.mut is not None and sample == 'ttbarg':
                        evts *= float(args.mut)

                cols[sample] = evts
                total_bkg += evts

            cols['Total bkg'] = total_bkg

            if region.startswith('CR') and args.data:
                if 'CRQ' in region or 'CRM' in region:
                    mu = (cols['data'] -
                          (total_bkg - cols['photonjet'])) / cols['photonjet']
                    purity = cols['photonjet'] / total_bkg
                    cols['photonjet'] = '%s (%.2f, mu=%.2f)' % (
                        cols['photonjet'], purity.mean, mu.mean)

                elif 'CRT' in region:
                    mu = (cols['data'] -
                          (total_bkg - cols['ttbarg'])) / cols['ttbarg']
                    purity = cols['ttbarg'] / total_bkg

                    cols['ttbarg'] = '%s (%.2f%%, mu=%.2f)' % (
                        cols['ttbarg'], purity.mean, mu.mean)

                elif 'CRW' in region:
                    mu = (cols['data'] -
                          (total_bkg - cols['wgamma'])) / cols['wgamma']
                    purity = cols['wgamma'] / total_bkg

                    cols['wgamma'] = '%s (%.2f%%, mu=%.2f)' % (
                        cols['wgamma'], purity.mean, mu.mean)

            # Signals
            if args.signal:

                for sig in signal:
                    n_s = get_events(sig, region=region, selection=selection)
                    cols[sig] = '%s (%s)' % (n_s,
                                             get_significance(n_s, total_bkg))

        table.add_column(region[:10], cols.values())

    print table
示例#5
0
def main():
    """Build and print a yields table (Python 2 script).

    One column per region with observed data, expected backgrounds
    (optionally rescaled by the --muq/--muw/--mut normalization
    factors) and, with --signal, the GGM signal points for --m3.
    """

    parser = argparse.ArgumentParser(description='get yields')
    
    parser.add_argument('-r', dest='regions', default='', help='Regions separated with ,')
    parser.add_argument('-s', dest='samples', help='Samples separated with ,')
    # NOTE(review): -i/--files is parsed but never used below — confirm.
    parser.add_argument('-i', dest='files', help='Files separated with ,')
    parser.add_argument('-l', dest='lumi', help='Luminosity to scale')
    parser.add_argument('-v', dest='version', help='force mini version')
    parser.add_argument('--sel', dest='selection')
    parser.add_argument('--prw', action='store_true', help='apply prw weights')
    parser.add_argument('--data', help='Include data: data|data15|data16')
    parser.add_argument('--unblind', action='store_true', help='Unblind data')

    # backgrounds
    parser.add_argument('--mc', action='store_true', help='Use MC backgrounds')
    parser.add_argument('--muq', help='Normalization factor for gam+jet')
    parser.add_argument('--muw', help='Normalization factor for W gamma')
    parser.add_argument('--mut', help='Normalization factor for ttbar gamma')

    # signal
    parser.add_argument('--signal', action='store_true', help='Include signal')
    parser.add_argument('--m3', default='1400', help='M3')

    # others
    parser.add_argument('--latex', action='store_true', help='use LatexTable instead PrettyTable')
    parser.add_argument('--nw', action='store_true')


    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)
        
    args = parser.parse_args()
    
    ## samples
    bkgs = analysis.backgrounds_mc

    # Data-driven fake backgrounds: pick the variant matching the data
    # period; when a target luminosity is given without data, rescale
    # the data-driven estimates from the full-data luminosity.
    dd_scale = 1.
    if args.data is None and args.samples is None and args.lumi is not None:
        bkgs.append('jfake')
        bkgs.append('efake')

        # args.lumi presumably in fb-1, analysis.lumi_data in pb-1 —
        # TODO(review): confirm the unit conversion.
        dd_scale = float(args.lumi) * 1000. / analysis.lumi_data


    elif args.data == 'data15':
        args.lumi = 'data15'
        bkgs.append('jfake15')
        bkgs.append('efake15')
    elif args.data == 'data16':
        args.lumi = 'data16'
        bkgs.append('jfake16')
        bkgs.append('efake16')
    elif args.data == 'data':
        args.lumi = 'data'
        bkgs.append('jfake')
        bkgs.append('efake')
    
    if args.mc:
        bkgs = [
            'photonjet',
            'multijet',
            'wgamma',
            'zgamma',
            'ttbar',
            'ttbarg',
            'vjets',
            ]
       
    # Signal points on the GGM grid matching the requested M3.
    signal = []
    for (m3, mu) in sorted(grid_m3_mu.iterkeys()):
        if int(args.m3) == m3:
            signal.append('GGM_M3_mu_%d_%d' % (m3, mu))


    # Event-count getter, pre-bound to the common options.
    if args.prw:
        get_events = partial(miniutils.get_events, lumi=args.lumi, version=args.version, prw=True)
    elif args.nw:
        get_events = partial(miniutils.get_events, lumi=args.lumi, version=args.version, scale=False)
    else:
        get_events = partial(miniutils.get_events, lumi=args.lumi, version=args.version)

    # NOTE(review): if -r keeps its '' default and --sel is not given,
    # `regions` is never assigned and the loop below raises a NameError.
    if args.regions:
        regions = args.regions.split(',')

    samples = args.samples.split(',') if args.samples is not None else []

    # An explicit --sel overrides the -r region list.
    if args.selection is not None:
        regions = []
        regions.append(args.selection)
    if args.latex:
        table = LatexTable()
    else:
        table = PrettyTable()

    # First column: the row labels.
    if samples:
        table.add_column('', [s for s in samples])
    elif args.data and args.signal:
        table.add_column('', ['Data',]+bkgs+['Total bkg']+signal)
    elif args.data:
        table.add_column('', ['Data',]+bkgs+['Total bkg'])
    elif args.signal:
        table.add_column('', bkgs+['Total bkg']+signal)
    else:
        table.add_column('', bkgs+['Total bkg'])

    for region in regions:

        if not region:
            continue

        # Predefined selection if the region is known; otherwise treat
        # the region string itself as the selection.
        try:
            selection = getattr(regions_, region)
        except:  # NOTE(review): bare except — only AttributeError expected
            selection = region

        cols = OrderedDict()

        if samples:

            for sample in samples:
                evts = get_events(sample, region=region, selection=selection)
                cols[sample] = evts

        else:
            
            # Data (blinded in signal regions unless --unblind)
            if args.data is None:
                pass
            elif 'SR' in region and not args.unblind:
                cols['data'] = '-1'
            else:
                cols['data'] = get_events(args.data, region=region, selection=selection)

            # Bkgs
            total_bkg = Value(0)
            for sample in bkgs:
        
                evts = get_events(sample, region=region, selection=selection)

                if args.data is None and 'fake' in sample:
                    evts *= dd_scale

                # Normalization factors are not applied in the control
                # regions they were derived from.
                if not region.startswith('CR'):
                    if args.muq is not None and sample == 'photonjet':
                        evts *= float(args.muq)
                    if args.muw is not None and sample == 'wgamma':
                        evts *= float(args.muw)
                    if args.mut is not None and sample == 'ttbarg':
                        evts *= float(args.mut)

                cols[sample] = evts
                total_bkg += evts
           
            cols['Total bkg'] = total_bkg

            # In CRs annotate the dominant background with its purity
            # and the implied normalization factor mu.
            if region.startswith('CR') and args.data:
                if 'CRQ' in region or 'CRM' in region:
                    mu = (cols['data']-(total_bkg-cols['photonjet']))/cols['photonjet']
                    purity = cols['photonjet'] / total_bkg
                    cols['photonjet'] = '%s (%.2f, mu=%.2f)' % (cols['photonjet'], purity.mean, mu.mean)

                elif 'CRT' in region:
                    mu = (cols['data']-(total_bkg-cols['ttbarg']))/cols['ttbarg']
                    purity = cols['ttbarg'] / total_bkg

                    cols['ttbarg'] = '%s (%.2f%%, mu=%.2f)' % (cols['ttbarg'], purity.mean, mu.mean)

                elif 'CRW' in region:
                    mu = (cols['data']-(total_bkg-cols['wgamma']))/cols['wgamma']
                    purity = cols['wgamma'] / total_bkg

                    cols['wgamma'] = '%s (%.2f%%, mu=%.2f)' % (cols['wgamma'], purity.mean, mu.mean)


            # Signals
            if args.signal:

                for sig in signal:
                    n_s = get_events(sig, region=region, selection=selection)
                    cols[sig] = '%s (%s)' % (n_s, get_significance(n_s, total_bkg))

        table.add_column(region[:10], cols.values())
    
        
    print table
示例#6
0
def systable(workspace, samples, channels, output_name):
    """Write a LaTeX table with the after-fit systematic-uncertainty
    breakdown per channel (and, if `samples` is given, per sample).

    workspace   -- fit workspace evaluated via latexfitresults()
    samples     -- comma-separated sample list; falsy means full model
    channels    -- comma-separated channel/region list
    output_name -- path of the .tex file to write
    """

    chan_str = channels.replace(",","_")
    chan_list = channels.split(",")

    # When samples are given, the breakdown is computed per (channel, sample).
    chosen_sample = False
    if samples:
        sample_str = samples.replace(",","_") + "_"
        from cmdLineUtils import cmdStringToListOfLists
        sample_list = cmdStringToListOfLists(samples)
        chosen_sample = True

    show_percent = True
    doAsym = True

    result_name = 'RooExpandedFitResult_afterFit'

    # Bookkeeping entries of the result dict that are not uncertainties.
    skip_list = ['sqrtnobsa', 'totbkgsysa', 'poisqcderr','sqrtnfitted','totsyserr','nfitted']

    chan_sys = {}
    orig_chan_list = list(chan_list)
    chan_list = []

    # calculate the systematics breakdown for each channel/region given in chanList
    # choose whether to use method-1 or method-2
    # choose whether calculate systematic for full model or just a sample chosen by user
    for chan in orig_chan_list:

        if not chosen_sample:
            reg_sys = latexfitresults(workspace, chan, '', result_name, 'obsData', doAsym)

            chan_sys[chan] = reg_sys
            chan_list.append(chan)
        else:
            # One breakdown per (channel, sample); keyed "<chan>_<sample>".
            for sample in sample_list:
                sample_name = getName(sample)

                reg_sys = latexfitresults(workspace, chan, sample, result_name, 'obsData', doAsym)
                chan_sys[chan+"_"+sample_name] = reg_sys
                chan_list.append(chan+"_"+sample_name)

    # write out LaTeX table by calling function from SysTableTex.py function tablefragment
    #line_chan_sys_tight = tablefragment(chanSys,chanList,skiplist,chanStr,showPercent)
    if not chosen_sample:
        field_names = ['\\textbf{Uncertainties}',] + [ '\\textbf{%s}' % reg for reg in  chan_list ]
    elif len(sample_list) == 1:
        # Single sample: put its (LaTeX) label into the header instead.
        sample_label = labels_latex_dict.get(getName(sample_list[0]), getName(sample_list[0]))

        field_names = ['\\textbf{Uncertainties (%s)}' % sample_label ] + [ '\\textbf{%s}' % (reg.split('_')[0]) for reg in  chan_list ]
    else:
        field_names = ['\\textbf{Uncertainties}',] + [ '\\textbf{%s (%s)}' % (reg.split('_')[0], reg.split('_')[1]) for reg in  chan_list ]
    align = ['l',] + [ 'r' for i in chan_list ]

    tablel = LatexTable(field_names, align=align, env=True)

    # print the total fitted (after fit) number of events
    row = ['Total background expectation',]
    for region in chan_list:
        row.append("$%.2f$"  % chan_sys[region]['nfitted'])

    tablel.add_row(row)
    tablel.add_line()

    # print sqrt(N_obs) - for comparison with total systematic
    row = ['Total statistical $(\\sqrt{N_\\mathrm{exp}})$',]
    for region in chan_list:
        row.append("$\\pm %.2f$" % chan_sys[region]['sqrtnfitted'])

    tablel.add_row(row)

    # print total systematic uncertainty
    row = [ 'Total background systematic', ]

    for region in chan_list:
        percentage = chan_sys[region]['totsyserr']/chan_sys[region]['nfitted'] * 100.0
        row.append("$\\pm %.2f\ [%.2f\%%]$" % (chan_sys[region]['totsyserr'], percentage))

    tablel.add_row(row)
    tablel.add_line()
    tablel.add_line()

    # print systematic uncertainty per floated parameter (or set of parameters, if requested)
    # Sort parameters by their impact in the first channel, descending.
    d = chan_sys[chan_list[0]]
    m_listofkeys = sorted(d.iterkeys(), key=lambda k: d[k], reverse=True)


    # uncertanties dict
    unc_dict = dict()
    unc_order = []
    for name in m_listofkeys:

        if name in skip_list:
            continue

        printname = name.replace('syserr_','')

        #slabel = label.split('_')
        #label = 'MC stat. (%s)' % slabel[2]

        # skip negligible uncertainties in all requested regions:
        zero = True
        for index, region in enumerate(chan_list):
            percentage = chan_sys[region][name]/chan_sys[region]['nfitted'] * 100.0

            if ('%.4f' % chan_sys[region][name]) != '0.0000' and ('%.2f' % percentage) != '0.00':
                zero = False

        if zero:
            continue

        # Parameter name -> parameter label
        if printname.startswith('gamma_stat'):
            label = 'MC stat.'

        elif printname.startswith('gamma_shape_JFAKE_STAT_jfake'):
            label = 'jet $\\to\\gamma$ fakes stat.'

        elif printname.startswith('gamma_shape_EFAKE_STAT_efake'):
            label = '$e\\to\\gamma$ fakes stat.'

        else:
            # Fall back to the raw parameter name if no label is defined.
            if printname in systdict and systdict[printname]:
                label = systdict[printname]
            else:
                label = printname

        # Fill dict
        for index, region in enumerate(chan_list):

            # gamma (stat.) parameters only apply to their own region.
            if printname.startswith('gamma') and not region.split('_')[0] in printname:
                continue

            if not label in unc_dict:
                unc_dict[label] = []
                unc_order.append(label)

            if not show_percent:
                unc_dict[label].append("$\\pm %.2f$" % chan_sys[region][name])
            else:
                # Show one extra decimal for sub-percent uncertainties.
                percentage = chan_sys[region][name]/chan_sys[region]['nfitted'] * 100.0
                if percentage < 1:
                    unc_dict[label].append("$\\pm %.2f\ [%.2f\%%]$" % (chan_sys[region][name], percentage))
                else:
                    unc_dict[label].append("$\\pm %.2f\ [%.1f\%%]$" % (chan_sys[region][name], percentage))



    # fill table
    for label in unc_order:
        tablel.add_row([label,] + unc_dict[label])

    tablel.add_line()

    tablel.save_tex(output_name)
示例#7
0
def yieldstable(workspace, samples, channels, output_name, table_name, is_cr=False, show_before_fit=False, unblind=True):

    if is_cr:
        show_before_fit=True
        normalization_factors = get_normalization_factors(workspace)

    #sample_str = samples.replace(",","_")
    from cmdLineUtils import cmdStringToListOfLists
    samples_list = cmdStringToListOfLists(samples)

    regions_list = [ '%s_cuts' % r for r in channels.split(",") ]
    #samples_list = samples.split(",")

    # call the function to calculate the numbers, or take numbers from pickle file  
    if workspace.endswith(".pickle"):
        print "READING PICKLE FILE"
        f = open(workspace, 'r')
        m = pickle.load(f)
        f.close()
    else:
        #m = YieldsTable.latexfitresults(workspace, regions_list, samples_list, 'obsData') 
        m = latexfitresults(workspace, regions_list, samples_list)

        with open(output_name.replace('.tex',  '.pickle'), 'w') as f:
            pickle.dump(m, f)


    regions_names = [ region.replace("_cuts", "").replace('_','\_') for region in m['names'] ]

    field_names = [table_name,] + regions_names
    align = ['l',] + [ 'r' for i in regions_names ]

    samples_list_decoded = []
    for isam, sample in enumerate(samples_list):
        sampleName = getName(sample)
        samples_list_decoded.append(sampleName)

    samples_list = samples_list_decoded

    tablel = LatexTable(field_names, align=align, env=True)
    tablep = PrettyTable(field_names, align=align)

    #  number of observed events
    if unblind:
        row = ['Observed events',] + [ '%d' % n for n in m['nobs'] ]
    else:
        row = ['Observed events',] + [ '-' for n in m['nobs'] ]

    tablel.add_row(row)
    tablep.add_row(row)
    tablel.add_line()
    tablep.add_line()

    #print the total fitted (after fit) number of events
    # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
    rowl = ['Expected SM events', ]
    rowp = ['Expected SM events', ]

    for index, n in enumerate(m['TOTAL_FITTED_bkg_events']):

        if (n - m['TOTAL_FITTED_bkg_events_err'][index]) > 0. :
            rowl.append('$%.2f \pm %.2f$' % (n, m['TOTAL_FITTED_bkg_events_err'][index]))
            rowp.append('%.2f &plusmn %.2f' % (n, m['TOTAL_FITTED_bkg_events_err'][index]))

        else:
            #print "WARNING:   negative symmetric error after fit extends below 0. for total bkg pdf:  will print asymmetric error w/ truncated negative error reaching to 0."
            rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['TOTAL_FITTED_bkg_events_err'][index]))
            rowp.append('%.2f -%.2f +%.2f' % (n, n, m['TOTAL_FITTED_bkg_events_err'][index]))

    tablel.add_row(rowl)
    tablel.add_line()
    tablep.add_row(rowp)
    tablep.add_line()

    map_listofkeys = m.keys()

    # print fitted number of events per sample
    # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
    for sample in samples_list:
        for name in map_listofkeys:

            rowl = []
            rowp = []

            if not "Fitted_events_" in name: 
                continue

            sample_name = name.replace("Fitted_events_", "")
            if sample_name != sample:
                continue
        
            rowl.append('%s' % labels_latex_dict.get(sample_name, sample_name).replace('_', '\_'))
            rowp.append('%s' % labels_html_dict.get(sample_name, sample_name))

            for index, n in enumerate(m[name]):

                if ((n - m['Fitted_err_'+sample][index]) > 0.) or not abs(n) > 0.00001:
                    rowl.append('$%.2f \\pm %.2f$' % (n, m['Fitted_err_'+sample][index]))
                    rowp.append('%.2f &plusmn %.2f' % (n, m['Fitted_err_'+sample][index]))

                else:
                    #print "WARNING:   negative symmetric error after fit extends below 0. for sample", sample, "    will print asymmetric error w/ truncated negative error reaching to 0."
                    rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['Fitted_err_'+sample][index]))
                    rowp.append('%.2f -%.2f +%.2f' % (n, n, m['Fitted_err_'+sample][index]))

            tablel.add_row(rowl)
            tablep.add_row(rowp)
  
    tablel.add_line()
    tablep.add_line()

    # print the total expected (before fit) number of events
    if show_before_fit:

        # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
        rowl = ['Before SM events',]
        rowp = ['(before fit) SM events',]

        total_before = []
        purity_before = []
            
        for index, n in enumerate(m['TOTAL_MC_EXP_BKG_events']):

            if regions_names[index].startswith('CR'):
                total_before.append(n)

            rowl.append('$%.2f$' % n)
            rowp.append('%.2f' % n)

        tablel.add_row(rowl)
        tablel.add_line()

        tablep.add_row(rowp)
        tablep.add_line()

        map_listofkeys = m.keys()

        # print expected number of events per sample
        # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
        for sample in samples_list:

            for name in map_listofkeys:

                rowl = []
                rowp = []

                if "MC_exp_events_" in name and sample in name:

                    sample_name = name.replace("MC_exp_events_","")

                    if sample_name != sample:
                        continue
              
                    rowl.append('(before fit) %s' % labels_latex_dict.get(sample_name, sample_name).replace('_', '\_'))
                    rowp.append('(before fit) %s' % labels_html_dict.get(sample_name, sample_name))

                    for index, n in enumerate(m[name]):
                    
                        if regions_names[index] == 'CRQ' and sample == 'photonjet':
                            purity_before.append(n)
                        if regions_names[index] == 'CRW' and sample == 'wgamma':
                            purity_before.append(n)
                        if regions_names[index] == 'CRT' and sample == 'ttbarg':
                            purity_before.append(n)

                        rowl.append('$%.2f$' % n)
                        rowp.append('%.2f' % n)

                    tablel.add_row(rowl)
                    tablep.add_row(rowp)
  
        tablel.add_line()
        tablep.add_line()

    if show_before_fit and all([r.startswith('CR') for r in regions_names]) and normalization_factors is not None:

        tablel.add_row(['', '', '', ''])
        tablel.add_line()

        tablep.add_row(['', '', '', ''])
        tablep.add_line()

        # purity
        rowl = ['Background purity',]
        rowp = ['Background purity',]

        for index, region in enumerate(regions_names):

            purity = int(purity_before[index]/total_before[index] * 100.)

            rowl.append('$%i\%%$' % purity)
            rowp.append('%i%%' % purity)
            
        tablel.add_row(rowl)
        tablel.add_line()

        tablep.add_row(rowp)
        tablep.add_line()

        # normalization
        rowl = ['Normalization factor ($\mu$)',]
        rowp = ['Normalization factor (mu)',]
        for region in regions_names:
            rowl.append('$%.2f \pm %.2f$' % normalization_factors[region])
            rowp.append('%.2f &plusmn %.2f' % normalization_factors[region])

        tablel.add_row(rowl)
        tablel.add_line()
        tablep.add_row(rowp)
        tablep.add_line()


    tablel.save_tex(output_name)

    with open(output_name.replace('.tex', '.html'), 'w+') as f:
        f.write(tablep.get_html_string())
示例#8
0
def plot_energy_spectra():
    """Fit the photo peaks of all reference spectra, tabulate rates, and build
    the channel->energy calibration and the energy-resolution fit.

    For every TKA file in ``energy_spectra`` the configured peaks are fitted
    and written to out/calib_peaks.tex and out/rates.tex.  The fitted channel
    positions vs. literature energies then give a linear calibration fit
    (out/calib_fit.*, out/calibresiduum.*, out/calib.tex) and the fitted peak
    widths give the resolution fit (out/energyresolution_*).
    """
    mus = []       # fitted peak positions in channels, as (value, error) pairs
    widths = []    # fitted peak widths in channels, as (value, error) pairs
    energies = []  # literature peak energies in keV, as (value, error) pairs

    #leer = TkaFile("data/EnergiespektrumLeer.TKA")

    with LatexTable("out/calib_peaks.tex") as peaktable, LatexTable(
            "out/rates.tex") as ratetable:
        peaktable.header("El.",
                         "Höhe",
                         r"Channel $x_\textrm{max}$",
                         r"Breite $\Delta x$",
                         r'$\chi^2/\textrm{ndf}$',
                         "Energie / keV",
                         lineafter=1)
        ratetable.header("El.", "Events", "Messzeit", "Rate")

        for filename, meta in energy_spectra.items():
            peaktable.hline()

            tka = TkaFile("data/" + filename)
            x = np.arange(len(tka))
            #y = tka.data - leer.data
            y = tka.data

            # Poisson-style bin errors; the +1 avoids a zero error on empty bins
            #errors = np.sqrt(tka.data + leer.data + 2)
            errors = np.sqrt(tka.data + 1)

            plt.clf()
            plt.plot(x, y, ',', color="black")

            for i, (mu0, sigma0, energy,
                    sigma_energy) in enumerate(meta["peaks"], 1):
                # local gaussian fit around the expected peak position
                fit = local_fit(x, y, errors, mu=mu0, sigma=sigma0)
                fit.plot(fit.mu - 5 * fit.sigma,
                         fit.mu + 5 * fit.sigma,
                         1000,
                         zorder=10000,
                         color="red")
                mus.append((fit.mu, fit.error_mu))

                if meta["element"] in ("Na", "Cs"):
                    # NOTE(review): hard-coded linear calibration constants
                    # (keV/channel and offset) -- presumably copied from an
                    # earlier run of the calibration fit performed below;
                    # confirm they are still current.
                    E_gamma = 0.09863 * fit.mu - 19.056
                    m_electron = 511  # keV
                    # Compton edge energy belonging to this photo peak
                    E_compton = E_gamma / (1 + m_electron / (2 * E_gamma))

                    # convert the edge energy back into a channel number
                    c_compton = (E_compton + 19.056) / 0.09863

                    plt.axvline(c_compton, color="red", linestyle="--")

                peaktable.row((meta["element"], len(meta["peaks"])),
                              formatQuantityLatex(fit.A, fit.error_A),
                              formatQuantityLatex(fit.mu, fit.error_mu),
                              formatQuantityLatex(fit.sigma, fit.error_sigma),
                              "%.2f" % fit.chi2ndf,
                              formatQuantityLatex(energy, sigma_energy))

                widths.append((fit.sigma, fit.error_sigma))
                energies.append((energy, sigma_energy))
                #print(meta["element"], i, fit)

            plt.xlabel("Kanal")
            plt.ylabel("Count")
            plt.xlim(0, 2**14)
            plt.title(meta["title"])
            plt.savefig("out/" + clean_filename(filename) + "_all." + SAVETYPE)

            # total event count and measured rate of this spectrum
            N = ufloat(y.sum(), errors.sum())
            # time error 1/sqrt(12): presumably the uniform-distribution sigma
            # of a 1 s timer quantization -- TODO confirm
            t = ufloat(tka.real_time, 1 / math.sqrt(12))
            m = N / t  # Hz/Bq
            r = ufloat(91.5, 0.01)  # mm
            A = ufloat(*meta["activity"]) * 1000  # Bq
            I_g = 1
            # NOTE(review): this overwrites the r = 91.5 mm assigned above --
            # confirm which source-detector distance is actually intended.
            r = ufloat(15, 5)
            F_D = ufloat(7.45, 0.05) * ufloat(80.75, 0.05)  # mm^2

            # detector efficiency from the solid-angle ratio
            efficiency = 4 * math.pi * r**2 * m / (F_D * A * I_g)
            print("Efficiency:", meta["element"], efficiency)

            ratetable.row(meta["element"], formatUFloatLatex(N),
                          formatUFloatLatex(t, unit="s"),
                          formatUFloatLatex(m, unit="1/s"))

    # split the (value, error) pairs into separate arrays
    mus, error_mus = np.array(mus).T
    widths, error_widths = np.array(widths).T
    energies, error_energies = np.array(energies).T

    # Calibration: linear fit energy(channel); iterate so the combined x/y
    # errors converge together with the fit parameters
    plt.clf()
    fit = Fit(LINEAR)
    fit.slope = 0.1
    fit.offset = -20
    for i in range(5):
        errors = fit.combine_errors(mus, error_mus, error_energies)
        fit.fit(mus, energies, errors)

    plt.errorbar(mus,
                 energies,
                 xerr=error_mus,
                 yerr=error_energies,
                 fmt=',',
                 color="black")
    fit.plot(box='tl',
             units={
                 "slope": "eV / Channel",
                 "offset": "eV"
             },
             factors={
                 "slope": 1000,
                 "offset": 1000
             },
             color="red")
    plt.xlabel("Kanal")
    plt.ylabel("Energie / keV")
    plt.savefig("out/calib_fit." + SAVETYPE)

    plt.clf()
    errs = fit.combine_errors(mus, xerr=error_mus, yerr=error_energies)
    fit.plot_residual(mus, energies, errs, box='tl', fmt=',', color="black")
    plt.xlabel("Kanal")
    plt.ylabel("Energie / keV")
    plt.savefig("out/calibresiduum." + SAVETYPE)

    # calibration constants converted to eV for the report text
    s = formatQuantityLatex(fit.slope * 1000,
                            fit.error_slope * 1000,
                            unit="eV / Kanal",
                            math=False)
    o = formatQuantityLatex(fit.offset * 1000,
                            fit.error_offset * 1000,
                            unit="eV",
                            math=False)

    with open('out/calib.tex', 'w') as file:
        file.write(
            r'Die in diesem Versuch verwendeten Einstellungen f\"uhren zu einer Einteilung von \[ '
            + s + r' \] wobei der erste Kanal die Energie \[ ' + o +
            r' \] besitzt.')

    # Energy resolution: convert fitted widths/positions from channels to keV
    # (gaussian propagation of slope*width and slope*mu + offset)
    error_widths = np.sqrt(
        np.power(fit.error_slope * widths, 2) +
        np.power(error_widths * fit.slope, 2))
    widths = widths * fit.slope

    error_energies = np.sqrt(
        np.power(fit.error_offset, 2) + np.power(mus * fit.error_slope, 2) +
        np.power(fit.slope * error_mus, 2))
    energies = fit.slope * mus + fit.offset

    X = energies
    Y = widths
    SX = error_energies
    SY = error_widths

    # resolution model: DeltaE = sqrt((a E)^2 + b^2 E)
    func = lambda E, a, b: np.sqrt(np.power(a * E, 2) + np.power(b, 2) * E)
    fit = Fit(func)
    fit.a = 0.01
    fit.b = 0.01

    # iterate so the combined errors and the fit parameters converge
    for _ in range(10):
        err = fit.combine_errors(X, SX, SY)
        fit.fit(X, Y, err)

    plt.clf()
    plt.errorbar(X, Y, xerr=SX, yerr=SY, fmt=',', color="black")
    fit.plot(box='br', units={'b': r'\sqrt{\textrm{keV}}'}, color="red")
    plt.xlabel(r"$ E $ / keV")
    plt.ylabel(r"$ \Delta E / keV$")
    plt.title(
        r'Energieauflösung: Fit zu $ \Delta E = \sqrt{\left(a E\right)^2 + b^2 E} $'
    )
    plt.savefig("out/energyresolution_fit." + SAVETYPE)

    print("a =", formatQuantity(fit.a, fit.error_a))
    print("b =", formatQuantity(fit.b, fit.error_b))

    plt.clf()
    err = fit.combine_errors(X, SX, SY)
    fit.plot_residual(X, Y, err, box='tr', fmt=",", color="black")
    plt.xlabel(r"$ E $ / keV")
    plt.ylabel(r"$ \Delta E / keV$")
    plt.title("Energieauflösung: Residuen")
    plt.savefig("out/energyresolution_residual." + SAVETYPE)

    with LatexTable("out/energyresolution.tex") as table:
        table.header("Parameter", "Wert")
        table.row("$a$", formatQuantityLatex(fit.a, fit.error_a))
        table.row("$b$",
                  formatQuantityLatex(fit.b, fit.error_b, unit="\sqrt{keV}"))
示例#9
0
def yieldstable(workspace,
                samples,
                channels,
                output_name,
                table_name='',
                show_before_fit=False,
                unblind=True,
                show_cr_info=False,
                cr_dict=None):
    """Write a LaTeX yields table for the given samples and regions.

    The table lists the observed events, the total fitted background, the
    fitted yield per sample and -- optionally -- the before-fit expectations,
    control-region background purities and fitted normalization factors.

    workspace: RooFit workspace path, or a '.pickle' file with previously
        computed yields (as written by an earlier call of this function).
    samples: sample specification string, decoded by cmdStringToListOfLists.
    channels: comma-separated region names; '_cuts' is appended internally.
    output_name: output '.tex' path; a matching '.pickle' with the raw
        numbers is written when the workspace is actually fitted.
    table_name: text of the top-left header cell.
    show_before_fit: include the before-fit expectation rows.
    unblind: if False, observed counts are masked with '-'.
    show_cr_info: add purity/normalization rows (implies show_before_fit).
    cr_dict: mapping region name -> sample that defines that CR's purity.
    """
    # fresh dict per call instead of a shared mutable default; it is only
    # read here, but this is the safe convention
    if cr_dict is None:
        cr_dict = {}

    # bind unconditionally so the guard below never hits an unbound name
    normalization_factors = None
    if show_cr_info:
        show_before_fit = True
        normalization_factors = get_normalization_factors(workspace)

    samples_list = cmdStringToListOfLists(samples)

    regions_list = ['%s_cuts' % r for r in channels.split(",")]

    # compute the numbers, or read them back from a pickle file
    if workspace.endswith(".pickle"):
        print("Reading from pickle file")
        # binary mode: pickle data is not text, and the file is closed
        # deterministically via the context manager
        with open(workspace, 'rb') as f:
            m = pickle.load(f)
    else:
        m = latexfitresults(workspace, regions_list, samples_list)

        with open(output_name.replace('.tex', '.pickle'), 'wb') as f:
            pickle.dump(m, f)

    # strip the technical '_cuts' suffix and escape '_' for LaTeX
    regions_names = [
        region.replace("_cuts", "").replace('_', r'\_') for region in m['names']
    ]

    field_names = [table_name] + regions_names
    align = ['l'] + ['r' for _ in regions_names]

    # decode the per-sample option lists into plain sample names
    samples_list = [getName(sample) for sample in samples_list]

    tablel = LatexTable(field_names, align=align, env=True)

    # number of observed events (masked when blinded)
    if unblind:
        row = ['Observed events'] + ['%d' % n for n in m['nobs']]
    else:
        row = ['Observed events'] + ['-' for _ in m['nobs']]

    tablel.add_row(row)
    tablel.add_line()

    # Total fitted (after fit) number of events.  If n - err would extend
    # below 0, quote an asymmetric error truncated at 0 to keep it physical.
    rowl = ['Expected SM events']

    for index, n in enumerate(m['TOTAL_FITTED_bkg_events']):
        err = m['TOTAL_FITTED_bkg_events_err'][index]
        if (n - err) > 0.:
            rowl.append(r'$%.2f \pm %.2f$' % (n, err))
        else:
            rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, err))

    tablel.add_row(rowl)
    tablel.add_line()

    # after-fit number of events per sample (same truncation rule as above)
    for sample in samples_list:
        for name in m.keys():

            if "Fitted_events_" not in name:
                continue

            sample_name = name.replace("Fitted_events_", "")
            if sample_name != sample:
                continue

            rowl = ['%s' % labels_latex_dict.get(
                sample_name, sample_name).replace('_', r'\_')]

            for index, n in enumerate(m[name]):
                err = m['Fitted_err_' + sample][index]
                # symmetric error when it stays physical, or for
                # (numerically) zero yields
                if ((n - err) > 0.) or not abs(n) > 0.00001:
                    rowl.append(r'$%.2f \pm %.2f$' % (n, err))
                else:
                    rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, err))

            tablel.add_row(rowl)

    tablel.add_line()

    # denominators / numerators for the CR purity rows further below
    total_before = {}
    purity_before = {}

    # total expected (before fit) number of events
    if show_before_fit:

        rowl = ['Before fit SM events']

        for index, n in enumerate(m['TOTAL_MC_EXP_BKG_events']):

            reg_name = regions_names[index]

            # remember the CR totals for the purity computation
            if cr_dict and reg_name in cr_dict:
                total_before[reg_name] = n

            rowl.append('$%.2f$' % n)

        tablel.add_row(rowl)
        tablel.add_line()

        # expected (before fit) number of events per sample
        for sample in samples_list:

            for name in m.keys():

                if "MC_exp_events_" in name and sample in name:

                    sample_name = name.replace("MC_exp_events_", "")

                    if sample_name != sample:
                        continue

                    rowl = ['Before fit %s' % labels_latex_dict.get(
                        sample_name, sample_name).replace('_', r'\_')]

                    for index, n in enumerate(m[name]):
                        reg_name = regions_names[index]
                        # the CR-defining sample's expectation is the
                        # purity numerator
                        if cr_dict and reg_name in cr_dict and \
                                sample == cr_dict[reg_name]:
                            purity_before[reg_name] = n

                        rowl.append('$%.2f$' % n)

                    tablel.add_row(rowl)

        tablel.add_line()

    if show_cr_info and normalization_factors is not None:

        # empty separator row
        tablel.add_row(['' for _ in range(len(regions_names) + 1)])
        tablel.add_line()

        # background purity per region
        rowl = ['Background purity']

        for region in regions_names:
            try:
                purity = int(
                    round(purity_before[region] / total_before[region] * 100.))
                rowl.append(r'$%i\%%$' % purity)
            except (KeyError, ZeroDivisionError):
                # not a CR, or no expected events: nothing to quote
                rowl.append('-')

        tablel.add_row(rowl)
        tablel.add_line()

        # fitted normalization factor (value, error) per region
        rowl = [r'Normalization factor ($\mu$)']
        for region in regions_names:
            try:
                rowl.append(r'$%.2f \pm %.2f$' % normalization_factors[region])
            except (KeyError, TypeError):
                rowl.append('-')

        tablel.add_row(rowl)
        tablel.add_line()

    tablel.save_tex(output_name)
示例#10
0
def energieaufloesung(calibration, plot=True):
    """Determine the detector energy resolution from all fitted peaks.

    Every peak of the calibration spectra (``kalib``) and of the sample
    spectra (``data``, background-subtracted) is fitted; the squared peak
    widths are then fitted against the peak energies with the linear model
    DeltaE^2 = a0 + a1 * E.  Results are plotted (optionally) and written to
    out/energyresolution.tex and out/energyresolution_examples.tex.

    calibration: channel->energy calibration passed to Experiment.
    plot: if True, save the fit and residual plots under out/.
    """
    print("##### ENERGY RESOLUTION #####")

    energies = []
    widths = []
    sigma_energies = []
    sigma_widths = []

    def collect_peaks(filename, title, peaks, subtract_background):
        """Fit each peak of one spectrum and record energy/width + errors."""
        experiment = Experiment("data/" + filename,
                                title=title,
                                calibration=calibration)
        if subtract_background:
            experiment.subtract_empty("data/G20_Leer.mca", 0.5)
        for peak in peaks:
            result = experiment.find_peak(peak[0], peak[1], plot=False)
            energies.append(experiment.channel2energy(result.mu))
            widths.append(experiment.channelwidth2energywidth(result.sigma))
            sigma_energies.append(
                experiment.channelwidth2energywidth(result.sigma_mu))
            sigma_widths.append(
                experiment.channelwidth2energywidth(result.sigma_sigma))

    # calibration spectra: no background subtraction
    for filename, meta in kalib.items():
        collect_peaks(filename, meta["title"], meta["peaks"], False)

    # sample spectra: subtract the scaled empty measurement first
    for filename, meta in data.items():
        collect_peaks(filename, filename, meta["peaks"], True)

    energies = np.array(energies)
    widths = np.array(widths)
    sigma_energies = np.array(sigma_energies)
    sigma_widths = np.array(sigma_widths)

    # fit DeltaE^2 vs E; propagate the width error via d(w^2) = 2 w dw
    X, SX = energies, sigma_energies
    Y, SY = np.power(widths, 2), 2 * widths * sigma_widths

    fit = Fit(lambda x, a0, a1: a0 + x * a1)
    fit.a0 = 1
    fit.a1 = 1

    # iterate so the combined x/y errors converge together with the fit
    for _ in range(10):
        combined = fit.combine_errors(X, SX, SY)
        fit.fit(X, Y, combined)

    if plot:
        plt.clf()
        plt.errorbar(X, Y, xerr=SX, yerr=SY, fmt=',')
        fit.plot(np.min(X),
                 np.max(X),
                 box='br',
                 units={'a0': 'keV', 'a1': r'\sqrt{keV}'})
        plt.xlabel(r"$ E $ / keV")
        plt.ylabel(r"$ \Delta E^2 / keV^2$")
        plt.title(r'Energieauflösung: Fit zu $ \Delta E^2 = a_0 + a_1 E $'
                  )  #  \oplus \frac{c}{E}
        plt.savefig("out/energyresolution_fit." + SAVETYPE)

        plt.clf()
        combined = fit.combine_errors(X, SX, SY)
        fit.plot_residuums(X, Y, combined, box='tr', fmt=",")
        plt.xlabel(r"$ E $ / keV")
        plt.ylabel(r"$ \Delta E^2 / keV^2$")
        plt.title("Energieauflösung: Residuen")
        plt.savefig("out/energyresolution_residuum." + SAVETYPE)

    with LatexTable("out/energyresolution.tex") as table:
        table.header("Parameter", "Wert")
        table.row("$a_0$", format_error(fit.a0, fit.sigma_a0, unit="keV^2"))
        table.row("$a_1$", format_error(fit.a1, fit.sigma_a1, unit="keV"))
        table.hline()
        # derived parametrization DeltaE = sqrt(a^2 + b^2 E);
        # error propagation: d(sqrt(x)) = dx / (2 sqrt(x))
        a = math.sqrt(fit.a0)
        b = math.sqrt(fit.a1)
        table.row("$a$", format_error(a, 0.5 * fit.sigma_a0 / a, unit="keV"))
        table.row("$b$",
                  format_error(b, 0.5 * fit.sigma_a1 / b, unit="\sqrt{keV}"))

    with LatexTable("out/energyresolution_examples.tex") as table:
        example_energies = np.linspace(10, 60, 6)
        example_sigmas = np.sqrt(fit.apply(example_energies))
        example_fwhms = example_sigmas * 2 * math.sqrt(2 * math.log(2))
        table.header("Energie",
                     "Auflösung",
                     "Relative Auflösung",
                     "FWHM",
                     lineafter=0)
        table.row("$E$", "$\sigma$", "$\sigma / E$",
                  "$2 \sqrt{2 \ln{2}} \sigma$")
        table.hline(2)
        for energy, sigma, fwhm in zip(example_energies, example_sigmas,
                                       example_fwhms):
            table.row("%d keV" % energy,
                      _n(sigma * 1000) + " eV",
                      _n(sigma / energy * 100) + r"\%",
                      "%d eV" % (fwhm * 1000))
示例#11
0
def auswertung(calibration, plot=True, smooth=False):
    """Analyse the unknown samples: fit every configured peak and tabulate it.

    calibration: channel->energy calibration (see kalibration()), passed
        through to Experiment.
    plot: if True, save raw spectra, per-peak zooms and an annotated
        overview plot for each sample under out/.
    smooth: if True, smooth each spectrum before the peak fits.

    Writes out/test_peaks.tex (peak heights, energies, widths) and
    out/test_counts.tex (event counts with background correction).
    """
    print("##### UNKNOWN #####")

    with LatexTable("out/test_peaks.tex") as peaktable, LatexTable(
            "out/test_counts.tex") as counttable:
        peaktable.header("Probe", "Höhe", "Energie", "Breite", lineafter=0)
        peaktable.row("", "", r"$E_\textrm{max}$", r"$\Delta E$")
        peaktable.hline()
        counttable.header("Element", "Messdauer", "Ereignisse", "korrigiert",
                          "Anteil")

        # empty-chamber reference measurement used for background subtraction
        leer = Experiment("data/G20_Leer.mca")
        counttable.row(
            "Leermessung",
            r'$' + leer.mcameta["Accumulation Time"] + r' \unit{s}$',
            leer.mcameta["Slow Count"], "-", "-")
        counttable.hline()

        for filename, meta in data.items():
            peaktable.hline()
            #print("="*10, filename, "="*10)
            experiment = Experiment("data/" + filename,
                                    title=meta["title"],
                                    calibration=calibration)

            # background-corrected event count; the empty measurement is
            # scaled by 0.5 -- presumably half the accumulation time, TODO
            # confirm against the measurement log
            count = int(experiment.mcameta["Slow Count"])
            reduced = int(count - 0.5 * int(leer.mcameta["Slow Count"]))
            percentage = "%.1f \\%%" % (reduced / count * 100)
            counttable.row(
                meta["title"],
                r'$' + experiment.mcameta["Accumulation Time"] + r' \unit{s}$',
                count, reduced, percentage)
            if plot:
                plt.clf()
                experiment.plot()
                plt.title("Probe: " + meta["title"] + " - Rohdaten")
                plt.xlim(0, 4096)
                plt.savefig("out/" + clean_filename(filename) + "_raw." +
                            SAVETYPE)

            experiment.subtract_empty("data/G20_Leer.mca", 0.5)

            if smooth:
                experiment.smooth(0.1)

            if plot:
                plt.clf()
                experiment.errorplot()
                plt.title("Probe: " + meta["title"])
                experiment.set_energy_labels(stepsize=0.1)

            lines = []  # per-peak annotation strings for the overview plot
            for i, (mu0, sigma0) in enumerate(
                    sorted(meta["peaks"], key=lambda peak: peak[0]), 1):
                fit = experiment.find_peak(mu0, sigma0, plot=plot)

                # peak position: statistical error from the fit, systematic
                # error from the calibration
                mu = experiment.channel2energy(fit.mu)
                error_mu_stat = experiment.channelwidth2energywidth(
                    fit.sigma_mu)
                error_mu_sys = experiment.sigma_channel2energy(fit.mu)
                string_mu = format_error(mu,
                                         error_mu_stat,
                                         error_mu_sys,
                                         unit='keV')

                # peak width: only the statistical error is quoted
                sigma = experiment.channelwidth2energywidth(fit.sigma)
                error_sigma_stat = experiment.channelwidth2energywidth(
                    fit.sigma_sigma)
                #error_sigma_sys =  experiment.sigma_channelwidth2energywidth(fit.sigma) # close to 0
                string_sigma = format_error(sigma,
                                            error_sigma_stat,
                                            unit='keV')

                lines.append(r"$E_" + str(i) + r"$ = " + string_mu)
                peaktable.row((meta["title"], len(meta["peaks"])),
                              format_error(fit.A,
                                           fit.sigma_A,
                                           parenthesis=False), string_mu,
                              string_sigma)

                if plot:
                    # zoom to +-5 sigma around the fitted peak
                    plt.title("Probe: " + meta["title"] + " - Peak %d" % i)
                    plt.xlim(fit.mu - 5 * fit.sigma, fit.mu + 5 * fit.sigma)
                    plt.autoscale(enable=True, axis='y')
                    l, u = plt.ylim()
                    plt.ylim(0, u)
                    plt.savefig("out/" + clean_filename(filename) +
                                "_peak_%d." % i + SAVETYPE)

            if plot:
                # overview plot of the whole spectrum with peak annotations
                plt.title("Probe: " + meta["title"])
                experiment.set_energy_labels(stepsize=5)
                plt.xlim(0, 4096)
                plt.autoscale(enable=True, axis='y')
                text = "\n".join(lines)
                info_box(text, location='tr')
                plt.savefig("out/" + clean_filename(filename) + "_all." +
                            SAVETYPE)
示例#12
0
def kalibration(plot=True):
    """Calibrate the channel->energy relation from the reference spectra.

    Fits every configured peak of the spectra in ``kalib``, writes peak,
    count and relative-amplitude LaTeX tables, then performs a linear fit of
    literature energy vs. fitted channel position.

    plot: if True, save per-peak and overview plots plus the calibration
        fit/residual plots under out/.

    Returns the linear fit object (slope/offset and their errors define the
    calibration used by the other analysis steps).
    """
    print("##### CALIBRATION #####")
    channels = []        # fitted peak positions (channel)
    channel_errors = []  # statistical errors of the positions
    energies = []        # corresponding literature energies (keV)

    with LatexTable("out/calib_peaks.tex") as peaktable, LatexTable(
            "out/calib_counts.tex") as counttable, LatexTable(
                "out/calib_relamp.tex") as relamptable:
        peaktable.header("El.",
                         "Höhe",
                         "Channel",
                         "Breite",
                         r'$\chi^2/\textrm{ndf}$',
                         "Linie",
                         "Energie",
                         lineafter=0)
        peaktable.row("", "", r"$x_\textrm{max}$", r"$\Delta x$", "", "", "")
        peaktable.hline()

        relamptable.header("Element",
                           "Peak",
                           "Relative Höhe",
                           "Literaturwert",
                           lineafter=1)
        counttable.header("Element", "Messdauer", "Ereignisse")

        for filename, meta in kalib.items():
            peaktable.hline()
            relamptable.hline()
            #print("="*10, filename, "="*10)
            experiment = Experiment("data/" + filename, title=meta["title"])
            counttable.row(
                meta["element"],
                r'$' + experiment.mcameta["Accumulation Time"] + r' \unit{s}$',
                experiment.mcameta["Slow Count"])
            if plot:
                plt.clf()
                experiment.errorplot()
                plt.title(meta["title"])
            lines = []  # per-peak annotation strings (info box currently disabled)
            # (A, sigma_A) of the first fitted peak; all other amplitudes are
            # quoted relative to it -- presumably the K_alpha line, TODO confirm
            kalpha = None
            for i, (mu0, sigma0, peak, energy, lrel) in enumerate(
                    sorted(meta["peaks"], key=lambda peak: peak[0]), 1):
                fit = experiment.find_peak(mu0, sigma0, plot=plot)
                channels.append(fit.mu)
                channel_errors.append(fit.sigma_mu)
                #channel_errors.append(fit.sigma) # wrong: peak width, not position error
                energies.append(energy)
                lines.append("Peak {:d}, Channel {:s}, Energy {:s}".format(
                    i, format_error(fit.mu, fit.sigma_mu),
                    format_error(energy, 0.01, unit='keV')))

                if not kalpha:
                    kalpha = (fit.A, fit.sigma_A)
                # gaussian error propagation of the amplitude ratio A / A_ref
                fehler = np.sqrt(
                    np.power(fit.sigma_A / kalpha[0], 2) +
                    np.power(fit.A / np.power(kalpha[0], 2) * kalpha[1], 2))
                rel = format_error(fit.A / kalpha[0] * 100,
                                   fehler * 100,
                                   unit=r'\%')
                if kalpha[0] == fit.A:
                    # the reference peak itself is 100 % by definition
                    rel = r"100 \%"
                #rel = _n(fit.A/kalpha[0]*100, precision=3) + r"\%"
                lrel = _n(lrel, precision=3) + r"\%"
                relamptable.row((meta["element"], len(meta["peaks"])),
                                r'$ ' + peak + r'$', rel, lrel)

                peaktable.row((meta["element"], len(meta["peaks"])),
                              format_error(fit.A,
                                           fit.sigma_A,
                                           parenthesis=False),
                              format_error(fit.mu,
                                           fit.sigma_mu,
                                           parenthesis=False),
                              format_error(fit.sigma,
                                           fit.sigma_sigma,
                                           parenthesis=False),
                              "%.2f" % fit.chisqndf, r'$ ' + peak + r'$',
                              r'$ ' + ("%.2f" % energy) + r' \unit{keV}$')
                if plot:
                    # zoom to +-5 sigma around the fitted peak
                    plt.xlim(fit.mu - 5 * fit.sigma, fit.mu + 5 * fit.sigma)
                    plt.autoscale(enable=True, axis='y')
                    plt.savefig("out/" + clean_filename(filename) +
                                "_peak_%d." % i + SAVETYPE)
            if plot:
                text = "\n".join(lines)
                #info_box(text, location='tr')
                plt.xlim(0, 4096)
                plt.autoscale(enable=True, axis='y')
                plt.savefig("out/" + clean_filename(filename) + "_all." +
                            SAVETYPE)

    # linear fit of literature energy vs. fitted channel position
    X = np.array(channels)
    SX = np.array(channel_errors)
    Y = np.array(energies)
    SY = 0.01  # keV -- NOTE(review): original comment said "1 eV", but 0.01 keV is 10 eV; confirm intended value

    fit = linear_fit(X, Y, xerr=SX, yerr=SY)
    if plot:
        plt.clf()
        plt.errorbar(X, Y, xerr=SX, yerr=SY, fmt=',')
        fit.plot(np.min(X),
                 np.max(X),
                 box='tl',
                 units={
                     "slope": "keV / Channel",
                     "offset": "keV"
                 })
        plt.xlabel(r"Channel")
        plt.ylabel(r"$ E $ / keV")
        plt.title(r"Kalibrationsgerade: Fit")
        plt.savefig("out/calib_fit." + SAVETYPE)

        plt.clf()
        err = fit.combine_errors(X, SX, SY)
        fit.plot_residuums(X, Y, err, box='bl', fmt=",")
        plt.xlabel(r"Channel")
        plt.ylabel(r"$ E $ / keV")
        plt.title(r"Kalibrationsgerade: Residuen")
        plt.savefig("out/calib_residuum." + SAVETYPE)

    # calibration constants converted to eV for the report text
    s = format_error(fit.slope * 1000,
                     fit.sigma_slope * 1000,
                     unit="eV / Kanal",
                     surroundmath=False)
    o = format_error(fit.offset * 1000,
                     fit.sigma_offset * 1000,
                     unit="eV",
                     surroundmath=False)

    with open('out/calib.tex', 'w') as file:
        file.write(
            r'Die in diesem Versuch verwendeten Einstellungen f\"uhren zu einer Einteilung von \[ '
            + s + r' \] wobei der erste Kanal die Energie \[ ' + o +
            r' \] besitzt.')

    # print("RESULT: linear fit with chisq: %.2f" % (fit.chisqndf))
    # print("RESULT: energy per bin: %.1f keV" % (fit.slope))
    # print("RESULT: energy of 0th bin: %.1f keV" % (fit.offset))
    return fit