def main():
    set_root_defaults()
    options, _ = parse_arguments()
    variable = 'ST'
    config_7TeV = XSectionConfig(7)
    config_8TeV = XSectionConfig(8)
    path_to_JSON_7TeV = options.path + '/7TeV/' + variable + '/'
    path_to_JSON_8TeV = options.path + '/8TeV/' + variable + '/'
    # we need the generators
    # and the central samples + errors
    results_7TeV, _ = read_xsection_measurement_results(
        path_to_JSON_7TeV,
        variable,
        bin_edges_full,
        category='central',
        channel='combined',
        k_values={'combined': config_7TeV.k_values_combined})
    results_8TeV, _ = read_xsection_measurement_results(
        path_to_JSON_8TeV,
        variable,
        bin_edges_full,
        category='central',
        channel='combined',
        k_values={'combined': config_8TeV.k_values_combined})
    plot_results(results_7TeV, results_8TeV, variable)
def main():
    '''
        Main function for this script
    '''
    set_root_defaults(msg_ignore_level=3001)

    parser = OptionParser()
    parser.add_option("-o", "--output",
                      dest="output_folder", default='data/pull_data/',
                      help="output folder for pull data files")
    parser.add_option("-n", "--n_input_mc", type=int,
                      dest="n_input_mc", default=100,
                      help="number of toy MC used for the tests")
    parser.add_option("--tau", type='float',
                      dest="tau_value", default=-1.,
                      help="tau-value for SVD unfolding")
    parser.add_option("-m", "--method", type='string',
                      dest="method", default='TUnfold',
                      help="unfolding method")
    parser.add_option("-f", "--file", type='string',
                      dest="file", default='data/toy_mc/unfolding_toy_mc.root',
                      help="file with toy MC")
    parser.add_option("-v", "--variable", dest="variable", default='MET',
                      help="set the variable to analyse (defined in config/variable_binning.py)")
    parser.add_option("--com", "--centre-of-mass-energy", dest="CoM", default=13,
                      help='''set the centre of mass energy for analysis.
                      Default = 13 [TeV]''', type=int)
    parser.add_option("-c", "--channel", type='string',
                      dest="channel", default='combined',
                      help="channel to be analysed: electron|muon|combined")
    parser.add_option("-s", type='string',
                      dest="sample", default='madgraph',
                      help="channel to be analysed: electron|muon|combined")

    (options, _) = parser.parse_args()

    centre_of_mass = options.CoM
    measurement_config = XSectionConfig(centre_of_mass)
    make_folder_if_not_exists(options.output_folder)

    use_n_toy = options.n_input_mc
    method = options.method
    variable = options.variable
    sample = options.sample
    tau_value = options.tau_value

    create_unfolding_pull_data(options.file, method, options.channel,
                               centre_of_mass, variable,
                               sample,
                               measurement_config.unfolding_central,
                               use_n_toy,
                               options.output_folder,
                               tau_value)
def create_unfolding_pull_data(input_file_name,
                               method,
                               channel,
                               centre_of_mass,
                               variable,
                               sample,
                               responseFile,
                               n_toy_data,
                               output_folder,
                               tau_value,
                               run_matrix=None):
    '''
        Sets up all variables for check_multiple_data_multiple_unfolding
    '''
    set_root_defaults(msg_ignore_level=3001)
    timer = Timer()
    input_file = File(input_file_name, 'read')
    folder_template = '{path}/{centre_of_mass}TeV/{variable}/{sample}/'

    msg_template = 'Producing unfolding pull data for {variable},'
    msg_template += ' tau-value {value}'
    inputs = {
        'path': output_folder,
        'centre_of_mass': centre_of_mass,
        'variable': variable,
        'sample': sample,
        'value': round(tau_value, 4),
    }

    h_response = get_response_histogram(responseFile, variable, channel)
    output_folder = folder_template.format(**inputs)
    make_folder_if_not_exists(output_folder)
    print(msg_template.format(**inputs))
    print('Output folder: {0}'.format(output_folder))
    print('Response here :', h_response)
    output_file_name = check_multiple_data_multiple_unfolding(
        input_file,
        method,
        channel,
        variable,
        h_response,
        n_toy_data,
        output_folder,
        tau_value,
    )
    print('Runtime', timer.elapsed_time())

    return output_file_name
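# Usage sketch for create_unfolding_pull_data. All file names, the tau value
# and the channel below are illustrative assumptions, not values taken from
# the repository configuration.
import os

toy_file = 'data/toy_mc/unfolding_toy_mc.root'        # hypothetical toy MC file
response_file = 'data/unfolding_response_13TeV.root'  # hypothetical response file
if os.path.exists(toy_file) and os.path.exists(response_file):
    pulls_file = create_unfolding_pull_data(
        input_file_name=toy_file,
        method='TUnfold',
        channel='combined',
        centre_of_mass=13,
        variable='MET',
        sample='powhegPythia',
        responseFile=response_file,
        n_toy_data=100,
        output_folder='data/pull_data/',
        tau_value=0.01,
    )
    print('Pull data written to', pulls_file)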
def main():
    set_root_defaults()
    # prevent directory ownership of ROOT histograms (python does the garbage
    # collection)
    parser = OptionParser()
    parser.add_option("-n", "--n_toy_mc",
                      dest="n_toy_mc", default=300,
                      help="number of toy MC to create", type=int)
    parser.add_option("-o", "--output",
                      dest="output_folder", default='data/toy_mc/',
                      help="output folder for toy MC")
    parser.add_option("-s", dest="sample", default='powhegPythia',
                        help='set underlying sample for creating the toy MC. Possible options: madgraph, powhegPythia, amcatnlo. Default is powhegPythia')
    parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=13,
                      help="set the centre of mass energy for analysis. Default = 13 [TeV]", type=int)
    parser.add_option('-V', '--verbose', dest="verbose", action="store_true",
                      help="Print the event number, reco and gen variable value")

    (options, _) = parser.parse_args()

    measurement_config = XSectionConfig(options.CoM)


    # baseDir = '/storage/ec6821/DailyPythonScripts/new/DailyPythonScripts/unfolding/13TeV/'
    # input_files = [
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_50pc_tp_55pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_95pc_tp_100pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_55pc_tp_60pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_60pc_tp_65pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_65pc_tp_70pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_70pc_tp_75pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_80pc_tp_85pc.root',
        # baseDir + 'unfolding_TTJets_13TeV_asymmetric_75pc_tp_80pc.root',
    # ]
    input_files = [
        measurement_config.unfolding_central_secondHalf
    ]
    create_toy_mc(input_files=input_files,
                  sample=options.sample,
                  output_folder=options.output_folder,
#                   variable=variable,
                  n_toy=options.n_toy_mc,
                  centre_of_mass=options.CoM,
                  config=measurement_config
                  )
def run(self):
    '''
        Run the workload
    '''
    import dps.analysis.unfolding_tests.create_unfolding_pull_data as pull
    from dps.utils.ROOT_utils import set_root_defaults
    set_root_defaults(msg_ignore_level=3001)
    pulls_file_name = pull.create_unfolding_pull_data(self.input_file_name,
                                                      self.method,
                                                      self.channel_to_run,
                                                      self.centre_of_mass,
                                                      self.variable_to_run,
                                                      self.sample_to_run,
                                                      self.response,
                                                      self.n_toy_data,
                                                      self.output_folder,
                                                      self.tau_value_to_run)
Example #7
def run(self):
    '''
        Run the workload
    '''
    import dps.analysis.unfolding_tests.create_unfolding_pull_data as pull
    from dps.utils.ROOT_utils import set_root_defaults
    set_root_defaults(msg_ignore_level=3001)
    pulls_file_name = pull.create_unfolding_pull_data(self.input_file_name,
                                                      self.method,
                                                      self.channel_to_run,
                                                      self.centre_of_mass,
                                                      self.variable_to_run,
                                                      self.sample,
                                                      self.response,
                                                      self.n_toy_data,
                                                      self.output_folder,
                                                      self.tau_value_to_run)
def create_unfolding_pull_data(input_file_name,
                               method,
                               channel,
                               centre_of_mass,
                               variable,
                               sample,
                               responseFile,
                               n_toy_data,
                               output_folder,
                               tau_value,
                               run_matrix=None):
    '''
        Sets up all variables for check_multiple_data_multiple_unfolding
    '''
    set_root_defaults(msg_ignore_level=3001)
    timer = Timer()
    input_file = File(input_file_name, 'read')
    folder_template = '{path}/{centre_of_mass}TeV/{variable}/{sample}/'

    msg_template = 'Producing unfolding pull data for {variable},'
    msg_template += ' tau-value {value}'
    inputs = {
        'path': output_folder,
        'centre_of_mass': centre_of_mass,
        'variable': variable,
        'sample': sample,
        'value': round(tau_value, 4),
    }

    h_response = get_response_histogram(responseFile, variable, channel)
    output_folder = folder_template.format(**inputs)
    make_folder_if_not_exists(output_folder)
    print(msg_template.format(**inputs))
    print('Output folder: {0}'.format(output_folder))
    print('Response here :', h_response)
    output_file_name = check_multiple_data_multiple_unfolding(
        input_file, method, channel, variable,
        h_response,
        n_toy_data,
        output_folder,
        tau_value,
    )
    print('Runtime', timer.elapsed_time())

    return output_file_name
def main():
    set_root_defaults()
    # prevent directory ownership of ROOT histograms (python does the garbage
    # collection)
    parser = OptionParser()
    parser.add_option("-n", "--n_toy_mc",
                      dest="n_toy_mc", default=300,
                      help="number of toy MC to create", type=int)
    parser.add_option("-o", "--output",
                      dest="output_folder", default='data/toy_mc/',
                      help="output folder for toy MC")
    parser.add_option("-s", dest="sample", default='powhegPythia',
                        help='set underlying sample for creating the toy MC. Possible options: madgraph, powhegPythia, amcatnlo. Default is powhegPythia')
    parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=13,
                      help="set the centre of mass energy for analysis. Default = 13 [TeV]", type=int)
    parser.add_option('-V', '--verbose', dest="verbose", action="store_true",
                      help="Print the event number, reco and gen variable value")

    (options, _) = parser.parse_args()

    measurement_config = XSectionConfig(options.CoM)

    input_file = None
    if options.sample == 'madgraph':
        input_file = measurement_config.unfolding_madgraphMLM
    elif options.sample == 'powhegPythia':
        input_file = measurement_config.unfolding_central
    elif options.sample == 'amcatnlo':
        input_file = measurement_config.unfolding_amcatnlo


    create_toy_mc(input_file=input_file,
                  sample=options.sample,
                  output_folder=options.output_folder,
#                   variable=variable,
                  n_toy=options.n_toy_mc,
                  centre_of_mass=options.CoM,
                  config=measurement_config
                  )
Example #10
def main():
    "Main Function"
    set_root_defaults()

    parser = OptionParser(
        "Script to check progress of CRAB jobs in creating nTuples. Run as: python check_CRAB_jobs.py -p projectFolder -n numberOfJobs >&check.log &"
    )
    parser.add_option("-p",
                      "--projectFolder",
                      dest="projectFolder",
                      help="specify project")
    parser.add_option("-n",
                      "--numberOfJobs",
                      dest="numberOfJobs",
                      help="specify project")

    (options, _) = parser.parse_args()

    #make sure the project option has been specified
    if not options.projectFolder:
        parser.error(
            'Please enter a project folder as the -p option: /gpfs_phys/storm/cms/user/...'
        )

    #normalise the projectFolder filepath and add a "/" at the end
    projectFolder = os.path.normpath(options.projectFolder) + os.sep

    #list the items in the CRAB output folder on the Bristol Storage Element.
    storageElementList = glob.glob(projectFolder + "*.root")
    if not storageElementList:
        print("Location Error: Specified project folder does not exist on the Bristol Storage Element, signifying that the CRAB job has probably not started running yet or you forgot to include the full path /gpfs_storm/cms/user/...")
        sys.exit()

    #The following section is commented out because the first run in a session
    #prompts for a grid password, which would stop the script from completing.
    #Its only purpose was to obtain the number of jobs, so for now that number
    #is passed in manually by the user via the -n option.

    #get the status of the crab jobs and extract the number of output files expected on the Bristol Storage Element.


#	projectFolder = options.projectFolder.split("/")[6]
#	status = commands.getstatusoutput("crab -status -c " + projectFolder)
#	statusFormatted = status[1].split("\n")
#	for line in statusFormatted:
#		if "crab:" in line and "Total Jobs" in line:
#			words = line.split()
#			numberOfJobs = int(words[1])

#Now, check that all job root files are present in Bristol Storage Element folder:

    missingOrBrokenTemp = []
    missingOrBroken = []
    goodFilesTemp = []
    goodFiles = []
    presentJobList = []
    duplicatesToDelete = []

    #make list of all the job numbers which should be present.
    jobList = range(1, int(options.numberOfJobs) + 1)

    #try opening all files in Bristol Storage Element folder and add to missing list if they cannot be opened.
    for f in storageElementList:
        #make list of all jobs numbers in the Bristol Storage Element folder
        jobNumber = int((re.split(r'[\W+,_]', f))[-4])
        presentJobList.append(jobNumber)

        #check if files are corrupt or not
        try:
            rootFile = File(f)
            rootFile.Close()
        except Exception:
            print("Adding Job Number", jobNumber, "to missingOrBroken list because file is corrupted.")
            missingOrBrokenTemp.append(jobNumber)
        else:
            goodFilesTemp.append(jobNumber)

    #now add any absent files to the missing list:
    for job in jobList:
        if job not in presentJobList:
            print "Adding Job Number", job, "to missingOrBroken list because it doesn't exist on the Storage Element."
            missingOrBrokenTemp.append(job)

    #Remove any job numbers from missingOrBroken which appear in both goodFiles and missingOrBroken lists
    for job in missingOrBrokenTemp:
        if job not in goodFilesTemp:
            missingOrBroken.append(job)
        else:
            print "Removing", job, "from missingOrBroken list because there is at least one duplicate good output file."

    #Remove any job numbers from goodFiles which appear more than once in goodFiles
    for job in goodFilesTemp:
        if job not in goodFiles:
            goodFiles.append(job)
        else:
            duplicatesToDelete.append(job)

    print "\n The following", len(
        goodFiles
    ), "good output files were found in the Bristol Storage Element folder:"
    print str(goodFiles).replace(" ", "")
    print "\n The following", len(
        duplicatesToDelete
    ), "job numbers have multiple good files on the Bristol Storage Element folder which can be deleted:"
    print str(duplicatesToDelete).replace(" ", "")
    print "\n The following", len(
        missingOrBroken
    ), "job numbers could not be found in the Bristol Storage Element folder:"
    print str(missingOrBroken).replace(" ", "")
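# How the job number is recovered from a CRAB output file name above: split on
# non-word characters, '+', ',' and '_' and take the fourth token from the end.
# The file name below is an assumption about the CRAB naming scheme
# (<name>_<job>_<retry>_<random>.root), not a real output file.
import re

example_name = 'TTJet_nTuple_merged_042_7_1_aBcDeF.root'
tokens = re.split(r'[\W+,_]', example_name)
print(int(tokens[-4]))  # -> 7, the job number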
                      help = "parameter for error treatment in RooUnfold")
    parser.add_argument( "-c", "--centre-of-mass-energy", dest = "com", default = 13,
                      help = "set the centre of mass energy for analysis. Default = 13 [TeV]", type = int )
    parser.add_argument( "-C", "--combine-before-unfolding", dest = "combine_before_unfolding", action = "store_true",
                      help = "Perform combination of channels before unfolding" )
    parser.add_argument( '--test', dest = "test", action = "store_true",
                      help = "Just run the central measurement" )
    parser.add_argument( '--ptreweight', dest = "ptreweight", action = "store_true",
                      help = "Use pt-reweighted MadGraph for the measurement" )
    parser.add_argument( '--visiblePS', dest = "visiblePS", action = "store_true",
                      help = "Unfold to visible phase space" )
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    set_root_defaults( msg_ignore_level = 3001 )
    # setup
    args = parse_arguments()

    # Cache arguments
    run_just_central            = args.test
    use_ptreweight              = args.ptreweight
    variable                    = args.variable
    com                         = args.com
    unfoldCfg.error_treatment   = args.error_treatment
    method                      = args.unfolding_method
    combine_before_unfolding    = args.combine_before_unfolding
    visiblePS                   = args.visiblePS

    # Cache arguments from xsection config
    measurement_config  = XSectionConfig( com )
    svd = TDecompSVD( m )
    svd.Decompose()
    svd.Print()
    sig = svd.GetSig()
    sig.Print()
    nSig = len(sig)
    sigmaMax = sig[0]
    sigmaMin = sig[nSig-2]
    condition = sigmaMax / max(0, sigmaMin)
    # condition = 1
    print(condition)
    return condition
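# A self-contained numpy sketch of what the fragment above computes: the
# condition number of a matrix from its singular values.  Note the fragment
# takes sig[nSig-2] (the second-smallest singular value) as sigmaMin; the
# textbook definition, used here, takes the smallest.  The 3x3 matrix is an
# arbitrary example.
import numpy as np

def condition_number(matrix):
    sig = np.linalg.svd(matrix, compute_uv=False)  # singular values, descending
    return sig[0] / sig[-1]

m_example = np.array([[4.0, 1.0, 0.2],
                      [1.0, 3.0, 0.5],
                      [0.2, 0.5, 2.0]])
print(condition_number(m_example))  # large values mean an ill-conditioned response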

def print_results_to_screen(result_dict):
    '''
        Print the results to the screen
        Can copy straight into config
    '''
    print "\n Tau Scan Outcomes: \n"
    for com in result_dict.keys():
        for channel in result_dict[com].keys():
            # Print in foprm such that neatly copy and paste into xsection.py
            print "\t\tself.tau_values_{ch} = {{".format(ch = channel)
            for variable in result_dict[com][channel].keys():
                print '\t\t\t"{0}" : {1},'.format(variable, result_dict[com][channel][variable])
            print "\t\t}"

if __name__ == '__main__':
    set_root_defaults( set_batch = True, msg_ignore_level = 3001 )
    main()
import rootpy.plotting.root2matplotlib as rplt
from dps.config import CMS
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt

plt.rc('text', usetex=True)

def getControlRegionHistogramsFromFile(file_name):
	config = read_data_from_JSON(file_name)
	measurement = Measurement(config)
	return measurement.cr_histograms

def rebinHists( hists ):
	for h in hists:
		hists[h].Rebin(2)

set_root_defaults( set_batch=True)

measurement_config  = XSectionConfig( 13 )

channel = ['electron', 'muon']

# for variable in measurement_config.variables:
# 	print variable

# 	central_control_histograms_path = 'data/normalisation/background_subtraction/13TeV/{var}/VisiblePS/central/normalisation_{channel}.txt'.format(
# 		var=variable,
# 		channel=ch,
# 	)
# 	other_control_histograms_path = central_control_histograms_path.replace('central', 'QCD_other_control_region')

# 	central_control_histograms_path = file_to_df(central_control_histograms_path)
def generate_toy(n_toy, n_input_mc, config, output_folder, start_at=0, split=1):
    from progressbar import Percentage, Bar, ProgressBar, ETA
    set_root_defaults()
    genWeight = '( EventWeight * {0})'.format(config.luminosity_scale)
    file_name = config.ttbar_category_templates_trees['central']
    make_folder_if_not_exists(output_folder)
    outfile = get_output_file_name(
        output_folder, n_toy, start_at, n_input_mc, config.centre_of_mass_energy)

    variable_bins = bin_edges.copy()

    widgets = ['Progress: ', Percentage(), ' ', Bar(), ' ', ETA()]

    with root_open(file_name, 'read') as f_in, root_open(outfile, 'recreate') as f_out:
        tree = f_in.Get("TTbar_plus_X_analysis/Unfolding/Unfolding")
        n_events = tree.GetEntries()
        print("Number of entries in tree : ", n_events)
        for channel in ['electron', 'muon']:
            print('Channel :', channel)
            gen_selection, gen_selection_vis = '', ''
            if channel == 'muon':
                gen_selection = '( isSemiLeptonicMuon == 1 )'
                gen_selection_vis = '( isSemiLeptonicMuon == 1 && passesGenEventSelection )'
            else:
                gen_selection = '( isSemiLeptonicElectron == 1 )'
                gen_selection_vis = '( isSemiLeptonicElectron == 1 && passesGenEventSelection )'

            selection = '( {0} ) * ( {1} )'.format(genWeight, gen_selection)
            selection_vis = '( {0} ) * ( {1} )'.format(genWeight,
                                                       gen_selection_vis)
            weighted_entries = get_weighted_entries(tree, selection)
            weighted_entries_vis = get_weighted_entries(tree, selection_vis)
            pbar = ProgressBar(widgets=widgets, maxval=n_input_mc).start()

            toy_mc_sets = []
            for variable in ['MET', 'HT', 'ST', 'WPT']:  # variable_bins:
                toy_mc = ToySet(f_out, variable, channel, n_toy)
                toy_mc_sets.append(toy_mc)
            count = 0
            for event in tree:
                # generate n_toy weights for each event
                mc_weights = get_mc_weight(weighted_entries, n_toy)
                mc_weights_vis = get_mc_weight(weighted_entries_vis, n_toy)

                if count >= n_input_mc:
                    break
                count += 1
                if count < start_at:
                    continue
#                 weight = event.EventWeight * config.luminosity_scale
#                 # rescale to N input events
#                 weight *= n_events / n_input_mc / split
                weight = 1

                for toy_mc in toy_mc_sets:
                    toy_mc.fill(event, weight, mc_weights, mc_weights_vis)
                if count % 1000 == 1:
                    pbar.update(count)
                    print('Processed {0} events'.format(count))
            pbar.finish()
            for toy_mc in toy_mc_sets:
                toy_mc.write()
    print('Toy MC was saved to file:', outfile)
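# get_mc_weight is defined elsewhere in the repository and is not shown here.
# A common way to build per-event toy weights for pull studies is a Poisson
# bootstrap: every event receives one Poisson(1) weight per toy dataset.  This
# is a sketch under that assumption, not the repository's actual implementation.
import numpy as np

def get_mc_weight_sketch(n_toy, seed=None):
    rng = np.random.default_rng(seed)
    return rng.poisson(lam=1.0, size=n_toy)  # one weight per toy dataset

print(get_mc_weight_sketch(5, seed=42))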
def main():
    '''
        Main function for this script
    '''
    set_root_defaults(msg_ignore_level=3001)

    parser = OptionParser()
    parser.add_option("-o",
                      "--output",
                      dest="output_folder",
                      default='data/pull_data/',
                      help="output folder for pull data files")
    parser.add_option("--tau",
                      type='float',
                      dest="tau_value",
                      default=-1.,
                      help="tau-value for SVD unfolding")
    parser.add_option("-m",
                      "--method",
                      type='string',
                      dest="method",
                      default='TUnfold',
                      help="unfolding method")
    parser.add_option(
        "-f",
        "--file",
        type='string',
        dest="file",
        default='data/toy_mc/toy_mc_powhegPythia_N_300_13TeV.root',
        help="file with toy MC")
    parser.add_option(
        "-v",
        "--variable",
        dest="variable",
        default='MET',
        help=
        "set the variable to analyse (defined in config/variable_binning.py)")
    parser.add_option("--com",
                      "--centre-of-mass-energy",
                      dest="CoM",
                      default=13,
                      help='''set the centre of mass energy for analysis.
                      Default = 13 [TeV]''',
                      type=int)

    (options, _) = parser.parse_args()

    centre_of_mass = options.CoM
    measurement_config = XSectionConfig(centre_of_mass)
    make_folder_if_not_exists(options.output_folder)

    use_n_toy = int(options.file.split('_')[5])
    print(use_n_toy)
    method = options.method
    variable = options.variable
    sample = str(options.file.split('_')[3])
    tau_value = options.tau_value

    for channel in measurement_config.analysis_types.keys():
        if channel == 'combined': continue
        create_unfolding_pull_data(
            options.file,
            method,
            channel,
            centre_of_mass,
            variable,
            sample,
            measurement_config.unfolding_central_firstHalf,
            # measurement_config.unfolding_central,
            use_n_toy,
            options.output_folder,
            tau_value)
Example #16
def generate_toy(n_toy,
                 n_input_mc,
                 config,
                 output_folder,
                 start_at=0,
                 split=1):
    from progressbar import Percentage, Bar, ProgressBar, ETA
    set_root_defaults()
    genWeight = '( EventWeight * {0})'.format(config.luminosity_scale)
    file_name = config.ttbar_category_templates_trees['central']
    make_folder_if_not_exists(output_folder)
    outfile = get_output_file_name(output_folder, n_toy, start_at, n_input_mc,
                                   config.centre_of_mass_energy)

    variable_bins = bin_edges.copy()

    widgets = ['Progress: ', Percentage(), ' ', Bar(), ' ', ETA()]

    with root_open(file_name, 'read') as f_in, root_open(outfile,
                                                         'recreate') as f_out:
        tree = f_in.Get("TTbar_plus_X_analysis/Unfolding/Unfolding")
        n_events = tree.GetEntries()
        print("Number of entries in tree : ", n_events)
        for channel in ['electron', 'muon']:
            print('Channel :', channel)
            gen_selection, gen_selection_vis = '', ''
            if channel == 'muon':
                gen_selection = '( isSemiLeptonicMuon == 1 )'
                gen_selection_vis = '( isSemiLeptonicMuon == 1 && passesGenEventSelection )'
            else:
                gen_selection = '( isSemiLeptonicElectron == 1 )'
                gen_selection_vis = '( isSemiLeptonicElectron == 1 && passesGenEventSelection )'

            selection = '( {0} ) * ( {1} )'.format(genWeight, gen_selection)
            selection_vis = '( {0} ) * ( {1} )'.format(genWeight,
                                                       gen_selection_vis)
            weighted_entries = get_weighted_entries(tree, selection)
            weighted_entries_vis = get_weighted_entries(tree, selection_vis)
            pbar = ProgressBar(widgets=widgets, maxval=n_input_mc).start()

            toy_mc_sets = []
            for variable in ['MET', 'HT', 'ST', 'WPT']:  # variable_bins:
                toy_mc = ToySet(f_out, variable, channel, n_toy)
                toy_mc_sets.append(toy_mc)
            count = 0
            for event in tree:
                # generate n_toy weights for each event
                mc_weights = get_mc_weight(weighted_entries, n_toy)
                mc_weights_vis = get_mc_weight(weighted_entries_vis, n_toy)

                if count >= n_input_mc:
                    break
                count += 1
                if count < start_at:
                    continue


#                 weight = event.EventWeight * config.luminosity_scale
#                 # rescale to N input events
#                 weight *= n_events / n_input_mc / split
                weight = 1

                for toy_mc in toy_mc_sets:
                    toy_mc.fill(event, weight, mc_weights, mc_weights_vis)
                if count % 1000 == 1:
                    pbar.update(count)
                    print('Processed {0} events'.format(count))
            pbar.finish()
            for toy_mc in toy_mc_sets:
                toy_mc.write()
    print('Toy MC was saved to file:', outfile)
	summary['ST_t'].append(ST_t)
	summary['ST_tW'].append(ST_tW)
	summary['STbar_t'].append(STbar_t)
	summary['STbar_tW'].append(STbar_tW)
	summary['TotalMC'].append(totalMC)
	summary['DataToMC'].append(dataToMC)

	order=['SingleTop', 'ST_s', 'ST_t', 'ST_tW', 'STbar_t', 'STbar_tW', 'TotalMC', 'DataToMC']

	d = dict_to_df(summary)
	d = d[order]
	df_to_file(output_folder+channel+'_'+branchName+'.txt', d)
	return

if __name__ == '__main__':
	set_root_defaults()
	args = parse_arguments()

	measurement_config = XSectionConfig( 13 )

	histogram_files = {
		'TTJet'     : measurement_config.ttbar_trees,
		'V+Jets'    : measurement_config.VJets_trees,
		'QCD'       : measurement_config.electron_QCD_MC_trees,
		'SingleTop' : measurement_config.SingleTop_trees,
		'ST_s'      : measurement_config.st_s_trees,
		'ST_t'      : measurement_config.st_t_trees,
		'ST_tW'     : measurement_config.st_tW_trees,
		'STbar_t'   : measurement_config.stbar_t_trees,
		'STbar_tW'  : measurement_config.stbar_tW_trees,
	}
Example #18
    summary['TotalMC'].append(totalMC)
    summary['DataToMC'].append(dataToMC)

    order = [
        'SingleTop', 'ST_s', 'ST_t', 'ST_tW', 'STbar_t', 'STbar_tW', 'TotalMC',
        'DataToMC'
    ]

    d = dict_to_df(summary)
    d = d[order]
    df_to_file(output_folder + channel + '_' + branchName + '.txt', d)
    return


if __name__ == '__main__':
    set_root_defaults()
    args = parse_arguments()

    measurement_config = XSectionConfig(13)

    histogram_files = {
        'TTJet': measurement_config.ttbar_trees,
        'V+Jets': measurement_config.VJets_trees,
        'QCD': measurement_config.electron_QCD_MC_trees,
        'SingleTop': measurement_config.SingleTop_trees,
        'ST_s': measurement_config.st_s_trees,
        'ST_t': measurement_config.st_t_trees,
        'ST_tW': measurement_config.st_tW_trees,
        'STbar_t': measurement_config.stbar_t_trees,
        'STbar_tW': measurement_config.stbar_tW_trees,
    }
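# dict_to_df and df_to_file are repository helpers (their imports are not shown
# in this snippet); in plain pandas the same summary table could be built and
# written like this (column values are made-up numbers, and the output file
# name is hypothetical):
import pandas as pd

summary_example = {'SingleTop': [3.21], 'TotalMC': [104.5], 'DataToMC': [0.98]}
df = pd.DataFrame(summary_example)[['SingleTop', 'TotalMC', 'DataToMC']]
df.to_csv('electron_MET.txt', sep='\t', index=False)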
Example #19
        data_efficiency_in_bin = data_efficiency.GetEfficiency(i + 1)
        data_efficiency_in_bin_error_up = data_efficiency.GetEfficiencyErrorUp(
            i + 1)
        data_efficiency_in_bin_error_down = data_efficiency.GetEfficiencyErrorLow(
            i + 1)
        dictionary[pt_bin_range]['data'] = {
            'efficiency': data_efficiency_in_bin,
            'err_up': data_efficiency_in_bin_error_up,
            'err_down': data_efficiency_in_bin_error_down,
        }

    pickle.dump(dictionary, output_pickle)


if __name__ == '__main__':
    set_root_defaults(msg_ignore_level=3001)
    parser = OptionParser()
    parser.add_option("-p",
                      "--path",
                      dest="path",
                      default='/hdfs/TopQuarkGroup/trigger_BLT_ntuples/',
                      help="set path to input BLT ntuples")
    parser.add_option("-o",
                      "--output_folder",
                      dest="output_plots_folder",
                      default='plots/2011/hadron_leg/',
                      help="set path to save tables")

    (options, args) = parser.parse_args()
    input_path = options.path
    output_folder = options.output_plots_folder
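# The fragment above reads per-bin values from a ROOT TEfficiency.  A minimal
# self-contained sketch of building one from pass/total histograms and querying
# a bin (bin contents are made-up numbers):
import ROOT

h_total = ROOT.TH1F('h_total', '', 4, 0.0, 100.0)
h_pass = ROOT.TH1F('h_pass', '', 4, 0.0, 100.0)
for b, (n_total, n_pass) in enumerate([(100, 80), (90, 75), (50, 45), (20, 19)], start=1):
    for _ in range(n_total):
        h_total.Fill(h_total.GetBinCenter(b))
    for _ in range(n_pass):
        h_pass.Fill(h_pass.GetBinCenter(b))
eff = ROOT.TEfficiency(h_pass, h_total)
print(eff.GetEfficiency(2), eff.GetEfficiencyErrorUp(2), eff.GetEfficiencyErrorLow(2))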
def main():
	"Main Function"
	set_root_defaults()

	parser = OptionParser("Script to check progress of CRAB jobs in creating nTuples. Run as: python check_CRAB_jobs.py -p projectFolder -n numberOfJobs >&check.log &")
	parser.add_option("-p", "--projectFolder", dest="projectFolder", help="specify project")
	parser.add_option("-n", "--numberOfJobs", dest="numberOfJobs",
		help="specify project")

	(options, _) = parser.parse_args()

	#make sure the project option has been specified
	if not options.projectFolder:
		parser.error('Please enter a project folder as the -p option: /gpfs_phys/storm/cms/user/...')

	#normalise the projectFolder filepath and add a "/" at the end
	projectFolder = os.path.normpath(options.projectFolder) + os.sep

	#list the items in the CRAB output folder on the Bristol Storage Element.
	storageElementList = glob.glob(projectFolder + "*.root")
	if not storageElementList:
		print("Location Error: Specified project folder does not exist on the Bristol Storage Element, signifying that the CRAB job has probably not started running yet or you forgot to include the full path /gpfs_storm/cms/user/...")
		sys.exit()

	#The following section is commented out because the first run in a session
	#prompts for a grid password, which would stop the script from completing.
	#Its only purpose was to obtain the number of jobs, so for now that number
	#is passed in manually by the user via the -n option.

	#get the status of the crab jobs and extract the number of output files expected on the Bristol Storage Element.
#	projectFolder = options.projectFolder.split("/")[6]
#	status = commands.getstatusoutput("crab -status -c " + projectFolder)
#	statusFormatted = status[1].split("\n")
#	for line in statusFormatted:
#		if "crab:" in line and "Total Jobs" in line:
#			words = line.split()
#			numberOfJobs = int(words[1])


	#Now, check that all job root files are present in Bristol Storage Element folder:

	missingOrBrokenTemp = []
	missingOrBroken = []
	goodFilesTemp = []
	goodFiles = []
	presentJobList = []
	duplicatesToDelete = []

	#make list of all the job numbers which should be present.
	jobList = range(1,int(options.numberOfJobs)+1)

	#try opening all files in Bristol Storage Element folder and add to missing list if they cannot be opened.
	for f in storageElementList:
		#make list of all jobs numbers in the Bristol Storage Element folder
		jobNumber = int((re.split(r'[\W+,_]', f))[-4])
		presentJobList.append(jobNumber)

		#check if files are corrupt or not
		try:
			rootFile = File(f)
			rootFile.Close()
		except Exception:
			print("Adding Job Number", jobNumber, "to missingOrBroken list because file is corrupted.")
			missingOrBrokenTemp.append(jobNumber)
		else:
			goodFilesTemp.append(jobNumber)

	#now add any absent files to the missing list:
	for job in jobList:
		if job not in presentJobList:
			print "Adding Job Number", job, "to missingOrBroken list because it doesn't exist on the Storage Element."
			missingOrBrokenTemp.append(job)

	#Remove any job numbers from missingOrBroken which appear in both goodFiles and missingOrBroken lists
	for job in missingOrBrokenTemp:
		if job not in goodFilesTemp:
			missingOrBroken.append(job)
		else:
			print "Removing", job, "from missingOrBroken list because there is at least one duplicate good output file."

	#Remove any job numbers from goodFiles which appear more than once in goodFiles
	for job in goodFilesTemp:
		if job not in goodFiles:
			goodFiles.append(job)
		else:
			duplicatesToDelete.append(job)

	print "\n The following", len(goodFiles), "good output files were found in the Bristol Storage Element folder:"
	print str(goodFiles).replace(" ", "")  
	print "\n The following", len(duplicatesToDelete), "job numbers have multiple good files on the Bristol Storage Element folder which can be deleted:"
	print str(duplicatesToDelete).replace(" ", "")
	print "\n The following", len(missingOrBroken), "job numbers could not be found in the Bristol Storage Element folder:"
	print str(missingOrBroken).replace(" ", "")
Example #21
    tau_lo      best tau            tau_hi
    '''
    best_tau = {}
    for variable in df_chi2.columns:
        if variable == 'tau': continue

        i = 0
        for chisq in df_chi2[variable]:
            if chisq <= cutoff:
                break
            i += 1
        if chisq > cutoff:
            print("{var} exceeds required cut".format(var=variable))
            # chi2 never dropped below the cutoff; fall back to the last scanned tau
            best_tau[variable] = df_chi2['tau'][i-1]
        else:
            chisq_lo = df_chi2[variable][i+1]
            chisq_hi = df_chi2[variable][i]
            ratio = (cutoff - chisq_lo) / (chisq_hi - chisq_lo)
            tau_lo = df_chi2['tau'][i+1]
            tau_hi = df_chi2['tau'][i]
            tau = tau_lo + ratio*(tau_hi - tau_lo)
            best_tau[variable] = tau
    return best_tau
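# Worked example of the interpolation above, with made-up numbers: the chi2
# scan crosses the cutoff between two tau points, and the best tau is taken
# linearly in (chi2, tau) between them.
chisq_hi, chisq_lo = 12.0, 8.0   # chi2 just above and just below the cutoff
tau_hi, tau_lo = 1.0e-2, 1.0e-3  # the corresponding scanned tau values
cutoff = 10.0
ratio = (cutoff - chisq_lo) / (chisq_hi - chisq_lo)  # 0.5
print(tau_lo + ratio * (tau_hi - tau_lo))            # 0.0055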

if __name__ == '__main__':
    set_root_defaults( set_batch = True, msg_ignore_level = 3001 )
    main()