Example #1
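# Tail of an analysis-configuration call: the CMS_ttHl_thu_shape_* entries for ttH/ttW/ttZ
# are commented out of the systematics list, jobs are submitted via sbatch, and the
# analysis is created and launched only after an interactive confirmation prompt.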
##      "CMS_ttHl_thu_shape_ttH_x1Down",
##      "CMS_ttHl_thu_shape_ttH_y1Up",   
##      "CMS_ttHl_thu_shape_ttH_y1Down",
##      "CMS_ttHl_thu_shape_ttW_x1Up",
##      "CMS_ttHl_thu_shape_ttW_x1Down",
##      "CMS_ttHl_thu_shape_ttW_y1Up",
##      "CMS_ttHl_thu_shape_ttW_y1Down",
##      "CMS_ttHl_thu_shape_ttZ_x1Up",
##      "CMS_ttHl_thu_shape_ttZ_x1Down",
##      "CMS_ttHl_thu_shape_ttZ_y1Up",
##      "CMS_ttHl_thu_shape_ttZ_y1Down"       
    ],
    max_files_per_job = 20,
    era = ERA, use_lumi = True, lumi = LUMI,
    debug = False,
    running_method = "sbatch",
    num_parallel_jobs = 4,
    executable_addBackgrounds = "addBackgrounds",
    executable_addBackgroundJetToTauFakes = "addBackgroundLeptonFakes", # CV: use common executable for estimating jet->lepton and jet->tau_h fake background
    histograms_to_fit = [ "EventCounter", "numJets", "mvaOutput_1l_2tau_ttbar_TMVA", "mvaOutput_1l_2tau_ttbar_sklearn", "mTauTauVis" ],
    select_rle_output = True)

  analysis.create()

  run_analysis = query_yes_no("Start jobs ?")
  if run_analysis:
    analysis.run()
  else:
    sys.exit(0)

Example #2
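# Same pattern as Example #1, auto-formatted: commented-out ttZ shape systematics,
# sbatch submission with 100 parallel jobs to speed up the hadd steps, and an
# interactive confirmation before analysis.run().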
            ##      "CMS_ttHl_thu_shape_ttZ_y1Up",
            ##      "CMS_ttHl_thu_shape_ttZ_y1Down",
        ],
        max_files_per_job=max_files_per_job,
        era=ERA,
        use_lumi=True,
        lumi=LUMI,
        debug=False,
        running_method="sbatch",
        num_parallel_jobs=100,  # Karl: speed up the hadd steps
        executable_addBackgrounds="addBackgrounds",
        executable_addBackgroundJetToTauFakes="addBackgroundLeptonFakes",  # CV: use common executable for estimating jet->lepton and jet->tau_h fake background
        histograms_to_fit=[
            "EventCounter",
            "numJets",
            "mvaOutput_1l_1tau_ttbar",
            "mTauTauVis",
            "mTauTau",
        ],
        select_rle_output=True,
    )

    analysis.create()

    run_analysis = query_yes_no("Start jobs ?")
    if run_analysis:
        analysis.run()
    else:
        sys.exit(0)
Example #3
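# Entry point of a 3l_1tau Ntuple-production script: logging to stdout, a
# prodNtupleConfig_3l_1tau configured for sbatch with RLE selection enabled,
# then create() and an interactive prompt before run().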
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format='%(asctime)s - %(levelname)s: %(message)s')

    ntupleProduction = prodNtupleConfig_3l_1tau(
        configDir=os.path.join("/home", getpass.getuser(),
                               "ttHNtupleProduction", ERA, version),
        outputDir=os.path.join("/hdfs/local/ttH_2tau", getpass.getuser(),
                               "ttHNtupleProduction", ERA, version),
        ##outputDir = os.path.join("/home", getpass.getuser(), "ttHNtupleProduction", ERA, version),
        executable_prodNtuple="produceNtuple_3l_1tau",
        cfgFile_prodNtuple="produceNtuple_3l_1tau_cfg.py",
        samples=samples,
        era=ERA,
        debug=False,
        running_method="sbatch",
        rle_directory='default',  # [*]
        version=version,
        num_parallel_jobs=4)
    # [*] if rle_directory is set to 'default', it looks for files in /home/$USER/ttHAnalysis/era/version/rles/channel;
    #     set it to '' if no RLE selection is needed

    ntupleProduction.create()

    run_ntupleProduction = query_yes_no("Start jobs ?")
    if run_ntupleProduction:
        ntupleProduction.run()
    else:
        sys.exit(0)
Example #4
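# Tail of an addMEM production configuration: one input file per job, a cap on MEM
# integrations, and create()/run() guarded by auto_exec / no_exec flags or an
# interactive prompt.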
        check_output_files=check_output_files,
        running_method=running_method,
        max_files_per_job=1,  # so that we'd have 1-1 correspondence b/w input and output files
        mem_integrations_per_job=50 if mode != 'sync' else 10,
        max_mem_integrations=max_mem_integrations,  # use -1 if you don't want to limit the nof MEM integrations
        num_parallel_jobs=num_parallel_jobs,
        leptonSelection=leptonSelection,
        hadTauSelection=hadTauSelectionAndWP,
        isDebug=debug,
        jet_cleaning_by_index=jet_cleaning_by_index,
        central_or_shift=central_or_shifts,
        dry_run=dry_run,
        use_nonnominal=use_nonnominal,
        use_home=use_home,
        submission_cmd=sys.argv,
    )

    goodToGo = addMEMProduction.create()

    if goodToGo:
        if auto_exec:
            run_addMEMProduction = True
        elif no_exec:
            run_addMEMProduction = False
        else:
            run_addMEMProduction = query_yes_no("Start jobs ?")
        if run_addMEMProduction:
            addMEMProduction.run()
Example #5
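  # Pileup-histogram production: puHistogramConfig wraps puHistogramProducer.sh,
  # create() returns per-job-type statistics that are logged before the jobs are
  # optionally started.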
  puHistogramProduction = puHistogramConfig(
    configDir          = configDir,
    outputDir          = outputDir,
    output_file        = output_file,
    executable         = "puHistogramProducer.sh",
    samples            = samples,
    max_files_per_job  = files_per_job,
    era                = era,
    check_output_files = check_output_files,
    running_method     = running_method,
    num_parallel_jobs  = num_parallel_jobs,
    dry_run            = dry_run,
    use_home           = use_home,
  )

  job_statistics = puHistogramProduction.create()
  for job_type, num_jobs in job_statistics.items():
    logging.info(" #jobs of type '%s' = %i" % (job_type, num_jobs))

  if auto_exec:
    run_puHistogramProduction = True
  elif no_exec:
    run_puHistogramProduction = False
  else:
    run_puHistogramProduction = query_yes_no("Start jobs ?")
  if run_puHistogramProduction:
    puHistogramProduction.run()
  else:
    sys.exit(0)
Example #6
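    # Reference generator-weight extraction with getRefGenWeight.py; otherwise the
    # same create() / log statistics / run() pattern as the pileup-histogram example above.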
    refGenWeightJobs = refGenWeightConfig(
        configDir=configDir,
        outputDir=outputDir,
        output_file=output_file,
        executable="getRefGenWeight.py",
        samples=samples,
        era=era,
        check_output_files=check_output_files,
        running_method=running_method,
        num_parallel_jobs=num_parallel_jobs,
        dry_run=dry_run,
        use_home=use_home,
        submission_cmd=sys.argv,
    )

    job_statistics = refGenWeightJobs.create()
    for job_type, num_jobs in job_statistics.items():
        logging.info(" #jobs of type '%s' = %i" % (job_type, num_jobs))

    if auto_exec:
        run_refGenWeight = True
    elif no_exec:
        run_refGenWeight = False
    else:
        run_refGenWeight = query_yes_no("Start jobs ?")
    if run_refGenWeight:
        refGenWeightJobs.run()
    else:
        sys.exit(0)
Example #7
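  # Enables selected TT/TTW samples by setting sample_info["use_it"] = True, then runs
  # the same 3l_1tau Ntuple-production workflow as Example #3 (a minimal variant
  # without configDir and rle_directory).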
  if sample_name in [
      "/TT_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring16MiniAODv1-PUSpring16_80X_mcRun2_asymptotic_2016_v3_ext3-v1/MINIAODSIM",
      "/TT_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring16MiniAODv1-PUSpring16_80X_mcRun2_asymptotic_2016_v3_ext4-v1/MINIAODSIM",
      "/TTW/spring16DR80v6aMiniAODv1/FASTSIM" ]:
    sample_info["use_it"] = True
#--------------------------------------------------------------------------------    

if __name__ == '__main__':
  logging.basicConfig(
    stream = sys.stdout,
    level = logging.INFO,
    format = '%(asctime)s - %(levelname)s: %(message)s')

  ntupleProduction = prodNtupleConfig_3l_1tau(
    outputDir = os.path.join("/home", getpass.getuser(), "ttHNtupleProduction", ERA, version),
    executable_prodNtuple = "produceNtuple_3l_1tau",
    samples = samples,
    era = ERA,
    debug = False,
    running_method = "sbatch",
    num_parallel_jobs = 4)

  ntupleProduction.create()

  run_ntupleProduction = query_yes_no("Start jobs ?")
  if run_ntupleProduction:
    ntupleProduction.run()
  else:
    sys.exit(0)

Example #8
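# MEM production for the 2lss_1tau channel: addMEMConfig_2lss_1tau with one file per
# job and a global cap of 20000 MEM integrations, started only if create() reports
# the configuration is good to go.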
#--------------------------------------------------------------------------------

version = "2016Dec12"
ERA     = "2016"

if __name__ == '__main__':
  logging.basicConfig(
    stream = sys.stdout,
    level = logging.INFO,
    format = '%(asctime)s - %(levelname)s: %(message)s')

  addMEMProduction = addMEMConfig_2lss_1tau(
    treeName                 = 'tree',
    outputDir                = os.path.join("/home", getpass.getuser(), "addMEM", ERA, version),
    executable_addMEM        = "addMEM_2lss_1tau",
    samples                  = samples,
    era                      = ERA,
    debug                    = False,
    running_method           = "sbatch",
    max_files_per_job        = 1,
    mem_integrations_per_job = 50,
    max_mem_integrations     = 20000,
    num_parallel_jobs        = 4)

  goodToGo = addMEMProduction.create()

  if goodToGo:
    run_addMEMProduction = query_yes_no("Start jobs ?")
    if run_addMEMProduction:
      addMEMProduction.run()
Example #9
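    # Histogram-projection jobs via projectHistogram.sh; the submission logic
    # (job statistics, auto_exec / no_exec / interactive prompt) matches Examples #5 and #6.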
        outputDir=outputDir,
        output_file=output_file,
        executable="projectHistogram.sh",
        projection_module=projection_module,
        samples=samples,
        max_files_per_job=files_per_job,
        era=era,
        plot=plot,
        check_output_files=check_output_files,
        running_method=running_method,
        num_parallel_jobs=num_parallel_jobs,
        dry_run=dry_run,
        use_home=use_home,
        submission_cmd=sys.argv,
    )

    job_statistics = projectionJobs.create()
    for job_type, num_jobs in job_statistics.items():
        logging.info(" #jobs of type '%s' = %i" % (job_type, num_jobs))

    if auto_exec:
        run_projection = True
    elif no_exec:
        run_projection = False
    else:
        run_projection = query_yes_no("Start jobs ?")
    if run_projection:
        projectionJobs.run()
    else:
        sys.exit(0)