Example #1
def produce_AllocationFile(MappingFile, allocation, minigroup_type="minimal"):

    #Load mapping file
    data = loadDataFile(MappingFile)

    #List of which minigroups are assigned to each bundle
    configuration = np.hstack(np.load(allocation, allow_pickle=True))

    #Get minigroups
    minigroups, minigroups_swap = getMinilpGBTGroups(data, minigroup_type)

    #Bundle together minigroup configuration
    bundles = getBundles(minigroups_swap, configuration)

    #Open output file
    fileout = open('allocation_20200729_1.txt', 'w')
    fileout.write(
        '(lpGBT_number) (number_modules) (sil=0scin=1) (layer) (u/eta) (v/phi) (number_elinks)\n'
    )
    for b, bundle in enumerate(bundles):
        fileout.write(str(b) + "\n")
        for minigroup in bundle:

            #List the lpGBTs in the minigroup:
            for lpgbt in minigroups_swap[minigroup]:
                fileout.write(str(lpgbt) + " ")

                #Get modules associated with each lpGBT:
                data_list = data[((data['TPGId1'] == lpgbt) |
                                  (data['TPGId2'] == lpgbt))]
                fileout.write(str(len(data_list)) + " ")
                for index, row in data_list.iterrows():
                    if (row['density'] == 2):
                        fileout.write("1 " + str(row['layer']) + " " +
                                      str(row['u']) + " " + str(row['v']) +
                                      " " + str(row['TPGeLinkSum']) + " ")
                    else:
                        fileout.write("0 " + str(row['layer']) + " " +
                                      str(row['u']) + " " + str(row['v']) +
                                      " " + str(row['TPGeLinkSum']) + " ")
                fileout.write("\n")

    fileout.close()
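
A minimal usage sketch, assuming the helper functions and imports used above are available in the module; the file names are hypothetical placeholders:

#Hypothetical inputs: a geometry mapping file and a .npy allocation saved by the optimisation step
produce_AllocationFile("mapping.txt", "allocation.npy", minigroup_type="minimal")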
Example #2
    def mapping_max(state):
        global chi2_min
        global combbest

        max_modules = None
        max_towers = None
        chi2 = 0

        bundles = getBundles(minigroups_swap, state)
        bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists, bundles)

        if include_max_modules_in_chi2:
            max_modules = getMaximumNumberOfModulesInABundle(
                minigroups_modules, bundles)
        if include_max_towers_in_chi2:
            bundled_towers = getTowerBundles(minigroups_towers, bundles)
            max_towers = len(
                max(bundled_towers, key=len)
            )  #Length of the bundle with the greatest number of towers

        chi2 = calculateChiSquared(inclusive_hists, bundled_lpgbthists,
                                   max_modules, max_modules_weighting_factor,
                                   max_towers, max_towers_weighting_factor)

        typicalchi2 = 600000000000
        if include_errors_in_chi2:
            typicalchi2 = 10000000
        if (chi2 < chi2_min):
            chi2_min = chi2
            combbest = np.copy(state)
            if (print_level > 0):
                print(algorithm, " ", chi2_min, " ", chi2_min / typicalchi2)
                if include_max_towers_in_chi2:
                    print("max_towers = ", max_towers)
                if include_max_modules_in_chi2:
                    print("max_modules = ", max_modules)
            if (print_level > 1):
                print(repr(combbest))

        return chi2
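
mapping_max reads and updates the module-level globals chi2_min and combbest; a minimal sketch of the initialisation assumed to exist elsewhere in the module:

chi2_min = np.inf  #Any sufficiently large starting value works; np.inf guarantees the first evaluated state is kept
combbest = np.empty(0)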
Example #3
    def mapping_max(state):
        global chi2_min
        global combbest

        chi2 = 0

        bundles = getBundles(minigroups_swap, state)
        bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists, bundles)

        chi2 = calculateChiSquared(inclusive_hists, bundled_lpgbthists)

        typicalchi2 = 600000000000
        if (chi2 < chi2_min):
            chi2_min = chi2
            combbest = np.copy(state)
            if (print_level > 0):
                print(algorithm, " ", chi2_min, " ", chi2_min / typicalchi2)
            if (print_level > 1):
                print(repr(combbest))

        return chi2
Example #4
def produce_nTCsPerModuleHists(MappingFile,
                               allocation,
                               CMSSW_ModuleHists,
                               minigroup_type="minimal",
                               correctionConfig=None):

    #Load mapping file
    data = loadDataFile(MappingFile)

    #List of which minigroups are assigned to each bundle
    configuration = np.hstack(np.load(allocation, allow_pickle=True))

    #Get minigroups
    minigroups, minigroups_swap = getMinilpGBTGroups(data, minigroup_type)

    #Get list of which modules are in each minigroup
    minigroups_modules = getMiniModuleGroups(data, minigroups_swap)

    #Bundle together minigroup configuration
    bundles = getBundles(minigroups_swap, configuration)

    #Get nTC hists per module
    module_hists = getModuleTCHists(CMSSW_ModuleHists)

    #Open output file
    outfile = ROOT.TFile.Open("hists_per_bundle.root", "RECREATE")
    for b, bundle in enumerate(bundles):
        outfile.mkdir("bundle_" + str(b))
        outfile.cd("bundle_" + str(b))
        for minigroup in bundle:

            for module in minigroups_modules[minigroup]:

                module_hists[tuple(module)].Write()

        outfile.cd()
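
A minimal usage sketch, assuming the helper functions above are importable; all file paths are hypothetical placeholders:

produce_nTCsPerModuleHists("mapping.txt", "allocation.npy", "ModuleHists.root", minigroup_type="minimal")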
Example #5
def study_mapping(MappingFile,
                  CMSSW_ModuleHists,
                  algorithm="random_hill_climb",
                  initial_state="best_so_far",
                  random_seed=None,
                  max_iterations=100000,
                  output_dir=".",
                  print_level=0,
                  minigroup_type="minimal",
                  correctionConfig=None,
                  phisplitConfig=None,
                  chi2Config=None,
                  TowerMappingFile=None):

    #Load external data
    data = loadDataFile(MappingFile)  #dataframe

    try:

        #Configuration for how to divide TCs into phidivisionX and phidivisionY (traditionally phi > 60 and phi < 60)
        split = "per_roverz_bin"
        phidivisionX_fixvalue_min = 55
        phidivisionY_fixvalue_max = None

        if phisplitConfig != None:
            split = phisplitConfig['type']
            if 'phidivisionX_fixvalue_min' in phisplitConfig.keys():
                phidivisionX_fixvalue_min = phisplitConfig[
                    'phidivisionX_fixvalue_min']
            if 'phidivisionY_fixvalue_max' in phisplitConfig.keys():
                phidivisionY_fixvalue_max = phisplitConfig[
                    'phidivisionY_fixvalue_max']

        inclusive_hists, module_hists = getModuleHists(
            CMSSW_ModuleHists,
            split=split,
            phidivisionX_fixvalue_min=phidivisionX_fixvalue_min,
            phidivisionY_fixvalue_max=phidivisionY_fixvalue_max)

    except EnvironmentError:
        print("File " + CMSSW_ModuleHists + " does not exist")
        exit()
    # Apply various corrections to r/z distributions from CMSSW

    if correctionConfig != None:
        print("Applying geometry corrections")
        applyGeometryCorrections(inclusive_hists, module_hists,
                                 correctionConfig)

    include_errors_in_chi2 = False
    include_max_modules_in_chi2 = False
    include_max_towers_in_chi2 = False
    max_modules_weighting_factor = 1000
    max_towers_weighting_factor = 1000  #Assumed default mirroring the modules factor; overridden by chi2Config if provided
    if chi2Config != None:
        if 'include_errors_in_chi2' in chi2Config.keys():
            include_errors_in_chi2 = chi2Config['include_errors_in_chi2']
        if 'include_max_modules_in_chi2' in chi2Config.keys():
            include_max_modules_in_chi2 = chi2Config[
                'include_max_modules_in_chi2']
        if 'max_modules_weighting_factor' in chi2Config.keys():
            max_modules_weighting_factor = chi2Config[
                'max_modules_weighting_factor']
        if 'include_max_towers_in_chi2' in chi2Config.keys():
            include_max_towers_in_chi2 = chi2Config[
                'include_max_towers_in_chi2']
        if 'max_towers_weighting_factor' in chi2Config.keys():
            max_towers_weighting_factor = chi2Config[
                'max_towers_weighting_factor']

    #Load tower data if required
    if include_max_towers_in_chi2:
        try:
            towerdata = loadModuleTowerMappingFile(TowerMappingFile)
        except EnvironmentError:
            print("File " + TowerMappingFile + " does not exist")
            exit()

    #Form hists corresponding to each lpGBT from module hists
    lpgbt_hists = getlpGBTHists(data, module_hists)

    minigroups, minigroups_swap = getMinilpGBTGroups(data, minigroup_type)
    minigroup_hists = getMiniGroupHists(
        lpgbt_hists,
        minigroups_swap,
        return_error_squares=include_errors_in_chi2)
    minigroup_hists_root = getMiniGroupHists(lpgbt_hists,
                                             minigroups_swap,
                                             root=True)
    #Get list of which modules are in each minigroup
    minigroups_modules = getMiniModuleGroups(data, minigroups_swap)

    #Get list of which towers are in each minigroup
    if include_max_towers_in_chi2:
        minigroups_towers = getMiniTowerGroups(towerdata, minigroups_modules)

    def mapping_max(state):
        global chi2_min
        global combbest

        max_modules = None
        max_towers = None
        chi2 = 0

        bundles = getBundles(minigroups_swap, state)
        bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists, bundles)

        if include_max_modules_in_chi2:
            max_modules = getMaximumNumberOfModulesInABundle(
                minigroups_modules, bundles)
        if include_max_towers_in_chi2:
            bundled_towers = getTowerBundles(minigroups_towers, bundles)
            max_towers = len(
                max(bundled_towers, key=len)
            )  #Length of the bundle with the greatest number of towers

        chi2 = calculateChiSquared(inclusive_hists, bundled_lpgbthists,
                                   max_modules, max_modules_weighting_factor,
                                   max_towers, max_towers_weighting_factor)

        typicalchi2 = 600000000000
        if include_errors_in_chi2:
            typicalchi2 = 10000000
        if (chi2 < chi2_min):
            chi2_min = chi2
            combbest = np.copy(state)
            if (print_level > 0):
                print(algorithm, " ", chi2_min, " ", chi2_min / typicalchi2)
                if include_max_towers_in_chi2:
                    print("max_towers = ", max_towers)
                if include_max_modules_in_chi2:
                    print("max_modules = ", max_modules)
            if (print_level > 1):
                print(repr(combbest))

        return chi2

    init_state = []
    if (initial_state == "example"):
        init_state = example_minigroup_configuration
    if (initial_state[-4:] == ".npy"):
        print(initial_state)
        init_state = np.hstack(np.load(initial_state, allow_pickle=True))
        if (len(init_state) != len(minigroups_swap)):
            print(
                "Initial state should be the same length as the number of mini groups"
            )
            exit()
    elif (initial_state == "random"):
        np.random.seed(random_seed)
        init_state = np.arange(len(minigroups_swap))
        np.random.shuffle(init_state)

    fitness_cust = mlrose.CustomFitness(mapping_max)
    # Define optimization problem object
    problem_cust = mlrose.DiscreteOpt(length=len(init_state),
                                      fitness_fn=fitness_cust,
                                      maximize=False,
                                      max_val=len(minigroups_swap),
                                      minigroups=minigroups_swap)

    # Define decay schedule
    schedule = mlrose.ExpDecay()
    #schedule = mlrose.ArithDecay()

    filename = "bundles_job_"
    filenumber = ""
    if (len(sys.argv) > 2):
        filenumber = str(sys.argv[2])
    else:
        filenumber = "default"
    filename += filenumber

    if (algorithm == "save_root"):
        #Save best combination so far into a root file
        bundles = getBundles(minigroups_swap, init_state)

        bundled_hists_root = getBundledlpgbtHistsRoot(minigroup_hists_root,
                                                      bundles)
        bundled_hists = getBundledlpgbtHists(minigroup_hists, bundles)

        chi2 = calculateChiSquared(inclusive_hists, bundled_hists)
        newfile = ROOT.TFile("lpgbt_10.root", "RECREATE")
        np.save(output_dir + "/" + filename + "_saveroot.npy",
                np.array(bundles, dtype=object))
        for sector in bundled_hists_root:
            for key, value in sector.items():
                value.Write()
        for sector in inclusive_hists:
            sector.Scale(1. / 24.)
            sector.Write()
        newfile.Close()
        print("Chi2:", chi2)
        print("List of Bundles:")
        for b, bundle in enumerate(bundles):
            print("")
            print("bundle" + str(b))
            for minigroup in bundle:
                #print (minigroup)
                lpgbts = minigroups_swap[minigroup]
                for lpgbt in lpgbts:
                    print(str(lpgbt) + ", ", end='')

    elif algorithm == "random_hill_climb" or algorithm == "simulated_annealing":

        try:
            if (algorithm == "random_hill_climb"):
                best_state, best_fitness = mlrose.random_hill_climb(
                    problem_cust,
                    max_attempts=10000,
                    max_iters=max_iterations,
                    restarts=0,
                    init_state=init_state,
                    random_state=random_seed)
            elif (algorithm == "simulated_annealing"):
                best_state, best_fitness = mlrose.simulated_annealing(
                    problem_cust,
                    schedule=schedule,
                    max_attempts=100000,
                    max_iters=10000000,
                    init_state=init_state,
                    random_state=random_seed)

        except ValueError:
            print("interrupt received, stopping and saving")

        finally:
            bundles = getBundles(minigroups_swap, combbest)
            np.save(output_dir + "/" + filename + ".npy",
                    np.array(bundles, dtype=object))
            file1 = open(output_dir + "/chi2_" + filenumber + ".txt", "a")
            file1.write("bundles[" + filenumber + "] = " + str(chi2_min) +
                        "\n")
            file1.close()

    else:
        print("Algorithm " + algorithm + " currently not implemented")
Example #6
def checkFluctuations(initial_state,
                      cmsswNtuple,
                      mappingFile,
                      outputName="alldata",
                      tcPtConfig=None,
                      correctionConfig=None,
                      phisplitConfig=None,
                      truncationConfig=None,
                      save_ntc_hists=False,
                      beginEvent=-1,
                      endEvent=-1):

    nROverZBins = 42
    #To get binning for r/z histograms
    inclusive_hists = np.histogram(np.empty(0),
                                   bins=nROverZBins,
                                   range=(0.076, 0.58))
    roverzBinning = inclusive_hists[1]

    #List of which minigroups are assigned to each bundle
    init_state = np.hstack(np.load(initial_state, allow_pickle=True))

    #Load the truncation options; needed if truncating based on E_T when running over the ntuple (save_sum_tcPt == True)
    truncation_options = []
    ABratios = []
    nLinks = []
    save_sum_tcPt = False
    if (tcPtConfig != None):
        save_sum_tcPt = tcPtConfig['save_sum_tcPt']
        options_to_study = tcPtConfig['options_to_study']
        if (truncationConfig != None):
            for option in options_to_study:
                truncation_options.append(
                    truncationConfig['option' +
                                     str(option)]['predetermined_values'])
                ABratios.append(
                    truncationConfig['option' + str(option)]['maxTCsA'] /
                    truncationConfig['option' + str(option)]['maxTCsB'])
                nLinks.append(truncationConfig['option' +
                                               str(option)]['nLinks'])

    #Load the CMSSW ntuple to get per event and per trigger cell information
    rootfile = ROOT.TFile.Open(cmsswNtuple, "READ")
    tree = rootfile.Get("HGCalTriggerNtuple")

    #Load mapping file
    data = loadDataFile(mappingFile)

    #Load geometry corrections
    if correctionConfig != None and correctionConfig['nTCCorrectionFile'] != None:
        modulesToCorrect = loadSiliconNTCCorrectionFile(
            correctionConfig['nTCCorrectionFile'])
    else:
        modulesToCorrect = pd.DataFrame()

    #Get list of which lpgbts are in each minigroup
    minigroups, minigroups_swap = getMinilpGBTGroups(data)

    #Get list of which modules are in each minigroup
    minigroups_modules = getMiniModuleGroups(data, minigroups_swap)
    bundles = getBundles(minigroups_swap, init_state)

    bundled_lpgbthists_allevents = []
    bundled_pt_hists_allevents = []

    ROverZ_per_module_phidivisionX = {}  #traditionally phi > 60 degrees
    ROverZ_per_module_phidivisionY = {}  #traditionally phi < 60 degrees
    ROverZ_per_module_phidivisionX_tcPt = {}
    ROverZ_per_module_phidivisionY_tcPt = {}

    nTCs_per_module = {}

    #Value of split in phi (traditionally 60 degrees)
    if phisplitConfig == None:
        phi_split_phidivisionX = np.full(nROverZBins, np.pi / 3)
        phi_split_phidivisionY = np.full(nROverZBins, np.pi / 3)
    else:
        if phisplitConfig['type'] == "fixed":
            phi_split_phidivisionX = np.full(
                nROverZBins,
                np.radians(phisplitConfig['phidivisionX_fixvalue_min']))
            phi_split_phidivisionY = np.full(
                nROverZBins,
                np.radians(phisplitConfig['phidivisionY_fixvalue_max']))
        else:
            file_roverz_inclusive = ROOT.TFile(
                str(phisplitConfig['splitfile']), "READ")
            PhiVsROverZ_Total = file_roverz_inclusive.Get("ROverZ_Inclusive")
            split_indices_phidivisionX = getPhiSplitIndices(
                PhiVsROverZ_Total, split="per_roverz_bin")
            split_indices_phidivisionY = getPhiSplitIndices(
                PhiVsROverZ_Total, split="per_roverz_bin")
            phi_split_phidivisionX = np.zeros(nROverZBins)
            phi_split_phidivisionY = np.zeros(nROverZBins)
            for i, (idxX, idxY) in enumerate(
                    zip(split_indices_phidivisionX,
                        split_indices_phidivisionY)):
                phi_split_phidivisionX[i] = PhiVsROverZ_Total.GetYaxis(
                ).GetBinLowEdge(int(idxX))
                phi_split_phidivisionY[i] = PhiVsROverZ_Total.GetYaxis(
                ).GetBinLowEdge(int(idxY))

    if save_ntc_hists:
        for i in range(15):
            for j in range(15):
                for k in range(1, 53):
                    if k < 28 and k % 2 == 0:
                        continue
                    nTCs_per_module[0, i, j, k] = ROOT.TH1D(
                        "nTCs_silicon_" + str(i) + "_" + str(j) + "_" + str(k),
                        "", 49, -0.5, 48.5)

        for i in range(5):
            for j in range(12):
                for k in range(37, 53):
                    nTCs_per_module[1, i, j, k] = ROOT.TH1D(
                        "nTCs_scintillator_" + str(i) + "_" + str(j) + "_" +
                        str(k), "", 49, -0.5, 48.5)

    for z in (-1, 1):
        for sector in (0, 1, 2):
            key1 = (z, sector)
            ROverZ_per_module_phidivisionX[key1] = {}
            ROverZ_per_module_phidivisionY[key1] = {}
            if save_sum_tcPt:
                ROverZ_per_module_phidivisionX_tcPt[key1] = {}
                ROverZ_per_module_phidivisionY_tcPt[key1] = {}

            for i in range(15):
                for j in range(15):
                    for k in range(1, 53):
                        if k < 28 and k % 2 == 0:
                            continue
                        ROverZ_per_module_phidivisionX[key1][0, i, j,
                                                             k] = np.empty(0)
                        ROverZ_per_module_phidivisionY[key1][0, i, j,
                                                             k] = np.empty(0)
                        if save_sum_tcPt:
                            ROverZ_per_module_phidivisionX_tcPt[key1][
                                0, i, j, k] = []  #np.empty(0)
                            ROverZ_per_module_phidivisionY_tcPt[key1][
                                0, i, j, k] = []  #np.empty(0)

            for i in range(5):
                for j in range(12):
                    for k in range(37, 53):
                        ROverZ_per_module_phidivisionX[key1][1, i, j,
                                                             k] = np.empty(0)
                        ROverZ_per_module_phidivisionY[key1][1, i, j,
                                                             k] = np.empty(0)
                        if save_sum_tcPt:
                            #Scintillator modules are keyed with a leading 1, matching the dictionaries above
                            ROverZ_per_module_phidivisionX_tcPt[key1][
                                1, i, j, k] = []  #np.empty(0)
                            ROverZ_per_module_phidivisionY_tcPt[key1][
                                1, i, j, k] = []  #np.empty(0)

    try:
        for entry, event in enumerate(tree):

            if (beginEvent != -1 and entry < beginEvent):
                if (entry == 0):
                    print("Event number less than " + str(beginEvent) +
                          ", continue")
                continue

            if (endEvent != -1 and entry > endEvent):
                print("Event number greater than " + str(endEvent) + ", break")
                break

            # if entry > 10:
            #     break
            print("Event number " + str(entry))

            for key1 in ROverZ_per_module_phidivisionX.keys():
                for key2 in ROverZ_per_module_phidivisionX[key1].keys():
                    ROverZ_per_module_phidivisionX[key1][key2] = np.empty(0)
                    ROverZ_per_module_phidivisionY[key1][key2] = np.empty(0)
                    if save_sum_tcPt:
                        ROverZ_per_module_phidivisionX_tcPt[key1][key2] = [
                        ]  #np.empty(0)
                        ROverZ_per_module_phidivisionY_tcPt[key1][key2] = [
                        ]  #np.empty(0)

            #Loop over list of trigger cells in a particular
            #event and fill R/Z histograms for each module
            #for phidivisionX and phidivisionY (traditionally phi > 60 degrees and phi < 60 degrees respectively)

            #Check if tc_pt exists (needed if we want to save the sum of (truncated) TC's pT)
            eventzip = zip(event.tc_waferu, event.tc_waferv, event.tc_layer,
                           event.tc_x, event.tc_y, event.tc_z, event.tc_cellu,
                           event.tc_cellv)
            if (save_sum_tcPt):
                if hasattr(event, 'tc_pt'):
                    eventzip = zip(event.tc_waferu, event.tc_waferv,
                                   event.tc_layer, event.tc_x, event.tc_y,
                                   event.tc_z, event.tc_cellu, event.tc_cellv,
                                   event.tc_pt)
                else:
                    print(
                        'tc_pt not found in TTree - switching to non-save_sum_pt mode'
                    )
                    save_sum_tcPt = False

            for variables in eventzip:
                u, v, layer, x, y, z, cellu, cellv = variables[:8]
                if save_sum_tcPt: pt = variables[8]

                if (u > -990):  #Silicon
                    uv, sector = rotate_to_sector_0(u, v, layer)
                    roverz_phi = getROverZPhi(x, y, z, sector)
                    roverz_bin = np.argmax(roverzBinning > abs(roverz_phi[0]))

                    if (roverz_phi[1] >=
                            phi_split_phidivisionX[roverz_bin - 1]):
                        #There should be no r/z values lower than 0.076
                        ROverZ_per_module_phidivisionX[np.sign(z), sector][
                            0, uv[0], uv[1], layer] = np.append(
                                ROverZ_per_module_phidivisionX[np.sign(z),
                                                               sector][0,
                                                                       uv[0],
                                                                       uv[1],
                                                                       layer],
                                abs(roverz_phi[0]))
                        if save_sum_tcPt:
                            ROverZ_per_module_phidivisionX_tcPt[
                                np.sign(z),
                                sector][0, uv[0], uv[1],
                                        layer].append([abs(roverz_phi[0]), pt])
                    if (roverz_phi[1] <
                            phi_split_phidivisionY[roverz_bin - 1]):
                        ROverZ_per_module_phidivisionY[np.sign(z), sector][
                            0, uv[0], uv[1], layer] = np.append(
                                ROverZ_per_module_phidivisionY[np.sign(z),
                                                               sector][0,
                                                                       uv[0],
                                                                       uv[1],
                                                                       layer],
                                abs(roverz_phi[0]))
                        if save_sum_tcPt:
                            ROverZ_per_module_phidivisionY_tcPt[
                                np.sign(z),
                                sector][0, uv[0], uv[1],
                                        layer].append([abs(roverz_phi[0]), pt])

                else:  #Scintillator
                    eta = cellu
                    phi = cellv
                    etaphi, sector = etaphiMapping(layer, [eta, phi])
                    roverz_phi = getROverZPhi(x, y, z, sector)
                    roverz_bin = np.argmax(roverzBinning > abs(roverz_phi[0]))

                    if (roverz_phi[1] >=
                            phi_split_phidivisionX[roverz_bin - 1]):
                        ROverZ_per_module_phidivisionX[np.sign(z), sector][
                            1, etaphi[0], etaphi[1], layer] = np.append(
                                ROverZ_per_module_phidivisionX[
                                    np.sign(z), sector][1, etaphi[0],
                                                        etaphi[1], layer],
                                abs(roverz_phi[0]))
                        if save_sum_tcPt:
                            ROverZ_per_module_phidivisionX_tcPt[
                                np.sign(z),
                                sector][1, etaphi[0], etaphi[1],
                                        layer].append([abs(roverz_phi[0]), pt])
                    if (roverz_phi[1] <
                            phi_split_phidivisionY[roverz_bin - 1]):
                        ROverZ_per_module_phidivisionY[np.sign(z), sector][
                            1, etaphi[0], etaphi[1], layer] = np.append(
                                ROverZ_per_module_phidivisionY[
                                    np.sign(z), sector][1, etaphi[0],
                                                        etaphi[1], layer],
                                abs(roverz_phi[0]))
                        if save_sum_tcPt:
                            ROverZ_per_module_phidivisionY_tcPt[
                                np.sign(z),
                                sector][1, etaphi[0], etaphi[1],
                                        layer].append([abs(roverz_phi[0]), pt])
            #Bin the TC module data
            module_hists_phidivisionX = {}
            module_hists_phidivisionY = {}

            for key1, value1 in ROverZ_per_module_phidivisionX.items():
                module_hists_phidivisionX[key1] = {}
                for key2, value2 in value1.items():
                    module_hists_phidivisionX[key1][key2] = np.histogram(
                        value2, bins=nROverZBins, range=(0.076, 0.58))[0]

            for key1, value1 in ROverZ_per_module_phidivisionY.items():
                module_hists_phidivisionY[key1] = {}
                for key2, value2 in value1.items():
                    module_hists_phidivisionY[key1][key2] = np.histogram(
                        value2, bins=nROverZBins, range=(0.076, 0.58))[0]

            for z in (-1, 1):
                for sector in (0, 1, 2):

                    #The module hists are numpy arrays with one entry per r/z bin (42 bins)
                    module_hists = [
                        module_hists_phidivisionX[z, sector],
                        module_hists_phidivisionY[z, sector]
                    ]

                    #Apply geometry corrections
                    applyGeometryCorrectionsNumpy(module_hists,
                                                  modulesToCorrect)

                    #Save the integral of module_hists, per event
                    if save_ntc_hists:
                        for module, hist in nTCs_per_module.items():
                            hist.Fill(
                                np.round(
                                    np.sum(module_hists[0][module]) +
                                    np.sum(module_hists[1][module])))

                    #Sum the individual module histograms to get the minigroup histograms
                    minigroup_hists = getMiniGroupHistsNumpy(
                        module_hists, minigroups_modules)

                    #Sum the minigroup histograms to get the bundle histograms
                    bundled_lpgbthists = getBundledlpgbtHists(
                        minigroup_hists, bundles)

                    bundled_lpgbthists_allevents.append(bundled_lpgbthists)

                    #Collect the individual TC Pt values for a given minigroup, with the view to truncate and sum
                    if (save_sum_tcPt):
                        tc_Pt_rawdata = [
                            ROverZ_per_module_phidivisionX_tcPt[z, sector],
                            ROverZ_per_module_phidivisionY_tcPt[z, sector]
                        ]

                        #Apply geometry corrections
                        applyGeometryCorrectionsTCPtRawData(
                            tc_Pt_rawdata, modulesToCorrect)

                        #Get lists of (r/z, pt) pairs, first for minigroups and then for bundles
                        minigroup_tc_Pt_rawdata = getMiniGroupTCPtRawData(
                            tc_Pt_rawdata, minigroups_modules)
                        bundled_tc_Pt_rawdata = getBundledTCPtRawData(
                            minigroup_tc_Pt_rawdata, bundles)

                        #Get histograms of (truncated) sum pT per r/z bin
                        bundled_pt_hists = applyTruncationAndGetPtSums(
                            bundled_tc_Pt_rawdata, truncation_options,
                            ABratios, roverzBinning, nLinks)

                        bundled_pt_hists_allevents.append(bundled_pt_hists)

    except KeyboardInterrupt:
        print("interrupt received, stopping and saving")

    finally:

        #Write all data to file for later analysis (Pickling)
        if (beginEvent != -1):
            outputName = outputName + "_from" + str(beginEvent)
        if (endEvent != -1):
            outputName = outputName + "_to" + str(endEvent)

        with open(outputName + ".txt", "wb") as filep:
            pickle.dump(bundled_lpgbthists_allevents, filep)

        if save_sum_tcPt:
            with open(outputName + "_sumpt.txt", "wb") as filep:
                pickle.dump(bundled_pt_hists_allevents, filep)

        if save_ntc_hists:
            outfile = ROOT.TFile(outputName + "_nTCs.root", "RECREATE")
            for hist in nTCs_per_module.values():
                hist.Write()
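
A minimal usage sketch; the allocation, ntuple, and mapping paths are hypothetical, and correctionConfig carries the 'nTCCorrectionFile' key that the function looks up:

checkFluctuations("allocation.npy", "ntuple.root", "mapping.txt",
                  outputName="alldata",
                  correctionConfig={'nTCCorrectionFile': None},
                  save_ntc_hists=False)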
Example #7
def study_mapping(MappingFile,
                  CMSSW_ModuleHists,
                  algorithm="random_hill_climb",
                  initial_state="best_so_far",
                  random_seed=1,
                  max_iterations=100000,
                  output_dir=".",
                  print_level=0,
                  minigroup_type="minimal"):

    #Load external data
    data = loadDataFile(MappingFile)  #dataframe
    inclusive_hists, module_hists = getModuleHists(CMSSW_ModuleHists)

    #Form hists corresponding to each lpGBT from module hists
    lpgbt_hists = getlpGBTHists(data, module_hists)
    minigroups, minigroups_swap = getMinilpGBTGroups(data, minigroup_type)
    minigroup_hists = getMiniGroupHists(lpgbt_hists, minigroups_swap)
    minigroup_hists_root = getMiniGroupHists(lpgbt_hists,
                                             minigroups_swap,
                                             root=True)

    def mapping_max(state):
        global chi2_min
        global combbest

        chi2 = 0

        bundles = getBundles(minigroups_swap, state)
        bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists, bundles)

        chi2 = calculateChiSquared(inclusive_hists, bundled_lpgbthists)

        typicalchi2 = 600000000000
        if (chi2 < chi2_min):
            chi2_min = chi2
            combbest = np.copy(state)
            if (print_level > 0):
                print(algorithm, " ", chi2_min, " ", chi2_min / typicalchi2)
            if (print_level > 1):
                print(repr(combbest))

        return chi2

    init_state = []
    if (initial_state == "best_so_far"):
        init_state = bestsofar
    if (initial_state[-4:] == ".npy"):
        print(initial_state)
        init_state = np.hstack(np.load(initial_state, allow_pickle=True))
    elif (initial_state == "random"):
        init_state = np.arange(len(minigroups_swap))
        np.random.shuffle(init_state)

    fitness_cust = mlrose.CustomFitness(mapping_max)
    # Define optimization problem object
    problem_cust = mlrose.DiscreteOpt(length=len(init_state),
                                      fitness_fn=fitness_cust,
                                      maximize=False,
                                      max_val=len(minigroups_swap),
                                      minigroups=minigroups_swap)

    # Define decay schedule
    schedule = mlrose.ExpDecay()
    #schedule = mlrose.ArithDecay()

    filename = "bundles_job_"
    filenumber = ""
    if (len(sys.argv) > 2):
        filenumber = str(sys.argv[2])
    else:
        filenumber = "default"
    filename += filenumber

    if (algorithm == "save_root"):
        #Save best combination so far into a root file
        bundles = getBundles(minigroups_swap, init_state)

        bundled_hists = getBundledlpgbtHistsRoot(minigroup_hists_root, bundles)
        chi2 = calculateChiSquared(inclusive_hists, bundled_hists)
        newfile = ROOT.TFile("lpgbt_10.root", "RECREATE")
        np.save(output_dir + "/" + filename + ".npy", bundles)
        for sector in bundled_hists:
            for key, value in sector.items():
                value.Write()
        for sector in inclusive_hists:
            sector.Scale(1. / 24.)
            sector.Write()
        newfile.Close()
        print("Chi2:", chi2)
        print("List of Bundles:")
        for b, bundle in enumerate(bundles):
            print("")
            print("bundle" + str(b))
            for minigroup in bundle:
                #print (minigroup)
                lpgbts = minigroups_swap[minigroup]
                for lpgbt in lpgbts:
                    print(str(lpgbt) + ", ", end='')

    elif (algorithm == "random_hill_climb"):
        try:

            best_state, best_fitness = mlrose.random_hill_climb(
                problem_cust,
                max_attempts=10000,
                max_iters=max_iterations,
                restarts=0,
                init_state=init_state,
                random_state=random_seed)
            print(repr(best_state))

        except ValueError:
            print("interrupt received, stopping and saving")

        finally:
            bundles = getBundles(minigroups_swap, combbest)
            np.save(output_dir + "/" + filename + ".npy", bundles)
            file1 = open(output_dir + "/chi2_" + filenumber + ".txt", "a")
            file1.write("bundles[" + filenumber + "] = " + str(chi2_min) +
                        "\n")
            file1.close()

    elif (algorithm == "genetic_alg"):
        best_state, best_fitness = mlrose.genetic_alg(problem_cust,
                                                      pop_size=200,
                                                      mutation_prob=0.1,
                                                      max_attempts=1000,
                                                      max_iters=10000000,
                                                      curve=False,
                                                      random_state=random_seed)
    elif (algorithm == "mimic"):
        best_state, best_fitness = mlrose.mimic(problem_cust,
                                                pop_size=200,
                                                keep_pct=0.2,
                                                max_attempts=10,
                                                max_iters=np.inf,
                                                curve=False,
                                                random_state=random_seed)
    elif (algorithm == "simulated_annealing"):
        best_state, best_fitness = mlrose.simulated_annealing(
            problem_cust,
            schedule=schedule,
            max_attempts=100000,
            max_iters=10000000,
            init_state=init_state,
            random_state=1)
    else:
        print("Algorithm " + algorithm + " not known")
Example #8
def checkFluctuations(initial_state, cmsswNtuple, mappingFile):

    #List of which minigroups are assigned to each bundle
    init_state = np.hstack(np.load(initial_state, allow_pickle=True))

    #Load the CMSSW ntuple to get per event and per trigger cell information
    rootfile = ROOT.TFile.Open(cmsswNtuple, "READ")
    tree = rootfile.Get("HGCalTriggerNtuple")

    #Load mapping file
    data = loadDataFile(mappingFile)

    #Get list of which lpgbts are in each minigroup
    minigroups, minigroups_swap = getMinilpGBTGroups(data)

    #Get list of which modules are in each minigroup
    minigroups_modules = getMiniModuleGroups(data, minigroups_swap)
    bundles = getBundles(minigroups_swap, init_state)

    bundled_lpgbthists_allevents = []

    ROverZ_per_module = {}
    ROverZ_per_module_Phi60 = {}

    for i in range(15):
        for j in range(15):
            for k in range(1, 53):
                if k < 28 and k % 2 == 0:
                    continue
                ROverZ_per_module[0, i, j, k] = np.empty(0)
                ROverZ_per_module_Phi60[0, i, j, k] = np.empty(0)

    for i in range(5):
        for j in range(12):
            for k in range(37, 53):
                ROverZ_per_module[1, i, j, k] = np.empty(0)
                ROverZ_per_module_Phi60[1, i, j, k] = np.empty(0)

    try:
        for entry, event in enumerate(tree):
            # if entry > 10:
            #     break
            print("Event number " + str(entry))

            for key in ROverZ_per_module.keys():
                ROverZ_per_module[key] = np.empty(0)
                ROverZ_per_module_Phi60[key] = np.empty(0)

            #Loop over list of trigger cells in a particular
            #event and fill R/Z histograms for each module
            #(inclusively and for phi < 60)

            for u, v, layer, x, y, z, cellu, cellv in zip(
                    event.tc_waferu, event.tc_waferv, event.tc_layer,
                    event.tc_x, event.tc_y, event.tc_z, event.tc_cellu,
                    event.tc_cellv):

                eta_phi = getROverZPhi(x, y, z)

                if (u > -990):  #Silicon
                    uv = rotate_to_sector_0(u, v, layer)
                    ROverZ_per_module[0, uv[0], uv[1], layer] = np.append(
                        ROverZ_per_module[0, uv[0], uv[1], layer],
                        abs(eta_phi[0]))
                    if (eta_phi[1] < np.pi / 3):
                        ROverZ_per_module_Phi60[
                            0, uv[0], uv[1], layer] = np.append(
                                ROverZ_per_module_Phi60[0, uv[0],
                                                        uv[1], layer],
                                abs(eta_phi[0]))

                else:  #Scintillator
                    eta = cellu
                    phi = cellv
                    etaphi = etaphiMapping(layer, [eta, phi])
                    ROverZ_per_module[1, etaphi[0], etaphi[1],
                                      layer] = np.append(
                                          ROverZ_per_module[1, etaphi[0],
                                                            etaphi[1], layer],
                                          abs(eta_phi[0]))
                    if (eta_phi[1] < np.pi / 3):
                        ROverZ_per_module_Phi60[
                            1, etaphi[0], etaphi[1], layer] = np.append(
                                ROverZ_per_module_Phi60[1, etaphi[0],
                                                        etaphi[1], layer],
                                abs(eta_phi[0]))

            ROverZ_Inclusive = np.empty(0)
            ROverZ_Inclusive_Phi60 = np.empty(0)

            for key, value in ROverZ_per_module.items():
                ROverZ_Inclusive = np.append(ROverZ_Inclusive, value)
            for key, value in ROverZ_per_module_Phi60.items():
                ROverZ_Inclusive_Phi60 = np.append(ROverZ_Inclusive_Phi60,
                                                   value)

            #Bin the TC module data
            module_hists_inc = {}
            module_hists_phi60 = {}
            inclusive_hists = np.histogram(ROverZ_Inclusive,
                                           bins=42,
                                           range=(0.076, 0.58))
            inclusive_hists_phi60 = np.histogram(ROverZ_Inclusive_Phi60,
                                                 bins=42,
                                                 range=(0.076, 0.58))

            for key, value in ROverZ_per_module.items():
                module_hists_inc[key] = np.histogram(value,
                                                     bins=42,
                                                     range=(0.076, 0.58))[0]
            for key, value in ROverZ_per_module_Phi60.items():
                module_hists_phi60[key] = np.histogram(value,
                                                       bins=42,
                                                       range=(0.076, 0.58))[0]

            module_hists = [module_hists_inc, module_hists_phi60]

            #Sum the individual module histograms to get the minigroup histograms
            minigroup_hists = getMiniGroupHistsNumpy(module_hists,
                                                     minigroups_modules)

            #Sum the minigroup histograms to get the bundle histograms
            bundled_lpgbthists = getBundledlpgbtHists(minigroup_hists, bundles)

            bundled_lpgbthists_allevents.append(bundled_lpgbthists)

    except KeyboardInterrupt:
        print("interrupt received, stopping and saving")

    finally:

        #Write all data to file for later analysis (Pickling)
        with open("alldata.txt", "wb") as filep:
            pickle.dump(bundled_lpgbthists_allevents, filep)
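
The pickled per-event bundle histograms can be read back for later analysis; a minimal sketch:

import pickle
with open("alldata.txt", "rb") as filep:
    bundled_lpgbthists_allevents = pickle.load(filep)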