import itertools
import numpy

# Project-local analysis modules used throughout these examples:
import combination_utils
import fileio_utils
import negative_weight_map
import weight_contribution_map
from combination_utils import is_valid_combination
from negative_weight_map import get_Nweight_sum
from weight_contribution_map import (get_theory_effective_stats_map,
                                     get_theoretical_solidarity_map)
# (draw_rankings and the metric_* helpers are defined elsewhere in the
# same project.)

Example #1

def optimize_reco():
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_kappa_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_kappa_bins + 1)
    grid_pixel_area = (k2v_val_range[1] -
                       k2v_val_range[0]) * (kl_val_range[1] - kl_val_range[0])

    all_variations = [
        (1, 1, 1),
        (0, 1, 1),
        (0.5, 1, 1),
        (1.5, 1, 1),
        (2, 1, 1),
        (3, 1, 1),
        (1, 0, 1),
        (1, 2, 1),
        (1, 10, 1),
        (1, 1, 0.5),
        (1, 1, 1.5),
        (0, 0, 1),
        #(2.5, 10, 1),
        #(-0.5, 8, 0.5),
        (3, -9, 1),
        #(2, 7, 1),
    ]

    valid_bases = []
    total = 0
    for couplings in itertools.combinations(all_variations, 6):
        # Require the Standard Model point in every candidate basis
        if (1.0, 1.0, 1.0) not in couplings: continue
        if not combination_utils.is_valid_combination(
                couplings, base_equations=combination_utils.full_scan_terms):
            continue
        # Alternative ranking metric (disabled): the theoretical solidarity map,
        # optionally restricted to an ellipse around the SM point via
        # mask=lambda k2v, kl: ((k2v-1)/1)**2 + ((kl-1)/10)**2 < 1
        #solidarity_integral = weight_contribution_map.get_theoretical_solidarity_map(
        #    couplings, 1, k2v_val_range, kl_val_range)
        #valid_bases.append((solidarity_integral, couplings))

        # Sum the effective-stats map over the kappa grid (as in the disabled
        # line above) so the ranking key is a scalar; without .sum(), the
        # tuple sort below fails on ambiguous array comparisons.
        effective_stat_integral = weight_contribution_map.get_theory_effective_stats_map(
            couplings, 1, k2v_val_range, kl_val_range).sum()
        valid_bases.append((effective_stat_integral, couplings))
        total += 1
        if total % 10 == 0: print(total)
    print('Integrals computed, sorting and printing...')
    valid_bases.sort(reverse=True)
    for rank, (integral, couplings) in enumerate(valid_bases):
        print(rank, int(integral), couplings)

    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'masktop')
    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'neomasktop')
    draw_rankings([0], valid_bases, var_edges, kv_val, k2v_val_range,
                  kl_val_range, 'neoeffectiveB')
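
As an aside, the validity gate used above can be sketched compactly. Below is a minimal, hypothetical stand-in for combination_utils.is_valid_combination, assuming full_scan_terms are the six monomials of the VBF di-Higgs cross-section expansion in (k2v, kl, kv); the real module may differ in detail.

# Hypothetical sketch of the basis-validity check: six coupling points form a
# usable basis only if the 6x6 matrix of scan terms evaluated at those points
# is invertible, so any other coupling point can be reached as a linear
# combination of the basis samples.
_full_scan_terms = [
    lambda k2v, kl, kv: kv**2 * kl**2,
    lambda k2v, kl, kv: kv**4,
    lambda k2v, kl, kv: k2v**2,
    lambda k2v, kl, kv: kv**3 * kl,
    lambda k2v, kl, kv: k2v * kl * kv,
    lambda k2v, kl, kv: kv**2 * k2v,
]

def is_valid_combination_sketch(couplings, base_equations=_full_scan_terms):
    matrix = numpy.array([[term(*c) for term in base_equations]
                          for c in couplings], dtype=float)
    return abs(numpy.linalg.det(matrix)) > 1e-9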

Example #2

def get_sorted_acceptances():
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_bins = 21
    k2v_val_range = numpy.linspace(-2, 4, num_bins)
    kl_val_range = numpy.linspace(-14, 16, num_bins)

    data_files = fileio_utils.read_coupling_file()
    all_cutflows = fileio_utils.get_combined_cutflow_values(
        data_files.keys(), data_files).values(
        )  # It's a good thing that Python dicts preserve insertion order...
    all_events = fileio_utils.get_events(data_files.keys(), data_files)
    all_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, events)
        for events in all_events
    ]
    # Wrap all variations up together with their histograms so I can find combinations
    all_variations = list(
        zip(data_files.keys(), all_histograms, all_cutflows,
            all_events))  #[:7]
    print('All variations loaded, proceeding to retrieve metrics...')
    theory_function = combination_utils.get_theory_xsec_function()

    total = 0
    Nweight_acceptance_list = []
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination
        couplings, histograms, cutflows, events_list = list(zip(*basis_set))
        if not combination_utils.is_valid_combination(couplings): continue
        #if (1.0,1.0,1.0) not in couplings: continue

        weights, errors = numpy.array(list(zip(*histograms)))
        Nweight_integral = negative_weight_map.get_Nweight_sum(
            couplings, weights, kv_val, k2v_val_range, kl_val_range)
        accXeff = metric_accXeff_list(theory_function, couplings, cutflows)

        total += 1
        if total % 10 == 0: print(total)
        # Keep only every 20th valid basis, to thin out the returned data.
        if total % 20 != 0: continue
        Nweight_acceptance_list.append([Nweight_integral, accXeff])
    return Nweight_acceptance_list
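
A possible consumer of the list returned above, assuming metric_accXeff_list yields one acc x eff value per basis sample (a hypothetical reading of its return type):

import matplotlib.pyplot as plt

pairs = get_sorted_acceptances()
nweight_integrals = [nweight for nweight, accxeff in pairs]
mean_accxeff = [float(numpy.mean(accxeff)) for nweight, accxeff in pairs]

# Scatter the two metrics to check whether bases with small negative-weight
# integrals also retain good acceptance.
plt.scatter(nweight_integrals, mean_accxeff)
plt.xlabel('Negative-weight integral')
plt.ylabel('Mean acc x eff of basis samples')
plt.savefig('nweight_vs_acceptance.png')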

Example #3

def optimize_reco(which_coupling):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    data_files = fileio_utils.read_coupling_file()

    if which_coupling == 'k2v':
        hold_index = 1
        base_equations = combination_utils.k2v_scan_terms
        k2v_vals = numpy.linspace(-2, 4, num_kappa_bins + 1)
        kl_vals = 1
    elif which_coupling == 'kl':
        hold_index = 0
        base_equations = combination_utils.kl_scan_terms
        k2v_vals = 1
        kl_vals = numpy.linspace(-14, 16, num_kappa_bins + 1)
    else:
        raise ValueError(f'Unrecognized coupling scan: {which_coupling!r}')
    which_variations = [
        variation for variation in data_files.keys()
        if variation[2] == 1 and variation[hold_index] == 1
    ]

    all_events = fileio_utils.get_events(which_variations, data_files)
    all_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, events)
        for events in all_events
    ]
    # Wrap all variations up together with their histograms so I can find combinations
    all_variations = list(zip(which_variations, all_histograms))
    print('Histograms loaded, proceeding to integrate Nweight grids...')

    valid_bases = []
    total = 0
    for basis_set in itertools.combinations(all_variations,
                                            len(base_equations)):
        # Unwrap each combination
        couplings, histograms = list(zip(*basis_set))
        if (1.0, 1.0, 1.0) not in couplings: continue
        if not combination_utils.is_valid_combination(
                couplings, base_equations=base_equations):
            continue

        weights, errors = numpy.array(list(zip(*histograms)))
        nWeight_integral = negative_weight_map.get_Nweight_sum1D(
            couplings,
            weights,
            k2v_vals,
            kl_vals,
            kv_val,
            base_equations=base_equations,
            which_coupling=which_coupling)
        valid_bases.append((nWeight_integral, couplings, weights))
        total += 1
        if total % 10 == 0: print(total)
    print('Integrals computed, sorting and printing...')
    valid_bases.sort()
    for rank, (integral, couplings, weight) in enumerate(valid_bases):
        print(rank, f'{integral:.9f}', couplings)

    ranks_to_draw = 0, 1, 2
    draw_rankings(ranks_to_draw, valid_bases, which_variations, var_edges,
                  k2v_vals, kl_vals, kv_val, which_coupling, base_equations)
    for rank in (0, 1):
        combination_utils.get_amplitude_function(
            valid_bases[rank][1],
            base_equations=base_equations,
            name=f'optimalR{rank}_{which_coupling}',
            output='tex')
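
A minimal driver for this variant, running the optimization for both 1D scans:

if __name__ == '__main__':
    for coupling in ('k2v', 'kl'):
        optimize_reco(coupling)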

Example #4

def optimize_reco(mode='reco', extra_files=None, extra_name=''):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_kappa_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_kappa_bins + 1)
    grid_pixel_area = (k2v_val_range[1] -
                       k2v_val_range[0]) * (kl_val_range[1] - kl_val_range[0])

    truth_data_files = None
    if mode == 'reco':
        data_files = fileio_utils.read_coupling_file()
        all_events = fileio_utils.get_events(data_files.keys(), data_files)
        all_histograms = [
            fileio_utils.retrieve_reco_weights(var_edges, events)
            for events in all_events
        ]
        all_weights, all_errors = numpy.array(list(zip(*all_histograms)))
        # Wrap all variations up together with their histograms so I can find combinations
        all_variations = list(zip(data_files.keys(), all_weights))
    elif mode == 'truth':
        truth_data_files = fileio_utils.read_coupling_file(
            coupling_file='basis_files/truth_LHE_couplings.dat')
        # extra_files lets callers inject additional samples (None by default,
        # avoiding a mutable default argument).
        if extra_files: truth_data_files.update(extra_files)
        truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(
            truth_data_files.values(), var_edges)
        all_variations = list(zip(truth_data_files.keys(), truth_weights))
    else:
        raise ValueError(f'Unrecognized mode: {mode!r}')
    print('Histograms loaded, proceeding to integrate Nweight grids...')

    valid_bases = []
    total = 0
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination
        couplings, weights = list(zip(*basis_set))
        if (1.0, 1.0, 1.0) not in couplings: continue
        if not combination_utils.is_valid_combination(
                couplings, base_equations=combination_utils.full_scan_terms):
            continue

        nWeight_integral = get_Nweight_sum(couplings, weights, kv_val,
                                           k2v_val_range, kl_val_range)
        #nWeight_integral = get_Nweight_sum(couplings, weights, kv_val, k2v_val_range, kl_val_range,
        #        mask=lambda k2v, kl: ((k2v-1)/1)**2 + ((kl-1)/10)**2 < 1 )
        valid_bases.append((nWeight_integral, couplings, weights))
        total += 1
        if total % 10 == 0: print(total)
    print('Integrals computed, sorting and printing...')
    valid_bases.sort()
    for rank, (integral, couplings, weight) in enumerate(valid_bases):
        print(rank, int(integral), couplings)

    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'quad', only_heatmap=True)
    #draw_rankings([0,1], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'top')

    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, extra_name+'_truth_quad', only_heatmap=True)
    draw_rankings([0, 1],
                  valid_bases,
                  var_edges,
                  kv_val,
                  k2v_val_range,
                  kl_val_range,
                  extra_name + '_truth_top',
                  only_heatmap=False,
                  truth_level=True,
                  truth_data_files=truth_data_files,
                  skip_preview=True)
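
A usage sketch for the truth-level mode; the extra_files entry below (coupling tuple mapped to an LHE file path) is a hypothetical placeholder that only illustrates the expected shape of the argument:

# Both the coupling key and the path are made-up placeholders.
optimize_reco(mode='truth',
              extra_files={(3, -9, 1): 'lhe_files/k2v3_kl-9_kv1.lhe.gz'},
              extra_name='extended')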

Example #5

def generate_metric_values():
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_bins + 1)

    # Reco-level inputs (event files, histograms, cutflows) are disabled in
    # this version; the metrics below are computed from LHE truth data only.

    truth_data_files = fileio_utils.read_coupling_file(
        coupling_file='basis_files/truth_LHE_couplings_extended.dat')
    truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(
        truth_data_files.values(), var_edges)
    all_variations = list(
        zip(truth_data_files.keys(), truth_weights, truth_errors))

    # (Further reco-level wrapping and a per-variation acc x eff printout were
    # here, both disabled; accXeff was cuts['Signal'] / cuts['Initial'].)
    print('All variations loaded, proceeding to retrieve metrics...')

    total = 0
    basis_metrics = {}
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination (the reco-level unpackings are disabled).
        couplings, truth_weights, truth_errors = list(zip(*basis_set))
        if not is_valid_combination(couplings): continue
        #if (1.0,1.0,1.0) not in couplings: continue

        basis_metrics[couplings] = {
            'Nweight_truth_integral':
                get_Nweight_sum(couplings, truth_weights, kv_val,
                                k2v_val_range, kl_val_range),
            # .sum() collapses the effective-stats map to a scalar, matching
            # the other *_integral entries (as in the first example above).
            'theory_effective_stats_integral':
                get_theory_effective_stats_map(couplings, kv_val,
                                               k2v_val_range,
                                               kl_val_range).sum(),
            'theory_solidarity_integral':
                get_theoretical_solidarity_map(couplings, kv_val,
                                               k2v_val_range, kl_val_range),
            # Disabled metrics (these need the reco-level inputs above): reco
            # Nweight/solidarity/effective-stats integrals, orthogonality,
            # contribution integral, the accXeff statistics (list, sum,
            # geometric/harmonic means, rms, stdev, min, sigma), and
            # eventCount_sum.
        }

        total += 1
        if total % 10 == 0: print(total)
    print('Variations traversed, plotting...')
    metric_lists = {key: [] for key in list(basis_metrics.values())[0].keys()}
    for basis, metrics in basis_metrics.items():
        for key, val in metrics.items():
            metric_lists[key].append(val)
    return metric_lists
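
A small follow-up, assuming each stored metric is a scalar per basis: print the range each metric spans across all valid combinations before doing any plotting.

metric_lists = generate_metric_values()
for name, values in metric_lists.items():
    print(f'{name}: min={min(values):.3g}, max={max(values):.3g}')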