def generate_1D_pojection_scans(basis_parameters):
    #var_edges = numpy.linspace(200, 1200, 31)
    var_edges = numpy.linspace(200, 2000, 55)
    data_files = fileio_utils.read_coupling_file()
    base_events_list = fileio_utils.get_events(basis_parameters, data_files)
    base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in base_events_list
    ]
    base_weights, base_errors = numpy.array(list(zip(*base_histograms)))

    num_kappa_bins = 100
    k2v_fixed, kl_fixed, kv_fixed = 1, 1, 1
    k2v_vals = numpy.linspace(-2, 4, num_kappa_bins + 1)
    kl_vals = numpy.linspace(-14, 16, num_kappa_bins + 1)

    k2v_nWeight_integral = negative_weight_map.get_Nweight_sum1D(
        basis_parameters, base_weights, k2v_vals, kl_fixed, kv_fixed,
        base_equations=combination_utils.full_scan_terms, which_coupling='k2v')
    draw_1D_mhh_heatmap(
        basis_parameters, base_weights, var_edges, k2v_vals, kl_fixed, kv_fixed,
        base_equations=combination_utils.full_scan_terms, which_coupling='k2v',
        filename='projectionscan_k2v',
        title_suffix=' 1D Projection Scan, Axis Integral = ' + f'{k2v_nWeight_integral:.3f}')

    kl_nWeight_integral = negative_weight_map.get_Nweight_sum1D(
        basis_parameters, base_weights, k2v_fixed, kl_vals, kv_fixed,
        base_equations=combination_utils.full_scan_terms, which_coupling='kl')
    draw_1D_mhh_heatmap(
        basis_parameters, base_weights, var_edges, k2v_fixed, kl_vals, kv_fixed,
        base_equations=combination_utils.full_scan_terms, which_coupling='kl',
        filename='projectionscan_kl',
        title_suffix=' 1D Projection Scan, Axis Integral = ' + f'{kl_nWeight_integral:.3f}')
def main():
    numpy.set_printoptions(precision=2, linewidth=400, threshold=100, sign=' ')
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    #num_bins = 21
    #k2v_val_range = numpy.linspace(-2,4,num_bins)
    #kl_val_range = numpy.linspace(-14,16,num_bins)

    truth_data_files = fileio_utils.read_coupling_file(
        coupling_file='basis_files/truth_LHE_couplings.dat')
    truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(
        truth_data_files.values(), var_edges, stat_limit=None, emulateSelection=True)
def single_negative_weight_map(basis_parameters, name_suffix='_base', truth_level=False):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_kappa_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_kappa_bins + 1)

    if truth_level:
        truth_data_files = fileio_utils.read_coupling_file(
            coupling_file='basis_files/truth_LHE_couplings.dat')
        basis_files = [truth_data_files[coupling] for coupling in basis_parameters]
        base_weights, base_errors = fileio_utils.extract_lhe_truth_data(basis_files, var_edges)
    else:
        data_files = fileio_utils.read_coupling_file()
        base_events_list = fileio_utils.get_events(basis_parameters, data_files)
        base_histograms = [
            fileio_utils.retrieve_reco_weights(var_edges, base_events)
            for base_events in base_events_list
        ]
        base_weights, base_errors = numpy.array(list(zip(*base_histograms)))

    integral = get_Nweight_sum(basis_parameters, base_weights, kv_val,
                               k2v_val_range, kl_val_range, grid=False)
    negative_weight_grid = get_Nweight_sum(basis_parameters, base_weights, kv_val,
                                           k2v_val_range, kl_val_range, grid=True)
    draw_error_map(basis_parameters, var_edges, kv_val, k2v_val_range, kl_val_range,
                   negative_weight_grid, name_suffix=name_suffix,
                   title_suffix=f'Integral={int(integral)}')
def single_reco_negative_weight_map(basis_parameters):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_bins = 101
    k2v_val_range = numpy.linspace(-2, 4, num_bins)
    kl_val_range = numpy.linspace(-14, 16, num_bins)

    data_files = read_coupling_file()
    base_events_list = get_events(basis_parameters, data_files)
    effective_stats_grid = get_effective_stats_grid(basis_parameters, base_events_list,
                                                    kv_val, k2v_val_range, kl_val_range)
    draw_stats_map(basis_parameters, var_edges, kv_val, k2v_val_range, kl_val_range,
                   1 / effective_stats_grid)
def get_sorted_acceptances():
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_bins = 21
    k2v_val_range = numpy.linspace(-2, 4, num_bins)
    kl_val_range = numpy.linspace(-14, 16, num_bins)

    data_files = fileio_utils.read_coupling_file()
    all_cutflows = fileio_utils.get_combined_cutflow_values(
        data_files.keys(), data_files).values()  # It's a really good thing that python dicts are ordered...
    all_events = fileio_utils.get_events(data_files.keys(), data_files)
    all_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, events) for events in all_events
    ]
    # Wrap all variations up together with their histograms so I can find combinations
    all_variations = list(zip(data_files.keys(), all_histograms, all_cutflows, all_events))  #[:7]
    print('All variations loaded, proceeding to retrieve metrics...')

    theory_function = combination_utils.get_theory_xsec_function()
    total = 0
    basis_metrics = {}
    Nweight_acceptance_list = []
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination
        couplings, histograms, cutflows, events_list = list(zip(*basis_set))
        if not combination_utils.is_valid_combination(couplings): continue
        #if (1.0,1.0,1.0) not in couplings: continue

        weights, errors = numpy.array(list(zip(*histograms)))
        Nweight_integral = negative_weight_map.get_Nweight_sum(
            couplings, weights, kv_val, k2v_val_range, kl_val_range)
        accXeff = metric_accXeff_list(theory_function, couplings, cutflows)
        total += 1
        if total % 10 == 0: print(total)
        if total % 20 != 0: continue
        Nweight_acceptance_list.append([Nweight_integral, accXeff])
    return Nweight_acceptance_list
def validate_reco_method(basis_parameters, verification_parameters,
                         base_equations=combination_utils.full_scan_terms,
                         name_suffix='', title_suffix=''):
    reweight_vector = get_amplitude_function(basis_parameters, as_scalar=False,
                                             base_equations=base_equations)
    #var_edges = numpy.linspace(200, 1200, 31)
    var_edges = numpy.linspace(200, 2000, 55)
    #var_edges = numpy.arange(0, 2050, 50)

    data_files = fileio_utils.read_coupling_file()
    base_events_list = fileio_utils.get_events(basis_parameters, data_files)
    verification_events_list = fileio_utils.get_events(verification_parameters, data_files)
    base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in base_events_list
    ]
    base_weights, base_errors = numpy.array(list(zip(*base_histograms)))

    for verification_events, coupling_parameters in zip(verification_events_list,
                                                        verification_parameters):
        verification_weights, verification_errors = fileio_utils.retrieve_reco_weights(
            var_edges, verification_events)
        combined_weights, combined_errors = reco_reweight(
            reweight_vector, coupling_parameters, base_weights, base_errors)
        plot_histogram('reco_mHH' + name_suffix,
                       'NNT-Based Linear Combination:\n$m_{HH}$' + title_suffix,
                       var_edges, coupling_parameters,
                       combined_weights, combined_errors,
                       verification_weights, verification_errors,
                       xlabel='Reconstructed $m_{HH}$ (GeV)')
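# Note: reco_reweight() is defined elsewhere in this repository. For reference, the
# sketch below shows the kind of linear-combination step it performs, consistent with
# the multiplier/weight sum used in generate_1D9S_pojection_scans(). The quadrature
# error propagation is an assumption of this sketch, not taken from the original code.
def _reco_reweight_sketch(reweight_vector, coupling_parameters, base_weights, base_errors):
    import numpy
    # Evaluate the per-basis multipliers of the amplitude function at this coupling point
    multipliers = reweight_vector(*coupling_parameters)[0]
    # Linearly combine the basis histograms bin by bin
    combined_weights = sum(m * w for m, w in zip(multipliers, base_weights))
    # Assumed propagation: add the scaled per-basis errors in quadrature
    combined_errors = numpy.sqrt(sum((m * e)**2 for m, e in zip(multipliers, base_errors)))
    return combined_weights, combined_errors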
def main():
    # Sort out command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--mode",
        required=False,
        default='reco',
        type=str,
        help="Options are: 'reco', '1D', 'dual', or 'multi'",
    )
    args = parser.parse_args()

    data_files = fileio_utils.read_coupling_file()
    verification_parameters = list(data_files.keys())
    #pdb.set_trace()

    if args.mode == 'reco':
        validate_reco_method(_reco_basis, verification_parameters)
        #validate_reco_method( [(1.0, 1.0, 1.0), (2.0, 1.0, 1.0), (3.0, 1.0, 1.0)] , verification_parameters)
    elif args.mode == '1D':
        generate_1D_pojection_scans(_reco_basis)
    elif args.mode == 'dual':
        compare12_reco_method(
            [(1.0, 1.0, 1.0), (0.5, 1.0, 1.0), (3.0, 1.0, 1.0),
             (1.0, 2.0, 1.0), (1.0, 10.0, 1.0), (0.0, 0.0, 1.0)],
            [(1, 1, 1), (1.5, 1, 1), (3, 1, 1)],
            [(1, 1, 1), (1, 2, 1), (1, 10, 1)],
            verification_parameters)
    elif args.mode == 'multi':
        compare1D3S9S_reco_method(
            [(1, 1, 1), (1.5, 1, 1), (3, 1, 1)],
            #compare1D3S9S_reco_method( [(1.0, 1.0, 1.0), (1.5, 1.0, 1.0), (2.0, 1.0, 1.0)],
            ([0, 2], ([1, 0.5, 0], [2, 1.5, 1], [3, 2, 1.5])))
    else:
        print('Mode - ' + str(args.mode) + ' - is not valid.')
        print('Aborting')
        exit(1)
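# Example invocations (the script filename below is illustrative; substitute this
# module's actual filename in the repository):
#   python validate_linear_combinations.py --mode reco
#   python validate_linear_combinations.py --mode 1D
#   python validate_linear_combinations.py --mode dual
#   python validate_linear_combinations.py --mode multi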
def view_reco_method(basis_parameters, view_params):
    reweight_vector = get_amplitude_function(basis_parameters, as_scalar=False)
    var_edges = numpy.linspace(200, 1200, 31)

    data_files = fileio_utils.read_coupling_file()
    base_events_list = fileio_utils.get_events(basis_parameters, data_files)
    base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in base_events_list
    ]
    base_weights, base_errors = numpy.array(list(zip(*base_histograms)))

    for coupling_parameters in view_params:
        print(coupling_parameters)
        combined_weights, combined_errors = reco_reweight(
            reweight_vector, coupling_parameters, base_weights, base_errors)
        plot_histogram('preview_reco_mHH_new',
                       'NNT-Based Linear Combination:\n$m_{HH}$',
                       var_edges, coupling_parameters,
                       combined_weights, combined_errors,
                       xlabel='Reconstructed $m_{HH}$ (GeV)')
def optimize_reco(which_coupling):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    data_files = fileio_utils.read_coupling_file()

    if which_coupling == 'k2v':
        hold_index = 1
        base_equations = combination_utils.k2v_scan_terms
        k2v_vals = numpy.linspace(-2, 4, num_kappa_bins + 1)
        kl_vals = 1
    elif which_coupling == 'kl':
        hold_index = 0
        base_equations = combination_utils.kl_scan_terms
        k2v_vals = 1
        kl_vals = numpy.linspace(-14, 16, num_kappa_bins + 1)
    else:
        print("Unrecognized coupling option '" + str(which_coupling) + "' (expected 'k2v' or 'kl')")
        exit(1)

    which_variations = [
        variation for variation in data_files.keys()
        if variation[2] == 1 and variation[hold_index] == 1
    ]
    all_events = fileio_utils.get_events(which_variations, data_files)
    all_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, events) for events in all_events
    ]
    # Wrap all variations up together with their histograms so I can find combinations
    all_variations = list(zip(which_variations, all_histograms))
    print('Histograms loaded, proceeding to integrate Nweight grids...')

    valid_bases = []
    total = 0
    for basis_set in itertools.combinations(all_variations, len(base_equations)):
        # Unwrap each combination
        couplings, histograms = list(zip(*basis_set))
        if (1.0, 1.0, 1.0) not in couplings: continue
        if not combination_utils.is_valid_combination(couplings, base_equations=base_equations):
            continue

        weights, errors = numpy.array(list(zip(*histograms)))
        nWeight_integral = negative_weight_map.get_Nweight_sum1D(
            couplings, weights, k2v_vals, kl_vals, kv_val,
            base_equations=base_equations, which_coupling=which_coupling)
        valid_bases.append((nWeight_integral, couplings, weights))
        total += 1
        if total % 10 == 0: print(total)

    print('Integrals computed, sorting and printing...')
    valid_bases.sort()
    for rank, (integral, couplings, weight) in enumerate(valid_bases):
        print(rank, f'{integral:.9f}', couplings)

    ranks_to_draw = 0, 1, 2
    draw_rankings(ranks_to_draw, valid_bases, which_variations, var_edges,
                  k2v_vals, kl_vals, kv_val, which_coupling, base_equations)

    combination_utils.get_amplitude_function(valid_bases[0][1], base_equations=base_equations,
                                             name='optimalR0_' + which_coupling, output='tex')
    combination_utils.get_amplitude_function(valid_bases[1][1], base_equations=base_equations,
                                             name='optimalR1_' + which_coupling, output='tex')
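# Example calls (assuming the coupling file provides the corresponding 1D scan samples):
#   optimize_reco('k2v')   # rank bases for the kappa_2V projection
#   optimize_reco('kl')    # rank bases for the kappa_lambda projection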
def optimize_reco(mode='reco', extra_files={}, extra_name=''):
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_kappa_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_kappa_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_kappa_bins + 1)
    grid_pixel_area = (k2v_val_range[1] - k2v_val_range[0]) * (kl_val_range[1] - kl_val_range[0])

    truth_data_files = None
    if mode == 'reco':
        data_files = fileio_utils.read_coupling_file()
        all_events = fileio_utils.get_events(data_files.keys(), data_files)
        all_histograms = [
            fileio_utils.retrieve_reco_weights(var_edges, events) for events in all_events
        ]
        all_weights, all_errors = numpy.array(list(zip(*all_histograms)))
        # Wrap all variations up together with their histograms so I can find combinations
        all_variations = list(zip(data_files.keys(), all_weights))
    elif mode == 'truth':
        truth_data_files = fileio_utils.read_coupling_file(
            coupling_file='basis_files/truth_LHE_couplings.dat')
        truth_data_files.update(extra_files)
        truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(
            truth_data_files.values(), var_edges)
        all_variations = list(zip(truth_data_files.keys(), truth_weights))
    else:
        print("Unrecognized mode '" + str(mode) + "' (expected 'reco' or 'truth')")
        exit(1)
    print('Histograms loaded, proceeding to integrate Nweight grids...')

    valid_bases = []
    total = 0
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination
        couplings, weights = list(zip(*basis_set))
        if (1.0, 1.0, 1.0) not in couplings: continue
        if not combination_utils.is_valid_combination(
                couplings, base_equations=combination_utils.full_scan_terms):
            continue

        nWeight_integral = get_Nweight_sum(couplings, weights, kv_val, k2v_val_range, kl_val_range)
        #nWeight_integral = get_Nweight_sum(couplings, weights, kv_val, k2v_val_range, kl_val_range,
        #        mask=lambda k2v, kl: ((k2v-1)/1)**2 + ((kl-1)/10)**2 < 1 )
        valid_bases.append((nWeight_integral, couplings, weights))
        total += 1
        if total % 10 == 0: print(total)

    print('Integrals computed, sorting and printing...')
    valid_bases.sort()
    for rank, (integral, couplings, weight) in enumerate(valid_bases):
        print(rank, int(integral), couplings)

    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'quad', only_heatmap=True)
    #draw_rankings([0,1], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, 'top')
    #draw_rankings([0,1,2,3], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range, extra_name+'_truth_quad', only_heatmap=True)
    draw_rankings([0, 1], valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range,
                  extra_name + '_truth_top',
                  only_heatmap=False, truth_level=True,
                  truth_data_files=truth_data_files, skip_preview=True)
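# Example call for the truth-level optimization (the extra file path below is purely
# illustrative, not a file that necessarily exists in this repository):
#   extra = {(1.5, 1.0, 1.0): 'lhe_files/extra_k2v1p5.lhe'}
#   optimize_reco(mode='truth', extra_files=extra, extra_name='withExtra')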
def draw_rankings(ranks_to_draw, valid_bases, var_edges, kv_val, k2v_val_range, kl_val_range,
                  name_infix, only_heatmap=False, skip_preview=False,
                  truth_level=False, truth_data_files=None):
    print('Drawing basis ranks ' + str(ranks_to_draw))
    max_negative_weight = 0
    for rank in ranks_to_draw:
        basis = valid_bases[rank]
        nWeight_grid = get_Nweight_sum(basis[1], basis[2], kv_val,
                                       k2v_val_range, kl_val_range, grid=True)
        max_for_rank = numpy.max(nWeight_grid)
        if max_for_rank > max_negative_weight: max_negative_weight = max_for_rank

    for rank in ranks_to_draw:
        print('Drawing rank ' + str(rank) + '...')
        basis = valid_bases[rank]
        nWeight_grid = get_Nweight_sum(basis[1], basis[2], kv_val,
                                       k2v_val_range, kl_val_range, grid=True)
        draw_error_map(basis[1], var_edges, kv_val, k2v_val_range, kl_val_range, nWeight_grid,
                       vmax=max_negative_weight,
                       name_suffix=f'_{name_infix}rank{int(rank)}',
                       title_suffix=f'Rank {rank+1}/{len(valid_bases)}, Integral={int(basis[0])}')
    if only_heatmap: return

    comp_couplings = [valid_bases[ranks_to_draw[0]][1], valid_bases[ranks_to_draw[1]][1]]
    if truth_level:
        data_files = fileio_utils.read_coupling_file(
            coupling_file='basis_files/truth_LHE_couplings_extended.dat')
    else:
        data_files = fileio_utils.read_coupling_file()
    validate_linear_combinations.compare_bases_reco_method(
        comp_couplings, list(data_files.keys()),
        name_suffix=f'_auto_{name_infix}_3D_{ranks_to_draw[0]}-{ranks_to_draw[1]}',
        labels=(f'Rank {ranks_to_draw[0]}', f'Rank {ranks_to_draw[1]}'),
        truth_level=truth_level, truth_data_files=truth_data_files)
    if skip_preview: return

    k2v_vals = [-1.5, 0.5, 2, 3.5]
    kl_vals = [-9, -3, 5, 14]
    preview_couplings = []
    for k2v in k2v_vals:
        for kl in kl_vals:
            preview_couplings.append((k2v, kl, 1))
    validate_linear_combinations.compare_bases_reco_method(
        comp_couplings, preview_couplings,
        name_suffix='_preview_auto_' + name_infix + '_3D_'
        f'{ranks_to_draw[0]}-{ranks_to_draw[1]}',
        labels=(f'Rank {ranks_to_draw[0]}', f'Rank {ranks_to_draw[1]}'),
        is_verification=False, truth_level=truth_level)
def generate_metric_values():
    var_edges = numpy.linspace(200, 1200, 31)
    kv_val = 1.0
    num_bins = 100
    k2v_val_range = numpy.linspace(-2, 4, num_bins + 1)
    kl_val_range = numpy.linspace(-14, 16, num_bins + 1)

    #data_files = fileio_utils.read_coupling_file()
    #all_events = fileio_utils.get_events(data_files.keys(), data_files)
    #all_histograms = [ fileio_utils.retrieve_reco_weights(var_edges,events) for events in all_events ]
    #all_variations = list(zip(data_files.keys(), all_histograms, all_events))#[:7]
    #all_cutflows = fileio_utils.get_combined_cutflow_values(data_files.keys(), data_files).values() # It's a really good thing that python dicts are ordered...

    truth_data_files = fileio_utils.read_coupling_file(
        coupling_file='basis_files/truth_LHE_couplings_extended.dat')
    truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(
        truth_data_files.values(), var_edges)
    all_variations = list(zip(truth_data_files.keys(), truth_weights, truth_errors))
    #assert list(data_files.keys()) == list(truth_data_files.keys())
    #all_variations = list(zip(data_files.keys(), all_histograms, all_events, all_events, truth_weights, truth_errors))

    # Wrap all variations up together with their histograms so I can find combinations
    #all_variations = list(zip(data_files.keys(), all_histograms, all_cutflows, all_events))#[:7]
    print('All variations loaded, proceeding to retrieve metrics...')
    #for variation, cuts in zip(data_files.keys(), all_cutflows):
    #    accXeff = cuts['Signal'] / cuts['Initial']
    #    print(variation, accXeff)
    #exit()

    total = 0
    basis_metrics = {}
    for basis_set in itertools.combinations(all_variations, 6):
        # Unwrap each combination
        #couplings, histograms, cutflows, events_list, truth_weights, truth_errors = list(zip(*basis_set))
        #couplings, histograms, events_list = list(zip(*basis_set))
        couplings, truth_weights, truth_errors = list(zip(*basis_set))
        if not is_valid_combination(couplings): continue
        #if (1.0,1.0,1.0) not in couplings: continue

        #weights, errors = numpy.array( list(zip(*histograms)) )
        basis_metrics[couplings] = {
            #'Nweight_integral': get_Nweight_sum(couplings, weights, kv_val, k2v_val_range, kl_val_range),
            'Nweight_truth_integral': get_Nweight_sum(couplings, truth_weights, kv_val, k2v_val_range, kl_val_range),
            #'orthogonality': metric_orthogonality(couplings),
            #'reco_effective_stats_integral': metric_reco_effective_stats_integral(couplings, events_list, kv_val, k2v_val_range, kl_val_range),
            'theory_effective_stats_integral': get_theory_effective_stats_map(couplings, kv_val, k2v_val_range, kl_val_range),
            #'reco_solidarity_integral': get_reco_solidarity_map(couplings, weights, kv_val, k2v_val_range, kl_val_range),
            'theory_solidarity_integral': get_theoretical_solidarity_map(couplings, kv_val, k2v_val_range, kl_val_range),
            #'theory_test_val': metric_theory_test_val(couplings),
            #'contribution_integral': metric_contribution_integral(couplings, kv_val, k2v_val_range, kl_val_range),
            #'accXeff_list': metric_accXeff_list(cutflows),
            #'accXeff_sum': metric_accXeff_sum(cutflows),
            #'accXeff_geometric': metric_accXeff_geometric_mean(cutflows),
            #'accXeff_rms': metric_accXeff_rms(cutflows),
            #'accXeff_avg_stdev': metric_accXeff_avg_stdev(cutflows),
            #'accXeff_min': metric_accXeff_min(cutflows),
            #'accXeff_sigma': metric_accXeff_sigma(cutflows),
            #'accXeff_harmonic': metric_accXeff_harmonic_mean(cutflows),
            #'eventCount_sum': metric_eventCount_sum(events_list)
        }
        total += 1
        if total % 10 == 0: print(total)
    print('Variations traversed, plotting...')

    metric_lists = {key: [] for key in list(basis_metrics.values())[0].keys()}
    for basis, metrics in basis_metrics.items():
        for key, val in metrics.items():
            metric_lists[key].append(val)
    return metric_lists
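# Usage sketch (not part of the original workflow): scatter one returned metric against
# another to inspect how the negative-weight integral tracks the theory-based
# effective-stats integral. The output filename is an assumption.
def plot_metric_correlation(metric_lists, xkey='Nweight_truth_integral',
                            ykey='theory_effective_stats_integral'):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.scatter(metric_lists[xkey], metric_lists[ykey], s=4)
    ax.set_xlabel(xkey)
    ax.set_ylabel(ykey)
    fig.savefig('plots/metric_correlation.png', dpi=300)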
def compare_bases_reco_method(basis_parameters_list, verification_parameters,
                              base_equations=combination_utils.full_scan_terms,
                              name_suffix='', title_suffix='', labels=('', ''),
                              is_verification=True, truth_level=False, truth_data_files=None):
    #var_edges = numpy.linspace(200, 1200, 31)
    #var_edges = numpy.arange(0, 2050, 50)
    var_edges = numpy.linspace(200, 2000, 55)

    basis_tuple_list = []
    for basis_parameters in basis_parameters_list:
        reweight_vector = get_amplitude_function(basis_parameters, as_scalar=False,
                                                 base_equations=base_equations)
        if truth_level:
            data_files = fileio_utils.read_coupling_file(
                coupling_file='basis_files/truth_LHE_couplings_extended.dat')
            basis_files = [truth_data_files[coupling] for coupling in basis_parameters]
            truth_weights, truth_errors = fileio_utils.extract_lhe_truth_data(basis_files, var_edges)
            basis_tuple_list.append((truth_weights, truth_errors, reweight_vector))
        else:
            data_files = fileio_utils.read_coupling_file()
            base_events_list = fileio_utils.get_events(basis_parameters, data_files)
            base_histograms = [
                fileio_utils.retrieve_reco_weights(var_edges, base_events)
                for base_events in base_events_list
            ]
            base_weights, base_errors = numpy.array(list(zip(*base_histograms)))
            basis_tuple_list.append((base_weights, base_errors, reweight_vector))

    testpoint_list = verification_parameters
    if is_verification:
        if truth_level:
            verification_files = [data_files[key] for key in verification_parameters]
            truth_verification_weights, truth_verification_errors = fileio_utils.extract_lhe_truth_data(
                verification_files, var_edges)
            testpoint_list = zip(verification_parameters, truth_verification_weights,
                                 truth_verification_errors)
        else:
            testpoint_list = []
            verification_events_list = fileio_utils.get_events(verification_parameters, data_files)
            for events, param in zip(verification_events_list, verification_parameters):
                verification_weights, verification_errors = fileio_utils.retrieve_reco_weights(
                    var_edges, events)
                testpoint_list.append((param, verification_weights, verification_errors))

    for testpoint in testpoint_list:
        verification_weights, verification_errors = None, None
        if is_verification:
            coupling_parameters, verification_weights, verification_errors = testpoint
        else:
            coupling_parameters = testpoint

        combined_tuples = []
        for base_weights, base_errors, reweight_vector in basis_tuple_list:
            combined_tuples.append(
                reco_reweight(reweight_vector, coupling_parameters, base_weights, base_errors))

        if truth_level:
            name = 'truth_mHH_compare' + name_suffix
            title = 'Truth LHE-Based Linear Combination:\nTruth $m_{HH}$' + title_suffix
            xlabel = 'Truth $m_{HH}$ (GeV)'
        else:
            name = 'reco_mHH_compare' + name_suffix
            title = 'NNT-Based Linear Combination:\n$m_{HH}$' + title_suffix
            xlabel = 'Reconstructed $m_{HH}$ (GeV)'

        plot_histogram(
            name, title, var_edges, coupling_parameters,
            combined_tuples[0][0], combined_tuples[0][1],
            verification_weights, verification_errors,
            alt_linearly_combined_weights=combined_tuples[1][0],
            alt_linearly_combined_errors=combined_tuples[1][1],
            generated_label=labels[0], alt_label=labels[1],
            xlabel=xlabel,
        )
def compare12_reco_method(basis_parameters, k2v_basis_parameters, kl_basis_parameters,
                          verification_parameters,
                          base_equations=combination_utils.full_scan_terms,
                          name_suffix='', title_suffix=''):
    reweight_vector = get_amplitude_function(basis_parameters, as_scalar=False,
                                             base_equations=base_equations)
    k2v_reweight_vector = get_amplitude_function(k2v_basis_parameters, as_scalar=False,
                                                 base_equations=combination_utils.k2v_scan_terms)
    kl_reweight_vector = get_amplitude_function(kl_basis_parameters, as_scalar=False,
                                                base_equations=combination_utils.kl_scan_terms)

    #var_edges = numpy.linspace(200, 1200, 31)
    var_edges = numpy.linspace(200, 2000, 55)
    #var_edges = numpy.arange(0, 2050, 50)

    data_files = fileio_utils.read_coupling_file()
    base_events_list = fileio_utils.get_events(basis_parameters, data_files)
    k2v_base_events_list = fileio_utils.get_events(k2v_basis_parameters, data_files)
    kl_base_events_list = fileio_utils.get_events(kl_basis_parameters, data_files)
    verification_events_list = fileio_utils.get_events(verification_parameters, data_files)

    base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in base_events_list
    ]
    base_weights, base_errors = numpy.array(list(zip(*base_histograms)))

    k2v_base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in k2v_base_events_list
    ]
    k2v_base_weights, k2v_base_errors = numpy.array(list(zip(*k2v_base_histograms)))

    kl_base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in kl_base_events_list
    ]
    kl_base_weights, kl_base_errors = numpy.array(list(zip(*kl_base_histograms)))

    for verification_events, coupling_parameters in zip(verification_events_list,
                                                        verification_parameters):
        k2v, kl, kv = coupling_parameters
        if coupling_parameters == (1, 1, 1): continue
        if k2v != 1 and kl != 1: continue
        if kv != 1: continue

        alt_combined_weights, alt_combined_errors = None, None
        if k2v != 1 and kl == 1:
            alt_combined_weights, alt_combined_errors = reco_reweight(
                k2v_reweight_vector, coupling_parameters, k2v_base_weights, k2v_base_errors)
        if k2v == 1 and kl != 1:
            alt_combined_weights, alt_combined_errors = reco_reweight(
                kl_reweight_vector, coupling_parameters, kl_base_weights, kl_base_errors)

        verification_weights, verification_errors = fileio_utils.retrieve_reco_weights(
            var_edges, verification_events)
        combined_weights, combined_errors = reco_reweight(
            reweight_vector, coupling_parameters, base_weights, base_errors)

        plot_histogram(
            'reco_mHH_1-2D_compare' + name_suffix,
            'NNT-Based Linear Combination:\n$m_{HH}$' + title_suffix,
            var_edges, coupling_parameters,
            combined_weights, combined_errors,
            verification_weights, verification_errors,
            alt_linearly_combined_weights=alt_combined_weights,
            alt_linearly_combined_errors=alt_combined_errors,
            generated_label='3D Combination',
            xlabel='Reconstructed $m_{HH}$ (GeV)',
        )
def compare1D3S9S_reco_method(k2v_3S_basis_parameters, k2v_9S_basis_tuple):
    vmin, vmax = 1e-5, 5
    generate_1D9S_pojection_scans(k2v_9S_basis_tuple, vmin, vmax)

    #var_edges = numpy.linspace(200, 1200, 31)
    alt_var_edges = numpy.linspace(200, 1200, 31)
    var_edges = numpy.linspace(200, 2000, 55)
    #var_edges = numpy.arange(0, 2050, 50)
    num_kappa_bins = 10
    k2v_vals = numpy.linspace(-2, 4, num_kappa_bins + 1)
    k2v_vals_alt = numpy.linspace(-2, 4, 100 + 1)

    data_files = fileio_utils.read_coupling_file()
    k2v_3S_reweight_vector = get_amplitude_function(k2v_3S_basis_parameters, as_scalar=False,
                                                    base_equations=combination_utils.k2v_scan_terms)
    k2v_3S_base_events_list = fileio_utils.get_events(k2v_3S_basis_parameters, data_files)
    k2v_3S_base_histograms = [
        fileio_utils.retrieve_reco_weights(var_edges, base_events)
        for base_events in k2v_3S_base_events_list
    ]
    k2v_3S_base_weights, k2v_3S_base_errors = numpy.array(list(zip(*k2v_3S_base_histograms)))

    k2v_3S_base_histograms_alt = [
        fileio_utils.retrieve_reco_weights(alt_var_edges, base_events)
        for base_events in k2v_3S_base_events_list
    ]
    k2v_3S_base_weights_alt, k2v_3S_base_errors_alt = numpy.array(
        list(zip(*k2v_3S_base_histograms_alt)))

    draw_1D_mhh_heatmap(k2v_3S_basis_parameters, k2v_3S_base_weights_alt, alt_var_edges,
                        k2v_vals_alt, 1, 1,
                        base_equations=combination_utils.k2v_scan_terms, which_coupling='k2v',
                        filename='projectionscan_k2v_multicompare',
                        title_suffix='Using Single Basis', vrange=(vmin, vmax))

    multibasis_list = []
    for k2v_list in k2v_9S_basis_tuple[1]:
        basis_parameters = [(k2v, 1, 1) for k2v in k2v_list]
        base_events_list = fileio_utils.get_events(basis_parameters, data_files)
        base_histograms = [
            fileio_utils.retrieve_reco_weights(var_edges, base_events)
            for base_events in base_events_list
        ]
        weights, errors = numpy.array(list(zip(*base_histograms)))
        reweight_vector_function = combination_utils.get_amplitude_function(
            basis_parameters, as_scalar=False, base_equations=combination_utils.k2v_scan_terms)
        multibasis_list.append((weights, errors, reweight_vector_function))

    index_bounds = k2v_9S_basis_tuple[0]
    for k2v in k2v_vals:
        coupling_parameters = [k2v, 1, 1]
        k2v_combined_weights, k2v_combined_errors = reco_reweight(
            k2v_3S_reweight_vector, coupling_parameters, k2v_3S_base_weights, k2v_3S_base_errors)

        multibasis_index = None
        if k2v <= index_bounds[0]:
            multibasis_index = 0
        elif k2v <= index_bounds[1]:
            multibasis_index = 1
        else:
            multibasis_index = 2
        multibasis_weights, multibasis_errors, multibasis_reweight_vector_function = \
            multibasis_list[multibasis_index]
        multicombined_weights, multicombined_errors = reco_reweight(
            multibasis_reweight_vector_function, coupling_parameters,
            multibasis_weights, multibasis_errors)

        view_linear_combination.plot_histogram(
            'preview_reco_mHH_multibasis',
            'NNT-Based Linear Combination:\n$m_{HH}$',
            var_edges, coupling_parameters,
            k2v_combined_weights, k2v_combined_errors,
            alt_linearly_combined_weights=multicombined_weights,
            alt_linearly_combined_errors=multicombined_errors,
            alt_label='3-Basis Set', generated_label='1-Basis Equation',
            xlabel='Reconstructed $m_{HH}$ (GeV)',
        )
def generate_1D9S_pojection_scans(k2v_9S_basis_tuple, vmin, vmax):
    var_edges = numpy.linspace(200, 1200, 31)
    #var_edges = numpy.linspace(200, 2000, 55)
    data_files = fileio_utils.read_coupling_file()
    numpy.set_printoptions(threshold=sys.maxsize, linewidth=230, precision=1,
                           floatmode='fixed', suppress=True)

    num_kappa_bins = 100
    kl_fixed, kv_fixed = 1, 1
    k2v_vals = numpy.linspace(-2, 4, num_kappa_bins + 1)
    index_bounds = k2v_9S_basis_tuple[0]
    grid_bounds = [
        k2v_vals <= index_bounds[0],
        numpy.logical_and(index_bounds[0] < k2v_vals, k2v_vals <= index_bounds[1]),
        index_bounds[1] < k2v_vals
    ]

    grid_list = []
    for k2v_list, boundary in zip(k2v_9S_basis_tuple[1], grid_bounds):
        basis_parameters = [(k2v, 1, 1) for k2v in k2v_list]
        base_events_list = fileio_utils.get_events(basis_parameters, data_files)
        base_histograms = [
            fileio_utils.retrieve_reco_weights(var_edges, base_events)
            for base_events in base_events_list
        ]
        weights, errors = numpy.array(list(zip(*base_histograms)))
        reweight_vector_function = combination_utils.get_amplitude_function(
            basis_parameters, as_scalar=False, base_equations=combination_utils.k2v_scan_terms)
        multiplier_array_vector = reweight_vector_function(k2v_vals, kl_fixed, kv_fixed)[0]
        partial_weight_grid = sum([
            multiplier_array[..., None] * w
            for multiplier_array, w in zip(multiplier_array_vector, weights)
        ])
        bounded_weight_grid = (partial_weight_grid.transpose() * boundary).transpose()
        grid_list.append(bounded_weight_grid)
    weight_grid = sum(grid_list)

    num_bins = len(k2v_vals) - 1
    ranges = k2v_vals[0], k2v_vals[-1], var_edges[0], var_edges[-1]
    title = 'Combined $m_{HH}$ Across ' r'$\kappa_{2V}$ Using Multi-Basis Combination'
    axis_title = r'$\kappa_{2V}$'
    #plottable_couplings = [ c[0] for c in couplings ]
    tick_vals = numpy.arange(ranges[0], ranges[1] + 1, 1)

    fig, ax = plt.subplots()
    im = ax.imshow(weight_grid.transpose(), cmap='viridis', extent=ranges, origin='lower',
                   norm=matplotlib.colors.LogNorm(vmin, vmax))
    #im = ax.imshow(weight_grid.transpose(), extent=ranges, origin='lower', cmap='viridis')
    ax.set_xticks(ticks=tick_vals)
    ax.set_xlabel(axis_title)
    ax.set_ylabel('$m_{HH}$')
    ax.grid()
    #for x in plottable_couplings: ax.vlines(x, ymin=var_edges[0], ymax=var_edges[-1], color='red')
    ax.set_aspect('auto', 'box')

    fig.subplots_adjust(right=0.85)
    cbar_ax = fig.add_axes([0.87, 0.11, 0.03, 0.7])
    fig.colorbar(im, cax=cbar_ax, label='Bin Weight')

    #basis_table = '$\kappa_{2V}$ , $\kappa_{\lambda}$ , $\kappa_{V}$ '
    #for coupling in couplings: basis_table += '\n'+combination_utils.nice_coupling_string(coupling)
    #fig.text(.99, 1, basis_table, ha='right', va='top', fontsize='xx-small', family='monospace')
    fig.suptitle(title, fontsize=10, fontweight='bold')

    dpi = 500
    figname = 'c2v_9S_projection'
    #plt.savefig('plots/scan_maps/'+figname+'.png',dpi=dpi)
    plt.savefig('plots/scan_maps/' + figname + '.pdf', dpi=dpi)