def test_nested_list(self):
    """Flattening nested lists yields all elements in order."""
    in_list = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
    flat_list = list(mh.flatten(in_list))
    self.assertEqual(flat_list, range(1, 11))

    # Also check arbitrarily (deeply) nested lists mixed with scalars.
    in_list = [1, [[2, 3], [4, 5]], 6, 7, [8, 9, [10]]]
    # BUG fix: the flattened list has to be recomputed for the new input;
    # previously the stale result of the first flatten was re-checked here.
    flat_list = list(mh.flatten(in_list))
    self.assertEqual(flat_list, range(1, 11))
def main(args):
    """Main"""
    # Determine up front which branch names are required so that only those
    # columns have to be read from each input file.
    var_names = [('costh', 'phi')]
    if args.genlevel:
        logging.info('Also adding generator level folding')
        var_names.append(('gen_costh', 'gen_phi'))

    frames = args.frames.split(',')
    load_variables = ['_'.join(pair)
                      for pair in product(flatten(var_names), frames)]

    for infile in args.inputfiles:
        logging.info('Processing file {}'.format(infile))
        # Fall back to auto-detecting the tree name if none was given.
        treename = args.treename if args.treename else get_treename(infile)

        df = get_dataframe(infile, treename, columns=load_variables)
        for var_pair in var_names:
            for frame in frames:
                costh_f, phi_f = get_folded_frame(df, frame, *var_pair)
                add_branch(costh_f, '_'.join([var_pair[0], frame, 'fold']),
                           infile, treename)
                add_branch(phi_f, '_'.join([var_pair[1], frame, 'fold']),
                           infile, treename)
def shift_graphs(graphs, abs_shift, offset=1):
    """
    Shift all graphs slightly along the horizontal direction.

    If offset == 0 the first graph stays where it is and the others are
    placed symmetrically around it, otherwise all graphs are distributed
    symmetrically around 0
    """
    # Precompute 15 shift values (0 plus 7 symmetric pairs) and hope that
    # this is enough for all variations that are ever passed in.
    shifts = [0]
    for step in xrange(1, 8):
        shifts.append(-abs_shift * step)
        shifts.append(abs_shift * step)

    shifted = []
    for idx, graph in enumerate(graphs):
        shifted.append(shift_graph_horizontal(graph, shifts[idx + offset]))
    return shifted
def main(args):
    """Main"""
    # Only load the columns that are actually needed for the scan pairs.
    columns = list(flatten(args.variables))
    data = get_dataframe(args.scanfile, columns=columns)

    outfile = r.TFile.Open(args.outfile, 'recreate')
    for varx, vary in args.variables:
        for graph in make_pair_correlation(data, varx, vary,
                                           args.physical_lambdas):
            graph.Write()
    outfile.Close()
def create_muon_pz_res_map(dfr, n_bins_res, n_bins_p):
    """
    See above, same thing for muon pz
    """
    # get_equi_pop_bins returns (low, high) pairs; collapse them into a
    # sorted array of unique bin edges for the histogram definition.
    raw_bins = get_equi_pop_bins(dfr, lambda df: df.gen_muPPz.abs(), n_bins_p)
    pz_bins = np.array(sorted(set(flatten(raw_bins))))
    res_bins = np.linspace(-0.15, 0.15, n_bins_res)
    hist_sett = (len(pz_bins) - 1, pz_bins, len(res_bins) - 1, res_bins)

    pos_map = create_res_v_gen_map(dfr, 'muPPz', hist_sett)
    neg_map = create_res_v_gen_map(dfr, 'muNPz', hist_sett)

    # Combine positive and negative muon maps into one.
    muon_map = pos_map.Clone()
    muon_map.Add(neg_map)
    return muon_map
def get_eta_binning(effs):
    """
    Get the eta binning from the efficiencies and also check that the eta
    range is contiguous throughout the whole range
    """
    # Collect the unique bin borders in ascending order, then require that
    # every pair of adjacent borders is an actual bin in the efficiencies.
    borders = sorted(set(flatten(effs)))
    for low, high in zip(borders[:-1], borders[1:]):
        if (low, high) not in effs:
            logging.error(
                'The eta binning is not contiguous! The possible bin '
                '{:.1f} - {:.1f} is not present'.format(low, high))
            return None

    return borders
def create_muon_pxy_res_map(dfr, n_bins_res, n_bins_p):
    """
    Create the muon px and py combined residual maps (see photon for binning)
    """
    px_bins = get_equi_pop_bins(dfr, lambda df: df.gen_muPPx.abs(), n_bins_p)
    # collapse the (low, high) bin pairs into a sorted array of unique edges
    px_bins = np.array(sorted({b for b in flatten(px_bins)}))
    res_bins = np.linspace(-0.15, 0.15, n_bins_res)
    hist_sett = (len(px_bins) - 1, px_bins, len(res_bins) - 1, res_bins)

    px_p_map = create_res_v_gen_map(dfr, 'muPPx', hist_sett)
    px_n_map = create_res_v_gen_map(dfr, 'muNPx', hist_sett)
    py_p_map = create_res_v_gen_map(dfr, 'muPPy', hist_sett)
    py_n_map = create_res_v_gen_map(dfr, 'muNPy', hist_sett)

    # BUG fix: previously px_p_map was never added while px_n_map was both
    # cloned AND re-added, i.e. the positive-muon px residuals were missing
    # and the negative ones double-counted. Clone the positive px map and
    # add each of the three remaining maps exactly once (same pattern as the
    # pz and photon maps).
    muon_map = px_p_map.Clone()
    for mu_map in [px_n_map, py_p_map, py_n_map]:
        muon_map.Add(mu_map)
    return muon_map
def compile_load_vars(selections, varx, vary):
    """Compile the list of variables that has to be loaded"""
    vars_to_load = [sf.collect_requirements(sel[0])
                    for sel in selections.values()]

    if any(sel[1] is not None for sel in selections.values()):
        # simply load all efficiencies in case they are needed
        vars_to_load.append('*eff_sm')
        vars_to_load.append('costh_HX')  # needed to bin in costh_HX

    # Functional variables pull in their own requirements, plain branch
    # names are loaded directly.
    for var in (varx, vary):
        if var in FUNCVARS:
            vars_to_load.append(FUNCVARS[var].requires)
        else:
            vars_to_load.append(var)

    return list(set(flatten(vars_to_load)))  # remove duplicates
def create_photon_res_map(dfr, n_bins_res, n_bins_p):
    """
    Create the photon residuals map for a given number of resolution bins
    (equal spacing) and a given number of P(x,y,z) bins (equal population)
    """
    # determine the P(x,y,z) binning; get_equi_pop_bins returns (low, high)
    # pairs, so collapse them into a sorted array of unique bin edges
    raw_bins = get_equi_pop_bins(dfr, lambda df: df.gen_photonPx.abs(),
                                 n_bins_p)
    px_bins = np.array(sorted(set(flatten(raw_bins))))
    res_bins = np.linspace(-2, 2, n_bins_res)
    hist_sett = (len(px_bins) - 1, px_bins, len(res_bins) - 1, res_bins)

    # Sum the residual maps of all three momentum components.
    photon_map = None
    for comp in ('photonPx', 'photonPy', 'photonPz'):
        comp_map = create_res_v_gen_map(dfr, comp, hist_sett)
        if photon_map is None:
            photon_map = comp_map.Clone()
        else:
            photon_map.Add(comp_map)
    return photon_map
def get_all_variations():
    """
    Get all the possible variations
    """
    # Lazily yield one flattened list per combination of variations.
    for combination in product(*VARIATIONS):
        yield list(flatten(combination))
def test_chunks_preserves_order(self):
    """Chunking and then flattening reproduces the original sequence."""
    in_list = range(100)
    chunk_size = 20
    rejoined = list(mh.flatten(mh.chunks(in_list, chunk_size)))
    self.assertEqual(rejoined, in_list)
def test_already_flat(self):
    """Flattening an already flat list leaves it unchanged."""
    in_list = list(xrange(10))
    self.assertEqual(list(mh.flatten(in_list)), in_list)