def _dump(a):
    # Log every public attribute of an analysis object as a boolean flag.
    PInfo('PandaAnalysis.Flat.analysis', 'Summary of analysis %s:' % (a.name))
    for k in dir(a):
        if k[0] == '_':
            continue
        PInfo('PandaAnalysis.Flat.analysis',
              ' %20s = %s' % (k, 'True' if bool(getattr(a, k)) else 'False'))
def _dump(a):
    # Variant that only reports integer-valued flags.
    PInfo('PandaAnalysis.Flat.analysis', 'Summary of analysis %s:' % (a.name))
    for k in dir(a):
        if k[0] == '_':
            continue
        if type(getattr(a, k)) != int:
            continue
        PInfo('PandaAnalysis.Flat.analysis',
              ' %20s = %s' % (k, 'True' if getattr(a, k) else 'False'))
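# Usage sketch for _dump (hypothetical: the class and flag names below are
# stand-ins; all _dump needs is an object with a .name plus public integer
# attributes, as in PandaAnalysis.Flat.analysis):
class _FakeAnalysis(object):
    def __init__(self):
        self.name = 'test'
        self.monojet = 1
        self.deep = 0

_dump(_FakeAnalysis())  # logs each public flag as True/False via PInfo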
def process_tree(t, fpath, n_to_print=0, do_truth=True):
    # Convert per-event 'gen' and 'truth' histograms in a TTree into numpy
    # arrays, optionally dumping the first n_to_print events as PNGs.
    N = t.GetEntriesFast()
    imgpath = fpath + '_img%i.png'
    gen_arrs = []
    truth_arrs = []
    dims = None
    threshold = 0
    for iE in xrange(N):
        # report progress every 10% of the tree
        if float(iE) / N > threshold:
            PInfo('convert_images.process_tree', '%5.2f (%i/%i)' % (float(iE) / N, iE, N))
            threshold += 0.1
        t.GetEntry(iE)
        gen = t.gen
        truth = t.truth
        if n_to_print > 0 and iE < n_to_print:
            img(gen, imgpath % iE)
            img(truth, (imgpath % iE).replace('.png', '_truth.png'))
        if not dims:
            # infer the image dimensions from the first event
            dims = (gen.GetNbinsX(), gen.GetNbinsY())
        gen_arrs.append(conv(gen, dims))
        truth_arrs.append(conv(truth, dims, dtype=np.int16))
    gen_arr = np.array(gen_arrs)
    np.save(fpath + '_gen.npy', gen_arr)
    truth_arr = np.array(truth_arrs)
    np.save(fpath + '_truth.npy', truth_arr)
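# Invocation sketch (hypothetical file/tree names; assumes the img/conv helpers
# above and a TTree whose events carry 'gen' and 'truth' TH2 histograms):
#   f = root.TFile.Open('images.root')
#   process_tree(f.Get('tree'), '/tmp/events', n_to_print=3)
#   # -> /tmp/events_gen.npy, /tmp/events_truth.npy, and 3 PNG pairs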
def run(self, f_out_path):
    f_out = root.TFile.Open(f_out_path, 'RECREATE')
    for proc in chain(self.__data_procs, self.__mc_procs):
        proc.run(f_out)
    f_out.Close()
    PInfo('fitting_forest.RegionFactory.run', 'Created output in %s' % f_out_path)
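# Usage sketch (hypothetical; the RegionFactory construction is not shown in
# this source, but run() only needs a writable output path):
#   factory.run('/tmp/fittingForest.root')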
def run(self, f_out):
    PInfo('fitting_forest.Process.run', 'Running ' + self.name)
    branches = sorted(self.all_branches.values())
    try:
        xarr = root_interface.read_tree(tree=self.tree, branches=branches, cut=self.cut)
        # write the nominal tree, then one tree per systematic weight shift
        fields = self.variables.keys() + ['nominal']
        self.__write_out(f_out, xarr, fields, '')
        for shift, weight in self.weights.iteritems():
            fields = self.variables.keys() + [shift]
            self.__write_out(f_out, xarr, fields, '_' + shift)
    except ValueError as e:
        PError('fitting_forest.Process.run', str(e))
        return
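# A minimal sketch of the field-slicing pattern used above, assuming (as the
# code suggests) that root_interface.read_tree returns a numpy structured
# array; the branch and shift names here are illustrative only:
import numpy as np
xarr = np.zeros(4, dtype=[('met', 'f4'), ('nominal', 'f4'), ('wScaleUp', 'f4')])
for field_set in (['met', 'nominal'], ['met', 'wScaleUp']):
    sub_arr = xarr[field_set]  # one record array per systematic shift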
    # (fragment: tail of the status-checking function)
    last_lock = int(time())
    if args.submit_only and (mc.missing + data.missing > 0):
        submit(silent=(args.monitor is not None))
    if args.monitor:
        sleep(1)
    else:
        return

### MAIN ###

if args.kill:
    kill()
if args.clean_output:
    PInfo('task.py', 'Cleaning up %s and %s' % (lockdir, outdir))
    sleep(2)
    system('rm -rf %s/* %s/* &' % (lockdir, outdir))
if args.clean:
    PInfo('task.py', 'Cleaning up %s and %s' % (logdir, workdir))
    sleep(2)
    system('rm -rf %s/* %s/*' % (logdir, workdir))
if args.check:
    if args.monitor is not None:
        curses.wrapper(check)
    else:
        check()
else:
    PInfo('task.py', 'TASK = ' + submit_name)
    if args.build_only and (not path.isfile(workdir + '/submission.pkl')
h_pt = f_pt.Get('h_%s' % (name.split('_')[0]))
f_pt_scaled = root.TFile.Open(datadir + 'flatten_scaled.root')
h_pt_scaled = f_pt_scaled.Get('h_%s' % (name.split('_')[0]))

# merge all batch files listed in the config, keeping only events with the
# requested parton multiplicity
data = {}
for fpath in fcfg.readlines():
    d = np.load(fpath.strip())
    mask = (d['nPartons'] == n_partons)
    for k, v in d.iteritems():
        if v.shape[0]:
            if k not in data:
                data[k] = []
            data[k].append(v[mask])

if not len(data):
    PInfo(me, 'This was an empty config!')
    exit(0)

for k, v in data.iteritems():
    data[k] = np.concatenate(v)

if not data['pt'].shape[0]:
    PInfo(me, 'Nothing passed the mask')
    exit(0)

if NORM:
    deep_utils.normalize_arrays(data, 'pf')
    deep_utils.normalize_arrays(data, 'sv')

def reweight(x_pt):
import argparse
import subprocess
from re import sub
from os import getenv
from PandaCore.Tools.Misc import PInfo
from PandaCore.Tools.job_management import DataSample, convert_catalog

workdir = getenv('SUBMIT_WORKDIR')
parser = argparse.ArgumentParser(description='convert configuration')
parser.add_argument('--infile', type=str, default=None)
parser.add_argument('--outfile', type=str, default=None)
parser.add_argument('--nfiles', type=int, default=None)
args = parser.parse_args()

fin = open(args.infile)
samples = convert_catalog(list(fin), as_dict=True)
fout = open(args.outfile, 'w')

keys = sorted(samples)
counter = 0
for k in keys:
    sample = samples[k]
    # one config line per job; the suffix enumerates the splits of this sample
    configs = sample.get_config(args.nfiles, suffix='_%i')
    for c in configs:
        fout.write(c % (counter, counter))
        counter += 1

PInfo('configBuilder.py', 'Submission will have %i jobs' % (counter))
fout.close()
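# Invocation sketch (hypothetical paths):
#   python configBuilder.py --infile $SUBMIT_WORKDIR/catalog.txt \
#       --outfile $SUBMIT_WORKDIR/local.cfg --nfiles 25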
flist = glob(argv[2])
target_evt = np.int64(argv[1])
lumis = []
pts = []
msds = []
pfs = []
for f in flist:
    arr = np.load(f)
    evt = arr['eventNumber']
    mask = (evt == target_evt)
    if np.sum(mask):
        idx = np.argmax(mask)
        pfs.append(arr['pf'][idx])
        pts.append(arr['pt'][idx])
        msds.append(arr['msd'][idx])
        lumis.append(arr['lumi'])
        PInfo(argv[0], 'Found %i in %s' % (target_evt, f))

if lumis:
    np.savez('sliced.npz',
             pf=np.array(pfs),
             msd=np.array(msds),
             pt=np.array(pts),
             lumi=np.array(lumis))
else:
    PError(argv[0], 'Could not find %i in %s' % (target_evt, argv[2]))
    exit(1)
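# Invocation sketch (the script name is a placeholder): argv[1] is the event
# number to look up, argv[2] a glob of .npz batch files:
#   python sliceEvents.py 123456789 '/data/batches/*.npz'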