def RedirectIO(self):
    """Bind the current item's output channels to per-item log files.

    Creates ``<outputDir>/logs`` and binds the Stdout, Warning and
    FBIWarning channels to files named after ``self.CurrentItem_Str``.
    """
    toolkit.mkdir('{0:}/logs'.format(self.outputDir))
    log_base = '{0:}/logs/{1:}'.format(self.outputDir, self.CurrentItem_Str)
    # Each channel is bound under its own name to a matching log file.
    for channel, suffix in (('Stdout', '.stdout'),
                            ('Warning', '.warning'),
                            ('FBIWarning', '.fbiwarning')):
        self.BindOutput(channel, channel, log_base + suffix)
def __init__(self, tfile_path, output_path, run_cfg_path):
    """Wire up histogram-name retrieval, histogram retrieval and plot drawing.

    Loads the run-specific and overall JSON configurations, opens the
    input ROOT file read-only, and prepares the output directory for
    control plots.
    """
    run_cfg = toolkit.json_load(run_cfg_path)
    overall = toolkit.json_load('./data/Overall_Info.json')
    root_file = R.TFile(tfile_path, 'read')
    # Combine the two configs with metadata read from the ROOT file.
    args = self.ReadConfig(overall, run_cfg, root_file)
    plot_dir = '{0:}/plots_ControlPlots'.format(output_path)
    toolkit.mkdir(plot_dir)
    self.name_maker = RetrieveHistsNames(**args)
    self.hist_maker = RetrieveHists(root_file)
    self.plots_drawer = DrawControlPlots(plot_dir)
def __init__(self, output_path, version='r21.old',
             variable=('ScaleFactor', 'Scale factor', 'sf')):
    """Prepare table output for one variable.

    :param output_path: base directory; tables go to ``<output_path>/table``.
    :param version: calibration version passed to GroupSystmatics.
    :param variable: 3-sequence of (display name, table header, lookup key).

    Fix: the default for ``variable`` was a mutable list literal, which is
    shared across all calls (classic mutable-default pitfall).  It is only
    indexed here, so a tuple default is a drop-in, backward-compatible
    replacement; callers may still pass a list.
    """
    self.uncertainties = GroupSystmatics().GetUncertainties(version)
    self.output_path = '{0:}/table'.format(output_path)
    # Display name, table header text, and lookup key for the variable.
    self.Name, self.head, self.key = variable
    # Quadrature sum across a list of equal-length lists:
    # element-wise sqrt(a**2 + b**2), folded pairwise over the outer list.
    self.FooSum2 = lambda x: list(reduce(
        lambda _x, _y: map(lambda __x, __y: math.sqrt(__x ** 2 + __y ** 2),
                           _x, _y),
        x))
    # Default drawing geometry for the table renderer.
    self.config_default = {
        'width': 270,
        'height': 270,
        't_b_l_rMargin': [2, 2, 2, 2],
        'scale': 1.5,
    }
    toolkit.mkdir(self.output_path)
def PerformFit(self, name, central, error, systs, isPlot=False):
    """Fit the central values and each systematic variation.

    Returns ``(out_central, out_systs)`` where both have a leading pad
    bin inserted at index 0 (``0.`` for the central bins, ``0`` for each
    systematic).  When ``isPlot`` is true, one comparison plot per
    systematic is written under ``<savePath>/<name>``.
    """
    central_fit = self.__fitCentral(central, error)
    # One fitted function per systematic source.
    syst_fits = {sys_name: self.__fitSyst(sys_name, syst)
                 for sys_name, syst in systs.iteritems()}

    out_central = self.__getOutBins(central_fit)
    out_systs = {}
    for key in syst_fits:
        bins = self.__getOutBins(syst_fits[key])
        bins.insert(0, 0)  # leading pad bin
        out_systs[key] = bins

    if isPlot:
        saveDir = '{0:}/{1:}'.format(self.savePath, name)
        toolkit.mkdir(saveDir)
        for sys_name, syst in systs.iteritems():
            self.__makePlot(saveDir, sys_name, central, error, syst,
                            central_fit, syst_fits[sys_name])

    out_central.insert(0, 0.)  # leading pad bin (float, unlike the systs)
    return out_central, out_systs
def __init__(self, output_path):
    """Record the x-axis scale factor and create the b-tagging plot directory."""
    self.scale_x = 1.05
    plot_dir = '{0:}/plots_BtaggingPlots'.format(output_path)
    self.output_path = plot_dir
    toolkit.mkdir(plot_dir)
def WorkParallel(self):
    """Fan the calibration out as one qsub batch job per category combination.

    For every element of the cartesian product of the configured category
    values, writes a per-job JSON config and a submit script under
    ``./<output_path>/submit``, submits it via qsub, then submits a final
    "Join" job held on all job ids to wait for completion.

    NOTE(review): Python 2 only (``print >>``, ``commands`` module).
    """
    output_path = self.output_path
    # Derive the project name from the last non-empty path component
    # (a trailing slash makes the first split return an empty name).
    output_path, project_name = os.path.split(output_path)
    if len(project_name) == 0:
        output_path, project_name = os.path.split(output_path)
    toolkit.mkdir('./{0:}/submit'.format(self.output_path))
    # Deep-copy so mutating cats below does not touch the master config.
    config = copy.deepcopy(self.calibration_config)
    cats = config['cats']
    keys = cats.keys()
    # itertools.product consumes its inputs up front, so mutating
    # cats inside the loop does not affect the iteration.
    values = itertools.product(*[cats[key] for key in keys])
    jobids = []
    for value in values:
        name = '_'.join(
            [str(k[0]) + '_' + str(k[1]) for k in zip(keys, value)])
        #print name
        # Narrow each category to the single value of this combination;
        # the dumped config then describes exactly one job.
        for n, key in enumerate(keys):
            cats[key] = [value[n]]
        config_name = './{0:}/submit/Config_{1:}.json'.format(
            self.output_path, name)
        script_name = './{0:}/submit/Submit_{1:}.sh'.format(
            self.output_path, name)
        log_name = './{0:}/submit/Log_{1:}'.format(self.output_path, name)
        with open(config_name, 'w') as f:
            toolkit.DumpToJson(config, f)
        # NOTE(review): pwd is only bound inside this loop but is used
        # again after it for the Join script — if the product is empty
        # this raises NameError. Confirm the product is never empty.
        pwd = os.environ['PWD']
        isLoadRaw = '--load_raw' if self.isLoadRawFromCache else ''
        with open(script_name, 'w') as f:
            print >> f, 'INPUT="{0:}"'.format(self.input_file)
            print >> f, 'OUTPUT="{0:}"'.format(self.output_path)
            print >> f, 'LOADRAW="{0:}"'.format(
                int(self.isLoadRawFromCache))
            print >> f, 'CONFIG="{0:}"'.format(config_name)
            print >> f, 'cd {0:}'.format(pwd)
            print >> f, 'source ./setup.sh'
            print >> f, 'python ./scripts/submit_hook.py --input_file ${INPUT} --output_path ${OUTPUT} --config_file ${CONFIG} ' + isLoadRaw
        # -N job name, -o/-e log files; the same job name is used as the
        # id held on by the Join job below.
        cmd = 'qsub -cwd -P P_atlas -l sps=1 -l h_vmem=8g -q long -N {0:} -o {1:}.log -e {1:}.err {2:}'.format(
            name, log_name, script_name)
        jobids.append(name)
        res = commands.getstatusoutput(cmd)
        print res[1]
    # NOTE(review): path components look swapped relative to the
    # './{output_path}/submit' directory created above — this writes to
    # './submit/{project_name}/Join', which is never mkdir'd here.
    # Confirm whether this directory is created elsewhere.
    join_jobs = './submit/{0:}/Join'.format(project_name)
    with open(join_jobs + '.sh', 'w') as f:
        print >> f, '#!/bin/bash'
        print >> f, 'cd {0:}'.format(pwd)
        print >> f, 'echo "Finished!"'
    jids = ','.join(jobids)
    # -hold_jid blocks the Join job until all listed jobs finish;
    # -sync y makes this qsub call itself block until Join completes.
    cmd_join = 'qsub -cwd -o {0:}.log -e {0:}.err -hold_jid {1:} -sync y {0:}.sh'.format(
        join_jobs, jids)
    print '\nWaiting for all jobs to be finished'
    res = commands.getstatusoutput(cmd_join)
    print res
# NOTE(review): leftover debug guard — this unconditionally raises whenever
# this statement is reached, aborting execution. Presumably inserted to halt
# a run while debugging; confirm it is intentional and remove otherwise.
raise RuntimeError('debug')
def init_environment(self):
    """Ensure the run's output directory exists (delegates to toolkit.mkdir)."""
    target_dir = self.outputDir
    toolkit.mkdir(target_dir)