Example #1
 def run(self, f_out_path):
     # assumes "import ROOT as root" and "from itertools import chain" at module scope
     f_out = root.TFile.Open(f_out_path, 'RECREATE')
     for proc in chain(self.__data_procs, self.__mc_procs):
         proc.run(f_out)
     f_out.Close()
     logger.info('fitting_forest.RegionFactory.run',
                 'Created output in %s' % f_out_path)
Example #2
def _dump(a):
    # log every public integer attribute of the analysis object as a True/False flag
    logger.info('PandaAnalysis.Flat.analysis', 'Summary of analysis %s:' % (a.name))
    for k in dir(a):
        if k[0] == '_':
            continue
        if type(getattr(a, k)) != int:
            continue
        logger.info('PandaAnalysis.Flat.analysis',
                    '    %20s = %s' % (k, 'True' if getattr(a, k) else 'False'))
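
A usage sketch for _dump; the FakeAnalysis class and its flag names below are illustrative assumptions, not part of the actual analysis module:

class FakeAnalysis(object):
    def __init__(self):
        self.name = 'test'    # non-integer attributes are skipped
        self.recalcECF = 1    # integer attributes are reported as flags
        self.deep = 0

_dump(FakeAnalysis())
# logs, roughly:
#   Summary of analysis test:
#                   deep = False
#              recalcECF = True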
Example #3
 def run(self, f_out):
     logger.info('fitting_forest.Process.run', 'Running ' + self.name)
     branches = sorted(self.all_branches.values())
     try:
         xarr = root_interface.read_tree(tree=self.tree,
                                         branches=branches,
                                         cut=self.cut)
         fields = self.variables.keys() + ['nominal']
         self.__write_out(f_out, xarr, fields, '')
         # write one additional copy per systematic weight shift
         for shift, weight in self.weights.iteritems():
             fields = self.variables.keys() + [shift]
             self.__write_out(f_out, xarr, fields, '_' + shift)
     except ValueError as e:
         logger.error('fitting_forest.Process.run', str(e))
         return
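
root_interface.read_tree is not defined in this section; a plausible minimal sketch built on root_numpy's tree2array (an assumption about the real implementation, which may differ):

import root_numpy

def read_tree(tree, branches, cut=None):
    # return a numpy structured array with one field per requested branch,
    # filtered by the TTree selection string in cut
    return root_numpy.tree2array(tree, branches=branches, selection=cut)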
Example #4
 def kill(self, idle_only=False):
     if not self.cluster_id:
         logger.error(
             self.__class__.__name__ + ".kill",
             "This submission has not been executed yet (ClusterId not set)"
         )
     raise RuntimeError('ClusterId not set')
     if idle_only:
         proc_ids = self.query_status(return_ids=True)['idle']
     else:
         proc_ids = self.proc_ids
     N = self.schedd.act(htcondor.JobAction.Remove,
                         ["%s.%s" % (self.cluster_id, p)
                          for p in proc_ids])['TotalSuccess']
     if N:
         logger.info(
             self.__class__.__name__ + '.kill',
             'Killed %i jobs in ClusterId=%i' % (N, self.cluster_id))
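
A short usage sketch; sub stands in for a submission object that has already been executed (illustrative, not taken from the source):

sub.kill(idle_only=True)  # remove only the jobs still idle in the queue
sub.kill()                # or remove every job in the cluster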
Example #5
    def execute(self, njobs=None):
        runner = '''#!/bin/bash
OLDPATH=$PATH
export USER=$SUBMIT_USER
env
hostname
python -c "import socket; print socket.gethostname()"
cd {0}
eval `/cvmfs/cms.cern.ch/common/scramv1 runtime -sh`
cd -
jobwd=$PWD
export PATH=${{PATH}}:${{OLDPATH}} # no idea why this is overwritten
for i in $@; do
    arg=$(sed "${{i}}q;d" {3}) # get the ith line
    echo $arg
    mkdir -p $i ; cd $i
    {1} $arg && echo $i >> {2};
    cd $jobwd ; rm -rf $i
done'''.format(self.cmssw, self.executable, self.workdir + '/progress.log',
               self.arglist)
        with open(self.workdir + '/exec.sh', 'w') as frunner:
            frunner.write(runner)
        repl = {
            'WORKDIR': self.workdir,
            'LOGDIR': self.logdir,
            'UID': str(getuid()),
            'USER': getenv('USER'),
            'SUBMITID': str(self.sub_id)
        }
        cluster_ad = classad.ClassAd()

        job_properties = base_job_properties.copy()
        for k in [
                'TransferInput', 'ShouldTransferFiles', 'WhenToTransferOutput'
        ]:
            del job_properties[k]
        job_properties['Environment'] = environ_to_condor()
        for key, value in job_properties.iteritems():
            if type(value) == str and key != 'Environment':
                for pattern, target in repl.iteritems():
                    value = value.replace(pattern, target)
            cluster_ad[key] = value

        proc_properties = {
            'UserLog': 'LOGDIR/SUBMITID_PROCID.log',
            'Out': 'LOGDIR/SUBMITID_PROCID.out',
            'Err': 'LOGDIR/SUBMITID_PROCID.err',
        }
        proc_id = 0
        procs = []
        self.arguments = sorted(self.arguments)
        # one job per chunk of nper arguments (ceiling division)
        n_to_run = (len(self.arguments) + self.nper - 1) // self.nper
        arg_mapping = {}  # condor arg -> job args
        for idx in xrange(n_to_run):
            if njobs and proc_id >= njobs:
                break
            repl['PROCID'] = '%i' % idx
            proc_ad = classad.ClassAd()
            for key, value in proc_properties.iteritems():
                if type(value) == str:
                    for pattern, target in repl.iteritems():
                        value = value.replace(pattern, target)
                proc_ad[key] = value
            # this job's chunk of nper arguments (slicing clamps at the end)
            proc_ad['Arguments'] = ' '.join(
                str(x) for x in
                self.arguments[self.nper * idx:self.nper * (idx + 1)])
            arg_mapping[idx] = proc_ad['Arguments']
            procs.append((proc_ad, 1))
            proc_id += 1

        logger.info(self.__class__.__name__ + '.execute',
                    'Submitting %i jobs!' % (len(procs)))
        self.submission_time = time.time()
        results = []
        self.proc_ids = {}
        if len(procs):
            myinfo(self.__class__.__name__ + '.execute',
                   'Cluster ClassAd:' + str(cluster_ad))
            self.cluster_id = self.schedd.submitMany(cluster_ad,
                                                     procs,
                                                     spool=should_spool,
                                                     ad_results=results)
            if should_spool:
                self.schedd.spool(results)
            for idx, result in enumerate(results):
                self.proc_ids[int(result['ProcId'])] = arg_mapping[idx]
            logger.info(self.__class__.__name__ + '.execute',
                        'Submitted to cluster %i' % (self.cluster_id))
        else:
            self.cluster_id = -1
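
environ_to_condor is called above but not defined in this section; a minimal sketch, assuming it flattens the current environment into HTCondor's space-separated Environment string (an assumption, not necessarily the repository's implementation):

import os

def environ_to_condor():
    # HTCondor "new" Environment syntax: space-separated name=value pairs,
    # with values containing spaces wrapped in single quotes
    pairs = []
    for k, v in sorted(os.environ.items()):
        if ' ' in v:
            v = "'%s'" % v
        pairs.append('%s=%s' % (k, v))
    return ' '.join(pairs)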
Example #6
def myinfo(*args, **kwargs):
    # forward to logger.info unless the module-level SILENT flag is set
    if not SILENT:
        logger.info(*args, **kwargs)
Example #7
    try:
        d = np.load(fpath.strip())
    except (IOError, AttributeError) as e:
        logger.error(me, str(e))
        continue
    for k, v in d.iteritems():
        if k == 'singleton_branches':
            data[k] = v
            continue
        if v.shape[0]:
            if k not in data:
                data[k] = []
            data[k].append(v)

if not len(data):
    logger.info(me, 'This was an empty config!')
    exit(0)

for k, v in data.iteritems():
    if k == 'singleton_branches':
        continue
    data[k] = np.concatenate(v)


def dump():
    global singletons

    outpath = 'tmp/' + name + '_%s.npy'

    # particles
    d = data['particles']
Example #8
    except (IOError, AttributeError) as e:
        logger.error(me, str(e))
        continue
    mask = (d['nprongs'] == n_partons)
    for k, v in d.iteritems():
        if k == 'singleton_branches':
            data[k] = v
            continue
        if v.shape[0]:
            if k not in data:
                data[k] = []
            data[k].append(v[mask])  # keep only entries passing the n_partons mask

if not len(data):
    logger.info(me, 'This was an empty config!')
    exit(0)

for k, v in data.iteritems():
    if k == 'singleton_branches':
        continue
    data[k] = np.concatenate(v)

if not data['pt'].shape[0]:
    logger.info(me, 'Nothing passed the mask')
    exit(0)

if deep_utils.NORM:
    deep_utils.normalize_arrays(data, 'particles')
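
deep_utils.normalize_arrays is not shown either; a minimal sketch, assuming NORM enables per-feature standardization of the named array (purely an assumption about what the helper does):

import numpy as np

def normalize_arrays(data, key):
    # hypothetical: shift to zero mean and scale to unit variance per feature
    arr = data[key].astype(float)
    mu = arr.mean(axis=0)
    sigma = arr.std(axis=0)
    sigma[sigma == 0] = 1.  # guard against constant features
    data[key] = (arr - mu) / sigma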

Example #9
# assumes: from sys import argv; from glob import glob; import numpy as np
flist = glob(argv[2])
target_evt = np.int64(argv[1])

lumis = []
pts = []
msds = []
pfs = []

for f in flist:
    arr = np.load(f)
    evt = arr['eventNumber']
    mask = (evt == target_evt)
    if np.sum(mask):
        idx = np.argmax(mask)
        pfs.append(arr['pf'][idx])
        pts.append(arr['pt'][idx])
        msds.append(arr['msd'][idx])
        lumis.append(arr['lumi'][idx])
        logger.info(argv[0], 'Found %i in %s' % (target_evt, f))

if lumis:
    np.savez('sliced.npz',
             pf=np.array(pfs),
             msd=np.array(msds),
             pt=np.array(pts),
             lumi=np.array(lumis))
else:
    logger.error(argv[0], 'Could not find %i in %s' % (target_evt, argv[2]))
    exit(1)
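
Reading the slice back is straightforward; a short usage sketch (the keys match the savez call above):

arr = np.load('sliced.npz')
for key in ('pf', 'msd', 'pt', 'lumi'):
    print key, arr[key].shape  # one entry per file that contained the event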