Example #1
0
def _analysis(name, verbose, **kwargs):
    """Build a root.pa.Analysis called *name*, setting each kwarg as a boolean flag.

    Returns the configured Analysis, or None if any keyword does not match an
    existing attribute. When *verbose* is truthy, the configuration is dumped.
    """
    analysis = root.pa.Analysis(name)
    for prop, value in kwargs.iteritems():
        # Refuse unknown properties instead of silently creating new attributes
        if not hasattr(analysis, prop):
            logger.error('PandaAnalysis.Flat.analysis','Could not set property %s'%prop)
            return None
        setattr(analysis, prop, bool(value))
    # Attach a convenience dump() bound to this particular instance
    setattr(analysis, 'dump', lambda : _dump(analysis))
    if verbose:
        analysis.dump()
    return analysis
Example #2
0
 def run(self, f_out):
     """Read this process's tree and write the nominal and shifted arrays to *f_out*.

     Reads all needed branches in one pass, writes the nominal fields, then one
     output per systematic weight shift. A ValueError from the read or the
     writes is logged and aborts the run.
     """
     logger.info('fitting_forest.Process.run', 'Running ' + self.name)
     branches = sorted(self.all_branches.values())
     try:
         xarr = root_interface.read_tree(tree=self.tree,
                                         branches=branches,
                                         cut=self.cut)
         # Nominal weights first...
         self.__write_out(f_out, xarr, self.variables.keys() + ['nominal'], '')
         # ...then one suffixed output per systematic shift
         for shift, weight in self.weights.iteritems():
             self.__write_out(f_out, xarr, self.variables.keys() + [shift], '_' + shift)
     except ValueError as e:
         logger.error('fitting_forest.Process.run', str(e))
         return
Example #3
0
def read_r_model(mV, mDM=100, couplings='nominal'):
    """Look up the resonant model cross-section for (mV, mDM).

    Reads $PANDA_XSECS/resonant/<mV>_<mDM>.dat, whose lines have the form
    '<coupling>:<sigma>', and returns a ModelParams for the requested
    coupling scenario. Returns None if the file cannot be opened or the
    coupling is not listed.
    """
    tmpl = getenv('PANDA_XSECS') + '/resonant/%i_%i.dat'
    try:
        fdat = open(tmpl % (mV, mDM))
    except IOError:
        # Tag fixed: this is read_r_model (was mislabeled read_nr_model)
        logger.error('PandaCore.Tools.models.read_r_model',
                     'Could not open %s' % (tmpl % (mV, mDM)))
        return None
    try:
        for line in fdat:
            line_coupling, sigma = line.split(':')
            if line_coupling != couplings:
                continue
            # Fixed couplings come from the resonant model definition
            return ModelParams(mV, mDM, 1, 1, 0.25, 0.25, float(sigma), 0)
        return None  # requested coupling not present in the file
    finally:
        fdat.close()  # previously leaked when no line matched
Example #4
0
 def kill(self, idle_only=False):
     """Remove this submission's condor jobs, optionally only the idle ones.

     Raises RuntimeError if the submission was never executed (no ClusterId).
     """
     if not self.cluster_id:
         logger.error(
             self.__class__.__name__ + ".kill",
             "This submission has not been executed yet (ClusterId not set)"
         )
         raise RuntimeError
     # Either every tracked proc, or only those currently sitting idle
     proc_ids = self.query_status(return_ids=True)['idle'] if idle_only else self.proc_ids
     job_specs = ["%s.%s" % (self.cluster_id, p) for p in proc_ids]
     result = self.schedd.act(htcondor.JobAction.Remove, job_specs)
     n_removed = result['TotalSuccess']
     if n_removed:
         logger.info(
             self.__class__.__name__ + '.kill',
             'Killed %i jobs in ClusterId=%i' % (n_removed, self.cluster_id))
Example #5
0
 def query_status(self, return_ids=False):
     """Bucket this submission's condor jobs by state.

     Returns a dict keyed by 'T3', 'T2', 'idle', 'held', 'other', each
     mapping to a list of proc ids (when return_ids=True) or of sample
     names derived from self.arguments / self.proc_ids. Returns all-empty
     buckets if the schedd cannot be reached. Raises RuntimeError if the
     submission has not been executed yet (no ClusterId).

     NOTE(review): relies on the module-level job_status / job_status_rev
     mappings (not visible in this chunk) to translate condor status codes.
     """
     if not self.cluster_id:
         logger.error(
             self.__class__.__name__ + ".query_status",
             "This submission has not been executed yet (ClusterId not set)"
         )
         raise RuntimeError
     jobs = {x: [] for x in ['T3', 'T2', 'idle', 'held', 'other']}
     try:
         results = self.schedd.query('ClusterId =?= %i' % (self.cluster_id))
     except IOError:  # schedd is down!
         return jobs
     for job in results:
         proc_id = int(job['ProcId'])
         status = job['JobStatus']
         # Translate this proc into the sample(s) it carries
         try:
             if return_ids:
                 samples = [proc_id]
             elif type(self.arguments) == dict:
                 samples = [self.arguments[self.proc_ids[proc_id]]]
             else:
                 samples = self.proc_ids[proc_id].split()
         except KeyError:
             continue  # sometimes one extra dummy job is created and not tracked, oh well
         # Split running jobs by site, based on the RemoteHost ClassAd
         if job_status[status] == 'running':
             try:
                 remote_host = job['RemoteHost']
                 if '@T3' in remote_host:
                     status = job_status_rev['T3']
                 else:
                     status = job_status_rev['T2']
             except KeyError:
                 status = 1  # call it idle, job is probably moving between states
                 pass
         if job_status[status] in jobs:
             jobs[job_status[status]] += samples
         else:
             jobs['other'] += samples
     return jobs
Example #6
0
def read_nr_model(mV, mDM, couplings=None, path='non-resonant'):
    """Look up a non-resonant model point for (mV, mDM).

    Reads $PANDA_XSECS/<path>/<mV>_<mDM>_xsec_gencut.dat, one whitespace-
    separated model per line. If *couplings* is given as a tuple
    (gV_DM, gA_DM, gV_q, gA_q), the matching model is returned; otherwise
    the first valid (nominal) model is used. Returns None if the file
    cannot be opened or no model matches.
    """
    tmpl = getenv('PANDA_XSECS') + '/' + path + '/%i_%i_xsec_gencut.dat'
    try:
        fdat = open(tmpl % (mV, mDM))
    except IOError:
        logger.error('PandaCore.Tools.models.read_nr_model',
                     'Could not open %s' % (tmpl % (mV, mDM)))
        return None
    try:
        for line in fdat:
            if 'med dm' in line:  # skip the header row
                continue
            p = ModelParams(*[float(x) for x in line.strip().split()])
            # If no couplings requested, take the first valid model (nominal)
            if not couplings or couplings == (p.gV_DM, p.gA_DM, p.gV_q, p.gA_q):
                return p
        return None  # requested couplings not present in the file
    finally:
        fdat.close()  # previously leaked when no model matched
Example #7
0
 def __init__(self,
              cache_dir,
              executable=None,
              arglist=None,
              arguments=None,
              nper=1):
     """Set up a submission, either from explicit arguments or a cached pickle.

     If *executable* is given, the submission is configured from the call
     arguments; otherwise the most recent submission is restored from
     <cache_dir>/submission.pkl. Raises RuntimeError when neither a valid
     cache nor explicit arguments are available.
     """
     super(SimpleSubmission, self).__init__(cache_dir + '/submission.pkl')
     self.cache_dir = cache_dir
     if executable is not None:  # was `executable != None`
         self.executable = executable
         self.arguments = arguments
         self.arglist = arglist
         self.nper = nper
     else:
         try:
             # Close the cache file promptly instead of leaking the handle
             with open(self.cache_filepath) as fpkl:
                 pkl = pickle.load(fpkl)
             last_sub = pkl[-1]
             self.executable = last_sub.executable
             self.arguments = last_sub.arguments
             self.arglist = last_sub.arglist
             self.nper = last_sub.nper
         except Exception:  # narrowed from a bare `except:`
             logger.error(self.__class__.__name__ + '.__init__',
                          'Must provide a valid cache or arguments!')
             raise RuntimeError
     self.cmssw = getenv('CMSSW_BASE')
     self.workdir = cache_dir + '/workdir/'
     self.logdir = cache_dir + '/logdir/'
     for d in [self.workdir, self.logdir]:
         system('mkdir -p ' + d)
     if isinstance(self.arglist, list):
         # Materialize the argument list on disk and point arglist at the file
         with open(cache_dir + '/workdir/args.list', 'w') as fargs:
             fargs.write('\n'.join(self.arglist))
         if not self.arguments:
             self.arguments = range(1, len(self.arglist) + 1)
         self.arglist = cache_dir + '/workdir/args.list'
Example #8
0
def setup_schedd(config='T3'):
    """Point the module-level submission globals at the requested site.

    *config* is a colon-separated flag string (e.g. 'T3', 'T2:SL6'). The
    first matching site flag ('T3', 'T2', 'T2Only', 'SubMIT') selects the
    base job ClassAd properties, the pool/schedd servers, and spooling
    behavior. Raises ValueError for an unrecognized config.

    NOTE(review): `query_owner` is assigned in every branch but is missing
    from the `global` declaration below, so those assignments only bind a
    local name -- confirm whether it should be declared global.
    """
    config = config.split(':') if config else []
    global pool_server, schedd_server, base_job_properties, should_spool
    # Extra Requirements clause when SL6 is requested.
    # NOTE(review): this local name `os` shadows the stdlib `os` module name.
    os = '&& OpSysAndVer == "SL6"' if 'SL6' in config else ''
    # --- T3: run on the local T3 schedd, no spooling ---
    if 'T3' in config:
        base_job_properties = {
            "Iwd":
            "WORKDIR",
            "Cmd":
            "WORKDIR/exec.sh",
            "WhenToTransferOutput":
            "ON_EXIT",
            "REQUIRED_OS":
            "rhel7",
            "PYTHONHOME":
            "/home/ceballos/.local/lib/python2.7/site-packages/",
            "ShouldTransferFiles":
            "YES",
            "Requirements":
            classad.ExprTree(
                'UidDomain == "mit.edu" && Arch == "X86_64" %s && TARGET.Machine != "t3btchXXX.mit.edu"'
                % os),
            "AcctGroup":
            acct_grp_t3,
            "AccountingGroup":
            '%s.USER' % (acct_grp_t3),
            "X509UserProxy":
            "/tmp/x509up_uUID",
            "OnExitHold":
            classad.ExprTree("( ExitBySignal == true ) || ( ExitCode != 0 )"),
            "In":
            "/dev/null",
            "TransferInput":
            "WORKDIR/cmssw.tgz,WORKDIR/skim.py,WORKDIR/x509up",
        }

        pool_server = None
        schedd_server = getenv('HOSTNAME')
        should_spool = False
        query_owner = getenv('USER')
    # --- T2: glidein sites plus the MIT campus factory ---
    elif 'T2' in config:
        base_job_properties = {
            "Iwd":
            "WORKDIR",
            "Cmd":
            "WORKDIR/exec.sh",
            "WhenToTransferOutput":
            "ON_EXIT",
            "ShouldTransferFiles":
            "YES",
            "Requirements":
            classad.ExprTree('(Arch == "X86_64" %s) && \
((GLIDEIN_Site =!= "MIT_CampusFactory") || (GLIDEIN_Site == "MIT_CampusFactory" && \
BOSCOCluster == "ce03.cmsaf.mit.edu" && BOSCOGroup == "bosco_cms" && HAS_CVMFS_cms_cern_ch))'
                             % os),
            #classad.ExprTree('UidDomain == "cmsaf.mit.edu" && Arch == "X86_64" && OpSysAndVer == "SL6"'),
            "REQUIRED_OS":
            "rhel7",
            "AcctGroup":
            'group_cmsuser.USER',
            "AccountingGroup":
            'group_cmsuser.USER',
            "X509UserProxy":
            "/tmp/x509up_uUID",
            "OnExitHold":
            classad.ExprTree("( ExitBySignal == true ) || ( ExitCode != 0 )"),
            "In":
            "/dev/null",
            "TransferInput":
            "WORKDIR/cmssw.tgz,WORKDIR/skim.py,WORKDIR/x509up",
        }

        pool_server = None
        schedd_server = getenv('HOSTNAME')
        should_spool = False
        query_owner = getenv('USER')
    # --- T2Only: restrict to the campus factory alone ---
    elif 'T2Only' in config:
        base_job_properties = {
            "Iwd":
            "WORKDIR",
            "Cmd":
            "WORKDIR/exec.sh",
            "WhenToTransferOutput":
            "ON_EXIT",
            "ShouldTransferFiles":
            "YES",
            "Requirements":
            classad.ExprTree('(Arch == "X86_64" %s) && \
((GLIDEIN_Site == "MIT_CampusFactory" && \
BOSCOCluster == "ce03.cmsaf.mit.edu" && BOSCOGroup == "bosco_cms" && HAS_CVMFS_cms_cern_ch))'
                             % os),
            #                classad.ExprTree('UidDomain == "cmsaf.mit.edu" && Arch == "X86_64" && OpSysAndVer == "SL6"'),
            "REQUIRED_OS":
            "rhel7",
            "AcctGroup":
            'group_cmsuser.USER',
            "AccountingGroup":
            'group_cmsuser.USER',
            "X509UserProxy":
            "/tmp/x509up_uUID",
            "OnExitHold":
            classad.ExprTree("( ExitBySignal == true ) || ( ExitCode != 0 )"),
            "In":
            "/dev/null",
            "TransferInput":
            "WORKDIR/cmssw.tgz,WORKDIR/skim.py,WORKDIR/x509up",
        }

        pool_server = None
        schedd_server = getenv('HOSTNAME')
        should_spool = False
        query_owner = getenv('USER')
    # --- SubMIT: remote submit.mit.edu schedd ---
    elif 'SubMIT' in config:
        base_job_properties = {
            "Iwd":
            "WORKDIR",
            "Cmd":
            "WORKDIR/exec.sh",
            "WhenToTransferOutput":
            "ON_EXIT",
            "ShouldTransferFiles":
            "YES",
            "Requirements":
            classad.ExprTree(
                'Arch == "X86_64" && TARGET.OpSys == "LINUX" && TARGET.HasFileTransfer && ( isUndefined(IS_GLIDEIN) || ( OSGVO_OS_STRING == "RHEL 6" && HAS_CVMFS_cms_cern_ch == true ) || GLIDEIN_REQUIRED_OS == "rhel7" || HAS_SINGULARITY == true || ( Has_CVMFS_cms_cern_ch == true && ( BOSCOGroup == "bosco_cms" ) ) ) && %s'
                % (submit_exclude)),
            "AcctGroup":
            "analysis",
            "AccountingGroup":
            "analysis.USER",
            "X509UserProxy":
            "/tmp/x509up_uUID",
            "OnExitHold":
            classad.ExprTree("( ExitBySignal == true ) || ( ExitCode != 0 )"),
            "In":
            "/dev/null",
            "TransferInput":
            "WORKDIR/cmssw.tgz,WORKDIR/skim.py",
            "ProjectName":
            "CpDarkMatterSimulation",
            "Rank":
            "Mips",
            'SubMITOwner':
            'USER',
            "REQUIRED_OS":
            "rhel7",
            "DESIRED_OS":
            "rhel7",
            "RequestDisk":
            3000000,
            "SingularityImage":
            "/cvmfs/singularity.opensciencegrid.org/bbockelm/cms:rhel7",
        }

        pool_server = 'submit.mit.edu:9615'
        schedd_server = 'submit.mit.edu'
        query_owner = getenv('USER')
        should_spool = False
    else:
        logger.error('job_management.setup_schedd',
                     'Unknown config "%s"' % (':'.join(config)))
        raise ValueError
Example #9
0
name = argv[2]
singletons = None
outdir = getenv('SUBMIT_NPY')
datadir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/deep/'
submit_name = getenv('SUBMIT_NAME')
me = argv[0].split('/')[-1]
argv = []
system('mkdir -p tmp/')

data = {}
for fpath in fcfg.readlines():
    print fpath.strip()
    try:
        d = np.load(fpath.strip())
    except [IOError, AttributeError] as e:
        logger.error(me, str(e))
        continue
    for k, v in d.iteritems():
        if k == 'singleton_branches':
            data[k] = v
            continue
        if v.shape[0]:
            if k not in data:
                data[k] = []
            data[k].append(v)

if not len(data):
    logger.info(me, 'This was an empty config!')
    exit(0)

for k, v in data.iteritems():
Example #10
0
# Search every file matching argv[2] for event number argv[1] and slice out
# its pf/pt/msd/lumi records into sliced.npz.
flist = glob(argv[2])
target_evt = np.int64(argv[1])

lumis, pts, msds, pfs = [], [], [], []

for fname in flist:
    arr = np.load(fname)
    mask = (arr['eventNumber'] == target_evt)
    if not np.sum(mask):
        continue
    idx = np.argmax(mask)  # first index where the event number matches
    pfs.append(arr['pf'][idx])
    pts.append(arr['pt'][idx])
    msds.append(arr['msd'][idx])
    # NOTE(review): 'lumi' is appended whole, not indexed at idx like the
    # other fields -- confirm this asymmetry is intentional
    lumis.append(arr['lumi'])
    logger.info(argv[0], 'Found %i in %s' % (target_evt, fname))

if lumis:
    np.savez('sliced.npz',
             pf=np.array(pfs),
             msd=np.array(msds),
             pt=np.array(pts),
             lumi=np.array(lumis))
else:
    logger.error(argv[0], 'Could not find %i in %s' % (target_evt, argv[2]))
    exit(1)