Example #1
    def _setup_bulk_subjobs(self, dirac_ids, dirac_script):
        """
        This is the old bulk submission method, used to construct the subjobs for a parametric job
        Args:
            dirac_ids (list): List of the DIRAC ids which have been created
            dirac_script (str): Name of the DIRAC script which contains the job JDL
        """
        with open(dirac_script, 'r') as f:
            parametric_datasets = get_parametric_datasets(f.read().split('\n'))
        if len(parametric_datasets) != len(dirac_ids):
            raise BackendError(
                'Dirac',
                'Mismatch between the number of datasets defined in the DIRAC API script and those returned by DIRAC'
            )

        from Ganga.GPIDev.Lib.Job.Job import Job
        master_job = self.getJobObject()
        master_job.subjobs = []
        for i in range(len(dirac_ids)):
            j = Job()
            j.copyFrom(master_job)
            j.splitter = None
            j.backend.id = dirac_ids[i]
            j.id = i
            j.inputdata = self._setup_subjob_dataset(parametric_datasets[i])
            j.status = 'submitted'
            j.time.timenow('submitted')
            master_job.subjobs.append(j)
        return True
Example #2
    def master_setup_bulk_subjobs(self, jobs, jdefids):
        """
        Construct the subjobs of a bulk-submitted Panda job.
        Args:
            jobs (list): Jobs from the bulk submission
            jdefids (list): Panda job definition ids, one per subjob
        """
        from Ganga.GPIDev.Lib.Job.Job import Job
        master_job = self.getJobObject()
        for i in range(len(jdefids)):
            j = Job()
            j.copyFrom(master_job)
            j.splitter = None
            j.backend = Panda()
            j.backend.id = jdefids[i]
            j.id = i
            j.status = 'submitted'
            j.time.timenow('submitted')
            master_job.subjobs.append(j)
        return True
Example #3
    def createSubjob(self, job, additional_skip_args=None):
        """ Create a new subjob by copying the master job and setting all fields correctly.
        """
        from Ganga.GPIDev.Lib.Job.Job import Job
        if additional_skip_args is None:
            additional_skip_args = []

        j = Job()
        skipping_args = [
            'splitter', 'inputsandbox', 'inputfiles', 'inputdata', 'subjobs'
        ]
        skipping_args.extend(additional_skip_args)
        j.copyFrom(job, skipping_args)
        j.splitter = None
        j.inputsandbox = []
        j.inputfiles = []
        j.inputdata = None
        return j
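
A minimal usage sketch (hypothetical; assumes a splitter-style class that inherits this method, and an application exposing an 'args' attribute):

    def split(self, job):
        # one customised copy of the master job per argument set
        subjobs = []
        for args in [['a'], ['b'], ['c']]:  # illustrative argument sets
            sj = self.createSubjob(job)
            sj.application.args = args  # hypothetical attribute
            subjobs.append(sj)
        return subjobs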
Example #4
    def _setup_bulk_subjobs(self, dirac_ids, dirac_script):
        """
        Construct the subjobs for a parametric (bulk-submitted) job and commit the master job.
        Args:
            dirac_ids (list): List of the DIRAC ids which have been created
            dirac_script (str): Name of the DIRAC script which contains the job JDL
        """
        with open(dirac_script, 'r') as f:
            parametric_datasets = get_parametric_datasets(f.read().split('\n'))
        if len(parametric_datasets) != len(dirac_ids):
            raise BackendError('Dirac', 'Mismatch between the number of datasets defined in the DIRAC API script and those returned by DIRAC')

        from Ganga.GPIDev.Lib.Job.Job import Job
        master_job = self.getJobObject()
        master_job.subjobs = []
        for i in range(len(dirac_ids)):
            j = Job()
            j.copyFrom(master_job)
            j.splitter = None
            j.backend.id = dirac_ids[i]
            j.id = i
            j.inputdata = self._setup_subjob_dataset(parametric_datasets[i])
            j.status = 'submitted'
            j.time.timenow('submitted')
            master_job.subjobs.append(j)
        master_job._commit()
        return True
Example #5
def submit(N, K):
    """Submit K jobs, each split into N identical subjobs, and block until all finish."""
    import time
    jobs = []
    for i in range(K):
        j = Job()
        j._auto__init__()
        j.backend = LCG()
        j.backend.middleware = 'GLITE'
        j.splitter = GenericSplitter()
        j.splitter.attribute = 'application.args'
        j.splitter.values = [['x']] * N
        j.submit()
        jobs.append(j)

    def finished():
        for j in jobs:
            if j.status not in ['failed', 'completed']:
                return False
        return True

    while not finished():
        time.sleep(1)

    return jobs
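
A hedged usage example (assumes an interactive Ganga session where Job, LCG and GenericSplitter are already exported):

    # e.g. two jobs of five subjobs each; inspect the final statuses
    finished_jobs = submit(5, 2)
    for j in finished_jobs:
        print('%s %s' % (j.id, j.status))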
Example #6
    def master_updateMonitoringInformation(jobs):
        """Updates the statuses of the list of jobs provided by issuing crab -status."""
        logger.info('Updating the monitoring information of ' +
                    str(len(jobs)) + ' jobs')
        try:
            from Ganga.GPIDev.Lib.Job.Job import Job
            import sys
            import traceback

            for j in jobs:
                server = CRABServer()
                logger.debug(
                    'Updating monitoring information for job %d (%s)' %
                    (j.id, j.status))
                try:
                    dictresult, status, reason = server.status(j)
                    logger.info(
                        'CRAB3 server call answer status: %s - reason: %s' %
                        (status, reason))
                    joblist = sorted(dictresult['result'][0]['jobList'],
                                     key=lambda x: x[1])
                except KeyError:
                    logger.info(
                        'Get status for job %d didn\'t return job list, skipping job for now.'
                        % j.id)

                    continue
                except Exception:
                    logger.error('Get status for job %d failed, skipping.' %
                                 j.id)
                    raise

                if joblist:
                    logger.info('There are subjob statuses for job %s' % j.id)
                    logger.info('j: %s' % dir(j))
                    if not j.subjobs:
                        logger.warning('No subjob object for job %s' % j.id)
                        j.subjobs = []
                        for i, subjob in enumerate(joblist):
                            index = int(subjob[1])
                            logger.info('Processing subjob %d, %s' %
                                        (index, subjob))
                            sj = Job()
                            sj.copyFrom(j)
                            sj.backend.crabid = index
                            sj.id = i
                            sj.updateStatus('submitting')
                            sj.backend.checkReport(subjob)
                            sj.backend.checkStatus()
                            j.subjobs.append(sj)
                        #j.subjobs = sorted(j.subjobs, key=lambda x: x.backend.id)
                        #j._commit()
                    else:
                        for subjob in joblist:
                            index = int(subjob[1])
                            logger.debug(
                                'Found subjob %s searching with index %s' %
                                (j.subjobs[index - 1].backend.crabid, index))
                            j.subjobs[index - 1].backend.checkReport(subjob)
                            j.subjobs[index - 1].backend.checkStatus()

                    j.updateMasterJobStatus()
                else:
                    logger.info('There are no subjobs for job %s' % (j.id))
                    logger.info('checking task status from report: %s' %
                                dictresult['result'][0]['status'])
                    taskstatus = dictresult['result'][0]['status']
                    if taskstatus in ['FAILED']:
                        logger.info('Job failed: %s' % dictresult)
                        j.updateStatus('failed')
        except Exception as e:
            logger.error(e)
            traceback.print_exc(file=sys.stdout)
Example #7
    def createUnits(self):
        """Create new units if required given the inputdata"""

        # call parent for chaining
        super(CoreTransform, self).createUnits()

        # Use the given splitter to create the unit definitions
        if len(self.units) > 0:
            # already have units so return
            return

        if self.unit_splitter is None and len(self.inputdata) == 0:
            raise ApplicationConfigurationError(
                "No unit splitter or InputData provided for CoreTransform unit creation, Transform %d (%s)"
                % (self.getID(), self.name))

        # -----------------------------------------------------------------
        # split over unit_splitter by preference
        if self.unit_splitter:

            # create a dummy job, assign everything and then call the split
            j = Job()
            j.backend = self.backend.clone()
            j.application = self.application.clone()

            if self.inputdata:
                j.inputdata = self.inputdata.clone()

            subjobs = self.unit_splitter.split(j)

            if len(subjobs) == 0:
                raise ApplicationConfigurationError(
                    "Unit splitter gave no subjobs after split for CoreTransform unit creation, Transform %d (%s)"
                    % (self.getID(), self.name))

            # only copy the appropriate elements
            fields = []
            if len(self.fields_to_copy) > 0:
                fields = self.fields_to_copy
            elif isType(self.unit_splitter, GenericSplitter):
                if self.unit_splitter.attribute != "":
                    fields = [self.unit_splitter.attribute.split(".")[0]]
                else:
                    for attr in self.unit_splitter.multi_attrs.keys():
                        fields.append(attr.split(".")[0])

            # now create the units from these jobs
            for sj in subjobs:
                unit = CoreUnit()

                for attr in fields:
                    setattr(unit, attr, copy.deepcopy(getattr(sj, attr)))

                self.addUnitToTRF(unit)

        # -----------------------------------------------------------------
        # otherwise split on inputdata
        elif len(self.inputdata) > 0:

            if self.files_per_unit > 0:

                # combine all files and split accordingly
                filelist = []
                for ds in self.inputdata:

                    if isType(ds, GangaDataset):
                        for f in ds.files:
                            if f.containsWildcards():
                                # we have a wildcard so grab the subfiles
                                for sf in f.getSubFiles(
                                        process_wildcards=True):
                                    filelist.append(sf)
                            else:
                                # no wildcards so just add the file
                                filelist.append(f)
                    else:
                        logger.warning("Dataset '%s' doesn't support files" %
                                       getName(ds))

                # create DSs and units for this list of files
                fid = 0
                while fid < len(filelist):
                    unit = CoreUnit()
                    unit.name = "Unit %d" % len(self.units)
                    unit.inputdata = GangaDataset(
                        files=filelist[fid:fid + self.files_per_unit])
                    unit.inputdata.treat_as_inputfiles = self.inputdata[
                        0].treat_as_inputfiles

                    fid += self.files_per_unit

                    self.addUnitToTRF(unit)

            else:
                # just produce one unit per dataset
                for ds in self.inputdata:

                    # avoid splitting over chain inputs
                    if isType(ds, TaskChainInput):
                        continue

                    unit = CoreUnit()
                    unit.name = "Unit %d" % len(self.units)
                    unit.inputdata = copy.deepcopy(ds)
                    self.addUnitToTRF(unit)
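
A hedged configuration sketch (assumes a Ganga tasks session where CoreTransform and GenericSplitter are exported; in normal use the Tasks framework calls createUnits itself):

    # drive unit creation through a GenericSplitter over application.args
    trf = CoreTransform()
    trf.unit_splitter = GenericSplitter()
    trf.unit_splitter.attribute = 'application.args'
    trf.unit_splitter.values = [['a'], ['b']]  # illustrative values
    trf.createUnits()  # one CoreUnit per splitter value, copying 'application'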
Example #8
def makeRegisteredJob():
    """Makes a new Job and registers it with the Registry"""
    from Ganga.GPIDev.Lib.Job.Job import Job
    j = Job()
    j._auto__init__()
    return j
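
A short usage sketch (hypothetical; assumes an interactive Ganga session with the registry running):

    # create a job that the registry tracks, then clean it up again
    j = makeRegisteredJob()
    j.application.exe = 'echo'  # illustrative; assumes the default Executable application
    j.remove()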