def __init__(self, logger, dbname="aCTjobs.db"):
    aCTDB.__init__(self, logger, dbname)

    conf = aCTConfig.aCTConfigARC()
    self.proxydir = conf.get(["voms", "proxystoredir"])

    # mapping from Job class attribute types to column types
    self.jobattrmap = {int: 'integer',
                       str: 'varchar(255)',
                       arc.JobState: 'varchar(255)',
                       arc.StringList: 'varchar(1024)',
                       arc.URL: 'varchar(255)',
                       arc.Period: 'int',
                       arc.Time: 'datetime',
                       arc.StringStringMap: 'varchar(1024)'}
    ignoremems = ['STDIN', 'STDOUT', 'STDERR',
                  'STAGEINDIR', 'STAGEOUTDIR', 'SESSIONDIR',
                  'JOBLOG', 'JOBDESCRIPTION', 'JobDescriptionDocument']

    # Attributes of Job class mapped to DB column type
    self.jobattrs = {}
    j = arc.Job()
    for i in dir(j):
        if re.match('^__', i):
            continue
        if i in ignoremems:
            continue
        if type(getattr(j, i)) in self.jobattrmap:
            self.jobattrs[i] = type(getattr(j, i))
def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Create a new job object with a given JobID
    job = arc.Job()
    job.JobID = "https://piff.hep.lu.se:443/arex/hYDLDmyxvUfn5h5iWqkutBwoABFKDmABFKDmIpHKDmYBFKDmtRy9En"
    job.Flavour = "ARC1"
    job.ServiceInformationURL = job.JobStatusURL = job.JobManagementURL = arc.URL("https://piff.hep.lu.se:443/arex")

    sys.stdout.write("Get job information from the computing element...\n")
    # Put the job into a JobSupervisor and update its information
    job_supervisor = arc.JobSupervisor(uc, [job])
    job_supervisor.Update()

    sys.stdout.write("Downloading results...\n")
    # Prepare a list for storing the directories of the downloaded job results
    # (in case there were more jobs)
    downloadeddirectories = arc.StringList()
    # Start retrieving the results of all the selected jobs
    # into the "/tmp" directory (first argument),
    # using the job ID and not the job name as the name of the subdirectory (second argument, usejobname = False),
    # not overwriting existing directories with the same name (third argument, force = False),
    # and collecting the downloaded directories into "downloadeddirectories" (fourth argument)
    success = job_supervisor.Retrieve("/tmp", False, False, downloadeddirectories)
    if not success:
        sys.stdout.write("Downloading results failed.\n")
    for downloadeddirectory in downloadeddirectories:
        sys.stdout.write("Job results were downloaded to %s\n" % str(downloadeddirectory))
        sys.stdout.write("Contents of the directory:\n")
        for filename in os.listdir(downloadeddirectory):
            sys.stdout.write("  %s\n" % filename)
def _db2job(self, dbinfo):
    '''
    Convert a dictionary of DB key/value pairs into an arc.Job object
    '''
    j = arc.Job()
    for attr in self.jobattrs:
        if attr not in dbinfo or dbinfo[attr] is None:
            continue

        # Some object types need special treatment
        if self.jobattrs[attr] == arc.StringList:
            l = arc.StringList()
            for item in dbinfo[attr].encode('utf-8').split('|'):
                l.append(item)
            setattr(j, attr, l)
            continue

        if self.jobattrs[attr] == arc.StringStringMap:
            m = arc.StringStringMap()
            d = eval(dbinfo[attr])
            if not isinstance(d, dict):
                continue
            for (k, v) in d.items():
                m[k] = v
            setattr(j, attr, m)
            continue

        setattr(j, attr, self.jobattrs[attr](str(dbinfo[attr])))
    return j
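# The conversion in the opposite direction is referenced elsewhere in this code
# (JobConv().job2db(job)). The sketch below is only a hypothetical illustration
# of that direction, based on the storage conventions _db2job reads back:
# StringLists joined with '|' and StringStringMaps stored as a Python dict
# literal. The method name is made up, and the final str() branch is a
# simplification; some types (arc.URL, arc.Time, arc.JobState) may need their
# own string conversion in a real implementation.
def _job2db_sketch(self, job):
    d = {}
    for attr, attrtype in self.jobattrs.items():
        value = getattr(job, attr)
        if attrtype == arc.StringList:
            # store as 'a|b|c', matching the split('|') in _db2job
            d[attr] = '|'.join(value)
        elif attrtype == arc.StringStringMap:
            # store as a dict literal, matching the eval() in _db2job
            d[attr] = str(dict(zip(value.keys(), value.values())))
        else:
            d[attr] = str(value)
    return d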
def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Create a new job object with a given JobID
    job = arc.Job()
    job.JobID = "https://piff.hep.lu.se:443/arex/1QuMDmRwvUfn5h5iWqkutBwoABFKDmABFKDmIpHKDmXBFKDmIuAean"
    job.Flavour = "ARC1"
    job.JobManagementURL = arc.URL("https://piff.hep.lu.se:443/arex")
    job.JobStatusURL = arc.URL("https://piff.hep.lu.se:443/arex")

    sys.stdout.write("Job object before update:\n")
    job.SaveToStream(arc.CPyOstream(sys.stdout), True)

    job_supervisor = arc.JobSupervisor(uc, [job])
    # Update the states of jobs within this JobSupervisor
    job_supervisor.Update()

    # Get our updated job from the JobSupervisor
    jobs = job_supervisor.GetAllJobs()
    job = jobs[0]

    sys.stdout.write("Job object after update:\n")
    job.SaveToStream(arc.CPyOstream(sys.stdout), True)
def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Create a new job object with a given JobID
    job = arc.Job()
    job.JobID = "https://piff.hep.lu.se:443/arex/w7LNDmSkEiun1ZPzno6AuCjpABFKDmABFKDmZ9LKDmUBFKDmXugZwm"
    job.IDFromEndpoint = "w7LNDmSkEiun1ZPzno6AuCjpABFKDmABFKDmZ9LKDmUBFKDmXugZwm"
    job.JobManagementURL = arc.URL("https://piff.hep.lu.se:443/arex")
    job.JobStatusURL = arc.URL("https://piff.hep.lu.se:443/arex")
    job.JobStatusInterfaceName = 'org.ogf.glue.emies.activitymanagement'
    job.JobManagementInterfaceName = 'org.ogf.glue.emies.activitymanagement'

    sys.stdout.write("Job object before update:\n")
    job.SaveToStream(arc.CPyOstream(sys.stdout), True)

    job_supervisor = arc.JobSupervisor(uc, [job])
    # Update the states of jobs within this JobSupervisor
    job_supervisor.Update()

    # Get our updated job from the JobSupervisor
    jobs = job_supervisor.GetAllJobs()
    if not jobs:
        sys.stdout.write("No jobs found\n")
        return
    job = jobs[0]

    sys.stdout.write("Job object after update:\n")
    job.SaveToStream(arc.CPyOstream(sys.stdout), True)
def run(self):
    '''Do brokering and submit'''

    arclog = arc_utils.ARCLogger(baselogger, 0)
    tmplog = arclog.log

    # Do brokering among the available queues
    jobdesc = self.jobdescs[0]
    broker = arc.Broker(self.userconfig, jobdesc, "Random")
    targetsorter = arc.ExecutionTargetSorter(broker)
    for target in self.queuelist:
        tmplog.debug("considering target {0}:{1}".format(
            target.ComputingService.Name, target.ComputingShare.Name))

        # Adding an entity performs matchmaking and brokering
        targetsorter.addEntity(target)

    if len(targetsorter.getMatchingTargets()) == 0:
        tmplog.error("no clusters satisfied job description requirements")
        return

    targetsorter.reset()  # required to reset the iterator, otherwise we get a seg fault
    selectedtarget = targetsorter.getCurrentTarget()

    # Job object will contain the submitted job
    job = arc.Job()
    submitter = arc.Submitter(self.userconfig)
    if submitter.Submit(selectedtarget, jobdesc, job) != arc.SubmissionStatus.NONE:
        tmplog.error("Submission failed")
        return

    self.job = job
def __init__(self):
    self.jobattrmap = {int: 'integer',
                       str: 'varchar(255)',
                       arc.JobState: 'varchar(255)',
                       arc.StringList: 'varchar(1024)',
                       arc.URL: 'varchar(255)',
                       arc.Period: 'int',
                       arc.Time: 'datetime',
                       arc.StringStringMap: 'varchar(1024)'}
    ignoremems = ['STDIN', 'STDOUT', 'STDERR',
                  'STAGEINDIR', 'STAGEOUTDIR', 'SESSIONDIR',
                  'JOBLOG', 'JOBDESCRIPTION', 'JobDescriptionDocument']

    # Attributes of Job class mapped to DB column type
    self.jobattrs = {}
    j = arc.Job()
    for i in dir(j):
        if re.match('^__', i):
            continue
        if i in ignoremems:
            continue
        if type(getattr(j, i)) in self.jobattrmap:
            self.jobattrs[i] = type(getattr(j, i))
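# Hypothetical illustration (not part of the original class): the self.jobattrs
# map built above pairs each usable arc.Job attribute with a Python type, and
# self.jobattrmap translates that type into a column type, so together they can
# drive a table definition. The method name, the table name and the bare
# CREATE TABLE statement below are assumptions for illustration only.
def columns_sql_sketch(self):
    columns = ", ".join("%s %s" % (attr, self.jobattrmap[atype])
                        for attr, atype in self.jobattrs.items())
    return "CREATE TABLE arcjobs (%s)" % columns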
def _getARCJob(self, jobID):
    """Create an ARC Job with all the needed / possible parameters defined.
    By the time we come here, the environment variable X509_USER_PROXY should already be set
    """
    j = arc.Job()
    j.JobID = str(jobID)
    j.IDFromEndpoint = os.path.basename(j.JobID)

    if self.endpointType == "Gridftp":
        statURL = "ldap://%s:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=%s)" % (self.ceHost, jobID)
        j.JobStatusURL = arc.URL(str(statURL))
        j.JobStatusInterfaceName = "org.nordugrid.ldapng"

        mangURL = "gsiftp://%s:2811/jobs/" % (self.ceHost)
        j.JobManagementURL = arc.URL(str(mangURL))
        j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"

        j.ServiceInformationURL = j.JobManagementURL
        j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"
    else:
        commonURL = "https://%s:8443/arex" % self.ceHost
        j.JobStatusURL = arc.URL(str(commonURL))
        j.JobStatusInterfaceName = "org.ogf.glue.emies.activitymanagement"

        j.JobManagementURL = arc.URL(str(commonURL))
        j.JobManagementInterfaceName = "org.ogf.glue.emies.activitymanagement"

        j.ServiceInformationURL = arc.URL(str(commonURL))
        j.ServiceInformationInterfaceName = "org.ogf.glue.emies.resourceinfo"

    j.PrepareHandler(self.usercfg)
    return j
def workspec2arcjob(workspec):
    '''Convert WorkSpec.workAttributes to arc.Job object'''

    job = arc.Job()
    try:
        wsattrs = workspec.workAttributes['arcjob']
        proxyrole = workspec.workAttributes['proxyrole']
    except:
        # Job was not submitted yet
        return (job, arc.Time(), None)

    for attr in dir(job):
        if attr not in wsattrs or attr == 'CreationTime':
            continue

        attrtype = type(getattr(job, attr))
        # Some object types need special treatment
        if attrtype == arc.StringList:
            strlist = arc.StringList()
            for item in wsattrs[attr].split('|'):
                strlist.append(str(item))
            setattr(job, attr, strlist)
        elif attrtype == arc.StringStringMap:
            ssm = arc.StringStringMap()
            for (k, v) in json.loads(wsattrs[attr]).items():
                ssm[str(k)] = str(v)
            setattr(job, attr, ssm)
        else:
            setattr(job, attr, attrtype(str(wsattrs[attr])))
    return (job, arc.Time(str(wsattrs['ModificationTime'])), proxyrole)
def _getARCJob(self, jobID):
    """Create an ARC Job with all the needed / possible parameters defined.
    By the time we come here, the environment variable X509_USER_PROXY should already be set
    """
    j = arc.Job()
    j.JobID = str(jobID)
    j.IDFromEndpoint = os.path.basename(j.JobID)

    # Get the endpoint type (GridFTP or AREX)
    endpointType = j.JobID.split(":")[0]
    if endpointType == "gsiftp":
        statURL = "ldap://%s:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=%s)" % (self.ceHost, jobID)
        j.JobStatusURL = arc.URL(str(statURL))
        j.JobStatusInterfaceName = "org.nordugrid.ldapng"

        mangURL = os.path.dirname(j.JobID)
        j.JobManagementURL = arc.URL(str(mangURL))
        j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"

        j.ServiceInformationURL = j.JobManagementURL
        j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"
    else:
        commonURL = "/".join(j.JobID.split("/")[0:4])
        j.JobStatusURL = arc.URL(str(commonURL))
        j.JobStatusInterfaceName = "org.nordugrid.arcrest"

        j.JobManagementURL = arc.URL(str(commonURL))
        j.JobManagementInterfaceName = "org.nordugrid.arcrest"

        j.ServiceInformationURL = arc.URL(str(commonURL))
        j.ServiceInformationInterfaceName = "org.nordugrid.arcrest"

    j.PrepareHandler(self.usercfg)
    return j
def processToResubmit(self):

    if self.cluster:
        jobstoresubmit = self.db.getArcJobs("arcstate='toresubmit' and cluster='" + self.cluster + "'")
    else:
        jobstoresubmit = self.db.getArcJobs("arcstate='toresubmit' and clusterlist=''")

    for proxyid, jobs in jobstoresubmit.items():
        self.uc.CredentialString(str(self.db.getProxy(proxyid)))

        # Clean up jobs which were submitted
        jobstoclean = [job[2] for job in jobs if job[2].JobID]

        if jobstoclean:
            # Select all jobs for cancellation; however, the supervisor will only cancel
            # cancellable jobs and remove the rest, so Clean() has to be called twice
            job_supervisor = arc.JobSupervisor(self.uc, jobstoclean)
            job_supervisor.Update()
            self.log.info("Cancelling %i jobs" % len(jobstoclean))
            job_supervisor.Cancel()

            processed = job_supervisor.GetIDsProcessed()
            notprocessed = job_supervisor.GetIDsNotProcessed()

            # Clean the successfully cancelled jobs
            if processed:
                job_supervisor.SelectByID(processed)
                self.log.info("Cleaning %i jobs" % len(processed))
                if not job_supervisor.Clean():
                    self.log.warning("Failed to clean some jobs")

            # New job supervisor with the uncancellable jobs
            if notprocessed:
                notcancellable = [job for job in jobstoclean if job.JobID in notprocessed]
                job_supervisor = arc.JobSupervisor(self.uc, notcancellable)
                job_supervisor.Update()

                self.log.info("Cleaning %i jobs" % len(notcancellable))
                if not job_supervisor.Clean():
                    self.log.warning("Failed to clean some jobs")

        # Empty job to reset DB info
        j = arc.Job()
        for (id, appjobid, job, created) in jobs:
            self.db.updateArcJob(id, {"arcstate": "tosubmit",
                                      "tarcstate": self.db.getTimeStamp(),
                                      "cluster": None}, j)
def Submit(id, appjobid, jobdescstr, ucproxy, timeout):

    global queuelist
    global usercred

    # get the submission logger
    #log = logger()
    log = logging.getLogger()

    if len(queuelist) == 0:
        log.error("%s: no cluster free for submission" % appjobid)
        return None

    #cred_type = arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)
    #uc = arc.UserConfig(cred_type)
    uc = usercred
    uc.CredentialString(ucproxy)

    jobdescs = arc.JobDescriptionList()
    if not jobdescstr or not arc.JobDescription_Parse(jobdescstr, jobdescs):
        log.error("%s: Failed to prepare job description" % appjobid)
        return None

    # Do brokering among the available queues
    jobdesc = jobdescs[0]
    broker = arc.Broker(uc, jobdesc, "Random")
    targetsorter = arc.ExecutionTargetSorter(broker)
    for target in queuelist:
        log.debug("%s: considering target %s:%s" % (appjobid, target.ComputingService.Name, target.ComputingShare.Name))

        # Adding an entity performs matchmaking and brokering
        targetsorter.addEntity(target)

    if len(targetsorter.getMatchingTargets()) == 0:
        log.error("%s: no clusters satisfied job description requirements" % appjobid)
        return None

    targetsorter.reset()  # required to reset the iterator, otherwise we get a seg fault
    selectedtarget = targetsorter.getCurrentTarget()

    # Job object will contain the submitted job
    job = arc.Job()
    submitter = arc.Submitter(uc)
    if submitter.Submit(selectedtarget, jobdesc, job) != arc.SubmissionStatus.NONE:
        log.error("%s: Submission failed" % appjobid)
        return None

    jconv = JobConv()
    return jconv.job2db(job)
def resetJobs(self, jobstoreset):
    '''
    Empty all StringLists in jobs so that when they are updated they do not
    contain duplicate values, since ARC always appends to these lists.
    '''
    emptylist = arc.StringList()
    j = arc.Job()
    attrstoreset = [attr for attr in dir(j) if type(getattr(j, attr)) == arc.StringList]

    for jobs in jobstoreset.values():
        for job in jobs:
            for attr in attrstoreset:
                setattr(job[2], attr, emptylist)
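# Hypothetical usage sketch (not from the original source): resetJobs expects
# the same structure produced by self.db.getArcJobs() as used elsewhere in this
# code, i.e. a dict keyed by proxyid whose values are lists of
# (id, appjobid, arc.Job, created) tuples, with the arc.Job at index 2. The
# method name and WHERE clause below are illustrative assumptions only.
def exampleResetBeforeUpdate(self):
    jobstoreset = self.db.getArcJobs("arcstate='torerun' and cluster='" + self.cluster + "'")
    # Clear the StringList attributes before a JobSupervisor update appends to them again
    self.resetJobs(jobstoreset)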
def __getARCJob(self, jobID):
    """Create an ARC Job with all the needed / possible parameters defined.
    By the time we come here, the environment variable X509_USER_PROXY should already be set
    """
    j = arc.Job()
    j.JobID = jobID

    statURL = "ldap://%s:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=%s)" % (self.ceHost, jobID)
    j.JobStatusURL = arc.URL(statURL)
    j.JobStatusInterfaceName = "org.nordugrid.ldapng"

    mangURL = "gsiftp://%s:2811/jobs/" % (self.ceHost)
    j.JobManagementURL = arc.URL(mangURL)
    j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"

    j.ServiceInformationURL = j.JobManagementURL
    j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"

    j.PrepareHandler(self.usercfg)
    return j
def create_test_job(self,
                    job_id="http://test.nordugrid.org/testid",
                    cluster="http://test.nordugrid.org",
                    state=arc.JobState.RUNNING,
                    state_text=None,
                    job_description="non-empty"):
    job = arc.Job()
    job.JobID = job_id
    job.ServiceInformationInterfaceName = job.JobStatusInterfaceName = job.JobManagementInterfaceName = "org.nordugrid.test"
    job.ServiceInformationURL = job.JobStatusURL = job.JobManagementURL = arc.URL(cluster)
    if state_text is None:
        job.State = arc.JobStateTEST(state)
    else:
        job.State = arc.JobStateTEST(state, state_text)
    job.JobDescriptionDocument = job_description
    return job
def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Creating an endpoint for a Computing Element
    endpoint = arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2")

    # Get the ExecutionTargets of this ComputingElement
    retriever = arc.ComputingServiceRetriever(uc, [endpoint])
    retriever.wait()
    targets = retriever.GetExecutionTargets()

    # Shuffle the targets to simulate a random broker
    targets = list(targets)
    random.shuffle(targets)

    # Create a JobDescription
    jobdesc = arc.JobDescription()
    jobdesc.Application.Executable.Path = "/bin/hostname"
    jobdesc.Application.Output = "stdout.txt"

    # Create an empty job object which will contain our submitted job
    job = arc.Job()
    success = False

    # Submit the job directly to the execution targets, without a broker
    for target in targets:
        sys.stdout.write("Trying to submit to %s (%s) ... " % (target.ComputingEndpoint.URLString,
                                                               target.ComputingEndpoint.InterfaceName))
        sys.stdout.flush()
        success = target.Submit(uc, jobdesc, job)
        if success:
            sys.stdout.write("succeeded!\n")
            break
        else:
            sys.stdout.write("failed!\n")

    if success:
        sys.stdout.write("Job was submitted:\n")
        job.SaveToStream(arc.CPyOstream(sys.stdout), False)
    else:
        sys.stdout.write("Job submission failed\n")
def Submit(jobdescs, uc, log, appjobid):

    global queuelist

    if len(queuelist) == 0:
        log.error("%s: no cluster free for submission" % appjobid)
        return None

    # Do brokering among the available queues
    jobdesc = jobdescs[0]
    broker = arc.Broker(uc, jobdesc, "Random")
    targetsorter = arc.ExecutionTargetSorter(broker)
    for target in queuelist:
        log.debug("%s: considering target %s:%s" % (appjobid, target.ComputingService.Name, target.ComputingShare.Name))

        # Adding an entity performs matchmaking and brokering
        targetsorter.addEntity(target)

    if len(targetsorter.getMatchingTargets()) == 0:
        log.error("%s: no clusters satisfied job description requirements" % appjobid)
        return None

    targetsorter.reset()  # required to reset the iterator, otherwise we get a seg fault
    selectedtarget = targetsorter.getCurrentTarget()

    # Job object will contain the submitted job
    job = arc.Job()
    submitter = arc.Submitter(uc)
    if submitter.Submit(selectedtarget, jobdesc, job) != arc.SubmissionStatus.NONE:
        log.error("%s: Submission failed" % appjobid)
        return None

    return job
'''
This regression test checks whether deleting a Swig iterator obtained from an
ARC C++ public member std::list object generates a segmentation fault. That
issue was reported in bug 2473.
'''
import arc

j = arc.Job()
for i in j.Error:
    continue
def submit_job(self, executable, args=[], input_files=[]):
    """
    Submit a job and return the job ID

    :param executable: The command to run on the LOTUS cluster
    :param args: List of arguments to pass to the executable
    :param input_files: A list of paths to local files to copy to the remote
                        session directory (the directory the job will run from
                        on JASMIN)
    :raises InputFileError: if any of the specified input files do not exist or
                            are directories
    :raises NoTargetsAvailableError: if no execution targets can be found on the
                                     ARC server
    :raises JobSubmissionError: if the job cannot be submitted to any targets
    :return: Job ID
    """
    endpoint = arc.Endpoint(self.config.ARC_SERVER, arc.Endpoint.COMPUTINGINFO)
    user_config = self.get_user_config()

    # Get the ExecutionTargets of this ComputingElement
    retriever = arc.ComputingServiceRetriever(user_config, [endpoint])
    retriever.wait()
    targets = retriever.GetExecutionTargets()

    if len(targets) == 0:
        raise NoTargetsAvailableError("No targets available")

    input_files_map = {}  # Map local paths to destination file names
    for filename in input_files:
        if not os.path.isfile(filename):
            raise InputFileError("{} is not a file".format(filename))
        # Use absolute local path
        input_files_map[os.path.abspath(filename)] = os.path.basename(filename)

    template = self.env.get_template("job_template.xml")
    jsdl = template.render({
        "name": "ARC job",  # TODO: Use sensible name or omit
        "executable": executable,
        "arguments": args,
        "input_files_map": input_files_map,
        "output_file": self.config.OUTPUT_FILE
    })
    job_descriptions = self.get_job_descriptions(jsdl)

    # Create an empty job object which will contain our submitted job
    job = arc.Job()

    # Submit job directly to the execution targets, without a broker.
    # Try each target until successfully submitted
    for target in targets:
        msg = "Attempting to submit job to {} ({})".format(target.ComputingEndpoint.URLString,
                                                           target.ComputingEndpoint.InterfaceName)
        self.logger.msg(arc.DEBUG, msg)

        if target.Submit(user_config, job_descriptions[0], job):
            break
        else:
            self.logger.msg(arc.DEBUG, "Failed to submit job")
    else:
        raise JobSubmissionError("Could not submit job to any of the {} available target(s)".format(len(targets)))

    self.logger.msg(arc.INFO, "Started job with ID: {}".format(job.JobID))

    # Write information on submitted job to local job list so standard arc tools
    # (arcstat, arcget etc) can be used with this job
    job_list = arc.JobInformationStorageBDB(self.config.JOBS_INFO_FILE)
    if not job_list.Write([job]):
        self.logger.msg(arc.WARNING,
                        "Failed to write to local job list {}".format(self.config.JOBS_INFO_FILE))

    return job.JobID
def submitJob(self, executableFile, proxy, numberOfJobs=1):
    """Method to submit job"""

    # Assume that the ARC queues are always of the format nordugrid-<batchSystem>-<queue>
    # and that none of our supported batch systems have a "-" in their name
    self.arcQueue = self.queue.split("-", 2)[2]
    result = self._prepareProxy()
    if not result["OK"]:
        self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
        return result
    self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])

    self.log.verbose("Executable file path: %s" % executableFile)
    if not os.access(executableFile, 5):
        os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

    batchIDList = []
    stampDict = {}

    # Creating an endpoint
    endpoint = arc.Endpoint(self.ceHost, arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2")

    # Get the ExecutionTargets of the ComputingElement (can be REST, EMI-ES or GRIDFTP)
    retriever = arc.ComputingServiceRetriever(self.usercfg, [endpoint])
    retriever.wait()
    targetsWithQueues = list(retriever.GetExecutionTargets())  # Targets also include queues

    # To avoid losing time trying to submit to queues we cannot interact with,
    # we only keep the interesting ones
    targets = []
    for target in targetsWithQueues:
        if target.ComputingShare.Name == self.arcQueue:
            self.log.debug(
                "Adding target:",
                "%s (%s)" % (target.ComputingEndpoint.URLString, target.ComputingEndpoint.InterfaceName),
            )
            targets.append(target)

    # At this point, we should have GRIDFTP and AREX (EMI-ES and REST) targets related to arcQueue.
    # We intend to submit to AREX first; if that does not work, GRIDFTP is used
    submissionWorked = False
    for target in targets:
        # If the submission is already done, we stop
        if submissionWorked:
            break

        for __i in range(numberOfJobs):

            # The basic job description
            jobdescs = arc.JobDescriptionList()

            # Get the job into the ARC way
            xrslString, diracStamp = self._writeXRSL(executableFile)
            self.log.debug("XRSL string submitted : %s" % xrslString)
            self.log.debug("DIRAC stamp for job : %s" % diracStamp)

            # The arc bindings don't accept unicode objects in Python 2, so xrslString must be explicitly cast
            result = arc.JobDescription_Parse(str(xrslString), jobdescs)
            if not result:
                self.log.error("Invalid job description", "%r, message=%s" % (xrslString, result.str()))
                break

            # Submit the job
            job = arc.Job()
            result = target.Submit(self.usercfg, jobdescs[0], job)

            # Save the job info if the submission succeeded, otherwise analyze the error
            if result == arc.SubmissionStatus.NONE:
                # Job successfully submitted
                pilotJobReference = job.JobID
                batchIDList.append(pilotJobReference)
                stampDict[pilotJobReference] = diracStamp
                submissionWorked = True
                self.log.debug("Successfully submitted job %s to CE %s" % (pilotJobReference, self.ceHost))
            else:
                self._analyzeSubmissionError(result)
                break

    if batchIDList:
        result = S_OK(batchIDList)
        result["PilotStampDict"] = stampDict
    else:
        result = S_ERROR("No pilot references obtained from the ARC job submission")
    return result
'''
This regression test checks whether invoking the next method on a Swig iterator
obtained from a std::list of ARC C++ objects generates a segmentation fault.
That issue was reported in bug 2683.
'''
import arc

jobs = arc.JobList()
jobs.push_back(arc.Job())

itJobs = jobs.__iter__()
next(itJobs)