# --- Destination / bookkeeping metadata for the (already created) job spec ---
job.destinationDBlock = datasetName
job.destinationSE = 'local'
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.jobParameters = ' --lsstJobParams="%s" ' % lsstJobParams

# Fall back to the configured default when no user name was supplied.
job.prodUserName = prodUserName if prodUserName is not None else prodUserNameDefault

# Optional pipeline bookkeeping fields: copy each one only when it is set.
if PIPELINE_PROCESSINSTANCE is not None:
    job.taskID = PIPELINE_PROCESSINSTANCE
if PIPELINE_EXECUTIONNUMBER is not None:
    job.attemptNr = PIPELINE_EXECUTIONNUMBER
if PIPELINE_TASK is not None:
    job.processingType = PIPELINE_TASK

job.computingSite = site
job.VO = "lsst"

# Register the log tarball as the job's single output file (type 'log').
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

# Submit to the PanDA server and report the id(s) it assigned.
s, o = Client.submitJobs([job], srvID=aSrvID)
print(s)
for x in o:
    print("PandaID=%s" % x[0])
# Unique dataset name and destination for this submission.
datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = 'local'

# Build the job specification from scratch.
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s" % str(uuid.uuid4())
# MPI transform on Titan that will run actual job
job.transformation = ('/lustre/atlas/proj-shared/csc108/panitkin/alicetest1/'
                      'mpi_wrapper_alice_ppbench.py')
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = " "
job.VO = 'alice'

# Register the log tarball as the job's single output file (type 'log').
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

# Submit to the PanDA server and report the id(s) it assigned.
s, o = Client.submitJobs([job], srvID=aSrvID)
print(s)
for x in o:
    print("PandaID=%s" % x[0])