def create(self, jobNode):
    jobStoreID = self._newJobID()
    log.debug("Creating job %s for '%s'",
              jobStoreID, '<no command>' if jobNode.command is None else jobNode.command)
    job = JobGraph.fromJobNode(jobNode, jobStoreID=jobStoreID, tryCount=self._defaultTryCount())
    if hasattr(self, "_batchedJobGraphs") and self._batchedJobGraphs is not None:
        self._batchedJobGraphs.append(job)
    else:
        self._writeString(jobStoreID, pickle.dumps(job, protocol=pickle.HIGHEST_PROTOCOL))  # UPDATE: bz2.compress(
    return job
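# A minimal sketch, assuming how the _batchedJobGraphs attribute checked above gets
# set up: a batch() context manager could initialize the list so that create()
# appends to it instead of writing immediately, then flush everything in one pass
# on exit. This is illustrative only, not the actual Toil implementation.
from contextlib import contextmanager
import pickle

@contextmanager
def batch(self):
    # While the context is active, create() defers writes by appending to this list.
    self._batchedJobGraphs = []
    try:
        yield
    finally:
        # Flush every collected job graph with the same pickling used by create().
        for job in self._batchedJobGraphs:
            self._writeString(job.jobStoreID,
                              pickle.dumps(job, protocol=pickle.HIGHEST_PROTOCOL))
        self._batchedJobGraphs = None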
def _newMesosTask(self, job, offer):
    """
    Build the Mesos task object for the given Toil job and Mesos offer.
    """
    task = addict.Dict()
    task.task_id.value = str(job.jobID)
    task.agent_id.value = offer.agent_id.value
    task.name = job.name
    task.data = encode_data(pickle.dumps(job))
    task.executor = addict.Dict(self.executor)
    task.resources = []

    task.resources.append(addict.Dict())
    cpus = task.resources[-1]
    cpus.name = 'cpus'
    cpus.type = 'SCALAR'
    cpus.scalar.value = job.resources.cores

    task.resources.append(addict.Dict())
    disk = task.resources[-1]
    disk.name = 'disk'
    disk.type = 'SCALAR'
    if toMiB(job.resources.disk) > 1:
        disk.scalar.value = toMiB(job.resources.disk)
    else:
        log.warning("Job %s uses less disk than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.disk)
        disk.scalar.value = 1

    task.resources.append(addict.Dict())
    mem = task.resources[-1]
    mem.name = 'mem'
    mem.type = 'SCALAR'
    if toMiB(job.resources.memory) > 1:
        mem.scalar.value = toMiB(job.resources.memory)
    else:
        log.warning("Job %s uses less memory than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.memory)
        mem.scalar.value = 1

    return task
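# A minimal, self-contained sketch (not part of the batch system) of the 1 MiB floor
# applied above: the disk and memory scalars are expressed in mebibytes, and requests
# smaller than one MiB are rounded up before being offered to Mesos. The helper names
# below (to_mib, clamp_to_mesos_minimum) are illustrative, not Toil API.
def to_mib(num_bytes):
    # Convert a byte count to mebibytes.
    return num_bytes / (1024 * 1024)

def clamp_to_mesos_minimum(num_bytes):
    # Return the resource value in MiB, never less than the 1 MiB minimum used above.
    mib = to_mib(num_bytes)
    return mib if mib > 1 else 1

# Example: a 512 KiB disk request becomes a 1 MiB Mesos resource.
assert clamp_to_mesos_minimum(512 * 1024) == 1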
def update(self, job):
    self._writeString(job.jobStoreID,
                      pickle.dumps(job, protocol=pickle.HIGHEST_PROTOCOL),
                      update=True)
def create(self, jobDescription):
    # TODO: we don't implement batching, but we probably should.
    self._writeString(jobDescription.jobStoreID,
                      pickle.dumps(jobDescription, protocol=pickle.HIGHEST_PROTOCOL))
    return jobDescription
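# A minimal sketch of the serialization round trip that create() and update() rely on:
# the job object is pickled with the highest protocol on write, and the stored bytes
# can be restored with pickle.loads on read. _ExampleJob is a hypothetical stand-in
# for a JobDescription; the real read path goes through the job store's own API.
import pickle

class _ExampleJob:
    def __init__(self, jobStoreID, command):
        self.jobStoreID = jobStoreID
        self.command = command

stored = pickle.dumps(_ExampleJob("abc123", "echo hello"), protocol=pickle.HIGHEST_PROTOCOL)
restored = pickle.loads(stored)
assert restored.command == "echo hello"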