def create_task(self, offer, t):
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(t.id)
    task.slave_id.value = offer.slave_id.value
    task.name = "%s-%s" % (t.id, t.tried)
    task.executor.MergeFrom(self.executor)
    task.data = compress(cPickle.dumps((t, t.tried), -1))

    cpu = task.resources.add()
    cpu.name = "cpus"
    cpu.type = 0  # mesos_pb2.Value.SCALAR
    cpu.scalar.value = t.resources.get("cpus", 1)

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = 0  # mesos_pb2.Value.SCALAR
    mem.scalar.value = t.resources.get("mem", 100)

    # gpu = task.resources.add()
    # gpu.name = "gpus"
    # gpu.type = 0  # mesos_pb2.Value.SCALAR
    # gpu.scalar.value = t.resources.get("gpus", 1)

    return task

def create_task(self, offer, t, cpus):
    task = mesos_pb2.TaskInfo()
    task.task_id.value = t.id
    task.slave_id.value = offer.slave_id.value
    task.name = "task(%s/%d)" % (t.id, self.taskNum)
    task.executor.MergeFrom(self.executor)
    task.data = compress(cPickle.dumps((t, t.tried), -1))

    cpu = task.resources.add()
    cpu.name = "cpus"
    cpu.type = 0  # mesos_pb2.Value.SCALAR
    cpu.scalar.value = min(self.cpus, cpus)

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = 0  # mesos_pb2.Value.SCALAR
    mem.scalar.value = self.mem

    # gpu = task.resources.add()
    # gpu.name = "gpus"
    # gpu.type = 0  # mesos_pb2.Value.SCALAR
    # gpu.scalar.value = self.gpus

    return task

def resourceOffers(self, driver, offers):
    logging.debug("resourceOffers (%s, %s)", driver, offers)
    for offer in offers:
        logging.info("Received offer: %s", offer)

        task = mesos_pb2.TaskInfo()
        task.task_id.value = "job-{}".format(self.number)
        task.slave_id.value = offer.slave_id.value
        task.name = "Job {}".format(self.number)

        task.command.shell = False
        task.command.value = "36000"

        task.container.type = mesos_pb2.ContainerInfo.DOCKER
        task.container.docker.image = "python:2.7-alpine"
        task.container.docker.network = task.container.docker.BRIDGE
        task.container.docker.force_pull_image = False
        task.container.hostname = task.task_id.value

        entrypoint = task.container.docker.parameters.add()
        entrypoint.key = "entrypoint"
        entrypoint.value = "/bin/sleep"

        rmparam = task.container.docker.parameters.add()
        rmparam.key = "rm"
        rmparam.value = "true"

        cpus = task.resources.add()
        cpus.name = "cpus"
        cpus.type = mesos_pb2.Value.SCALAR
        cpus.scalar.value = 2
        cpus.role = "*"

        mem = task.resources.add()
        mem.name = "mem"
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = 2927
        mem.role = "*"

        logging.info("Created a Mesos task for %s", self.number)
        self.number += 1
        driver.launchTasks(offer.id, [task], filters=mesos_pb2.Filters())

def _create_base_task(task):
    """Creates and returns a base Mesos task from a Scale task

    :param task: The task
    :type task: :class:`job.execution.running.tasks.base_task.Task`
    :returns: The base Mesos task
    :rtype: :class:`mesos_pb2.TaskInfo`
    """

    mesos_task = mesos_pb2.TaskInfo()
    mesos_task.task_id.value = task.id
    mesos_task.slave_id.value = task.agent_id
    mesos_task.name = task.name

    resources = task.get_resources()
    if resources.cpus > 0:
        cpus = mesos_task.resources.add()
        cpus.name = 'cpus'
        cpus.type = mesos_pb2.Value.SCALAR
        cpus.scalar.value = resources.cpus
    if resources.mem > 0:
        mem = mesos_task.resources.add()
        mem.name = 'mem'
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = resources.mem
    if resources.disk > 0:
        disk = mesos_task.resources.add()
        disk.name = 'disk'
        disk.type = mesos_pb2.Value.SCALAR
        disk.scalar.value = resources.disk

    return mesos_task

def resourceOffers(self, driver, offers):
    for offer in offers:
        tasks = []
        tid = self.tasksLaunched
        self.tasksLaunched += 1

        task = mesos_pb2.TaskInfo()
        task.task_id.value = str(tid)
        task.slave_id.value = offer.slave_id.value
        task.name = "task %d" % tid
        task.command.value = self.command

        cpus = task.resources.add()
        cpus.name = "cpus"
        cpus.type = mesos_pb2.Value.SCALAR
        cpus.scalar.value = TASK_CPUS

        mem = task.resources.add()
        mem.name = "mem"
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = TASK_MEM

        tasks.append(task)
        driver.launchTasks(offer.id, tasks)

def new_task(offer):
    task = mesos_pb2.TaskInfo()
    id = uuid.uuid4()
    task.task_id.value = str(id)
    task.slave_id.value = offer.slave_id.value
    task.name = "task {}".format(str(id))

    if False:
        # container
        task.container.type = 1
        task.container.docker.image = 'kapliy/aam-lite'
        # aam/latest
        aam_code = task.container.volumes.add()
        aam_code.host_path = '/home/akapliy'
        aam_code.container_path = '/mnt/home'
        aam_code.mode = 2
        # anaconda
        aam_conda = task.container.volumes.add()
        aam_conda.host_path = '/opt/anaconda'
        aam_conda.container_path = '/opt/anaconda'
        aam_conda.mode = 2

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = 1

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = 2048

    return task

def resourceOffers(self, driver, offers):
    print "Got %d resource offers" % len(offers)
    for offer in offers:
        tasks = []
        print "Got resource offer %s" % offer.id.value
        if self.tasksLaunched < TOTAL_TASKS:
            tid = self.tasksLaunched
            self.tasksLaunched += 1
            print "Accepting offer on %s to start task %d" \
                % (offer.hostname, tid)

            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "task %d" % tid
            task.executor.MergeFrom(self.executor)

            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = TASK_CPUS

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = TASK_MEM

            tasks.append(task)
            self.taskData[task.task_id.value] = (
                offer.slave_id, task.executor.executor_id)

        driver.launchTasks(offer.id, tasks)

def _newMesosTask(self, job, offer):
    """
    Build the Mesos task object for a given Toil job and Mesos offer
    """
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(job.jobID)
    task.slave_id.value = offer.slave_id.value
    task.name = job.name
    task.data = pickle.dumps(job)
    task.executor.MergeFrom(self.executor)

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = job.resources.cores

    disk = task.resources.add()
    disk.name = "disk"
    disk.type = mesos_pb2.Value.SCALAR
    if toMiB(job.resources.disk) > 1:
        disk.scalar.value = toMiB(job.resources.disk)
    else:
        log.warning("Job %s uses less disk than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.disk)
        disk.scalar.value = 1

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    if toMiB(job.resources.memory) > 1:
        mem.scalar.value = toMiB(job.resources.memory)
    else:
        log.warning("Job %s uses less memory than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.memory)
        mem.scalar.value = 1

    return task

def _createTask(self, jt_job, offer):
    """
    Build the Mesos task object from the toil batchjob here to avoid further cluttering resourceOffers
    """
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(jt_job.jobID)
    task.slave_id.value = offer.slave_id.value
    task.name = "task %d" % jt_job.jobID

    # assigns toil command to task
    task.data = pickle.dumps(jt_job)
    task.executor.MergeFrom(self.executor)

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = jt_job.resources.cpu

    disk = task.resources.add()
    disk.name = "disk"
    disk.type = mesos_pb2.Value.SCALAR
    disk.scalar.value = jt_job.resources.disk / 1000000

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = jt_job.resources.memory / 1000000
    return task

def _create_base_task(task):
    """Creates and returns a base Mesos task from a Scale task

    :param task: The task
    :type task: :class:`job.tasks.base_task.Task`
    :returns: The base Mesos task
    :rtype: :class:`mesos_pb2.TaskInfo`
    """

    mesos_task = mesos_pb2.TaskInfo()
    mesos_task.task_id.value = task.id
    mesos_task.slave_id.value = task.agent_id
    mesos_task.name = task.name

    resources = task.get_resources()
    if settings.CONFIG_URI:
        mesos_task.command.uris.add().value = settings.CONFIG_URI

    for resource in resources.resources:
        if resource.value > 0.0:
            task_resource = mesos_task.resources.add()
            task_resource.name = resource.name
            task_resource.type = mesos_pb2.Value.SCALAR
            task_resource.scalar.value = resource.value

    return mesos_task

def getTaskInfo(self, offer, request, accept_cpu, accept_mem):
    if request is None:
        task_name = "%s:%s" % (offer.hostname, "system")
    else:
        task_name = "%s:%s" % (offer.hostname, request.name)

    task = mesos_pb2.TaskInfo()
    task.task_id.value = task_name
    task.slave_id.value = offer.slave_id.value
    task.name = "Nebula Worker"
    task.executor.MergeFrom(self.getExecutorInfo())

    if request is not None:
        task_data = {}  # FIXME: request.get_task_data(self.workrepo)
        print task_data
        task.data = json.dumps(task_data)

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = accept_cpu

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = accept_mem

    return task

def new_task(executor, offer, slave):
    """
    Generates a new task info.
    :param executor: Shred Executor Info
    :param offer: Offer to launch on
    :param slave: Slave to monitor
    :return:
    """
    task = mesos_pb2.TaskInfo()
    task.task_id.value = slave.task_id
    task.slave_id.value = offer.slave_id.value
    task.name = "Monitor %s" % slave.hostname
    task.executor.MergeFrom(executor)

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = TASK_CPUS

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = TASK_MEM

    task.data = json.dumps({'slave_location': slave.hostname})
    return task

def create_task(self, offer, t, cpus):
    task = mesos_pb2.TaskInfo()
    task.task_id.value = "%d-%d" % (t.id, t.tried)
    task.slave_id.value = offer.slave_id.value
    task.name = "task %s/%d" % (t.id, self.options.tasks)
    task.executor.MergeFrom(self.executor)

    env = dict(os.environ)
    env['DRUN_RANK'] = str(t.id)
    env['DRUN_SIZE'] = str(self.options.tasks)

    command = self.command[:]
    if self.options.expand:
        for i, x in enumerate(command):
            command[i] = x % {'RANK': t.id, 'SIZE': self.options.tasks}

    task.data = pickle.dumps([
        os.getcwd(), command, env, self.options.shell,
        self.std_port, self.err_port, None
    ])

    cpu = task.resources.add()
    cpu.name = "cpus"
    cpu.type = mesos_pb2.Value.SCALAR
    cpu.scalar.value = min(self.cpus, cpus)

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = self.mem
    return task

def resourceOffers(self, driver, offers):
    print "Got %d resource offers" % len(offers)
    for offer in offers:
        print "Considering resource offer %s from %s" % (offer.id.value, offer.hostname)
        if self.mpdsLaunched == TOTAL_MPDS:
            print "Declining permanently because we have already launched enough tasks"
            driver.declineOffer(offer.id)
            continue

        cpus = 0
        mem = 0
        tasks = []
        for resource in offer.resources:
            if resource.name == "cpus":
                cpus = resource.scalar.value
            elif resource.name == "mem":
                mem = resource.scalar.value

        if cpus < CPUS or mem < MEM:
            print "Declining offer due to too few resources"
            driver.declineOffer(offer.id)
        else:
            tid = self.mpdsLaunched
            self.mpdsLaunched += 1
            print "Accepting offer on %s to start mpd %d" % (offer.hostname, tid)

            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "task %d " % tid

            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = CPUS

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = MEM

            task.command.value = "%smpd --noconsole --ncpus=%d --host=%s --port=%s" % (
                MPICH2PATH, CPUS, self.ip, self.port)
            tasks.append(task)

            print "Replying to offer: launching mpd %d on host %s" % (tid, offer.hostname)
            driver.launchTasks(offer.id, tasks)

        if not self.startedExec and self.mpdsLaunched == TOTAL_MPDS:
            threading.Thread(target=mpiexec).start()
            self.startedExec = True

def _create_task(tid, offer, command, ns):
    """
    `tid` (str) task id
    `offer` a mesos Offer instance
    `ns.mesos_task_resources` the stuff a task would consume:
        {
            "cpus": 10,
            "mem": 1,
            "disk": 12,
            "ports": [(20, 34), (35, 35)],
            "disks": ["sda1"]
        }
    `ns.docker_image` (str|None) a docker image you wish to execute the command in
    `ns.volumes` a list of volumes that get mounted into the container:
        [
            ("host_path", "container_path", "mode"),
            ("/my/directory", "/path/on/container", "ro")
        ]
    """
    task = dict(
        task_id=mesos_pb2.TaskID(value=tid),
        slave_id=offer.slave_id,
        command=mesos_pb2.CommandInfo(
            value=command,
            uris=[mesos_pb2.CommandInfo.URI(value=uri) for uri in ns.uris],
            environment=mesos_pb2.Environment(variables=[
                mesos_pb2.Environment.Variable(name=k, value=v)
                for k, v in ns.mesos_environment
            ])))
    if ns.mesos_framework_name:
        task.update(name="relay.mesos task: %s: %s" % (ns.mesos_framework_name, tid))
    else:
        task.update(name="relay.mesos task: %s" % tid)

    # ability to inject os.environ values into the command
    if ns.docker_image:
        volumes = [
            mesos_pb2.Volume(host_path=host_path,
                             container_path=container_path,
                             mode=mesos_pb2.Volume.Mode.Value(mode.upper()))
            for host_path, container_path, mode in ns.volumes
        ]
        task.update(container=mesos_pb2.ContainerInfo(
            type=mesos_pb2.ContainerInfo.DOCKER,
            volumes=volumes,
            docker=mesos_pb2.ContainerInfo.DockerInfo(
                image=ns.docker_image,
                force_pull_image=ns.force_pull_image,
                network=mesos_pb2.ContainerInfo.DockerInfo.Network.Value(
                    ns.docker_network),
                parameters=[
                    mesos_pb2.Parameter(key=k, value=v)
                    for k, v in ns.docker_parameters.items()
                ],
            )))

    task = mesos_pb2.TaskInfo(**task)
    _create_task_add_task_resources(task, ns)
    return task

def resourceOffers(self, driver, offers):
    for offer in offers:
        tasks = []
        offerCpus = 0
        offerMem = 0
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value

        # Total resources received in this offer
        print "Received offer %s with cpus: %s and mem: %s" % (
            offer.id.value, offerCpus, offerMem)

        remainingCpus = offerCpus
        remainingMem = offerMem

        # Launch tasks
        while self.tasksLaunched < TOTAL_TASKS and \
                remainingCpus >= TASK_CPUS and \
                remainingMem >= TASK_MEM:
            tid = self.tasksLaunched
            self.tasksLaunched += 1
            print "Launching task %d using offer %s" % (tid, offer.id.value)

            # Set up the task; one executor runs multiple tasks.
            # A TaskInfo must contain at least an ExecutorInfo or a CommandInfo.
            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "task %d" % tid
            task.executor.MergeFrom(self.executor)

            # Resources required by the task
            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = TASK_CPUS

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = TASK_MEM

            tasks.append(task)
            self.taskData[task.task_id.value] = (offer.slave_id,
                                                 task.executor.executor_id)

            remainingCpus -= TASK_CPUS
            remainingMem -= TASK_MEM

        # The operation type is one of LAUNCH/RESERVE/UNRESERVE/CREATE/DESTROY
        operation = mesos_pb2.Offer.Operation()
        operation.type = mesos_pb2.Offer.Operation.LAUNCH
        operation.launch.task_infos.extend(tasks)

        # Accepts the given offers and performs a sequence of operations on those accepted offers.
        driver.acceptOffers([offer.id], [operation])

def start_task(self, driver, offer):
    """Starts a task using the offer, and subtracts any resources used from the offer."""
    tasks = []
    offerCpus = 0
    offerMem = 0
    offerPorts = []
    for resource in offer.resources:
        if resource.name == "cpus":
            offerCpus += resource.scalar.value
        elif resource.name == "mem":
            offerMem += resource.scalar.value
        elif resource.name == "ports":
            for rg in resource.ranges.range:
                # I believe mesos protobuf ranges are inclusive, but range() is exclusive
                offerPorts += range(rg.begin, rg.end + 1)

    remainingCpus = offerCpus
    remainingMem = offerMem
    remainingPorts = set(offerPorts)

    base_task = self.service_config.base_task(self.system_paasta_config)
    base_task.slave_id.value = offer.slave_id.value

    task_mem = self.service_config.get_mem()
    task_cpus = self.service_config.get_cpus()

    while self.need_more_tasks(base_task.name) and \
            remainingCpus >= task_cpus and \
            remainingMem >= task_mem and \
            len(remainingPorts) >= 1:

        task_port = random.choice(list(remainingPorts))

        t = mesos_pb2.TaskInfo()
        t.MergeFrom(base_task)
        tid = "%s.%s" % (t.name, uuid.uuid4().hex)
        t.task_id.value = tid
        t.container.docker.port_mappings[0].host_port = task_port
        for resource in t.resources:
            if resource.name == "ports":
                resource.ranges.range[0].begin = task_port
                resource.ranges.range[0].end = task_port
        tasks.append(t)

        self.tasks_with_flags.setdefault(
            tid,
            MesosTaskParameters(
                health=None,
                mesos_task_state=TASK_STAGING,
            ),
        )

        remainingCpus -= task_cpus
        remainingMem -= task_mem
        remainingPorts -= set([task_port])

    return tasks

def resourceOffers(self, driver, offers):
    print "Got %d resource offers" % len(offers)
    for offer in offers:
        print "Considering resource offer %s from %s" % (offer.id.value, offer.hostname)
        if self.mpdsLaunched > TOTAL_MPDS:
            print "Declining permanently because we have already launched enough tasks"
            driver.declineOffer(offer.id)
            continue

        cpus = 0
        mem = 0
        tasks = []
        for resource in offer.resources:
            if resource.name == "cpus":
                cpus = resource.scalar.value
            elif resource.name == "mem":
                mem = resource.scalar.value

        if cpus < CPUS or mem < MEM:
            print "Declining offer due to too few resources"
            driver.declineOffer(offer.id)
        else:
            tid = self.mpdsLaunched
            self.mpdsLaunched += 1
            print "Accepting offer on %s to start mpd %d" % (offer.hostname, tid)

            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "task %d " % tid

            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = CPUS

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = MEM

            uri = task.command.uris.add()
            uri.value = parser_config.get_option("PATH_TO_DEPLOY_SCRIPT")

            # task.command.value = "python ./deploy_slave.py /mnt/resource_manager_new/"  # parser_config.get_option("TASK_SLAVE_COMMAND").strip('"')
            task.command.value = "nohup mpirun --am dsolver.mca -n 8 -N 4 -machinefile dsolver_machinefile python train.py > training_withmca_4x4.out 2> training_withmca_4x4.err &"  # parser_config.get_option("TASK_SLAVE_COMMAND").strip('"')

            tasks.append(task)
            print "Replying to offer: launching mpd %d on host %s" % (tid, offer.hostname)
            driver.launchTasks(offer.id, tasks)

def base_task(self, system_paasta_config, portMappings=True):
    """Return a TaskInfo protobuf with all the fields corresponding to the
    configuration filled in.

    Does not include task.slave_id or a task.id; those need to be computed
    separately."""
    task = mesos_pb2.TaskInfo()
    task.container.type = mesos_pb2.ContainerInfo.DOCKER
    task.container.docker.image = get_docker_url(
        system_paasta_config.get_docker_registry(), self.get_docker_image())

    for param in self.format_docker_parameters():
        p = task.container.docker.parameters.add()
        p.key = param['key']
        p.value = param['value']

    task.container.docker.network = self.get_mesos_network_mode()

    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    for volume in docker_volumes:
        v = task.container.volumes.add()
        v.mode = getattr(mesos_pb2.Volume, volume['mode'].upper())
        v.container_path = volume['containerPath']
        v.host_path = volume['hostPath']

    task.command.value = self.get_cmd()

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = self.get_cpus()

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = self.get_mem()

    if portMappings:
        pm = task.container.docker.port_mappings.add()
        pm.container_port = self.get_container_port()
        pm.host_port = 0  # will be filled in by tasks_and_state_for_offer()
        pm.protocol = "tcp"

        port = task.resources.add()
        port.name = "ports"
        port.type = mesos_pb2.Value.RANGES
        port.ranges.range.add()
        port.ranges.range[0].begin = 0  # will be filled in by tasks_and_state_for_offer().
        port.ranges.range[0].end = 0  # will be filled in by tasks_and_state_for_offer().

    task.name = self.task_name(task)

    docker_cfg_uri = task.command.uris.add()
    docker_cfg_uri.value = system_paasta_config.get_dockercfg_location()
    docker_cfg_uri.extract = False

    return task

def as_new_mesos_task(self):
    """
    Take the information stored in this Task object and fill a mesos task.
    """
    assert self.task_id, "Calico task must be assigned a task_id"
    assert self.slave_id, "Calico task must be assigned a slave_id"

    task = mesos_pb2.TaskInfo()
    task.name = repr(self)
    task.task_id.value = self.task_id
    task.slave_id.value = self.slave_id

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = TASK_CPUS

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = TASK_MEM

    if not self.default_executor:
        executor = mesos_pb2.ExecutorInfo()
        executor.executor_id.value = "execute Task %s" % self.task_id
        executor.command.value = "python %s" % self.executor_script
        executor.name = "Test Executor for Task %s" % self.task_id
        executor.source = "python_test"
        executor.container.type = mesos_pb2.ContainerInfo.MESOS
        task.executor.MergeFrom(executor)
        self.executor_id = executor.executor_id.value
    else:
        task.container.type = mesos_pb2.ContainerInfo.MESOS

    if self.calico:
        if not self.default_executor:
            network_info = task.executor.container.network_infos.add()
        else:
            network_info = task.container.network_infos.add()

        for netgroup in self.netgroups:
            network_info.groups.append(netgroup)
        for ip in self.requested_ips:
            network_info.ip_addresses.add().ip_address = ip
        for _ in range(self.auto_ipv4):
            network_info.ip_addresses.add().protocol = \
                mesos_pb2.NetworkInfo.IPv4
        for _ in range(self.auto_ipv6):
            # Request IPv6 addresses here (the original assigned IPv4, which
            # appears to be a copy-paste error for the auto_ipv6 loop).
            network_info.ip_addresses.add().protocol = \
                mesos_pb2.NetworkInfo.IPv6
    return task

def resourceOffers(self, driver, offers):
    for offer in offers:
        tasks = []
        offerCpus = 0
        offerMem = 0
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value

        self.log.info("Received offer %s with cpus: %s and mem: %s",
                      offer.id.value, offerCpus, offerMem)

        remainingCpus = offerCpus
        remainingMem = offerMem

        while (not self.task_queue.empty()) and \
                remainingCpus >= self.task_cpu and \
                remainingMem >= self.task_mem:
            key, cmd = self.task_queue.get()
            tid = self.task_counter
            self.task_counter += 1
            self.task_key_map[str(tid)] = key

            self.log.info("Launching task %d using offer %s", tid, offer.id.value)

            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "AirflowTask %d" % tid

            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = self.task_cpu

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = self.task_mem

            command = mesos_pb2.CommandInfo()
            command.shell = True
            command.value = cmd
            task.command.MergeFrom(command)

            tasks.append(task)

            remainingCpus -= self.task_cpu
            remainingMem -= self.task_mem

        driver.launchTasks(offer.id, tasks)

def resourceOffers(self, driver, offers):
    for offer in offers:
        tasks = []
        offerCpus = 0
        offerMem = 0
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value

        print "Received offer %s with cpus: %s and mem: %s" \
            % (offer.id.value, offerCpus, offerMem)

        remainingCpus = offerCpus
        remainingMem = offerMem

        while self.tasksLaunched < TOTAL_TASKS and \
                remainingCpus >= TASK_CPUS and \
                remainingMem >= TASK_MEM:
            tid = self.tasksLaunched
            self.tasksLaunched += 1

            print "Launching task %d using offer %s" \
                % (tid, offer.id.value)

            task = mesos_pb2.TaskInfo()
            task.task_id.value = str(tid)
            task.slave_id.value = offer.slave_id.value
            task.name = "task %d" % tid
            task.executor.MergeFrom(self.executor)

            cpus = task.resources.add()
            cpus.name = "cpus"
            cpus.type = mesos_pb2.Value.SCALAR
            cpus.scalar.value = TASK_CPUS

            mem = task.resources.add()
            mem.name = "mem"
            mem.type = mesos_pb2.Value.SCALAR
            mem.scalar.value = TASK_MEM

            tasks.append(task)
            self.taskData[task.task_id.value] = (
                offer.slave_id, task.executor.executor_id)

            remainingCpus -= TASK_CPUS
            remainingMem -= TASK_MEM

        operation = mesos_pb2.Offer.Operation()
        operation.type = mesos_pb2.Offer.Operation.LAUNCH
        operation.launch.task_infos.extend(tasks)

        driver.acceptOffers([offer.id], [operation])

def test_python_task_decode():
    fn, args, kwargs = sum, [range(5)], {}
    data = (fn, args, kwargs)
    dumped = cloudpickle.dumps(data)

    proto = mesos_pb2.TaskInfo(
        data=dumped,
        labels=mesos_pb2.Labels(labels=[mesos_pb2.Label(key='python')]))
    task = decode(proto)

    assert isinstance(task, PythonTask)
    assert task['data'] == dumped
    assert task.data == data

    proto = mesos_pb2.TaskInfo(
        labels=mesos_pb2.Labels(labels=[mesos_pb2.Label(key='python')]))
    task = decode(proto)
    task.data = data

    assert isinstance(task, PythonTask)
    assert task.data == data
    assert task['data'] == dumped

def _new_task(self, offer, task_cpus, task_mem, task_port):
    """Return a new task with the requested resources."""
    server_id = self._cluster.next_id
    task_id = "mysos-" + self.cluster_name + "-" + str(server_id)

    task = mesos_pb2.TaskInfo()
    task.task_id.value = task_id
    task.slave_id.value = offer.slave_id.value
    task.name = task_id

    task.executor.executor_id.value = task_id  # Use task_id as executor_id.
    task.executor.command.value = self._executor_cmd

    if self._executor_environ:  # Could be 'None' since it's an optional argument.
        executor_environ_ = json.loads(self._executor_environ)
        if executor_environ_:
            for var_ in executor_environ_:
                log.info("Executor will use environment variable: %s" % var_)
                var = task.executor.command.environment.variables.add()
                var.name = var_['name']
                var.value = var_['value']

    uri = task.executor.command.uris.add()
    uri.value = self._executor_uri
    uri.executable = True
    uri.extract = False  # Don't need to decompress pex.

    task.data = json.dumps({
        'framework_user': self._framework_user,
        'host': offer.hostname,
        'port': task_port,
        'cluster': self._cluster.name,
        'cluster_user': self._cluster.user,
        'cluster_password': self._cluster.password,
        'server_id': server_id,  # Use the integer Task ID as the server ID.
        'zk_url': self._zk_url,
        'admin_keypath': self._admin_keypath,
        'installer_args': self._installer_args,
        'backup_store_args': self._backup_store_args,
        'backup_id': self._cluster.backup_id,
    })

    resources = create_resources(task_cpus, task_mem, set([task_port]),
                                 role=self._framework_role)
    task.resources.extend(resources)

    return task

def _makeTask(self, tid, slave_id_value, port):
    print("Creating task " + str(tid))

    # The essentials for defining a task.
    # The task id can be any string unique across all tasks.
    # The slave id is the id of the slave to start the task on (taken from
    # the detail of the offer).
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(tid)
    task.slave_id.value = slave_id_value
    task.name = "task %d" % tid

    # https://github.com/apache/mesos/blob/2985ae05634038b70f974bbfed6b52fe47231418/include/mesos/mesos.proto#L992
    # This is where Docker comes in!
    # All of this section of code is dedicated to passing the correct
    # settings to docker i.e. the image we want to use and port mappings
    task.container.type = task.container.DOCKER
    task.container.docker.image = 'ubuntu:14.04.2'
    task.container.docker.network = task.container.docker.BRIDGE

    portmapping = task.container.docker.port_mappings.add()
    portmapping.host_port = portmapping.container_port = port
    portmapping.protocol = 'tcp'

    # If we commented out all of the docker details above, this task
    # would still work because of the crucial line below telling the task
    # what it actually needs to execute - it just wouldn't run inside a
    # container.
    task.command.value = "echo 'hello {}' | nc -l {}".format(tid, port)

    # Mesos is a resource usage enforcer, so we need to tell it what
    # resources we want when running our task. This includes CPU and memory,
    # but also ports - they too are a resource that can be exhausted!
    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = CPUS_REQUIRED

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = MEM_REQUIRED

    ports = task.resources.add()
    ports.name = "ports"
    ports.type = mesos_pb2.Value.RANGES
    portrange = ports.ranges.range.add()
    portrange.begin = port
    portrange.end = port

    return task

def _grab_offer(self, offer, resource_tag):
    """
    Grabs the offer from mesos and fires tasks.
    """
    offer_cpus = 0
    offer_mem = 0
    agent_ip = offer.url.address.ip
    agent_hostname = offer.hostname

    for resource in offer.resources:
        if resource.name == "cpus":
            offer_cpus += resource.scalar.value
        elif resource.name == "mem":
            offer_mem += resource.scalar.value

    # XXX: we take the complete offer here for now :-P
    # TODO: deal with offers which have 0 cpu in them...
    tid = uuid.uuid4()

    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(tid)
    task.slave_id.value = offer.slave_id.value
    task.name = "OpenLava task %s" % tid  # tid is a UUID, so %s (the original "%d" would raise TypeError)
    task.executor.MergeFrom(self.executor)

    # this is the master host
    task.data = json.dumps({
        'master_hostname': self.master_host,
        'master_ip': self.master_ip,
        'agent_hostname': str(agent_hostname),
        'agent_ip': str(agent_ip)
    })

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = offer_cpus

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = offer_mem

    operation = mesos_pb2.Offer.Operation()
    operation.type = mesos_pb2.Offer.Operation.LAUNCH
    operation.launch.task_infos.extend([task])

    self.accepted_tasks[agent_hostname] = (offer_cpus, resource_tag)
    return operation

def makeTaskPrototype(self, offer):
    task = mesos_pb2.TaskInfo()
    tid = self.tasksCreated
    self.tasksCreated += 1
    task.task_id.value = str(tid).zfill(LEADING_ZEROS_COUNT)
    task.slave_id.value = offer.slave_id.value

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = TASK_CPUS

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = TASK_MEM

    return task

def make_task(thermos_config, assigned_ports={}, **kw):
    role = getpass.getuser()
    task_id = thermos_config.task().name().get() + '-001'
    at = AssignedTask(
        taskId=task_id,
        task=TaskConfig(
            executorConfig=ExecutorConfig(
                name=AURORA_EXECUTOR_NAME,
                data=thermos_config.json_dumps()),
            owner=Identity(role=role, user=role)),
        assignedPorts=assigned_ports,
        **kw)
    td = mesos_pb2.TaskInfo()
    td.task_id.value = task_id
    td.name = thermos_config.task().name().get()
    td.data = serialize(at)
    return td

def new_task(offer):
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(uuid.uuid4())
    task.slave_id.value = offer.slave_id.value
    task.name = "HelloWorld"

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = 0.1

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = 10

    return task

def new_task(offer, name):
    task = mesos_pb2.TaskInfo()
    task.task_id.value = str(uuid.uuid4())
    task.slave_id.value = offer.slave_id.value
    task.name = name

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = TASK_CPUS

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = TASK_MEM

    return task