Example #1
    def statusUpdate(self, driver: MesosSchedulerDriver, update: Dict):
        if self.frozen:
            return

        # update tasks
        task_id = update['task_id']['value']
        self.log('Task {} is in state {}'.format(
            task_id,
            update['state'],
        ))

        task_params = self.task_store.update_task(
            task_id,
            mesos_task_state=update['state'],
        )

        if task_params.mesos_task_state not in LIVE_TASK_STATES:
            with self.constraint_state_lock:
                update_constraint_state(
                    task_params.offer,
                    self.constraints,
                    self.constraint_state,
                    step=-1,
                )

        driver.acknowledgeStatusUpdate(update)
        self.kill_tasks_if_necessary(driver)
Example #2
    def statusUpdate(self, driver, update):
        if self.frozen:
            return

        # update tasks
        task_id = update.task_id.value
        state = update.state
        paasta_print("Task %s is in state %s" %
                     (task_id, mesos_pb2.TaskState.Name(state)))

        task_params = self.tasks_with_flags.setdefault(
            task_id, MesosTaskParameters(health=None))
        task_params.mesos_task_state = state

        for task, params in list(self.tasks_with_flags.items()):
            if params.marked_for_gc:
                self.tasks_with_flags.pop(task)

        if task_params.mesos_task_state != TASK_STAGING:
            if self.tasks_with_flags[task_id].staging_timer:
                self.tasks_with_flags[task_id].staging_timer.cancel()
                self.tasks_with_flags[task_id].staging_timer = None

        if task_params.mesos_task_state not in LIVE_TASK_STATES:
            task_params.marked_for_gc = True
            with self.constraint_state_lock:
                update_constraint_state(task_params.offer,
                                        self.constraints,
                                        self.constraint_state,
                                        step=-1)

        driver.acknowledgeStatusUpdate(update)
        self.kill_tasks_if_necessary(driver)
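
Examples #1 and #2 both route per-task bookkeeping through a MesosTaskParameters object: Example #2 creates it with MesosTaskParameters(health=None) and then reads and writes mesos_task_state, marked_for_gc, staging_timer and offer on it. The real class lives in PaaSTA; the stand-in below is only a sketch of the fields these handlers touch, so the bookkeeping above can be followed in isolation.

from typing import Any, Optional


class MesosTaskParameters:
    """Illustrative stand-in: only the fields Examples #1 and #2 read or write."""

    def __init__(self, health: Any = None, mesos_task_state: Optional[str] = None,
                 offer: Any = None) -> None:
        self.health = health
        self.mesos_task_state = mesos_task_state
        self.offer = offer                 # the offer the task was launched from
        self.marked_for_gc = False         # set once the task leaves LIVE_TASK_STATES
        self.staging_timer = None          # cancelled once the task leaves TASK_STAGING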
Example #3
def test_update_constraint_state_increments_counters():
    attr = Mock(text=Mock(value='test'))
    attr.configure_mock(name='pool')
    offer = Mock(attributes=[attr])
    cons = [['pool', 'MAX_PER', '5']]
    state = {}
    constraints.update_constraint_state(offer, cons, state)
    assert state['MAX_PER']['pool']['test'] == 1
Example #4
def test_update_constraint_state_increments_counters():
    attr = Mock(text=Mock(value="test"))
    attr.configure_mock(name="pool")
    offer = Mock(attributes=[attr])
    cons = [["pool", "MAX_PER", "5"]]
    state: constraints.ConstraintState = {}
    constraints.update_constraint_state(offer, cons, state)
    assert state["MAX_PER"]["pool"]["test"] == 1
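
Examples #3 and #4 also pin down the shape of ConstraintState: a nested mapping from constraint operator ('MAX_PER') to attribute name ('pool') to attribute value ('test'), holding a running count. The function below is not the PaaSTA implementation of update_constraint_state, just a minimal sketch of that counting scheme; the step argument mirrors the step=-1 decrement seen in Examples #1 and #2.

from typing import Dict, List

# Assumed shape, inferred from the tests: operator -> attribute name -> attribute value -> count.
ConstraintStateSketch = Dict[str, Dict[str, Dict[str, int]]]


def update_constraint_state_sketch(offer, constraints: List[List[str]],
                                   state: ConstraintStateSketch, step: int = 1) -> None:
    """Illustrative only: bump the counter for each MAX_PER constraint the offer matches."""
    for attr_name, op, _limit in constraints:
        if op != 'MAX_PER':
            continue
        for attr in offer.attributes:
            if attr.name == attr_name:
                counts = state.setdefault(op, {}).setdefault(attr_name, {})
                counts[attr.text.value] = counts.get(attr.text.value, 0) + step

Run against the Mock offer from Example #3, this sketch ends with state['MAX_PER']['pool']['test'] == 1, the same assertion the tests make; a second call with step=-1 would bring the counter back to zero.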
Example #5
    def tasks_and_state_for_offer(
        self,
        driver: MesosSchedulerDriver,
        offer,
        state: ConstraintState,
    ) -> Tuple[List[TaskInfo], ConstraintState]:
        """Returns collection of tasks that can fit inside an offer."""
        tasks: List[TaskInfo] = []
        offerCpus = 0.0
        offerMem = 0.0
        offerPorts: List[int] = []
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value
            elif resource.name == "ports":
                for rg in resource.ranges.range:
                    # I believe mesos protobuf ranges are inclusive, but range() is exclusive
                    offerPorts += range(rg.begin, rg.end + 1)
        remainingCpus = offerCpus
        remainingMem = offerMem
        remainingPorts = set(offerPorts)

        base_task = self.service_config.base_task(self.system_paasta_config)
        base_task['agent_id']['value'] = offer['agent_id']['value']

        task_mem = self.service_config.get_mem()
        task_cpus = self.service_config.get_cpus()

        # don't mutate existing state
        new_constraint_state = copy.deepcopy(state)
        total = 0
        failed_constraints = 0
        while self.need_more_tasks(base_task['name'],
                                   self.task_store.get_all_tasks(), tasks):
            total += 1

            if not (remainingCpus >= task_cpus and remainingMem >= task_mem
                    and self.offer_matches_pool(offer)
                    and len(remainingPorts) >= 1):
                break

            if not (check_offer_constraints(
                    offer,
                    self.constraints,
                    new_constraint_state,
            )):
                failed_constraints += 1
                break

            task_port = random.choice(list(remainingPorts))

            task = copy.deepcopy(base_task)
            task['task_id'] = {
                'value': '{}.{}'.format(task['name'], uuid.uuid4().hex),
            }

            task['container']['docker']['port_mappings'][0]['host_port'] = task_port
            for resource in task['resources']:
                if resource['name'] == 'ports':
                    resource['ranges']['range'][0]['begin'] = task_port
                    resource['ranges']['range'][0]['end'] = task_port

            tasks.append(task)

            remainingCpus -= task_cpus
            remainingMem -= task_mem
            remainingPorts -= {task_port}

            update_constraint_state(offer, self.constraints,
                                    new_constraint_state)

        # raise constraint error but only if no other tasks fit/fail the offer
        if total > 0 and failed_constraints == total:
            raise ConstraintFailAllTasksError

        return tasks, new_constraint_state
Example #6
    def tasks_and_state_for_offer(self, driver, offer, state):
        """Returns collection of tasks that can fit inside an offer."""
        tasks = []
        offerCpus = 0
        offerMem = 0
        offerPorts = []
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value
            elif resource.name == "ports":
                for rg in resource.ranges.range:
                    # I believe mesos protobuf ranges are inclusive, but range() is exclusive
                    offerPorts += range(rg.begin, rg.end + 1)
        remainingCpus = offerCpus
        remainingMem = offerMem
        remainingPorts = set(offerPorts)

        base_task = self.service_config.base_task(self.system_paasta_config)
        base_task.slave_id.value = offer.slave_id.value

        task_mem = self.service_config.get_mem()
        task_cpus = self.service_config.get_cpus()

        # don't mutate existing state
        new_constraint_state = copy.deepcopy(state)
        total = 0
        failed_constraints = 0
        while self.need_more_tasks(base_task.name, self.tasks_with_flags,
                                   tasks):
            total += 1

            if not (remainingCpus >= task_cpus and remainingMem >= task_mem
                    and self.offer_matches_pool(offer)
                    and len(remainingPorts) >= 1):
                break

            if not (check_offer_constraints(offer, self.constraints,
                                            new_constraint_state)):
                failed_constraints += 1
                break

            task_port = random.choice(list(remainingPorts))

            t = mesos_pb2.TaskInfo()
            t.MergeFrom(base_task)
            tid = "%s.%s" % (t.name, uuid.uuid4().hex)
            t.task_id.value = tid

            t.container.docker.port_mappings[0].host_port = task_port
            for resource in t.resources:
                if resource.name == "ports":
                    resource.ranges.range[0].begin = task_port
                    resource.ranges.range[0].end = task_port

            tasks.append(t)

            remainingCpus -= task_cpus
            remainingMem -= task_mem
            remainingPorts -= {task_port}

            update_constraint_state(offer, self.constraints,
                                    new_constraint_state)

        # raise constraint error but only if no other tasks fit/fail the offer
        if total > 0 and failed_constraints == total:
            raise ConstraintFailAllTasksError

        return tasks, new_constraint_state
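
Both variants of tasks_and_state_for_offer return the candidate tasks together with a deep-copied constraint state and leave launching to the caller. The caller below is an assumption, not PaaSTA code (only declineOffer and launchTasks are standard MesosSchedulerDriver calls, and offer.id assumes the protobuf-style offer of Example #6); it shows why the copy matters: the updated counters are adopted only if the tasks are actually launched.

    def launch_tasks_for_offer(self, driver, offer, state):
        """Hypothetical caller: adopt the new constraint state only after launching."""
        try:
            tasks, new_state = self.tasks_and_state_for_offer(driver, offer, state)
        except ConstraintFailAllTasksError:
            driver.declineOffer(offer.id)   # nothing from this offer can satisfy the constraints
            return state
        if not tasks:
            driver.declineOffer(offer.id)
            return state
        driver.launchTasks(offer.id, tasks)
        return new_state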