Code example #1
0
File: test_constraints.py  Project: ycaihua/paasta
def test_check_offer_constraints_returns_true_when_satisfied():
    """All four constraint operators pass while MAX_PER usage is under the
    limit, and the same constraints fail once the MAX_PER count is exceeded."""
    pool_attr = Mock(text=Mock(value='test'))
    # 'name' must go through configure_mock: Mock(name=...) would name the
    # mock object itself instead of setting an attribute called 'name'.
    pool_attr.configure_mock(name='pool')
    fake_offer = Mock(attributes=[pool_attr])
    constraint_list = [
        ['pool', 'MAX_PER', '5'],
        ['pool', 'EQUALS', 'test'],
        ['pool', 'LIKE', 'te.*$'],
        ['pool', 'UNLIKE', 'ta.*'],
    ]
    under_limit = {'MAX_PER': {'pool': {'test': 0}}}
    assert constraints.check_offer_constraints(fake_offer, constraint_list, under_limit) is True
    over_limit = {'MAX_PER': {'pool': {'test': 6}}}
    assert constraints.check_offer_constraints(fake_offer, constraint_list, over_limit) is False
Code example #2
0
def test_check_offer_constraints_returns_true_when_satisfied():
    """A satisfied MAX_PER/EQUALS/LIKE/UNLIKE combination accepts the offer;
    busting the MAX_PER count rejects it."""
    attribute = Mock(text=Mock(value="test"))
    # Mock(name=...) names the mock itself, so the attribute is set afterwards.
    attribute.configure_mock(name="pool")
    mock_offer = Mock(attributes=[attribute])
    pool_constraints = [
        ["pool", "MAX_PER", "5"],
        ["pool", "EQUALS", "test"],
        ["pool", "LIKE", "te.*$"],
        ["pool", "UNLIKE", "ta.*"],
    ]
    for count, expected in ((0, True), (6, False)):
        usage = {"MAX_PER": {"pool": {"test": count}}}
        assert constraints.check_offer_constraints(mock_offer, pool_constraints, usage) is expected
Code example #3
0
    def tasks_and_state_for_offer(
        self,
        driver: MesosSchedulerDriver,
        offer,
        state: ConstraintState,
    ) -> Tuple[List[TaskInfo], ConstraintState]:
        """Pack as many tasks as possible into a single Mesos offer.

        Totals up the cpus, mem and ports the offer provides, then keeps
        carving one task's worth of resources out of it until the needed
        task count is reached, the offer is exhausted, or a placement
        constraint rejects the offer.  The incoming ``state`` is never
        mutated; a deep copy is updated and returned instead.

        :param driver: scheduler driver (not referenced in this method)
        :param offer: the Mesos offer under consideration
        :param state: current placement-constraint bookkeeping
        :returns: tuple of (tasks that fit, updated constraint state)
        :raises ConstraintFailAllTasksError: when constraints were the only
            reason nothing could be scheduled from this offer
        """
        fitting_tasks: List[TaskInfo] = []

        # Tally everything the offer brings to the table.
        available_cpus = 0.0
        available_mem = 0.0
        port_pool: List[int] = []
        for res in offer.resources:
            if res.name == "cpus":
                available_cpus += res.scalar.value
            elif res.name == "mem":
                available_mem += res.scalar.value
            elif res.name == "ports":
                for port_range in res.ranges.range:
                    # Mesos protobuf ranges are inclusive; range() is exclusive.
                    port_pool.extend(range(port_range.begin, port_range.end + 1))

        cpus_left = available_cpus
        mem_left = available_mem
        ports_left = set(port_pool)

        base_task = self.service_config.base_task(self.system_paasta_config)
        # Pin the task template to the agent that made this offer.
        base_task['agent_id']['value'] = offer['agent_id']['value']

        mem_per_task = self.service_config.get_mem()
        cpus_per_task = self.service_config.get_cpus()

        # Work on a private copy so the caller's state stays pristine.
        scratch_state = copy.deepcopy(state)
        attempts = 0
        constraint_rejections = 0
        while self.need_more_tasks(base_task['name'],
                                   self.task_store.get_all_tasks(),
                                   fitting_tasks):
            attempts += 1

            has_room = (cpus_left >= cpus_per_task
                        and mem_left >= mem_per_task
                        and self.offer_matches_pool(offer)
                        and len(ports_left) >= 1)
            if not has_room:
                break

            if not check_offer_constraints(offer, self.constraints,
                                           scratch_state):
                constraint_rejections += 1
                break

            chosen_port = random.choice(list(ports_left))

            new_task = copy.deepcopy(base_task)
            new_task['task_id'] = {
                'value': '{}.{}'.format(new_task['name'], uuid.uuid4().hex),
            }

            # Point both the docker port mapping and the ports resource range
            # at the single chosen host port.
            new_task['container']['docker']['port_mappings'][0][
                'host_port'] = chosen_port
            for res in new_task['resources']:
                if res['name'] == 'ports':
                    res['ranges']['range'][0]['begin'] = chosen_port
                    res['ranges']['range'][0]['end'] = chosen_port

            fitting_tasks.append(new_task)

            cpus_left -= cpus_per_task
            mem_left -= mem_per_task
            ports_left -= {chosen_port}

            update_constraint_state(offer, self.constraints, scratch_state)

        # Surface a constraint failure only when constraints were the sole
        # reason nothing was scheduled from this offer.
        if attempts > 0 and constraint_rejections == attempts:
            raise ConstraintFailAllTasksError

        return fitting_tasks, scratch_state
Code example #4
0
    def tasks_and_state_for_offer(self, driver, offer, state):
        """Returns collection of tasks that can fit inside an offer.

        Totals up the offer's cpus, mem and ports, then repeatedly carves
        one task's worth of resources out of the offer until the needed
        task count is reached, the offer is exhausted, or a placement
        constraint rejects it.  The incoming ``state`` is never mutated;
        a deep copy is updated and returned instead.

        :param driver: scheduler driver (not referenced in this method)
        :param offer: mesos offer protobuf under consideration
        :param state: current placement-constraint bookkeeping
        :returns: tuple of (list of TaskInfo protobufs, new constraint state)
        :raises ConstraintFailAllTasksError: when constraints were the only
            reason no task fit the offer
        """
        tasks = []
        # Sum up everything the offer provides.
        offerCpus = 0
        offerMem = 0
        offerPorts = []
        for resource in offer.resources:
            if resource.name == "cpus":
                offerCpus += resource.scalar.value
            elif resource.name == "mem":
                offerMem += resource.scalar.value
            elif resource.name == "ports":
                for rg in resource.ranges.range:
                    # I believe mesos protobuf ranges are inclusive, but range() is exclusive
                    offerPorts += range(rg.begin, rg.end + 1)
        remainingCpus = offerCpus
        remainingMem = offerMem
        remainingPorts = set(offerPorts)

        base_task = self.service_config.base_task(self.system_paasta_config)
        # Pin the task template to the agent (slave) that made this offer.
        base_task.slave_id.value = offer.slave_id.value

        task_mem = self.service_config.get_mem()
        task_cpus = self.service_config.get_cpus()

        # don't mutate existing state
        new_constraint_state = copy.deepcopy(state)
        total = 0
        failed_constraints = 0
        while self.need_more_tasks(base_task.name, self.tasks_with_flags,
                                   tasks):
            total += 1

            # Stop once the offer can no longer hold another task (cpu,
            # mem, pool membership, or free ports run out).
            if not (remainingCpus >= task_cpus and remainingMem >= task_mem
                    and self.offer_matches_pool(offer)
                    and len(remainingPorts) >= 1):
                break

            # A constraint rejection vetoes the whole offer, not one task.
            if not (check_offer_constraints(offer, self.constraints,
                                            new_constraint_state)):
                failed_constraints += 1
                break

            task_port = random.choice(list(remainingPorts))

            # Clone the protobuf template and give the clone a unique id.
            t = mesos_pb2.TaskInfo()
            t.MergeFrom(base_task)
            tid = "%s.%s" % (t.name, uuid.uuid4().hex)
            t.task_id.value = tid

            # Point both the docker port mapping and the ports resource
            # range at the single chosen host port.
            t.container.docker.port_mappings[0].host_port = task_port
            for resource in t.resources:
                if resource.name == "ports":
                    resource.ranges.range[0].begin = task_port
                    resource.ranges.range[0].end = task_port

            tasks.append(t)

            remainingCpus -= task_cpus
            remainingMem -= task_mem
            remainingPorts -= {task_port}

            update_constraint_state(offer, self.constraints,
                                    new_constraint_state)

        # raise constraint error but only if no other tasks fit/fail the offer
        if total > 0 and failed_constraints == total:
            raise ConstraintFailAllTasksError

        return tasks, new_constraint_state