Example 1
    # Assumes the enclosing test module's imports: mock, bounce_lib from
    # paasta_tools, and ConnectionError/RequestException from requests.exceptions.
    def test_filter_tasks_in_smartstack(self):
        service = "foo"
        nerve_ns = "bar"
        fake_task = mock.Mock(name="fake_task", host="foo", ports=[123456])
        fake_backend = {"svname": "foo_256.256.256.256:123456", "status": "UP"}

        with mock.patch(
                "paasta_tools.smartstack_tools.get_multiple_backends",
                autospec=True,
                return_value=[fake_backend],
        ):
            with mock.patch("socket.gethostbyname",
                            autospec=True,
                            return_value="256.256.256.256"):
                assert [fake_task] == bounce_lib.filter_tasks_in_smartstack(
                    [fake_task], service, nerve_ns,
                    self.fake_system_paasta_config())

        with mock.patch(
                "paasta_tools.smartstack_tools.get_multiple_backends",
                autospec=True,
                return_value=[],
        ):
            with mock.patch("socket.gethostbyname",
                            autospec=True,
                            return_value="256.256.256.256"):
                assert [] == bounce_lib.filter_tasks_in_smartstack(
                    [fake_task], service, nerve_ns,
                    self.fake_system_paasta_config())

        with mock.patch(
                "paasta_tools.bounce_lib.get_registered_marathon_tasks",
                autospec=True,
                # The first call returns a task; the next two raise, which the
                # function under test should turn into an empty result.
                side_effect=[[fake_task], ConnectionError, RequestException],
        ):
            assert [fake_task] == bounce_lib.filter_tasks_in_smartstack(
                [fake_task], service, nerve_ns,
                self.fake_system_paasta_config())
            assert [] == bounce_lib.filter_tasks_in_smartstack(
                [fake_task], service, nerve_ns,
                self.fake_system_paasta_config())
            assert [] == bounce_lib.filter_tasks_in_smartstack(
                [fake_task], service, nerve_ns,
                self.fake_system_paasta_config())
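
What this first example pins down: filter_tasks_in_smartstack should keep a task only when haproxy reports an UP backend matching the task's host:port, and a failed haproxy/synapse query (ConnectionError, RequestException) should yield "no tasks registered" rather than an exception. A minimal sketch of that error-handling contract, assuming the function delegates to the patched get_registered_marathon_tasks; this is illustrative, not the real paasta_tools implementation:

# Illustrative sketch only. The injected callable stands in for
# paasta_tools.bounce_lib.get_registered_marathon_tasks.
from typing import Callable, List, Sequence

from requests.exceptions import ConnectionError, RequestException


def filter_tasks_in_smartstack_sketch(
    tasks: Sequence,
    get_registered: Callable[[Sequence], List],
) -> List:
    try:
        return get_registered(tasks)
    except (ConnectionError, RequestException):
        # A failed query means registration cannot be confirmed,
        # so no task counts as "in smartstack".
        return []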
Example 2
    def test_filter_tasks_in_smartstack_only_calls_n_hosts(self):
        tasks = [
            mock.Mock(health_check_results=[mock.Mock(alive=True)],
                      host=f"fake_host{i}") for i in range(5)
        ]
        with mock.patch(
                "paasta_tools.bounce_lib.get_registered_marathon_tasks",
                return_value=tasks,
                autospec=True,
        ) as get_registered_marathon_tasks_patch:
            actual = bounce_lib.filter_tasks_in_smartstack(
                tasks,
                service="service",
                nerve_ns="nerve_ns",
                system_paasta_config=self.fake_system_paasta_config(),
                max_hosts_to_query=3,
            )
            assert actual == tasks
            assert get_registered_marathon_tasks_patch.call_count == 3
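
The call_count assertion pins down what max_hosts_to_query means: instead of querying haproxy on every host that runs a task, the function queries a bounded sample of hosts and combines the answers, issuing one get_registered_marathon_tasks call per sampled host. A rough sketch of that sampling loop under the same assumption; query_one_host is an illustrative stand-in, not a paasta_tools name:

import random
from typing import Callable, Sequence, Set


def query_sampled_hosts(
    tasks: Sequence,
    max_hosts_to_query: int,
    query_one_host: Callable[[str], Set[str]],
) -> Set[str]:
    # Deduplicate hosts, then cap how many we actually query.
    hosts = sorted({task.host for task in tasks})
    sample = random.sample(hosts, min(max_hosts_to_query, len(hosts)))
    registered: Set[str] = set()
    for host in sample:  # one haproxy query per sampled host
        registered |= query_one_host(host)
    return registered

With five distinct hosts and max_hosts_to_query=3, the loop runs three times, which matches the call_count == 3 assertion above.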
Example 3
# Excerpt from paasta_tools autoscaling code; assumes the enclosing module's
# imports (typing, marathon models, and paasta_tools helpers such as
# format_job_id, is_task_healthy, and load_service_namespace_config).
def filter_autoscaling_tasks(
    marathon_apps: Sequence[MarathonApp],
    all_mesos_tasks: Sequence[Task],
    config: MarathonServiceConfig,
    system_paasta_config: SystemPaastaConfig,
) -> Tuple[Mapping[str, MarathonTask], Sequence[Task]]:
    """Find the tasks that are serving traffic. We care about this because many tasks have a period of high CPU when
    they first start up, during which they warm up code, load and process data, etc., and we don't want this high load
    to drag our overall load estimate upwards. Allowing these tasks to count towards overall load could cause a feedback
    loop: we scale up, the new warming-up containers push load higher, we scale up again, and so on until we hit
    max_instances.

    However, accidentally omitting a task that actually is serving traffic will cause us to underestimate load; this is
    generally much worse than overestimating, since it can cause us to incorrectly scale down or refuse to scale up when
    necessary. For this reason, we look at several sources of health information, and if they disagree, assume the task
    is serving traffic.
    """
    job_id_prefix = "{}{}".format(
        format_job_id(service=config.service, instance=config.instance),
        MESOS_TASK_SPACER,
    )

    # Get a dict of healthy tasks; we assume tasks with no healthcheck defined are healthy.
    # We assume tasks with a defined healthcheck but no healthcheck results to be unhealthy, unless they are "old", in
    # which case we assume that Marathon has screwed up and stopped healthchecking, but that they are healthy.

    log.info("Inspecting %s for autoscaling" % job_id_prefix)

    relevant_tasks_by_app: Dict[MarathonApp, List[MarathonTask]] = {
        app: app.tasks
        for app in marathon_apps
        if app.id.lstrip("/").startswith(job_id_prefix)
    }

    healthy_marathon_tasks: Dict[str, MarathonTask] = {}

    for app, tasks in relevant_tasks_by_app.items():
        for task in tasks:
            if (is_task_healthy(task) or not app.health_checks
                    or is_old_task_missing_healthchecks(task, app)):
                healthy_marathon_tasks[task.id] = task

    service_namespace_config = load_service_namespace_config(
        service=config.service, namespace=config.get_nerve_namespace())
    if service_namespace_config.is_in_smartstack():
        for task in filter_tasks_in_smartstack(
                tasks=[
                    task for tasks in relevant_tasks_by_app.values()
                    for task in tasks
                ],
                service=config.service,
                nerve_ns=config.get_nerve_namespace(),
                system_paasta_config=system_paasta_config,
                max_hosts_to_query=20,
                # Be very liberal. See docstring above for rationale.
                haproxy_min_fraction_up=0.01,
        ):
            healthy_marathon_tasks[task.id] = task

    if not healthy_marathon_tasks:
        raise MetricsProviderNoDataError(
            "Couldn't find any healthy marathon tasks")
    mesos_tasks = [
        task for task in all_mesos_tasks
        if task["id"] in healthy_marathon_tasks
    ]
    return (healthy_marathon_tasks, mesos_tasks)
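
The loop over relevant_tasks_by_app treats a task as healthy if any of three signals says so; restating that predicate on its own makes the liberal bias explicit. The bodies below are hedged stand-ins for paasta_tools' is_task_healthy and is_old_task_missing_healthchecks, and the ten-minute threshold is purely illustrative:

import datetime


def probably_serving_traffic(task, app,
                             max_age=datetime.timedelta(minutes=10)) -> bool:
    if not app.health_checks:
        # No healthcheck defined: assume healthy.
        return True
    if task.health_check_results:
        # Results exist: trust them (every check must report alive).
        return all(result.alive for result in task.health_check_results)
    # Healthcheck defined but no results yet: assume Marathon stopped
    # healthchecking an old-but-fine task rather than the task being sick.
    # Assumes task.started_at is a timezone-aware datetime.
    age = datetime.datetime.now(datetime.timezone.utc) - task.started_at
    return age > max_age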