def filter_autoscaling_tasks(marathon_apps, all_mesos_tasks, config):
    """Collect the healthy Marathon tasks for *config* and their Mesos twins.

    A task counts as healthy when any of these hold: its healthchecks pass,
    its app defines no healthchecks at all, or it is "old" — in which case
    we assume Marathon has screwed up and stopped healthchecking it, but
    that it is actually healthy.

    :param marathon_apps: iterable of MarathonApp objects to scan.
    :param all_mesos_tasks: iterable of Mesos task dicts (keyed by ``'id'``).
    :param config: instance config supplying ``service`` and ``instance``.
    :returns: ``(marathon_tasks, mesos_tasks)`` — a dict of task id to
        MarathonTask, and the list of Mesos tasks whose id is in that dict.
    :raises MetricsProviderNoDataError: when no healthy task is found.
    """
    job_id_prefix = "%s%s" % (
        format_job_id(service=config.service, instance=config.instance),
        MESOS_TASK_SPACER,
    )
    log.info("Inspecting %s for autoscaling" % job_id_prefix)

    healthy_by_id = {}
    for app in marathon_apps:
        for task in app.tasks:
            if not task.id.startswith(job_id_prefix):
                continue
            considered_healthy = (
                is_task_healthy(task)
                or not app.health_checks
                or is_old_task_missing_healthchecks(task, app)
            )
            if considered_healthy:
                healthy_by_id[task.id] = task

    if not healthy_by_id:
        raise MetricsProviderNoDataError("Couldn't find any healthy marathon tasks")

    matching_mesos_tasks = [
        mesos_task for mesos_task in all_mesos_tasks
        if mesos_task['id'] in healthy_by_id
    ]
    return (healthy_by_id, matching_mesos_tasks)
def get_happy_tasks(
    app: MarathonApp,
    service: str,
    nerve_ns: str,
    system_paasta_config: SystemPaastaConfig,
    min_task_uptime: Optional[float] = None,
    check_haproxy: bool = False,
    haproxy_min_fraction_up: float = 1.0,
) -> List[MarathonTask]:
    """Return the subset of *app*'s tasks that are considered healthy.

    With the defaults this keeps tasks where at least one defined Marathon
    healthcheck passes. For it to do anything interesting, set
    ``min_task_uptime`` or ``check_haproxy``.

    :param app: A MarathonApp object.
    :param service: The name of the service.
    :param nerve_ns: The nerve namespace.
    :param min_task_uptime: Minimum number of seconds a task must have been
        running before it can be considered healthy. Useful if tasks take a
        while to start up.
    :param check_haproxy: Whether to check the local haproxy to make sure
        this task has been registered and discovered.
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    happy: List[MarathonTask] = []

    for task in app.tasks:
        # A task that never started cannot be healthy.
        if task.started_at is None:
            continue
        if min_task_uptime is not None:
            uptime = (now - task.started_at).total_seconds()
            if uptime < min_task_uptime:
                continue
        # Healthchecks are defined but none have executed yet: unhappy —
        # UNLESS the task is "old" and Marathon forgot about its
        # healthcheck due to a leader election, in which case treat it as
        # happy.
        no_results_yet = (
            len(app.health_checks) > 0 and len(task.health_check_results) == 0
        )
        if no_results_yet and not marathon_tools.is_old_task_missing_healthchecks(task, app):
            continue
        # If there are healthcheck results, at least one must be passing.
        if not marathon_tools.is_task_healthy(task, require_all=False, default_healthy=True):
            continue
        happy.append(task)

    if not check_haproxy:
        return happy
    return filter_tasks_in_smartstack(
        happy,
        service,
        nerve_ns,
        system_paasta_config,
        haproxy_min_fraction_up=haproxy_min_fraction_up,
    )
def autoscale_services(soa_dir=DEFAULT_SOA_DIR):
    """Run one autoscaling pass over every service configured to scale.

    Holds the cluster-wide autoscaling lock for the duration of the run;
    if another run already holds it, this one is skipped with a warning.
    Failures for an individual service are logged to that service's log
    and do not abort the rest of the pass.

    :param soa_dir: directory to read service configs from.
    """
    try:
        with create_autoscaling_lock():
            cluster = load_system_paasta_config().get_cluster()
            configs = get_configs_of_services_to_scale(cluster=cluster, soa_dir=soa_dir)
            if configs:
                marathon_config = load_marathon_config()
                marathon_client = get_marathon_client(
                    url=marathon_config.get_url(),
                    user=marathon_config.get_username(),
                    passwd=marathon_config.get_password(),
                )
                all_marathon_tasks = marathon_client.list_tasks()
                all_mesos_tasks = get_all_running_tasks()
                with ZookeeperPool():
                    for config in configs:
                        try:
                            job_id = config.format_marathon_app_dict()['id']
                            log.info("Inspecting %s for autoscaling" % job_id)
                            # Build a dict of healthy tasks. A task with no
                            # healthcheck defined is assumed healthy; one
                            # with a defined healthcheck but no results is
                            # assumed unhealthy unless it is "old", in
                            # which case we assume Marathon has screwed up
                            # and stopped healthchecking but the task is
                            # healthy.
                            marathon_tasks = {}
                            for task in all_marathon_tasks:
                                if not task.id.startswith(job_id):
                                    continue
                                if (is_task_healthy(task)
                                        or not marathon_client.get_app(task.app_id).health_checks
                                        or is_old_task_missing_healthchecks(task, marathon_client)):
                                    marathon_tasks[task.id] = task
                            if not marathon_tasks:
                                raise MetricsProviderNoDataError(
                                    "Couldn't find any healthy marathon tasks")
                            mesos_tasks = [
                                task for task in all_mesos_tasks
                                if task['id'] in marathon_tasks
                            ]
                            autoscale_marathon_instance(
                                config, list(marathon_tasks.values()), mesos_tasks)
                        except Exception as e:
                            # Best-effort per service: record the failure
                            # and keep scaling the remaining services.
                            write_to_log(config=config, line='Caught Exception %s' % e)
    except LockHeldException:
        log.warning(
            "Skipping autoscaling run for services because the lock is held")
def filter_autoscaling_tasks(marathon_client, all_marathon_tasks, all_mesos_tasks, config):
    """Return the healthy Marathon tasks for *config* and their Mesos tasks.

    A task counts as healthy when any of these hold: its healthchecks pass,
    its app defines no healthchecks at all, or it is "old" — in which case
    we assume Marathon has screwed up and stopped healthchecking it, but
    that it is actually healthy.

    :param marathon_client: client used to look up each task's app.
    :param all_marathon_tasks: every Marathon task in the cluster.
    :param all_mesos_tasks: every running Mesos task dict (keyed by ``'id'``).
    :param config: instance config whose formatted app dict's ``id``
        prefixes the task ids we care about.
    :returns: ``(marathon_tasks, mesos_tasks)`` — a dict of task id to
        MarathonTask, and the list of Mesos tasks whose id is in that dict.
    :raises MetricsProviderNoDataError: when no healthy task is found.
    """
    job_id = config.format_marathon_app_dict()['id']
    log.info("Inspecting %s for autoscaling" % job_id)

    # get_app() is a Marathon API round-trip. The original code issued it
    # once per *task*, so N tasks of the same app made N identical calls;
    # memoize per app_id instead. (The lookup is still lazy: it only runs
    # when is_task_healthy() is falsy, preserving the short-circuit order.)
    app_cache = {}

    def _app_health_checks(app_id):
        # One get_app per distinct app_id for the lifetime of this call.
        if app_id not in app_cache:
            app_cache[app_id] = marathon_client.get_app(app_id)
        return app_cache[app_id].health_checks

    marathon_tasks = {}
    for task in all_marathon_tasks:
        if not task.id.startswith(job_id):
            continue
        if (is_task_healthy(task)
                or not _app_health_checks(task.app_id)
                or is_old_task_missing_healthchecks(task, marathon_client)):
            marathon_tasks[task.id] = task

    if not marathon_tasks:
        raise MetricsProviderNoDataError("Couldn't find any healthy marathon tasks")
    mesos_tasks = [task for task in all_mesos_tasks if task['id'] in marathon_tasks]
    return (marathon_tasks, mesos_tasks)
def filter_autoscaling_tasks(
    marathon_apps: Sequence[MarathonApp],
    all_mesos_tasks: Sequence[Task],
    config: MarathonServiceConfig,
    system_paasta_config: SystemPaastaConfig,
) -> Tuple[Mapping[str, MarathonTask], Sequence[Task]]:
    """Find the tasks that are serving traffic.

    We care about this because many tasks have a period of high CPU right
    after start-up (warming up code, loading data, etc.), and counting
    that load would drag the overall estimate upwards — potentially a
    feedback loop of scale-ups until max_instances.

    On the other hand, accidentally omitting a task that IS serving
    traffic underestimates load, which is generally much worse: it can
    make us scale down, or refuse to scale up, when we shouldn't. So we
    consult several sources of health information and, if they disagree,
    assume the task is serving traffic.

    Healthiness rules: a task with no healthcheck defined is assumed
    healthy; one with a defined healthcheck but no results is assumed
    unhealthy, unless it is "old" — then we assume Marathon has screwed
    up and stopped healthchecking but the task is healthy.
    """
    job_id_prefix = "{}{}".format(
        format_job_id(service=config.service, instance=config.instance),
        MESOS_TASK_SPACER,
    )
    log.info("Inspecting %s for autoscaling" % job_id_prefix)

    # Tasks grouped by the app they belong to, restricted to this job.
    relevant_tasks_by_app: Dict[MarathonApp, List[MarathonTask]] = {}
    for app in marathon_apps:
        if app.id.lstrip("/").startswith(job_id_prefix):
            relevant_tasks_by_app[app] = app.tasks

    healthy_marathon_tasks: Dict[str, MarathonTask] = {}
    for app, tasks in relevant_tasks_by_app.items():
        for task in tasks:
            looks_healthy = (
                is_task_healthy(task)
                or not app.health_checks
                or is_old_task_missing_healthchecks(task, app)
            )
            if looks_healthy:
                healthy_marathon_tasks[task.id] = task

    # Second opinion: anything haproxy reports as even slightly up is
    # serving traffic, regardless of what Marathon thinks.
    service_namespace_config = load_service_namespace_config(
        service=config.service, namespace=config.get_nerve_namespace())
    if service_namespace_config.is_in_smartstack():
        all_relevant_tasks = [
            task
            for tasks in relevant_tasks_by_app.values()
            for task in tasks
        ]
        in_smartstack = filter_tasks_in_smartstack(
            tasks=all_relevant_tasks,
            service=config.service,
            nerve_ns=config.get_nerve_namespace(),
            system_paasta_config=system_paasta_config,
            max_hosts_to_query=20,
            # Be very liberal. See docstring above for rationale.
            haproxy_min_fraction_up=0.01,
        )
        for task in in_smartstack:
            healthy_marathon_tasks[task.id] = task

    if not healthy_marathon_tasks:
        raise MetricsProviderNoDataError("Couldn't find any healthy marathon tasks")

    mesos_tasks = [
        task for task in all_mesos_tasks
        if task["id"] in healthy_marathon_tasks
    ]
    return (healthy_marathon_tasks, mesos_tasks)