Example #1
def mesos_cpu_metrics_provider(marathon_service_config, marathon_tasks, mesos_tasks, **kwargs):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time, _ = zk.get(zk_last_time_path)
            last_cpu_data, _ = zk.get(zk_last_cpu_data)
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',') if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    mesos_tasks = {task['id']: task.stats for task in mesos_tasks}
    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    mesos_cpu_data = {
        task_id: float(stats.get('cpus_system_time_secs', 0.0) + stats.get('cpus_user_time_secs', 0.0))
        / (stats.get('cpus_limit', 0) - .1)
        for task_id, stats in mesos_tasks.items()
    }

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError("Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join('%s:%s' % (cpu_seconds, task_id) for task_id, cpu_seconds in mesos_cpu_data.items())

    with ZookeeperPool() as zk:
        zk.ensure_path(zk_last_cpu_data)
        zk.ensure_path(zk_last_time_path)
        zk.set(zk_last_cpu_data, str(cpu_data_csv))
        zk.set(zk_last_time_path, str(current_time))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            utilization[task_id] = (mesos_cpu_data[task_id] - float(last_cpu_seconds)) / time_delta

    if not utilization:
        raise MetricsProviderNoDataError("""The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run.""")

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)

    return mean_utilization
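
A note on the snapshot format: each run writes cumulative, limit-normalized CPU
seconds per task to ZooKeeper as a "cpu_seconds:task_id" CSV, and the next run
diffs against it. A minimal self-contained sketch of that round trip (the task
IDs and numbers below are hypothetical):

from statistics import mean

def serialize_snapshot(cpu_data):
    # cpu_data maps task_id -> cumulative normalized CPU seconds
    return ','.join('%s:%s' % (secs, task_id) for task_id, secs in cpu_data.items())

def utilization_between(last_csv, current, time_delta):
    utilization = {}
    for datum in (d for d in last_csv.split(',') if d):
        last_secs, task_id = datum.split(':')
        if task_id in current:
            utilization[task_id] = (current[task_id] - float(last_secs)) / time_delta
    return mean(utilization.values())

# 60 normalized CPU-seconds consumed per task over 120s -> mean utilization 0.5
last = serialize_snapshot({'task-a': 100.0, 'task-b': 200.0})
now = {'task-a': 160.0, 'task-b': 260.0}
assert utilization_between(last, now, 120) == 0.5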
Example #2
def get_http_utilization_for_all_tasks(marathon_service_config, marathon_tasks, endpoint, json_mapper):
    """
    Gets the mean utilization of a service across all of its tasks by fetching
    json from an http endpoint and applying a function that maps it to a
    utilization

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param endpoint: The http endpoint to get the stats from
    :param json_mapper: A function that takes a dictionary for a task and returns that task's utilization

    :returns: the service's mean utilization, from 0 to 1
    """

    endpoint = endpoint.lstrip('/')
    utilization = []
    service = marathon_service_config.get_service()

    monkey.patch_socket()
    gevent_pool = pool.Pool(20)
    jobs = [
        gevent_pool.spawn(get_http_utilization_for_a_task, task, service, endpoint, json_mapper)
        for task in marathon_tasks
    ]
    gevent.joinall(jobs)

    for job in jobs:
        if job.value is not None:
            utilization.append(job.value)

    if not utilization:
        raise MetricsProviderNoDataError("Couldn't get any data from http endpoint {} for {}.{}".format(
            endpoint, marathon_service_config.service, marathon_service_config.instance,
        ))
    return mean(utilization)
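
The gevent pattern above (a bounded Pool, one greenlet per task, joinall, then
read each job's .value) generalizes to any per-task fetch. A self-contained
sketch with a stand-in worker (fetch_utilization and its inputs are
hypothetical, not part of the original code):

import gevent
from gevent import monkey, pool

monkey.patch_socket()  # make blocking socket calls cooperative

def fetch_utilization(task):
    # stand-in for get_http_utilization_for_a_task; None means "no data"
    return task.get('util')

gevent_pool = pool.Pool(20)  # at most 20 greenlets in flight at once
tasks = [{'util': 0.2}, {'util': 0.8}, {}]
jobs = [gevent_pool.spawn(fetch_utilization, task) for task in tasks]
gevent.joinall(jobs)
values = [job.value for job in jobs if job.value is not None]
print(sum(values) / len(values))  # 0.5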
Example #3
def get_http_utilization_for_all_tasks(marathon_service_config, marathon_tasks, endpoint, json_mapper):
    """
    Gets the mean utilization of a service across all of its tasks by fetching
    json from an http endpoint and applying a function that maps it to a
    utilization

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param endpoint: The http endpoint to get the uwsgi stats from
    :param json_mapper: A function that takes a dictionary for a task and returns that task's utilization

    :returns: the service's mean utilization, from 0 to 1
    """

    endpoint = endpoint.lstrip('/')
    utilization = []
    for task in marathon_tasks:
        try:
            utilization.append(json_mapper(get_json_body_from_service(task.host, task.ports[0], endpoint)))
        except requests.exceptions.Timeout:
            # If we time out querying an endpoint, assume the task is fully loaded.
            # This won't trigger on DNS errors or refused connections; in those
            # cases a requests.exceptions.ConnectionError is raised instead.
            utilization.append(1.0)
            log.debug('Received a timeout when querying %s on %s:%s. Assuming the service is at full utilization.' % (
                marathon_service_config.get_service(), task.host, task.ports[0]))
        except Exception as e:
            log.debug('Caught exception when querying %s on %s:%s : %s' % (
                marathon_service_config.get_service(), task.host, task.ports[0], str(e)))
    if not utilization:
        raise MetricsProviderNoDataError('Couldn\'t get any data from http endpoint %s for %s.%s' % (
            endpoint, marathon_service_config.service, marathon_service_config.instance))
    return mean(utilization)
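
The timeout policy above is the interesting part: a timeout marks the task as
fully loaded (1.0), while connection errors and other failures contribute no
sample at all. A minimal sketch of that policy in isolation (the URL-based
helper below is hypothetical):

import requests

def sample_utilization(url, json_mapper, timeout=2):
    # Returns a utilization in [0, 1], or None if the task yields no signal.
    try:
        return json_mapper(requests.get(url, timeout=timeout).json())
    except requests.exceptions.Timeout:
        return 1.0  # an overloaded task often cannot answer in time
    except requests.exceptions.RequestException:
        return None  # DNS failures / refused connections tell us nothing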
Example #4
async def get_http_utilization_for_all_tasks(
    marathon_service_config, marathon_tasks, endpoint, json_mapper
):
    """
    Gets the mean utilization of a service across all of its tasks by fetching
    json from an http endpoint and applying a function that maps it to a
    utilization

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param endpoint: The http endpoint to get the stats from
    :param json_mapper: A function that takes a dictionary for a task and returns that task's utilization

    :returns: the service's mean utilization, from 0 to 1
    """

    endpoint = endpoint.lstrip("/")
    utilization = []
    service = marathon_service_config.get_service()
    instance = marathon_service_config.get_instance()

    # Using a single aiohttp session reduces the number of errors seen. Launching
    # hundreds of unique sessions seems to increase (timeout) errors.
    # However, using 1 session is slower because the default number of connections
    # is 100, but still seems to be a sane amount.
    async with aiohttp.ClientSession(conn_timeout=10, read_timeout=10) as session:
        futures = [
            asyncio.ensure_future(
                get_http_utilization_for_a_task(
                    task=task,
                    service=service,
                    instance=instance,
                    endpoint=endpoint,
                    json_mapper=json_mapper,
                    session=session,
                )
            )
            for task in marathon_tasks
        ]
        await asyncio.wait(futures)

    for future in futures:
        result = future.result()
        if result is not None:
            utilization.append(result)

    if not utilization:
        raise MetricsProviderNoDataError(
            "Couldn't get any data from http endpoint {} for {}.{}".format(
                endpoint,
                marathon_service_config.service,
                marathon_service_config.instance,
            )
        )
    return mean(utilization)
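
The shared-session detail in Example #4 is worth isolating: one
aiohttp.ClientSession (default pool of 100 connections) serves every task
query, which the comment above credits with fewer timeout errors. A
self-contained sketch of the same fan-out (the URL is hypothetical; newer
aiohttp releases replace conn_timeout/read_timeout with a ClientTimeout
object, which this sketch assumes):

import asyncio
import aiohttp

async def fetch_json(session, url):
    try:
        async with session.get(url) as resp:
            return await resp.json()
    except (aiohttp.ClientError, asyncio.TimeoutError):
        return None

async def fetch_all(urls):
    # One session for all requests: shared connection pool, shared timeouts
    timeout = aiohttp.ClientTimeout(total=10)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        futures = [asyncio.ensure_future(fetch_json(session, u)) for u in urls]
        await asyncio.wait(futures)
    return [f.result() for f in futures if f.result() is not None]

results = asyncio.run(fetch_all(['http://10.0.0.1:31000/status']))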
Example #5
def get_http_utilization_for_all_tasks(marathon_service_config, marathon_tasks,
                                       endpoint, json_mapper):
    """
    Gets the mean utilization of a service across all of its tasks by fetching
    json from an http endpoint and applying a function that maps it to a
    utilization

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param endpoint: The http endpoint to get the uwsgi stats from
    :param json_mapper: A function that takes a dictionary for a task and returns that task's utilization

    :returns: the service's mean utilization, from 0 to 1
    """

    endpoint = endpoint.lstrip('/')
    utilization = []
    for task in marathon_tasks:
        try:
            utilization.append(
                json_mapper(
                    get_json_body_from_service(task.host, task.ports[0],
                                               endpoint)))
        except requests.exceptions.Timeout:
            # If we time out querying an endpoint, assume the task is fully loaded.
            # This won't trigger on DNS errors or refused connections; in those
            # cases a requests.exceptions.ConnectionError is raised instead.
            utilization.append(1.0)
            log.debug(
                'Received a timeout when querying %s on %s:%s. Assuming the service is at full utilization.'
                % (marathon_service_config.get_service(), task.host,
                   task.ports[0]))
        except Exception as e:
            log.debug('Caught exception when querying %s on %s:%s : %s' %
                      (marathon_service_config.get_service(), task.host,
                       task.ports[0], str(e)))
    if not utilization:
        raise MetricsProviderNoDataError(
            'Couldn\'t get any data from http endpoint %s for %s.%s' %
            (endpoint, marathon_service_config.service,
             marathon_service_config.instance))
    return mean(utilization)
Example #6
async def get_http_utilization_for_all_tasks(marathon_service_config,
                                             marathon_tasks, endpoint,
                                             json_mapper):
    """
    Gets the mean utilization of a service across all of its tasks by fetching
    json from an http endpoint and applying a function that maps it to a
    utilization

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param endpoint: The http endpoint to get the stats from
    :param json_mapper: A function that takes a dictionary for a task and returns that task's utilization

    :returns: the service's mean utilization, from 0 to 1
    """

    endpoint = endpoint.lstrip("/")
    utilization = []
    service = marathon_service_config.get_service()

    futures = [
        asyncio.ensure_future(
            get_http_utilization_for_a_task(task, service, endpoint,
                                            json_mapper))
        for task in marathon_tasks
    ]
    await asyncio.wait(futures)

    for future in futures:
        result = future.result()
        if result is not None:
            utilization.append(result)

    if not utilization:
        raise MetricsProviderNoDataError(
            "Couldn't get any data from http endpoint {} for {}.{}".format(
                endpoint,
                marathon_service_config.service,
                marathon_service_config.instance,
            ))
    return mean(utilization)
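
Example #6 drops the shared session of Example #4, so each task query
presumably manages its own connection. For comparison, asyncio.gather can
collapse the ensure_future/wait/result loop into a single call; a sketch with
a hypothetical stand-in coroutine:

import asyncio

async def fetch_one(task):
    # stand-in for get_http_utilization_for_a_task; None means "no data"
    return task.get('util')

async def fetch_all(tasks):
    results = await asyncio.gather(*(fetch_one(t) for t in tasks),
                                   return_exceptions=True)
    return [r for r in results if r is not None and not isinstance(r, Exception)]

print(asyncio.run(fetch_all([{'util': 0.25}, {'util': 0.75}, {}])))  # [0.25, 0.75]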
Example #7
def mesos_cpu_metrics_provider(
    marathon_service_config,
    system_paasta_config,
    marathon_tasks,
    mesos_tasks,
    log_utilization_data={},
    noop=False,
    **kwargs,
):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from
    :param log_utilization_data: A dict used to transfer utilization data to autoscale_marathon_instance()

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = "%s/cpu_last_time" % autoscaling_root
    zk_last_cpu_data = "%s/cpu_data" % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time = zk.get(zk_last_time_path)[0].decode("utf8")
            last_cpu_data = zk.get(zk_last_cpu_data)[0].decode("utf8")
            log_utilization_data[last_time] = last_cpu_data
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(",")
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    futures = [asyncio.ensure_future(task.stats()) for task in mesos_tasks]
    if futures:
        a_sync.block(asyncio.wait, futures, timeout=60)

    def results_or_None(fut):
        if fut.exception():
            return None
        else:
            return fut.result()

    mesos_tasks_stats = dict(
        zip(
            [task["id"] for task in mesos_tasks],
            [results_or_None(fut) for fut in futures],
        ))

    current_time = int(datetime.now().strftime("%s"))
    time_delta = current_time - last_time

    mesos_cpu_data = {}
    for task_id, stats in mesos_tasks_stats.items():
        if stats is not None:
            try:
                utime = float(stats["cpus_user_time_secs"])
                stime = float(stats["cpus_system_time_secs"])
                limit = float(stats["cpus_limit"]) - 0.1
                mesos_cpu_data[task_id] = (stime + utime) / limit
            except KeyError:
                pass

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ",".join(f"{cpu_seconds}:{task_id}"
                            for task_id, cpu_seconds in mesos_cpu_data.items())
    log_utilization_data[str(current_time)] = cpu_data_csv

    if not noop:
        with ZookeeperPool() as zk:
            zk.ensure_path(zk_last_cpu_data)
            zk.ensure_path(zk_last_time_path)
            zk.set(zk_last_cpu_data, str(cpu_data_csv).encode("utf8"))
            zk.set(zk_last_time_path, str(current_time).encode("utf8"))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(":")
        if task_id in mesos_cpu_data:
            cputime_delta = mesos_cpu_data[task_id] - float(last_cpu_seconds)
            utilization[task_id] = cputime_delta / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)
    return mean_utilization
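
The results_or_None pattern in Example #7 (ensure_future, block on
asyncio.wait with a timeout, then map failed futures to None) keeps one
unreachable agent from sinking the whole batch. A standard-library-only sketch
of the same idea, with a hypothetical stats coroutine standing in for
task.stats():

import asyncio

async def stats(task_id):
    if task_id == 'task-b':
        raise RuntimeError('agent unreachable')  # simulate a failed fetch
    return {'cpus_user_time_secs': 1.0}

async def gather_stats(task_ids):
    futures = [asyncio.ensure_future(stats(t)) for t in task_ids]
    if futures:
        await asyncio.wait(futures, timeout=60)
    # Map each task to its result, or None if it failed or never finished
    return {
        t: (fut.result() if fut.done() and not fut.exception() else None)
        for t, fut in zip(task_ids, futures)
    }

print(asyncio.run(gather_stats(['task-a', 'task-b'])))
# {'task-a': {'cpus_user_time_secs': 1.0}, 'task-b': None}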
Example #8
def uwsgi_mapper(json):
    workers = json["workers"]
    utilization = [
        1.0 if worker["status"] != "idle" else 0.0 for worker in workers
    ]
    return mean(utilization)
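
As a usage sketch, a stats payload with one busy and one idle worker maps to
0.5 (the payload below is a trimmed, hypothetical version of what uwsgi's
stats server returns):

stats = {
    "workers": [
        {"id": 1, "status": "busy"},
        {"id": 2, "status": "idle"},
    ],
}
assert uwsgi_mapper(stats) == 0.5  # 1 of 2 workers is non-idle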
Example #9
def mesos_cpu_metrics_provider(
    marathon_service_config,
    system_paasta_config,
    marathon_tasks,
    mesos_tasks,
    log_utilization_data={},
    noop=False,
    **kwargs,
):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from
    :param log_utilization_data: A dict used to transfer utilization data to autoscale_marathon_instance()

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time = zk.get(zk_last_time_path)[0].decode('utf8')
            last_cpu_data = zk.get(zk_last_cpu_data)[0].decode('utf8')
            log_utilization_data[last_time] = last_cpu_data
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',')
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    monkey.patch_socket()
    jobs = [gevent.spawn(task.stats_callable) for task in mesos_tasks]
    gevent.joinall(jobs, timeout=60)
    mesos_tasks = dict(
        zip([task['id'] for task in mesos_tasks], [job.value for job in jobs]))

    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    mesos_cpu_data = {}
    for task_id, stats in mesos_tasks.items():
        if stats is not None:
            try:
                utime = float(stats['cpus_user_time_secs'])
                stime = float(stats['cpus_system_time_secs'])
                limit = float(stats['cpus_limit']) - .1
                mesos_cpu_data[task_id] = (stime + utime) / limit
            except KeyError:
                pass

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join('%s:%s' % (cpu_seconds, task_id)
                            for task_id, cpu_seconds in mesos_cpu_data.items())
    log_utilization_data[str(current_time)] = cpu_data_csv

    if not noop:
        with ZookeeperPool() as zk:
            zk.ensure_path(zk_last_cpu_data)
            zk.ensure_path(zk_last_time_path)
            zk.set(zk_last_cpu_data, str(cpu_data_csv).encode('utf8'))
            zk.set(zk_last_time_path, str(current_time).encode('utf8'))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            cputime_delta = mesos_cpu_data[task_id] - float(last_cpu_seconds)

            if system_paasta_config.get_filter_bogus_mesos_cputime_enabled():
                # The cputime consumed by a task should not exceed the CPU limit
                # we enforce (plus a 10% margin); samples that do are a Mesos bug
                # (tracked in PAASTA-13510) and are discarded.
                cpu_burst_allowance = (
                    1.10 * marathon_service_config.get_cpu_quota() /
                    marathon_service_config.get_cpu_period())
                if cputime_delta > time_delta * cpu_burst_allowance:
                    log.warning(
                        'Ignoring potentially bogus cputime values for task {}'
                        .format(str(task_id)))
                    log.debug(
                        'Elapsed time: {}, Enforced CPU limit: {}, CPU time consumed: {}'
                        .format(
                            time_delta,
                            cpu_burst_allowance,
                            cputime_delta,
                        ))
                    continue

            utilization[task_id] = cputime_delta / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)
    return mean_utilization
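
The filter in Example #9 compares the CPU time a task reported against the
most its CFS quota could have allowed in the sampling window, plus a 10%
margin. A worked sketch of that arithmetic (the quota and period values are
hypothetical; get_cpu_quota and get_cpu_period are assumed to return CFS
microseconds):

# Hypothetical CFS settings: 120000us quota per 100000us period -> 1.2 CPUs
cpu_quota, cpu_period = 120000, 100000
cpu_burst_allowance = 1.10 * cpu_quota / cpu_period   # 1.32 CPU-sec per sec
time_delta = 60                                       # seconds between samples

# No task can legitimately consume more CPU time than this in the window:
max_cputime = time_delta * cpu_burst_allowance        # 79.2 CPU-seconds

for cputime_delta in (45.0, 200.0):
    print(cputime_delta, 'bogus' if cputime_delta > max_cputime else 'ok')
# 45.0 ok, 200.0 bogus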
Example #10
def uwsgi_mapper(json):
    workers = json['workers']
    utilization = [
        1.0 if worker['status'] != 'idle' else 0.0 for worker in workers
    ]
    return mean(utilization)
Example #11
def mesos_cpu_metrics_provider(
    marathon_service_config,
    marathon_tasks,
    mesos_tasks,
    log_utilization_data={},
    noop=False,
    **kwargs,
):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from
    :param log_utilization_data: A dict used to transfer utilization data to autoscale_marathon_instance()

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time = zk.get(zk_last_time_path)[0].decode('utf8')
            last_cpu_data = zk.get(zk_last_cpu_data)[0].decode('utf8')
            log_utilization_data[last_time] = last_cpu_data
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',')
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    monkey.patch_socket()
    jobs = [gevent.spawn(task.stats_callable) for task in mesos_tasks]
    gevent.joinall(jobs, timeout=60)
    mesos_tasks = dict(
        zip([task['id'] for task in mesos_tasks], [job.value for job in jobs]))

    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    mesos_cpu_data = {}
    for task_id, stats in mesos_tasks.items():
        if stats is not None:
            try:
                utime = float(stats['cpus_user_time_secs'])
                stime = float(stats['cpus_system_time_secs'])
                limit = float(stats['cpus_limit']) - .1
                mesos_cpu_data[task_id] = (stime + utime) / limit
            except KeyError:
                pass

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join('%s:%s' % (cpu_seconds, task_id)
                            for task_id, cpu_seconds in mesos_cpu_data.items())
    log_utilization_data[str(current_time)] = cpu_data_csv

    if not noop:
        with ZookeeperPool() as zk:
            zk.ensure_path(zk_last_cpu_data)
            zk.ensure_path(zk_last_time_path)
            zk.set(zk_last_cpu_data, str(cpu_data_csv).encode('utf8'))
            zk.set(zk_last_time_path, str(current_time).encode('utf8'))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            utilization[task_id] = (mesos_cpu_data[task_id] -
                                    float(last_cpu_seconds)) / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)

    return mean_utilization
Example #12
def mesos_cpu_metrics_provider(marathon_service_config, marathon_tasks,
                               mesos_tasks, **kwargs):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time, _ = zk.get(zk_last_time_path)
            last_cpu_data, _ = zk.get(zk_last_cpu_data)
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',')
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    mesos_tasks = {task['id']: task.stats for task in mesos_tasks}
    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    mesos_cpu_data = {
        task_id: float(
            stats.get('cpus_system_time_secs', 0.0) +
            stats.get('cpus_user_time_secs', 0.0)) /
        (stats.get('cpus_limit', 0) - .1)
        for task_id, stats in mesos_tasks.items()
    }

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join('%s:%s' % (cpu_seconds, task_id)
                            for task_id, cpu_seconds in mesos_cpu_data.items())

    with ZookeeperPool() as zk:
        zk.ensure_path(zk_last_cpu_data)
        zk.ensure_path(zk_last_time_path)
        zk.set(zk_last_cpu_data, str(cpu_data_csv))
        zk.set(zk_last_time_path, str(current_time))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            utilization[task_id] = (mesos_cpu_data[task_id] -
                                    float(last_cpu_seconds)) / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)

    return mean_utilization
Example #13
def uwsgi_mapper(json):
    workers = json['workers']
    utilization = [1.0 if worker['status'] != 'idle' else 0.0 for worker in workers]
    return mean(utilization)
Example #14
def mesos_cpu_metrics_provider(
    marathon_service_config,
    system_paasta_config,
    marathon_tasks,
    mesos_tasks,
    log_utilization_data={},
    noop=False,
    **kwargs,
):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from
    :param log_utilization_data: A dict used to transfer utilization data to autoscale_marathon_instance()

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time = zk.get(zk_last_time_path)[0].decode('utf8')
            last_cpu_data = zk.get(zk_last_cpu_data)[0].decode('utf8')
            log_utilization_data[last_time] = last_cpu_data
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',')
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    futures = [asyncio.ensure_future(task.stats()) for task in mesos_tasks]
    if futures:
        a_sync.block(asyncio.wait, futures, timeout=60)

    def results_or_None(fut):
        if fut.exception():
            return None
        else:
            return fut.result()

    mesos_tasks_stats = dict(
        zip([task['id'] for task in mesos_tasks],
            [results_or_None(fut) for fut in futures]))

    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    mesos_cpu_data = {}
    for task_id, stats in mesos_tasks_stats.items():
        if stats is not None:
            try:
                utime = float(stats['cpus_user_time_secs'])
                stime = float(stats['cpus_system_time_secs'])
                limit = float(stats['cpus_limit']) - .1
                mesos_cpu_data[task_id] = (stime + utime) / limit
            except KeyError:
                pass

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join(f'{cpu_seconds}:{task_id}'
                            for task_id, cpu_seconds in mesos_cpu_data.items())
    log_utilization_data[str(current_time)] = cpu_data_csv

    if not noop:
        with ZookeeperPool() as zk:
            zk.ensure_path(zk_last_cpu_data)
            zk.ensure_path(zk_last_time_path)
            zk.set(zk_last_cpu_data, str(cpu_data_csv).encode('utf8'))
            zk.set(zk_last_time_path, str(current_time).encode('utf8'))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            cputime_delta = mesos_cpu_data[task_id] - float(last_cpu_seconds)

            if system_paasta_config.get_filter_bogus_mesos_cputime_enabled():
                # The cputime consumed by a task should not exceed the CPU limit
                # we enforce; samples that do are a Mesos bug (tracked in
                # PAASTA-13510) and are discarded.
                max_cpu_allowed = (
                    1 + marathon_service_config.get_cpu_burst_add() /
                    marathon_service_config.get_cpus())
                task_cpu_usage = cputime_delta / time_delta

                if task_cpu_usage > (max_cpu_allowed * 1.1):
                    log.warning(
                        'Ignoring potentially bogus cpu usage {} for task {}'.format(
                            task_cpu_usage,
                            str(task_id),
                        ))
                    continue

            utilization[task_id] = cputime_delta / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)
    return mean_utilization
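
Example #14 restates the same filter as a usage rate rather than raw CPU time:
the ceiling is 1 plus the burst allowance expressed as a fraction of the base
allocation, with a further 10% margin. A worked sketch with hypothetical
config values:

# Hypothetical config: 2 CPUs allocated, 1 extra CPU of burst allowed
cpus, cpu_burst_add = 2.0, 1.0
max_cpu_allowed = 1 + cpu_burst_add / cpus    # 1.5x the base allocation

cputime_delta, time_delta = 120.0, 60.0
task_cpu_usage = cputime_delta / time_delta   # 2.0x

if task_cpu_usage > max_cpu_allowed * 1.1:    # threshold 1.65 with the margin
    print('ignoring bogus sample')            # 2.0 > 1.65, so this fires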