  def collectMetricStatistics(self, autostack, metric):
    """ Get a sequence of min/max statistics for a given metric from the
    Autostack's instances

    :param autostack: an autostack object
    :type autostack: TODO

    :param metric: a metric object linked to the given autostack
    :type metric: TODO

    :returns: a possibly empty, unordered sequence of InstanceMetricData
      objects, each containing a single MetricRecord object in its `records`
      attribute.
    """
    self._updateInstanceCache((autostack,))

    executionStart = time.time()

    # Initialize the result
    result = tuple()

    # Determine the maximum time range for gathering individual statistics
    timeSlice = self._getMetricStatisticsTimeSlice(period=metric.poll_interval)

    # Create tasks for concurrent execution
    tasks = self._createMetricStatisticsCollectionTasks(
      autostack=autostack,
      metric=metric,
      stats=self._STATS_OF_INTEREST.keys(),
      timeSlice=timeSlice,
      instanceCache=self._instanceCache)

    # Execute the tasks, if any, via worker pool
    numFailedTasks = 0
    sampleInfo = None
    if tasks:
      # TODO: evaluate performance with
      #   chunksize=max(1, len(tasks)//self._WORKER_PROCESS_POOL_SIZE)
      taskResults = self._processPool.map(_collectMetrics, tasks, chunksize=1)

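      # The worker pool must return exactly one result per submitted task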
      assert len(taskResults) == len(tasks), (len(taskResults), len(tasks))

      result, numFailedTasks, sampleInfo = (
        self._processMetricStatisticsResults(
          taskResults=taskResults,
          log=self._log))

    self._log.info(
      "Completed collection of stats for metric=<%s> of autostack=<%s>: "
      "numTasks=%d; numFailedTasks=%s; sampleCounts=%s; numSlices=%s; "
      "duration=%ss",
      getMetricLogPrefix(metric), getAutostackLogPrefix(autostack), len(tasks),
      numFailedTasks, sampleInfo, len(result), time.time() - executionStart)

    return result


  def _processAutostackMetricRequests(self, engine, requests, modelSwapper):
    """ Execute autostack metric requests, aggregate and stream
    collected metric data

    :param engine: SQLAlchemy engine object
    :type engine: sqlalchemy.engine.Engine
    :param requests: sequence of AutostackMetricRequest objects
    :param modelSwapper: model swapper interface used when streaming collected
      metric data to models
    """
    # Start collecting requested metric data
    collectionIter = self._metricGetter.collectMetricData(requests)

    # Aggregate each collection and dispatch to app MetricStreamer
    for metricCollection in collectionIter:
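      # refID indexes back into the requests sequence that produced this
      # collection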
      request = requests[metricCollection.refID]

      metricObj = request.metric
      data = None

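      # Aggregate the per-instance slices, honoring the metric's configured
      # aggregation function when one is defined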
      if metricCollection.slices:
        aggregationFn = getAggregationFn(metricObj)
        if aggregationFn:
          data = aggregate(metricCollection.slices,
                           aggregationFn=aggregationFn)
        else:
          data = aggregate(metricCollection.slices)

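      # Persist collection progress via the metric's last-timestamp field;
      # retry transient database errors and skip metrics that were deleted
      # mid-collection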
      try:
        with engine.connect() as conn:
          repository.retryOnTransientErrors(repository.setMetricLastTimestamp)(
            conn, metricObj.uid, metricCollection.nextMetricTime)
      except ObjectNotFoundError:
        self._log.warning("Processing autostack data collection results for "
                          "unknown model=%s (model deleted?)", metricObj.uid)
        continue

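      # Stream the aggregated rows, if any, to the metric streamer, which
      # forwards them to the metric's model via the model swapper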
      if data:
        try:
          self.metricStreamer.streamMetricData(data,
                                               metricID=metricObj.uid,
                                               modelSwapper=modelSwapper)
        except ObjectNotFoundError:
          # We expect the model to exist, but in the odd case that it has
          # already been deleted, we don't want to crash the process.
          self._log.info("Metric not found when adding data. metric=%s",
                         metricObj.uid)
          continue

        self._log.debug(
          "{TAG:APP.AGG.DATA.PUB} Published numItems=%d for metric=%s; "
          "timeRange=[%sZ-%sZ]; headTS=%sZ; tailTS=%sZ",
          len(data), getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat(),
          data[0][0].isoformat(), data[-1][0].isoformat())

      else:
        self._log.info(
          "{TAG:APP.AGG.DATA.NONE} No data for metric=%s; "
          "timeRange=[%sZ-%sZ]", getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat())