Example No. 1
def getAggregationFn(metric):
  """ Return the aggregation function to use for the given metric.

  :returns: the built-in ``sum`` if the metric's query parameters specify the
      "Sum" statistic; otherwise ``None``
  """
  fn = None

  slaveDatasource = AutostackMetricAdapterBase.getMetricDatasource(metric)
  metricAdapter = AutostackMetricAdapterBase.getMetricAdapter(slaveDatasource)
  query = metricAdapter.getQueryParams(metric.name)

  if "statistics" in query and query["statistics"] == "Sum":
    fn = sum

  return fn
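
A minimal usage sketch (not taken from the source): the helper below and its average fallback are assumptions showing how the returned aggregation function might be applied to one value per instance.

def aggregateSliceValues(metric, values):
  """ Hypothetical helper (illustration only): reduce one value per instance
  into a single datapoint using the metric's aggregation function; the simple
  average used when getAggregationFn returns None is an assumed default.
  """
  aggFn = getAggregationFn(metric)
  if aggFn is not None:
    return aggFn(values)  # e.g., sum when the query statistic is "Sum"
  return sum(values) / len(values) if values else None
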
Example No. 2
  def monitorMetric(self, modelSpec):
    """ Start monitoring a metric; create a model linked to an existing
    Autostack

    :param modelSpec: model specification for an Autostack-based model
    :type modelSpec: dict

    ::

        {
          "datasource": "autostack",

          "metricSpec": {
            # TODO [MER-3533]: This should be autostack name instead
            "autostackId": "a858c6990a444cd8a07466ec7f3cae16",

            "slaveDatasource": "cloudwatch",

            "slaveMetric": {
              # specific to slaveDatasource
              "namespace": "AWS/EC2",
              "metric": "CPUUtilization"
            },

            "period": 300  # aggregation period; seconds
          },

          "modelParams": { # optional; specific to slave metric
            "min": 0,  # optional
            "max": 100  # optional
          }
        }

    :returns: datasource-specific unique model identifier

    :raises YOMP.app.exceptions.ObjectNotFoundError: if referenced autostack
      doesn't exist

    :raises YOMP.app.exceptions.MetricNotSupportedError: if requested metric
      isn't supported

    :raises YOMP.app.exceptions.MetricAlreadyMonitored: if the metric is already
      being monitored
    """
    metricSpec = modelSpec["metricSpec"]
    autostackId = metricSpec["autostackId"]
    with self.connectionFactory() as conn:
      autostack = repository.getAutostack(conn, autostackId)

    slaveDatasource = metricSpec["slaveDatasource"]
    slaveMetric = metricSpec["slaveMetric"]

    canonicalResourceName = self.getInstanceNameForModelSpec(modelSpec)

    metricAdapter = AutostackMetricAdapterBase.getMetricAdapter(slaveDatasource)
    nameColumnValue = metricAdapter.getMetricName(slaveMetric)
    metricDescription = metricAdapter.getMetricDescription(slaveMetric,
                                                           autostack)
    queryParams = metricAdapter.getQueryParams(nameColumnValue)

    defaultMin = queryParams["min"]
    defaultMax = queryParams["max"]
    defaultPeriod = queryParams["period"]

    modelParams = modelSpec.get("modelParams", dict())
    explicitMin = modelParams.get("min")
    explicitMax = modelParams.get("max")
    explicitPeriod = metricSpec.get("period")
    if (explicitMin is None) != (explicitMax is None):
      raise ValueError(
        "min and max params must both be None or non-None; modelSpec=%r"
        % (modelSpec,))

    minVal = explicitMin if explicitMin is not None else defaultMin
    maxVal = explicitMax if explicitMax is not None else defaultMax
    period = explicitPeriod if explicitPeriod is not None else defaultPeriod
    stats = {"min": minVal, "max": maxVal}

    if minVal is None or maxVal is None:
      raise ValueError("Expected min and max to be set")

    swarmParams = scalar_metric_utils.generateSwarmParams(stats)

    @repository.retryOnTransientErrors
    def startMonitoringWithRetries():
      """
      :returns: metricId
      """
      with self.connectionFactory() as conn:
        with conn.begin():
          repository.lockOperationExclusive(conn,
                                            repository.OperationLock.METRICS)

          # Check if the metric is already monitored
          matchingMetrics = repository.getAutostackMetricsWithMetricName(
            conn,
            autostackId,
            nameColumnValue,
            fields=[schema.metric.c.uid])

          matchingMetric = next(iter(matchingMetrics), None)

          if matchingMetric:
            msg = ("monitorMetric: Autostack modelId=%s is already "
                   "monitoring metric=%s on resource=%s; model=%r"
                   % (matchingMetric.uid, nameColumnValue,
                      canonicalResourceName, matchingMetric))
            self._log.warning(msg)
            raise YOMP.app.exceptions.MetricAlreadyMonitored(
                    msg,
                    uid=matchingMetric.uid)

          # Add a metric row for the requested metric
          metricDict = repository.addMetric(
            conn,
            datasource=self._DATASOURCE,
            name=nameColumnValue,
            description=metricDescription,
            server=canonicalResourceName,
            location=autostack.region,
            tag_name=autostack.name,
            parameters=htmengine.utils.jsonEncode(modelSpec),
            poll_interval=period,
            status=MetricStatus.UNMONITORED)

          metricId = metricDict["uid"]

          repository.addMetricToAutostack(conn, autostackId, metricId)

          # Start monitoring
          scalar_metric_utils.startMonitoring(
            conn=conn,
            metricId=metricId,
            swarmParams=swarmParams,
            logger=self._log)

      self._log.info("monitorMetric: monitoring metric=%s, stats=%r",
                     metricId, stats)

      return metricId

    return startMonitoringWithRetries()
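
For reference, a minimal calling sketch assembled from the modelSpec layout documented in the docstring above; the adapter instance and the autostackId value are placeholders, not taken from the source.

# Hypothetical usage: "adapter" is assumed to be an already-constructed
# instance of the datasource adapter class that defines monitorMetric.
modelSpec = {
  "datasource": "autostack",
  "metricSpec": {
    "autostackId": "a858c6990a444cd8a07466ec7f3cae16",  # placeholder id
    "slaveDatasource": "cloudwatch",
    "slaveMetric": {
      "namespace": "AWS/EC2",
      "metric": "CPUUtilization"
    },
    "period": 300  # aggregation period in seconds
  },
  "modelParams": {"min": 0, "max": 100}  # optional bounds
}

metricId = adapter.monitorMetric(modelSpec)
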
Example No. 3
  def _createMetricDataCollectionTasks(cls, requests, instanceCache):
    """ Create tasks to be executed concurrently from the given collection
    requests.

    :param requests: Metric collection requests
    :type requests: A sequence of AutostackMetricRequest objects

    :param instanceCache: Autostack instance cache. All Autostacks referenced in
                          requests are expected to be present in the instance cache
    :type instanceCache: a dict, where each key is an Autostack uid and the
                         corresponding value is an _InstanceCacheValue object

    :returns: data collection tasks and request
        refID-to-_MetricCollectionAccumulator mappings
    :rtype: A two-tuple:
        The first element is a sequence of _MetricCollectionTask objects with
        refID values from the corresponding AutostackMetricRequest objects;
        The second element is a dict of the
        refID-to-_MetricCollectionAccumulator mappings. The refID values are the
        ones provided by the user in the corresponding AutostackMetricRequest
        objects.
    """
    accumulatorMap = dict()
    tasks = []
    for request in requests:
      refID = request.refID
      autostack = request.autostack
      metric = request.metric
      period = metric.poll_interval
      slaveDatasource = AutostackMetricAdapterBase.getMetricDatasource(metric)

      if slaveDatasource == "autostacks":
        timeRange = cls._getMetricCollectionTimeSliceForAutostackMetric(
          period=period)
      else:
        timeRange = cls._getMetricCollectionTimeSlice(
          startTime=metric.last_timestamp,
          period=period)

      instanceCacheItem = instanceCache[autostack.uid]

      region = autostack.region

      metricAdapter = AutostackMetricAdapterBase.getMetricAdapter(
        slaveDatasource)
      queryParams = metricAdapter.getQueryParams(metric.name)
      metricName = metric.name.split("/")[-1]
      stats = queryParams["statistics"]
      unit = queryParams["unit"]

      # Generate metric data collection tasks for the current request
      for instance in instanceCacheItem.instances:
        # instance is an aggregator_instances.InstanceInfo object
        task = _MetricCollectionTask(
          refID=refID,
          metricID=metric.uid,
          region=region,
          instanceID=instance.instanceID,
          metricName=metricName,
          stats=stats,
          unit=unit,
          period=period,
          timeRange=timeRange)

        tasks.append(task)

      # Create the metric collection accumulator for the current request
      assert refID not in accumulatorMap
      accumulatorMap[refID] = _MetricCollectionAccumulator(
        expectedNumSlices=len(instanceCacheItem.instances),
        collection=MetricCollection(
          refID=refID, slices=[], timeRange=timeRange,
          nextMetricTime=timeRange.end))

    return tasks, accumulatorMap
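
A sketch of how the returned two-tuple might be consumed; collectMetricSlice and the surrounding driver loop are assumptions for illustration, while the refID matching and the expectedNumSlices check follow the structures built above.

# Hypothetical driver (illustration only), assumed to run in another
# classmethod of the same class with "requests" and "instanceCache" in scope:
# dispatch each task, then route the collected slice to the accumulator
# registered under the task's refID.
tasks, accumulatorMap = cls._createMetricDataCollectionTasks(requests,
                                                             instanceCache)

for task in tasks:
  sliceData = collectMetricSlice(task)  # assumed worker; one slice per task
  accumulator = accumulatorMap[task.refID]
  accumulator.collection.slices.append(sliceData)

# A request's collection is complete once every instance contributed a slice
completed = [acc.collection for acc in accumulatorMap.values()
             if len(acc.collection.slices) == acc.expectedNumSlices]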