def GET(self): # pylint: disable=R0201
    """
    Describe Cloudwatch datasource, listing all supported regions, namespaces
    and metrics

      ::

          GET /_metrics/cloudwatch

      Returns:

      ::

        {
            'regions': { 'region-name': 'region-description',...},
            'namespaces': {
                'namespace-name': {
                    'metrics': ['metric-name',...],
                    'dimensions': ['dimension-name',...]
                }, ....
            }
        }
    """
    web.header('Content-Type', 'application/json; charset=UTF-8', True)
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    resources = adapter.describeSupportedMetrics()

    return utils.jsonEncode(
      {"regions": dict(adapter.describeRegions()),
       "namespaces": _translateResourcesIntoNamespaces(resources)})
Example #2
    def GET(self):  # pylint: disable=R0201
        """
      Returns list of supported Cloudwatch regions

      ::

          GET /_metrics/cloudwatch/regions

      Returns:

      ::

          { 'region-name': 'region-description',...}

      Sample output:

      ::

          {
            "ap-northeast-1": "Asia Pacific (Tokyo) Region",
            "ap-southeast-1": "Asia Pacific (Singapore) Region",
            "ap-southeast-2": "Asia Pacific (Sydney) Region",
            "eu-west-1": "EU (Ireland) Region",
            "sa-east-1": "South America (Sao Paulo) Region",
            "us-east-1": "US East (Northern Virginia) Region",
            "us-west-1": "US West (Northern California) Region",
            "us-west-2": "US West (Oregon) Region"
          }
    """
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        AuthenticatedBaseHandler.addStandardHeaders()
        return utils.jsonEncode(dict(adapter.describeRegions()))
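
A hedged usage sketch of exercising this regions handler with the webtest
fixtures used by the other tests in these examples (self.app and self.headers
from setUp); the route and the test method name are assumptions, with the path
taken relative to the cloudwatch API app as in testGETSpecificInstanceFromRegion.

def testGETRegions(self):  # sketch only; route and fixtures are assumed
  response = self.app.get("/regions", headers=self.headers)
  assertions.assertSuccess(self, response)
  regions = app_utils.jsonDecode(response.body)
  self.assertIsInstance(regions, dict)
  self.assertIn("us-west-2", regions)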
  def _checkModelExportImport(self, modelSpec):

    def checkExportSpec(exportSpec):
      self.assertEqual(exportSpec, modelSpec)

    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    modelId = self._monitorMetric(adapter, modelSpec)
    try:
      # Export
      _LOG.info("Exporting model")
      exportSpec = adapter.exportModel(modelId)
      checkExportSpec(exportSpec)

      # Unmonitor
      _LOG.info("Unmonitoring")
      adapter.unmonitorMetric(modelId)
      self.checkModelDeleted(modelId)

      # Import
      _LOG.info("Importing")
      modelId = adapter.importModel(exportSpec)
      self._runBasicChecksOnModel(modelId, adapter, modelSpec)

      # Export again
      _LOG.info("Exporting again")
      exportSpec = adapter.exportModel(modelId)
      checkExportSpec(exportSpec)
    except:
      # Clean up, then re-raise the original error so the caller still sees it
      try:
        adapter.unmonitorMetric(modelId)
      except app_exceptions.ObjectNotFoundError:
        pass
      raise
    else:
      adapter.unmonitorMetric(modelId)
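
For reference, a modelSpec of the shape this helper expects might look like the
one built in testCollectAndPublishMetrics further down; the instance id here is
a placeholder.

# Illustrative only; the InstanceId value is a placeholder.
modelSpec = {
  "datasource": "cloudwatch",
  "metricSpec": {
    "region": "us-west-2",
    "namespace": "AWS/EC2",
    "metric": "CPUUtilization",
    "dimensions": {"InstanceId": "i-12345678"}
  }
}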
    def testDescribeSupportedMetrics(self):
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        supportedMetricDescriptions = adapter.describeSupportedMetrics()
        _LOG.info("Got %d supported metric description resource groups",
                  len(supportedMetricDescriptions))

        self.assertIsInstance(supportedMetricDescriptions, dict)

        self.assertItemsEqual(supportedMetricDescriptions.keys(),
                              self._expectedResourceTypes)

        for value in supportedMetricDescriptions.itervalues():
            self.assertIsInstance(value, dict)
            for metricName, metricInfo in value.iteritems():
                self.assertIsInstance(metricName, basestring)

                self.assertItemsEqual(metricInfo.keys(),
                                      ["namespace", "dimensionGroups"])
                self.assertIsInstance(metricInfo["namespace"], basestring)
                self.assertIsInstance(metricInfo["dimensionGroups"], tuple)
                for dimensionGroup in metricInfo["dimensionGroups"]:
                    self.assertIsInstance(dimensionGroup, tuple)
                    for dimensionName in dimensionGroup:
                        self.assertIsInstance(dimensionName, basestring)
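
Putting the assertions above together, a single describeSupportedMetrics()
entry might look roughly like the following; the metric and dimension names are
only illustrative.

# Hypothetical shape consistent with the assertions in the test above.
supportedMetricDescriptions = {
    ResourceTypeNames.EC2_INSTANCE: {
        "CPUUtilization": {
            "namespace": "AWS/EC2",
            "dimensionGroups": (("InstanceId",),)
        }
    }
}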
    def _checkModelExportImport(self, modelSpec):
        def checkExportSpec(exportSpec):
            self.assertEqual(exportSpec, modelSpec)

        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        modelId = self._monitorMetric(adapter, modelSpec)
        try:
            # Export
            _LOG.info("Exporting model")
            exportSpec = adapter.exportModel(modelId)
            checkExportSpec(exportSpec)

            # Unmonitor
            _LOG.info("Unmonitoring")
            adapter.unmonitorMetric(modelId)
            self.checkModelDeleted(modelId)

            # Import
            _LOG.info("Importing")
            modelId = adapter.importModel(exportSpec)
            self._runBasicChecksOnModel(modelId, adapter, modelSpec)

            # Export again
            _LOG.info("Exporting again")
            exportSpec = adapter.exportModel(modelId)
            checkExportSpec(exportSpec)
        except:
            # Clean up, then re-raise the original error so the caller still sees it
            try:
                adapter.unmonitorMetric(modelId)
            except app_exceptions.ObjectNotFoundError:
                pass
            raise
        else:
            adapter.unmonitorMetric(modelId)
  def testDescribeRegions(self):
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    regionSpecs = adapter.describeRegions()
    _LOG.info("got %d region descriptions", len(regionSpecs))

    self.assertIsInstance(regionSpecs, tuple)

    expectedRegionNames = (
      "ap-northeast-1",
      "ap-southeast-1",
      "ap-southeast-2",
      "eu-west-1",
      "sa-east-1",
      "us-east-1",
      "us-west-1",
      "us-west-2"
    )

    regionNames = tuple(name for name, description in regionSpecs)

    self.assertItemsEqual(regionNames, expectedRegionNames)

    for item in regionSpecs:
      self.assertIsInstance(item, tuple)
      self.assertEqual(len(item), 2)
      name, description = item
      self.assertIsInstance(name, basestring)
      self.assertIsInstance(description, basestring)
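
Cross-checking with the sample output documented for the regions endpoint
above, each element of regionSpecs is a (name, description) pair, for example:

# Example element of regionSpecs, taken from the documented sample output.
("us-west-2", "US West (Oregon) Region")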
  def testDescribeSupportedMetrics(self):
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    supportedMetricDescriptions = adapter.describeSupportedMetrics()
    _LOG.info("Got %d supported metric description resource groups",
               len(supportedMetricDescriptions))

    self.assertIsInstance(supportedMetricDescriptions, dict)

    self.assertItemsEqual(
      supportedMetricDescriptions.keys(),
      self._expectedResourceTypes)

    for value in supportedMetricDescriptions.itervalues():
      self.assertIsInstance(value, dict)
      for metricName, metricInfo in value.iteritems():
        self.assertIsInstance(metricName, basestring)

        self.assertItemsEqual(metricInfo.keys(),
                              ["namespace", "dimensionGroups"])
        self.assertIsInstance(metricInfo["namespace"], basestring)
        self.assertIsInstance(metricInfo["dimensionGroups"], tuple)
        for dimensionGroup in metricInfo["dimensionGroups"]:
          self.assertIsInstance(dimensionGroup, tuple)
          for dimensionName in dimensionGroup:
            self.assertIsInstance(dimensionName, basestring)
  def testGETSpecificInstanceFromRegion(self):
    """
    Test for Get
    '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/instances/<InstancdId>'
    response is validated for appropriate headers, body and status
    Test is currently using ec2 box for jenkins-master, this test also
    validates for retriving all supported metrics with dimensions
    """
    supportedMetrics = (
      createCloudwatchDatasourceAdapter().describeSupportedMetrics())
    ec2Metrics = supportedMetrics[ResourceTypeNames.EC2_INSTANCE].keys()

    # Instance used for following test is jenkins-master node
    response = self.app.get("/us-west-2/AWS/EC2/instances/%s"
        % VALID_EC2_INSTANCE["InstanceId"], headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, list)
    self.assertGreater(len(ec2Metrics), 0)
    self.assertGreater(len(result), 0)
    self.assertEqual(len(ec2Metrics), len(result))
    for res in result:
      self.assertEqual(res["region"], "us-west-2")
      self.assertEqual(res["namespace"], "AWS/EC2")
      self.assertEqual(res["datasource"], "cloudwatch")
      self.assertIn(res["metric"], ec2Metrics)
      self.assertIsInstance(res["dimensions"], dict)
      self.assertEqual(res["dimensions"]["InstanceId"],
        VALID_EC2_INSTANCE["InstanceId"])
      ec2Metrics.remove(res["metric"])

    self.assertEqual(ec2Metrics, [])
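
Based on the assertions in this test, one element of the decoded response might
look like the following; the metric name and instance id are illustrative, and
the handler may include additional keys such as "identifier" and "name".

{
  "region": "us-west-2",
  "namespace": "AWS/EC2",
  "datasource": "cloudwatch",
  "metric": "CPUUtilization",
  "dimensions": {"InstanceId": "i-12345678"}
}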
Example #9
    def GET(self):  # pylint: disable=R0201
        """
    Describe Cloudwatch datasource, listing all supported regions, namespaces
    and metrics

      ::

          GET /_metrics/cloudwatch

      Returns:

      ::

        {
            'regions': { 'region-name': 'region-description',...},
            'namespaces': {
                'namespace-name': {
                    'metrics': ['metric-name',...],
                    'dimensions': ['dimension-name',...]
                }, ....
            }
        }
    """
        web.header('Content-Type', 'application/json; charset=UTF-8', True)
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        resources = adapter.describeSupportedMetrics()

        return utils.jsonEncode({
            "regions":
            dict(adapter.describeRegions()),
            "namespaces":
            _translateResourcesIntoNamespaces(resources)
        })
    def testGETSpecificInstanceFromRegion(self):
        """
    Test for Get
    '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/instances/<InstancdId>'
    response is validated for appropriate headers, body and status
    Test is currently using ec2 box for jenkins-master, this test also
    validates for retriving all supported metrics with dimensions
    """
        supportedMetrics = (
            createCloudwatchDatasourceAdapter().describeSupportedMetrics())
        ec2Metrics = supportedMetrics[ResourceTypeNames.EC2_INSTANCE].keys()

        # Instance used for following test is jenkins-master node
        response = self.app.get("/us-west-2/AWS/EC2/instances/%s" %
                                VALID_EC2_INSTANCE["InstanceId"],
                                headers=self.headers)
        assertions.assertSuccess(self, response)
        result = app_utils.jsonDecode(response.body)
        self.assertIsInstance(result, list)
        self.assertGreater(len(ec2Metrics), 0)
        self.assertGreater(len(result), 0)
        self.assertEqual(len(ec2Metrics), len(result))
        for res in result:
            self.assertEqual(res["region"], "us-west-2")
            self.assertEqual(res["namespace"], "AWS/EC2")
            self.assertEqual(res["datasource"], "cloudwatch")
            self.assertIn(res["metric"], ec2Metrics)
            self.assertIsInstance(res["dimensions"], dict)
            self.assertEqual(res["dimensions"]["InstanceId"],
                             VALID_EC2_INSTANCE["InstanceId"])
            ec2Metrics.remove(res["metric"])

        self.assertEqual(ec2Metrics, [])
Example #11
  def GET(self): # pylint: disable=R0201
    """
      Returns list of supported Cloudwatch regions

      ::

          GET /_metrics/cloudwatch/regions

      Returns:

      ::

          { 'region-name': 'region-description',...}

      Sample output:

      ::

          {
            "ap-northeast-1": "Asia Pacific (Tokyo) Region",
            "ap-southeast-1": "Asia Pacific (Singapore) Region",
            "ap-southeast-2": "Asia Pacific (Sydney) Region",
            "eu-west-1": "EU (Ireland) Region",
            "sa-east-1": "South America (Sao Paulo) Region",
            "us-east-1": "US East (Northern Virginia) Region",
            "us-west-1": "US West (Northern California) Region",
            "us-west-2": "US West (Oregon) Region"
          }
    """
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    AuthenticatedBaseHandler.addStandardHeaders()
    return utils.jsonEncode(dict(adapter.describeRegions()))
 def setUp(self):
     self.headers = getDefaultHTTPHeaders(grok.app.config)
     self.app = TestApp(cloudwatch_api.app.wsgifunc())
     adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
     self.resources = adapter.describeSupportedMetrics()
     self.regions = adapter.describeRegions()
  def testListSupportedResourceTypes(self):
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    supportedResourceTypes = adapter.listSupportedResourceTypes()
    _LOG.info("Got %d supported resource types", len(supportedResourceTypes))


    self.assertItemsEqual(supportedResourceTypes, self._expectedResourceTypes)
Example #14
 def testCreateCloudwatchDatasourceAdapter(self):
     """ Make sure createCloudwatchDatasourceAdapter returns the expected adapter
 """
     adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter(
     )
     self.assertEqual(adapter._DATASOURCE, "cloudwatch")
     self.assertEqual(adapter.__class__.__name__,
                      "_CloudwatchDatasourceAdapter")
    def _supportedAWSNamespaces():
        """ Compile set of supported AWS namespaces

    :returns: Set of known AWS Cloudwatch namespaces
    :rtype: set of str
    """
        return set(value
                   for x in (createCloudwatchDatasourceAdapter()
                             .describeSupportedMetrics()
                             .values())
                   for y in x.values()
                   for key, value in y.items() if key == "namespace")
Example #16
    def GET(self, namespace=None):
        """
      List supported Cloudwatch namespaces

      ::

          GET /_metrics/cloudwatch/namespaces

      Returns:

      ::

          {'namespace-name1': {...},
           'namespace-name2': {...},
           ...
          }


      OR

      List supported Cloudwatch metrics for a given namespace

      ::

          GET /_metrics/cloudwatch/{namespace-name}

      Returns:

      ::

          {
              'namespace-name': {
                   'metrics': ['metric-name',...],
                   'dimensions': ['dimension-name',...]
              }
          }
    """
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        resources = adapter.describeSupportedMetrics()

        namespaces = _translateResourcesIntoNamespaces(resources)

        # Adding Autostacks namespaces to this list for now, to maintain API
        # backwards-compatibility during adapter refactor
        namespaces["Autostacks"] = {"metrics": ["InstanceCount"]}

        if namespace is None:
            self.addStandardHeaders()
            return utils.jsonEncode(namespaces)

        if not namespace in namespaces:
            raise web.NotFound("Namespace '%s' was not found" % namespace)

        self.addStandardHeaders()
        return utils.jsonEncode({str(namespace): namespaces[namespace]})
    def testListSupportedResourceTypes(self):
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        supportedResourceTypes = adapter.listSupportedResourceTypes()
        _LOG.info("Got %d supported resource types",
                  len(supportedResourceTypes))

        self.assertItemsEqual(supportedResourceTypes,
                              self._expectedResourceTypes)
Example #18
  def GET(self, namespace=None):
    """
      List supported Cloudwatch namespaces

      ::

          GET /_metrics/cloudwatch/namespaces

      Returns:

      ::

          {'namespace-name1': {...},
           'namespace-name2': {...},
           ...
          }


      OR

      List supported Cloudwatch metrics for a given namespace

      ::

          GET /_metrics/cloudwatch/{namespace-name}

      Returns:

      ::

          {
              'namespace-name': {
                   'metrics': ['metric-name',...],
                   'dimensions': ['dimension-name',...]
              }
          }
    """
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    resources = adapter.describeSupportedMetrics()

    namespaces = _translateResourcesIntoNamespaces(resources)

    # Adding Autostacks namespaces to this list for now, to maintain API
    # backwards-compatibility during adapter refactor
    namespaces["Autostacks"] = {"metrics": ["InstanceCount"]}

    if namespace is None:
      self.addStandardHeaders()
      return utils.jsonEncode(namespaces)

    if not namespace in namespaces:
      raise web.NotFound("Namespace '%s' was not found" % namespace)

    self.addStandardHeaders()
    return utils.jsonEncode({str(namespace): namespaces[namespace]})
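
A hedged usage sketch for this namespaces handler, assuming the same TestApp
fixtures as the other tests in these examples and that the route is mounted at
/namespaces within the cloudwatch API app; the method name is hypothetical.

def testGETNamespaces(self):  # sketch only; route and fixtures are assumed
  response = self.app.get("/namespaces", headers=self.headers)
  assertions.assertSuccess(self, response)
  namespaces = app_utils.jsonDecode(response.body)
  self.assertIn("AWS/EC2", namespaces)
  self.assertIn("Autostacks", namespaces)  # added above for backwards-compatibility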
Example #19
    def GET(self, region):
        """
      List all existing Cloudwatch metrics for a given region

      ::

          GET /_metrics/cloudwatch/regions/{region}

      Returns:

      ::

          [
              {
                  'name': 'tag-or-empty-string',
                  'region': 'region-name',
                  'namespace': 'namespace-name',
                  'datasource': 'cloudwatch',
                  'identifier': 'id-from-dimension',
                  'metric': 'metric-name',
                  'dimensions': {
                      ...
                  }
              },...
          ]
    """

        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        resources = adapter.describeSupportedMetrics()

        def translateResourcesIntoMetrics():
            for resource, metrics in resources.items():
                for specificResource in adapter.describeResources(
                        region, resource):
                    for metric, cloudwatchParams in metrics.items():
                        yield {
                            "datasource": "cloudwatch",
                            "dimensions": {
                                cloudwatchParams["dimensionGroups"][0][0]:
                                specificResource["resID"]
                            },
                            "identifier": specificResource["resID"],
                            "metric": metric,
                            "name": specificResource["name"],
                            "namespace": cloudwatchParams["namespace"],
                            "region": region
                        }

        if region not in dict(adapter.describeRegions()):
            raise web.NotFound("Region '%s' was not found" % region)

        self.addStandardHeaders()
        return utils.jsonEncode(list(translateResourcesIntoMetrics()))
  def testActivateModel(self):
    """ Test activateModel """
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    modelId = self._monitorMetric(adapter, self._modelSpecNoMinMax)

    adapter.activateModel(modelId)

    self._runBasicChecksOnModel(modelId, adapter, self._modelSpecNoMinMax)

    # Cleanup
    adapter.unmonitorMetric(modelId)
  def _supportedAWSNamespaces():
    """ Compile set of supported AWS namespaces

    :returns: Set of known AWS Cloudwatch namespaces
    :rtype: set of str
    """
    return set(value
               for x in (createCloudwatchDatasourceAdapter()
                         .describeSupportedMetrics()
                         .values())
               for y in x.values()
               for key, value in y.items() if key == "namespace")
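
Under the same assumption about the describeSupportedMetrics() structure (every
metric description carries a "namespace" key), an equivalent and arguably
simpler formulation would be:

def _supportedAWSNamespaces():
  """ Sketch of an equivalent implementation; not the original helper. """
  descriptions = createCloudwatchDatasourceAdapter().describeSupportedMetrics()
  return set(info["namespace"]
             for metrics in descriptions.values()
             for info in metrics.values())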
  def testMonitorMetricThatIsAlreadyMonitored(self):
    """ monitorMetric should raise if already monitored """
    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    modelId = self._monitorMetric(adapter, self._modelSpecNoMinMax)

    with self.assertRaises(app_exceptions.MetricAlreadyMonitored) as cm:
      adapter.monitorMetric(self._modelSpecNoMinMax)

    self.assertEqual(cm.exception.uid, modelId)

    # Cleanup
    adapter.unmonitorMetric(modelId)
    def testActivateModel(self):
        """ Test activateModel """
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        modelId = self._monitorMetric(adapter, self._modelSpecNoMinMax)

        adapter.activateModel(modelId)

        self._runBasicChecksOnModel(modelId, adapter, self._modelSpecNoMinMax)

        # Cleanup
        adapter.unmonitorMetric(modelId)
    def testMonitorMetricThatIsAlreadyMonitored(self):
        """ monitorMetric should raise if already monitored """
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        modelId = self._monitorMetric(adapter, self._modelSpecNoMinMax)

        with self.assertRaises(app_exceptions.MetricAlreadyMonitored) as cm:
            adapter.monitorMetric(self._modelSpecNoMinMax)

        self.assertEqual(cm.exception.uid, modelId)

        # Cleanup
        adapter.unmonitorMetric(modelId)
Example #25
  def GET(self, region):
    """
      List all existing Cloudwatch metrics for a given region

      ::

          GET /_metrics/cloudwatch/regions/{region}

      Returns:

      ::

          [
              {
                  'name': 'tag-or-empty-string',
                  'region': 'region-name',
                  'namespace': 'namespace-name',
                  'datasource': 'cloudwatch',
                  'identifier': 'id-from-dimension',
                  'metric': 'metric-name',
                  'dimensions': {
                      ...
                  }
              },...
          ]
    """

    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    resources = adapter.describeSupportedMetrics()

    def translateResourcesIntoMetrics():
      for resource, metrics in resources.items():
        for specificResource in adapter.describeResources(region, resource):
          for metric, cloudwatchParams in metrics.items():
            yield {"datasource": "cloudwatch",
                   "dimensions": {
                    cloudwatchParams["dimensionGroups"][0][0]:
                      specificResource["resID"]},
                   "identifier": specificResource["resID"],
                   "metric": metric,
                   "name": specificResource["name"],
                   "namespace": cloudwatchParams["namespace"],
                   "region": region}

    if region not in dict(adapter.describeRegions()):
      raise web.NotFound("Region '%s' was not found" % region)

    self.addStandardHeaders()
    return utils.jsonEncode(list(translateResourcesIntoMetrics()))
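
Given the dimensionGroups structure asserted in testDescribeSupportedMetrics,
a single item yielded by translateResourcesIntoMetrics might look like this;
all values are illustrative.

{"datasource": "cloudwatch",
 "dimensions": {"InstanceId": "i-12345678"},
 "identifier": "i-12345678",
 "metric": "CPUUtilization",
 "name": "jenkins-master",
 "namespace": "AWS/EC2",
 "region": "us-west-2"}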
  def _testGETCloudWatchImpl(self, url):
    response = self.app.get(url, headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, dict)

    supportedMetrics = createCloudwatchDatasourceAdapter().describeSupportedMetrics()

    for metrics in supportedMetrics.values():
      for metric, keys in metrics.items():
        self.assertIn(keys["namespace"],
                      result["namespaces"],
                      "Expected namespace (%s) not found in response." % (
                        keys["namespace"]))
        self.assertIn(metric,
                      result["namespaces"][keys["namespace"]]["metrics"],
                      "Expected metric (%s, %s) not found in response." % (
                        keys["namespace"], metric))
    def _testGETCloudWatchImpl(self, url):
        response = self.app.get(url, headers=self.headers)
        assertions.assertSuccess(self, response)
        result = app_utils.jsonDecode(response.body)
        self.assertIsInstance(result, dict)

        supportedMetrics = (
            createCloudwatchDatasourceAdapter().describeSupportedMetrics())

        for metrics in supportedMetrics.values():
            for metric, keys in metrics.items():
                self.assertIn(
                    keys["namespace"], result["namespaces"],
                    "Expected namespace (%s) not found in response." %
                    (keys["namespace"]))
                self.assertIn(
                    metric, result["namespaces"][keys["namespace"]]["metrics"],
                    "Expected metric (%s, %s) not found in response." %
                    (keys["namespace"], metric))
    def testDescribeRegions(self):
        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        regionSpecs = adapter.describeRegions()
        _LOG.info("got %d region descriptions", len(regionSpecs))

        self.assertIsInstance(regionSpecs, tuple)

        expectedRegionNames = ("ap-northeast-1", "ap-southeast-1",
                               "ap-southeast-2", "eu-west-1", "sa-east-1",
                               "us-east-1", "us-west-1", "us-west-2")

        regionNames = tuple(name for name, description in regionSpecs)

        self.assertItemsEqual(regionNames, expectedRegionNames)

        for item in regionSpecs:
            self.assertIsInstance(item, tuple)
            self.assertEqual(len(item), 2)
            name, description = item
            self.assertIsInstance(name, basestring)
            self.assertIsInstance(description, basestring)
Example #29
    def GET(self, region, namespace, instance=None):
        """
    List metrics in the given namespace in the region [for a specific instance
    if specified]

    ::

        GET /_metrics/cloudwatch/{region}/{namespace}/instances/[{instance}]

    Sample Output:

    ::

        [
          {
            "dimensions": {
              "dimension-name": "value-1",
              ...
            },
            "region": "region-name",
            "namespace": "namespace-name",
            "datasource": "cloudwatch",
            "identifier": "resource-id-from-dimension",
            "metric": "metric-name",
            "name": "name-tag-or-empty-string"
          },...
        ]

    Note:
    Expect a 200 OK even when attempting to GET from an invalid instance; this
    saves the overhead of asking AWS whether the instance is valid on every
    GET.

    This fails silently. We expect the CLI user to know which Instance ID she
    is looking for.
    """

        data = web.input(tags=None)
        filters = None
        if data.tags:
            filters = {}
            kvpairs = [tag.strip() for tag in data.tags.split(",")]

            for kvpair in kvpairs:
                (key, _, value) = kvpair.partition(":")
                filters.setdefault("tag:" + key, []).append(value)

        if not namespace in NAMESPACE_TO_RESOURCE_TYPE:
            raise web.NotFound("Namespace '%s' was not found" % namespace)

        aggSpec = {
            "resourceType": NAMESPACE_TO_RESOURCE_TYPE[namespace],
            "region": region
        }

        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

        resources = adapter.describeSupportedMetrics()

        namespaces = _translateResourcesIntoNamespaces(resources)

        if filters:
            aggSpec["filters"] = filters

            def translateResourcesIntoMetrics(namespace=None, instance=None):
                for metrics in resources.values():
                    try:
                        for specificInstance in adapter.getMatchingResources(
                                aggSpec):
                            for metricName, cloudwatchParams in metrics.items():
                                if (namespace and
                                        cloudwatchParams["namespace"] == namespace and
                                        (instance is None or
                                         instance == specificInstance.instanceID)):
                                    yield {
                                        "datasource": "cloudwatch",
                                        "dimensions": {
                                            cloudwatchParams["dimensionGroups"][0][0]:
                                            specificInstance.instanceID
                                        },
                                        "identifier":
                                        specificInstance.instanceID,
                                        "metric": metricName,
                                        "name": specificInstance.tags["Name"],
                                        "namespace":
                                        cloudwatchParams["namespace"],
                                        "region": region
                                    }

                    except NotImplementedError:
                        # Metric exists but is otherwise not yet fully implemented.  When
                        # the adapter no longer raises NotImplementedError, it will become
                        # available.
                        pass

        else:

            def translateResourcesIntoMetrics(namespace=None, instance=None):
                for resource, metrics in resources.items():
                    try:
                        for specificResource in adapter.describeResources(
                                region, resource):
                            for metricName, cloudwatchParams in metrics.items():
                                if (namespace and
                                        cloudwatchParams["namespace"] == namespace and
                                        (instance is None or
                                         instance == specificResource["resID"])):
                                    yield {
                                        "datasource": "cloudwatch",
                                        "dimensions": {
                                            cloudwatchParams["dimensionGroups"][0][0]:
                                            specificResource["resID"]
                                        },
                                        "identifier":
                                        specificResource["resID"],
                                        "metric": metricName,
                                        "name": specificResource["name"],
                                        "namespace":
                                        cloudwatchParams["namespace"],
                                        "region": region
                                    }

                    except NotImplementedError:
                        # Metric exists but is otherwise not yet fully implemented.  When
                        # the adapter no longer raises NotImplementedError, it will become
                        # available.
                        pass

        if region not in dict(adapter.describeRegions()):
            raise web.NotFound("Region '%s' was not found" % region)

        self.addStandardHeaders()
        return utils.jsonEncode(
            list(translateResourcesIntoMetrics(namespace, instance)))
Example #30
  def GET(self, region, namespace, metric):
    """
      List all instances of the given metric for the given namespace in the
      region

      ::

          GET /_metrics/cloudwatch/{region-name}/{namespace-name}/{metric-name}

      Sample Output:

      ::

          [
            {
              "region":"regions-name",
              "namespace": "namespace-name",
              "datasource": "cloudwatch",
              "metric": "metric-name",
              "dimensions": {
                "dimension-name": "value-1",
                ...
              }
            },...
          ]
    """

    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
    resources = adapter.describeSupportedMetrics()

    namespaces = _translateResourcesIntoNamespaces(resources)

    def translateResourcesIntoMetrics(namespace = None):
      for resource, metrics in resources.items():
        try:
          for specificResource in adapter.describeResources(region, resource):
            for metricName, cloudwatchParams in metrics.items():
              if (namespace and
                  cloudwatchParams["namespace"] == namespace and
                  metricName == metric):
                yield {"datasource": "cloudwatch",
                       "dimensions": {
                        cloudwatchParams["dimensionGroups"][0][0]:
                           specificResource["resID"]},
                       "metric": metricName,
                       "namespace": cloudwatchParams["namespace"],
                       "region": region}
        except NotImplementedError:
          # Metric exists but is otherwise not yet fully implemented.  When the
          # adapter no longer raises NotImplementedError, it will become
          # available.
          pass

    if region not in dict(adapter.describeRegions()):
      raise web.NotFound("Region '%s' was not found" % region)

    if not namespace in namespaces:
      raise web.NotFound("Namespace '%s' was not found" % namespace)

    if not metric in namespaces[namespace]["metrics"]:
      raise web.NotFound("Metric '%s' was not found" % metric)

    queryParams = dict(urlparse.parse_qsl(web.ctx.env['QUERY_STRING']))
    if not queryParams:
      self.addStandardHeaders()
      return utils.jsonEncode(list(translateResourcesIntoMetrics(namespace)))

    raise NotImplementedError("Unexpectedly received query params.")
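
A hedged usage sketch against this metric-instances handler, reusing the
TestApp fixtures shown in the other tests; the method name, the metric name and
the relative route are assumptions, following the path pattern used in
testGETSpecificInstanceFromRegion.

def testGETMetricInstances(self):  # sketch only; route, fixtures and metric are assumed
  response = self.app.get("/us-west-2/AWS/EC2/CPUUtilization", headers=self.headers)
  assertions.assertSuccess(self, response)
  result = app_utils.jsonDecode(response.body)
  self.assertIsInstance(result, list)
  for item in result:
    self.assertEqual(item["namespace"], "AWS/EC2")
    self.assertEqual(item["metric"], "CPUUtilization")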
Example #31
    def testCollectAndPublishMetrics(self):
        # Start Metric Collector, create a set of Metrics, wait for it to collect
        # some metrics and to publish them to the metric_exchange, then validate
        # attributes of the published metrics.
        #
        # TODO Add more metric types
        # TODO Deeper validation of the published metrics

        # Start our own instance of metric collector and wait for data points
        with self._startModelSchedulerSubprocess() as modelSchedulerSubprocess, \
            self._startMetricCollectorSubprocess() as metricCollectorSubprocess:
            # Create some models for metric collector to harvest
            region = "us-west-2"
            namespace = "AWS/EC2"
            resourceType = ResourceTypeNames.EC2_INSTANCE

            engine = repository.engineFactory()
            adapter = createCloudwatchDatasourceAdapter()

            ec2Instances = adapter.describeResources(region=region,
                                                     resourceType=resourceType)

            self.assertGreater(len(ec2Instances), 0)

            maxModels = 10

            ec2Instances = ec2Instances[:min(maxModels,
                                             Quota.getInstanceQuota())]

            metricInstances = []

            _LOGGER.info("Starting %d models", len(ec2Instances))
            self.assertGreater(len(ec2Instances), 0)
            for ec2Instance in ec2Instances:

                metricSpec = {
                    "region": region,
                    "namespace": namespace,
                    "metric": "CPUUtilization",
                    "dimensions": {
                        "InstanceId": ec2Instance["resID"]
                    }
                }

                modelSpec = {
                    "datasource": "cloudwatch",
                    "metricSpec": metricSpec
                }

                metricId = adapter.monitorMetric(modelSpec)

                with engine.connect() as conn:
                    repository.setMetricStatus(conn, metricId,
                                               MetricStatus.ACTIVE)

                metricInstances.append(metricId)

            _LOGGER.info("Waiting for results from models...")

            seenMetricIDs = set()
            allMetricIDs = set(metricInstances)

            # Register a timeout so we won't deadlock the test
            def onTimeout(resultsQueueName):
                _LOGGER.error(
                    "Timed out waiting to get results from models; numResults=%d; "
                    "expected=%d", len(seenMetricIDs), len(allMetricIDs))

                # HACK delete model swapper results queue to abort the consumer
                try:
                    with MessageBusConnector() as bus:
                        bus.deleteMessageQueue(resultsQueueName)
                except Exception:
                    _LOGGER.exception("Failed to delete results mq=%s",
                                      resultsQueueName)
                    raise

            with ModelSwapperInterface() as modelSwapper:
                with modelSwapper.consumeResults() as consumer:
                    timer = threading.Timer(
                        120, onTimeout, args=[modelSwapper._resultsQueueName])
                    timer.start()
                    try:
                        for batch in consumer:
                            seenMetricIDs.add(batch.modelID)
                            batch.ack()
                            if seenMetricIDs == allMetricIDs:
                                break
                        else:
                            self.fail(
                                "Expected %d results, but got only %d: %s" % (
                                    len(allMetricIDs),
                                    len(seenMetricIDs),
                                    seenMetricIDs,
                                ))
                        _LOGGER.info("Got %d results from models",
                                     len(seenMetricIDs))
                    finally:
                        timer.cancel()

            # Terminate metric_collector subprocess gracefully to avoid too much
            # error logging junk on the terminal
            metricCollectorSubprocess.send_signal(signal.SIGINT)

            # Terminate metric_collector subprocess gracefully to avoid too much
            # error logging junk on the terminal
            modelSchedulerSubprocess.send_signal(signal.SIGINT)
 def testCreateCloudwatchDatasourceAdapter(self):
   """ Make sure createCloudwatchDatasourceAdapter returns the expected adapter
   """
   adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
   self.assertEqual(adapter._DATASOURCE, "cloudwatch")
   self.assertEqual(adapter.__class__.__name__, "_CloudwatchDatasourceAdapter")
    def GET(self, autostackId=None):  # pylint: disable=C0103
        """
      Get instances for known Autostack:

      ::

          GET /_autostacks/{autostackId}/instances

      Preview Autostack instances:

      ::

          GET /_autostacks/preview_instances?region={region}&filters={filters}

      :param region: AWS Region Name
      :type region: str
      :param filters: AWS Tag value pattern
      :type filters: str (JSON object)

      Example query params:

      ::

          region=us-west-2&filters={"tag:Name":["jenkins-master"]}

      :return: List of instance details.  See
               AutostackInstancesHandler.formatInstance() for implementation.

      Example return value:

      ::

          [
            {
              "instanceID": "i-12345678",
              "state": "stopped",
              "regionName": "us-west-2",
              "instanceType": "m1.medium",
              "launchTime": "2013-09-24T02:02:48Z",
              "tags": {
                "Type": "Jenkins",
                "Description": "Jenkins Master",
                "Name": "jenkins-master"
              }
            },
            {
              "instanceID": "i-12345678",
              "state": "running",
              "regionName": "us-west-2",
              "instanceType": "m1.large",
              "launchTime": "2013-12-19T12:02:31Z",
              "tags": {
                "Type": "Jenkins",
                "Name": "jenkins-master",
                "Description": "Jenkin Master(Python 2.7)"
              }
            }
          ]
    """
        self.addStandardHeaders()
        aggSpec = {
            "datasource": "cloudwatch",  # only support EC2 for now
            "region": None,  # to be filled below
            "resourceType": "AWS::EC2::Instance",  # only support EC2 for now
            "filters": None  # to be filled below
        }
        adapter = createCloudwatchDatasourceAdapter()
        if autostackId is not None:
            try:
                with web.ctx.connFactory() as conn:
                    autostackRow = repository.getAutostack(conn, autostackId)
            except ObjectNotFoundError:
                raise web.notfound("Autostack not found: Autostack ID: %s" %
                                   autostackId)
            except web.HTTPError as ex:
                if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
                    # Log 400-599 status codes as errors, ignoring 200-399
                    log.error(str(ex) or repr(ex))
                raise
            except Exception as ex:
                raise web.internalerror(str(ex) or repr(ex))
            aggSpec["region"] = autostackRow.region
            aggSpec["filters"] = autostackRow.filters
            result = adapter.getMatchingResources(aggSpec)
        else:
            data = web.input(region=None, filters=None)
            if not data.region:
                raise InvalidRequestResponse({"result": "Invalid region"})
            if not data.filters:
                raise InvalidRequestResponse({"result": "Invalid filters"})

            try:
                aggSpec["region"] = data.region
                aggSpec["filters"] = utils.jsonDecode(data.filters)
                result = adapter.getMatchingResources(aggSpec)
            except boto.exception.EC2ResponseError as responseError:
                raise InvalidRequestResponse({"result": responseError.message})

        if result:
            return utils.jsonEncode(
                [self.formatInstance(instance) for instance in result])

        return utils.jsonEncode([])
Example #34
  def GET(self, region, namespace, instance=None):
    """
    List metrics in the given namespace in the region [for a specific instance
    if specified]

    ::

        GET /_metrics/cloudwatch/{region}/{namespace}/instances/[{instance}]

    Sample Output:

    ::

        [
          {
            "dimensions": {
              "dimension-name": "value-1",
              ...
            },
            "region": "region-name",
            "namespace": "namespace-name",
            "datasource": "cloudwatch",
            "identifier": "resource-id-from-dimension",
            "metric": "metric-name",
            "name": "name-tag-or-empty-string"
          },...
        ]

    Note:
    Expect a 200 OK even when attempting to GET from an invalid instance; this
    saves the overhead of asking AWS whether the instance is valid on every
    GET.

    This fails silently. We expect the CLI user to know which Instance ID she
    is looking for.
    """

    data = web.input(tags=None)
    filters = None
    if data.tags:
      filters = {}
      kvpairs = [tag.strip() for tag in data.tags.split(",")]

      for kvpair in kvpairs:
        (key, _, value) = kvpair.partition(":")
        filters.setdefault("tag:" + key, []).append(value)

    if not namespace in NAMESPACE_TO_RESOURCE_TYPE:
      raise web.NotFound("Namespace '%s' was not found" % namespace)

    aggSpec = {"resourceType": NAMESPACE_TO_RESOURCE_TYPE[namespace],
               "region": region}

    adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()

    resources = adapter.describeSupportedMetrics()

    namespaces = _translateResourcesIntoNamespaces(resources)

    if filters:
      aggSpec["filters"] = filters

      def translateResourcesIntoMetrics(namespace=None, instance=None):
        for metrics in resources.values():
          try:
            for specificInstance in adapter.getMatchingResources(aggSpec):
              for metricName, cloudwatchParams in metrics.items():
                if (namespace and
                    cloudwatchParams["namespace"] == namespace and
                    (instance is None or
                     instance == specificInstance.instanceID)):
                  yield {"datasource": "cloudwatch",
                       "dimensions": {
                        cloudwatchParams["dimensionGroups"][0][0]:
                           specificInstance.instanceID},
                       "identifier": specificInstance.instanceID,
                       "metric": metricName,
                       "name": specificInstance.tags["Name"],
                       "namespace": cloudwatchParams["namespace"],
                       "region": region}

          except NotImplementedError:
            # Metric exists but is otherwise not yet fully implemented.  When
            # the adapter no longer raises NotImplementedError, it will become
            # available.
            pass

    else:
      def translateResourcesIntoMetrics(namespace=None, instance=None):
        for resource, metrics in resources.items():
          try:
            for specificResource in adapter.describeResources(region,
                                                              resource):
              for metricName, cloudwatchParams in metrics.items():
                if (namespace and
                    cloudwatchParams["namespace"] == namespace and
                    (instance is None or
                     instance == specificResource["resID"])):
                  yield {"datasource": "cloudwatch",
                       "dimensions": {
                        cloudwatchParams["dimensionGroups"][0][0]:
                           specificResource["resID"]},
                       "identifier": specificResource["resID"],
                       "metric": metricName,
                       "name": specificResource["name"],
                       "namespace": cloudwatchParams["namespace"],
                       "region": region}

          except NotImplementedError:
            # Metric exists but is otherwise not yet fully implemented.  When
            # the adapter no longer raises NotImplementedError, it will become
            # available.
            pass

    if region not in dict(adapter.describeRegions()):
      raise web.NotFound("Region '%s' was not found" % region)

    self.addStandardHeaders()
    return utils.jsonEncode(list(translateResourcesIntoMetrics(namespace,
                                                               instance)))
 def setUpClass(cls):
     adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
     cls.resources = adapter.describeSupportedMetrics()
     cls.regions = adapter.describeRegions()
Example #36
    def GET(self, region, namespace, metric):
        """
      List all instances of the given metric for the given namespace in the
      region

      ::

          GET /_metrics/cloudwatch/{region-name}/{namespace-name}/{metric-name}

      Sample Output:

      ::

          [
            {
              "region":"regions-name",
              "namespace": "namespace-name",
              "datasource": "cloudwatch",
              "metric": "metric-name",
              "dimensions": {
                "dimension-name": "value-1",
                ...
              }
            },...
          ]
    """

        adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
        resources = adapter.describeSupportedMetrics()

        namespaces = _translateResourcesIntoNamespaces(resources)

        def translateResourcesIntoMetrics(namespace=None):
            for resource, metrics in resources.items():
                try:
                    for specificResource in adapter.describeResources(
                            region, resource):
                        for metricName, cloudwatchParams in metrics.items():
                            if (namespace and
                                    cloudwatchParams["namespace"] == namespace and
                                    metricName == metric):
                                yield {
                                    "datasource": "cloudwatch",
                                    "dimensions": {
                                        cloudwatchParams["dimensionGroups"][0][0]:
                                        specificResource["resID"]
                                    },
                                    "metric": metricName,
                                    "namespace": cloudwatchParams["namespace"],
                                    "region": region
                                }
                except NotImplementedError:
                    # Metric exists but is otherwise not yet fully implemented.  When the
                    # adapter no longer raises NotImplementedError, it will become
                    # available.
                    pass

        if region not in dict(adapter.describeRegions()):
            raise web.NotFound("Region '%s' was not found" % region)

        if not namespace in namespaces:
            raise web.NotFound("Namespace '%s' was not found" % namespace)

        if not metric in namespaces[namespace]["metrics"]:
            raise web.NotFound("Metric '%s' was not found" % metric)

        queryParams = dict(urlparse.parse_qsl(web.ctx.env['QUERY_STRING']))
        if not queryParams:
            self.addStandardHeaders()
            return utils.jsonEncode(
                list(translateResourcesIntoMetrics(namespace)))

        raise NotImplementedError("Unexpectedly received query params.")
 def setUpClass(cls):
     adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
     cls.resources = adapter.describeSupportedMetrics()
     cls.regions = adapter.describeRegions()
  def GET(self, autostackId=None): # pylint: disable=C0103
    """
      Get instances for known Autostack:

      ::

          GET /_autostacks/{autostackId}/instances

      Preview Autostack instances:

      ::

          GET /_autostacks/preview_instances?region={region}&filters={filters}

      :param region: AWS Region Name
      :type region: str
      :param filters: AWS Tag value pattern
      :type filters: str (JSON object)

      Example query params:

      ::

          region=us-west-2&filters={"tag:Name":["jenkins-master"]}

      :return: List of instance details.  See
               AutostackInstancesHandler.formatInstance() for implementation.

      Example return value:

      ::

          [
            {
              "instanceID": "i-12345678",
              "state": "stopped",
              "regionName": "us-west-2",
              "instanceType": "m1.medium",
              "launchTime": "2013-09-24T02:02:48Z",
              "tags": {
                "Type": "Jenkins",
                "Description": "Jenkins Master",
                "Name": "jenkins-master"
              }
            },
            {
              "instanceID": "i-12345678",
              "state": "running",
              "regionName": "us-west-2",
              "instanceType": "m1.large",
              "launchTime": "2013-12-19T12:02:31Z",
              "tags": {
                "Type": "Jenkins",
                "Name": "jenkins-master",
                "Description": "Jenkin Master(Python 2.7)"
              }
            }
          ]
    """
    self.addStandardHeaders()
    aggSpec = {
      "datasource": "cloudwatch",  # only support EC2 for now
      "region": None,  # to be filled below
      "resourceType": "AWS::EC2::Instance",  # only support EC2 for now
      "filters": None  # to be filled below
    }
    adapter = createCloudwatchDatasourceAdapter()
    if autostackId is not None:
      try:
        with web.ctx.connFactory() as conn:
          autostackRow = repository.getAutostack(conn, autostackId)
      except ObjectNotFoundError:
        raise web.notfound("Autostack not found: Autostack ID: %s"
                           % autostackId)
      except web.HTTPError as ex:
        if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
          # Log 400-599 status codes as errors, ignoring 200-399
          log.error(str(ex) or repr(ex))
        raise
      except Exception as ex:
        raise web.internalerror(str(ex) or repr(ex))
      aggSpec["region"] = autostackRow.region
      aggSpec["filters"] = autostackRow.filters
      result = adapter.getMatchingResources(aggSpec)
    else:
      data = web.input(region=None, filters=None)
      if not data.region:
        raise InvalidRequestResponse({"result":"Invalid region"})
      if not data.filters:
        raise InvalidRequestResponse({"result":"Invalid filters"})

      try:
        aggSpec["region"] = data.region
        aggSpec["filters"] = utils.jsonDecode(data.filters)
        result = adapter.getMatchingResources(aggSpec)
      except boto.exception.EC2ResponseError as responseError:
        raise InvalidRequestResponse({"result": responseError.message})

    if result:
      return utils.jsonEncode([self.formatInstance(instance)
                               for instance in result])

    return utils.jsonEncode([])
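
A hedged sketch of the preview flow, built from the example query parameters in
the docstring above; the test method name, the fixtures and the relative route
are assumptions based on the other test examples.

import json
import urllib

def testPreviewInstances(self):  # sketch only; route and fixtures are assumed
  filters = json.dumps({"tag:Name": ["jenkins-master"]})
  response = self.app.get("/preview_instances?region=us-west-2&filters=%s"
                          % urllib.quote(filters), headers=self.headers)
  assertions.assertSuccess(self, response)
  instances = app_utils.jsonDecode(response.body)
  for instance in instances:
    self.assertEqual(instance["regionName"], "us-west-2")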
  def testCollectAndPublishMetrics(self):
    # Start Metric Collector, create a set of Metrics, wait for it to collect
    # some metrics and to publish them to the metric_exchange, then validate
    # attributes of the published metrics.
    #
    # TODO Add more metric types
    # TODO Deeper validation of the published metrics

    # Start our own instance of metric collector and wait for data points
    with self._startModelSchedulerSubprocess() as modelSchedulerSubprocess, \
        self._startMetricCollectorSubprocess() as metricCollectorSubprocess:
      # Create some models for metric collector to harvest
      region = "us-west-2"
      namespace = "AWS/EC2"
      resourceType = ResourceTypeNames.EC2_INSTANCE

      engine = repository.engineFactory()
      adapter = createCloudwatchDatasourceAdapter()


      ec2Instances = adapter.describeResources(region=region,
                                               resourceType=resourceType)

      self.assertGreater(len(ec2Instances), 0)

      maxModels = 10

      ec2Instances = ec2Instances[:min(maxModels, Quota.getInstanceQuota())]

      metricInstances = []

      _LOGGER.info("Starting %d models", len(ec2Instances))
      self.assertGreater(len(ec2Instances), 0)
      for ec2Instance in ec2Instances:

        metricSpec = {"region": region,
                      "namespace": namespace,
                      "metric": "CPUUtilization",
                      "dimensions": {"InstanceId": ec2Instance["resID"]}}

        modelSpec = {"datasource": "cloudwatch",
                     "metricSpec": metricSpec}

        metricId = adapter.monitorMetric(modelSpec)

        with engine.connect() as conn:
          repository.setMetricStatus(conn, metricId, MetricStatus.ACTIVE)

        metricInstances.append(metricId)

      _LOGGER.info("Waiting for results from models...")

      seenMetricIDs = set()
      allMetricIDs = set(metricInstances)

      # Register a timeout so we won't deadlock the test
      def onTimeout(resultsQueueName):
        _LOGGER.error(
          "Timed out waiting to get results from models; numResults=%d; "
          "expected=%d", len(seenMetricIDs), len(allMetricIDs))

        # HACK delete model swapper results queue to abort the consumer
        try:
          with MessageBusConnector() as bus:
            bus.deleteMessageQueue(resultsQueueName)
        except Exception:
          _LOGGER.exception("Failed to delete results mq=%s", resultsQueueName)
          raise

      with ModelSwapperInterface() as modelSwapper:
        with modelSwapper.consumeResults() as consumer:
          timer = threading.Timer(120, onTimeout,
                                  args=[modelSwapper._resultsQueueName])
          timer.start()
          try:
            for batch in consumer:
              seenMetricIDs.add(batch.modelID)
              batch.ack()
              if seenMetricIDs == allMetricIDs:
                break
            else:
              self.fail(
                "Expected %d results, but got only %d: %s"
                % (len(allMetricIDs), len(seenMetricIDs), seenMetricIDs,))
            _LOGGER.info("Got %d results from models", len(seenMetricIDs))
          finally:
            timer.cancel()

      # Terminate metric_collector subprocess gracefully to avoid too much
      # error logging junk on the terminal
      metricCollectorSubprocess.send_signal(signal.SIGINT)

      # Terminate metric_collector subprocess gracefully to avoid too much
      # error logging junk on the terminal
      modelSchedulerSubprocess.send_signal(signal.SIGINT)
 def setUp(self):
     self.headers = getDefaultHTTPHeaders(grok.app.config)
     self.app = TestApp(cloudwatch_api.app.wsgifunc())
     adapter = datasource_adapter_factory.createCloudwatchDatasourceAdapter()
     self.resources = adapter.describeSupportedMetrics()
     self.regions = adapter.describeRegions()