  def testGETSpecificInstanceFromRegion(self):
    """
    Test for Get
    '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/instances/<InstancdId>'
    response is validated for appropriate headers, body and status
    Test is currently using ec2 box for jenkins-master, this test also
    validates for retriving all supported metrics with dimensions
    """
    supportedMetrics = (
      createCloudwatchDatasourceAdapter().describeSupportedMetrics())
    ec2Metrics = supportedMetrics[ResourceTypeNames.EC2_INSTANCE].keys()
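    # ec2Metrics is consumed below: each metric name returned by the API is
    # removed from it, and it must be empty once all results are processed.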

    # The instance used in the following request is the jenkins-master node
    response = self.app.get("/us-west-2/AWS/EC2/instances/%s"
        % VALID_EC2_INSTANCE["InstanceId"], headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, list)
    self.assertGreater(len(ec2Metrics), 0)
    self.assertGreater(len(result), 0)
    self.assertEqual(len(ec2Metrics), len(result))
    for res in result:
      self.assertEqual(res["region"], "us-west-2")
      self.assertEqual(res["namespace"], "AWS/EC2")
      self.assertEqual(res["datasource"], "cloudwatch")
      self.assertIn(res["metric"], ec2Metrics)
      self.assertIsInstance(res["dimensions"], dict)
      self.assertEqual(res["dimensions"]["InstanceId"],
        VALID_EC2_INSTANCE["InstanceId"])
      ec2Metrics.remove(res["metric"])

    self.assertEqual(ec2Metrics, [])
  @staticmethod
  def _supportedAWSNamespaces():
    """ Compile set of supported AWS namespaces

    :returns: Set of known AWS Cloudwatch namespaces
    :rtype: set of str
    """
    return set(value
               for x in (createCloudwatchDatasourceAdapter()
                         .describeSupportedMetrics()
                         .values())
               for y in x.values()
               for key, value in y.items() if key == "namespace")
  def _testGETCloudWatchImpl(self, url):
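    """ GET the given CloudWatch API URL and verify that every supported
    metric and namespace appears in the JSON response.
    """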
    response = self.app.get(url, headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, dict)

    supportedMetrics = (
      createCloudwatchDatasourceAdapter().describeSupportedMetrics())

    for metrics in supportedMetrics.values():
      for metric, keys in metrics.items():
        self.assertIn(keys["namespace"],
                      result["namespaces"],
                      "Expected namespace (%s) not found in response." % (
                        keys["namespace"]))
        self.assertIn(metric,
                      result["namespaces"][keys["namespace"]]["metrics"],
                      "Expected metric (%s, %s) not found in response." % (
                        keys["namespace"], metric))
  def GET(self, autostackId=None): # pylint: disable=C0103
    """
      Get instances for known Autostack:

      ::

          GET /_autostacks/{autostackId}/instances

      Preview Autostack instances:

      ::

          GET /_autostacks/preview_instances?region={region}&filters={filters}

      :param region: AWS Region Name
      :type region: str
      :param filters: AWS Tag value pattern
      :type filters: str (JSON object)

      Example query params:

      ::

          region=us-west-2&filters={"tag:Name":["jenkins-master"]}

      :return: List of instance details.  See
               AutostackInstancesHandler.formatInstance() for implementation.

      Example return value:

      ::

          [
            {
              "instanceID": "i-12345678",
              "state": "stopped",
              "regionName": "us-west-2",
              "instanceType": "m1.medium",
              "launchTime": "2013-09-24T02:02:48Z",
              "tags": {
                "Type": "Jenkins",
                "Description": "Jenkins Master",
                "Name": "jenkins-master"
              }
            },
            {
              "instanceID": "i-12345678",
              "state": "running",
              "regionName": "us-west-2",
              "instanceType": "m1.large",
              "launchTime": "2013-12-19T12:02:31Z",
              "tags": {
                "Type": "Jenkins",
                "Name": "jenkins-master",
                "Description": "Jenkin Master(Python 2.7)"
              }
            }
          ]
    """
    self.addStandardHeaders()
    aggSpec = {
      "datasource": "cloudwatch",  # only support EC2 for now
      "region": None,  # to be filled below
      "resourceType": "AWS::EC2::Instance",  # only support EC2 for now
      "filters": None  # to be filled below
    }
    adapter = createCloudwatchDatasourceAdapter()
    if autostackId is not None:
      try:
        with web.ctx.connFactory() as conn:
          autostackRow = repository.getAutostack(conn, autostackId)
      except ObjectNotFoundError:
        raise web.notfound("Autostack not found: Autostack ID: %s"
                           % autostackId)
      except web.HTTPError as ex:
        if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
          # Log 400-599 status codes as errors, ignoring 200-399
          log.error(str(ex) or repr(ex))
        raise
      except Exception as ex:
        raise web.internalerror(str(ex) or repr(ex))
      aggSpec["region"] = autostackRow.region
      aggSpec["filters"] = autostackRow.filters
      result = adapter.getMatchingResources(aggSpec)
    else:
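      # Preview request: region and filters are supplied as query parameters
      # rather than read from a stored Autostack row.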
      data = web.input(region=None, filters=None)
      if not data.region:
        raise InvalidRequestResponse({"result":"Invalid region"})
      if not data.filters:
        raise InvalidRequestResponse({"result":"Invalid filters"})

      try:
        aggSpec["region"] = data.region
        aggSpec["filters"] = utils.jsonDecode(data.filters)
        result = adapter.getMatchingResources(aggSpec)
      except boto.exception.EC2ResponseError as responseError:
        raise InvalidRequestResponse({"result": responseError.message})

    if result:
      return utils.jsonEncode([self.formatInstance(instance)
                               for instance in result])

    return utils.jsonEncode([])
  def testCollectAndPublishMetrics(self):
    # Start Metric Collector, create a set of Metrics, wait for it to collect
    # some metrics and to publish them to the metric_exchange, then validate
    # attributes of the published metrics.
    #
    # TODO Add more metric types
    # TODO Deeper validation of the published metrics

    # Start our own instance of metric collector and wait for data points
    with self._startModelSchedulerSubprocess() as modelSchedulerSubprocess, \
        self._startMetricCollectorSubprocess() as metricCollectorSubprocess:
      # Create some models for metric collector to harvest
      region = "us-west-2"
      namespace = "AWS/EC2"
      resourceType = ResourceTypeNames.EC2_INSTANCE

      engine = repository.engineFactory()
      adapter = createCloudwatchDatasourceAdapter()

      ec2Instances = adapter.describeResources(region=region,
                                               resourceType=resourceType)

      self.assertGreater(len(ec2Instances), 0)

      maxModels = 10

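      # Cap the number of monitored instances at maxModels, without exceeding
      # the account's instance quota.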
      ec2Instances = ec2Instances[:min(maxModels, Quota.getInstanceQuota())]

      metricInstances = []

      _LOGGER.info("Starting %d models", len(ec2Instances))
      self.assertGreater(len(ec2Instances), 0)
      for ec2Instance in ec2Instances:

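        # Build a model spec for the instance's CPUUtilization metric and
        # start monitoring it via the CloudWatch datasource adapter.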
        metricSpec = {"region": region,
                      "namespace": namespace,
                      "metric": "CPUUtilization",
                      "dimensions": {"InstanceId": ec2Instance["resID"]}}

        modelSpec = {"datasource": "cloudwatch",
                     "metricSpec": metricSpec}

        metricId = adapter.monitorMetric(modelSpec)

        with engine.connect() as conn:
          repository.setMetricStatus(conn, metricId, MetricStatus.ACTIVE)

        metricInstances.append(metricId)

      _LOGGER.info("Waiting for results from models...")

      seenMetricIDs = set()
      allMetricIDs = set(metricInstances)

      # Register a timeout so we won't deadlock the test
      def onTimeout(resultsQueueName):
        _LOGGER.error(
          "Timed out waiting to get results from models; numResults=%d; "
          "expected=%d", len(seenMetricIDs), len(allMetricIDs))

        # HACK delete model swapper results queue to abort the consumer
        try:
          with MessageBusConnector() as bus:
            bus.deleteMessageQueue(resultsQueueName)
        except Exception:
          _LOGGER.exception("Failed to delete results mq=%s", resultsQueueName)
          raise

      with ModelSwapperInterface() as modelSwapper:
        with modelSwapper.consumeResults() as consumer:
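          # Consume result batches from the model swapper until we have seen a
          # result from every model we created, or the 120-second timer fires
          # and tears down the results queue to unblock the consumer.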
          timer = threading.Timer(120, onTimeout,
                                  args=[modelSwapper._resultsQueueName])
          timer.start()
          try:
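            # Note the for/else: the else clause runs only if the consumer is
            # exhausted before results from every model have been seen.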
            for batch in consumer:
              seenMetricIDs.add(batch.modelID)
              batch.ack()
              if seenMetricIDs == allMetricIDs:
                break
            else:
              self.fail(
                "Expected %d results, but got only %d: %s"
                % (len(allMetricIDs), len(seenMetricIDs), seenMetricIDs,))
            _LOGGER.info("Got %d results from models", len(seenMetricIDs))
          finally:
            timer.cancel()

      # Terminate metric_collector subprocess gracefully to avoid too much
      # error logging junk on the terminal
      metricCollectorSubprocess.send_signal(signal.SIGINT)

      # Terminate model scheduler subprocess gracefully to avoid too much
      # error logging junk on the terminal
      modelSchedulerSubprocess.send_signal(signal.SIGINT)