Example #1
def updateQuota(args):
  helpString = (
    "This script updates HTM-IT app quotas in %s.\n"
    "%%prog\n\n"
    "IT MUST BE CALLED AFTER set_edition.py, BUT BEFORE STARTING HTM-IT SERVICES"
    ) % (QuotaConfig.CONFIG_NAME,)

  # optparse substitutes the program name for "%prog" in the usage string
  parser = OptionParser(helpString)

  (_options, posArgs) = parser.parse_args(args)

  if len(posArgs) != 0:
    parser.error("Expected no positional args, but got %s" % (
                 len(posArgs),))

  Quota.init()
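  # A hypothetical command-line entry point for this script could simply
  # forward the process arguments to updateQuota; the actual wiring in the
  # real script may differ:
  #
  #   if __name__ == "__main__":
  #     updateQuota(sys.argv[1:])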
  def testCollectAndPublishMetrics(self):
    # Start Metric Collector, create a set of Metrics, wait for it to collect
    # some metrics and to publish them to the metric_exchange, then validate
    # attributes of the published metrics.
    #
    # TODO Add more metric types
    # TODO Deeper validation of the published metrics

    # Start our own instance of metric collector and wait for data points
    with self._startModelSchedulerSubprocess() as modelSchedulerSubprocess, \
        self._startMetricCollectorSubprocess() as metricCollectorSubprocess:
      # Create some models for metric collector to harvest
      region = "us-west-2"
      namespace = "AWS/EC2"
      resourceType = ResourceTypeNames.EC2_INSTANCE

      # Create the repository database engine and a CloudWatch datasource
      # adapter
      engine = repository.engineFactory()
      adapter = createCloudwatchDatasourceAdapter()


      ec2Instances = adapter.describeResources(region=region,
                                               resourceType=resourceType)

      self.assertGreater(len(ec2Instances), 0)

      # Cap the number of monitored instances at maxModels, without exceeding
      # the configured instance quota
      maxModels = 10

      ec2Instances = ec2Instances[:min(maxModels, Quota.getInstanceQuota())]

      metricInstances = []

      _LOGGER.info("Starting %d models", len(ec2Instances))
      self.assertGreater(len(ec2Instances), 0)
      # Create a CloudWatch CPUUtilization model for each instance and mark
      # it ACTIVE in the repository
      for ec2Instance in ec2Instances:

        metricSpec = {"region": region,
                      "namespace": namespace,
                      "metric": "CPUUtilization",
                      "dimensions": {"InstanceId": ec2Instance["resID"]}}

        modelSpec = {"datasource": "cloudwatch",
                     "metricSpec": metricSpec}

        metricId = adapter.monitorMetric(modelSpec)

        with engine.connect() as conn:
          repository.setMetricStatus(conn, metricId, MetricStatus.ACTIVE)

        metricInstances.append(metricId)

      _LOGGER.info("Waiting for results from models...")

      seenMetricIDs = set()
      allMetricIDs = set(metricInstances)

      # Register a timeout so we won't deadlock the test
      def onTimeout(resultsQueueName):
        _LOGGER.error(
          "Timed out waiting to get results from models; numResults=%d; "
          "expected=%d", len(seenMetricIDs), len(allMetricIDs))

        # HACK delete model swapper results queue to abort the consumer
        try:
          with MessageBusConnector() as bus:
            bus.deleteMessageQueue(resultsQueueName)
        except Exception:
          _LOGGER.exception("Failed to delete results mq=%s", resultsQueueName)
          raise

      with ModelSwapperInterface() as modelSwapper:
        with modelSwapper.consumeResults() as consumer:
          timer = threading.Timer(120, onTimeout,
                                  args=[modelSwapper._resultsQueueName])
          timer.start()
          try:
            for batch in consumer:
              seenMetricIDs.add(batch.modelID)
              batch.ack()
              if seenMetricIDs == allMetricIDs:
                break
            else:
              self.fail(
                "Expected %d results, but got only %d: %s"
                % (len(allMetricIDs), len(seenMetricIDs), seenMetricIDs,))
            _LOGGER.info("Got %d results from models", len(seenMetricIDs))
          finally:
            timer.cancel()

      # Terminate metric_collector subprocess gracefully to avoid too much
      # error logging junk on the terminal
      metricCollectorSubprocess.send_signal(signal.SIGINT)

      # Terminate model scheduler subprocess gracefully to avoid too much
      # error logging junk on the terminal
      modelSchedulerSubprocess.send_signal(signal.SIGINT)
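The timeout guard in the test above works indirectly: when the timer fires, deleting the results message queue forces the blocked consumer to stop, which in turn breaks the test out of its for-loop. The following stand-alone sketch shows the same watchdog pattern with a plain queue.Queue and a sentinel object standing in for the message bus; drainResults, resultsQueue, and _ABORT are illustrative names, not part of the HTM-IT codebase.

import queue
import threading


def drainResults(resultsQueue, expectedIDs, timeoutSec=120):
  # Consume results until every expected ID has been seen or the timer fires.
  seenIDs = set()

  # Sentinel pushed by the watchdog to abort a blocked get(); analogous to
  # deleting the model swapper results queue in the test above.
  _ABORT = object()

  timer = threading.Timer(timeoutSec, lambda: resultsQueue.put(_ABORT))
  timer.start()
  try:
    while seenIDs != expectedIDs:
      item = resultsQueue.get()
      if item is _ABORT:
        raise RuntimeError("Timed out; got %d of %d results"
                           % (len(seenIDs), len(expectedIDs)))
      seenIDs.add(item)
  finally:
    timer.cancel()
  return seenIDs


if __name__ == "__main__":
  q = queue.Queue()
  for modelID in ("m1", "m2", "m3"):
    q.put(modelID)
  print(drainResults(q, {"m1", "m2", "m3"}, timeoutSec=5))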
Example #3
    def testCollectAndPublishMetrics(self):
        # Start Metric Collector, create a set of Metrics, wait for it to collect
        # some metrics and to publish them to the metric_exchange, then validate
        # attributes of the published metrics.
        #
        # TODO Add more metric types
        # TODO Deeper validation of the published metrics

        # Start our own instance of metric collector and wait for data points
        with self._startModelSchedulerSubprocess() as modelSchedulerSubprocess, \
            self._startMetricCollectorSubprocess() as metricCollectorSubprocess:
            # Create some models for metric collector to harvest
            region = "us-west-2"
            namespace = "AWS/EC2"
            resourceType = ResourceTypeNames.EC2_INSTANCE

            engine = repository.engineFactory()
            adapter = createCloudwatchDatasourceAdapter()

            ec2Instances = adapter.describeResources(region=region,
                                                     resourceType=resourceType)

            self.assertGreater(len(ec2Instances), 0)

            maxModels = 10

            ec2Instances = ec2Instances[:min(maxModels,
                                             Quota.getInstanceQuota())]

            metricInstances = []

            _LOGGER.info("Starting %d models", len(ec2Instances))
            self.assertGreater(len(ec2Instances), 0)
            for ec2Instance in ec2Instances:

                metricSpec = {
                    "region": region,
                    "namespace": namespace,
                    "metric": "CPUUtilization",
                    "dimensions": {
                        "InstanceId": ec2Instance["resID"]
                    }
                }

                modelSpec = {
                    "datasource": "cloudwatch",
                    "metricSpec": metricSpec
                }

                metricId = adapter.monitorMetric(modelSpec)

                with engine.connect() as conn:
                    repository.setMetricStatus(conn, metricId,
                                               MetricStatus.ACTIVE)

                metricInstances.append(metricId)

            _LOGGER.info("Waiting for results from models...")

            seenMetricIDs = set()
            allMetricIDs = set(metricInstances)

            # Register a timeout so we won't deadlock the test
            def onTimeout(resultsQueueName):
                _LOGGER.error(
                    "Timed out waiting to get results from models; numResults=%d; "
                    "expected=%d", len(seenMetricIDs), len(allMetricIDs))

                # HACK delete model swapper results queue to abort the consumer
                try:
                    with MessageBusConnector() as bus:
                        bus.deleteMessageQueue(resultsQueueName)
                except Exception:
                    _LOGGER.exception("Failed to delete results mq=%s",
                                      resultsQueueName)
                    raise

            with ModelSwapperInterface() as modelSwapper:
                with modelSwapper.consumeResults() as consumer:
                    timer = threading.Timer(
                        120, onTimeout, args=[modelSwapper._resultsQueueName])
                    timer.start()
                    try:
                        for batch in consumer:
                            seenMetricIDs.add(batch.modelID)
                            batch.ack()
                            if seenMetricIDs == allMetricIDs:
                                break
                        else:
                            self.fail(
                                "Expected %d results, but got only %d: %s" % (
                                    len(allMetricIDs),
                                    len(seenMetricIDs),
                                    seenMetricIDs,
                                ))
                        _LOGGER.info("Got %d results from models",
                                     len(seenMetricIDs))
                    finally:
                        timer.cancel()

            # Terminate metric_collector subprocess gracefully to avoid too much
            # error logging junk on the terminal
            metricCollectorSubprocess.send_signal(signal.SIGINT)

            # Terminate model scheduler subprocess gracefully to avoid too much
            # error logging junk on the terminal
            modelSchedulerSubprocess.send_signal(signal.SIGINT)
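Both examples end by sending SIGINT to the collector and scheduler subprocesses rather than killing them outright, so the children can shut down cleanly instead of spewing error logs. Below is a minimal stand-alone sketch of that termination pattern, assuming Python 3's subprocess.wait(timeout=...) and using a sleeping child command purely as a stand-in.

import signal
import subprocess
import sys

# Launch a stand-in child process that idles until it is interrupted.
child = subprocess.Popen(
    [sys.executable, "-c", "import time; time.sleep(60)"])

try:
    pass  # ... exercise the child process here ...
finally:
    # Ask the child to shut down gracefully, then reap it; escalate to
    # kill() only if it does not exit within the grace period.
    child.send_signal(signal.SIGINT)
    try:
        child.wait(timeout=10)
    except subprocess.TimeoutExpired:
        child.kill()
        child.wait()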