def testGetOneModel(self, requestsMock):

    # Mock out only the minimal response required to satisfy
    # metric_utils.getOneModel(), then assert that the returned result is the
    # single model object from the json-decoded response of the mocked-out API.
    requestsMock.get.return_value = Mock(status_code=200,
                                         text='[{"parameters":"True"}]')

    result = metric_utils.getOneModel("localhost", "taurus", "foo")

    requestsMock.get.assert_called_once_with("https://localhost/_models/foo",
                                             verify=ANY, auth=("taurus", ""))

    self.assertEqual(result, {"parameters":"True"})
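
A minimal sketch of a getOneModel-style helper, assuming only what the test
above asserts: a GET against the /_models endpoint with basic auth, and a JSON
list response holding a single model object. Error handling and the verify
setting are assumptions, and the name is suffixed to mark it as hypothetical.

import json

import requests


def getOneModelSketch(host, apiKey, modelId):
    # Fetch a single model record using the URL pattern and basic-auth
    # credentials that the test asserts.
    response = requests.get("https://%s/_models/%s" % (host, modelId),
                            auth=(apiKey, ""),
                            verify=False)  # assumption: cert checking disabled
    response.raise_for_status()

    # The API responds with a JSON list containing one model object; return
    # that object, matching the dict the test expects back.
    return json.loads(response.text)[0]
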
Example #2
def main():
  logging_support.LoggingSupport.initTool()

  try:
    options = _parseArgs()
    g_log.info("Running %s with options=%r", sys.argv[0], options)

    if options["unmonitorAll"]:
      models = metric_utils.getAllModels(
        host=options["htmServer"],
        apiKey=options["apiKey"])
    else:
      models = tuple(
        metric_utils.getOneModel(
          host=options["htmServer"],
          apiKey=options["apiKey"],
          modelId=modelId)
        for modelId in options["modelIds"]
      )

    # Save model objects to file for use by monitor_metrics
    with open(options["modelsFilePath"], "w") as outFile:
      json.dump(models, outFile, indent=4)

    if not models:
      g_log.info("No models to unmonitor")
      return

    g_log.info("Unmonitoring %d models", len(models))

    for i, model in enumerate(models, 1):
      modelId = model["uid"]
      metric_utils.unmonitorMetric(
        host=options["htmServer"],
        apiKey=options["apiKey"],
        modelId=modelId)
      g_log.info("Unmonitored metric=%s (%d of %d)",
                 modelId, i, len(models))

    g_log.info("Unmonitored %d models", len(models))
  except SystemExit as e:
    if e.code != 0:
      g_log.exception("unmonitor_metrics failed")
    raise
  except Exception:
    g_log.exception("unmonitor_metrics failed")
    raise
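
main() relies on _parseArgs() returning a dict with the keys htmServer,
apiKey, modelIds, unmonitorAll, and modelsFilePath. A hypothetical
argparse-based sketch that would satisfy that contract is shown below; the
flag names and the choice of parser are assumptions, not the real tool's
command-line interface.

import argparse


def _parseArgsSketch():
    # Hypothetical parser producing the options dict that main() expects.
    parser = argparse.ArgumentParser(
        description="Unmonitor Taurus metric models")
    parser.add_argument("--server", dest="htmServer", required=True)
    parser.add_argument("--apikey", dest="apiKey", required=True)
    parser.add_argument("--models-file", dest="modelsFilePath", required=True)

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--all", dest="unmonitorAll", action="store_true")
    group.add_argument("--modelid", dest="modelIds", action="append",
                       default=[])

    return vars(parser.parse_args())
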
Example #3
    def testCreateAllModels(self):

        host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1")
        apikey = os.environ.get("TAURUS_APIKEY", "taurus")

        # Trim the metrics configuration down to a much smaller random sample
        # so as not to overload the system under test; we only need to verify
        # that everything returned goes through the right channels.

        metricsConfig = {
            key: value
            for (key, value) in random.sample(
                metric_utils.getMetricsConfiguration().items(), 3)
        }

        expectedMetricNames = []

        for resVal in metricsConfig.itervalues():
            for metricName in resVal["metrics"]:
                expectedMetricNames.append(metricName)

                self.addCleanup(requests.delete,
                                "https://%s/_metrics/custom/%s" %
                                (host, metricName),
                                auth=(apikey, ""),
                                verify=False)

        self.assertGreater(len(expectedMetricNames), 0)

        with patch(
                "taurus_metric_collectors.metric_utils.getMetricsConfiguration",
                return_value=metricsConfig,
                spec_set=metric_utils.getMetricsConfiguration):
            createdModels = metric_utils.createAllModels(host, apikey)

        self.assertEqual(len(createdModels), len(expectedMetricNames))

        for model in createdModels:
            remoteModel = metric_utils.getOneModel(host, apikey, model["uid"])
            self.assertIn(remoteModel["name"], expectedMetricNames)
            # Verify that the model is either in the "ACTIVE" state or the
            # transient "PENDING DATA" or "CREATE PENDING" states
            self.assertIn(remoteModel["status"], [1, 2, 8])
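
The final assertion accepts the transient "PENDING DATA" and "CREATE PENDING"
states in addition to "ACTIVE". Code that needs a fully active model could
poll getOneModel until the desired status appears; a minimal sketch follows,
with the target status code left to the caller because the mapping of 1, 2,
and 8 to state names is not spelled out here.

import time

from taurus_metric_collectors import metric_utils


def waitForModelStatus(host, apiKey, modelId, targetStatus,
                       timeout=60, interval=2):
    # Poll getOneModel until the model reports targetStatus (e.g. the numeric
    # code for "ACTIVE"), raising if the timeout elapses first.
    deadline = time.time() + timeout
    while True:
        model = metric_utils.getOneModel(host, apiKey, modelId)
        if model["status"] == targetStatus:
            return model
        if time.time() >= deadline:
            raise RuntimeError("model %s still in status %r after %s seconds"
                               % (modelId, model["status"], timeout))
        time.sleep(interval)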