def testCreateAllModelsHappyPath(self, requestsMock):
    requestsMock.post.return_value = Mock(status_code=201,
                                          text='[{"uid":"foo", "name":"bar"}]')

    totalModels = len(
      metric_utils.getMetricNamesFromConfig(
        metric_utils.getMetricsConfiguration()))

    metric_utils.createAllModels("localhost", "taurus")
    self.assertEqual(requestsMock.post.call_count, totalModels)

    for args, kwargs in requestsMock.post.call_args_list:
      self.assertEqual(args[0], "https://localhost/_models")
      self.assertIn("data", kwargs)
      data = json.loads(kwargs["data"])
      self.assertIsInstance(data, dict)
      self.assertIn("datasource", data)
      self.assertEqual(data["datasource"], "custom")
      self.assertIn("metricSpec", data)
      self.assertIn("metric", data["metricSpec"])
      self.assertIn("resource", data["metricSpec"])
      self.assertIn("userInfo", data["metricSpec"])
      self.assertIsInstance(data["metricSpec"]["userInfo"], dict)
      self.assertIn("metricType", data["metricSpec"]["userInfo"])
      self.assertIn("metricTypeName", data["metricSpec"]["userInfo"])
      self.assertIn("symbol", data["metricSpec"]["userInfo"])
      self.assertIn("modelParams", data)
  def testCreateAllModelsWithEmptyMetricFilter(self):
    with self.assertRaises(ValueError) as assertContext:
      metric_utils.createAllModels(host="host",
                                   apiKey="apikey",
                                   onlyMetricNames=[])

    self.assertEqual(assertContext.exception.args[0],
                     "onlyMetricNames is empty")
  def testCreateAllModelsWithUnknownsInMetricFilter(self):
    with self.assertRaises(ValueError) as assertContext:
      metric_utils.createAllModels(host="host",
                                   apiKey="apikey",
                                   onlyMetricNames=["a", "b"])

    self.assertIn(
      "elements in onlyMetricNames are not in metrics configuration",
      assertContext.exception.args[0])
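
Together, the two ValueError tests above pin down the validation contract of
onlyMetricNames: an empty list is rejected, and so is any name missing from the
metrics configuration. Below is a caller-side sketch of honoring that contract;
requestedNames and its contents are purely illustrative.

from taurus.metric_collectors import metric_utils

# Sketch: validate a caller-supplied filter against the configured metric
# names before invoking createAllModels.  requestedNames is hypothetical.
requestedNames = ["TWITTER.TWEET.HANDLE.AAPL.VOLUME"]

configuredNames = set(
  metric_utils.getMetricNamesFromConfig(
    metric_utils.getMetricsConfiguration()))

unknownNames = [name for name in requestedNames
                if name not in configuredNames]

if not requestedNames or unknownNames:
  raise ValueError("Cannot create models: filter is empty or contains unknown "
                   "metric names %r" % (unknownNames,))

metric_utils.createAllModels(host="localhost",
                             apiKey="taurus",
                             onlyMetricNames=requestedNames)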
def main():
  logging_support.LoggingSupport.initTool()

  try:
    options = _parseArgs()
    g_log.info("Running %s with options=%r", sys.argv[0], options)

    metric_utils.createAllModels(options["htmServer"], options["apiKey"])
  except SystemExit as e:
    if e.code != 0:
      g_log.exception("create_models failed")
    raise
  except Exception:
    g_log.exception("create_models failed")
    raise
  def testCreateAllModels(self):

    host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1")
    apikey = os.environ.get("TAURUS_APIKEY", "taurus")

    # Resize metrics down to a much smaller random sample of the original
    # so as not to overload the system under test.  We only need to verify
    # that everything returned goes through the right channels.

    metrics = {
      key:value
      for (key, value)
      in random.sample(metric_utils.getMetricsConfiguration().items(), 3)
    }

    with patch("taurus.metric_collectors.metric_utils.getMetricsConfiguration",
               return_value=metrics,
               spec_set=metric_utils.getMetricsConfiguration):
      createdModels = metric_utils.createAllModels(host, apikey)

    allModels = metric_utils.getAllModels(host, apikey)

    for model in createdModels:
      self.addCleanup(requests.delete,
                      "https://%s/_metrics/custom/%s" % (host, model["name"]),
                      auth=(apikey, ""),
                      verify=False)
      remoteModel = metric_utils.getOneModel(host, apikey, model["uid"])
      self.assertDictEqual(remoteModel, model)
      self.assertIn(model, allModels)
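
The cleanup registration above spells out the custom-metric deletion endpoint
inline. A small helper along the following lines would keep that detail in one
place; deleteCustomMetric is an illustrative name, not an existing metric_utils
function.

import requests


def deleteCustomMetric(host, apiKey, metricName):
  """Delete a custom metric via the HTM server's REST API (sketch only).

  Mirrors the requests.delete call used for cleanup in the test above.
  """
  return requests.delete("https://%s/_metrics/custom/%s" % (host, metricName),
                         auth=(apiKey, ""),
                         verify=False)


# e.g., in the test: self.addCleanup(deleteCustomMetric, host, apikey,
#                                    model["name"])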
def _promoteReadyMetricsToModels():
  """Promote unmonitored company metrics that reached
  _NUM_METRIC_DATA_ROWS_THRESHOLD to models
  """

  # Build a map of all configured metric names to metric/model args for
  # promoting to models
  metricsConfig = metric_utils.getMetricsConfiguration()

  readyMetricNames = _filterMetricsReadyForPromotion(
    metricsConfig=metricsConfig,
    allCustomMetrics=metric_utils.getAllCustomMetrics(
      _TAURUS_HTM_SERVER,
      _TAURUS_API_KEY))

  if not readyMetricNames:
    g_log.debug("There are no metrics that are ready for promotion at this time")
    return

  # Promote them to models
  metric_utils.createAllModels(host=_TAURUS_HTM_SERVER,
                               apiKey=_TAURUS_API_KEY,
                               onlyMetricNames=readyMetricNames)
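
_filterMetricsReadyForPromotion is not shown in this snippet. Judging from the
docstring and the arguments passed to it, it selects configured custom metrics
that are still unmonitored and have accumulated enough data rows. The sketch
below only captures that shape; the threshold value, the status constant, and
the "name"/"status"/"last_rowid" field names are assumptions, not taken from
the real implementation.

from taurus.metric_collectors import metric_utils

_NUM_METRIC_DATA_ROWS_THRESHOLD = 1000  # illustrative value only

_METRIC_STATUS_UNMONITORED = 0  # assumed code for an unmonitored metric


def _filterMetricsReadyForPromotion(metricsConfig, allCustomMetrics):
  """Sketch of the promotion filter; field names are assumptions."""
  configuredNames = set(
    metric_utils.getMetricNamesFromConfig(metricsConfig))

  return [
    metricObj["name"]
    for metricObj in allCustomMetrics
    if (metricObj["name"] in configuredNames and
        metricObj["status"] == _METRIC_STATUS_UNMONITORED and
        metricObj["last_rowid"] >= _NUM_METRIC_DATA_ROWS_THRESHOLD)
  ]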
  def testCreateAllModels(self):

    host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1")
    apikey = os.environ.get("TAURUS_APIKEY", "taurus")

    # Resize metrics down to a much smaller random sample of the original
    # so as not to overload the system under test.  We only need to verify
    # that everything returned goes through the right channels.

    metricsConfig = {
      key:value
      for (key, value)
      in random.sample(metric_utils.getMetricsConfiguration().items(), 3)
    }

    expectedMetricNames = []

    for resVal in metricsConfig.itervalues():
      for metricName in resVal["metrics"]:
        expectedMetricNames.append(metricName)

        self.addCleanup(requests.delete,
                        "https://%s/_metrics/custom/%s" % (host, metricName),
                        auth=(apikey, ""),
                        verify=False)

    self.assertGreater(len(expectedMetricNames), 0)

    with patch("taurus.metric_collectors.metric_utils.getMetricsConfiguration",
               return_value=metricsConfig,
               spec_set=metric_utils.getMetricsConfiguration):
      createdModels = metric_utils.createAllModels(host, apikey)

    self.assertEqual(len(createdModels), len(expectedMetricNames))

    for model in createdModels:
      remoteModel = metric_utils.getOneModel(host, apikey, model["uid"])
      self.assertIn(remoteModel["name"], expectedMetricNames)
      # Verify that the model is in the "ACTIVE" state or one of the transient
      # "PENDING DATA" or "CREATE PENDING" states
      self.assertIn(remoteModel["status"], [1, 2, 8])
  def testCreateAllModelsWithMetricNameFilter(self, createCustomHtmModelMock):
    allMetricNames = metric_utils.getMetricNamesFromConfig(
      metric_utils.getMetricsConfiguration())

    subsetOfMetricNames = allMetricNames[:(len(allMetricNames) + 1) // 2]
    self.assertGreater(len(subsetOfMetricNames), 0)

    createCustomHtmModelMock.side_effect = (
      lambda **kwargs: dict(name=kwargs["metricName"],
                            uid=kwargs["metricName"] * 2))

    models = metric_utils.createAllModels(host="host",
                                          apiKey="apikey",
                                          onlyMetricNames=subsetOfMetricNames)

    self.assertEqual(createCustomHtmModelMock.call_count,
                     len(subsetOfMetricNames))

    self.assertEqual(len(models), len(subsetOfMetricNames))

    self.assertItemsEqual(subsetOfMetricNames,
                          [model["name"] for model in models])