def testCreateAllModelsHappyPath(self, requestsMock):
    requestsMock.post.return_value = Mock(status_code=201,
                                          text='[{"uid":"foo", "name":"bar"}]')

    totalModels = len(
      metric_utils.getMetricNamesFromConfig(
        metric_utils.getMetricsConfiguration()))

    metric_utils.createAllModels("localhost", "taurus")
    self.assertEqual(requestsMock.post.call_count, totalModels)

    for args, kwargs in requestsMock.post.call_args_list:
      self.assertEqual(args[0], "https://localhost/_models")
      self.assertIn("data", kwargs)
      data = json.loads(kwargs["data"])
      self.assertIsInstance(data, dict)
      self.assertIn("datasource", data)
      self.assertEqual(data["datasource"], "custom")
      self.assertIn("metricSpec", data)
      self.assertIn("metric", data["metricSpec"])
      self.assertIn("resource", data["metricSpec"])
      self.assertIn("userInfo", data["metricSpec"])
      self.assertIsInstance(data["metricSpec"]["userInfo"], dict)
      self.assertIn("metricType", data["metricSpec"]["userInfo"])
      self.assertIn("metricTypeName", data["metricSpec"]["userInfo"])
      self.assertIn("symbol", data["metricSpec"]["userInfo"])
      self.assertIn("modelParams", data)
Example #3
    def testCreateAllModels(self):

        host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1")
        apikey = os.environ.get("TAURUS_APIKEY", "taurus")

        # Trim the metrics configuration down to a small random sample so as
        # not to overload the system under test.  We only need to verify that
        # everything returned goes through the right channels.

        metricsConfig = {
            key: value
            for (key, value) in random.sample(
                metric_utils.getMetricsConfiguration().items(), 3)
        }

        expectedMetricNames = []

        for resVal in metricsConfig.itervalues():
            for metricName in resVal["metrics"]:
                expectedMetricNames.append(metricName)

                self.addCleanup(requests.delete,
                                "https://%s/_metrics/custom/%s" %
                                (host, metricName),
                                auth=(apikey, ""),
                                verify=False)

        self.assertGreater(len(expectedMetricNames), 0)

        with patch(
                "taurus_metric_collectors.metric_utils.getMetricsConfiguration",
                return_value=metricsConfig,
                spec_set=metric_utils.getMetricsConfiguration):
            createdModels = metric_utils.createAllModels(host, apikey)

        self.assertEqual(len(createdModels), len(expectedMetricNames))

        for model in createdModels:
            remoteModel = metric_utils.getOneModel(host, apikey, model["uid"])
            self.assertIn(remoteModel["name"], expectedMetricNames)
            # Verify that the model is in the "ACTIVE" state or one of the
            # transient "PENDING DATA" or "CREATE PENDING" states
            self.assertIn(remoteModel["status"], [1, 2, 8])
Example #5
    def testCreateAllModelsWithMetricNameFilter(self,
                                                createCustomHtmModelMock):
        allMetricNames = metric_utils.getMetricNamesFromConfig(
            metric_utils.getMetricsConfiguration())

        subsetOfMetricNames = allMetricNames[:(len(allMetricNames) + 1) // 2]
        self.assertGreater(len(subsetOfMetricNames), 0)

        createCustomHtmModelMock.side_effect = (lambda **kwargs: dict(
            name=kwargs["metricName"], uid=kwargs["metricName"] * 2))

        models = metric_utils.createAllModels(
            host="host", apiKey="apikey", onlyMetricNames=subsetOfMetricNames)

        self.assertEqual(createCustomHtmModelMock.call_count,
                         len(subsetOfMetricNames))

        self.assertEqual(len(models), len(subsetOfMetricNames))

        self.assertItemsEqual(subsetOfMetricNames,
                              [model["name"] for model in models])
Example #8
def _promoteReadyMetricsToModels():
  """Promote unmonitored company metrics that reached
  _NUM_METRIC_DATA_ROWS_THRESHOLD to models
  """

  # Load the metrics configuration: a map of companies to their configured
  # metrics and model args
  metricsConfig = metric_utils.getMetricsConfiguration()

  readyMetricNames = _filterMetricsReadyForPromotion(
    metricsConfig=metricsConfig,
    allCustomMetrics=metric_utils.getAllCustomMetrics(
      _TAURUS_HTM_SERVER,
      _TAURUS_API_KEY))

  if not readyMetricNames:
    g_log.debug("There are no metrics that are ready for promotion at this time")
    return

  # Promote them to models
  metric_utils.createAllModels(host=_TAURUS_HTM_SERVER,
                               apiKey=_TAURUS_API_KEY,
                               onlyMetricNames=readyMetricNames)
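
_filterMetricsReadyForPromotion is not shown in this excerpt. A minimal sketch of such a filter, assuming getAllCustomMetrics returns dicts carrying "name", "status", and "last_rowid" fields and that 0 denotes an unmonitored metric (all of these are assumptions, not the confirmed Taurus API):

def _filterMetricsReadyForPromotion(metricsConfig, allCustomMetrics):
  """Return the names of unmonitored metrics with enough accumulated data.

  Sketch only, under the assumptions stated above; the real implementation
  may differ.
  """
  # Metric names that appear in the configuration
  configuredNames = set(
    metric_utils.getMetricNamesFromConfig(metricsConfig))

  return [
    metricObj["name"]
    for metricObj in allCustomMetrics
    if (metricObj["name"] in configuredNames and
        metricObj["status"] == 0 and  # assumed "unmonitored" status code
        metricObj["last_rowid"] >= _NUM_METRIC_DATA_ROWS_THRESHOLD)
  ]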
Example #9
def testGetMetricsConfiguration(self):
  metrics = metric_utils.getMetricsConfiguration()
  self.assertIsInstance(metrics, dict)
  self.assertTrue(metrics)
  for companyName, details in metrics.iteritems():
    self.assertIsInstance(companyName, basestring)
    self.assertIsInstance(details, dict)
    self.assertTrue(details)
    self.assertIn("metrics", details)
    self.assertIn("stockExchange", details)
    self.assertIn("symbol", details)
    for metricName, metric in details["metrics"].iteritems():
      self.assertIsInstance(metricName, basestring)
      self.assertIsInstance(metric, dict)
      self.assertTrue(metric)
      self.assertIn("metricType", metric)
      self.assertIn("metricTypeName", metric)
      self.assertIn("modelParams", metric)
      self.assertIn("provider", metric)
      if metric["provider"] == "twitter":
        self.assertIn("screenNames", metric)
      elif metric["provider"] == "xignite":
        self.assertIn("sampleKey", metric)
def loadMetricSpecs():
  """ Load metric specs for the xignite stock provider

  :returns: a sequence of StockMetricSpec objects

  Excerpt from metrics.json:
  {
    "Accenture": {
      "stockExchange": "NYSE",
      "symbol": "ACN",
      "metrics": {
        "XIGNITE.ACN.CLOSINGPRICE": {
          "metricTypeName": "Stock Price",
          "provider": "xignite",
          "sampleKey": "Close"
        },
        "XIGNITE.ACN.VOLUME": {
          "metricTypeName": "Stock Volume",
          "provider": "xignite",
          "sampleKey": "Volume"
        },
        . . .
      }
    },
    . . .
  }
  """
  return tuple(
    StockMetricSpec(
      metricName=metricName,
      symbol=resVal["symbol"].upper(),
      stockExchange=resVal["stockExchange"],
      sampleKey=metricVal["sampleKey"])
    for resVal in getMetricsConfiguration().itervalues()
    for metricName, metricVal in resVal["metrics"].iteritems()
    if metricVal["provider"] == "xignite" and "sampleKey" in metricVal)
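
A quick usage sketch, assuming StockMetricSpec exposes its constructor keyword arguments as attributes of the same names (an assumption in this excerpt):

if __name__ == "__main__":
  # Enumerate the xignite stock metric specs loaded from metrics.json
  for spec in loadMetricSpecs():
    print "%s (%s:%s) sampleKey=%s" % (
      spec.metricName, spec.stockExchange, spec.symbol, spec.sampleKey)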