def testCreateAllModelsHappyPath(self, requestsMock):
  requestsMock.post.return_value = Mock(
    status_code=201,
    text='[{"uid":"foo", "name":"bar"}]')

  totalModels = len(
    metric_utils.getMetricNamesFromConfig(
      metric_utils.getMetricsConfiguration()))

  metric_utils.createAllModels("localhost", "taurus")

  self.assertEqual(requestsMock.post.call_count, totalModels)

  for args, kwargs in requestsMock.post.call_args_list:
    self.assertEqual(args[0], "https://localhost/_models")
    self.assertIn("data", kwargs)

    data = json.loads(kwargs["data"])
    self.assertIsInstance(data, dict)
    self.assertIn("datasource", data)
    self.assertEqual(data["datasource"], "custom")
    self.assertIn("metricSpec", data)
    self.assertIn("metric", data["metricSpec"])
    self.assertIn("resource", data["metricSpec"])
    self.assertIn("userInfo", data["metricSpec"])
    self.assertIsInstance(data["metricSpec"]["userInfo"], dict)
    self.assertIn("metricType", data["metricSpec"]["userInfo"])
    self.assertIn("metricTypeName", data["metricSpec"]["userInfo"])
    self.assertIn("symbol", data["metricSpec"]["userInfo"])
    self.assertIn("modelParams", data)
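# A hypothetical example (not part of the test suite) of a model-creation
# request body consistent with the assertions above. Only the key structure
# and the "custom" datasource are grounded in the test; the field values are
# invented for illustration.
_EXAMPLE_MODEL_REQUEST = {
  "datasource": "custom",
  "metricSpec": {
    "metric": "XIGNITE.MMM.VOLUME",
    "resource": "3M",
    "userInfo": {
      "metricType": "StockVolume",
      "metricTypeName": "Stock Volume",
      "symbol": "MMM"
    }
  },
  "modelParams": {"minResolution": 0.2}
}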
def _filterMetricsReadyForPromotion(metricsConfig, allCustomMetrics):
  """Determine which metrics need to be promoted to models.

  A qualified metric meets all of the following criteria:

    1. It is presently unmonitored
    2. Its name exists in the metric collector's current metrics configuration
    3. It has accumulated at least _NUM_METRIC_DATA_ROWS_THRESHOLD metric data
       rows in Taurus Engine

  :param dict metricsConfig: Metric configuration object that defines all
    instances and metrics for all data collectors; as returned by
    `metric_utils.getMetricsConfiguration()`

  :param sequence allCustomMetrics: Custom metric info dicts from Taurus
    Engine as returned by `metric_utils.getAllCustomMetrics()`

  :returns: Names of metrics that need to be promoted to models
  :rtype: sequence
  """
  configuredMetricNames = set(
    metric_utils.getMetricNamesFromConfig(metricsConfig))

  # Compile the unmonitored metrics that are in our current metrics
  # configuration and are ready for promotion to models
  return tuple(
    obj["name"] for obj in allCustomMetrics
    if obj["status"] == _METRIC_STATUS_UNMONITORED and
    obj["name"] in configuredMetricNames and
    obj["last_rowid"] >= _NUM_METRIC_DATA_ROWS_THRESHOLD)
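# A minimal, hypothetical sketch (not part of the original module) exercising
# the promotion filter in isolation. It assumes it lives in the same module as
# _filterMetricsReadyForPromotion; every metric name and value below is
# invented for illustration.
def _demoFilterMetricsReadyForPromotion():
  threshold = _NUM_METRIC_DATA_ROWS_THRESHOLD

  # Shaped like metric_utils.getMetricsConfiguration() output
  metricsConfig = {
    "3M": {
      "metrics": {
        "XIGNITE.MMM.VOLUME": {"metricType": "StockVolume"},
        "XIGNITE.MMM.CLOSINGPRICE": {"metricType": "StockPrice"}
      },
      "stockExchange": "NYSE",
      "symbol": "MMM"
    }
  }

  # Shaped like metric_utils.getAllCustomMetrics() output
  allCustomMetrics = [
    # Unmonitored, configured, and at the data-row threshold: promoted
    {"name": "XIGNITE.MMM.VOLUME",
     "status": _METRIC_STATUS_UNMONITORED,
     "last_rowid": threshold},
    # Unmonitored and configured, but short of the threshold: skipped
    {"name": "XIGNITE.MMM.CLOSINGPRICE",
     "status": _METRIC_STATUS_UNMONITORED,
     "last_rowid": threshold - 1},
    # Unmonitored with enough data, but not in the configuration: skipped
    {"name": "XIGNITE.ACE.CLOSINGPRICE",
     "status": _METRIC_STATUS_UNMONITORED,
     "last_rowid": threshold}
  ]

  assert _filterMetricsReadyForPromotion(metricsConfig, allCustomMetrics) == (
    "XIGNITE.MMM.VOLUME",)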
def testCreateAllModelsWithMetricNameFilter(self, createCustomHtmModelMock):
  allMetricNames = metric_utils.getMetricNamesFromConfig(
    metric_utils.getMetricsConfiguration())

  subsetOfMetricNames = allMetricNames[:(len(allMetricNames) + 1) // 2]
  self.assertGreater(len(subsetOfMetricNames), 0)

  createCustomHtmModelMock.side_effect = (
    lambda **kwargs: dict(name=kwargs["metricName"],
                          uid=kwargs["metricName"] * 2))

  models = metric_utils.createAllModels(host="host",
                                        apiKey="apikey",
                                        onlyMetricNames=subsetOfMetricNames)

  self.assertEqual(createCustomHtmModelMock.call_count,
                   len(subsetOfMetricNames))
  self.assertEqual(len(models), len(subsetOfMetricNames))
  self.assertItemsEqual(subsetOfMetricNames,
                        [model["name"] for model in models])
def testGetMetricNamesFromConfig(self):
  jsonConfig = ("""
    {
      "3M": {
        "metrics": {
          "TWITTER.TWEET.HANDLE.MMM.VOLUME": {
            "metricType": "TwitterVolume",
            "metricTypeName": "Twitter Volume",
            "modelParams": {
              "minResolution": 0.6
            },
            "provider": "twitter",
            "screenNames": [
              "3M"
            ]
          },
          "XIGNITE.MMM.VOLUME": {
            "metricType": "StockVolume",
            "metricTypeName": "Stock Volume",
            "modelParams": {
              "minResolution": 0.2
            },
            "provider": "xignite",
            "sampleKey": "Volume"
          }
        },
        "stockExchange": "NYSE",
        "symbol": "MMM"
      },
      "ACE Ltd": {
        "metrics": {
          "TWITTER.TWEET.HANDLE.ACE.VOLUME": {
            "metricType": "TwitterVolume",
            "metricTypeName": "Twitter Volume",
            "modelParams": {
              "minResolution": 0.6
            },
            "provider": "twitter",
            "screenNames": []
          },
          "XIGNITE.ACE.CLOSINGPRICE": {
            "metricType": "StockPrice",
            "metricTypeName": "Stock Price",
            "modelParams": {
              "minResolution": 0.2
            },
            "provider": "xignite",
            "sampleKey": "Close"
          }
        },
        "stockExchange": "NYSE",
        "symbol": "ACE"
      }
    }
  """)

  metricNames = metric_utils.getMetricNamesFromConfig(json.loads(jsonConfig))

  expected = [
    "TWITTER.TWEET.HANDLE.MMM.VOLUME",
    "XIGNITE.MMM.VOLUME",
    "TWITTER.TWEET.HANDLE.ACE.VOLUME",
    "XIGNITE.ACE.CLOSINGPRICE"
  ]

  self.assertItemsEqual(expected, metricNames)