def testCreateAllModels(self): host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1") apikey = os.environ.get("TAURUS_APIKEY", "taurus") # Resize metrics down to a much smaller random sample of the original # so as to not overload the system under test. We need only to test that # everything returned goes through the right channels. metrics = { key:value for (key, value) in random.sample(metric_utils.getMetricsConfiguration().items(), 3) } with patch("taurus.metric_collectors.metric_utils.getMetricsConfiguration", return_value=metrics, spec_set=metric_utils.getMetricsConfiguration): createdModels = metric_utils.createAllModels(host, apikey) allModels = metric_utils.getAllModels(host, apikey) for model in createdModels: self.addCleanup(requests.delete, "https://%s/_metrics/custom/%s" % (host, model["name"]), auth=(apikey, ""), verify=False) remoteModel = metric_utils.getOneModel(host, apikey, model["uid"]) self.assertDictEqual(remoteModel, model) self.assertIn(model, allModels)
def testCreateAllModels(self): host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1") apikey = os.environ.get("TAURUS_APIKEY", "taurus") # Resize metrics down to a much smaller random sample of the original # so as to not overload the system under test. We need only to test that # everything returned goes through the right channels. metrics = { key: value for (key, value) in random.sample( metric_utils.getMetricsConfiguration().items(), 3) } with patch( "taurus.metric_collectors.metric_utils.getMetricsConfiguration", return_value=metrics, spec_set=metric_utils.getMetricsConfiguration): createdModels = metric_utils.createAllModels(host, apikey) allModels = metric_utils.getAllModels(host, apikey) for model in createdModels: self.addCleanup(requests.delete, "https://%s/_metrics/custom/%s" % (host, model["name"]), auth=(apikey, ""), verify=False) remoteModel = metric_utils.getOneModel(host, apikey, model["uid"]) self.assertDictEqual(remoteModel, model) self.assertIn(model, allModels)
def testGetOneModel(self, requestsMock):
  # We'll mock out only the minimal response that would be required to
  # satisfy the requirements of metric_utils.getOneModel(), and then assert
  # that the returned result is the json-decoded response from the
  # mocked-out API.
  requestsMock.get.return_value = Mock(status_code=200,
                                       text='[{"parameters":"True"}]')

  result = metric_utils.getOneModel("localhost", "taurus", "foo")

  requestsMock.get.assert_called_once_with("https://localhost/_models/foo",
                                           verify=ANY,
                                           auth=("taurus", ""))

  self.assertEqual(result, {"parameters": "True"})
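# The test above pins down the observable behavior of
# metric_utils.getOneModel: an HTTPS GET against /_models/<modelId> with
# HTTP basic auth, whose json-decoded first element is returned to the
# caller. The sketch below is a hypothetical reconstruction from those
# assertions only, not the actual taurus implementation; the error handling
# and the hard-coded verify=False are assumptions.

def _getOneModelSketch(host, apiKey, modelId):
  # Imports are kept local so the sketch stays self-contained
  import json
  import requests

  response = requests.get("https://%s/_models/%s" % (host, modelId),
                          auth=(apiKey, ""),
                          verify=False)
  response.raise_for_status()  # assumption: propagate non-2xx responses
  return json.loads(response.text)[0]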
def main():
  logging_support.LoggingSupport.initTool()

  try:
    options = _parseArgs()
    g_log.info("Running %s with options=%r", sys.argv[0], options)

    if options["unmonitorAll"]:
      models = metric_utils.getAllModels(host=options["htmServer"],
                                         apiKey=options["apiKey"])
    else:
      models = tuple(
        metric_utils.getOneModel(host=options["htmServer"],
                                 apiKey=options["apiKey"],
                                 modelId=modelId)
        for modelId in options["modelIds"])

    # Save model objects to file for use by monitor_metrics
    with open(options["modelsFilePath"], "w") as outFile:
      json.dump(models, outFile, indent=4)

    if not models:
      g_log.info("No models to unmonitor")
      return

    g_log.info("Unmonitoring %d models", len(models))

    for i, model in enumerate(models, 1):
      modelId = model["uid"]
      metric_utils.unmonitorMetric(host=options["htmServer"],
                                   apiKey=options["apiKey"],
                                   modelId=modelId)
      g_log.info("Unmonitored metric=%s (%d of %d)", modelId, i, len(models))

    g_log.info("Unmonitored %d models", len(models))
  except SystemExit as e:
    if e.code != 0:
      g_log.exception("unmonitor_metrics failed")
    raise
  except Exception:
    g_log.exception("unmonitor_metrics failed")
    raise
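# main() only relies on _parseArgs() returning a dict-like options object
# with the keys htmServer, apiKey, unmonitorAll, modelIds, and
# modelsFilePath. The real _parseArgs is not part of this excerpt; the
# sketch below is a hypothetical argparse-based stand-in, and every flag
# name and default value is an assumption.

def _parseArgsSketch():
  import argparse

  parser = argparse.ArgumentParser(
    description="Unmonitor models on an HTM server (sketch)")
  parser.add_argument("--server", dest="htmServer", default="127.0.0.1")
  parser.add_argument("--apikey", dest="apiKey", default="taurus")
  parser.add_argument("--unmonitorall", dest="unmonitorAll",
                      action="store_true", default=False)
  parser.add_argument("--modelid", dest="modelIds", action="append",
                      default=[])
  parser.add_argument("--modelsout", dest="modelsFilePath",
                      default="models.json")
  # vars() turns the Namespace into the plain dict that main() indexes
  return vars(parser.parse_args())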
def testCreateAllModels(self): host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1") apikey = os.environ.get("TAURUS_APIKEY", "taurus") # Resize metrics down to a much smaller random sample of the original # so as to not overload the system under test. We need only to test that # everything returned goes through the right channels. metricsConfig = { key:value for (key, value) in random.sample(metric_utils.getMetricsConfiguration().items(), 3) } expectedMetricNames = [] for resVal in metricsConfig.itervalues(): for metricName in resVal["metrics"]: expectedMetricNames.append(metricName) self.addCleanup(requests.delete, "https://%s/_metrics/custom/%s" % (host, metricName), auth=(apikey, ""), verify=False) self.assertGreater(len(expectedMetricNames), 0) with patch("taurus.metric_collectors.metric_utils.getMetricsConfiguration", return_value=metricsConfig, spec_set=metric_utils.getMetricsConfiguration): createdModels = metric_utils.createAllModels(host, apikey) self.assertEqual(len(createdModels), len(expectedMetricNames)) for model in createdModels: remoteModel = metric_utils.getOneModel(host, apikey, model["uid"]) self.assertIn(remoteModel["name"], expectedMetricNames) # Verify that the model is either in "ACTIVE" or the transient # "PENDNG DATA" or "CREATE PENDING" states self.assertIn(remoteModel["status"], [1, 2, 8])