def deleteModel(metricId):
  """ Delete the model (and its metric registration) identified by metricId.

  :param metricId: unique id of the metric whose model should be deleted
  :returns: JSON-encoded ``{"result": "success"}`` payload
  :raises web.notfound: if no metric exists for metricId (either at lookup
      or at unmonitor time)
  :raises NotAllowedResponse: if the metric is an autostack metric, which
      may not be deleted through this endpoint
  """
  try:
    with web.ctx.connFactory() as conn:
      metricRow = repository.getMetric(conn, metricId)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound(
        "ObjectNotFoundError Metric not found: Metric ID: %s" % metricId)

  # Autostack metrics are not standalone models; refuse to delete them here.
  if metricRow.datasource == "autostack":
    raise NotAllowedResponse(
        {"result": ("Not a standalone model=%s; datasource=%s. Unable"
                    " to DELETE from this endpoint") % (
                        metricId, metricRow.datasource,)})

  log.debug("Deleting model for %s metric=%s", metricRow.datasource, metricId)

  with web.ctx.connFactory() as conn:
    repository.deleteModel(conn, metricId)

  # NOTE: this is the new way using datasource adapters
  try:
    createDatasourceAdapter(metricRow.datasource).unmonitorMetric(metricId)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound(
        "ObjectNotFoundError Metric not found: Metric ID: %s" % (metricId,))

  return utils.jsonEncode({'result': 'success'})
def deleteModel(metricId):
  """ Remove the model associated with the given metric id.

  :param metricId: unique metric/model identifier
  :returns: JSON-encoded {"result": "success"} on success
  :raises web.notfound: when the metric cannot be found
  :raises NotAllowedResponse: when the metric belongs to an autostack and
      therefore cannot be deleted from this endpoint
  """
  connFactory = web.ctx.connFactory

  try:
    with connFactory() as conn:
      metricRow = repository.getMetric(conn, metricId)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound(
        "ObjectNotFoundError Metric not found: Metric ID: %s" % metricId)

  if metricRow.datasource == "autostack":
    # Autostack models are managed elsewhere; reject the request.
    errorText = ("Not a standalone model=%s; datasource=%s. Unable"
                 " to DELETE from this endpoint") % (metricId,
                                                     metricRow.datasource,)
    raise NotAllowedResponse({"result": errorText})

  log.debug("Deleting model for %s metric=%s", metricRow.datasource, metricId)

  with connFactory() as conn:
    repository.deleteModel(conn, metricId)

  # NOTE: this is the new way using datasource adapters
  adapter = createDatasourceAdapter(metricRow.datasource)
  try:
    adapter.unmonitorMetric(metricId)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound(
        "ObjectNotFoundError Metric not found: Metric ID: %s" % (metricId,))

  return utils.jsonEncode({'result': 'success'})
def testImportModelAutostack(self, adapterMock, ctxMock, repositoryMock,
                             quotaRepositoryMock, _engineMock):
  """ Importing an autostack model should route through the adapter's
  importModel and return the metric row fetched from the repository.
  """
  nativeMetric = {
      "type": "autostack",
      "name": "test1",
      "region": "us-west-2",
      "datasource": "cloudwatch",
      "filters": {"tag:Name": ["*d*"]},
      "metric": {
          "metric": "DiskWriteBytes",
          "namespace": "AWS/EC2"
      }
  }

  quotaRepositoryMock.getInstanceCount.return_value = 0

  # Autospec the real adapter's importModel so the mock enforces the
  # genuine call signature.
  realAdapter = createDatasourceAdapter("autostack")
  adapterMock.return_value.importModel = create_autospec(
      realAdapter.importModel)

  result = models_api.ModelHandler.createModel(nativeMetric)

  self.assertIs(result, repositoryMock.getMetric.return_value)
  repositoryMock.getMetric.assert_called_once_with(
      ctxMock.connFactory.return_value.__enter__.return_value,
      adapterMock.return_value.importModel.return_value)
def _collect(task):
  """ Executed via multiprocessing Pool: Collect metric data and
  corresponding resource status.

  :param task: a _DataCollectionTask instance
  :returns: True (result itself is delivered via task.resultsQueue)
  """
  log = grok_logging.getExtendedLogger(MetricCollector.__name__)

  startTime = time.time()
  result = _DataCollectionResult(metricID=task.metricID)

  dsAdapter = None
  try:
    dsAdapter = createDatasourceAdapter(task.datasource)
    result.data, result.nextCallStart = dsAdapter.getMetricData(
        metricSpec=task.metricSpec,
        start=task.rangeStart,
        end=None)
  except Exception as e:  # pylint: disable=W0703
    # Best-effort: ship the exception back to the consumer instead of dying
    log.exception("getMetricData failed in task=%s", task)
    result.data = e

  try:
    if task.updateResourceStatus:
      result.resourceStatus = dsAdapter.getMetricResourceStatus(
          metricSpec=task.metricSpec)
  except Exception as e:  # pylint: disable=W0703
    log.exception("getMetricResourceStatus failed in task=%s", task)
    result.resourceStatus = e

  result.duration = time.time() - startTime

  task.resultsQueue.put(result)

  return True
def _collect(task):
  """ Executed via multiprocessing Pool: Collect metric data and
  corresponding resource status.

  Failures are captured into the result object (result.data /
  result.resourceStatus hold the exception) rather than propagated.

  :param task: a _DataCollectionTask instance
  :returns: True; the _DataCollectionResult goes out via task.resultsQueue
  """
  logger = grok_logging.getExtendedLogger(MetricCollector.__name__)

  collectionStart = time.time()
  collectionResult = _DataCollectionResult(metricID=task.metricID)

  adapter = None
  try:
    adapter = createDatasourceAdapter(task.datasource)
    collectionResult.data, collectionResult.nextCallStart = (
        adapter.getMetricData(metricSpec=task.metricSpec,
                              start=task.rangeStart,
                              end=None))
  except Exception as exc:  # pylint: disable=W0703
    logger.exception("getMetricData failed in task=%s", task)
    collectionResult.data = exc

  try:
    if task.updateResourceStatus:
      collectionResult.resourceStatus = adapter.getMetricResourceStatus(
          metricSpec=task.metricSpec)
  except Exception as exc:  # pylint: disable=W0703
    logger.exception("getMetricResourceStatus failed in task=%s", task)
    collectionResult.resourceStatus = exc

  collectionResult.duration = time.time() - collectionStart

  task.resultsQueue.put(collectionResult)
  return True
def testInstanceDefaultsHandlerPOST(self, listMetricIDsMock,
                                    _engineFactoryMock):
  """ Test for POST "/_instances/region/namespace/instanceId"
  response is validated for appropriate headers, body and status
  """
  listMetricIDsMock.return_value = []
  region = "us-west-2"

  # Currently we are not supporting certain namespaces
  # unsupportedNamespaces reflects such unsupported namespaces
  # These namespaces are currently validated for "400 Bad Request"
  # and expected error message.
  # Update this list with changes in namespace support
  unsupportedNamespaces = ("Billing", "StorageGateway")

  for namespace in unsupportedNamespaces:
    response = self.app.post("/%s/AWS/%s/abcd1234" % (region, namespace),
                             headers=self.headers, status="*")
    assertions.assertBadRequest(self, response, "json")
    result = json.loads(response.body)["result"]
    self.assertTrue(result.startswith("Not supported."))

  cwAdapter = datasource.createDatasourceAdapter("cloudwatch")

  # Every namespace the cloudwatch adapter advertises must POST cleanly
  supportedNamespaces = {
      metricInfo["namespace"]
      for resourceInfo in cwAdapter.describeSupportedMetrics().values()
      for metricInfo in resourceInfo.values()}

  for namespace in supportedNamespaces:
    response = self.app.post("/%s/%s/abcd1234" % (region, namespace),
                             headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, dict)
    self.assertEqual(result, {"result": "success"})
def DELETE(self, metricName):
  """ Delete the custom metric identified by metricName.

  :param metricName: name of the custom metric to delete
  :returns: JSON string {'result': 'success'} on success
  :raises web.notfound: when no metric with that name exists
  """
  adapter = createDatasourceAdapter("custom")
  try:
    adapter.deleteMetricByName(metricName)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound("Metric not found. Metric name=%s" % (metricName,))
  else:
    self.addStandardHeaders()
    return json.dumps({'result': 'success'})
def testCreateDatasourceAdapter(self): """ Make sure createDatasourceAdapter is able to create all expected adapters """ for datasourceName in self.EXPECTED_DATASOURCE_NAMES: adapter = datasource_adapter_factory.createDatasourceAdapter( datasourceName) self.assertEqual(adapter._DATASOURCE, datasourceName) self.assertEqual(adapter.__class__.__name__, "_" + datasourceName.capitalize() + "DatasourceAdapter")
def DELETE(self, metricName):
  """ Remove the custom metric whose name matches metricName.

  :param metricName: name of the custom metric
  :returns: JSON string {'result': 'success'}
  :raises web.notfound: if the metric does not exist
  """
  customAdapter = createDatasourceAdapter("custom")

  try:
    customAdapter.deleteMetricByName(metricName)
  except app_exceptions.ObjectNotFoundError:
    raise web.notfound("Metric not found. Metric name=%s" % (metricName,))

  self.addStandardHeaders()
  return json.dumps({'result': 'success'})
def _exportNativeMetric(metric):
  """ Export a model definition through the metric's datasource adapter.

  :param metric: metric row exposing `datasource` and `uid` attributes
  :returns: whatever the adapter's exportModel produces for metric.uid
  """
  adapter = createDatasourceAdapter(metric.datasource)
  return adapter.exportModel(metric.uid)
def createModel(cls, modelSpec=None):
  """ Create or import a model described by modelSpec.

  NOTE MER-3479: this code path is presently incorrectly used for two
  purposes:
    * Creating CloudWatch models (correct)
    * Importing of all types of metrics (not desirable; there should be a
      separate endpoint or an import-specific flag in this endpoint for
      importing that facilitates slightly different behavior, such as
      suppressing certain errors to allow for re-import in case of
      transient error part way through the prior import)

  :param modelSpec: model specification dict (legacy formats are upgraded)
  :returns: the metric row from the repository for the created model
  :raises InvalidRequestResponse: on missing/invalid spec or unsupported
      metric
  """
  if not modelSpec:
    # Metric data is missing
    log.error("Data is missing in request, raising BadRequest exception")
    raise InvalidRequestResponse({"result": "Metric data is missing"})

  # TODO MER-3479: import using import-specific endpoint
  # NOTE: pending MER-3479, this is presently a hack for exercising
  #   the adapter import API
  importing = False
  if modelSpec.get("datasource") == "custom":
    # Convert to new grok-custom metric modelSpec format
    # NOTE: backward compatibility during first phase refactoring
    modelSpec = cls.upgradeCustomModelSpec(modelSpec)
    if "data" in modelSpec:
      importing = True
  elif (modelSpec.get("datasource") == "cloudwatch" and
        "filters" not in modelSpec):
    if "type" in modelSpec:
      # The legacy cloudwatch import modelSpec had the "type" property
      assert modelSpec["type"] == "metric", repr(modelSpec)
      importing = True
      # Convert to new grok-custom metric modelSpec format
      # NOTE: backward compatibility during first phase refactoring
      modelSpec = cls.upgradeCloudwatchModelSpec(modelSpec)
  elif (modelSpec.get("datasource") == "autostack" or
        modelSpec.get("type") == "autostack"):
    importing = True
    # Convert to new autostack metric modelSpec format
    # NOTE: backward compatibility during first phase refactoring
    modelSpec = cls.upgradeAutostackModelSpec(modelSpec)

  try:
    with web.ctx.connFactory() as conn:
      with conn.begin():
        adapter = createDatasourceAdapter(modelSpec["datasource"])

        # Enforce quota before touching anything
        if modelSpec["datasource"] == "custom":
          checkQuotaForCustomMetricAndRaise(conn)
        else:
          instanceName = adapter.getInstanceNameForModelSpec(modelSpec)
          checkQuotaForInstanceAndRaise(conn, instanceName)

        try:
          if importing:
            # TODO MER-3479: import using import-specific endpoint
            # NOTE: pending MER-3479, this is presently a hack for
            #   exercising the adapter import API
            metricId = adapter.importModel(modelSpec)
          else:
            metricId = adapter.monitorMetric(modelSpec)
        except app_exceptions.MetricAlreadyMonitored as e:
          metricId = e.uid

      return repository.getMetric(conn, metricId)
  except (ValueError, app_exceptions.MetricNotSupportedError) as e:
    raise InvalidRequestResponse({"result": repr(e)})
def POST(self, region, namespace, instanceId=None):
  """ Monitor a set of default metrics for a specific instance

  ::

      POST /_instances/{region}/{namespace}/{instanceId}

  Returns:

  ::

      {"result": "success"}

  OR

  Monitor a set of default metrics for multiple specific instances

  ::

      POST /_instances/{region}/{namespace}

  POST data:

  ::

      [{instanceId}, ...]

  Returns:

  ::

      {"result": "success"}

  Note: We expect a 200 OK even when attempting to POST to an instance
  in the wrong namespace or the wrong region, this saves the overhead of
  asking AWS if we're dealing with a valid instance in the given namespace
  or region with every POST request. We expect the CLI user to know the
  correct instance ID.
  """
  if instanceId is None:
    # Batch form: the list of instance ids arrives in the request body
    try:
      dimension = None
      instances = json.loads(web.data())
    except Exception:
      # FIX: was a bare `except:`, which also swallowed SystemExit and
      # KeyboardInterrupt; narrowed so only real errors map to 400
      raise InvalidRequestResponse({"result": "Invalid request"})
  else:
    (dimension, _, identifier) = instanceId.rpartition("/")
    instances = [identifier]

  # Check for invalid region or namespace
  cwAdapter = datasource.createDatasourceAdapter("cloudwatch")

  # Renamed loop variable so it no longer shadows the `region` parameter
  supportedRegions = set(regionName for regionName, _desc
                         in cwAdapter.describeRegions())
  if region not in supportedRegions:
    raise InvalidRequestResponse({"result": ("Not supported. Region '%s' was"
                                             " not found.") % region})

  supportedNamespaces = set()
  for resourceInfo in cwAdapter.describeSupportedMetrics().values():
    for metricInfo in resourceInfo.values():
      supportedNamespaces.add(metricInfo["namespace"])

  if namespace not in supportedNamespaces:
    raise InvalidRequestResponse({"result": ("Not supported. Namespace '%s' "
                                             "was not found.") % namespace})

  try:
    # Attempt to validate instances list using validictory
    validate(instances, _INSTANCES_MODEL_CREATION_SCHEMA)
  except ValidationError as e:
    response = "InvalidArgumentsError: " + str(e)
    raise InvalidRequestResponse({"result": response})

  if instances:
    for instanceId in instances:
      server = "/".join([region, namespace, instanceId])
      with web.ctx.connFactory() as conn:
        numMetrics = repository.getMetricCountForServer(conn, server)
      if numMetrics > 0:
        # Metrics exist for instance id.
        pass
      else:
        try:
          resourceType = cloudwatch.NAMESPACE_TO_RESOURCE_TYPE[namespace]
        except KeyError:
          raise InvalidRequestResponse({"result": "Not supported."})

        modelSpecs = cwAdapter.getDefaultModelSpecs(
            resourceType, region, instanceId, dimension)

        for modelSpec in modelSpecs:
          ModelHandler.createModel(modelSpec)

  self.addStandardHeaders()
  return encodeJson({"result": "success"})
def POST(self, region, namespace, instanceId=None):
  """ Monitor a set of default metrics for a specific instance

  ::

      POST /_instances/{region}/{namespace}/{instanceId}

  Returns:

  ::

      {"result": "success"}

  OR

  Monitor a set of default metrics for multiple specific instances

  ::

      POST /_instances/{region}/{namespace}

  POST data:

  ::

      [{instanceId}, ...]

  Returns:

  ::

      {"result": "success"}

  Note: We expect a 200 OK even when attempting to POST to an instance
  in the wrong namespace or the wrong region, this saves the overhead of
  asking AWS if we're dealing with a valid instance in the given namespace
  or region with every POST request. We expect the CLI user to know the
  correct instance ID.
  """
  if instanceId is None:
    try:
      dimension = None
      instances = json.loads(web.data())
    except Exception:
      # FIX: previously a bare `except:` that also caught SystemExit and
      # KeyboardInterrupt; only genuine errors should become a 400
      raise InvalidRequestResponse({"result": "Invalid request"})
  else:
    # Single-instance form; instanceId may carry a "dimension/identifier"
    # prefix
    (dimension, _, identifier) = instanceId.rpartition("/")
    instances = [identifier]

  # Check for invalid region or namespace
  cwAdapter = datasource.createDatasourceAdapter("cloudwatch")

  # Generator variable renamed to avoid shadowing the `region` parameter
  supportedRegions = set(
      supportedRegion for supportedRegion, _desc in cwAdapter.describeRegions())
  if region not in supportedRegions:
    raise InvalidRequestResponse({
        "result": ("Not supported. Region '%s' was"
                   " not found.") % region})

  supportedNamespaces = set()
  for resourceInfo in cwAdapter.describeSupportedMetrics().values():
    for metricInfo in resourceInfo.values():
      supportedNamespaces.add(metricInfo["namespace"])

  if namespace not in supportedNamespaces:
    raise InvalidRequestResponse({
        "result": ("Not supported. Namespace '%s' "
                   "was not found.") % namespace})

  try:
    # Attempt to validate instances list using validictory
    validate(instances, _INSTANCES_MODEL_CREATION_SCHEMA)
  except ValidationError as e:
    response = "InvalidArgumentsError: " + str(e)
    raise InvalidRequestResponse({"result": response})

  if instances:
    for instanceId in instances:
      server = "/".join([region, namespace, instanceId])
      with web.ctx.connFactory() as conn:
        numMetrics = repository.getMetricCountForServer(conn, server)
      if numMetrics > 0:
        # Metrics exist for instance id.
        pass
      else:
        try:
          resourceType = cloudwatch.NAMESPACE_TO_RESOURCE_TYPE[namespace]
        except KeyError:
          raise InvalidRequestResponse({"result": "Not supported."})

        modelSpecs = cwAdapter.getDefaultModelSpecs(
            resourceType, region, instanceId, dimension)

        for modelSpec in modelSpecs:
          ModelHandler.createModel(modelSpec)

  self.addStandardHeaders()
  return encodeJson({"result": "success"})
def _exportNativeMetric(metric):
  """ Build the exportable model definition for the given metric.

  :param metric: metric row with `datasource` and `uid` attributes
  :returns: the adapter's exportModel result for this metric's uid
  """
  datasourceName = metric.datasource
  exporter = createDatasourceAdapter(datasourceName)
  return exporter.exportModel(metric.uid)
def createModel(cls, modelSpec=None):
  """ Create a new model, or import one, from the supplied modelSpec.

  NOTE MER-3479: this code path is presently incorrectly used for two
  purposes:
    * Creating CloudWatch models (correct)
    * Importing of all types of metrics (not desirable; there should be a
      separate endpoint or an import-specific flag in this endpoint for
      importing that facilitates slightly different behavior, such as
      suppressing certain errors to allow for re-import in case of
      transient error part way through the prior import)

  :param modelSpec: model specification dict; legacy custom/cloudwatch/
      autostack formats are upgraded in place
  :returns: repository metric row for the new model
  :raises InvalidRequestResponse: when the spec is missing, invalid, or the
      metric is not supported
  """
  if not modelSpec:
    # Metric data is missing
    log.error("Data is missing in request, raising BadRequest exception")
    raise InvalidRequestResponse({"result": "Metric data is missing"})

  # TODO MER-3479: import using import-specific endpoint
  # NOTE: pending MER-3479, this is presently a hack for exercising
  #   the adapter import API
  importing = False
  requestedDatasource = modelSpec.get("datasource")

  if requestedDatasource == "custom":
    # Convert to new grok-custom metric modelSpec format
    # NOTE: backward compatibility during first phase refactoring
    modelSpec = cls.upgradeCustomModelSpec(modelSpec)
    importing = "data" in modelSpec
  elif requestedDatasource == "cloudwatch" and "filters" not in modelSpec:
    if "type" in modelSpec:
      # The legacy cloudwatch import modelSpec had the "type" property
      assert modelSpec["type"] == "metric", repr(modelSpec)
      importing = True
      # Convert to new grok-custom metric modelSpec format
      # NOTE: backward compatibility during first phase refactoring
      modelSpec = cls.upgradeCloudwatchModelSpec(modelSpec)
  elif (requestedDatasource == "autostack" or
        modelSpec.get("type") == "autostack"):
    importing = True
    # Convert to new autostack metric modelSpec format
    # NOTE: backward compatibility during first phase refactoring
    modelSpec = cls.upgradeAutostackModelSpec(modelSpec)

  try:
    with web.ctx.connFactory() as conn:
      with conn.begin():
        adapter = createDatasourceAdapter(modelSpec["datasource"])

        if modelSpec["datasource"] == "custom":
          checkQuotaForCustomMetricAndRaise(conn)
        else:
          checkQuotaForInstanceAndRaise(
              conn, adapter.getInstanceNameForModelSpec(modelSpec))

        try:
          if importing:
            # TODO MER-3479: import using import-specific endpoint
            # NOTE: pending MER-3479, this is presently a hack for
            #   exercising the adapter import API
            metricId = adapter.importModel(modelSpec)
          else:
            metricId = adapter.monitorMetric(modelSpec)
        except app_exceptions.MetricAlreadyMonitored as e:
          # Idempotent: reuse the existing model's uid
          metricId = e.uid

      return repository.getMetric(conn, metricId)
  except (ValueError, app_exceptions.MetricNotSupportedError) as e:
    raise InvalidRequestResponse({"result": repr(e)})