Example #1
  def updateSettings(self, section=None):
    if section != "usertrack":
      # Everything but usertrack requires authentication...
      apikey = AuthenticatedBaseHandler.extractAuthHeader()
      authResult = AuthenticatedBaseHandler.compareAuthorization(apikey)

      if authResult is False:
        AuthenticatedBaseHandler.raiseAuthFailure()

    dirty = False
    data = web.data()
    if data:
      sections = {}
      if section:
        sections = {section: utils.jsonDecode(data)}
      else:
        sections = utils.jsonDecode(data)

      config = YOMPAppConfig(mode=YOMPAppConfig.MODE_OVERRIDE_ONLY)

      for s in sections:
        if s in self.validSections():
          for key in sections[s]:
            if not config.has_section(s):
              config.add_section(s)
            config.set(s, key, sections[s][key])
            dirty = True
        else:
          return False
      if dirty:
        config.save()

      return dirty
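
All of the examples on this page funnel request and response bodies through utils.jsonDecode and utils.jsonEncode. As a point of reference, here is a minimal sketch of such helpers, assuming they are thin wrappers around the standard library json module (hypothetical, for orientation only):

  import json

  def jsonDecode(data):
    """Parse a JSON document (string or bytes) into Python objects."""
    return json.loads(data)

  def jsonEncode(obj):
    """Serialize Python objects into a JSON string."""
    return json.dumps(obj)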
Example #2
  def testLifecycleForSingleInstance(self):
    """
    Test for Get '/_instances'
    response is validated for appropriate headers, body and status
    Make sure app returns empty list at initial step

    Test for post '/_instances/region/namespace/instanceId'
    response is validated for appropriate headers, body and status
    Invoke post with valid instanceId

    Test for get '/_instances/{instanceId}'
    response is validated for appropriate headers, body and status
    Check that you can invoke GET on the previously POSTed instance

    Test for delete '/_instances/region/namespace/instanceId'
    response is validated for appropriate headers, body and status
    This invokes delete call on previously monitored instance

    Test for get '/_instances/{instanceId}'
    response is validated for appropriate headers, body and status
    This invokes get call with instanceId which is deleted from monitored list
    """

    # check initial empty response with get request
    initialGetResponse = self.app.get("", headers=self.headers)
    assertions.assertSuccess(self, initialGetResponse)
    initialGetResult = app_utils.jsonDecode(initialGetResponse.body)
    self.assertItemsEqual(initialGetResult, [])

    # Post single instance details to add under monitor
    region = VALID_EC2_INSTANCES["jenkins-master"]["region"]
    namespace = "EC2"
    instanceId = "%s/AWS/%s/%s" % (
      region, namespace, VALID_EC2_INSTANCES["jenkins-master"]["instanceId"])
    postResponse = self.app.post("/" + instanceId, headers=self.headers)
    assertions.assertSuccess(self, postResponse)
    postResult = app_utils.jsonDecode(postResponse.body)
    self.assertIsInstance(postResult, dict)
    self.assertEqual(postResult, {"result": "success"})

    # Verify that instance is successfully added under monitor
    getPostCheckResponse = self.app.get("", headers=self.headers)
    assertions.assertSuccess(self, getPostCheckResponse)
    getPostCheckResult = app_utils.jsonDecode(getPostCheckResponse.body)
    self.assertEqual(len(getPostCheckResult), 1)
    instanceResult = getPostCheckResult[0]
    self.assertEqual(region, instanceResult["location"])
    self.assertEqual(instanceId, instanceResult["server"])

    # Delete instance from monitor
    deleteResponse = self.app.delete("/", headers=self.headers,
                                     params=json.dumps([instanceId]))
    assertions.assertDeleteSuccessResponse(self, deleteResponse)

    # Check GET response to confirm that the instance has been deleted successfully
    getPostDeleteResponse = self.app.get("", headers=self.headers)
    postResult = app_utils.jsonDecode(getPostDeleteResponse.body)
    self.assertEqual(postResult, [])
Example #3
 def testGETDatasources(self):
   """
   Test for GET for '/_metrics/datasources'
   response is validated for appropriate headers and body
   """
   response = self.app.get('/datasources', headers=self.headers)
   assertions.assertSuccess(self, response)
   self.assertIsInstance(utils.jsonDecode(response.body), list)
   self.assertSetEqual(set(utils.jsonDecode(response.body)),
                       set(["autostack", "custom", "cloudwatch"]))
Example #4
    def testMonitorMetricViaModelSpec(self):
        """
        Happy path testing for the route "/_models" with new modelSpec format
        """
        modelSpec = {
            "datasource": "cloudwatch",
            "metricSpec": {
                "region": "us-west-2",
                "namespace": "AWS/EC2",
                "metric": "CPUUtilization",
                "dimensions": {"InstanceId": "i-12d67826"},
            },
            "modelParams": {"min": 0, "max": 100},  # optional  # optional
        }

        # create a model
        response = self.app.post("/", utils.jsonEncode(modelSpec), headers=self.headers)
        assertions.assertSuccess(self, response, code=201)
        postResult = utils.jsonDecode(response.body)
        self.assertEqual(len(postResult), 1)
        self._checkCreateModelResult(postResult[0], modelSpec["metricSpec"])

        # get model that was previously created
        uid = postResult[0]["uid"]
        response = self.app.get("/%s" % uid, headers=self.headers)
        assertions.assertSuccess(self, response)
        getModelResult = utils.jsonDecode(response.body)
        self.assertItemsEqual(getModelResult[0].keys(), self.modelsTestData["get_response"].keys())

        # get all models in the system
        response = self.app.get("/", headers=self.headers)
        assertions.assertSuccess(self, response)
        allModelsResult = utils.jsonDecode(response.body)
        self.assertItemsEqual(allModelsResult[0].keys(), self.modelsTestData["get_response"].keys())
        self.assertEqual(len(allModelsResult), 1)

        # Repeat the request to monitor same metric and verify that it returns the
        # same model uid instead of creating a new one
        response = self.app.post("/", utils.jsonEncode(modelSpec), headers=self.headers)
        assertions.assertSuccess(self, response, code=201)
        postResult = utils.jsonDecode(response.body)
        self.assertEqual(postResult[0]["uid"], uid)
        self.assertEqual(len(postResult), 1)
        self._checkCreateModelResult(postResult[0], modelSpec["metricSpec"])

        # Unmonitor the metric
        response = self.app.delete("/%s" % uid, headers=self.headers)
        assertions.assertDeleteSuccessResponse(self, response)
Example #5
    def testInstanceDefaultsHandlerPOST(self, listMetricIDsMock, _engineFactoryMock):
        """
        Test for POST "/_instances/region/namespace/instanceId"
        response is validated for appropriate headers, body and status
        """

        listMetricIDsMock.return_value = []

        region = "us-west-2"

        # Currently we are not supporting certain namespaces
        # unsupportedNamespaces reflects such unsupported namespaces
        # These namespaces are currently validated for "400 Bad Request"
        # and expected error message.
        # Update this list with changes in namespace support
        unsupportedNamespaces = ("Billing", "StorageGateway")

        for namespace in unsupportedNamespaces:
            response = self.app.post("/%s/AWS/%s/abcd1234" % (region, namespace), headers=self.headers, status="*")
            assertions.assertBadRequest(self, response, "json")
            result = json.loads(response.body)["result"]
            self.assertTrue(result.startswith("Not supported."))

        cwAdapter = datasource.createDatasourceAdapter("cloudwatch")
        supportedNamespaces = set()
        for resourceInfo in cwAdapter.describeSupportedMetrics().values():
            for metricInfo in resourceInfo.values():
                supportedNamespaces.add(metricInfo["namespace"])

        for namespace in supportedNamespaces:
            response = self.app.post("/%s/%s/abcd1234" % (region, namespace), headers=self.headers)
            assertions.assertSuccess(self, response)
            result = app_utils.jsonDecode(response.body)
            self.assertIsInstance(result, dict)
            self.assertEqual(result, {"result": "success"})
Example #6
  def createModels(data=None):
    if data:
      if isinstance(data, basestring):
        request = utils.jsonDecode(data)
      else:
        request = data

      if not isinstance(request, list):
        request = [request]

      response = []
      for nativeMetric in request:
        try:
          response.append(ModelHandler.createModel(nativeMetric))
        except app_exceptions.ObjectNotFoundError:
          # This happens when there is a race condition between creating the
          # model and another thread/process deleting the metric or metric data.
          # TODO: it doesn't make sense that this error is suppressed and that
          #   it's reported this way inside the response list among dao.Metric
          #   objects.
          response.append("Model failed during creation. Please try again.")
      return response

    # Metric data is missing
    log.error("Data is missing in request, raising BadRequest exception")
    raise web.badrequest("Metric data is missing")
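
createModels accepts either an already-decoded object or a JSON string, and normalizes a single item into a list before creating models one by one. A hedged sketch of both call shapes (assuming createModels lives on ModelHandler, as its body suggests; the payload fields are borrowed from Example #38):

  payload = {"datasource": "custom", "uid": "2a123bb1dd4d46e7a806d62efc29cbb9"}
  # 1. Pass an already-decoded dict; it is wrapped into a one-element list
  results = ModelHandler.createModels(data=payload)
  # 2. Pass the equivalent JSON string; it is decoded with utils.jsonDecode
  results = ModelHandler.createModels(data=utils.jsonEncode(payload))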
Example #7
    def testGetAllModelData(self):
        """
        test GET /data
        """
        getAllMetricDataResponse = self.app.get("/%s/data" % self.uid, headers=self.headers)
        assertions.assertSuccess(self, getAllMetricDataResponse)
        getAllMetricDataResult = utils.jsonDecode(getAllMetricDataResponse.body)
        self.assertIsInstance(getAllMetricDataResult, dict)
        self.assertItemsEqual(getAllMetricDataResult.keys(), ["data", "names"])

        # Compare http and https response
        https_response = requests.get(
            "https://localhost/_models/%s/data" % self.uid, headers=self.headers, verify=False
        )

        httpsData = json.loads(https_response.text)
        self.assertIsInstance(httpsData, dict)
        self.assertItemsEqual(httpsData.keys(), ["data", "names"])
        self.assertItemsEqual(httpsData["names"], ["timestamp", "value", "anomaly_score", "rowid"])
        self.assertIsInstance(httpsData["data"], list)
        self.assertTrue(all(isinstance(row, list) and len(row) == 4 for row in httpsData["data"]))

        http_response = requests.get("http://localhost/_models/%s/data" % self.uid, headers=self.headers)
        httpData = json.loads(http_response.text)
        self.assertIsInstance(httpData, dict)
        self.assertItemsEqual(httpData.keys(), ["data", "names"])
        self.assertItemsEqual(httpData["names"], ["timestamp", "value", "anomaly_score", "rowid"])
        self.assertIsInstance(httpData["data"], list)
        self.assertTrue(all(isinstance(row, list) and len(row) == 4 for row in httpData["data"]))
Example #8
 def testMetricsAPIGETCouldWatch(self, adapterMock):
   adapterMock.return_value.describeRegions.return_value = []
   adapterMock.return_value.describeSupportedMetrics.return_value = {}
   response = self.app.get("/_metrics/cloudwatch", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, {'regions': {}, 'namespaces': {}})
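
Tests like this one receive mock objects as extra parameters, which implies a mock.patch decorator that these excerpts omit. A hedged sketch of the likely wiring; the test-case shell and the patch target path are assumptions:

  import unittest
  from mock import patch

  class MetricsApiTest(unittest.TestCase):  # hypothetical test-case shell
    @patch("YOMP.app.webservices.metrics_api.createDatasourceAdapter")  # assumed target
    def testMetricsAPIGETCouldWatch(self, adapterMock):
      adapterMock.return_value.describeRegions.return_value = []
      adapterMock.return_value.describeSupportedMetrics.return_value = {}
      response = self.app.get("/_metrics/cloudwatch", headers=self.headers)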
Example #9
 def testMetricsAPIGETDataSources(self, getDatasources):
   getDatasources.return_value = []
   response = self.app.get("/_metrics/datasources", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, [])
   self.assertTrue(getDatasources.called)
Example #10
  def POST(self, deviceId):
    """
      Mark notification as seen (POST handler for
      /_notifications/{deviceId}/see)

      ::

          POST /_notifications/{deviceId}/see

          [
            "e78599c4-758b-4c6e-87b1-daabaccff798",
            "4baa5ea6-5c94-46ee-b414-959bb973ddfb",
            "af531f3b-0e8b-41fa-94c4-a404526d791f",
            ...
          ]

      Seen notifications will not appear in
      ``GET /_notifications/{deviceId}`` requests.
    """
    try:
      notificationIds = utils.jsonDecode(web.data())

      with web.ctx.connFactory() as conn:
        repository.batchSeeNotifications(conn, notificationIds)
      raise web.HTTPError(status="204 No Content")
    except (web.HTTPError) as ex:
      log.info(str(ex))
      raise ex
    except Exception as ex:
      log.exception("POST Failed")
      raise web.internalerror(ex)
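
A client marks notifications as seen by POSTing a JSON array of notification ids and expecting a 204. A minimal sketch using the requests library; the host, port, and device id are assumptions, and the notification id is taken from the docstring above:

  import json
  import requests

  resp = requests.post(
      "http://localhost:8081/_notifications/1231AC32FE/see",
      data=json.dumps(["e78599c4-758b-4c6e-87b1-daabaccff798"]))
  assert resp.status_code == 204  # seen ids stop appearing in GET responses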
Example #11
  def GET(self): # pylint: disable=C0103
    """
      Get list of autostacks

      Example request::

        GET /_autostacks

      Example response::

        [
          {
            "name": {name},
            "region": {region},
            "filters": {
              "tag:{Name}": ["{value}", "{value}", ...],
              "tag:{Description}": ["{value}", "{value}", ...],
              "tag:{etc}": ["{value}", "{value}", ...]
            },
            "uid": {uid}
          },
          ...
        ]

    """
    self.addStandardHeaders()
    with web.ctx.connFactory() as conn:
      autostackRows = repository.getAutostackList(conn)
    autostackList = [{"uid":autostack.uid,
                      "name":autostack.name,
                      "region":autostack.region,
                      "filters":utils.jsonDecode(autostack.filters)}
                     for autostack in autostackRows]

    return utils.jsonEncode(autostackList)
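
The handler returns the autostack list JSON-encoded, so a consumer decodes it back into the structure shown in the docstring. A hedged sketch, assuming a webtest-style client like the ones used elsewhere on this page:

  response = app.get("/_autostacks", headers=headers)
  autostacks = utils.jsonDecode(response.body)
  for stack in autostacks:
    print(stack["uid"], stack["name"], stack["region"], stack["filters"])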
Example #12
 def testModelHandlerListModelsWithSlashEmptyResponse(
     self, getAllModelsMock, _engineMock, *args):
   getAllModelsMock.return_value = []
   response = self.app.get("/", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = jsonDecode(response.body)
   self.assertEqual(result, [])
Example #13
  def testGETSpecificInstanceFromRegion(self):
    """
    Test for Get
    '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/instances/<InstanceId>'
    response is validated for appropriate headers, body and status
    The test currently uses the jenkins-master EC2 box; it also validates
    retrieving all supported metrics with dimensions
    """
    supportedMetrics = (
      createCloudwatchDatasourceAdapter().describeSupportedMetrics())
    ec2Metrics = supportedMetrics[ResourceTypeNames.EC2_INSTANCE].keys()

    # Instance used for following test is jenkins-master node
    response = self.app.get("/us-west-2/AWS/EC2/instances/%s"
        % VALID_EC2_INSTANCE["InstanceId"], headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, list)
    self.assertGreater(len(ec2Metrics), 0)
    self.assertGreater(len(result), 0)
    self.assertEqual(len(ec2Metrics), len(result))
    for res in result:
      self.assertEqual(res["region"], "us-west-2")
      self.assertEqual(res["namespace"], "AWS/EC2")
      self.assertEqual(res["datasource"], "cloudwatch")
      self.assertIn(res["metric"], ec2Metrics)
      self.assertIsInstance(res["dimensions"], dict)
      self.assertEqual(res["dimensions"]["InstanceId"],
        VALID_EC2_INSTANCE["InstanceId"])
      ec2Metrics.remove(res["metric"])

    self.assertEqual(ec2Metrics, [])
Example #14
  def testGetModelDataWithModelUIdAndAnomalyScore(self):
    """
    test GET /metricId/data?anomaly=testanomalyScore
    """
    getMetricDataWithAnomalyQueryResponse = self.app.get(
      "/%s/data?anomaly=%s" % (self.uid, self.testAnomalyScore),
      headers=self.headers)
    getMetricDataWithAnomalyQueryResult = utils.jsonDecode(
      getMetricDataWithAnomalyQueryResponse.body)

    assertions.assertSuccess(self, getMetricDataWithAnomalyQueryResponse)
    self.assertIsInstance(getMetricDataWithAnomalyQueryResult, dict)
    self.assertItemsEqual(getMetricDataWithAnomalyQueryResult.keys(),
                          ["data", "names"])
    self.assertGreater(len(getMetricDataWithAnomalyQueryResult["data"]), 0)
    # We parse the anomaly scores from the response and check that each one
    # satisfies the condition set in the GET request. If for some reason this
    # parameter were not applied to the DB query, we would get the full,
    # unfiltered response back, hence the assertion below.
    anomalyScores = \
      [row[2] for row in getMetricDataWithAnomalyQueryResult["data"]]
    failedScores = [a for a in anomalyScores if a < self.testAnomalyScore]
    self.assertEqual(failedScores, [])
Example #15
 def testDefaultGetSpecificSection(self):
   response = self.app.get("/aws", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertIsInstance(result, dict)
   for key in set(self.configurable_options["aws"]):
     if key in settings_api.HIDDEN_SETTINGS["aws"]:
       self.assertNotIn(key, result)
     else:
       self.assertIn(key, result)
Example #16
 def _deleteOneMetric(self):
   """
   Delete one metric from test EC2 instance
   """
   app = TestApp(models_api.app.wsgifunc())
   response = app.get("/", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertIsInstance(result, list)
   app.delete("/" + result[0]['uid'], headers=self.headers)
Example #17
 def testGetMetricsWithNonEmptyResponse(self, getDatasourcesMock):
   """
   Test GET "/datasources" with a non-empty response
   response is validated for appropriate headers, body and status
   """
   getDatasourcesMock.return_value = ("autostack", "cloudwatch", "custom")
   response = self.app.get("/datasources", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, ["autostack", "cloudwatch", "custom"])
Example #18
 def testGetMetricsWithEmptyResponse(self, getDatasourcesMock):
   """
   Test GET "/datasources" with an empty response
   response is validated for appropriate headers, body and status
   """
   getDatasourcesMock.return_value = tuple()
   response = self.app.get("/datasources", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, [])
Example #19
 def testGetModelDataWithModelUId(self):
   """
   test GET /metricId/data
   """
   getMetricDataResponse = self.app.get("/%s/data" % self.uid,
                                        headers=self.headers)
   assertions.assertSuccess(self, getMetricDataResponse)
   getMetricDataResult = utils.jsonDecode(getMetricDataResponse.body)
   self.assertIsInstance(getMetricDataResult, dict)
   self.assertItemsEqual(getMetricDataResult.keys(), ["data", "names"])
   self.assertGreater(len(getMetricDataResult["data"]), 0)
Example #20
 def testGetListRegionsEmptyResponse(self, adapterMock):
   """
   Test for Get '/_metrics/cloudwatch/regions'
   response is validated for appropriate headers, body and status
   """
   adapterMock.return_value.describeRegions.return_value = []
   response = self.app.get("/regions", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, {})
Example #21
 def _deleteInstance(self):
   """
   Delete test EC2 instance created by :py:meth:`_createEC2Instance`
   """
   app = TestApp(instances_api.app.wsgifunc())
   response = app.delete("/", params=json.dumps([self.instanceId]),
                         headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertIsInstance(result, dict)
   self.assertEqual(result, {"result": "success"})
Example #22
 def testGETListRegions(self):
   """
   Test for Get '/_metrics/cloudwatch/regions'
   response is validated for appropriate headers, body and status
   response is validated against json for supported regions
   """
   response = self.app.get("/regions", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertIsInstance(result, dict)
   self.assertEqual(result, self.regions)
Example #23
    def testGETInvalidSection(self):
        """
        Test for GET '/_settings/section' with an invalid section
        response is validated for appropriate headers and body
        """
        response = self.app.get("/dddd", headers=self.headers)

        assertions.assertResponseHeaders(self, response)
        assertions.assertResponseBody(self, response)
        assertions.assertResponseStatusCode(self, response, 200)
        self.assertEqual(app_utils.jsonDecode(response.body), {})
Example #24
 def testMetricDataHandlerGetMetricDataWithToTimestamp(self,
     getMetricDataMock, _engineMock):
   getMetricDataMock.return_value = self.decodeRowTuples(
     self.metric_data["withto"])
   response = self.app.get(
     "/be9fab-f416-4845-8dab-02d292244112/data?to=2013-08-15 21:28:00",
     headers=self.headers)
   assertions.assertSuccess(self, response)
   result = jsonDecode(response.body)
   self.assertEqual([row[1:] for row in self.metric_data["withto"]],
                    result["data"])
Example #25
 def testGetListNamespaceNoRegions(self, adapterMock):
   """
   Test for Get '/_metrics/cloudwatch/namespaces'
   response is validated for appropriate headers, body and status
   """
   adapterMock.return_value.describeRegions.return_value = []
   response = self.app.get("/namespaces", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   # Added Autostacks namespaces to this list for now, to maintain API
   # backwards-compatibility during adapter refactor
   self.assertEqual(result, {'Autostacks': {'metrics': ['InstanceCount']}})
Example #26
 def testGETSpecificNamespace(self):
   """
   Test for Get '/_metrics/cloudwatch/AWS/<namespace>'
   response is validated for appropriate headers, body and status
   response is validated against available metrics for each
   namespaces supported
   """
   for namespace in self._supportedAWSNamespaces():
     response = self.app.get("/%s" % namespace, headers=self.headers)
     assertions.assertSuccess(self, response)
     result = app_utils.jsonDecode(response.body)
     self.assertIsInstance(result, dict)
Example #27
 def testModelsAPIGET(self,
                      getAllMetricsMock,
                      getInstanceStatusHistoryMock,
                      engineFactoryMock, *args):
   getAllMetricsMock.return_value = []
   getInstanceStatusHistoryMock.return_value = []
   response = self.app.get("/_models", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertEqual(result, [])
   self.assertTrue(getAllMetricsMock.called)
Example #28
 def testGETListInstancesForRegion(self):
   """
   Test for
   Get '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/instances'
   response is validated for appropriate headers, body and status
   the response body is validated to be a list
   """
   response = self.app.get("/us-west-2/AWS/EC2", headers=self.headers)
   assertions.assertSuccess(self, response)
   result = app_utils.jsonDecode(response.body)
   self.assertIsInstance(result, list)
Example #29
 def testMetricDataHandlerGetMetricDataWIthAnomaly(self,
                                                   getMetricDataMock,
                                                   _engineMock):
   getMetricDataMock.return_value = self.decodeRowTuples(
     self.metric_data['withanomaly'])
   response = self.app.get(
     "/be9fab-f416-4845-8dab-02d292244112/data?anomaly=0.01",
      headers=self.headers)
   assertions.assertSuccess(self, response)
   result = jsonDecode(response.body)
   self.assertEqual([row[1:] for row in self.metric_data['withanomaly']],
                    result["data"])
Example #30
def assertDeleteSuccessResponse(test, response):
  """
  Wraps all assertions for any successful DELETE call.
  This can be used for models, instances, etc.
  test : handle for the test case under execution, used for further assertions
  response : response received from the grok web service call
  """
  assertSuccess(test, response)
  result = app_utils.jsonDecode(response.body)
  test.assertIsInstance(result, dict)
  test.assertEqual(result, {"result": "success"})
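
Call sites for this helper appear throughout this page (Examples #2, #4, #33, #34): a test passes itself along with the raw DELETE response. A short usage sketch, assuming a webtest-style test case:

  deleteResponse = self.app.delete("/" + uid, headers=self.headers)
  assertions.assertDeleteSuccessResponse(self, deleteResponse)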
Example #31
    def POST(cls):
        """Upload the metric info and metric data as a compressed tarfile to S3.

        The request must include the uid of the metric and may include other JSON
        keys as well. For instance, it is likely that a request from the mobile
        application will include information about the current view and data
        being displayed when the feedback request is sent. Any fields in addition
        to uid will be stored with the feedback archive file that is uploaded to
        S3.
        """
        inputData = json.loads(web.data())
        # Get the metric uid
        uid = inputData["uid"]
        del inputData["uid"]

        inputData["server_id"] = _MACHINE_ID

        # Data is written to a temporary directory before uploading
        path = tempfile.mkdtemp()

        try:
            # Retrieve the metric table record and add it to the other input
            # parameters
            metricFields = [
                schema.metric.c.uid, schema.metric.c.datasource,
                schema.metric.c.name, schema.metric.c.description,
                schema.metric.c.server, schema.metric.c.location,
                schema.metric.c.parameters, schema.metric.c.status,
                schema.metric.c.message, schema.metric.c.last_timestamp,
                schema.metric.c.poll_interval, schema.metric.c.tag_name,
                schema.metric.c.last_rowid
            ]

            with repository.engineFactory().connect() as conn:
                metricRow = repository.getMetric(conn, uid, metricFields)
            metric = dict([
                (col.name, utils.jsonDecode(getattr(metricRow, col.name))
                 if col.name == "parameters" else getattr(metricRow, col.name))
                for col in metricFields
            ])
            if metric["tag_name"]:
                metric["display_name"] = "%s (%s)" % (metric["tag_name"],
                                                      metric["server"])
            else:
                metric["display_name"] = metric["server"]

            inputData["metric"] = utils.jsonEncode(metric)

            metricPath = os.path.join(path, "metric.json")
            with open(metricPath, "w") as f:
                json.dump(inputData, f)

            # Retrieve the metric data
            with repository.engineFactory().connect() as conn:
                metricDataRows = repository.getMetricData(conn, uid)
            metricData = [
                dict([(col.name, getattr(metricData, col.name))
                      for col in schema.metric_data.columns])
                for metricData in metricDataRows
            ]

            metricDataPath = os.path.join(path, "metric_data.csv")
            with open(metricDataPath, "w") as f:
                writer = csv.writer(f)
                if len(metricData) > 0:
                    header = metricData[0].keys()
                    # Write the field names first
                    writer.writerow(header)
                    # Then write out the data for each row
                    for dataDict in metricData:
                        row = [dataDict[h] for h in header]
                        writer.writerow(row)

            # Create a tarfile to upload
            ts = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
            filename = "metric_dump_%s_%s.tar.gz" % (uid, ts)
            tfPath = os.path.join(path, filename)
            with tarfile.open(tfPath, "w:gz") as tf:
                tf.add(metricPath, arcname=os.path.basename(metricPath))
                tf.add(metricDataPath,
                       arcname=os.path.basename(metricDataPath))

            # Upload the tarfile
            return cls._uploadTarfile(filename, tfPath)

        finally:
            shutil.rmtree(path)
Example #32
  def POST(self):
    """
    Create new Annotation

    Request::

      POST /_annotations

      {
         "device": "1231AC32FE",
         "timestamp": "2013-08-27 16:45:00",
         "user": "******",
         "server": "AWS/EC2/i-12345678",
         "message": "The CPU Utilization was high ...",
         "data": { JSON Object }
      }

    :param device: Device ID if the annotation was created by the mobile app
                   or Service UID if the annotation was created by a service
    :param timestamp: The date and time to be annotated
    :param user: User name who created the annotation if the annotation was
                 created by the mobile app or service name if the annotation was
                 created by a service
    :param server: Instance ID associated with the annotation
    :param message: Annotation message (Optional if data is provided)
    :param data: Service specific data associated with this annotation
                 (Optional if message is provided)

    Response::

      HTTP Status 201 Created

      {
         "uid": "2a123bb1dd4d46e7a806d62efc29cbb9",
         "device": "1231AC32FE",
         "created": "2013-08-27 16:46:51",
         "timestamp": "2013-08-27 16:45:00",
         "user": "******",
         "server": "AWS/EC2/i-12345678",
         "message": "The CPU Utilization was high ...",
         "data": { JSON Object }
      }
    """
    self.addStandardHeaders()
    webdata = web.data()
    if webdata:
      try:
        if isinstance(webdata, basestring):
          webdata = utils.jsonDecode(webdata)
      except ValueError as e:
        raise web.badrequest("Invalid JSON in request: " + repr(e))

      if "device" in webdata:
        device = webdata["device"]
      else:
        raise web.badrequest("Missing 'device' in request")

      if "timestamp" in webdata:
        timestamp = webdata["timestamp"]
      else:
        raise web.badrequest("Missing 'timestamp' in request")

      if "user" in webdata:
        user = webdata["user"]
      else:
        raise web.badrequest("Missing 'user' in request")

      if "server" in webdata:
        server = webdata["server"]
      else:
        raise web.badrequest("Missing 'server' in request")

      if "message" in webdata:
        message = webdata["message"]
      else:
        message = None

      if "data" in webdata:
        data = webdata["data"]
      else:
        data = None

      if data is None and message is None:
        raise web.badrequest(
            "Annotation must contain either 'message' or 'data'")

      # lower timestamp resolution to seconds because the database rounds up
      # microsecond to the nearest second
      created = datetime.datetime.utcnow().replace(microsecond=0)
      uid = utils.createGuid()

      try:
        with web.ctx.connFactory() as conn:
          repository.addAnnotation(conn=conn, timestamp=timestamp,
                                   device=device, user=user, server=server,
                                   message=message, data=data, created=created,
                                   uid=uid)

        # Prepare response with generated "uid" and "created" fields filled
        response = utils.jsonEncode({
            "uid": uid,
            "created": created,
            "device": device,
            "timestamp": timestamp,
            "user": user,
            "server": server,
            "message": message,
            "data": data,
        })
        raise web.created(response)
      except app_exceptions.ObjectNotFoundError as ex:
        raise web.badrequest(str(ex) or repr(ex))
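
A client creating an annotation POSTs the fields documented above and reads the generated "uid" and "created" values back from the 201 response. A hedged sketch using requests; the host, port, and the "user" value are placeholders:

  import json
  import requests

  annotation = {
      "device": "1231AC32FE",
      "timestamp": "2013-08-27 16:45:00",
      "user": "example-user",  # placeholder
      "server": "AWS/EC2/i-12345678",
      "message": "The CPU Utilization was high ...",
  }
  resp = requests.post("http://localhost:8081/_annotations",
                       data=json.dumps(annotation))
  assert resp.status_code == 201
  created = json.loads(resp.text)  # echoes the fields plus "uid" and "created"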
Example #33
    def testLifecycleForMultipleInstances(self):
        """
        Test for Get '/_instances'
        response is validated for appropriate headers, body and status
        This expects response from application in initial stage when
        no instances are under monitor

        Test for post '/_instances'
        response is validated for appropriate headers, body and status
        post multiple instances

        Test for Get '/_instances'
        response is validated for appropriate headers, body and status
        This test checks the listed monitored instances from the previous step

        Test for delete '/_instances'
        response is validated for appropriate headers, body and status
        invoke delete with valid instanceId for listed monitored instances
        from the previous step

        Test for Get '/_instances'
        response is validated for appropriate headers, body and status
        This invokes get call to assert that all instances which were
        under monitor have been deleted and we get empty response
        """
        # Check instance list at initial phase for empty response
        getInitialResponse = self.app.get("", headers=self.headers)
        assertions.assertSuccess(self, getInitialResponse)
        getInitialResult = app_utils.jsonDecode(getInitialResponse.body)
        self.assertItemsEqual(getInitialResult, [])

        # Test for post '/_instances'

        # TODO: Until MER-1172 is resolved, the test will execute this as a
        # temporary measure. This adds the expected instances under monitor,
        # which will be used for further tests.
        params = [
            VALID_EC2_INSTANCES["rpm-builder"]["instanceId"],
            VALID_EC2_INSTANCES["grok-docs"]["instanceId"]
        ]
        region = "us-west-2"
        namespace = "EC2"
        for instance in params:
            postResponse = self.app.post("/%s/AWS/%s/%s" %
                                         (region, namespace, instance),
                                         headers=self.headers)
            assertions.assertSuccess(self, postResponse)
            postResult = app_utils.jsonDecode(postResponse.body)
            self.assertIsInstance(postResult, dict)
            self.assertEqual(postResult, {"result": "success"})

        # TODO: Use the API calls below once MER-1172 is resolved

        #postResponse = self.app.post("/us-west-2/AWS/EC2",
        #  params=app_utils.jsonEncode(params), headers=self.headers, status="*")
        #assertions.assertSuccess(self, response)
        #postResult = app_utils.jsonDecode(postResponse.body)
        #self.assertIsInstance(postResult, dict)
        #self.assertEqual(postResult, {"result": "success"})

        # Test for Get '/_instances'
        getPostCheckResponse = self.app.get("", headers=self.headers)
        assertions.assertSuccess(self, getPostCheckResponse)
        getPostCheckResult = app_utils.jsonDecode(getPostCheckResponse.body)
        instanceIds = []
        self.assertIsInstance(getPostCheckResult, list)
        for instance in getPostCheckResult:
            instanceIds.append(instance["server"])
            self.assertEqual(instance["namespace"], "AWS/EC2")
            self.assertEqual(instance["location"], "us-west-2")
        self.assertItemsEqual(
            [instanceId.rpartition("/")[2] for instanceId in instanceIds],
            params)

        # Delete instances under monitor
        deleteResponse = self.app.delete(
            "", params=app_utils.jsonEncode(instanceIds), headers=self.headers)
        assertions.assertDeleteSuccessResponse(self, deleteResponse)

        # check instances to confirm the delete action
        getPostDeleteCheckResponse = self.app.get("", headers=self.headers)
        assertions.assertSuccess(self, getPostDeleteCheckResponse)
        getPostDeleteResult = app_utils.jsonDecode(
            getPostDeleteCheckResponse.body)
        self.assertItemsEqual(getPostDeleteResult, [])
Example #34
  def testMonitorMetricViaModelSpec(self):
    """
    Happy path testing for the route "/_models" with new modelSpec format
    """
    modelSpec = {
      "datasource": "cloudwatch",

      "metricSpec": {
        "region": "us-west-2",
        "namespace": "AWS/EC2",
        "metric": "CPUUtilization",
        "dimensions": {
          "InstanceId": "i-12d67826"
        }
      },

      "modelParams": {
        "min": 0,  # optional
        "max": 100  # optional
      }
    }

    # create a model
    response = self.app.post("/", utils.jsonEncode(modelSpec),
                             headers=self.headers)
    assertions.assertSuccess(self, response, code=201)
    postResult = utils.jsonDecode(response.body)
    self.assertEqual(len(postResult), 1)
    self._checkCreateModelResult(postResult[0], modelSpec["metricSpec"])

    # get model that was previously created
    uid = postResult[0]["uid"]
    response = self.app.get("/%s" % uid, headers=self.headers)
    assertions.assertSuccess(self, response)
    getModelResult = utils.jsonDecode(response.body)
    self.assertItemsEqual(getModelResult[0].keys(),
      self.modelsTestData["get_response"].keys())

    # get all models in the system
    response = self.app.get("/", headers=self.headers)
    assertions.assertSuccess(self, response)
    allModelsResult = utils.jsonDecode(response.body)
    self.assertItemsEqual(allModelsResult[0].keys(),
      self.modelsTestData["get_response"].keys())
    self.assertEqual(len(allModelsResult), 1)

    # Repeat the request to monitor same metric and verify that it returns the
    # same model uid instead of creating a new one
    response = self.app.post("/", utils.jsonEncode(modelSpec),
                             headers=self.headers)
    assertions.assertSuccess(self, response, code=201)
    postResult = utils.jsonDecode(response.body)
    self.assertEqual(postResult[0]["uid"], uid)
    self.assertEqual(len(postResult), 1)
    self._checkCreateModelResult(postResult[0], modelSpec["metricSpec"])

    # Unmonitor the metric
    response = self.app.delete("/%s" % uid, headers=self.headers)
    assertions.assertDeleteSuccessResponse(self, response)
Example #35
    def testGetMetricDimensionWithResponse(self, adapterMock):
        """
        Test for
        Get '/_metrics/cloudwatch/<region-name>/AWS/<namespace>/metricName'
        response is validated for appropriate headers, body and status
        and response is validated with pre-defined responses
        """
        adapterMock.return_value.describeRegions.return_value = self.regions
        adapterMock.return_value.describeSupportedMetrics.return_value = (
            self.resources)
        adapterMock.return_value.describeResources.return_value = [
            {'grn': u'aws://us-west-2/Instance/i-d48ccaba',
             'name': u'Foo',
             'resID': u'i-d48ccaba'},
            {'grn': u'aws://us-west-2/Instance/i-548acc3a',
             'name': u'Bar',
             'resID': u'i-548acc3a'}]

        response = self.app.get("/us-east-1/AWS/EC2/CPUUtilization",
                                headers=self.headers)
        assertions.assertSuccess(self, response)
        result = app_utils.jsonDecode(response.body)
        self.assertItemsEqual(result, [{
            'datasource': 'cloudwatch',
            'dimensions': {
                'InstanceId': 'i-d48ccaba'
            },
            'metric': 'CPUUtilization',
            'namespace': 'AWS/EC2',
            'region': 'us-east-1'
        }, {
            'datasource': 'cloudwatch',
            'dimensions': {
                'InstanceId': 'i-548acc3a'
            },
            'metric': 'CPUUtilization',
            'namespace': 'AWS/EC2',
            'region': 'us-east-1'
        }, {
            'datasource': 'cloudwatch',
            'dimensions': {
                'AutoScalingGroupName': 'i-d48ccaba'
            },
            'metric': 'CPUUtilization',
            'namespace': 'AWS/EC2',
            'region': 'us-east-1'
        }, {
            'datasource': 'cloudwatch',
            'dimensions': {
                'AutoScalingGroupName': 'i-548acc3a'
            },
            'metric': 'CPUUtilization',
            'namespace': 'AWS/EC2',
            'region': 'us-east-1'
        }])
Example #36
    def testAnnotationDataIntegrity(self):
        """
        **Test Required Fields**

        * Make sure user is not allowed to add annotations without device
        * Make sure user is not allowed to add annotations without timestamp
        * Make sure user is not allowed to add annotations without instance
        * Make sure user is not allowed to add annotations with invalid/unknown
          instance
        * Do not delete annotations when metric is deleted
        * Delete annotations when instance is deleted
        """
        # Create Instance before annotation
        self._createEC2Instance()

        # Create request without "device"
        req = self.request.copy()
        del req["device"]
        with self.assertRaises(AppError):
            self.app.post("", app_utils.jsonEncode(req), headers=self.headers)

        # Create request without "timestamp"
        req = self.request.copy()
        del req["timestamp"]
        with self.assertRaises(AppError):
            self.app.post("", app_utils.jsonEncode(req), headers=self.headers)

        # Create request without "instance"
        req = self.request.copy()
        del req["server"]
        with self.assertRaises(AppError):
            self.app.post("", app_utils.jsonEncode(req), headers=self.headers)

        # Create request with invalid/unknown "instance"
        req = self.request.copy()
        req["server"] = "dummy"
        with self.assertRaises(AppError):
            self.app.post("", app_utils.jsonEncode(req), headers=self.headers)

        # Create request without "message" and "data"
        req = self.request.copy()
        del req["message"]
        del req["data"]
        with self.assertRaises(AppError):
            self.app.post("", app_utils.jsonEncode(req), headers=self.headers)

        # Add annotation
        response = self.app.post("",
                                 app_utils.jsonEncode(self.request),
                                 headers=self.headers)
        self.assertEqual(response.status, 201)

        # Use this newly created annotation as expected annotation from now on
        expectedAnnotation = app_utils.jsonDecode(response.body)

        # Do not delete annotations when metric is deleted

        # Delete metric
        self._deleteOneMetric()

        # Make sure no annotation was deleted
        response = self.app.get("/", headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertItemsEqual([expectedAnnotation], actual)

        # Delete annotations when instance is deleted
        self._deleteInstance()

        # Make sure annotation was deleted
        response = self.app.get("/", headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)
Example #37
    def testAnnotationLifecycle(self):
        """
        **Happy Path**

        * Make sure annotation was successfully created and all fields were
          initialized.
        * Make sure user can get annotation by ID and fail (Not Found) if he uses
          the wrong ID
        * Make sure user can get annotations by device and receive an empty array
          if he uses the wrong device
        * Make sure user can get annotations by instance and receive an empty
          array if he uses the wrong instance
        * Make sure user can get annotation by date and receive an empty array if
          he uses dates out of range
        * Make sure user can delete annotations
        """
        # Create Instance before annotation
        self._createEC2Instance()

        # Create Annotation
        response = self.app.post("",
                                 app_utils.jsonEncode(self.request),
                                 headers=self.headers)
        self.assertEqual(response.status, 201)

        # Use this newly created annotation as expected annotation from now on
        expectedAnnotation = app_utils.jsonDecode(response.body)

        # The result should contain new "uid" and "created" fields
        self.assertIn("uid", expectedAnnotation)
        self.assertIn("created", expectedAnnotation)
        # All the other fields should match request
        self.assertDictContainsSubset(self.request, expectedAnnotation)

        # Get Annotation By ID
        response = self.app.get("/" + expectedAnnotation["uid"],
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        # Get Annotation with wrong ID
        with self.assertRaises(AppError) as e:
            response = self.app.get("/dummy", headers=self.headers)
        self.assertIn("Bad response: 404 Not Found", str(e.exception))

        # Get all annotations
        response = self.app.get("/", headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertItemsEqual([expectedAnnotation], actual)

        # Get Annotations by Device
        response = self.app.get("/", {"device": self.request["device"]},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        # Get Annotations with wrong Device
        response = self.app.get("/", {"device": "dummy"}, headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)

        # Get Annotations by server
        response = self.app.get("/", {"server": self.request["server"]},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        # Get Annotations with wrong server
        response = self.app.get("/", {"server": "dummy"}, headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)

        # Get Annotations by date
        response = self.app.get("/", {"from": "2014-01-01 00:00:00"},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        response = self.app.get("/", {"from": self.request["timestamp"]},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        response = self.app.get("/", {"to": self.request["timestamp"]},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        response = self.app.get("/", {
            "from": "2014-01-01 00:00:00",
            "to": "2014-12-31 00:00:00"
        },
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertDictEqual(expectedAnnotation, actual[0])

        # Get Annotations with date out of range
        response = self.app.get("/", {"from": "2014-12-31 00:00:00"},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)

        response = self.app.get("/", {"to": "2014-01-01 00:00:00"},
                                headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)

        # Delete annotation with wrong ID
        with self.assertRaises(AppError) as e:
            self.app.delete("/dummy", headers=self.headers)
        self.assertIn("Bad response: 404 Not Found", str(e.exception))

        # Make sure no annotation was deleted
        response = self.app.get("/", headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertItemsEqual([expectedAnnotation], actual)

        # Delete annotation
        response = self.app.delete("/" + expectedAnnotation["uid"],
                                   headers=self.headers)
        self.assertEqual(response.status, 204)
        self.assertFalse(response.body)

        # Make sure annotation was deleted
        response = self.app.get("/", headers=self.headers)
        actual = app_utils.jsonDecode(response.body)
        self.assertTrue(len(actual) == 0)
Example #38
  def PUT(self, modelId=None):
    """
    Create Model

    ::

        POST /_models

    Data: Use the metric as returned by the datasource metric list.

    For example, create a Cloudwatch model as follows:

    ::

        curl http://localhost:8081/_models -X POST -d '
        {
            "region": "us-east-1",
            "namespace": "AWS/EC2",
            "datasource": "cloudwatch",
            "metric": "CPUUtilization",
            "dimensions": {
                "InstanceId": "i-12345678"
            }
        }'

    Or to create a YOMP custom model, include the following data in the
    POST request (uid is the same for the metric and model):

    ::

        {
            "uid": "2a123bb1dd4d46e7a806d62efc29cbb9",
            "datasource": "custom",
            "min": 0.0,
            "max": 5000.0
        }

    The "min" and "max" options are optional for both Cloudwatch and YOMP
    custom metrics.
    """
    if modelId:
      # ModelHandler is overloaded to handle both single-model requests, and
      # multiple-model requests.  As a result, if a user makes a POST, or PUT
      # request, it's possible that the request can be routed to this handler
      # if the url pattern matches.  This specific POST handler is not meant
      # to operate on a known model, therefore, raise an exception, and return
      # a `405 Method Not Allowed` response.
      raise NotAllowedResponse({"result": "Not supported"})

    data = web.data()
    if data:
      try:
        if isinstance(data, basestring):
          request = utils.jsonDecode(data)
        else:
          request = data
      except ValueError as e:
        response = "InvalidArgumentsError(): " + repr(e)
        raise InvalidRequestResponse({"result": response})

      if not isinstance(request, list):
        request = [request]

      response = []
      for nativeMetric in request:
        try:
          # Attempt to validate the request data against a schema
          # TODO: Move this logic into datasource-specific adapters
          if ("type" in nativeMetric.keys() and
              nativeMetric["type"] == "autostack"):
            validate(nativeMetric, _AUTOSTACK_CREATION_SCHEMA)
          elif nativeMetric["datasource"] == "custom":
            validate(nativeMetric, _CUSTOM_MODEL_CREATION_SCHEMA)
          elif nativeMetric["datasource"] == "autostack":
            validate(nativeMetric, _AUTOSTACK_MODEL_IMPORT_SCHEMA)
          else:
            validate(nativeMetric, _CLOUDWATCH_MODEL_CREATION_SCHEMA)

            # Perform additional cloudwatch-specific validation that can't be
            # captured properly in schema.
            if "metricSpec" in nativeMetric:
              # New-style arg
              metricSpec = nativeMetric["metricSpec"]
            else:
              # Legacy arg
              metricSpec = nativeMetric

            if (not isinstance(metricSpec["dimensions"], dict) or
                not metricSpec["dimensions"] or
                not all(key and value
                        for (key, value)
                        in metricSpec["dimensions"].iteritems())):
              raise ValidationError("At least one dimension is required")

        except ValidationError as e:
          # Catch ValidationError if validation fails
          # InvalidRequestResponse produces an HTTP 400 error code
          response = "InvalidArgumentsError(): " + repr(e)
          raise InvalidRequestResponse({"result": response})
    else:
      # Metric data is missing
      log.error("Data is missing in request, raising BadRequest exception")
      raise web.badrequest("Metric data is missing")

    try:
      self.addStandardHeaders()
      metricRowList = self.createModels(data)

      metricDictList = [formatMetricRowProxy(metricRow)
                        for metricRow in metricRowList]
      response = utils.jsonEncode(metricDictList)

      raise web.created(response)

    except web.HTTPError as ex:
      if bool(re.match("([45][0-9][0-9])\s?", web.ctx.status)):
        # Log 400-599 status codes as errors, ignoring 200-399
        log.error(str(ex) or repr(ex))
      raise
    except Exception as ex:
      log.exception("PUT Failed")
      raise web.internalerror(str(ex) or repr(ex))
    def updateModelAnomalyScores(self, engine, metricObj, metricDataRows):
        """
        Calculate the anomaly scores based on the anomaly likelihoods. Update
        anomaly scores in the given metricDataRows MetricData instances, and
        calculate new anomaly likelihood params for the model.

        :param engine: SQLAlchemy engine object
        :type engine: sqlalchemy.engine.Engine
        :param metricObj: the model's Metric instance
        :param metricDataRows: a sequence of MetricData instances in the
          processed order (ascending by timestamp) with updated raw_anomaly_score
          and zeroed out anomaly_score corresponding to the new model inference
          results, but not yet updated in the database. Will update their
          anomaly_score properties, as needed.

        :returns: new anomaly likelihood params for the model

        *NOTE:*
          the processing must be idempotent due to the "at least once" delivery
          semantics of the message bus

        *NOTE:*
          the performance goal is to minimize costly database access and avoid
          falling behind while processing model results, especially during the
          model's initial "catch-up" phase when large inference result batches
          are prevalent.
        """
        # When populated, a cached list of MetricData instances for updating
        # anomaly likelihood params
        statsSampleCache = None

        # Index into metricDataRows where processing is to resume
        startRowIndex = 0

        statisticsRefreshInterval = self._getStatisticsRefreshInterval(
            batchSize=len(metricDataRows))

        if metricObj.status != MetricStatus.ACTIVE:
            raise MetricNotActiveError(
                "getAnomalyLikelihoodParams failed because metric=%s is not ACTIVE; "
                "status=%s; resource=%s" % (
                    metricObj.uid,
                    metricObj.status,
                    metricObj.server,
                ))

        modelParams = jsonDecode(metricObj.model_params)
        anomalyParams = modelParams.get("anomalyLikelihoodParams", None)
        if not anomalyParams:
            # We don't have a likelihood model yet. Create one if we have sufficient
            # records with raw anomaly scores
            (anomalyParams, statsSampleCache,
             startRowIndex) = (self._initAnomalyLikelihoodModel(
                 engine=engine,
                 metricObj=metricObj,
                 metricDataRows=metricDataRows))

        # Do anomaly likelihood processing on the rest of the new samples
        # NOTE: this loop will be skipped if there are still not enough samples for
        #  creating the anomaly likelihood params
        while startRowIndex < len(metricDataRows):
            # Determine where to stop processing rows prior to next statistics refresh

            if (statsSampleCache is None
                    or len(statsSampleCache) >= self._statisticsMinSampleSize):
                # We're here if:
                #   a. We haven't tried updating anomaly likelihood stats yet
                #                 OR
                #   b. We already updated anomaly likelihood stats (we had sufficient
                #      samples for it)
                # TODO: unit-test
                endRowID = (anomalyParams["last_rowid_for_stats"] +
                            statisticsRefreshInterval)

                if endRowID < metricDataRows[startRowIndex].rowid:
                    # We're here if:
                    #   a. Statistics refresh interval is smaller than during last stats
                    #      update; this is the typical/normal case when backlog catch-up
                    #      is tapering off, and refresh interval is reduced for smaller
                    #      batches. OR
                    #   b. There is a gap of anomaly scores preceding the start of the
                    #      current chunk. OR
                    #   c. Statistics config changed.
                    # TODO: unit-test

                    self._log.warning(
                        "Anomaly run cutoff precedes samples (smaller stats "
                        "refreshInterval, gap in anomaly scores, or statistics "
                        "config changed): model=%s; rows=[%s..%s]", metricObj.uid,
                        metricDataRows[startRowIndex].rowid, endRowID)

                    if statsSampleCache is not None:
                        # We already attempted to update anomaly likelihood params, so fix
                        # up endRowID to make sure we make progress and don't get stuck in
                        # an infinite loop
                        endRowID = metricDataRows[startRowIndex].rowid
                        self._log.warning(
                            "Advanced anomaly run cutoff to make progress: "
                            "model=%s; rows=[%s..%s]", metricObj.uid,
                            metricDataRows[startRowIndex].rowid, endRowID)
            else:
                # During prior iteration, there were not enough samples in cache for
                # updating anomaly params

                # We extend the end row so that there will be enough samples
                # to avoid getting stuck in this rut in the current and following
                # iterations
                # TODO: unit-test this
                endRowID = metricDataRows[startRowIndex].rowid + (
                    self._statisticsMinSampleSize - len(statsSampleCache) - 1)

            # Translate endRowID into metricDataRows limitIndex for current run
            if endRowID < metricDataRows[startRowIndex].rowid:
                # Cut-off precedes the remaining samples
                # Normally shouldn't be here (unless statistics config changed or there
                # is a gap in anomaly scores in metric_data table)
                # TODO: unit-test this

                # Set limit to bypass processing of samples for immediate refresh of
                # anomaly likelihood params
                limitIndex = startRowIndex
                self._log.warning(
                    "Anomaly run cutoff precedes samples, so forcing refresh of anomaly "
                    "likelihood params: modelInfo=<%s>; rows=[%s..%s]",
                    getMetricLogPrefix(metricObj),
                    metricDataRows[startRowIndex].rowid, endRowID)
            else:
                # Cutoff is either inside or after the remaining samples
                # TODO: unit-test this
                limitIndex = startRowIndex + min(
                    len(metricDataRows) - startRowIndex,
                    endRowID + 1 - metricDataRows[startRowIndex].rowid)

            # Process the next new sample run
            self._log.debug(
                "Starting anomaly run: model=%s; "
                "startRowIndex=%s; limitIndex=%s; rows=[%s..%s]; "
                "last_rowid_for_stats=%s; refreshInterval=%s; batchSize=%s",
                metricObj.uid, startRowIndex, limitIndex,
                metricDataRows[startRowIndex].rowid, endRowID,
                anomalyParams["last_rowid_for_stats"],
                statisticsRefreshInterval, len(metricDataRows))

            consumedSamples = []
            for md in itertools.islice(metricDataRows, startRowIndex,
                                       limitIndex):
                consumedSamples.append(md)

                (likelihood, ), _, anomalyParams["params"] = (
                    algorithms.updateAnomalyLikelihoods(
                        ((md.timestamp, md.metric_value,
                          md.raw_anomaly_score), ), anomalyParams["params"]))

                # TODO: the float "cast" here seems redundant
                md.anomaly_score = float(1.0 - likelihood)

                # If anomaly score > 0.99 then we greedily update the statistics. 0.99
                # should not repeat too often, but to be safe we wait a few more
                # records before updating again, in order to avoid overloading the DB.
                #
                # TODO: the magic 0.99 and the magic 3 value below should either
                #  be constants or config settings. Where should they be defined?
                if (md.anomaly_score > 0.99 and
                    (anomalyParams["last_rowid_for_stats"] + 3) < md.rowid):
                    if statsSampleCache is None or (
                            len(statsSampleCache) + len(consumedSamples) >=
                            self._statisticsMinSampleSize):
                        # TODO: unit-test this
                        self._log.info(
                            "Forcing refresh of anomaly params for model=%s due "
                            "to exceeded anomaly_score threshold in sample=%r",
                            metricObj.uid, md)
                        break

            if startRowIndex + len(consumedSamples) < len(metricDataRows) or (
                    consumedSamples[-1].rowid >= endRowID):
                # We stopped before the end of new samples, including a bypass-run,
                # or stopped after processing the last item and need one final refresh
                # of anomaly params
                anomalyParams, statsSampleCache = self._refreshAnomalyParams(
                    engine=engine,
                    metricID=metricObj.uid,
                    statsSampleCache=statsSampleCache,
                    consumedSamples=consumedSamples,
                    defaultAnomalyParams=anomalyParams)

            startRowIndex += len(consumedSamples)
        # <--- while

        return anomalyParams
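Note: the loop above delegates the likelihood math to algorithms.updateAnomalyLikelihoods. A minimal sketch of that estimate/update contract, assuming NuPIC's anomaly_likelihood module; the record values here are made up for illustration, not taken from the service:

import datetime

from nupic.algorithms import anomaly_likelihood

# (timestamp, metricValue, rawAnomalyScore) triples in ascending order
records = [(datetime.datetime(2013, 8, 15, 21, minute), 10.0 + minute, 0.1)
           for minute in range(30)]

# Bootstrap the distribution from already-scored rows...
_likelihoods, _avgRecords, params = (
    anomaly_likelihood.estimateAnomalyLikelihoods(records))

# ...then feed each new sample through the update call, carrying the params
# forward between iterations exactly as the while-loop above does
newRecord = (datetime.datetime(2013, 8, 15, 21, 30), 42.0, 0.9)
(likelihood,), _, params = anomaly_likelihood.updateAnomalyLikelihoods(
    [newRecord], params)
anomalyScore = 1.0 - likelihood  # the value stored on md.anomaly_score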
Example #40
  def GET(self, autostackId=None): # pylint: disable=C0103
    """
      Get instances for known Autostack:

      ::

          GET /_autostacks/{autostackId}/instances

      Preview Autostack instances:

      ::

          GET /_autostacks/preview_instances?region={region}&filters={filters}

      :param region: AWS Region Name
      :type region: str
      :param filters: AWS Tag value pattern
      :type filters: str (JSON object)

      Example query params:

      ::

          region=us-west-2&filters={"tag:Name":["jenkins-master"]}

      :return: List of instance details.  See
               AutostackInstancesHandler.formatInstance() for implementation.

      Example return value:

      ::

          [
            {
              "instanceID": "i-12345678",
              "state": "stopped",
              "regionName": "us-west-2",
              "instanceType": "m1.medium",
              "launchTime": "2013-09-24T02:02:48Z",
              "tags": {
                "Type": "Jenkins",
                "Description": "Jenkins Master",
                "Name": "jenkins-master"
              }
            },
            {
              "instanceID": "i-12345678",
              "state": "running",
              "regionName": "us-west-2",
              "instanceType": "m1.large",
              "launchTime": "2013-12-19T12:02:31Z",
              "tags": {
                "Type": "Jenkins",
                "Name": "jenkins-master",
                "Description": "Jenkin Master(Python 2.7)"
              }
            }
          ]
    """
    self.addStandardHeaders()
    aggSpec = {
      "datasource": "cloudwatch",  # only support EC2 for now
      "region": None,  # to be filled below
      "resourceType": "AWS::EC2::Instance",  # only support EC2 for now
      "filters": None  # to be filled below
    }
    adapter = createCloudwatchDatasourceAdapter()
    if autostackId is not None:
      try:
        with web.ctx.connFactory() as conn:
          autostackRow = repository.getAutostack(conn, autostackId)
      except ObjectNotFoundError:
        raise web.notfound("Autostack not found: Autostack ID: %s"
                           % autostackId)
      except web.HTTPError as ex:
        if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
          # Log 400-599 status codes as errors, ignoring 200-399
          log.error(str(ex) or repr(ex))
        raise
      except Exception as ex:
        raise web.internalerror(str(ex) or repr(ex))
      aggSpec["region"] = autostackRow.region
      aggSpec["filters"] = autostackRow.filters
      result = adapter.getMatchingResources(aggSpec)
    else:
      data = web.input(region=None, filters=None)
      if not data.region:
        raise InvalidRequestResponse({"result": "Invalid region"})
      if not data.filters:
        raise InvalidRequestResponse({"result": "Invalid filters"})

      try:
        aggSpec["region"] = data.region
        aggSpec["filters"] = utils.jsonDecode(data.filters)
        result = adapter.getMatchingResources(aggSpec)
      except boto.exception.EC2ResponseError as responseError:
        raise InvalidRequestResponse({"result": responseError.message})

    if result:
      return utils.jsonEncode([self.formatInstance(instance)
                               for instance in result])

    return utils.jsonEncode([])
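Note: a hypothetical client call for the preview endpoint documented above; the host, credentials, and filter values are placeholders, and the auth scheme is assumed rather than taken from the handler:

import json

import requests

response = requests.get(
    "https://localhost/_autostacks/preview_instances",
    params={"region": "us-west-2",
            "filters": json.dumps({"tag:Name": ["jenkins-master"]})},
    auth=("apikey", ""),  # placeholder credentials
    verify=False)
print(response.json())  # list of formatted instance dicts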
Example #41
    def setUpClass(cls):
        with open(
                os.path.join(grok.app.GROK_HOME,
                             "tests/py/data/app/webservices/models_list.json")
        ) as fileObj:
            cls.model_list = json.load(fileObj)

        cls.autostack = Mock(uid="blahblahblah",
                             region="bogus",
                             filters=jsonEncode({"tag:Name": ["Bogus"]}))
        cls.autostack.name = "Test"

        cls.jsonAutostack = jsonEncode({
            "uid": "blahblahblah",
            "name": "Test",
            "region": "bogus",
            "filters": {
                "tag:Name": ["Bogus"]
            }
        })
        cls.metric = Mock(
            uid="cebe9fab-f416-4845-8dab-02d292244112",
            datasource="autostack",
            description="The number of database connections in use "
            "by Amazon RDS database",
            server="grokdb2",
            location="us-east-1",
            parameters=jsonEncode({
                "region": "us-east-1",
                "DBInstanceIdentifier": "grokdb2"
            }),
            status=1,
            message=None,
            collector_error=None,
            last_timestamp="2013-08-15 21:25:00",
            poll_interval=60,
            tag_name=None,
            model_params=None,
            last_rowid=20277)
        cls.metric.name = "AWS/RDS/DatabaseConnections"

        cls.jsonMetric = jsonEncode({
            "uid": cls.metric.uid,
            "datasource": cls.metric.datasource,
            "name": cls.metric.name,
            "description": cls.metric.description,
            "server": cls.metric.server,
            "location": cls.metric.location,
            "parameters": jsonDecode(cls.metric.parameters),
            "status": cls.metric.status,
            "message": cls.metric.message,
            "last_timestamp": cls.metric.last_timestamp,
            "poll_interval": cls.metric.poll_interval,
            "tag_name": cls.metric.tag_name,
            "last_rowid": cls.metric.last_rowid,
            "display_name": cls.metric.server
        })
Example #42
    def PUT(self, deviceId):
        """
      Create, or update notification settings for device.

      ::

          PUT /_notifications/{deviceId}/settings

          {
            "email_addr": "*****@*****.**",
            "windowsize": 3600,
            "sensitivity": 0.99999
          }

      :param email_addr: Target email address associated with device
      :type email_addr: string
      :param windowsize: Notification window in seconds during which no other
        notifications for a given instance should be sent to a given device
      :type windowsize: int
      :param sensitivity: Anomaly score threshold that should trigger a
        notification
      :type sensitivity: float
    """
        data = web.data()

        if data:
            data = utils.jsonDecode(data) if isinstance(data,
                                                        basestring) else data

            try:
                with web.ctx.connFactory() as conn:
                    settingsRow = repository.getDeviceNotificationSettings(
                        conn, deviceId)

                settingsDict = dict([(col.name, settingsRow[col.name])
                                     for col in schema.notification_settings.c
                                     ])
            except ObjectNotFoundError:
                settingsDict = None

            if settingsDict:
                # Update existing
                changes = dict()

                if "windowsize" in data:
                    changes["windowsize"] = data["windowsize"]

                if "sensitivity" in data:
                    changes["sensitivity"] = data["sensitivity"]

                if "email_addr" in data:
                    changes["email_addr"] = data["email_addr"]

                if changes:
                    log.info(
                        "Notification settings updated for email=%s, "
                        "deviceid=%s, %r",
                        anonymizeEmail(settingsDict["email_addr"]), deviceId,
                        changes.keys())
                    with web.ctx.connFactory() as conn:
                        repository.updateDeviceNotificationSettings(
                            conn, deviceId, changes)

                self.addStandardHeaders()
                # Strip Content-Type; removing items from web.ctx.headers
                # while iterating over it would skip entries
                web.ctx.headers = [(header, value)
                                   for (header, value) in web.ctx.headers
                                   if header != "Content-Type"]
                raise web.HTTPError(status="204 No Content")

            else:
                # Create new settings

                if "windowsize" in data:
                    windowsize = data["windowsize"]
                else:
                    windowsize = 60 * 60  # TODO: Configurable default

                if "sensitivity" in data:
                    sensitivity = data["sensitivity"]
                else:
                    sensitivity = 0.99999  # TODO: Configurable default

                if "email_addr" in data:
                    email_addr = data["email_addr"]
                else:
                    email_addr = None

                with web.ctx.connFactory() as conn:
                    repository.addDeviceNotificationSettings(
                        conn, deviceId, windowsize, sensitivity, email_addr)
                log.info("Notification settings created for deviceid=%s",
                         deviceId)
                self.addStandardHeaders()
                raise web.created("")

        else:
            # Settings data is missing
            log.error(
                "Data is missing in request, raising BadRequest exception")
            raise web.badrequest("Settings data is missing")
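Note: a hypothetical client call against the settings endpoint above; the deviceId, host, email address, and credentials are placeholders. A 201 response indicates newly created settings; 204 indicates an update of existing settings:

import json

import requests

settings = {"email_addr": "user@example.com",  # illustrative address
            "windowsize": 3600,
            "sensitivity": 0.99999}
response = requests.put(
    "https://localhost/_notifications/device123/settings",
    data=json.dumps(settings),
    auth=("apikey", ""),  # placeholder credentials
    verify=False)
print(response.status_code)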
Example #43
  def testCompleteModelExportApiLifecycle(self):
    """
      Happy path testing for the route "/_models/export"
    """
    data = self.modelsTestData["create_data"]
    createResponse = self.app.put("/", utils.jsonEncode(data),
                                  headers=self.headers)
    assertions.assertSuccess(self, createResponse, code=201)

    # NOTE: export uses a new format
    expectedExportSpec = {
      "datasource": data["datasource"],
      "metricSpec": {
        "region": data["region"],
        "namespace": data["namespace"],
        "metric": data["metric"],
        "dimensions": data["dimensions"]
      }
    }

    # Test export all data
    response = self.app.get("/export", headers=self.headers)
    assertions.assertSuccess(self, response)
    exportedData = utils.jsonDecode(response.body)
    self.assertIsInstance(exportedData, list)
    self.assertEqual(exportedData[0], expectedExportSpec)
    responseData = utils.jsonDecode(createResponse.body)
    uid = responseData[0]["uid"]

    # Test for exporting single metric.
    response = self.app.get("/%s/export" % uid, headers=self.headers)
    assertions.assertSuccess(self, response)
    exportedData = utils.jsonDecode(response.body)
    self.assertIsInstance(exportedData, list)
    self.assertEqual(exportedData[0], expectedExportSpec)

    # Delete the model that was created earlier
    response = self.app.delete("/%s" % uid, headers=self.headers)
    assertions.assertDeleteSuccessResponse(self, response)

    # Import the model from exported data
    response = self.app.put("/", utils.jsonEncode(exportedData),
                            headers=self.headers)
    assertions.assertSuccess(self, response, code=201)
    responseData = utils.jsonDecode(response.body)
    uid = responseData[0]["uid"]

    # Export the newly-imported model
    response = self.app.get("/%s/export" % uid, headers=self.headers)
    assertions.assertSuccess(self, response)
    exportedData = utils.jsonDecode(response.body)
    self.assertIsInstance(exportedData, list)
    self.assertEqual(exportedData[0], expectedExportSpec)

    # Delete the model that was created earlier
    response = self.app.delete("/%s" % uid, headers=self.headers)
    assertions.assertDeleteSuccessResponse(self, response)

    # Import the model using legacy format
    legacyImportSpec = dict(type="metric", **data)
    response = self.app.put("/", utils.jsonEncode(legacyImportSpec),
                            headers=self.headers)
    assertions.assertSuccess(self, response, code=201)
    responseData = utils.jsonDecode(response.body)
    uid = responseData[0]["uid"]

    # Export the newly-imported model
    response = self.app.get("/%s/export" % uid, headers=self.headers)
    assertions.assertSuccess(self, response)
    exportedData = utils.jsonDecode(response.body)
    self.assertIsInstance(exportedData, list)
    self.assertEqual(exportedData[0], expectedExportSpec)
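Note: the test relies on the legacy flat import spec mapping onto the nested export format. A sketch of that mapping, using the same keys as the test data; the helper name is illustrative, not part of the API:

def legacyToExportSpec(legacy):
  """Map a legacy flat import spec onto the nested export format."""
  return {
    "datasource": legacy["datasource"],
    "metricSpec": {
      "region": legacy["region"],
      "namespace": legacy["namespace"],
      "metric": legacy["metric"],
      "dimensions": legacy["dimensions"],
    },
  }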
Example #44
  def setUpClass(cls):
    """
    Setup steps for all test cases.
    Focus for these is to cover all API checks for ModelDataHandler.
    Hence, this does all setup creating metric, waiting for
    metricData across all testcases, all API call for querying metricData
    will be against single metric created in setup
    Setup Process
    1) Update conf with aws credentials, ManagedTempRepository will not
       work in this test
    2) Select test instance such that its running from longer time,
       We are using instance older than 15 days
    3) Create Metric, wait for min metricData rows to become available
       Set to 100, configurable
    4) Pick testRowId, set it lower value this will make sure to have
       Non NULL value for anomaly_score field for given row while invoking
       GET with consitions, set to 5
    5) Decide queryParams for anomalyScore, to and from timestamp
    """
    cls.headers = getDefaultHTTPHeaders(YOMP.app.config)

    # All other services need AWS credentials to work
    # Set AWS credentials
    YOMP.app.config.loadConfig()

    # Select a test instance that has been running for a long time
    g_logger.info("Getting long-running EC2 Instances")
    instances = aws_utils.getLongRunningEC2Instances("us-west-2",
      YOMP.app.config.get("aws", "aws_access_key_id"),
      YOMP.app.config.get("aws", "aws_secret_access_key"), 15)
    # randrange(n) covers indices 0..n-1, so every instance is a candidate
    testInstance = instances[randrange(len(instances))]

    createModelData = {
      "region": "us-west-2",
      "namespace": "AWS/EC2",
      "datasource": "cloudwatch",
      "metric": "CPUUtilization",
      "dimensions": {
        "InstanceId": testInstance.id
      }
    }

    # Number of minimum rows
    cls.minDataRows = 100

    cls.app = TestApp(models_api.app.wsgifunc())

    # create test metric
    g_logger.info("Creating test metric; modelSpec=%s", createModelData)
    response = cls.app.put("/", utils.jsonEncode(createModelData),
                           headers=cls.headers)
    postResult = utils.jsonDecode(response.body)
    maxWaitTime = 600
    waitTimeMetricData = 0
    waitAnomalyScore = 0

    # Wait for enough metric data to be available
    cls.uid = postResult[0]["uid"]
    engine = repository.engineFactory()
    with engine.connect() as conn:
      cls.metricData = [row for row
                         in repository.getMetricData(conn, cls.uid)]
    with engine.connect() as conn:
      cls.testMetric = repository.getMetric(conn, cls.uid)

    # Confirm that we have enough metricData
    g_logger.info("Waiting for metric data")
    while (len(cls.metricData) < cls.minDataRows and
           waitTimeMetricData < maxWaitTime):
      g_logger.info("not ready, waiting for metric data: got %d of %d ...",
                    len(cls.metricData), cls.minDataRows)
      time.sleep(5)
      waitTimeMetricData += 5
      with engine.connect() as conn:
        cls.metricData = [row for row
                           in repository.getMetricData(conn, cls.uid)]

    # Use a low value for testRowId; this ensures a non-NULL
    # anomaly_score value for the given row
    cls.testRowId = 5

    with engine.connect() as conn:
      cls.testMetricRow = (repository.getMetricData(conn,
                                                     cls.uid,
                                                     rowid=cls.testRowId)
                          .fetchone())

    # Make sure we did not receive None etc for anomaly score
    g_logger.info("cls.testMetricRow.anomaly_score=%r",
                  cls.testMetricRow.anomaly_score)
    g_logger.info("waitAnomalyScore=%r", waitAnomalyScore)
    while (cls.testMetricRow.anomaly_score is None and
           waitAnomalyScore < maxWaitTime):
      g_logger.info("anomaly_score not ready, sleeping...")
      time.sleep(5)
      waitAnomalyScore += 5
      with engine.connect() as conn:
        cls.testMetricRow = (repository.getMetricData(conn,
                                                      cls.uid,
                                                      rowid=cls.testRowId)
                            .fetchone())

    # Decide queryParams for anomalyScore, to and from timestamp
    cls.testAnomalyScore = cls.testMetricRow.anomaly_score
    cls.testTimeStamp = cls.testMetricRow.timestamp
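Note: the two wait loops above poll the repository until a condition holds or a timeout expires. A minimal, generic sketch of that pattern; the names `condition`, `maxWaitTime`, and `intervalSec` are illustrative stand-ins, not part of the test:

import time

def waitFor(condition, maxWaitTime=600, intervalSec=5):
  """Poll `condition` until it returns a truthy value or time runs out."""
  waited = 0
  while waited < maxWaitTime:
    result = condition()
    if result:
      return result
    time.sleep(intervalSec)
    waited += intervalSec
  raise RuntimeError("Timed out after %d seconds" % maxWaitTime)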
Example #45
  def POST(self): # pylint: disable=C0103
    r"""
      Create an Autostack

      ::

          POST /_autostacks

          {
            "name": {name},
            "region": {region},
            "filters": {
              "tag:{Name}": ["{value}", "{value}", ...],
              "tag:{Description}": ["{value}", "{value}", ...],
              "tag:{etc}": ["{value}", "{value}", ...]
            }
          }

      Request body must be a dictionary that includes:

      :param name: Unique autostack name
      :type name: str
      :param region: AWS region
      :type region: str
      :param filters: AWS Tag value pattern
      :type filters: dict

      The creation request will be rejected if the filters match more than
      MAX_INSTANCES_PER_AUTOSTACK.

      From http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html:

      ::

        You can also use wildcards with the filter values. An asterisk (*)
        matches zero or more characters, and a question mark (?) matches
        exactly one character. For example, you can use *database* as a filter
        value to get all EBS snapshots that include database in the
        description. If you were to specify database as the filter value, then
        only snapshots whose description equals database would be returned.
        Filter values are case sensitive. We support only exact string
        matching, or substring matching (with wildcards).

        Tip

          Your search can include the literal values of the wildcard
          characters; you just need to escape them with a backslash before the
          character. For example, a value of \*numenta\?\\ searches for the
          literal string *numenta?\.
    """
    try:
      self.addStandardHeaders()
      data = web.data()
      if not data:
        raise web.badrequest("Autostack data is missing")
      nativeMetric = utils.jsonDecode(data)
      try:
        stackSpec = {
          "name": nativeMetric["name"],
          "aggSpec": {
            "datasource": "cloudwatch",  # only support cloudwatch for now
            "region": nativeMetric["region"],
            "resourceType": "AWS::EC2::Instance",  # only support EC2 for now
            "filters": nativeMetric["filters"]
          }
        }
        adapter = createAutostackDatasourceAdapter()

        with web.ctx.connFactory() as conn:
          checkQuotaForInstanceAndRaise(conn, None)

        autostack = adapter.createAutostack(stackSpec)
        result = dict(autostack.items())
      except DuplicateRecordError:
        # TODO [MER-3543]: Make sure this actually gets hit
        raise web.internalerror(
            "The name you are trying to use, '%s', is already in use in AWS "
            "region '%s'. Please enter a unique Autostack name." % (
                nativeMetric.get("name", "None"),
                nativeMetric.get("region", "None")))
      raise web.created(utils.jsonEncode(result))
    except (web.HTTPError, QuotaError) as ex:
      if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
        # Log 400-599 status codes as errors, ignoring 200-399
        log.error(str(ex) or repr(ex))
      raise
    except Exception as ex:
      log.exception("POST Failed")
      raise web.internalerror(str(ex) or repr(ex))
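Note: a hypothetical request that exercises the endpoint above, using a wildcard filter as described in the quoted AWS documentation; the host, stack name, and credentials are placeholders:

import json

import requests

stackSpec = {"name": "jenkins-workers",
             "region": "us-west-2",
             "filters": {"tag:Name": ["*jenkins*"]}}  # wildcard match
response = requests.post(
    "https://localhost/_autostacks",
    data=json.dumps(stackSpec),
    auth=("apikey", ""),  # placeholder credentials
    verify=False)
print(response.status_code, response.text)  # 201 plus the new Autostack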
Example #46
    def _initAnomalyLikelihoodModel(self, engine, metricObj, metricDataRows):
        """ Create the anomaly likelihood model for the given Metric instance.
    Assumes that the metric doesn't have anomaly params yet.

    :param engine: SQLAlchemy engine object
    :type engine: sqlalchemy.engine.Engine

    :param metricObj: Metric instance with no anomaly likelihood params
    :param metricDataRows: a sequence of MetricData instances
      corresponding to the inference results batch in the processed order
      (ascending by rowid and timestamp) with updated raw_anomaly_score and
      zeroed out anomaly_score corresponding to the new model inference results,
      but not yet updated in the database. Will not alter this sequence.

    :returns: the tuple (anomalyParams, statsSampleCache, startRowIndex)
      anomalyParams: None, if there are too few samples; otherwise, the anomaly
        likelihood objects as returned by algorithms.estimateAnomalyLikelihoods
      statsSampleCache: None, if there are too few samples; otherwise, a list of
        MetricData instances comprising a concatenation of rows sourced
        from metric_data tail and topped off with necessary items from the
        given metricDataRows for a minimum of self._statisticsMinSampleSize and
        a maximum of self._statisticsSampleSize total items.
      startRowIndex: Index into the given metricDataRows where processing of
        anomaly scores is to start; if there are too few samples to generate
        the anomaly likelihood params, then startRowIndex will reference past
        the last item in the given metricDataRows sequence.
    """
        if metricObj.status != MetricStatus.ACTIVE:
            raise MetricNotActiveError(
                "getAnomalyLikelihoodParams failed because metric=%s is not ACTIVE; "
                "status=%s; resource=%s" % (
                    metricObj.uid,
                    metricObj.status,
                    metricObj.server,
                ))

        modelParams = jsonDecode(metricObj.model_params)
        anomalyParams = modelParams.get("anomalyLikelihoodParams", None)

        assert not anomalyParams, anomalyParams

        statsSampleCache = None

        # Index into metricDataRows where processing of anomaly scores is to start
        startRowIndex = 0

        with engine.connect() as conn:
            numProcessedRows = repository.getProcessedMetricDataCount(
                conn, metricObj.uid)

        if (numProcessedRows + len(metricDataRows) >=
                self._statisticsMinSampleSize):
            # We have enough samples to initialize the anomaly likelihood model
            # TODO: unit-test

            # Determine how many samples will be used from metricDataRows
            numToConsume = max(
                0, self._statisticsMinSampleSize - numProcessedRows)
            consumedSamples = metricDataRows[:numToConsume]
            startRowIndex += numToConsume

            # Create the anomaly likelihood model
            anomalyParams, statsSampleCache = self._refreshAnomalyParams(
                engine=engine,
                metricID=metricObj.uid,
                statsSampleCache=None,
                consumedSamples=consumedSamples,
                defaultAnomalyParams=anomalyParams)

            # If this assertion fails, it implies that the processed metric
            # data count we retrieved above is no longer correct
            assert anomalyParams

            self._log.info(
                "Generated initial anomaly params for model=%s: "
                "numSamples=%d; firstRowID=%s; lastRowID=%s; ", metricObj.uid,
                len(statsSampleCache), statsSampleCache[0].rowid,
                statsSampleCache[-1].rowid)
        else:
            # Not enough raw scores yet to begin anomaly likelihoods processing
            # TODO: unit-test
            startRowIndex = len(metricDataRows)

        return anomalyParams, statsSampleCache, startRowIndex
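Note: an illustrative-only sketch of the cache-building contract described in the docstring above: reuse processed rows from the metric_data tail, top off with rows from the new batch until the minimum sample size is reached, and cap the cache at the maximum sample size. All names are hypothetical stand-ins, not the actual implementation:

def buildStatsSampleCache(tailRows, newRows,
                          statisticsMinSampleSize=200,
                          statisticsSampleSize=600):
    if len(tailRows) + len(newRows) < statisticsMinSampleSize:
        return None  # too few samples; likelihood processing is deferred
    numToConsume = max(0, statisticsMinSampleSize - len(tailRows))
    cache = list(tailRows) + list(newRows[:numToConsume])
    # keep no more than the most recent statisticsSampleSize items
    return cache[-statisticsSampleSize:]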
Example #47
  def _getCloudWatchCommon(self, url, expectedResult):
    response = self.app.get(url, headers=self.headers)
    assertions.assertSuccess(self, response)
    result = app_utils.jsonDecode(response.body)
    self.assertIsInstance(result, dict)
    self.assertEqual(result, expectedResult)
Example #48
  def testCompleteModelsApiLifecycle(self):
    """
      Happy path testing for the route "/_models"
    """
    # get all models in the system when there are no models
    # expected response is []
    response = self.app.get("/", headers=self.headers)
    assertions.assertSuccess(self, response)
    allModelsResult = utils.jsonDecode(response.body)
    self.assertEqual(len(allModelsResult), 0)
    self.assertIsInstance(allModelsResult, list)

    data = self.modelsTestData["create_data"]

    # create a model using PUT;
    # Any HTTP POST call is forwarded to HTTP PUT in the Model API.
    #   def POST(self):
    #      return self.PUT()
    # The tests are just calling PUT.
    # TODO: wouldn't POST be a better method to test in that case, since it
    #  would exercise both POST and PUT?
    response = self.app.put("/", utils.jsonEncode(data), headers=self.headers)
    assertions.assertSuccess(self, response, code=201)
    postResult = utils.jsonDecode(response.body)
    self.assertEqual(len(postResult), 1)
    self._checkCreateModelResult(postResult[0], data)

    # get model that was previously created
    uid = postResult[0]["uid"]
    response = self.app.get("/%s" % uid, headers=self.headers)
    assertions.assertSuccess(self, response)
    getModelResult = utils.jsonDecode(response.body)
    self.assertItemsEqual(getModelResult[0].keys(),
      self.modelsTestData["get_response"].keys())

    # get all models in the system
    response = self.app.get("/", headers=self.headers)
    assertions.assertSuccess(self, response)
    allModelsResult = utils.jsonDecode(response.body)
    self.assertItemsEqual(allModelsResult[0].keys(),
      self.modelsTestData["get_response"].keys())
    self.assertEqual(len(allModelsResult), 1)

    # Repeat the request to monitor same metric and verify that it returns the
    # same model uid instead of creating a new one
    response = self.app.post("/", utils.jsonEncode(data), headers=self.headers)

    assertions.assertSuccess(self, response, code=201)
    postResult = utils.jsonDecode(response.body)
    self.assertEqual(postResult[0]["uid"], uid)
    self.assertEqual(len(postResult), 1)
    self._checkCreateModelResult(postResult[0], data)

    # Compare http and https responses for all models
    for _ in range(3):
      https_response = requests.get("https://localhost/_models",
                                    headers=self.headers,
                                    verify=False)
      http_response = requests.get("http://localhost/_models",
                                   headers=self.headers)

      self.assertEqual(http_response.status_code, 200)
      self.assertEqual(https_response.status_code, 200)

      httpsData = json.loads(https_response.text)

      try:
        self.assertIsInstance(httpsData, list)
        self.assertTrue(httpsData)
        for item in httpsData:
          self.assertIn("status", item)
          self.assertIn("last_rowid", item)
          self.assertIn("display_name", item)
          self.assertIn("uid", item)
          self.assertIn("datasource", item)

        httpData = json.loads(http_response.text)
        self.assertIsInstance(httpData, list)
        self.assertTrue(httpData)
        for item in httpData:
          self.assertIn("status", item)
          self.assertIn("last_rowid", item)
          self.assertIn("display_name", item)
          self.assertIn("uid", item)
          self.assertIn("datasource", item)

        self.assertEqual(http_response.text, https_response.text)

        break
      except AssertionError:
        time.sleep(10)

    else:
      self.fail("Unable to synchronize http and https responses.")

    # Compare http and https response for all models data
    https_response = requests.get("https://localhost/_models/data",
                                  headers=self.headers,
                                  verify=False)
    http_response = requests.get("http://localhost/_models/data",
                                 headers=self.headers)

    self.assertEqual(http_response.status_code, 200)
    self.assertEqual(https_response.status_code, 200)

    httpData = json.loads(http_response.text)
    self.assertIsInstance(httpData, dict)
    self.assertItemsEqual(httpData.keys(), ["metrics", "names"])
    self.assertItemsEqual(httpData["names"], ["timestamp",
                                              "value",
                                              "anomaly_score",
                                              "rowid"])

    httpsData = json.loads(https_response.text)
    self.assertIsInstance(httpsData, dict)
    self.assertItemsEqual(httpsData.keys(), ["metrics", "names"])
    self.assertItemsEqual(httpsData["names"], ["timestamp",
                                               "value",
                                               "anomaly_score",
                                               "rowid"])

    # delete the model that was created earlier
    response = self.app.delete("/%s" % uid, headers=self.headers)
    assertions.assertDeleteSuccessResponse(self, response)
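Note: the http/https comparison above leans on Python's for/else idiom: the else clause runs only if the loop finishes without hitting break. A stripped-down sketch of that retry pattern, where `checkOnce` is a stand-in for the assertion block:

import time

def retryUntilConsistent(checkOnce, attempts=3, delaySec=10):
  for _ in range(attempts):
    try:
      checkOnce()
      break  # success; the else clause below is skipped
    except AssertionError:
      time.sleep(delaySec)
  else:
    raise AssertionError("responses never became consistent")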
Example #49
    def PUT(self, modelId=None):
        """
    Create Model

    ::

        POST /_models

    Data: Use the metric as returned by the datasource metric list.

    For example, create a custom model, include the following data in the
    POST request (uid is the same for the metric and model):

    ::

        {
            "uid": "2a123bb1dd4d46e7a806d62efc29cbb9",
            "datasource": "custom",
            "min": 0.0,
            "max": 5000.0
        }

    The "min" and "max" options are optional.
    """
        if modelId:
            # ModelHandler is overloaded to handle both single-model requests, and
            # multiple-model requests.  As a result, if a user makes a POST, or PUT
            # request, it's possible that the request can be routed to this handler
            # if the url pattern matches.  This specific POST handler is not meant
            # to operate on a known model, therefore, raise an exception, and return
            # a `405 Method Not Allowed` response.
            raise NotAllowedResponse({"result": "Not supported"})

        data = web.data()
        if data:
            try:
                if isinstance(data, basestring):
                    request = utils.jsonDecode(data)
                else:
                    request = data
            except ValueError as e:
                response = "InvalidArgumentsError(): " + repr(e)
                raise InvalidRequestResponse({"result": response})

            if not isinstance(request, list):
                request = [request]

            response = []
            for nativeMetric in request:
                try:
                    validate(nativeMetric, _CUSTOM_MODEL_CREATION_SCHEMA)

                except ValidationError as e:
                    # Catch ValidationError if validation fails
                    # InvalidRequestResponse produces an HTTP 400 error code
                    response = "InvalidArgumentsError(): " + repr(e)
                    raise InvalidRequestResponse({"result": response})
        else:
            # Metric data is missing
            log.error(
                "Data is missing in request, raising BadRequest exception")
            raise web.badrequest("Metric data is missing")

        try:
            self.addStandardHeaders()
            metricRowList = self.createModels(data)

            metricDictList = [
                formatMetricRowProxy(metricRow) for metricRow in metricRowList
            ]
            response = utils.jsonEncode(metricDictList)

            raise web.created(response)

        except web.HTTPError as ex:
            if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
                # Log 400-599 status codes as errors, ignoring 200-399
                log.error(str(ex) or repr(ex))
            raise
        except Exception as ex:
            log.exception("PUT Failed")
            raise web.internalerror(str(ex) or repr(ex))
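Note: a hypothetical client request matching the docstring above, reusing its example uid; the host and credentials are placeholders:

import json

import requests

modelSpec = {"uid": "2a123bb1dd4d46e7a806d62efc29cbb9",
             "datasource": "custom",
             "min": 0.0,     # optional
             "max": 5000.0}  # optional
response = requests.put(
    "https://localhost/_models",
    data=json.dumps(modelSpec),
    auth=("apikey", ""),  # placeholder credentials
    verify=False)
print(response.status_code)  # 201 on success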
Example #50
  def testDeleteModelValid(self, _createDatasourceAdapterMock,
                           _deleteHTMModel, _getMetricMock, _deleteModelMock,
                           _engineMock):
    response = self.app.delete("/12232-jn939", headers=self.headers)
    result = jsonDecode(response.body)
    self.assertEqual(result, {"result": "success"})
Example #51
  def POST(self, autostackId, data=None): # pylint: disable=C0103,R0201
    """
      Create one or more Autostack Metric(s)

      ::

          POST /_autostacks/{autostackId}/metrics

          [
            {
              "namespace": "AWS/EC2",
              "metric": "CPUUtilization"
            },
            ...
          ]

      Request body is a list of items, each of which are a subset of the
      standard cloudwatch native metric, specifying only:

      :param namespace: AWS Namespace
      :type namespace: str
      :param metric: AWS Metric name
      :type metric: str

      `datasource`, `region`, and `dimensions` normally required when creating
      models are not necessary.
    """
    try:
      self.addStandardHeaders()
      with web.ctx.connFactory() as conn:
        autostackRow = repository.getAutostack(conn,
                                               autostackId)
      data = data or utils.jsonDecode(web.data())

      for nativeMetric in data:
        try:
          if nativeMetric["namespace"] == "Autostacks":
            slaveDatasource = "autostack"
          else:
            slaveDatasource = "cloudwatch"  # only support cloudwatch for now

          modelParams = {}
          if "min" and "max" in nativeMetric:
            modelParams["min"] = nativeMetric["min"]
            modelParams["max"] = nativeMetric["max"]

          modelSpec = {
            "datasource": "autostack",
            "metricSpec": {
              "autostackId": autostackRow.uid,
              "slaveDatasource": slaveDatasource,
              "slaveMetric": nativeMetric
            },
            "modelParams": modelParams
          }

          metricId = (createAutostackDatasourceAdapter()
                      .monitorMetric(modelSpec))
          with web.ctx.connFactory() as conn:
            metricRow = repository.getMetric(conn, metricId)
          metricDict = convertMetricRowToMetricDict(metricRow)

        except KeyError:
          raise web.badrequest("Missing details in request")

        except ValueError:
          response = {"result": "failure"}
          raise web.badrequest(utils.jsonEncode(response))

      response = {"result": "success", "metric": metricDict}
      raise web.created(utils.jsonEncode(response))

    except ObjectNotFoundError:
      raise web.notfound("Autostack not found: Autostack ID: %s" % autostackId)
    except web.HTTPError as ex:
      if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
        # Log 400-599 status codes as errors, ignoring 200-399
        log.error(str(ex) or repr(ex))
      raise
    except Exception as ex:
      log.exception("POST Failed")
      raise web.internalerror(str(ex) or repr(ex))
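Note: a hypothetical request body for the endpoint above, listing two CloudWatch metrics for an existing Autostack; the autostack id, host, and credentials are placeholders:

import json

import requests

metrics = [{"namespace": "AWS/EC2", "metric": "CPUUtilization"},
           {"namespace": "AWS/EC2", "metric": "NetworkIn"}]
response = requests.post(
    "https://localhost/_autostacks/abc123/metrics",
    data=json.dumps(metrics),
    auth=("apikey", ""),  # placeholder credentials
    verify=False)
print(response.status_code)  # 201 Created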