Example 1
 def createTestData(self):
   print "Searching for instances in us-west-2 that are running since 15 days or more."
   instances = aws.getLongRunningEC2Instances("us-west-2", os.environ["AWS_ACCESS_KEY_ID"],
       os.environ["AWS_SECRET_ACCESS_KEY"], 15)
   print "Found %s instance(s)." % len(instances)
   metrics = ["CPUUtilization", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut"]
   testData = []
   for instance in instances:
     for metric in metrics:
       model = {
            "region": "us-west-2",
            "namespace": "AWS/EC2",
            "datasource": "cloudwatch",
            "metric": metric,
            "dimensions": {
              "InstanceId": instance.id
            }
       }
       testData.append(model)
   return testData
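
For reference, each element of the list returned by createTestData is a CloudWatch model spec with the shape below; the InstanceId value here is illustrative only, not taken from the source.

# Illustrative element of testData for the "CPUUtilization" metric of a
# hypothetical instance id.
sampleModelSpec = {
  "region": "us-west-2",
  "namespace": "AWS/EC2",
  "datasource": "cloudwatch",
  "metric": "CPUUtilization",
  "dimensions": {
    "InstanceId": "i-0123456789abcdef0"  # hypothetical instance id
  }
}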
Example 2
    def setUpClass(cls):
        """
    Setup steps for all test cases.
    Focus for these is to cover all API checks for ModelDataHandler.
    Hence, this does all setup creating metric, waiting for
    metricData across all testcases, all API call for querying metricData
    will be against single metric created in setup
    Setup Process
    1) Update conf with aws credentials, ManagedTempRepository will not
       work in this test
    2) Select test instance such that its running from longer time,
       We are using instance older than 15 days
    3) Create Metric, wait for min metricData rows to become available
       Set to 100, configurable
    4) Pick testRowId, set it lower value this will make sure to have
       Non NULL value for anomaly_score field for given row while invoking
       GET with consitions, set to 5
    5) Decide queryParams for anomalyScore, to and from timestamp
    """
        cls.headers = getDefaultHTTPHeaders(YOMP.app.config)

        # All other services need AWS credentials to work
        # Set AWS credentials
        YOMP.app.config.loadConfig()

        # Select a test instance that has been running for a long time
        g_logger.info("Getting long-running EC2 Instances")
        instances = aws_utils.getLongRunningEC2Instances(
            "us-west-2",
            YOMP.app.config.get("aws", "aws_access_key_id"),
            YOMP.app.config.get("aws", "aws_secret_access_key"),
            15,
        )
        # Pick a random test instance; randrange(len(instances)) includes
        # index 0 and works when only one instance is returned.
        testInstance = instances[randrange(len(instances))]

        createModelData = {
            "region": "us-west-2",
            "namespace": "AWS/EC2",
            "datasource": "cloudwatch",
            "metric": "CPUUtilization",
            "dimensions": {"InstanceId": testInstance.id},
        }

        # Minimum number of metricData rows to wait for
        cls.minDataRows = 100

        cls.app = TestApp(models_api.app.wsgifunc())

        # create test metric
        g_logger.info("Creating test metric; modelSpec=%s", createModelData)
        response = cls.app.put("/", utils.jsonEncode(createModelData), headers=cls.headers)
        postResult = utils.jsonDecode(response.body)
        maxWaitTime = 600
        waitTimeMetricData = 0
        waitAnomalyScore = 0

        # Wait for enough metric data to be available
        cls.uid = postResult[0]["uid"]
        engine = repository.engineFactory()
        with engine.connect() as conn:
            cls.metricData = [row for row in repository.getMetricData(conn, cls.uid)]
        with engine.connect() as conn:
            cls.testMetric = repository.getMetric(conn, cls.uid)

        # Confirm that we have enough metricData
        g_logger.info("Waiting for metric data")
        while len(cls.metricData) < cls.minDataRows and waitTimeMetricData < maxWaitTime:
            g_logger.info("not ready, waiting for metric data: got %d of %d ...", len(cls.metricData), cls.minDataRows)
            time.sleep(5)
            waitTimeMetricData += 5
            with engine.connect() as conn:
                cls.metricData = [row for row in repository.getMetricData(conn, cls.uid)]

        # Use a low value for testRowId; this ensures a non-NULL
        # anomaly_score value for the given row
        cls.testRowId = 5

        with engine.connect() as conn:
            cls.testMetricRow = repository.getMetricData(conn, cls.uid, rowid=cls.testRowId).fetchone()

        # Make sure we did not receive None for the anomaly score
        g_logger.info("cls.testMetricRow.anomaly_score=%r", cls.testMetricRow.anomaly_score)
        g_logger.info("waitAnomalyScore=%r", waitAnomalyScore)
        while cls.testMetricRow.anomaly_score is None and waitAnomalyScore < maxWaitTime:
            g_logger.info("anomaly_score not ready, sleeping...")
            time.sleep(5)
            waitAnomalyScore += 5
            with engine.connect() as conn:
                cls.testMetricRow = repository.getMetricData(conn, cls.uid, rowid=cls.testRowId).fetchone()

        # Decide queryParams for anomalyScore and the to/from timestamps
        cls.testAnomalyScore = cls.testMetricRow.anomaly_score
        cls.testTimeStamp = cls.testMetricRow.timestamp
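
Both wait loops above follow the same poll-until-ready-or-timeout pattern: re-read the value every 5 seconds until it is usable or maxWaitTime (600 seconds) elapses. Below is a minimal standalone sketch of that pattern; the helper name and parameters are hypothetical and not part of the source.

import time

def waitFor(getValue, isReady, maxWaitTime=600, interval=5):
  """Poll getValue() every `interval` seconds until isReady(value) is true
  or maxWaitTime seconds have elapsed; return the last value seen."""
  waited = 0
  value = getValue()
  while not isReady(value) and waited < maxWaitTime:
    time.sleep(interval)
    waited += interval
    value = getValue()
  return value

# For example, waiting for a non-None anomaly score on a metric data row
# (fetchRow is a hypothetical callable that re-reads the row):
# row = waitFor(fetchRow, lambda r: r.anomaly_score is not None)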
Example 3
  def setUpClass(cls):
    """
    Setup steps for all test cases.
    Focus for these is to cover all API checks for ModelDataHandler.
    Hence, this does all setup creating metric, waiting for
    metricData across all testcases, all API call for querying metricData
    will be against single metric created in setup
    Setup Process
    1) Update conf with aws credentials, ManagedTempRepository will not
       work in this test
    2) Select test instance such that its running from longer time,
       We are using instance older than 15 days
    3) Create Metric, wait for min metricData rows to become available
       Set to 100, configurable
    4) Pick testRowId, set it lower value this will make sure to have
       Non NULL value for anomaly_score field for given row while invoking
       GET with consitions, set to 5
    5) Decide queryParams for anomalyScore, to and from timestamp
    """
    cls.headers = getDefaultHTTPHeaders(YOMP.app.config)

    # All other services need AWS credentials to work
    # Set AWS credentials
    YOMP.app.config.loadConfig()

    # Select a test instance that has been running for a long time
    g_logger.info("Getting long-running EC2 Instances")
    instances = aws_utils.getLongRunningEC2Instances("us-west-2",
      YOMP.app.config.get("aws", "aws_access_key_id"),
      YOMP.app.config.get("aws", "aws_secret_access_key"), 15)
    # Pick a random test instance; randrange(len(instances)) includes
    # index 0 and works when only one instance is returned.
    testInstance = instances[randrange(len(instances))]

    createModelData = {
      "region": "us-west-2",
      "namespace": "AWS/EC2",
      "datasource": "cloudwatch",
      "metric": "CPUUtilization",
      "dimensions": {
        "InstanceId": testInstance.id
      }
    }

    # Minimum number of metricData rows to wait for
    cls.minDataRows = 100

    cls.app = TestApp(models_api.app.wsgifunc())

    # create test metric
    g_logger.info("Creating test metric; modelSpec=%s", createModelData)
    response = cls.app.put("/", utils.jsonEncode(createModelData),
     headers=cls.headers)
    postResult = utils.jsonDecode(response.body)
    maxWaitTime = 600
    waitTimeMetricData = 0
    waitAnomalyScore = 0


    # Wait for enough metric data to be available
    cls.uid = postResult[0]["uid"]
    engine = repository.engineFactory()
    with engine.connect() as conn:
      cls.metricData = [row for row
                         in repository.getMetricData(conn, cls.uid)]
    with engine.connect() as conn:
      cls.testMetric = repository.getMetric(conn, cls.uid)

    # Confirm that we have enough metricData
    g_logger.info("Waiting for metric data")
    while (len(cls.metricData) < cls.minDataRows and
           waitTimeMetricData < maxWaitTime):
      g_logger.info("not ready, waiting for metric data: got %d of %d ...",
                    len(cls.metricData), cls.minDataRows)
      time.sleep(5)
      waitTimeMetricData += 5
      with engine.connect() as conn:
        cls.metricData = [row for row
                           in repository.getMetricData(conn, cls.uid)]

    # Use a low value for testRowId; this ensures a non-NULL
    # anomaly_score value for the given row
    cls.testRowId = 5

    with engine.connect() as conn:
      cls.testMetricRow = (repository.getMetricData(conn,
                                                     cls.uid,
                                                     rowid=cls.testRowId)
                          .fetchone())

    # Make sure we did not receive None for the anomaly score
    g_logger.info("cls.testMetricRow.anomaly_score=%r",
                  cls.testMetricRow.anomaly_score)
    g_logger.info("waitAnomalyScore=%r", waitAnomalyScore)
    while (cls.testMetricRow.anomaly_score is None and
           waitAnomalyScore < maxWaitTime):
      g_logger.info("anomaly_score not ready, sleeping...")
      time.sleep(5)
      waitAnomalyScore += 5
      with engine.connect() as conn:
        cls.testMetricRow = (repository.getMetricData(conn,
                                                      cls.uid,
                                                      rowid=cls.testRowId)
                            .fetchone())

    # Decide queryParams for anomalyScore and the to/from timestamps
    cls.testAnomalyScore = cls.testMetricRow.anomaly_score
    cls.testTimeStamp = cls.testMetricRow.timestamp
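
The attributes prepared above (cls.uid, cls.minDataRows, cls.testRowId, cls.testAnomalyScore, cls.testTimeStamp) are what the individual test cases run against. Below is a minimal, hypothetical sketch of a test method that sanity-checks those fixtures, assuming neither wait loop timed out; the method name is not from the source.

  def testSetupProducedUsableFixtures(self):
    # Assumes setUpClass completed before hitting maxWaitTime.
    self.assertIsNotNone(self.uid)
    self.assertGreaterEqual(len(self.metricData), self.minDataRows)
    self.assertIsNotNone(self.testAnomalyScore)
    self.assertIsNotNone(self.testTimeStamp)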