Code Example #1
    def testProcessFailedDefineModelCommandResultWhileInCreatePendingState(
            self, repoMock, *_args):
        """Test the scenario where a failed "defineModel" result is delivered while
        the Metric is in CREATE_PENDING state
        """
        class MetricRowSpec(object):
            status = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.CREATE_PENDING)
        repoMock.getMetric.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        metricID = "abc"
        result = anomaly_service.ModelCommandResult(
            commandID="123",
            method="defineModel",
            status=htmengineerrno.ERR_INVALID_ARG,
            errorMessage="invalid arg")

        runner._processModelCommandResult(metricID=metricID, result=result)

        repoMock.setMetricStatus.assert_called_with(
            (repoMock.engineFactory.return_value.connect.return_value.
             __enter__.return_value), metricID, MetricStatus.ERROR,
            result.errorMessage)
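These listings omit the imports and the @patch decorator stack that inject repoMock and the other mock arguments into each test method. The following is a minimal sketch of the scaffolding they appear to assume; the import paths and patch targets are assumptions inferred from the snippets, not confirmed by this listing:

    # Hedged sketch of the assumed test scaffolding; the import paths and
    # patch targets below are assumptions inferred from the snippets.
    import datetime
    import json
    import unittest

    import pkg_resources
    import sqlalchemy
    import validictory
    from mock import MagicMock, Mock, patch

    from htmengine import htmengineerrno
    from htmengine import exceptions as app_exceptions  # assumed path
    from htmengine.model_swapper import model_swapper_interface
    from htmengine.model_swapper.model_swapper_interface import (
        ModelInferenceResult)
    from htmengine.repository.queries import MetricStatus  # assumed path
    from htmengine.runtime import anomaly_service


    class AnomalyServiceTestCase(unittest.TestCase):

        # The bottom-most @patch supplies the first mock argument; any
        # additional patched objects land in *_args.
        @patch.object(anomaly_service, "repository", autospec=True)
        def testExample(self, repoMock, *_args):
            ...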
Code Example #2
    def testRunWithModelInferenceResultBatch(self, _repositoryMock,
                                             ModelSwapperInterfaceMock,
                                             *_args):
        """ Test AnomalyService.run() cycle with a single model inference results
        batch
        """
        batch = model_swapper_interface._ConsumedResultBatch(
            modelID="abcdef",
            objects=[
                ModelInferenceResult(rowID=1,
                                     status=0,
                                     anomalyScore=0,
                                     multiStepBestPredictions={1: 1})
            ],
            ack=Mock(spec_set=(lambda multiple: None)))

        consumeResultsReturnValueMock = MagicMock(__enter__=Mock(
            return_value=[batch]))

        (ModelSwapperInterfaceMock.return_value.__enter__.return_value.
         consumeResults.return_value) = consumeResultsReturnValueMock

        service = anomaly_service.AnomalyService()

        resource = "metric's resource"

        modelSpec = dict(datasource="custom",
                         metricSpec=dict(
                             metric="MY.METRIC.STOCK.VOLUME",
                             resource=resource,
                             userInfo=dict(displayName="Stock Volume")))

        metricRowProxyMock = MetricRowProxyMock(
            uid="abcdef",
            datasource="my-test-custom",
            name="MY.METRIC.STOCK.VOLUME",
            description="test metric",
            server=resource,
            location="metric's location",
            parameters=json.dumps(modelSpec))

        tsDatetime1 = datetime.datetime(2015, 4, 17, 12, 3, 35)

        metricDataRow = anomaly_service.MutableMetricDataRow(
            uid="abcdef",
            rowid=1,
            metric_value=10.9,
            timestamp=tsDatetime1,
            raw_anomaly_score=0.1,
            anomaly_score=0,
            multi_step_best_predictions={1: 1},
            display_value=0)
        metricDataRows = [metricDataRow]
        with patch.object(service,
                          "_processModelInferenceResults",
                          autospec=True,
                          return_value=(metricRowProxyMock, metricDataRows)):
            service.run()
            service._processModelInferenceResults.assert_called_once_with(
                batch.objects, metricID=metricDataRow.uid)
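The mock wiring above suggests how AnomalyService.run() consumes inference results: ModelSwapperInterface is used as a context manager, its consumeResults() returns another context manager that yields batches, and each batch is acknowledged after processing. A minimal sketch of that loop, assuming this control flow rather than quoting run():

    # Hedged sketch of the consumption loop implied by the mocks above; the
    # control flow is inferred from the test, not quoted from run().
    def consumeOneCycle(service):
        with model_swapper_interface.ModelSwapperInterface() as swapper:
            with swapper.consumeResults() as consumer:
                for batch in consumer:
                    # batch carries modelID, a list of result objects, and
                    # an ack callable that accepts a `multiple` flag
                    service._processModelInferenceResults(
                        batch.objects, metricID=batch.modelID)
                    batch.ack(multiple=False)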
Code Example #3
    def testComposeModelCommandResultForDeleteFail(self, repoMock, *_args):
        """ Make sure we can compose a model command result message for publishing
    a failed "deleteModel" on the AMQP exchange
    """
        repoMock.getMetric.side_effect = Exception(
            "getMetric should not have been called here")

        service = anomaly_service.AnomalyService()

        modelID = "123456abcdef"
        result = anomaly_service.ModelCommandResult(
            commandID="123",
            method="deleteModel",
            status=1,
            errorMessage="bad, bad, bad")

        msg = service._composeModelCommandResultMessage(modelID=modelID,
                                                        cmdResult=result)

        # Validate the message against its JSON schema
        schemaStream = pkg_resources.resource_stream(
            "htmengine.runtime.json_schema",
            "model_command_result_amqp_message.json")
        jsonSchema = json.load(schemaStream)

        validictory.validate(msg, jsonSchema)

        self.assertEqual(msg.pop("method"), result.method)
        self.assertEqual(msg.pop("modelId"), modelID)
        self.assertEqual(msg.pop("commandId"), result.commandID)
        self.assertEqual(msg.pop("status"), result.status)
        self.assertEqual(msg.pop("errorMessage"), result.errorMessage)

        self.assertFalse(msg)
Code Example #4
    def testTruncatedMetricDataRowsInScrubInferernceResults(self, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with fewer
        metricDataRows than inferenceResults should raise
        RejectedInferenceResultBatch
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.UNMONITORED,
                             parameters=None)

        engineMock = Mock(spec_set=sqlalchemy.engine.Engine)

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=engineMock,
                inferenceResults=[Mock()],
                metricDataRows=[],
                metricObj=metricRowMock)

        self.assertIn("No MetricData row for inference result",
                      cm.exception.args[0])
Code Example #5
    def testMetricNotActiveErrorDuringAnomalyLikelihoodUpdate(
            self, updateAnomalyLikelihoodParamsMock, repoMock, *_args):
        """MetricNotActiveError raised during anomaly likelihood update
        should result in rejection of results
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.UNMONITORED,
                             parameters=None)
        repoMock.getMetric.return_value = metricRowMock
        updateAnomalyLikelihoodParamsMock.side_effect = (
            app_exceptions.MetricNotActiveError("faking it"))

        runner = anomaly_service.AnomalyService()

        runner._scrubInferenceResultsAndInitMetricData = Mock(
            spec_set=runner._scrubInferenceResultsAndInitMetricData,
            return_value=None)

        runner.likelihoodHelper.updateModelAnomalyScores = Mock(
            spec_set=runner.likelihoodHelper.updateModelAnomalyScores,
            return_value=dict())

        self.assertIsNone(
            runner._processModelInferenceResults(
                inferenceResults=[metricRowMock], metricID="abc"))

        self.assertEqual(updateAnomalyLikelihoodParamsMock.call_count, 0)
Code Example #6
    def testErrorResultAndActiveModelInScrubInferernceResults(
            self, repoMock, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a failed inference
        result and ACTIVE model should set the model to ERROR state and raise
        RejectedInferenceResultBatch.
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE,
                             parameters=None,
                             uid=0)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None
            raw_anomaly_score = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0,
                                 raw_anomaly_score=None)

        engineMock = Mock(spec_set=sqlalchemy.engine.Engine)

        cmMock = Mock()
        engineMock.connect.return_value = cmMock
        cmMock.__enter__ = cmMock
        cmMock.__exit__ = cmMock

        connMock = Mock(spec_set=sqlalchemy.engine.Connection)
        cmMock.return_value = connMock

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=engineMock,
                inferenceResults=[
                    ModelInferenceResult(rowID=0,
                                         status=MetricStatus.ERROR,
                                         errorMessage="bad inference")
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        repoMock.setMetricStatus.assert_called_with(connMock, 0,
                                                    MetricStatus.ERROR,
                                                    "bad inference")
        self.assertIn("promoted to ERROR state", cm.exception.args[0])
Code Example #7
    def testRowIdMismatchInScrubInferenceResults(self, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a rowID mismatch
        between an item in metricDataRows and inferenceResults should raise
        RejectedInferenceResultBatch
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE,
                             parameters=None)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0)

        engineMock = Mock(spec_set=sqlalchemy.engine.Engine)

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=engineMock,
                inferenceResults=[
                    ModelInferenceResult(rowID=1,
                                         status=0,
                                         anomalyScore=0,
                                         multiStepBestPredictions={1: 1})
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        self.assertIn("RowID mismatch between inference result",
                      cm.exception.args[0])
Code Example #8
    def testComposeModelCommandResultObjNotFound(self, repoMock, *_args):
        """ Make sure ObjectNotFoundError is raised when composing a model command
        result message for publishing "defineModel" and the metric is not found
        """
        repoMock.getMetric.side_effect = app_exceptions.ObjectNotFoundError(
            "getMetric should not have been called here")

        service = anomaly_service.AnomalyService()

        modelID = "123456abcdef"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        with self.assertRaises(app_exceptions.ObjectNotFoundError):
            service._composeModelCommandResultMessage(modelID=modelID,
                                                      cmdResult=result)
Code Example #9
    def testComposeModelCommandResultForDefine(self, repoMock, *_args):
        """ Make sure we can compose a model command result message for publishing
    "defineModel" on the AMQP exchange
    """
        class MetricRowSpec(object):
            status = MetricStatus.CREATE_PENDING
            name = "metric.name"
            server = "metric.server"
            parameters = json.dumps(dict(value1="one", value2=2))

        repoMock.getMetric.return_value = MetricRowSpec

        service = anomaly_service.AnomalyService()

        modelID = "123456abcdef"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        msg = service._composeModelCommandResultMessage(modelID=modelID,
                                                        cmdResult=result)

        # Validate the message against its JSON schema
        schemaStream = pkg_resources.resource_stream(
            "htmengine.runtime.json_schema",
            "model_command_result_amqp_message.json")
        jsonSchema = json.load(schemaStream)

        validictory.validate(msg, jsonSchema)

        self.assertEqual(msg.pop("method"), result.method)
        self.assertEqual(msg.pop("modelId"), modelID)
        self.assertEqual(msg.pop("commandId"), result.commandID)
        self.assertEqual(msg.pop("status"), result.status)
        self.assertEqual(msg.pop("errorMessage"), result.errorMessage)

        modelInfo = msg.pop("modelInfo")
        self.assertEqual(modelInfo.pop("metricName"), MetricRowSpec.name)
        self.assertEqual(modelInfo.pop("resource"), MetricRowSpec.server)
        self.assertEqual(modelInfo.pop("modelSpec"),
                         json.loads(MetricRowSpec.parameters))
        self.assertFalse(modelInfo)

        self.assertFalse(msg)
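Together with Code Example #3, where msg is empty once the five common keys are popped, the assertions here pin down the message layout: five flat keys on every result, plus a modelInfo block only for a successful "defineModel". An illustrative instance with made-up values:

    # Illustrative message implied by the assertions; only the keys are
    # pinned down by the tests, the concrete values here are made up.
    exampleMessage = {
        "method": "defineModel",
        "modelId": "123456abcdef",
        "commandId": "123",
        "status": 0,  # 0 indicates success
        "errorMessage": None,  # presumably None unless the command failed
        "modelInfo": {  # present only for a successful "defineModel"
            "metricName": "metric.name",
            "resource": "metric.server",
            "modelSpec": {"value1": "one", "value2": 2},
        },
    }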
Code Example #10
    def testErrorResultAndErrorModelInScrubInferenceResults(self, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a failed inference
        result and an errored-out model should raise RejectedInferenceResultBatch
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ERROR,
                             parameters=None)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None
            raw_anomaly_score = None
            multi_step_best_predictions = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0,
                                 multi_step_best_predictions=None)

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=Mock(),
                inferenceResults=[
                    ModelInferenceResult(rowID=0,
                                         status=1,
                                         errorMessage="bad inference")
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        self.assertIn("was in ERROR state", cm.exception.args[0])
Code Example #11
    def testProcessModelInferenceResultsHandlingOfRejectedInferenceResultBatch(
            self, AnomalyLikelihoodHelperMock, repoMock, *_args):
        """Make sure _processModelInferenceResults handles
        RejectedInferenceResultBatch from _scrubInferenceResultsAndInitMetricData
        without crashing
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE,
                             parameters=None)
        repoMock.getMetric.return_value = metricRowMock

        class MetricDataRowSpec(object):
            uid = None
            rowid = Mock()

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec)
        repoMock.getMetricData.return_value = [metricRowDataMock]

        runner = anomaly_service.AnomalyService()
        ex = anomaly_service.RejectedInferenceResultBatch("blah")
        runner._scrubInferenceResultsAndInitMetricData = Mock(
            spec_set=runner._scrubInferenceResultsAndInitMetricData,
            side_effect=ex)

        runner._log.error = Mock(wraps=runner._log.error)
        self.assertIsNone(
            runner._processModelInferenceResults(inferenceResults=[Mock()],
                                                 metricID=Mock()))

        self.assertTrue(runner._log.error.called)
        self.assertIn("Rejected inference result batch=",
                      runner._log.error.call_args[0][0])
        self.assertIs(runner._log.error.call_args[0][-1], ex)

        self.assertFalse(AnomalyLikelihoodHelperMock.called)
Code Example #12
    def testProcessModelInferenceResultsWithMetricNotFoundOnEntry(
            self, repoMock, *_args):
        """Make sure _processModelInferenceResults handles
        ObjectNotFoundError from repository.getMetric() without crashing
        """
        repoMock.getMetric.side_effect = app_exceptions.ObjectNotFoundError(
            "Metric not found")

        runner = anomaly_service.AnomalyService()

        with patch.object(runner._log,
                          "warning",
                          new=Mock(wraps=runner._log.warning)):
            self.assertIsNone(
                runner._processModelInferenceResults(inferenceResults=[Mock()],
                                                     metricID="abc"))

            self.assertTrue(repoMock.getMetric.called)
            self.assertTrue(runner._log.warning.called)
            self.assertIn("Received inference results for unknown model=",
                          runner._log.warning.call_args[0][0])
Code Example #13
    def testRejectionOfInferenceResultsForInactiveMetric(
            self, repoMock, *_args):
        """Calling _processModelInferenceResults against a metric that is not in
        ACTIVE state should result in rejection of results
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.UNMONITORED,
                             parameters=None)
        repoMock.getMetric.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        self.assertIsNone(
            runner._processModelInferenceResults(
                inferenceResults=[metricRowMock], metricID="abc"))
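Code Examples #5, #11, #12, and #13 exercise the entry guards of _processModelInferenceResults: an unknown metric is logged and dropped, an inactive metric is dropped silently, and a rejected batch is logged without crashing. A hedged sketch of those guards, inferred from the tests rather than quoted from the method:

    # Hedged sketch of the entry guards suggested by examples #5, #11, #12
    # and #13; names and flow are assumptions, and the getMetricData
    # signature is assumed from the mock usage in example #11.
    def processGuardsSketch(service, engine, conn, inferenceResults,
                            metricID):
        try:
            metricObj = repository.getMetric(conn, metricID)
        except app_exceptions.ObjectNotFoundError:
            # Unknown model (example #12): warn and drop the batch
            service._log.warning(
                "Received inference results for unknown model=%s", metricID)
            return None
        if metricObj.status != MetricStatus.ACTIVE:
            # Metric not in ACTIVE state (examples #5 and #13): drop silently
            return None
        metricDataRows = repository.getMetricData(conn, metricID)
        try:
            service._scrubInferenceResultsAndInitMetricData(
                engine=engine,
                inferenceResults=inferenceResults,
                metricDataRows=metricDataRows,
                metricObj=metricObj)
        except anomaly_service.RejectedInferenceResultBatch as ex:
            # Rejected batch (example #11): log an error, do not crash
            service._log.error(
                "Rejected inference result batch=%s: %r", metricID, ex)
            return None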
Code Example #14
    def testProcessSuccessfulDefineModelCommandResultWhileInErrorState(
            self, repoMock, *_args):
        """Test the scenario where "defineModel" result is delivered after the
    Metric has already been placed in error state
    """
        class MetricRowSpec(object):
            status = None

        metricRowMock = Mock(spec_set=MetricRowSpec, status=MetricStatus.ERROR)
        repoMock.getMetric.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        metricID = "abc"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        runner._processModelCommandResult(metricID=metricID, result=result)

        self.assertFalse(repoMock.setMetricStatus.called)
Code Example #15
    def testProcessSuccessfulDefineModelCommandResultWhileInActiveState(
            self, repoMock, *_args):
        """This is the other normal processing path where "defineModel" result
    is re-delivered as the side-effect of at-least-once delivery guarantee
    """
        class MetricRowSpec(object):
            status = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE)
        repoMock.getMetric.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        metricID = "abc"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        runner._processModelCommandResult(metricID=metricID, result=result)

        self.assertFalse(repoMock.setMetricStatus.called)
Code Example #16
    def testProcessSuccessfulDefineModelCommandResultWhileInCreatePendingState(
            self, repoMock, *_args):
        """This is the normal processing path for "defineModel" result"""
        class MetricRowSpec(object):
            status = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.CREATE_PENDING)
        repoMock.getMetricWithSharedLock.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        metricID = "abc"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        runner._processModelCommandResult(metricID=metricID, result=result)

        repoMock.setMetricStatus.assert_called_with(
            (repoMock.engineFactory.return_value.connect.return_value.
             __enter__.return_value), metricID, MetricStatus.ACTIVE)
Code Example #17
    def testProcessFailedDefineModelCommandResultWhileInErrorState(
            self, repoMock, *_args):
        """Test the scenario where a failed "defineModel" result is delivered after
        the Metric has already been placed in ERROR state
        """
        class MetricRowSpec(object):
            status = None

        metricRowMock = Mock(spec_set=MetricRowSpec, status=MetricStatus.ERROR)
        repoMock.getMetricWithSharedLock.return_value = metricRowMock

        runner = anomaly_service.AnomalyService()

        metricID = "abc"
        result = anomaly_service.ModelCommandResult(
            commandID="123",
            method="defineModel",
            status=htmengineerrno.ERR_INVALID_ARG,
            errorMessage="invalid arg")

        runner._processModelCommandResult(metricID=metricID, result=result)

        self.assertFalse(repoMock.setMetricStatus.called)
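Code Examples #1 and #14 through #17 together specify a small decision table for "defineModel" results: only a metric still in CREATE_PENDING changes state, success promotes it to ACTIVE, failure demotes it to ERROR, and redelivered results for metrics already ACTIVE or in ERROR are no-ops. A hedged sketch of that table, not a quote of the real method body:

    # Hedged decision table distilled from examples #1 and #14-#17;
    # repository and MetricStatus as in the scaffolding sketch above.
    def handleDefineModelResult(conn, metricID, metricStatus, result):
        if result.status == 0:  # success
            if metricStatus == MetricStatus.CREATE_PENDING:
                # Normal path (example #16): promote the metric to ACTIVE
                repository.setMetricStatus(conn, metricID,
                                           MetricStatus.ACTIVE)
            # ACTIVE (example #15) or ERROR (example #14): no-op
        elif metricStatus == MetricStatus.CREATE_PENDING:
            # Failed creation (example #1): record the error message
            repository.setMetricStatus(conn, metricID, MetricStatus.ERROR,
                                       result.errorMessage)
        # Already in ERROR state (example #17): no-op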
Code Example #18
    def testComposeModelCommandResultNotMonitored(self, repoMock, *_args):
        """ Make sure MetricNotMonitoredError is raised when composing a model
        command result message for publishing "defineModel" and metric properties
        are not set
        """
        class MetricRowSpec(object):
            status = MetricStatus.UNMONITORED
            name = "metric.name"
            server = "metric.server"
            parameters = None

        repoMock.getMetric.return_value = MetricRowSpec

        service = anomaly_service.AnomalyService()

        modelID = "123456abcdef"
        result = anomaly_service.ModelCommandResult(commandID="123",
                                                    method="defineModel",
                                                    status=0)

        with self.assertRaises(app_exceptions.MetricNotMonitoredError):
            service._composeModelCommandResultMessage(modelID=modelID,
                                                      cmdResult=result)
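Finally, Code Examples #3, #8, #9, and #18 map out when _composeModelCommandResultMessage consults the repository and which exceptions escape it: a failed "deleteModel" never touches the repository, while a successful "defineModel" loads the metric, lets ObjectNotFoundError propagate, and raises MetricNotMonitoredError when the metric carries no model parameters. A hedged sketch of that branching; the helper name and conn parameter are illustrative, not from the source:

    # Hedged branch summary inferred from examples #3, #8, #9 and #18;
    # repository, app_exceptions and json as in the scaffolding sketch above.
    def composeSketch(conn, modelID, cmdResult):
        msg = {
            "method": cmdResult.method,
            "modelId": modelID,
            "commandId": cmdResult.commandID,
            "status": cmdResult.status,
            "errorMessage": cmdResult.errorMessage,
        }
        if cmdResult.method == "defineModel" and cmdResult.status == 0:
            # Only a successful "defineModel" needs metric details; a failed
            # "deleteModel" (example #3) never calls getMetric
            metricObj = repository.getMetric(conn, modelID)
            # ObjectNotFoundError from getMetric propagates (example #8)
            if metricObj.parameters is None:
                # No model spec on an unmonitored metric (example #18)
                raise app_exceptions.MetricNotMonitoredError(modelID)
            msg["modelInfo"] = {
                "metricName": metricObj.name,
                "resource": metricObj.server,
                "modelSpec": json.loads(metricObj.parameters),
            }
        return msg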