Example #1

  def _processInputRow(self, row, currentRunInputSamples):
    """
    :param row: ModelInputRow instance

    :param currentRunInputSamples: a list; the input row's data will be appended
      to this list if the row is processed successfully

    :returns: a ModelInferenceResult instance
    """
    try:
      if self._model is None:
        self._loadModel()

      # Convert a flat input row into a format that is consumable by an OPF
      # model
      self._inputRowEncoder.appendRecord(row.data)
      inputRecord = self._inputRowEncoder.getNextRecordDict()

      # Infer
      r = self._model.run(inputRecord)

      currentRunInputSamples.append(row.data)

      return ModelInferenceResult(
        rowID=row.rowID,
        status=0,
        anomalyScore=r.inferences["anomalyScore"])

    except Exception as e:  # pylint: disable=W0703
      # _ModelRunnerError carries its own errno; any other exception maps to
      # the generic htmengineerrno.ERR code
      self._logger.exception("%r: Inference failed for row=%r", self, row)
      return ModelInferenceResult(
        rowID=row.rowID,
        status=(e.errno if isinstance(e, _ModelRunnerError)
                else htmengineerrno.ERR),
        errorMessage="Inference failed for rowID=%s of modelID=%s (%r): "
                     "(tb: ...%s)"
                     % (row.rowID, self._modelID, e,
                        traceback.format_exc()[-self._MAX_TRACEBACK_TAIL:]))
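
A minimal sketch (not from the source) of how a ModelRunner-style driver might
collect results from _processInputRow for a batch; runInferenceBatch, the
`rows` iterable, and the `publishResults` callable are hypothetical stand-ins
for the real plumbing:

def runInferenceBatch(modelRunner, rows, publishResults):
  # Hypothetical driver loop: each call yields a ModelInferenceResult with
  # status=0 plus an anomalyScore on success, or a non-zero status plus an
  # errorMessage on failure.
  currentRunInputSamples = []
  results = [modelRunner._processInputRow(row, currentRunInputSamples)
             for row in rows]
  publishResults(results)
  # currentRunInputSamples now holds only successfully processed row data
  return currentRunInputSamples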

Example #2

    def testRunWithModelInferenceResultBatch(self, _repositoryMock,
                                             ModelSwapperInterfaceMock,
                                             *_args):
        """Test AnomalyService.run() cycle with a single model inference
        results batch.
        """
        batch = model_swapper_interface._ConsumedResultBatch(
            modelID="abcdef",
            objects=[
                ModelInferenceResult(rowID=1,
                                     status=0,
                                     anomalyScore=0,
                                     multiStepBestPredictions={1: 1})
            ],
            ack=Mock(spec_set=(lambda multiple: None)))

        # consumeResults() must return a context manager that yields the
        # inference result batches when entered
        consumeResultsReturnValueMock = MagicMock(__enter__=Mock(
            return_value=[batch]))

        (ModelSwapperInterfaceMock.return_value.__enter__.return_value.
         consumeResults.return_value) = consumeResultsReturnValueMock

        service = anomaly_service.AnomalyService()

        resource = "metric's resource"

        modelSpec = dict(datasource="custom",
                         metricSpec=dict(
                             metric="MY.METRIC.STOCK.VOLUME",
                             resource=resource,
                             userInfo=dict(displayName="Stock Volume")))

        metricRowProxyMock = MetricRowProxyMock(
            uid="abcdef",
            datasource="my-test-custom",
            name="MY.METRIC.STOCK.VOLUME",
            description="test metric",
            server=resource,
            location="metric's location",
            parameters=json.dumps(modelSpec))

        tsDatetime1 = datetime.datetime(2015, 4, 17, 12, 3, 35)

        metricDataRow = anomaly_service.MutableMetricDataRow(
            uid="abcdef",
            rowid=1,
            metric_value=10.9,
            timestamp=tsDatetime1,
            raw_anomaly_score=0.1,
            anomaly_score=0,
            multi_step_best_predictions={1: 1},
            display_value=0)
        metricDataRows = [metricDataRow]
        with patch.object(service,
                          "_processModelInferenceResults",
                          autospec=True,
                          return_value=(metricRowProxyMock, metricDataRows)):
            service.run()
            service._processModelInferenceResults.assert_called_once_with(
                batch.objects, metricID=metricDataRow.uid)
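
The consumeResults wiring above is dense: the mocked ModelSwapperInterface is
itself entered with `with`, and its consumeResults() returns a second context
manager that yields the result batches. A standalone sketch of this double
context-manager mock pattern (all names below are illustrative):

    from unittest.mock import MagicMock, Mock  # `from mock import ...` on Python 2

    outerMock = MagicMock()  # stands in for the ModelSwapperInterface class
    innerCM = MagicMock(__enter__=Mock(return_value=["batch1", "batch2"]))
    (outerMock.return_value.__enter__.return_value
     .consumeResults.return_value) = innerCM

    with outerMock() as swapper:
        with swapper.consumeResults() as batches:
            assert list(batches) == ["batch1", "batch2"]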

Example #3

    def testErrorResultAndActiveModelInScrubInferenceResults(
            self, repoMock, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a failed
        inference result and an ACTIVE model should set the model to ERROR
        state and raise RejectedInferenceResultBatch.
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE,
                             parameters=None,
                             uid=0)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None
            raw_anomaly_score = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0,
                                 raw_anomaly_score=None)

        # Mock the SQLAlchemy engine; engine.connect() is used as a context
        # manager that yields the connection, and __exit__ returns False so
        # that exceptions raised inside the `with` block propagate instead of
        # being suppressed.
        engineMock = Mock(spec_set=sqlalchemy.engine.Engine)

        connMock = Mock(spec_set=sqlalchemy.engine.Connection)

        engineMock.connect.return_value = MagicMock(
            __enter__=Mock(return_value=connMock),
            __exit__=Mock(return_value=False))

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=engineMock,
                inferenceResults=[
                    ModelInferenceResult(rowID=0,
                                         status=MetricStatus.ERROR,
                                         errorMessage="bad inference")
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        repoMock.setMetricStatus.assert_called_with(connMock, 0,
                                                    MetricStatus.ERROR,
                                                    "bad inference")
        self.assertIn("promoted to ERROR state", cm.exception.args[0])

Example #4

  def testModelInferenceResultSerializableStateWithErrorMessage(self):
    rowID = 1
    status = 1
    errorMessage = "error"
    inferenceResult = ModelInferenceResult(rowID=rowID, status=status,
      errorMessage=errorMessage)
    self.assertEqual(inferenceResult.rowID, rowID)
    self.assertEqual(inferenceResult.status, status)
    self.assertEqual(inferenceResult.errorMessage, errorMessage)
    self.assertIsNone(inferenceResult.anomalyScore)
    self.assertIn("ModelInferenceResult<", str(inferenceResult))
    self.assertIn("ModelInferenceResult<", repr(inferenceResult))

    inferenceResult2 = _ModelRequestResultBase.__createFromState__(
      inferenceResult.__getstate__())

    self.assertEqual(inferenceResult2.rowID, rowID)
    self.assertEqual(inferenceResult2.status, status)
    self.assertEqual(inferenceResult2.errorMessage, errorMessage)
    self.assertIsNone(inferenceResult2.anomalyScore)
    self.assertIn("ModelInferenceResult<", str(inferenceResult2))
    self.assertIn("ModelInferenceResult<", repr(inferenceResult2))

Example #5

    def testRowIdMismatchInScrubInferenceResults(self, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a rowID
        mismatch between an item in metricDataRows and inferenceResults
        should raise RejectedInferenceResultBatch.
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ACTIVE,
                             parameters=None)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0)

        engineMock = Mock(spec_set=sqlalchemy.engine.Engine)

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=engineMock,
                inferenceResults=[
                    ModelInferenceResult(rowID=1,
                                         status=0,
                                         anomalyScore=0,
                                         multiStepBestPredictions={1: 1})
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        self.assertIn("RowID mismatch between inference result",
                      cm.exception.args[0])

Example #6

    def testErrorResultAndErrorModelInScrubInferenceResults(self, *_args):
        """Calling _scrubInferenceResultsAndInitMetricData with a failed
        inference result and a model already in ERROR state should raise
        RejectedInferenceResultBatch.
        """
        class MetricRowSpec(object):
            uid = None
            status = None
            parameters = None
            server = None

        metricRowMock = Mock(spec_set=MetricRowSpec,
                             status=MetricStatus.ERROR,
                             parameters=None)

        class MetricDataRowSpec(object):
            uid = None
            rowid = None
            metric_value = None
            timestamp = None
            raw_anomaly_score = None
            multi_step_best_predictions = None

        metricRowDataMock = Mock(spec_set=MetricDataRowSpec,
                                 uid=0,
                                 rowid=0,
                                 timestamp=None,
                                 metric_value=0,
                                 multi_step_best_predictions=None)

        runner = anomaly_service.AnomalyService()

        with self.assertRaises(
                anomaly_service.RejectedInferenceResultBatch) as cm:
            runner._scrubInferenceResultsAndInitMetricData(
                engine=Mock(),
                inferenceResults=[
                    ModelInferenceResult(rowID=0,
                                         status=1,
                                         errorMessage="bad inference")
                ],
                metricDataRows=[metricRowDataMock],
                metricObj=metricRowMock)

        self.assertIn("was in ERROR state", cm.exception.args[0])