def checkModelDeleted(self, uid):
  """Check via the models web API that no model with the given uid remains,
  and that its checkpointed model definition is gone as well.
  """
  modelsJson = requests.get("https://localhost/_models",
                            auth=(self.__apiKey, ""),
                            verify=False).json()
  for modelInfo in modelsJson:
    self.assertNotEqual(modelInfo["uid"], uid,
                        "Model showing up after deletion.")

  # The model checkpoint manager must no longer know about this model
  checkpointMgr = model_checkpoint_mgr.ModelCheckpointMgr()
  with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
    checkpointMgr.loadModelDefinition(uid)
def checkMetricUnmonitoredById(self, uid):
  """Verify that the metric identified by uid is unmonitored: its status is
  UNMONITORED, its model parameters are cleared, and no checkpointed model
  definition remains for it.
  """
  engine = repository.engineFactory(config=self.__config)
  with engine.begin() as conn:
    metricRow = repository.getMetric(
      conn,
      uid,
      fields=[schema.metric.c.status, schema.metric.c.parameters])
    self.assertEqual(metricRow.status, MetricStatus.UNMONITORED)
    self.assertIsNone(metricRow.parameters)

  with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
    model_checkpoint_mgr.ModelCheckpointMgr().loadModelDefinition(uid)
def checkMetricDeleted(self, uid):
  """Verify that the metric identified by uid was deleted from the repository
  and that no model or checkpointed model definition remains for it.

  :param uid: unique id of the metric/model under test
  """
  engine = repository.engineFactory(config=self.__config)
  with engine.begin() as conn:
    # Expect the specific not-found error rather than a catch-all Exception,
    # so an unrelated failure (e.g., a DB connectivity error) can't
    # accidentally satisfy this check; the sibling checkModelDeleted uses
    # the same specific exception.
    with self.assertRaises(app_exceptions.ObjectNotFoundError):
      repository.getMetric(conn, uid)

    models = repository.getAllModels(conn)
    for model in models:
      self.assertNotEqual(model.uid, uid,
                          "Model showing up after deletion.")

  with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
    model_checkpoint_mgr.ModelCheckpointMgr().loadModelDefinition(uid)
def checkModelDeleted(self, uid):
  """Check that the model has been deleted.

  Verifies that the metric row is gone from the repository, that no model
  with the given uid remains, and that its checkpointed model definition
  is gone as well.

  :param uid: unique id of the metric/model under test
  """
  engine = repository.engineFactory(config=self.__config)
  with engine.begin() as conn:
    # assertRaises replaces the manual try/raise/except dance; the test
    # fails cleanly if getMetric does NOT raise the not-found error.
    with self.assertRaises(app_exceptions.ObjectNotFoundError):
      repository.getMetric(conn, uid)

    models = repository.getAllModels(conn)
    for model in models:
      self.assertNotEqual(model.uid, uid,
                          "Model showing up after deletion.")

  with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
    model_checkpoint_mgr.ModelCheckpointMgr().loadModelDefinition(uid)
def _auxTestRunModelWithFullThenIncrementalCheckpoints(
    self, classifierEnabled):
  """Exercise full, then incremental, model checkpointing.

  Defines a model, feeds it input rows across three separate model_runner
  subprocess invocations, and after each run inspects the checkpoint
  attributes saved by the model archiver: the first run leaves a full
  checkpoint (batch-id list present, no samples-since-checkpoint attribute),
  while the later runs leave incremental checkpoints (samples since the last
  full checkpoint accumulate). Finally deletes the model and verifies that
  its input queue and checkpoint are gone.

  :param classifierEnabled: value assigned to the model's "clEnable" model
    param; when true each inference result is expected to carry a dict of
    multiStepBestPredictions, otherwise None.
  """
  modelID = "foobar"

  checkpointMgr = model_checkpoint_mgr.ModelCheckpointMgr()

  args = getScalarMetricWithTimeOfDayAnomalyParams(metricData=[0],
                                                   minVal=0,
                                                   maxVal=1000)

  args["modelConfig"]["modelParams"]["clEnable"] = classifierEnabled

  # Submit requests including a model creation command and two data rows.
  args["inputRecordSchema"] = (
    FieldMetaInfo("c0", FieldMetaType.datetime,
                  FieldMetaSpecial.timestamp),
    FieldMetaInfo("c1", FieldMetaType.float,
                  FieldMetaSpecial.none),
  )

  with ModelSwapperInterface() as swapperAPI:
    # Define the model
    _LOGGER.info("Defining the model")
    swapperAPI.defineModel(modelID=modelID, args=args,
                           commandID="defineModelCmd1")

    # Send input rows to the model
    inputRows = [
      ModelInputRow(
        rowID="rowfoo",
        data=[datetime.datetime(2014, 5, 23, 8, 13, 00), 5.3]),
      ModelInputRow(
        rowID="rowbar",
        data=[datetime.datetime(2014, 5, 23, 8, 13, 15), 2.4]),
    ]
    _LOGGER.info(
      "Submitting batch of %d input rows with ids=[%s..%s]...",
      len(inputRows), inputRows[0].rowID, inputRows[-1].rowID)
    swapperAPI.submitRequests(modelID=modelID, requests=inputRows)

    # Run model_runner and collect results: one batch for the defineModel
    # command and one for the input rows
    with self._startModelRunnerSubprocess(
        modelID) as modelRunnerProcess:
      resultBatches = self._consumeResults(numExpectedBatches=2,
                                           timeout=15)
      self._waitForProcessToStopAndCheck(modelRunnerProcess)

    with MessageBusConnector() as bus:
      # The results message queue should be empty now
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

    self.assertEqual(len(resultBatches), 2, repr(resultBatches))

    # First result batch should be the first defineModel result
    batch = resultBatches[0]
    self.assertEqual(batch.modelID, modelID)
    self.assertEqual(len(batch.objects), 1)
    result = batch.objects[0]
    self.assertIsInstance(result, ModelCommandResult)
    self.assertEqual(result.method, "defineModel")
    self.assertEqual(result.status, htmengineerrno.SUCCESS)
    self.assertEqual(result.commandID, "defineModelCmd1")

    # The second result batch should be for the two input rows
    batch = resultBatches[1]
    self.assertEqual(batch.modelID, modelID)
    self.assertEqual(len(batch.objects), len(inputRows))
    for inputRow, result in zip(inputRows, batch.objects):
      self.assertIsInstance(result, ModelInferenceResult)
      self.assertEqual(result.status, htmengineerrno.SUCCESS)
      self.assertEqual(result.rowID, inputRow.rowID)
      self.assertIsInstance(result.anomalyScore, float)
      if classifierEnabled:
        self.assertIsInstance(result.multiStepBestPredictions, dict)
      else:
        self.assertIsNone(result.multiStepBestPredictions)

    # Verify model checkpoint: the checkpoint must load, must record both
    # processed batch ids, and must NOT have the incremental
    # samples-since-checkpoint attribute (i.e., it's a full checkpoint)
    model = checkpointMgr.load(modelID)
    del model

    attrs = checkpointMgr.loadCheckpointAttributes(modelID)
    self.assertIn(
      model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))
    self.assertEqual(
      len(attrs[
        model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME]),
      2, msg=repr(attrs))
    self.assertNotIn(
      model_runner._ModelArchiver.
      _INPUT_SAMPLES_SINCE_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))

    # Now, check incremental checkpointing
    inputRows2 = [
      ModelInputRow(
        rowID=2,
        data=[datetime.datetime(2014, 5, 23, 8, 13, 20), 2.7]),
      ModelInputRow(
        rowID=3,
        data=[datetime.datetime(2014, 5, 23, 8, 13, 25), 3.9]),
    ]
    _LOGGER.info(
      "Submitting batch of %d input rows with ids=[%s..%s]...",
      len(inputRows2), inputRows2[0].rowID, inputRows2[-1].rowID)
    inputBatchID = swapperAPI.submitRequests(modelID=modelID,
                                             requests=inputRows2)

    with self._startModelRunnerSubprocess(
        modelID) as modelRunnerProcess:
      resultBatches = self._consumeResults(numExpectedBatches=1,
                                           timeout=15)
      self._waitForProcessToStopAndCheck(modelRunnerProcess)

    with MessageBusConnector() as bus:
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

    batch = resultBatches[0]
    self.assertEqual(batch.modelID, modelID)
    self.assertEqual(len(batch.objects), len(inputRows2))
    for inputRow, result in zip(inputRows2, batch.objects):
      self.assertIsInstance(result, ModelInferenceResult)
      self.assertEqual(result.status, htmengineerrno.SUCCESS)
      self.assertEqual(result.rowID, inputRow.rowID)
      self.assertIsInstance(result.anomalyScore, float)
      if classifierEnabled:
        self.assertIsInstance(result.multiStepBestPredictions, dict)
      else:
        self.assertIsNone(result.multiStepBestPredictions)

    # This time the checkpoint is incremental: the batch-id list holds only
    # the latest batch id, and the samples-since-checkpoint attribute
    # decodes to exactly the data of inputRows2
    model = checkpointMgr.load(modelID)
    del model
    attrs = checkpointMgr.loadCheckpointAttributes(modelID)
    self.assertIn(
      model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))
    self.assertSequenceEqual(
      attrs[
        model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME],
      [inputBatchID], msg=repr(attrs))
    self.assertIn(
      model_runner._ModelArchiver.
      _INPUT_SAMPLES_SINCE_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))
    self.assertSequenceEqual(
      model_runner._ModelArchiver._decodeDataSamples(
        attrs[model_runner._ModelArchiver.
              _INPUT_SAMPLES_SINCE_CHECKPOINT_ATTR_NAME]),
      [row.data for row in inputRows2], msg=repr(attrs))

    # Final run with incremental checkpointing
    inputRows3 = [
      ModelInputRow(
        rowID=4,
        data=[datetime.datetime(2014, 5, 23, 8, 13, 30), 4.7]),
      ModelInputRow(
        rowID=5,
        data=[datetime.datetime(2014, 5, 23, 8, 13, 35), 5.9]),
    ]
    _LOGGER.info(
      "Submitting batch of %d input rows with ids=[%s..%s]...",
      len(inputRows3), inputRows3[0].rowID, inputRows3[-1].rowID)
    inputBatchID = swapperAPI.submitRequests(modelID=modelID,
                                             requests=inputRows3)

    with self._startModelRunnerSubprocess(
        modelID) as modelRunnerProcess:
      resultBatches = self._consumeResults(numExpectedBatches=1,
                                           timeout=15)
      self._waitForProcessToStopAndCheck(modelRunnerProcess)

    with MessageBusConnector() as bus:
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

    batch = resultBatches[0]
    self.assertEqual(batch.modelID, modelID)
    self.assertEqual(len(batch.objects), len(inputRows3))
    for inputRow, result in zip(inputRows3, batch.objects):
      self.assertIsInstance(result, ModelInferenceResult)
      self.assertEqual(result.status, htmengineerrno.SUCCESS)
      self.assertEqual(result.rowID, inputRow.rowID)
      self.assertIsInstance(result.anomalyScore, float)
      if classifierEnabled:
        self.assertIsInstance(result.multiStepBestPredictions, dict)
      else:
        self.assertIsNone(result.multiStepBestPredictions)

    # Samples since the last full checkpoint accumulate: the attribute now
    # decodes to the data of inputRows2 followed by inputRows3
    model = checkpointMgr.load(modelID)
    del model
    attrs = checkpointMgr.loadCheckpointAttributes(modelID)
    self.assertIn(
      model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))
    self.assertSequenceEqual(
      attrs[
        model_runner._ModelArchiver._BATCH_IDS_CHECKPOINT_ATTR_NAME],
      [inputBatchID], msg=repr(attrs))
    self.assertIn(
      model_runner._ModelArchiver.
      _INPUT_SAMPLES_SINCE_CHECKPOINT_ATTR_NAME,
      attrs, msg=repr(attrs))
    self.assertSequenceEqual(
      model_runner._ModelArchiver._decodeDataSamples(
        attrs[model_runner._ModelArchiver.
              _INPUT_SAMPLES_SINCE_CHECKPOINT_ATTR_NAME]),
      [row.data for row in itertools.chain(inputRows2, inputRows3)],
      msg=repr(attrs))

    # Delete the model
    _LOGGER.info("Deleting the model=%s", modelID)
    swapperAPI.deleteModel(modelID=modelID,
                           commandID="deleteModelCmd1")

    with self._startModelRunnerSubprocess(
        modelID) as modelRunnerProcess:
      resultBatches = self._consumeResults(numExpectedBatches=1,
                                           timeout=15)
      self._waitForProcessToStopAndCheck(modelRunnerProcess)

    self.assertEqual(len(resultBatches), 1, repr(resultBatches))

    # The sole result batch should be the deleteModel command result
    batch = resultBatches[0]
    self.assertEqual(batch.modelID, modelID)
    self.assertEqual(len(batch.objects), 1)
    result = batch.objects[0]
    self.assertIsInstance(result, ModelCommandResult)
    self.assertEqual(result.method, "deleteModel")
    self.assertEqual(result.status, htmengineerrno.SUCCESS)
    self.assertEqual(result.commandID, "deleteModelCmd1")

    with MessageBusConnector() as bus:
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

      # The model input queue should be deleted now
      # NOTE: "Qeueue" spelling matches the message-bus connector API
      self.assertFalse(
        bus.isMessageQeueuePresent(
          swapperAPI._getModelInputQName(modelID=modelID)))

    # The model checkpoint should be gone too
    with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
      checkpointMgr.load(modelID)

    with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
      checkpointMgr.loadModelDefinition(modelID)

    with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
      checkpointMgr.loadCheckpointAttributes(modelID)

    with self.assertRaises(model_checkpoint_mgr.ModelNotFound):
      checkpointMgr.remove(modelID)