def test_run_selected_algorithm(self, timeMock):
    timeMock.return_value, timeseries = self.data(time())
    result, ensemble, datapoint = algorithms.run_selected_algorithm(
        timeseries, "test.metric")
    self.assertTrue(result)
    self.assertTrue(len(filter(None, ensemble)) >= settings.CONSENSUS)
    self.assertEqual(datapoint, 1000)
def test_run_selected_algorithm(self, timeMock):
    timeMock.return_value, timeseries = self.data(time())
    # @modified 20200206 - Feature #3400: Identify air gaps in the metric data
    # Added the airgapped_metrics list
    # result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric")
    airgapped_metrics = [
        'test.metric.airgapped.1', 'test.metric.airgapped.2']
    # @modified 20200520 - Feature #3400: Identify air gaps in the metric data
    #                      Feature #3508: ionosphere.untrainable_metrics
    #                      Feature #3504: Handle airgaps in batch metrics
    # Added airgapped_metrics_filled, run_negatives_present and
    # check_for_airgaps_only
    # result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric", airgapped_metrics)
    airgapped_metrics_filled = []
    run_negatives_present = False
    check_for_airgaps_only = False
    # @modified 20200604 - Feature #3566: custom_algorithms
    # Added algorithms_run
    # @modified 20210519 - Feature #4076: CUSTOM_STALE_PERIOD
    # Added custom_stale_metrics_dict
    custom_stale_metrics_dict = {}
    result, ensemble, datapoint, negatives_found, algorithms_run = algorithms.run_selected_algorithm(
        timeseries, 'test.metric', airgapped_metrics,
        airgapped_metrics_filled, run_negatives_present,
        check_for_airgaps_only, custom_stale_metrics_dict)
    self.assertTrue(result)
    # @modified 20200808 - Bug #3666: Failing algorithm_tests on Python 3.8.3
    # self.assertTrue(len(filter(None, ensemble)) >= settings.CONSENSUS)
    self.assertTrue(
        len(list(filter(None, ensemble))) >= settings.CONSENSUS)
    self.assertEqual(datapoint, 1000)
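# The tests in this section call a data() helper (not shown here) to build the
# mocked time and timeseries.  The sketch below is a hypothetical stand-in,
# not the actual helper: it only assumes what the assertions require, namely a
# flat series of 1s whose final datapoint is the anomalous value 1000, which
# also gives a tail average of (1 + 1 + 1000) / 3 = 334 for the
# custom-algorithm test further down.
def data(self, ts):
    """
    Return (ts, timeseries) where timeseries is a day of one-second
    datapoints with value 1 and a final anomalous datapoint of 1000.
    """
    timeseries = [[float(int(ts) - i), 1] for i in range(86400, 0, -1)]
    timeseries.append([float(int(ts)), 1000])
    return ts, timeseries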
def test_run_selected_algorithm(self, timeMock):
    timeMock.return_value, timeseries = self.data(time())
    # @modified 20200206 - Feature #3400: Identify air gaps in the metric data
    # Added the airgapped_metrics list
    # result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric")
    airgapped_metrics = [
        'test.metric.airgapped.1', 'test.metric.airgapped.2']
    # @modified 20200520 - Feature #3400: Identify air gaps in the metric data
    #                      Feature #3508: ionosphere.untrainable_metrics
    #                      Feature #3504: Handle airgaps in batch metrics
    # Added airgapped_metrics_filled, run_negatives_present and
    # check_for_airgaps_only
    # result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric", airgapped_metrics)
    airgapped_metrics_filled = []
    run_negatives_present = False
    check_for_airgaps_only = False
    result, ensemble, datapoint, negatives_found = algorithms.run_selected_algorithm(
        timeseries, 'test.metric', airgapped_metrics,
        airgapped_metrics_filled, run_negatives_present,
        check_for_airgaps_only)
    self.assertTrue(result)
    self.assertTrue(len(filter(None, ensemble)) >= settings.CONSENSUS)
    self.assertEqual(datapoint, 1000)
def test_run_selected_algorithm(self, timeMock):
    timeMock.return_value, timeseries = self.data(time())
    # @modified 20200206 - Feature #3400: Identify air gaps in the metric data
    # Added the airgapped_metrics list
    # result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric")
    airgapped_metrics = [
        'test.metric.airgapped.1', 'test.metric.airgapped.2']
    result, ensemble, datapoint = algorithms.run_selected_algorithm(
        timeseries, "test.metric", airgapped_metrics)
    self.assertTrue(result)
    self.assertTrue(len(filter(None, ensemble)) >= settings.CONSENSUS)
    self.assertEqual(datapoint, 1000)
def test_run_selected_algorithm_runs_novel_algorithm(
        self, timeMock, algorithmsListMock, consensusMock):
    """
    Assert that a user can add their own custom algorithm.  This mocks out
    settings.ALGORITHMS and settings.CONSENSUS to use only a single
    custom-defined function (alwaysTrue)
    """
    algorithmsListMock.__iter__.return_value = ['alwaysTrue']
    consensusMock = 1
    timeMock.return_value, timeseries = self.data(time())

    alwaysTrue = Mock(return_value=True)
    with patch.dict(algorithms.__dict__, {'alwaysTrue': alwaysTrue}):
        result, ensemble, tail_avg = algorithms.run_selected_algorithm(timeseries)

    alwaysTrue.assert_called_with(timeseries)
    self.assertTrue(result)
    self.assertEqual(ensemble, [True])
    self.assertEqual(tail_avg, 334)
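# The timeMock / algorithmsListMock / consensusMock parameters above imply the
# test method is wrapped in mock.patch decorators, applied bottom-up so the
# decorator nearest the def line supplies the first mock argument after self.
# The sketch below is an assumption about that scaffolding (patch targets and
# import style are not shown in this section and may differ); algorithms is
# the module already imported by the test file.
from unittest.mock import Mock, patch  # or `from mock import Mock, patch` on older Pythons

@patch.object(algorithms, 'CONSENSUS')   # outermost decorator -> consensusMock
@patch.object(algorithms, 'ALGORITHMS')  # -> algorithmsListMock
@patch.object(algorithms, 'time')        # innermost decorator -> timeMock
def test_run_selected_algorithm_runs_novel_algorithm(
        self, timeMock, algorithmsListMock, consensusMock):
    ...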