# NOTE: Mock, ANY, call, patch, pd, benchmark and contextual_confusion_matrix
# are assumed to be imported at the top of the surrounding test module.
# The patch targets below are likewise assumed to live in orion.benchmark;
# the bottom-most decorator maps to the first mock argument.
@patch('orion.benchmark._load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
@patch('orion.benchmark._detrend_signal')
def test__evaluate_signal_detrend(self, detrend_signal_mock, load_signal_mock,
                                  load_pipeline_mock, analyze_mock, load_anomalies_mock):
    train = Mock(autospec=pd.DataFrame)
    test = Mock(autospec=pd.DataFrame)
    detrend_signal_mock.side_effect = [train, test]
    load_signal_mock.side_effect = [train, test]
    load_pipeline_mock.return_value = self.pipeline
    detrend = True

    returned = benchmark._evaluate_signal(
        self.pipeline, self.signal, self.hyper, self.metrics,
        test_split=True, detrend=detrend)

    expected_return = self.set_score(1, ANY, ANY)
    assert returned == expected_return

    # The train and test splits are loaded and detrended separately.
    expected_calls = [
        call('signal-name-train'),
        call('signal-name-test')
    ]
    assert load_signal_mock.call_args_list == expected_calls

    expected_calls = [
        call(train, 'value'),
        call(test, 'value')
    ]
    assert detrend_signal_mock.call_args_list == expected_calls

    load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
    analyze_mock.assert_called_once_with(self.pipeline, train, test)
    load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark._load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_exception(self, load_signal_mock, load_pipeline_mock,
                                    analyze_mock, load_anomalies_mock):
    train = Mock(autospec=pd.DataFrame)
    test = Mock(autospec=pd.DataFrame)
    load_signal_mock.side_effect = [train, test]
    load_pipeline_mock.return_value = self.pipeline
    analyze_mock.side_effect = Exception("failed analyze.")

    returned = benchmark._evaluate_signal(
        self.pipeline, self.signal, self.hyper, self.metrics, True)

    # When analyze raises, the signal is scored 0 and flagged as an error.
    expected_return = self.set_score(0, ANY, ANY)
    expected_return['status'] = 'ERROR'
    assert returned == expected_return

    expected_calls = [
        call('signal-name-train'),
        call('signal-name-test')
    ]
    assert load_signal_mock.call_args_list == expected_calls

    load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
    analyze_mock.assert_called_once_with(self.pipeline, train, test)
    assert load_anomalies_mock.called
@patch('orion.benchmark._load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_exception_confusion_matrix(
        self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
    anomalies = pd.DataFrame({
        'start': [10, 35],
        'end': [20, 40]
    })
    train = Mock(autospec=pd.DataFrame)
    test = Mock(autospec=pd.DataFrame)
    load_signal_mock.side_effect = [train, test]
    load_pipeline_mock.return_value = self.pipeline
    load_anomalies_mock.return_value = anomalies
    analyze_mock.side_effect = Exception("failed analyze.")

    metrics = {'confusion_matrix': Mock(autospec=contextual_confusion_matrix)}
    metrics = {**metrics, **self.metrics}

    returned = benchmark._evaluate_signal(
        self.pipeline, self.signal, self.hyper, metrics, True)

    # On failure nothing is detected, so both ground-truth anomalies
    # become false negatives and there are no positives.
    expected_return = self.set_score(0, ANY, ANY)
    expected_return['status'] = 'ERROR'
    expected_return['tn'] = None
    expected_return['fp'] = 0
    expected_return['fn'] = 2
    expected_return['tp'] = 0
    assert returned == expected_return
@patch('orion.benchmark._load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_no_test_split(self, load_signal_mock, load_pipeline_mock,
                                        analyze_mock, load_anomalies_mock):
    train = test = Mock(autospec=pd.DataFrame)
    load_signal_mock.side_effect = [train, test]
    load_pipeline_mock.return_value = self.pipeline
    test_split = False

    # Call with the same signature as the other _evaluate_signal tests;
    # the original passed extra self.name and self.dataset arguments,
    # which did not match the calls above.
    returned = benchmark._evaluate_signal(
        self.pipeline, self.signal, self.hyper, self.metrics, test_split=test_split)

    expected_return = self.set_score(1, ANY, test_split)
    assert returned == expected_return

    # Without a test split, the full signal is loaded once.
    expected_calls = [call('signal-name')]
    assert load_signal_mock.call_args_list == expected_calls

    load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
    analyze_mock.assert_called_once_with(self.pipeline, train, test)
    load_anomalies_mock.assert_called_once_with(self.signal)