def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')

    evaluator = Evaluator()
    evaluator.evaluate(self.seq2seq, self.dataset)

    expected_calls = [call.eval()] + \
        self.dataset.num_batches(evaluator.batch_size) * \
        [call.call(ANY, ANY, volatile=ANY)]

    self.assertEqual(expected_calls, mock_mgr.mock_calls)
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')

    evaluator = Evaluator(batch_size=64)
    with patch('seq2seq.evaluator.evaluator.torch.stack', return_value=None), \
            patch('seq2seq.loss.NLLLoss.eval_batch', return_value=None):
        evaluator.evaluate(self.seq2seq, self.dataset)

    num_batches = int(math.ceil(len(self.dataset) / evaluator.batch_size))
    expected_calls = [call.eval()] + num_batches * [call.call(ANY, ANY, ANY)]

    self.assertEqual(expected_calls, mock_mgr.mock_calls)
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')

    evaluator = Evaluator()
    with patch('machine.evaluator.evaluator.torch.stack', return_value=None), \
            patch('machine.metrics.WordAccuracy.eval_batch', return_value=None), \
            patch('machine.loss.NLLLoss.eval_batch', return_value=None):
        evaluator.evaluate(self.seq2seq, self.data_iterator,
                           trainer.get_batch_data)

    num_batches = len(self.data_iterator)
    expected_calls = [call.eval()] + num_batches * \
        [call.call(ANY, ANY, ANY)]

    self.assertEqual(expected_calls, mock_mgr.mock_calls)
            param.data.uniform_(-0.08, 0.08)

    @patch.object(Seq2seq, '__call__', return_value=(
        [], None, dict(inputs=[], length=[10] * 64, sequence=MagicMock())))
    @patch.object(Seq2seq, 'eval')
    def test_set_eval_mode(self, mock_eval, mock_call):
        """ Make sure that evaluation is done in evaluation mode. """
        mock_mgr = MagicMock()
        mock_mgr.attach_mock(mock_eval, 'eval')
        mock_mgr.attach_mock(mock_call, 'call')

        evaluator = Evaluator(batch_size=64)
        with patch('seq2seq.evaluator.evaluator.torch.stack', return_value=None), \
                patch('seq2seq.metrics.WordAccuracy.eval_batch', return_value=None), \
                patch('seq2seq.loss.NLLLoss.eval_batch', return_value=None):
            evaluator.evaluate(self.seq2seq, self.dataset,
                               trainer.get_batch_data)

        num_batches = int(math.ceil(len(self.dataset) / evaluator.batch_size))
        expected_calls = [call.eval()] + num_batches * \
            [call.call(ANY, ANY, ANY)]

        self.assertEqual(expected_calls, mock_mgr.mock_calls)
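# A minimal, self-contained sketch (not part of the test suite above) of the
# attach_mock pattern these tests rely on: attaching child mocks to a single
# manager records their calls in one ordered list, which is what lets the
# assertions above verify that eval() happens before any forward pass.
from unittest.mock import MagicMock, call

manager = MagicMock()
eval_mock, forward_mock = MagicMock(), MagicMock()
manager.attach_mock(eval_mock, 'eval')
manager.attach_mock(forward_mock, 'call')

eval_mock()           # recorded on the manager as call.eval()
forward_mock(1, 2)    # recorded on the manager as call.call(1, 2)

assert manager.mock_calls == [call.eval(), call.call(1, 2)]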
@pytest.fixture
def editor(vim):
    mockeditor = Editor(vim)
    assert vim.mock_calls == [call.eval("has('nvim')")]
    vim.reset_mock()  # clear the constructor's vim calls from the call list
    return mockeditor
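# Hedged usage sketch (hypothetical test, not from the source): a test that
# consumes the fixture above. It assumes `vim` is itself a MagicMock fixture
# and that Editor keeps a reference to it as `.vim`; both names are
# illustrative assumptions only.
def test_editor_forwards_eval(editor, vim):
    editor.vim.eval('line(".")')
    assert vim.mock_calls == [call.eval('line(".")')]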