def test_list_subclass_long_strict(self):
    """Strict compare of a plain list vs mock's _CallList must fail on type,
    even when both reprs are truncated to the same prefix."""
    mocked = Mock()
    mocked.call('X' * 20)
    expected_message = (
        "[call.call('YYYYYYYYYYYYYYYYYY... (<type 'list'>)!= "
        "[call.call('XXXXXXXXXXXXXXXXXX... (<class 'mock._CallList'>)"
    )
    self.checkRaises(
        [call.call('Y' * 20)],
        mocked.method_calls,
        expected_message,
        strict=True,
    )
def test_list_subclass_long_strict(self):
    """Strict compare of a plain list vs mock's call list: truncated reprs
    still differ by type, so the comparison must raise."""
    mocked = Mock()
    mocked.call('X' * 20)
    # Template filled with the module-level type-repr helpers so the
    # expected text matches the running Python's repr format.
    template = (
        "[call.call('YYYYYYYYYYYYYYYYYY... (<{0} 'list'>) != "
        "[call.call('XXXXXXXXXXXXXXXXXX... ({1})"
    )
    self.checkRaises(
        [call.call('Y' * 20)],
        mocked.method_calls,
        template.format(class_type_name, call_list_repr),
        strict=True,
    )
def test_list_subclass_long_strict(self):
    """In strict mode, a list and mock's _CallList never compare equal even
    when their truncated element reprs are identical."""
    mocked = Mock()
    mocked.call('X' * 20)
    message = (
        "[call.call('YYYYYYYYYYYYYYYYYY... "
        "(<{0} 'list'>) != "
        "[call.call('XXXXXXXXXXXXXXXXXX... "
        "({1})"
    ).format(class_type_name, call_list_repr)
    self.check_raises(
        [call.call('Y' * 20)],
        mocked.method_calls,
        message,
        strict=True,
    )
def test_list_subclass_long_strict(self):
    """Strict comparison fails on type for list vs mock._CallList, with the
    expected-value type repr parameterised by the running interpreter."""
    mocked = Mock()
    mocked.call('X' * 20)
    expected_message = (
        "[call.call('YYYYYYYYYYYYYYYYYY... "
        "(<{0} 'list'>)!= "
        "[call.call('XXXXXXXXXXXXXXXXXX... "
        "(<class 'mock._CallList'>)"
    ).format(class_type_name)
    self.checkRaises(
        [call.call('Y' * 20)],
        mocked.method_calls,
        expected_message,
        strict=True,
    )
def test_list_subclass_long_strict(self):
    """Strict mode distinguishes a list from mock's _CallList even when
    both sides' truncated reprs carry the same element text."""
    mocked = Mock()
    mocked.call('X' * 20)
    expected_message = (
        "[call.call('YYYYYYYYYYYYYYYYYY... "
        "(<{0} 'list'>) != "
        "[call.call('XXXXXXXXXXXXXXXXXX... "
        "(<class 'mock._CallList'>)"
    ).format(class_type_name)
    self.checkRaises(
        [call.call('Y' * 20)],
        mocked.method_calls,
        expected_message,
        strict=True,
    )
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    # A manager mock records the relative order of model.eval() and the
    # model's forward calls, so ordering can be asserted below.
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')
    evaluator = Evaluator()
    evaluator.evaluate(self.seq2seq, self.dataset)
    # eval() must be invoked first, followed by one forward call per batch.
    expected_calls = [call.eval()] + \
        self.dataset.num_batches(evaluator.batch_size) * \
        [call.call(ANY, ANY, volatile=ANY)]
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(expected_calls, mock_mgr.mock_calls)
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    # Fixed: the docstring read "evaluation.txt ... evaluation.txt mode" —
    # an apparent global find/replace accident (siblings say "evaluation").
    # A manager mock records the relative order of model.eval() and the
    # model's forward calls.
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')
    evaluator = Evaluator(batch_size=64)
    # Stub out tensor stacking and loss accumulation; only call ordering
    # is under test here.
    with patch('seq2seq.evaluator.evaluator.torch.stack', return_value=None), \
            patch('seq2seq.loss.NLLLoss.eval_batch', return_value=None):
        evaluator.evaluate(self.seq2seq, self.dataset)
    num_batches = int(math.ceil(len(self.dataset) / evaluator.batch_size))
    # eval() must come first, then one forward call per batch.
    expected_calls = [call.eval()] + num_batches * [call.call(ANY, ANY, ANY)]
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected_calls, mock_mgr.mock_calls)
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    # A manager mock records the relative order of model.eval() and the
    # model's forward calls.
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')
    evaluator = Evaluator()
    # Fixed: WordAccuracy.eval_batch was patched twice with identical
    # arguments; the inner patch merely shadowed the outer one, so the
    # duplicate has been removed.
    with patch('machine.evaluator.evaluator.torch.stack', return_value=None), \
            patch('machine.metrics.WordAccuracy.eval_batch', return_value=None), \
            patch('machine.loss.NLLLoss.eval_batch', return_value=None):
        evaluator.evaluate(self.seq2seq, self.data_iterator,
                           trainer.get_batch_data)
    num_batches = len(self.data_iterator)
    # eval() must come first, then one forward call per batch.
    expected_calls = [call.eval()] + num_batches * [call.call(ANY, ANY, ANY)]
    self.assertEqual(expected_calls, mock_mgr.mock_calls)
param.data.uniform_(-0.08, 0.08)  # NOTE(review): tail of a preceding function; original indentation was lost

@patch.object(Seq2seq, '__call__', return_value=(
    [], None, dict(inputs=[], length=[10] * 64, sequence=MagicMock())))
@patch.object(Seq2seq, 'eval')
def test_set_eval_mode(self, mock_eval, mock_call):
    """ Make sure that evaluation is done in evaluation mode. """
    # A manager mock records the relative order of model.eval() and the
    # model's forward calls.
    mock_mgr = MagicMock()
    mock_mgr.attach_mock(mock_eval, 'eval')
    mock_mgr.attach_mock(mock_call, 'call')
    evaluator = Evaluator(batch_size=64)
    # Fixed: resolved the unresolved git merge conflict
    # (<<<<<<< HEAD / ======= / >>>>>>> upstream/master), which made the
    # file unparseable. Kept the upstream/master side ('machine.*' paths)
    # to match the sibling tests; also dropped upstream's duplicated patch
    # of WordAccuracy.eval_batch (the inner patch merely shadowed the
    # outer identical one).
    with patch('machine.evaluator.evaluator.torch.stack', return_value=None), \
            patch('machine.metrics.WordAccuracy.eval_batch', return_value=None), \
            patch('machine.loss.NLLLoss.eval_batch', return_value=None):
        evaluator.evaluate(self.seq2seq, self.dataset,
                           trainer.get_batch_data)
    num_batches = int(math.ceil(len(self.dataset) / evaluator.batch_size))
    # eval() must come first, then one forward call per batch.
    expected_calls = [call.eval()] + num_batches * [call.call(ANY, ANY, ANY)]
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected_calls, mock_mgr.mock_calls)