def test_enter_exit_threadsafe(self):
    # This test ensures reporter.__enter__ correctly stores the reporter
    # in the thread-local storage.

    def thread_func(reporter, record):
        with reporter:
            # Sleep for a tiny moment to cause an overlap of the
            # context managers.
            time.sleep(0.01)
            record.append(pytorch_trainer.get_current_reporter())

    record1 = []  # The current reporter in each thread is stored here.
    record2 = []
    reporter1 = pytorch_trainer.Reporter()
    reporter2 = pytorch_trainer.Reporter()
    thread1 = threading.Thread(
        target=thread_func,
        args=(reporter1, record1))
    thread2 = threading.Thread(
        target=thread_func,
        args=(reporter2, record2))
    thread1.daemon = True
    thread2.daemon = True
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
    self.assertIs(record1[0], reporter1)
    self.assertIs(record2[0], reporter2)
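# A minimal sketch of the mechanism the test above exercises: keeping the
# current reporter on a per-thread stack via threading.local(), so that
# concurrent `with reporter:` blocks in different threads do not clobber
# each other. This is an illustrative assumption, not pytorch_trainer's
# actual code; `_thread_local`, `_ReporterSketch`, and
# `_get_current_reporter_sketch` are hypothetical names.

import threading

_thread_local = threading.local()


class _ReporterSketch:

    def __enter__(self):
        # Each thread sees its own stack, so nesting and concurrency both
        # work: __exit__ restores whatever reporter was active before.
        if not hasattr(_thread_local, 'stack'):
            _thread_local.stack = []
        _thread_local.stack.append(self)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _thread_local.stack.pop()


def _get_current_reporter_sketch():
    # Returns the innermost active reporter of the calling thread.
    return _thread_local.stack[-1]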
def test_enter_exit(self):
    reporter1 = pytorch_trainer.Reporter()
    reporter2 = pytorch_trainer.Reporter()
    with reporter1:
        self.assertIs(pytorch_trainer.get_current_reporter(), reporter1)
        with reporter2:
            self.assertIs(
                pytorch_trainer.get_current_reporter(), reporter2)
        self.assertIs(pytorch_trainer.get_current_reporter(), reporter1)
def test_scope(self):
    reporter1 = pytorch_trainer.Reporter()
    reporter2 = pytorch_trainer.Reporter()
    with reporter1:
        observation = {}
        with reporter2.scope(observation):
            self.assertIs(
                pytorch_trainer.get_current_reporter(), reporter2)
            self.assertIs(reporter2.observation, observation)
        self.assertIs(pytorch_trainer.get_current_reporter(), reporter1)
        self.assertIsNot(reporter2.observation, observation)
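# The `scope` behaviour verified above can be pictured as entering the
# reporter while temporarily swapping in a caller-provided observation dict.
# A minimal sketch under that assumption (illustrative only; `scope_sketch`
# is a hypothetical name, not pytorch_trainer's API):

import contextlib


@contextlib.contextmanager
def scope_sketch(reporter, observation):
    # Temporarily make `reporter` the current reporter and route its
    # observations into the caller-provided dict.
    old = reporter.observation
    reporter.observation = observation
    try:
        with reporter:
            yield
    finally:
        # Restoring the previous dict matches the `assertIsNot` check above:
        # after the scope exits, the reporter no longer aliases the caller's
        # dict.
        reporter.observation = old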
def test_evaluate(self):
    reporter = pytorch_trainer.Reporter()
    reporter.add_observer('target', self.target)
    with reporter:
        mean = self.evaluator.evaluate()

    # No observation is reported to the current reporter. Instead, the
    # evaluator collects the results in order to calculate their mean.
    self.assertEqual(len(reporter.observation), 0)

    # The converter gets the results of the iterator.
    self.assertEqual(len(self.converter.args), len(self.data))
    for i in range(len(self.data)):
        numpy.testing.assert_array_equal(
            self.converter.args[i]['batch'], self.data[i])
        self.assertIsNone(self.converter.args[i]['device'])

    # The model gets the results of the converter.
    self.assertEqual(len(self.target.args), len(self.batches))
    for i in range(len(self.batches)):
        numpy.testing.assert_array_equal(
            self.target.args[i], self.batches[i])

    self.assertAlmostEqual(mean['target/loss'], self.expect_mean, places=4)

    self.evaluator.finalize()
    self.assertTrue(self.iterator.finalized)
def test_evaluate(self):
    data = self.data
    batches = self.batches
    device = self.device
    iterator, converter, target, evaluator = self.prepare(
        data, batches, device)

    reporter = pytorch_trainer.Reporter()
    reporter.add_observer('target', target)
    with reporter:
        mean = evaluator.evaluate()

    # The converter gets the results of the iterator and the target device.
    self.assertEqual(len(converter.args), len(data))
    expected_device_arg = self.device
    for i in range(len(data)):
        numpy.testing.assert_array_equal(
            converter.args[i]['batch'].cpu().numpy(), self.data[i])
        self.assertEqual(
            converter.args[i]['device'].type, expected_device_arg)

    # The model gets the results of the converter.
    self.assertEqual(len(target.args), len(batches))
    for i in range(len(batches)):
        self.assertEqual(target.args[i], self.batches[i])

    expect_mean = torch.stack(
        [torch.stack(x).sum() for x in self.batches]).mean()
    self.assertAlmostEqual(
        mean['target/loss'].cpu().numpy(),
        expect_mean.cpu().numpy(), places=4)
def test_report(self):
    reporter = pytorch_trainer.Reporter()
    with reporter:
        pytorch_trainer.report({'x': 1})
    observation = reporter.observation
    self.assertIn('x', observation)
    self.assertEqual(observation['x'], 1)
def test_report_with_observer(self):
    reporter = pytorch_trainer.Reporter()
    observer = object()
    reporter.add_observer('o', observer)
    with reporter:
        pytorch_trainer.report({'x': 1}, observer)
    observation = reporter.observation
    self.assertIn('o/x', observation)
    self.assertEqual(observation['o/x'], 1)
def test_evaluate(self):
    reporter = pytorch_trainer.Reporter()
    reporter.add_observer('target', self.target)
    with reporter:
        self.evaluator.evaluate()

    # The model gets the results of the converter.
    self.assertEqual(len(self.target.args), len(self.batches))
    for i in range(len(self.batches)):
        numpy.testing.assert_array_equal(
            self.target.args[i], self.batches[i])
def test_report_scope(self):
    reporter = pytorch_trainer.Reporter()
    observation = {}
    with reporter:
        with pytorch_trainer.report_scope(observation):
            pytorch_trainer.report({'x': 1})
    self.assertIn('x', observation)
    self.assertEqual(observation['x'], 1)
    self.assertNotIn('x', reporter.observation)
def test_evaluate(self):
    reporter = pytorch_trainer.Reporter()
    reporter.add_observer('target', self.target)
    with reporter:
        mean = self.evaluator.evaluate()

    # The model gets the results of the converter.
    self.assertEqual(len(self.target.args), len(self.batches))
    for i in range(len(self.batches)):
        numpy.testing.assert_array_equal(
            self.target.args[i][0], self.batches[i]['x'])
        numpy.testing.assert_array_equal(
            self.target.args[i][1], self.batches[i]['y'])

    expect_mean = torch.stack(
        [x['x'].sum() + x['y'].sum() for x in self.batches]).mean()
    self.assertAlmostEqual(
        mean['target/loss'].cpu().numpy(),
        expect_mean.cpu().numpy(), places=4)
def test_add_observers(self):
    reporter = pytorch_trainer.Reporter()
    observer1 = object()
    reporter.add_observer('o1', observer1)
    observer2 = object()
    reporter.add_observer('o2', observer2)

    reporter.report({'x': 1}, observer1)
    reporter.report({'y': 2}, observer2)

    observation = reporter.observation
    self.assertIn('o1/x', observation)
    self.assertEqual(observation['o1/x'], 1)
    self.assertIn('o2/y', observation)
    self.assertEqual(observation['o2/y'], 2)
    self.assertNotIn('x', observation)
    self.assertNotIn('y', observation)
    self.assertNotIn('o1/y', observation)
    self.assertNotIn('o2/x', observation)
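# The key prefixing in test_add_observers (and the KeyError in
# test_report_with_unregistered_observer below) is consistent with the
# reporter keeping an id -> name map for registered observers. A sketch
# under that assumption (`_report_sketch` and `observer_names` are
# hypothetical names, not confirmed internals):

def _report_sketch(observer_names, observation, values, observer=None):
    # `observer_names` maps id(observer) -> registered name. Reporting with
    # an unregistered observer raises KeyError, as the tests expect.
    if observer is not None:
        name = observer_names[id(observer)]
        values = {name + '/' + key: value for key, value in values.items()}
    observation.update(values)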
def test_evaluator(self):
    reporter = pytorch_trainer.Reporter()
    reporter.add_observer('target', self.target)
    with reporter:
        self.evaluator.evaluate()
def test_current_report(self):
    reporter = pytorch_trainer.Reporter()
    with reporter:
        mean = self.evaluator()
    # The result is reported to the current reporter.
    self.assertEqual(reporter.observation, mean)
def test_empty_reporter(self):
    reporter = pytorch_trainer.Reporter()
    self.assertEqual(reporter.observation, {})
def test_report_with_unregistered_observer(self):
    reporter = pytorch_trainer.Reporter()
    observer = object()
    with reporter:
        with self.assertRaises(KeyError):
            pytorch_trainer.report({'x': 1}, observer)
def test_keep_graph_default(self):
    x = torch.from_numpy(
        numpy.array([1], numpy.float32)).requires_grad_(True)
    y = functional.sigmoid(x)
    reporter = pytorch_trainer.Reporter()
    reporter.report({'y': y})
    self.assertFalse(reporter.observation['y'].requires_grad)
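# test_keep_graph_default above implies that, by default, reported tensors
# are cut off from the autograd graph. A sketch of the reporting step under
# that assumption (illustrative only; `_report_value_sketch` and the
# `keep_graph` flag as shown are not confirmed API):

import torch


def _report_value_sketch(observation, key, value, keep_graph=False):
    # Detaching avoids keeping the whole computation graph (and its memory)
    # alive just to log a value; detach() returns a tensor with
    # requires_grad=False, which is what the assertion above checks.
    if isinstance(value, torch.Tensor) and not keep_graph:
        value = value.detach()
    observation[key] = value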