Example #1
    def test_enter_exit_threadsafe(self):
        # This test ensures reporter.__enter__ correctly stores the reporter
        # in the thread-local storage.

        def thread_func(reporter, record):
            with reporter:
                # Sleep for a tiny moment to cause an overlap of the context
                # managers.
                time.sleep(0.01)
                record.append(chainer.get_current_reporter())

        record1 = []  # The current reporter in each thread is stored here.
        record2 = []
        reporter1 = chainer.Reporter()
        reporter2 = chainer.Reporter()
        thread1 = threading.Thread(
            target=thread_func,
            args=(reporter1, record1))
        thread2 = threading.Thread(
            target=thread_func,
            args=(reporter2, record2))
        thread1.daemon = True
        thread2.daemon = True
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()
        self.assertIs(record1[0], reporter1)
        self.assertIs(record2[0], reporter2)
Example #2
 def test_enter_exit(self):
     reporter1 = chainer.Reporter()
     reporter2 = chainer.Reporter()
     with reporter1:
         self.assertIs(chainer.get_current_reporter(), reporter1)
         with reporter2:
             self.assertIs(chainer.get_current_reporter(), reporter2)
         self.assertIs(chainer.get_current_reporter(), reporter1)
Example #3
 def test_scope(self):
     reporter1 = chainer.Reporter()
     reporter2 = chainer.Reporter()
     with reporter1:
         observation = {}
         with reporter2.scope(observation):
             self.assertIs(chainer.get_current_reporter(), reporter2)
             self.assertIs(reporter2.observation, observation)
         self.assertIs(chainer.get_current_reporter(), reporter1)
         self.assertIsNot(reporter2.observation, observation)
Example #4
    def test_evaluate(self, backend_config):
        data = backend_config.get_array(self.data)
        batches = [backend_config.get_array(b) for b in self.batches]
        device = backend_config.device

        iterator, converter, target, evaluator = (self.prepare(
            data, batches, device))

        reporter = chainer.Reporter()
        reporter.add_observer('target', target)
        with reporter:
            mean = evaluator.evaluate()

        # The converter gets results of the iterator and the device number.
        self.assertEqual(len(converter.args), len(data))
        if backend_config.use_cuda:
            expected_device_arg = backend_config.cuda_device
        else:
            expected_device_arg = -1

        for i in range(len(data)):
            numpy.testing.assert_array_equal(
                _cpu._to_cpu(converter.args[i]['batch']), self.data[i])
            self.assertEqual(converter.args[i]['device'], expected_device_arg)

        # The model gets results of converter.
        self.assertEqual(len(target.args), len(batches))
        for i in range(len(batches)):
            numpy.testing.assert_array_equal(_cpu._to_cpu(target.args[i]),
                                             self.batches[i])

        expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
        self.assertAlmostEqual(_cpu._to_cpu(mean['target/loss']),
                               expect_mean,
                               places=4)
Example #5
def _test_roc_auc_evaluator_with_labels(data1):
    """test `pos_labels` and `ignore_labels` behavior"""

    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data1)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = ROCAUCEvaluator(
        iterator, predictor, name='val',
        pos_labels=[1, 2], ignore_labels=-1,
    )

    # --- test evaluate ---
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_roc_auc = 0.75
    # print('observation ', observation)
    assert observation['target/roc_auc'] == expected_roc_auc

    # --- test __call__ ---
    result = evaluator()
    # print('result ', result)
    assert result['val/main/roc_auc'] == expected_roc_auc
Example #6
 def test_keep_graph(self):
     x = chainer.Variable(numpy.array([1], numpy.float32))
     y = functions.sigmoid(x)
     reporter = chainer.Reporter()
     with self._scope(True):
         reporter.report({'y': y})
     assert reporter.observation['y'].creator is not None
Example #7
 def test_not_keep_graph(self):
     x = chainer.Variable(numpy.array([1], numpy.float32))
     y = functions.Sigmoid()(x)
     reporter = chainer.Reporter()
     with self._scope(False):
         reporter.report({'y': y})
     self.assertIsNone(reporter.observation['y'].creator)
Example #8
    def test_report_without_observer(self):
        reporter = chainer.Reporter()
        reporter.report({'x': 1})

        observation = reporter.observation
        self.assertIn('x', observation)
        self.assertEqual(observation['x'], 1)
Example #9
    def test_report_key(self, metrics_fun, compute_metrics):
        repo = chainer.Reporter()

        link = Regressor(predictor=DummyPredictor(),
                         metrics_fun=metrics_fun)
        link.compute_metrics = compute_metrics
        repo.add_observer('target', link)
        with repo:
            observation = {}
            # `reporter` presumably refers to the chainer.reporter module
            # (report_scope is module-level), not the Reporter instance `repo`.
            with reporter.report_scope(observation):
                link(self.x, self.t)

        # print('observation ', observation)
        actual_keys = set(observation.keys())
        if compute_metrics:
            if metrics_fun is None:
                assert set(['target/loss']) == actual_keys
            elif isinstance(metrics_fun, dict):
                assert set(['target/loss', 'target/user_key']) == actual_keys
            elif callable(metrics_fun):
                assert set(['target/loss', 'target/metrics']) == actual_keys
            else:
                raise TypeError()
        else:
            assert set(['target/loss']) == actual_keys
Example #10
 def test_keep_graph_default(self):
     x = chainer.Variable(numpy.array([1], numpy.float32))
     y, = functions.Sigmoid().apply((x, ))
     reporter = chainer.Reporter()
     with self._scope(None):
         reporter.report({'y': y})
     self.assertIsNone(reporter.observation['y'].creator)
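Examples #6, #7, and #10 above rely on a `self._scope(...)` helper that is not shown in the snippets. Presumably it toggles Chainer's `keep_graph_on_report` configuration flag, which controls whether `Reporter.report()` retains the computational graph of reported variables. A minimal sketch of such a helper, under that assumption:

import contextlib

import chainer


@contextlib.contextmanager
def _scope(keep_graph):
    # Hypothetical stand-in for the `self._scope` helper used in the tests
    # above. `None` leaves the default configuration untouched (graphs are
    # not kept on report).
    if keep_graph is None:
        yield
    else:
        with chainer.using_config('keep_graph_on_report', keep_graph):
            yield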
Example #11
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.target)
        with reporter:
            mean = self.evaluator.evaluate()

        # No observation is reported to the current reporter. Instead the
        # evaluator collects results in order to calculate their mean.
        self.assertEqual(len(reporter.observation), 0)

        # The converter gets results of the iterator.
        self.assertEqual(len(self.converter.args), len(self.data))
        for i in range(len(self.data)):
            numpy.testing.assert_array_equal(self.converter.args[i]['batch'],
                                             self.data[i])
            self.assertIsNone(self.converter.args[i]['device'])

        # The model gets results of converter.
        self.assertEqual(len(self.target.args), len(self.batches))
        for i in range(len(self.batches)):
            numpy.testing.assert_array_equal(self.target.args[i],
                                             self.batches[i])

        self.assertAlmostEqual(mean['target/loss'], self.expect_mean, places=4)

        self.evaluator.finalize()
        self.assertTrue(self.iterator.finalized)
Example #12
 def test_report_with_observer(self):
     reporter = chainer.Reporter()
     observer = object()
     reporter.add_observer('o', observer)
     with reporter:
         chainer.report({'x': 1}, observer)
     observation = reporter.observation
     self.assertIn('o/x', observation)
     self.assertEqual(observation['o/x'], 1)
Example #13
    def test_report_scope(self):
        reporter = chainer.Reporter()
        observation = {}

        with reporter:
            with chainer.report_scope(observation):
                chainer.report({'x': 1})

        self.assertIn('x', observation)
        self.assertEqual(observation['x'], 1)
        self.assertNotIn('x', reporter.observation)
Example #14
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.link)
        with reporter:
            mean = self.evaluator.evaluate()

        # No observation is reported to the current reporter. Instead the
        # evaluator collects results in order to calculate their mean.
        self.assertEqual(len(reporter.observation), 0)

        self.assertEqual(mean['target/map'], self.expect_map)
Example #15
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.target)
        with reporter:
            self.evaluator.evaluate()

        # The model gets results of converter.
        self.assertEqual(len(self.target.args), len(self.batches))
        for i in range(len(self.batches)):
            numpy.testing.assert_array_equal(self.target.args[i],
                                             self.batches[i])
Example #16
    def test_image(self):
        img = np.zeros(750).reshape((10, 5, 5, 3))

        with self.assertRaises(IndexError) as e:
            summary.image(img, 'test', ch_axis=-1)
        assert 'out of range' in str(e.exception)

        reporter = chainer.Reporter()
        observer = summary.chainerui_image_observer
        reporter.add_observer(summary.CHAINERUI_IMAGE_PREFIX, observer)
        with reporter.scope(observer.observation):
            summary.image(img, 'test', ch_axis=-1)

        assert len(observer.observation) == 1
        key = summary.CHAINERUI_IMAGE_PREFIX+'/test'
        assert key in observer.observation

        img2 = np.zeros(750).reshape((10, 5, 5, 3))
        img2[0, 0, 0, 1] = 1
        with reporter.scope(observer.observation):
            summary.image(img2, 'test', ch_axis=-1)
        assert len(observer.observation) == 1
        assert key in observer.observation
        assert np.allclose(observer.observation[key]['array'], img2)
        assert 'row' not in observer.observation[key]
        assert 'mode' not in observer.observation[key]

        img3 = np.zeros(750).reshape((10, 5, 5, 3))
        img3[0, 0, 0, 2] = 1
        img3 = chainer.Variable(img3)
        with reporter.scope(observer.observation):
            summary.image(img3, row=5, ch_axis=-1, mode='hsv')
        assert len(observer.observation) == 2
        none_key = summary.CHAINERUI_IMAGE_PREFIX+'/0'
        assert none_key in observer.observation
        assert np.allclose(observer.observation[none_key]['array'], img3.data)
        assert observer.observation[none_key]['row'] == 5
        assert observer.observation[none_key]['mode'] == 'hsv'

        img4 = np.zeros(750).reshape((10, 3, 5, 5))
        img4[0, 0, 1, 0] = 1
        with reporter.scope(observer.observation):
            summary.image(img4, 'test')
        assert len(observer.observation) == 2
        expected_img4 = img4.transpose(0, 2, 3, 1)
        assert np.allclose(observer.observation[key]['array'], expected_img4)
        assert 'row' not in observer.observation[key]
        assert 'mode' not in observer.observation[key]
Example #17
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.target)
        with reporter:
            mean = self.evaluator.evaluate()

        # The model gets results of converter.
        self.assertEqual(len(self.target.args), len(self.batches))
        for i in range(len(self.batches)):
            numpy.testing.assert_array_equal(self.target.args[i][0],
                                             self.batches[i]['x'])
            numpy.testing.assert_array_equal(self.target.args[i][1],
                                             self.batches[i]['y'])

        expect_mean = numpy.mean(
            [numpy.sum(x['x']) + numpy.sum(x['y']) for x in self.batches])
        self.assertAlmostEqual(mean['target/loss'], expect_mean, places=4)
Example #18
def _test_prc_auc_evaluator_raise_error(data, raise_value_error=True):

    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='train',
        pos_labels=1, ignore_labels=None,
        raise_value_error=raise_value_error
    )
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    return observation['target/prc_auc']
Example #19
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.link)
        with reporter:
            mean = self.evaluator.evaluate()

        # No observation is reported to the current reporter. Instead the
        # evaluator collects results in order to calculate their mean.
        self.assertEqual(len(reporter.observation), 0)

        key = 'ap/iou=0.50:0.95/area=all/max_dets=100'
        np.testing.assert_equal(mean['target/m{}'.format(key)],
                                self.expected_ap)
        np.testing.assert_equal(mean['target/{}/cls0'.format(key)], np.nan)
        np.testing.assert_equal(mean['target/{}/cls1'.format(key)], np.nan)
        np.testing.assert_equal(mean['target/{}/cls2'.format(key)],
                                self.expected_ap)
Example #20
def _test_r2_score_evaluator(inputs):
    predictor = DummyPredictor()
    x0, x1, _ = inputs
    dataset = NumpyTupleDataset(x0, x1)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = R2ScoreEvaluator(iterator, predictor, name='train')
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected = r2_score(x0, x1)
    assert observation['target/r2_score'][0] == pytest.approx(expected)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/r2_score'][0] == pytest.approx(expected)
Example #21
    def __init__(self,
                 model,
                 iterator,
                 optimizers,
                 converter,
                 device=None,
                 loss_func=None,
                 iterative=False):
        super(Updater, self).__init__(iterator, optimizers, converter, device,
                                      loss_func)
        self.model = model
        self.optimizers = optimizers
        self.iterative = iterative

        reporter = chainer.Reporter()
        reporter.add_observer('main', model)
        reporter.add_observers('main', model.namedlinks(skipself=True))
        self.reporter = reporter
Example #22
def run():
    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-eid', '--experiment_id', type=str)
    parser.add_argument('-d',
                        '--model_directory',
                        type=str,
                        default=MODEL_DIRECTORY)
    parser.add_argument('-dd',
                        '--dataset_directory',
                        type=str,
                        default=DATASET_DIRECTORY)
    parser.add_argument('-cls', '--class_ids', type=str, default=CLASS_IDS_ALL)
    parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
    parser.add_argument('-g', '--gpu', type=int, default=GPU)
    args = parser.parse_args()
    directory_output = os.path.join(args.model_directory, args.experiment_id)

    # set random seed, gpu
    random.seed(args.seed)
    np.random.seed(args.seed)
    cp.random.seed(args.seed)
    chainer.cuda.get_device(args.gpu).use()

    # load dataset
    dataset_test = datasets.ShapeNet(args.dataset_directory,
                                     args.class_ids.split(','), 'test')

    # setup model & optimizer
    model = models.Model()
    model.to_gpu()
    chainer.serializers.load_npz(os.path.join(directory_output, 'model.npz'),
                                 model)

    # evaluate
    reporter = chainer.Reporter()
    observation = {}
    with reporter.scope(observation):
        training.validation(None, model, dataset_test)
    for key in sorted(observation.keys()):
        key_display = key
        for class_id in CLASS_NAMES.keys():
            key_display = key_display.replace(class_id, CLASS_NAMES[class_id])
        print('%s: %.4f' % (key_display, observation[key]))
Example #23
    def test_add_observers(self):
        reporter = chainer.Reporter()
        observer1 = object()
        reporter.add_observer('o1', observer1)
        observer2 = object()
        reporter.add_observer('o2', observer2)

        reporter.report({'x': 1}, observer1)
        reporter.report({'y': 2}, observer2)

        observation = reporter.observation
        self.assertIn('o1/x', observation)
        self.assertEqual(observation['o1/x'], 1)
        self.assertIn('o2/y', observation)
        self.assertEqual(observation['o2/y'], 2)
        self.assertNotIn('x', observation)
        self.assertNotIn('y', observation)
        self.assertNotIn('o1/y', observation)
        self.assertNotIn('o2/x', observation)
Example #24
    def test_consistency(self):
        reporter = chainer.Reporter()

        if self.comm.rank == 0:
            multi_iterator = SerialIterator(self.dataset,
                                            self.batchsize,
                                            repeat=False,
                                            shuffle=False)
        else:
            multi_iterator = None
        multi_link = _SemanticSegmentationStubLink(self.labels,
                                                   self.initial_count)
        multi_evaluator = SemanticSegmentationEvaluator(multi_iterator,
                                                        multi_link,
                                                        label_names=('cls0',
                                                                     'cls1',
                                                                     'cls2'),
                                                        comm=self.comm)
        reporter.add_observer('target', multi_link)
        with reporter:
            multi_mean = multi_evaluator.evaluate()

        if self.comm.rank != 0:
            self.assertEqual(multi_mean, {})
            return

        single_iterator = SerialIterator(self.dataset,
                                         self.batchsize,
                                         repeat=False,
                                         shuffle=False)
        single_link = _SemanticSegmentationStubLink(self.labels)
        single_evaluator = SemanticSegmentationEvaluator(single_iterator,
                                                         single_link,
                                                         label_names=('cls0',
                                                                      'cls1',
                                                                      'cls2'))
        reporter.add_observer('target', single_link)
        with reporter:
            single_mean = single_evaluator.evaluate()

        self.assertEqual(set(multi_mean.keys()), set(single_mean.keys()))
        for key in multi_mean.keys():
            np.testing.assert_equal(single_mean[key], multi_mean[key])
Example #25
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('target', self.target)
        with reporter:
            mean = self.evaluator.evaluate()

        # The converter gets results of the iterator and the device number.
        self.assertEqual(len(self.converter.args), len(self.data))
        for i in range(len(self.data)):
            numpy.testing.assert_array_equal(self.converter.args[i]['batch'],
                                             self.data[i])
            self.assertEqual(self.converter.args[i]['device'], 1)

        # The model gets results of converter.
        self.assertEqual(len(self.target.args), len(self.batches))
        for i in range(len(self.batches)):
            numpy.testing.assert_array_equal(self.target.args[i],
                                             self.batches[i])

        expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
        self.assertAlmostEqual(mean['target/loss'], expect_mean, places=4)
Example #26
def _test_prc_auc_evaluator_default_args(data0):

    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data0)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='train',
        pos_labels=1, ignore_labels=None
    )
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_prc_auc = 0.7916
    # expected_prc_auc above is rounded, hence the absolute tolerance
    assert observation['target/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-4)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-4)
Example #27
def setup_support(
    batch_size: int,
    gpu: Optional[int],
    model: Model,
    dataset: chainer.dataset.DatasetMixin,
):
    optimizer = _create_optimizer(model)
    train_iter = MultiprocessIterator(dataset, batch_size)

    if gpu is not None:
        model.to_gpu(gpu)

    updater = StandardUpdater(
        device=gpu,
        iterator=train_iter,
        optimizer=optimizer,
        converter=concat_optional,
    )

    reporter = chainer.Reporter()
    reporter.add_observer('main', model)

    return updater, reporter
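The `(updater, reporter)` pair returned by `setup_support` is typically driven by hand when no Trainer is used. A hedged usage sketch (the caller, its variables, and the reported keys are assumptions, not part of the original snippet):

# Hypothetical caller; `batch_size`, `gpu`, `model`, and `dataset` are assumed
# to be defined elsewhere by the calling code.
updater, reporter = setup_support(batch_size, gpu, model, dataset)

observation = {}
with reporter.scope(observation):  # make `reporter` current, collect into `observation`
    updater.update()               # one training step; the model's chainer.report()
                                   # calls end up in `observation` under 'main/...'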
Example #28
def _test_roc_auc_evaluator_default_args(data0):

    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data0)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = ROCAUCEvaluator(
        iterator, predictor, name='train',
        pos_labels=1, ignore_labels=None
    )
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_roc_auc = 0.75
    # print('observation ', observation)
    assert observation['target/roc_auc'] == expected_roc_auc

    # --- test __call__ ---
    result = evaluator()
    # print('result ', result)
    assert result['train/main/roc_auc'] == expected_roc_auc
Example #29
    def test_evaluate(self):
        reporter = chainer.Reporter()
        reporter.add_observer('main', self.link)
        with reporter:
            eval_ = self.evaluator.evaluate()

        # No observation is reported to the current reporter. Instead the
        # evaluator collects results in order to calculate their mean.
        np.testing.assert_equal(len(reporter.observation), 0)

        np.testing.assert_equal(eval_['main/miou'], self.miou)
        np.testing.assert_equal(eval_['main/pixel_accuracy'],
                                self.pixel_accuracy)
        np.testing.assert_equal(eval_['main/mean_class_accuracy'],
                                self.mean_class_accuracy)
        np.testing.assert_equal(eval_['main/iou/a'], self.iou_a)
        np.testing.assert_equal(eval_['main/iou/b'], self.iou_b)
        np.testing.assert_equal(eval_['main/iou/c'], np.nan)
        np.testing.assert_equal(eval_['main/class_accuracy/a'],
                                self.class_accuracy_a)
        np.testing.assert_equal(eval_['main/class_accuracy/b'],
                                self.class_accuracy_b)
        np.testing.assert_equal(eval_['main/class_accuracy/c'], np.nan)
Example #30
def _test_prc_auc_evaluator_with_labels(data1):
    """test `pos_labels` and `ignore_labels` behavior"""

    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data1)

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='val',
        pos_labels=[1, 2], ignore_labels=-1,
    )

    # --- test evaluate ---
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_prc_auc = 0.7916
    # expected_prc_auc above is rounded, hence the absolute tolerance
    assert observation['target/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-4)

    # --- test __call__ ---
    result = evaluator()
    assert result['val/main/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-4)