    def test_writer(self, mock_is_primary_func: mock.MagicMock) -> None:
        """
        Tests that the tensorboard writer writes the correct scalars to SummaryWriter
        iff is_primary() is True.
        """
        for phase_idx, master in product([0, 1, 2], [True, False]):
            train, phase_type = ((True, "train") if phase_idx % 2 == 0 else
                                 (False, "test"))
            mock_is_primary_func.return_value = master

            # set up the task and state
            config = get_test_task_config()
            config["dataset"]["train"]["batchsize_per_replica"] = 2
            config["dataset"]["test"]["batchsize_per_replica"] = 5
            task = build_task(config)
            task.prepare()
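            # advance into the first phase so the task's phase state is set up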
            task.advance_phase()
            task.phase_idx = phase_idx
            task.train = train

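            # synthetic per-step losses and sample fetch times to feed the hook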
            losses = [1.23, 4.45, 12.3, 3.4]
            sample_fetch_times = [1.1, 2.2, 3.3, 2.2]

            summary_writer = SummaryWriter(self.base_dir)
            # create a spy on top of summary_writer
            summary_writer = mock.MagicMock(wraps=summary_writer)

            # create a loss/LR tensorboard plot hook
            tensorboard_plot_hook = TensorboardPlotHook(summary_writer)

            # run the hook in the correct order
            tensorboard_plot_hook.on_phase_start(task)

            # test tasks which do not pass the sample_fetch_times as well
            disable_sample_fetch_times = phase_idx == 0

            for loss, sample_fetch_time in zip(losses, sample_fetch_times):
                task.losses.append(loss)
                step_data = ({} if disable_sample_fetch_times else {
                    "sample_fetch_time": sample_fetch_time
                })
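                # only step_data matters to this hook; the remaining
                # LastBatchInfo fields are unused here, so they are left None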
                task.last_batch = LastBatchInfo(None, None, None, None,
                                                step_data)
                tensorboard_plot_hook.on_step(task)

            tensorboard_plot_hook.on_phase_end(task)

            if master:
                # add_scalar() should have been called with the right scalars
                if train:
                    learning_rate_key = f"Learning Rate/{phase_type}"
                    summary_writer.add_scalar.assert_any_call(
                        learning_rate_key,
                        mock.ANY,
                        global_step=mock.ANY,
                        walltime=mock.ANY,
                    )
                avg_loss_key = f"Losses/{phase_type}"
                summary_writer.add_scalar.assert_any_call(avg_loss_key,
                                                          mock.ANY,
                                                          global_step=mock.ANY)
                for meter in task.meters:
                    for name in meter.value:
                        meter_key = f"Meters/{phase_type}/{meter.name}/{name}"
                        summary_writer.add_scalar.assert_any_call(
                            meter_key, mock.ANY, global_step=mock.ANY)
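                # when sample_fetch_time was supplied, the hook should also
                # have logged a cumulative fetch-time scalar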
                if not disable_sample_fetch_times:
                    summary_writer.add_scalar.assert_any_call(
                        f"Speed/{phase_type}/cumulative_sample_fetch_time",
                        mock.ANY,
                        global_step=mock.ANY,
                        walltime=mock.ANY,
                    )
            else:
                # add_scalar() shouldn't be called since is_primary() is False
                summary_writer.add_scalar.assert_not_called()
            summary_writer.add_scalar.reset_mock()
Example #2
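This second example is an older variant of the same test: the hook callbacks still take an explicit local_variables dict, and the primary-worker check is named is_master() rather than is_primary().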
    def test_writer(self, mock_is_master_func: mock.MagicMock) -> None:
        """
        Tests that the tensorboard writer writes the correct scalars to SummaryWriter
        iff is_master() is True.
        """
        for phase_idx, master in product([0, 1, 2], [True, False]):
            train, phase_type = ((True, "train") if phase_idx % 2 == 0 else
                                 (False, "test"))
            mock_is_master_func.return_value = master

            # set up the task and state
            config = get_test_task_config()
            config["dataset"]["train"]["batchsize_per_replica"] = 2
            config["dataset"]["test"]["batchsize_per_replica"] = 5
            task = build_task(config)
            task.prepare()
            task.phase_idx = phase_idx
            task.train = train

            losses = [1.23, 4.45, 12.3, 3.4]

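            # this older hook API threads shared state through a dict that is
            # passed to every callback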
            local_variables = {}

            summary_writer = SummaryWriter(self.base_dir)
            # create a spy on top of summary_writer
            summary_writer = mock.MagicMock(wraps=summary_writer)

            # create a loss/LR tensorboard plot hook
            tensorboard_plot_hook = TensorboardPlotHook(summary_writer)

            # test that the hook logs a warning and doesn't write anything to
            # the writer if on_phase_start() is not called for initialization
            # before on_step() is called.
            with self.assertLogs() as log_watcher:
                tensorboard_plot_hook.on_step(task, local_variables)

            self.assertEqual(len(log_watcher.records), 1)
            self.assertEqual(log_watcher.records[0].levelno, logging.WARNING)
            self.assertIn(
                "learning_rates is not initialized", log_watcher.output[0]
            )

            # test that the hook logs a warning and doesn't write anything to
            # the writer if on_phase_end() is called without on_phase_start()
            # having been called for initialization.
            with self.assertLogs() as log_watcher:
                tensorboard_plot_hook.on_phase_end(task, local_variables)

            self.assertEqual(len(log_watcher.records), 1)
            self.assertEqual(log_watcher.records[0].levelno, logging.WARNING)
            self.assertIn(
                "learning_rates is not initialized", log_watcher.output[0]
            )
            summary_writer.add_scalar.reset_mock()

            # run the hook in the correct order
            tensorboard_plot_hook.on_phase_start(task, local_variables)

            for loss in losses:
                task.losses.append(loss)
                tensorboard_plot_hook.on_step(task, local_variables)

            tensorboard_plot_hook.on_phase_end(task, local_variables)

            if master:
                # add_scalar() should have been called with the right scalars
                if train:
                    loss_key = f"{phase_type}_loss"
                    learning_rate_key = f"{phase_type}_learning_rate_updates"
                    summary_writer.add_scalar.assert_any_call(
                        loss_key,
                        mock.ANY,
                        global_step=mock.ANY,
                        walltime=mock.ANY)
                    summary_writer.add_scalar.assert_any_call(
                        learning_rate_key,
                        mock.ANY,
                        global_step=mock.ANY,
                        walltime=mock.ANY,
                    )
                avg_loss_key = f"avg_{phase_type}_loss"
                summary_writer.add_scalar.assert_any_call(avg_loss_key,
                                                          mock.ANY,
                                                          global_step=mock.ANY)
                for meter in task.meters:
                    for name in meter.value:
                        meter_key = f"{phase_type}_{meter.name}_{name}"
                        summary_writer.add_scalar.assert_any_call(
                            meter_key, mock.ANY, global_step=mock.ANY)
            else:
                # add_scalar() shouldn't be called since is_master() is False
                summary_writer.add_scalar.assert_not_called()
            summary_writer.add_scalar.reset_mock()