示例#1
0
def test_ImageToTensorBoard(tmp_path):
    """Smoke test `ImageToTensorBoard` in Eager and Compiled mode."""
    tmp_path = str(tmp_path)

    def plotting_cb(fig, axes):
        # Draw a random two-point line in each of the four subplots,
        # visiting them in the same order as before so the RNG stream matches.
        for row, col in ((0, 0), (1, 0), (0, 1), (1, 1)):
            axes[row, col].plot(np.random.randn(2), np.random.randn(2))

    task = ImageToTensorBoard(
        tmp_path,
        plotting_cb,
        "image",
        fig_kw={"figsize": (10, 10)},
        subplots_kw={"sharex": True, "nrows": 2, "ncols": 2},
    )

    # Run once eagerly, then once wrapped in tf.function (compiled).
    task(0)
    compiled = tf.function(task)
    compiled(0)
示例#2
0
plot_prediction(fig, ax)
plt.show()

# %% [markdown]
# ## Set up monitoring tasks
#
# We now define the `MonitorTask`s that will be executed during the optimisation.
# For this tutorial we set up three tasks:
# - `ModelToTensorBoard`: writes the model's hyper-parameters such as `likelihood.variance` and `kernel.lengthscales` to a TensorBoard.
# - `ImageToTensorBoard`: writes custom matplotlib images to a TensorBoard.
# - `ScalarToTensorBoard`: writes any scalar value to a TensorBoard. Here, we use it to write the model's training objective.

# %%
log_dir = "logs"  # Directory where TensorBoard files will be written.
model_task = ModelToTensorBoard(log_dir, model)
image_task = ImageToTensorBoard(log_dir, plot_prediction, "image_samples")
# The loss is wrapped in a lambda so it is re-evaluated each time the task runs.
lml_task = ScalarToTensorBoard(log_dir, lambda: model.training_loss(),
                               "training_objective")

# %% [markdown]
# We now group the tasks in a set of fast and slow tasks and pass them to the monitor.
# This allows us to execute the groups at a different frequency.

# %%
# Plotting tasks can be quite slow. We want to run them less frequently.
# We group them in a `MonitorTaskGroup` and set the period to 5.
slow_tasks = MonitorTaskGroup(image_task, period=5)

# The other tasks are fast. We run them at each iteration of the optimisation.
fast_tasks = MonitorTaskGroup([model_task, lml_task], period=1)
示例#3
0
def elbo_cb(data=None, **_):
    """Evaluate the model's ELBO on ``data``; extra keyword args are ignored."""
    elbo_value = model.elbo(data)
    return elbo_value


output_logdir = enumerated_logdir()

model_task = ModelToTensorBoard(output_logdir, model)
elbo_task = ScalarToTensorBoard(output_logdir, elbo_cb, "elbo")
print_task = ExecuteCallback(callback=print_cb)

# We group these tasks and specify a period of `100` steps for them
fast_tasks = MonitorTaskGroup([model_task, elbo_task, print_task], period=100)

# We also want to see the model's fit during the optimisation
image_task = ImageToTensorBoard(output_logdir, plot_model, "samples_image")

# We typically don't want to plot too frequently during optimisation,
# which is why we specify a larger period for this task.
slow_tasks = MonitorTaskGroup(image_task, period=500)
# Fix: the original name was misspelled as `slow_taks`; keep the old
# binding as an alias in case code outside this view references it.
slow_taks = slow_tasks
monitor = Monitor(fast_tasks, slow_tasks)


def monitored_training_loop(epochs: int):
    tf_optimization_step = tf.function(optimization_step)

    batches = iter(train_dataset)

    for epoch in range(epochs):
        for _ in range(ci_niter(num_batches_per_epoch)):
            batch = next(batches)
示例#4
0
    def configure_tensorboard_monitor(self,
                                      scalar_period,
                                      imgs_period,
                                      nb_images=1,
                                      do_phase_space=None):
        """Build a GPflow ``Monitor`` logging scalars (and optionally images)
        to the experiment's TensorBoard directory.

        Args:
            scalar_period: run the scalar logging tasks every this many steps;
                a value < 1 disables monitoring entirely.
            imgs_period: run the image logging tasks every this many steps;
                a value <= 0 skips creating the image tasks.
            nb_images: number of columns in the synthetic-samples figure.
            do_phase_space: add a second (phase-space) row to the synthetic
                samples plot; defaults to True iff ``phase_space_dim == 2``.

        Returns:
            A ``Monitor`` instance, or ``None`` when no TensorBoard directory
            is configured or ``scalar_period < 1``.
        """
        if do_phase_space is None:
            do_phase_space = self.model.phase_space_dim == 2
        # Monitoring disabled: nowhere to write, or an invalid period.
        if self.experiment.tensorboard_dir is None or scalar_period < 1:
            return None

        def create_bloss_tasks(directory):
            # One scalar task per component of the batched-loss vector that
            # the caller passes to the monitor via the `train_bloss` keyword.
            bloss_names = [
                '-ly', '-lx', 'penalty_term', 'alpha_term', '-H', '+KL'
            ]
            bloss_tasks = []

            def create_lambda(i):
                # Factory binds `i` now; a bare lambda inside the loop would
                # late-bind and make every task log the last component.
                return lambda train_bloss=None, **kwargs: train_bloss[i]

            for i, name in enumerate(bloss_names):
                bloss_tasks.append(
                    ScalarToTensorBoard(directory, create_lambda(i),
                                        'bloss/' + name))
            return bloss_tasks

        train_dir = os.path.join(self.experiment.tensorboard_dir, 'train')
        test_dir = os.path.join(self.experiment.tensorboard_dir, 'test')

        # diff_task = ModelToTensorBoard(train_dir, self.model.sde_model.diffusion)
        # drift_task = ModelToTensorBoard(train_dir, self.model.sde_model.drift_svgp)
        diff_task = []
        drift_task = []

        # Train loss is supplied by the caller via the `train_loss` keyword;
        # the test loss is recomputed here on each invocation.
        train_loss = ScalarToTensorBoard(
            train_dir, lambda train_loss=None, **kwargs: train_loss, 'loss')
        test_loss = ScalarToTensorBoard(
            test_dir,
            lambda epoch=None, kl_scheduler=None, **kwargs: self.test_loss(
                epoch, kl_scheduler),
            'loss')

        train_bloss_list = create_bloss_tasks(train_dir)

        # train_bloss_list = []  # TODO: remove or add

        # Grab one chunk of data to drive the plotting/diagnostic callbacks.
        # NOTE(review): `y`, `y_input` and `y_target` are only bound if both
        # generators yield at least once — confirm the datasets are non-empty,
        # otherwise the closures below raise NameError when first executed.
        generator = self.experiment.test_dataset if self.experiment.has_test else self.experiment.train_dataset
        y_inputs = []
        y_targets = []
        for y in generator.take(1):
            for y_input, y_target in self.tbptt_chunks_generator(y):
                break
        #         y_inputs.append(y_input)
        #         y_targets.append(y_target)
        # y_input = tf.concat(y_inputs, axis=1)
        # y_target = tf.concat(y_targets, axis=1)

        def calc_drift_error(**kwargs):
            # Mean squared one-step drift prediction error on fast samples.
            samples, entropies, encoded_dist, q0_stats, states = draw_fast_samples(
                self.model, None, y_input)
            fx, var_fx = self.model.sde_model.drift_svgp.predict_f(
                tf.reshape(samples, (-1, samples.shape[-1])))
            fx = tf.reshape(fx, samples.shape)
            return tf.reduce_mean(
                tf.square(samples[..., 1:, :] - samples[..., :-1, :] -
                          fx[..., :-1, :]))

        drift_error = ScalarToTensorBoard(train_dir, calc_drift_error,
                                          'drift_error')
        beta_alpha = ScalarToTensorBoard(
            train_dir, lambda **kwargs: tf.reduce_mean(
                self.model.sde_model.diffusion.expected_diffusion()),
            'beta_div_alpha')
        if imgs_period > 0:
            print('Creating image callbacks')
            images_dir = os.path.join(self.experiment.tensorboard_dir,
                                      'images')

            nrows = 2 if self.model.phase_space_dim > 3 else 1
            encoded_samples = ImageToTensorBoard(
                images_dir,
                lambda f, a: plot_encoded_samples(f, a, self.model, y_input),
                'encoded_samples',
                fig_kw={'figsize': (12, 12)},
                subplots_kw={
                    'nrows': nrows,
                    'ncols': np.ceil(5 / 2).astype(int)
                })

            def plot_synth(fig, axes):
                plot_synthetic_samples(fig,
                                       axes,
                                       self.model,
                                       y_input,
                                       y_target,
                                       simulation_steps=y.shape[-2])

            nrows = 2 if do_phase_space else 1
            synthetic_samples = ImageToTensorBoard(
                images_dir,
                plot_synth,
                'synthetic_samples',
                fig_kw={'figsize': (12, 12)},
                subplots_kw={
                    'nrows': nrows,
                    'ncols': nb_images
                })

            def plot_dec(fig, axes):
                plot_decoder(fig, axes, self.model, y_input, y_target)

            nrows = 2 if self.experiment.batch_size > 1 else 1
            dec_images = ImageToTensorBoard(
                images_dir,
                plot_dec,
                'decoder',
                fig_kw={'figsize': (12, 12)},
                subplots_kw={
                    'nrows': nrows,
                    'ncols': min(self.experiment.batch_size // nrows, 2)
                })
            # NOTE(review): `nrows` here re-uses the decoder grid's value
            # computed just above — confirm that is intended for the drift grid.
            drift_images = ImageToTensorBoard(
                images_dir,
                lambda fig, axes: plot_drift_predictions(
                    fig, axes, self.model, y_input),
                'drift',
                fig_kw={'figsize': (12, 12)},
                subplots_kw={
                    'nrows': nrows,
                    'ncols': self.model.sde_model.dimension
                })

            # NOTE(review): `drift_error` and `beta_alpha` are created above
            # but their task group is commented out in this branch.
            monitor = Monitor(
                MonitorTaskGroup([train_loss, test_loss] + train_bloss_list,
                                 period=scalar_period),
                # MonitorTaskGroup([drift_error, beta_alpha], period=scalar_period),
                MonitorTaskGroup([
                    synthetic_samples, dec_images, encoded_samples,
                    drift_images
                ],
                                 period=imgs_period))
            print('done')
        else:
            monitor = Monitor(
                MonitorTaskGroup([train_loss, test_loss] + train_bloss_list,
                                 period=scalar_period),
                MonitorTaskGroup([drift_error, beta_alpha],
                                 period=scalar_period),
            )
        return monitor