Example #1
    def _test_checkpoint_roundtrip(self, use_global_step: bool, num_checkpoints: Optional[int] = 5):
        """
        Performs a save/restore roundtrip, either with or without using `global_step`.
        Note that if `global_step` is used, the save will create one checkpoint for each
        value of the global step.
        """

        with tempfile.TemporaryDirectory() as tmp_event_dir:

            # Create a variable and write several checkpoints
            with session_context(tf.Graph()) as session:
                dummy_var = self._create_dummy_variable(session)
                monitor_context = mon.MonitorContext()
                monitor_context.session = session
                if use_global_step:
                    monitor_context.global_step_tensor = mon.create_global_step(session)
                monitor_task = mon.CheckpointTask(tmp_event_dir)

                for i in range(num_checkpoints):
                    session.run(dummy_var.assign(i))
                    if use_global_step:
                        session.run(monitor_context.global_step_tensor.assign(10 * i))
                    monitor_task(monitor_context)

            # Restore the session and read the variables.
            # Verify that the latest checkpoint was restored.
            with session_context(tf.Graph()) as session:
                dummy_var = self._create_dummy_variable(session)
                global_step_tensor = mon.create_global_step(session) if use_global_step else None
                mon.restore_session(session, tmp_event_dir)
                self.assertEqual(session.run(dummy_var), num_checkpoints - 1)
                if use_global_step:
                    self.assertEqual(session.run(global_step_tensor), 10 * (num_checkpoints - 1))
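A note on what the assertions rely on (my reading of the test): with `use_global_step` enabled, each loop iteration saves a checkpoint under a distinct global step (0, 10, 20, ...), so the directory ends up holding `num_checkpoints` checkpoints and `mon.restore_session` restores the latest one, which is why the variable comes back as `num_checkpoints - 1`.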
Example #2
    def _test_checkpoint_roundtrip(self, use_global_step: bool, num_checkpoints: Optional[int] = 5):
        """
        Performs a save/restore roundtrip, either with or without using `global_step`.
        Note that if `global_step` is used, the save will create one checkpoint for each
        value of the global step.
        """

        with tempfile.TemporaryDirectory() as tmp_event_dir:

            # Create a variable and write several checkpoints
            with session_context(tf.Graph()) as session:
                dummy_var = self._create_dummy_variable(session)
                monitor_context = mon.MonitorContext()
                monitor_context.session = session
                if use_global_step:
                    monitor_context.global_step_tensor = mon.create_global_step(session)
                monitor_task = mon.CheckpointTask(tmp_event_dir)

                for i in range(num_checkpoints):
                    session.run(dummy_var.assign(i))
                    if use_global_step:
                        session.run(monitor_context.global_step_tensor.assign(10 * i))
                    monitor_task(monitor_context)

            # Restore the session and read the variables.
            # Verify that the latest checkpoint was restored.
            with session_context(tf.Graph()) as session:
                dummy_var = self._create_dummy_variable(session)
                global_step_tensor = mon.create_global_step(session) if use_global_step else None
                mon.restore_session(session, tmp_event_dir)
                self.assertEqual(session.run(dummy_var), num_checkpoints - 1)
                if use_global_step:
                    self.assertEqual(session.run(global_step_tensor), 10 * (num_checkpoints - 1))
Example #3
def test_exp_quadrature_diag(create_kernel):
    Xmu = DataExpQuadrature.Xmu
    Xcov = DataExpQuadrature.Xcov[0, :, :, :]
    with session_context():
        k = create_kernel(kernels)
        a = k.compute_eKdiag(Xmu, Xcov)
    with session_context():
        ek = create_kernel(ekernels)
        b = ek.compute_eKdiag(Xmu, Xcov)
    assert_allclose(a, b, rtol=DataExpQuadrature.threshold)
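The pattern shared by these expectation tests, as I read it: the same kernel is constructed twice, once from the plain `kernels` module (whose expectations fall back to Gauss-Hermite quadrature) and once from `ekernels` (which provides analytic expectations), and the two `eKdiag` results must agree to within `rtol`.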
Example #4
def test_exp_quadrature_exKxz(create_kernel):
    Z = DataExpQuadrature.Z
    Xmu = DataExpQuadrature.Xmu
    Xcov = DataExpQuadrature.Xcov[0]
    with session_context():
        k = create_kernel(kernels)
        k.num_gauss_hermite_points = DataExpQuadrature.num_gauss_hermite_points
        a = k.compute_exKxz(Z, Xmu, Xcov)
    with session_context():
        ek = create_kernel(ekernels)
        b = ek.compute_exKxz(Z, Xmu, Xcov)
    assert_allclose(a, b, rtol=DataExpQuadrature.threshold)
Example #5
    def test_update_scipy_optimiser(self):
        """
        Checks that the `update_optimiser` function updates the ScipyOptimizer state from the
        model parameters, and that it sets the `optimiser_updated` flag to True.
        """

        with session_context(tf.Graph()):
            model = create_linear_model()
            optimiser = gpflow.train.ScipyOptimizer()
            context = mon.MonitorContext()
            context.session = model.enquire_session()
            context.optimiser = optimiser
            w, b, var = model.w.value, model.b.value, model.var.value
            call_count = 0

            def step_callback(*args, **kwargs):
                nonlocal model, optimiser, context, w, b, var, call_count
                context.optimiser_updated = False
                mon.update_optimiser(context, *args, **kwargs)
                w_new, b_new, var_new = model.enquire_session().run([model.w.unconstrained_tensor,
                                                                     model.b.unconstrained_tensor,
                                                                     model.var.unconstrained_tensor])
                self.assertTrue(np.alltrue(np.not_equal(w, w_new)))
                self.assertTrue(np.alltrue(np.not_equal(b, b_new)))
                self.assertTrue(np.alltrue(np.not_equal(var, var_new)))
                self.assertTrue(context.optimiser_updated)
                call_count += 1
                w, b, var = w_new, b_new, var_new

            optimiser.minimize(model, maxiter=10, step_callback=step_callback)
            self.assertGreater(call_count, 0)
Example #6
    def test_with_natgrad_optimiser(self):
        """
        Test the monitor with the Natural Gradient optimiser.
        """

        def optimise(model, step_callback, _) -> None:
            """
            Optimisation function that creates and calls the NatGradOptimizer optimiser.
            """
            var_list = [(model.q_mu, model.q_sqrt)]
            # we don't want adam optimizing these
            model.q_mu.set_trainable(False)
            model.q_sqrt.set_trainable(False)

            optimiser = gpflow.train.NatGradOptimizer(1.0)
            optimiser.minimize(model, maxiter=10, var_list=var_list, step_callback=step_callback)

        with session_context(tf.Graph()):
            # NatGrad optimiser works only with variational parameters. So we can't use the
            # dummy linear model here.
            model_data = create_leaner_model_data(20)
            z = np.linspace(0, 1, 5)[:, None]
            model = gpflow.models.SVGP(model_data.x, model_data.y, gpflow.kernels.RBF(1),
                                       gpflow.likelihoods.Gaussian(), Z=z)
            self._optimise_model(model, optimise)
Example #7
    def test_with_natgrad_optimiser(self):
        """
        Test the monitor with the Natural Gradient optimiser.
        """

        def optimise(model, step_callback, _) -> None:
            """
            Optimisation function that creates and calls the NatGradOptimizer optimiser.
            """
            var_list = [(model.q_mu, model.q_sqrt)]
            # we don't want adam optimizing these
            model.q_mu.set_trainable(False)
            model.q_sqrt.set_trainable(False)

            optimiser = gpflow.train.NatGradOptimizer(1.0)
            optimiser.minimize(model, maxiter=10, var_list=var_list, step_callback=step_callback)

        with session_context(tf.Graph()):
            # NatGrad optimiser works only with variational parameters. So we can't use the
            # dummy linear model here.
            model_data = create_leaner_model_data(20)
            z = np.linspace(0, 1, 5)[:, None]
            model = gpflow.models.SVGP(model_data.x, model_data.y, gpflow.kernels.RBF(1),
                                       gpflow.likelihoods.Gaussian(), Z=z)
            self._optimise_model(model, optimise)
Example #8
    def test_print_timings(self):
        """
        Tests rate calculation for the PrintTimingsTask (doesn't test the actual printing)
        """
        with session_context(tf.Graph()):
            monitor_task = mon.PrintTimingsTask()
            monitor_task._print_timings = mock.MagicMock()
            monitor_context = mon.MonitorContext()
            monitor_context.session = tf.Session()
            monitor_context.global_step_tensor = mon.create_global_step(monitor_context.session)
            monitor_context.init_global_step = 100

            # First call
            monitor_context.iteration_no = 10
            monitor_context.total_time = 20.0
            monitor_context.optimisation_time = 16.0
            monitor_context.session.run(monitor_context.global_step_tensor.assign(150))
            monitor_task(monitor_context)
            args = monitor_task._print_timings.call_args_list[0][0]
            self.assertTupleEqual(args, (10, 150, 0.5, 0.5, 3.125, 3.125))

            # Second call
            monitor_context.iteration_no = 24
            monitor_context.total_time = 30.0
            monitor_context.optimisation_time = 24.0
            monitor_context.session.run(monitor_context.global_step_tensor.assign(196))
            monitor_task(monitor_context)
            args = monitor_task._print_timings.call_args_list[1][0]
            self.assertTupleEqual(args, (24, 196, 0.8, 1.4, 4.0, 5.75))
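The expected tuples can be reconstructed from the context values (my reading of the six arguments: iteration number, global step, then total and recent rates). First call: 10 iterations in 20 s gives 0.5 it/s for both the total and recent iteration rate, and (150 - 100) global steps in 16 s of optimisation time gives 3.125 steps/s for both step rates. Second call: total iteration rate 24/30 = 0.8, recent rate (24 - 10)/(30 - 20) = 1.4; total step rate (196 - 100)/24 = 4.0, recent step rate (196 - 150)/(24 - 16) = 5.75.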
Example #9
def test_saving_deep_parameterized_object(session_tf, filename,
                                          deep_structure):
    sess_a = session_tf
    gp.Saver().save(filename, deep_structure)
    with session_context() as sess_b:
        copy = gp.Saver().load(filename)
        equal_params(deep_structure.a,
                     copy.a,
                     session_a=sess_a,
                     session_b=sess_b)
        equal_params(deep_structure.b,
                     copy.b,
                     session_a=sess_a,
                     session_b=sess_b)
        equal_params(deep_structure.c.a,
                     copy.c.a,
                     session_a=sess_a,
                     session_b=sess_b)
        equal_params(deep_structure.c.b,
                     copy.c.b,
                     session_a=sess_a,
                     session_b=sess_b)
        equal_params(deep_structure.c.c.a,
                     copy.c.c.a,
                     session_a=sess_a,
                     session_b=sess_b)
        equal_params(deep_structure.c.c.b,
                     copy.c.c.b,
                     session_a=sess_a,
                     session_b=sess_b)
Example #10
    def test_print_timings(self):
        """
        Tests rate calculation for the PrintTimingsTask (doesn't test the actual printing)
        """
        with session_context(tf.Graph()):
            monitor_task = mon.PrintTimingsTask()
            monitor_task._print_timings = mock.MagicMock()
            monitor_context = mon.MonitorContext()
            monitor_context.session = tf.Session()
            monitor_context.global_step_tensor = mon.create_global_step(monitor_context.session)
            monitor_context.init_global_step = 100

            # First call
            monitor_context.iteration_no = 10
            monitor_context.total_time = 20.0
            monitor_context.optimisation_time = 16.0
            monitor_context.session.run(monitor_context.global_step_tensor.assign(150))
            monitor_task(monitor_context)
            args = monitor_task._print_timings.call_args_list[0][0]
            self.assertTupleEqual(args, (10, 150, 0.5, 0.5, 3.125, 3.125))

            # Second call
            monitor_context.iteration_no = 24
            monitor_context.total_time = 30.0
            monitor_context.optimisation_time = 24.0
            monitor_context.session.run(monitor_context.global_step_tensor.assign(196))
            monitor_task(monitor_context)
            args = monitor_task._print_timings.call_args_list[1][0]
            self.assertTupleEqual(args, (24, 196, 0.8, 1.4, 4.0, 5.75))
Example #11
def test_quadrature(white, mean):
    with session_context() as session:
        c = DataQuadrature
        d = c.tensors(white, mean)
        quad_args = d.Xmu, d.Xvar, c.H, c.D_in, (c.D_out,)
        mean_quad = mvnquad(d.mean_fn, *quad_args)
        var_quad = mvnquad(d.var_fn, *quad_args)
        mean_sq_quad = mvnquad(d.mean_sq_fn, *quad_args)
        mean_analytic, var_analytic = uncertain_conditional(
            d.Xmu,
            d.Xvar,
            d.feat,
            d.kern,
            d.q_mu,
            d.q_sqrt,
            mean_function=d.mean_function,
            full_cov_output=False,
            white=white)

        mean_quad, var_quad, mean_sq_quad = session.run(
            [mean_quad, var_quad, mean_sq_quad], feed_dict=d.feed_dict)
        var_quad = var_quad + (mean_sq_quad - mean_quad**2)
        mean_analytic, var_analytic = session.run(
            [mean_analytic, var_analytic], feed_dict=d.feed_dict)

        assert_almost_equal(mean_quad, mean_analytic, decimal=6)
        assert_almost_equal(var_quad, var_analytic, decimal=6)
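The variance correction on the quadrature side is the law of total variance: Var[f] = E[Var[f|X]] + Var[E[f|X]] = E[var] + (E[mean^2] - E[mean]^2), which is why `mean_sq_quad - mean_quad**2` is added to `var_quad` before comparing against the analytic `var_analytic`.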
Example #12
def test_saving_gpflow_model(session_tf, filename, model):
    x_new = Data.x_new()
    predict_origin = model.predict_f(x_new)
    gp.Saver().save(filename, model)
    with session_context() as session:
        loaded = gp.Saver().load(filename)
        predict_loaded = loaded.predict_f(x_new)
        assert_allclose(predict_origin, predict_loaded)
Example #13
def test_loading_without_autocompile(session_tf, filename, model):
    gp.Saver().save(filename, model)
    with session_context() as session:
        context = gp.SaverContext(autocompile=False)
        loaded = gp.Saver().load(filename, context=context)
        assert loaded.is_built(session_tf.graph) == gp.Build.NO
        assert loaded.is_built(session.graph) == gp.Build.NO
        assert not any(loaded.trainable_tensors)
Example #14
def test_diagquad_1d(mu1, var1):
    with session_context() as session:
        quad = gpflow.quadrature.ndiagquad(
                lambda *X: tf.exp(X[0]), 25,
                [cast(mu1)], [cast(var1)])
        res = session.run(quad)
        expected = np.exp(mu1 + var1/2)
        assert_allclose(res, expected, atol=1e-10)
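The closed form being tested is the lognormal mean: for X ~ N(mu, var), E[exp(X)] = exp(mu + var/2). Below is a minimal standalone sanity check of that identity in plain NumPy, independent of gpflow; the `mu1`, `var1` values are illustrative, not the fixture's:

import numpy as np

# Monte Carlo check of E[exp(X)] = exp(mu + var/2) for X ~ N(mu, var).
rng = np.random.RandomState(0)
mu1, var1 = 0.3, 0.2  # illustrative values
samples = rng.normal(mu1, np.sqrt(var1), size=1_000_000)
print(np.exp(mu1 + var1 / 2))   # closed form: ~1.4918
print(np.exp(samples).mean())   # Monte Carlo estimate: ~1.49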
Example #15
def test_saving_gpflow_model(session_tf, filename, model):
    x_new = Data.x_new()
    predict_origin = model.predict_f(x_new)
    gp.Saver().save(filename, model)
    with session_context() as session:
        loaded = gp.Saver().load(filename)
        predict_loaded = loaded.predict_f(x_new)
        assert_allclose(predict_origin, predict_loaded)
Example #16
def test_loading_without_autocompile(session_tf, filename, model):
    gp.Saver().save(filename, model)
    with session_context() as session:
        context = gp.SaverContext(autocompile=False)
        loaded = gp.Saver().load(filename, context=context)
        assert loaded.is_built(session_tf.graph) == gp.Build.NO
        assert loaded.is_built(session.graph) == gp.Build.NO
        assert not any(loaded.trainable_tensors)
Example #17
def test_loading_into_specific_session(session_tf, filename, model):
    x_new = Data.x_new()
    predict_origin = model.predict_f(x_new)
    gp.Saver().save(filename, model)
    with session_context() as session:
        context = gp.SaverContext(session=session)
        loaded = gp.Saver().load(filename, context=context)
        predict_loaded = loaded.predict_f(x_new, session=session)
    assert_allclose(predict_origin, predict_loaded)
Example #18
def test_diagquad_with_kwarg(mu2, var2):
    with session_context() as session:
        alpha = np.array([2.5, -1.3])
        quad = gpflow.quadrature.ndiagquad(
                lambda X, Y: tf.exp(X * Y), 25,
                cast(mu2), cast(var2), Y=alpha)
        res = session.run(quad)
        expected = np.exp(alpha * mu2 + alpha**2 * var2/2)
        assert_allclose(res, expected, atol=1e-10)
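With `Y=alpha` passed as a kwarg, `ndiagquad` treats it as a deterministic input rather than an integration variable, so the integral is the normal moment generating function evaluated elementwise at alpha: E[exp(alpha * X)] = exp(alpha * mu + alpha**2 * var / 2), which is exactly the `expected` array.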
Example #19
def test_loading_into_specific_session(session_tf, filename, model):
    x_new = Data.x_new()
    predict_origin = model.predict_f(x_new)
    gp.Saver().save(filename, model)
    with session_context() as session:
        context = gp.SaverContext(session=session)
        loaded = gp.Saver().load(filename, context=context)
        predict_loaded = loaded.predict_f(x_new, session=session)
    assert_allclose(predict_origin, predict_loaded)
Example #20
def test_diagquad_2d(mu1, var1, mu2, var2):
    with session_context() as session:
        alpha = 2.5
        quad = gpflow.quadrature.ndiagquad(
                lambda *X: tf.exp(X[0] + alpha * X[1]), 35,
                [cast(mu1), cast(mu2)], [cast(var1), cast(var2)])
        res = session.run(quad)
        expected = np.exp(mu1 + var1/2 + alpha * mu2 + alpha**2 * var2/2)
        assert_allclose(res, expected, atol=1e-10)
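Because `ndiagquad` integrates the two inputs as independent Gaussians, the 2-D expectation factorises: E[exp(X1 + alpha * X2)] = E[exp(X1)] * E[exp(alpha * X2)] = exp(mu1 + var1/2) * exp(alpha * mu2 + alpha**2 * var2/2), matching `expected`.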
Example #21
def test_saving_deep_parameterized_object(session_tf, filename, deep_structure):
    sess_a = session_tf
    gp.Saver().save(filename, deep_structure)
    with session_context() as sess_b:
        copy = gp.Saver().load(filename)
        equal_params(deep_structure.a, copy.a, session_a=sess_a, session_b=sess_b)
        equal_params(deep_structure.b, copy.b, session_a=sess_a, session_b=sess_b)
        equal_params(deep_structure.c.a, copy.c.a, session_a=sess_a, session_b=sess_b)
        equal_params(deep_structure.c.b, copy.c.b, session_a=sess_a, session_b=sess_b)
        equal_params(deep_structure.c.c.a, copy.c.c.a, session_a=sess_a, session_b=sess_b)
        equal_params(deep_structure.c.c.b, copy.c.c.b, session_a=sess_a, session_b=sess_b)
Example #22
    def test_with_scipy_optimiser(self):
        """
        Tests the monitor with the Scipy optimiser
        """

        def optimise(model, step_callback, _) -> None:
            """
            Optimisation function that creates and calls ScipyOptimizer optimiser.
            """
            optimiser = gpflow.train.ScipyOptimizer()
            optimiser.minimize(model, maxiter=10, step_callback=step_callback)

        with session_context(tf.Graph()):
            self._optimise_model(create_linear_model(), optimise)
Example #23
    def test_with_tensorflow_optimiser(self):
        """
        Tests the monitor with a tensorflow optimiser
        """

        def optimise(model, step_callback, global_step_tensor) -> None:
            """
            Optimisation function that creates and calls the tensorflow AdamOptimizer optimiser.
            """
            optimiser = gpflow.train.AdamOptimizer(0.01)
            optimiser.minimize(model, maxiter=10, step_callback=step_callback,
                               global_step=global_step_tensor)

        with session_context(tf.Graph()):
            self._optimise_model(create_linear_model(), optimise, True)
Example #24
    def test_with_tensorflow_optimiser(self):
        """
        Tests the monitor with a tensorflow optimiser
        """

        def optimise(model, step_callback, global_step_tensor) -> None:
            """
            Optimisation function that creates and calls the tensorflow AdamOptimizer optimiser.
            """
            optimiser = gpflow.train.AdamOptimizer(0.01)
            optimiser.minimize(model, maxiter=10, step_callback=step_callback,
                               global_step=global_step_tensor)

        with session_context(tf.Graph()):
            self._optimise_model(create_linear_model(), optimise, True)
Example #25
    def test_std_tensorboard_only_scalars(self):
        """
        Tests the standard tensorboard task with scalar parameters only
        """

        with session_context(tf.Graph()):
            model = create_linear_model()

            def task_factory(writer: mon.LogdirWriter):
                return mon.ModelToTensorBoardTask(writer, model, only_scalars=True)

            summary = run_tensorboard_task(task_factory)
            self.assertAlmostEqual(summary['DummyLinearModel/b'].simple_value, float(model.b.value))
            self.assertAlmostEqual(summary['DummyLinearModel/var'].simple_value,
                                   float(model.var.value))
            self.assertAlmostEqual(summary['optimisation/likelihood'].simple_value,
                                   model.compute_log_likelihood(), places=5)
            self.assertNotIn('DummyLinearModel/w', summary.keys())
Example #26
    def test_with_scipy_optimiser(self, update_optimiser):
        """
        Tests the monitor with the Scipy optimiser
        """

        optimiser = gpflow.train.ScipyOptimizer()

        def optimise(model, step_callback, _) -> None:
            """
            Optimisation function that creates and calls ScipyOptimizer optimiser.
            """
            nonlocal optimiser
            optimiser.minimize(model, maxiter=10, step_callback=step_callback)

        with session_context(tf.Graph()):
            self._optimise_model(create_linear_model(), optimise, optimiser=optimiser)

        self.assertGreater(update_optimiser.call_count, 0)
Example #27
def test_no_uncertainty(white, mean):
    with session_context() as sess:
        m = mean_function_factory(Data.rng, mean, Data.D_in, Data.D_out)
        k = gpflow.kernels.RBF(1, variance=Data.rng.rand())
        model = MomentMatchingSVGP(
            Data.X, Data.Y, k, gpflow.likelihoods.Gaussian(),
            mean_function=m, Z=Data.X.copy(), whiten=white)
        model.full_output_cov = False
        gpflow.train.AdamOptimizer().minimize(model, maxiter=50)

        mean1, var1 = model.predict_f(Data.Xnew_mu)
        pred_mm = model.uncertain_predict_f_moment_matching(
                            tf.constant(Data.Xnew_mu), tf.constant(Data.Xnew_covar))
        mean2, var2 = sess.run(pred_mm)

        assert_almost_equal(mean1, mean2)
        for n in range(Data.N_new):
            assert_almost_equal(var1[n, :], var2[n, ...])
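As I read this test from its name and assertions: `Data.Xnew_covar` presumably carries no input uncertainty here, so the moment-matched uncertain prediction must collapse to the ordinary `predict_f` posterior, hence the equality of the means and of each per-point variance slice.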
Example #28
def test_no_uncertainty(white, mean):
    with session_context() as sess:
        m = mean_function_factory(Data.rng, mean, Data.D_in, Data.D_out)
        k = gpflow.kernels.RBF(1, variance=Data.rng.rand())
        model = MomentMatchingSVGP(
            Data.X, Data.Y, k, gpflow.likelihoods.Gaussian(),
            mean_function=m, Z=Data.X.copy(), whiten=white)
        model.full_cov_output = False
        gpflow.train.AdamOptimizer().minimize(model, maxiter=50)

        mean1, var1 = model.predict_f(Data.Xnew_mu)
        pred_mm = model.uncertain_predict_f_moment_matching(
                            tf.constant(Data.Xnew_mu), tf.constant(Data.Xnew_covar))
        mean2, var2 = sess.run(pred_mm)

        assert_almost_equal(mean1, mean2)
        for n in range(Data.N_new):
            assert_almost_equal(var1[n, :], var2[n, ...])
Example #29
    def test_std_tensorboard_only_scalars(self):
        """
        Tests the standard tensorboard task with scalar parameters only
        """

        with session_context(tf.Graph()):
            model = create_linear_model()

            def task_factory(event_dir: str):
                return mon.ModelToTensorBoardTask(event_dir, model, only_scalars=True)

            summary = run_tensorboard_task(task_factory)
            self.assertAlmostEqual(summary['DummyLinearModel/b'].simple_value, float(model.b.value))
            self.assertAlmostEqual(summary['DummyLinearModel/var'].simple_value,
                                   float(model.var.value))
            self.assertAlmostEqual(summary['optimisation/likelihood'].simple_value,
                                   model.compute_log_likelihood(), places=5)
            self.assertNotIn('DummyLinearModel/w', summary.keys())
Example #30
def test_monte_carlo_2_din(white, mean):
    with session_context() as sess:
        k = gpflow.kernels.RBF(DataMC2.D_in, variance=DataMC2.rng.rand())
        m = mean_function_factory(DataMC2.rng, mean, DataMC2.D_in, DataMC2.D_out)
        model = MomentMatchingSVGP(
            DataMC2.X, DataMC2.Y, k, gpflow.likelihoods.Gaussian(),
            mean_function=m, Z=DataMC2.X.copy(), whiten=white)
        model.full_cov_output = True
        gpflow.train.AdamOptimizer().minimize(model)

        pred_mm = model.uncertain_predict_f_moment_matching(
                            tf.constant(DataMC2.Xnew_mu), tf.constant(DataMC2.Xnew_covar))
        mean1, var1 = sess.run(pred_mm)

        for n in range(DataMC2.N_new):
            mean2, var2 = model.uncertain_predict_f_monte_carlo(
                DataMC2.Xnew_mu[n, ...],
                DataMC2.L[n, ...])
            assert_almost_equal(mean1[n, ...], mean2, decimal=2)
            assert_almost_equal(var1[n, ...], var2, decimal=2)
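Here the check runs in the opposite direction: the analytic moment-matched prediction is compared point by point against a Monte Carlo estimate from `uncertain_predict_f_monte_carlo`, so the loose `decimal=2` tolerance reflects sampling noise rather than a weaker analytic result.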
Example #31
def test_monte_carlo_2_din(white, mean):
    with session_context() as sess:
        k = gpflow.kernels.RBF(DataMC2.D_in, variance=DataMC2.rng.rand())
        m = mean_function_factory(DataMC2.rng, mean, DataMC2.D_in, DataMC2.D_out)
        model = MomentMatchingSVGP(
            DataMC2.X, DataMC2.Y, k, gpflow.likelihoods.Gaussian(),
            mean_function=m, Z=DataMC2.X.copy(), whiten=white)
        model.full_output_cov = True
        gpflow.train.AdamOptimizer().minimize(model)

        pred_mm = model.uncertain_predict_f_moment_matching(
                            tf.constant(DataMC2.Xnew_mu), tf.constant(DataMC2.Xnew_covar))
        mean1, var1 = sess.run(pred_mm)

        for n in range(DataMC2.N_new):
            mean2, var2 = model.uncertain_predict_f_monte_carlo(
                DataMC2.Xnew_mu[n, ...],
                DataMC2.L[n, ...])
            assert_almost_equal(mean1[n, ...], mean2, decimal=2)
            assert_almost_equal(var1[n, ...], var2, decimal=2)
Example #32
    def test_std_tensorboard_all_parameters(self):
        """
        Tests the standard tensorboard task with all parameters and extra summaries
        """
        with session_context(tf.Graph()):
            model = create_linear_model()

            def task_factory(writer: mon.LogdirWriter):
                # create 2 extra summaries
                dummy_vars = [tf.Variable(5.0), tf.Variable(6.0)]
                dummy_vars_init = tf.variables_initializer(dummy_vars)
                model.enquire_session().run(dummy_vars_init)
                add_summaries = [tf.summary.scalar('dummy' + str(i), dummy_var)
                                 for i, dummy_var in enumerate(dummy_vars)]
                return mon.ModelToTensorBoardTask(writer, model, only_scalars=False,
                                                  additional_summaries=add_summaries)

            summary = run_tensorboard_task(task_factory)
            self.assertAlmostEqual(summary['dummy0'].simple_value, 5.0)
            self.assertAlmostEqual(summary['dummy1'].simple_value, 6.0)
            self.assertIn('DummyLinearModel/w', summary.keys())
Example #33
def test_quadrature(white, mean):
    with session_context() as session:
        c = DataQuadrature
        d = c.tensors(white, mean)
        quad_args = d.Xmu, d.Xvar, c.H, c.D_in, (c.D_out,)
        mean_quad = mvnquad(d.mean_fn, *quad_args)
        var_quad = mvnquad(d.var_fn, *quad_args)
        mean_sq_quad = mvnquad(d.mean_sq_fn, *quad_args)
        mean_analytic, var_analytic = uncertain_conditional(
            d.Xmu, d.Xvar, d.feat, d.kern,
            d.q_mu, d.q_sqrt,
            mean_function=d.mean_function,
            full_cov_output=False,
            white=white)

        mean_quad, var_quad, mean_sq_quad = session.run(
            [mean_quad, var_quad, mean_sq_quad], feed_dict=d.feed_dict)
        var_quad = var_quad + (mean_sq_quad - mean_quad**2)
        mean_analytic, var_analytic = session.run(
            [mean_analytic, var_analytic], feed_dict=d.feed_dict)

        assert_almost_equal(mean_quad, mean_analytic, decimal=6)
        assert_almost_equal(var_quad, var_analytic, decimal=6)
Example #34
    def test_lml_tensorboard(self):
        """
        Tests the LML tensorboard task
        """
        with session_context(tf.Graph()):
            # Create a number of models with the same set of parameters and an equal number
            # of data points each, except the last one. The data from these models will
            # mimic mini-batches.
            mini_batches = 10
            complete_size = 12
            incomplete_size = 7
            mini_batch_sizes = [complete_size if i < mini_batches - 1 else incomplete_size
                                for i in range(mini_batches)]
            mini_batch_data = [create_leaner_model_data(size) for size in mini_batch_sizes]
            mini_models = [DummyLinearModel(d.x, d.y, d.w, d.b, d.var)
                           for d in mini_batch_data]
            # Calculate average log likelihood across all models
            avg_lml = sum(mdl.compute_log_likelihood() * size
                          for mdl, size in zip(mini_models, mini_batch_sizes))
            avg_lml /= sum(mini_batch_sizes)

            # Join together the datasets from all mini-batch models
            xs = np.concatenate(tuple(d.x for d in mini_batch_data))
            ys = np.concatenate(tuple(d.y for d in mini_batch_data))

            # Create model with the same parameters and joint datasets
            d = mini_batch_data[0]
            model = DummyLinearModel(xs, ys, d.w, d.b, d.var)

            def task_factory(event_dir: str):
                return mon.LmlToTensorBoardTask(event_dir, model, minibatch_size=complete_size,
                                                display_progress=False)

            # Run LML task, extract the LML value and compare with the one computed over models with
            # small data sets
            summary = run_tensorboard_task(task_factory)
            self.assertAlmostEqual(summary['DummyLinearModel/full_lml'].simple_value, avg_lml,
                                   places=5)
Example #35
    def test_lml_tensorboard(self):
        """
        Tests the LML tensorboard task
        """
        with session_context(tf.Graph()):
            # Create a number of models with the same set of parameters and an equal number
            # of data points each, except the last one. The data from these models will
            # mimic mini-batches.
            mini_batches = 10
            complete_size = 12
            incomplete_size = 7
            mini_batch_sizes = [complete_size if i < mini_batches - 1 else incomplete_size
                                for i in range(mini_batches)]
            mini_batch_data = [create_leaner_model_data(size) for size in mini_batch_sizes]
            mini_models = [DummyLinearModel(d.x, d.y, d.w, d.b, d.var)
                           for d in mini_batch_data]
            # Calculate average log likelihood across all models
            avg_lml = sum(mdl.compute_log_likelihood() * size
                          for mdl, size in zip(mini_models, mini_batch_sizes))
            avg_lml /= sum(mini_batch_sizes)

            # Join together the datasets from all mini-batch models
            xs = np.concatenate(tuple(d.x for d in mini_batch_data))
            ys = np.concatenate(tuple(d.y for d in mini_batch_data))

            # Create model with the same parameters and joint datasets
            d = mini_batch_data[0]
            model = DummyLinearModel(xs, ys, d.w, d.b, d.var)

            def task_factory(writer: mon.LogdirWriter):
                return mon.LmlToTensorBoardTask(writer, model, minibatch_size=complete_size,
                                                display_progress=False)

            # Run LML task, extract the LML value and compare with the one computed over models with
            # small data sets
            summary = run_tensorboard_task(task_factory)
            self.assertAlmostEqual(summary['DummyLinearModel/full_lml'].simple_value, avg_lml,
                                   places=5)
Example #36
def _exec_notebook_ts(notebook_filename):
    with session_context():
        ts = time.time()
        _exec_notebook(notebook_filename)
        elapsed = time.time() - ts
        print(notebook_filename, 'took {0} seconds.'.format(elapsed))
Example #37
class Data:
    rng = np.random.RandomState(1)
    num_data = 5
    num_ind = 4
    D_in = 2
    D_out = 2

    Xmu = rng.randn(num_data, D_in)
    L = gen_L(rng, num_data, D_in, D_in)
    Xvar = np.array([l @ l.T for l in L])
    Z = rng.randn(num_ind, D_in)

    # The distributions don't need to be compiled (no Parameter objects),
    # but their members should be Tensors created in the same graph.
    graph = tf.Graph()
    with test_util.session_context(graph) as sess:
        gauss = Gaussian(tf.constant(Xmu), tf.constant(Xvar))
        dirac = Gaussian(tf.constant(Xmu),
                         tf.constant(np.zeros((num_data, D_in, D_in))))
        gauss_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(rng.rand(num_data, D_in)))
        dirac_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(np.zeros((num_data, D_in))))
        dirac_markov_gauss = MarkovGaussian(
            tf.constant(Xmu), tf.constant(np.zeros((2, num_data, D_in, D_in))))

        # create the covariance for the pairwise markov-gaussian
        dummy_gen = lambda rng, n, *shape: np.array(
            [rng.randn(*shape) for _ in range(n)])
        L_mg = dummy_gen(rng, num_data, D_in, 2 * D_in)  # N+1 x D x 2D
        LL = np.concatenate((L_mg[:-1], L_mg[1:]), 1)  # N x 2D x 2D
        Xcov = LL @ np.transpose(LL, (0, 2, 1))
        Xc = np.concatenate((Xcov[:, :D_in, :D_in], Xcov[-1:, D_in:, D_in:]),
                            0)  # N+1 x D x D
        Xcross = np.concatenate(
            (Xcov[:, :D_in, D_in:], np.zeros(
                (1, D_in, D_in))), 0)  # N+1 x D x D
        Xcc = np.stack([Xc, Xcross])  # 2 x N+1 x D x D

        markov_gauss = MarkovGaussian(Xmu, Xcc)

    with gpflow.decors.defer_build():
        # features
        ip = features.InducingPoints(Z)
        # kernels
        rbf_prod_seperate_dims = kernels.Product([
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[0]),
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[1])
        ])

        rbf_lin_sum = kernels.Sum([
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.Linear(D_in, variance=rng.rand())
        ])

        rbf = kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand())

        lin_kern = kernels.Linear(D_in, variance=rng.rand())

        # mean functions
        lin = mean_functions.Linear(rng.rand(D_in, D_out), rng.rand(D_out))
        iden = mean_functions.Identity(D_in)  # Note: Identity can only be used if D_in == D_out
        zero = mean_functions.Zero(output_dim=D_out)
        const = mean_functions.Constant(rng.rand(D_out))
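A note on the pairwise Markov-Gaussian construction (my reading of the shape comments): `Xcc[0]` stacks the marginal covariances cov(x_n, x_n) for all N+1 states, while `Xcc[1]` holds the cross-covariances cov(x_n, x_{n+1}) between consecutive states, padded with a zero block for the final state. Building both from the same `LL` factor guarantees each pairwise 2D x 2D joint covariance is positive semi-definite.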
Example #38
def _exec_notebook_ts(notebook_filename):
    with session_context():
        ts = time.time()
        _exec_notebook(notebook_filename)
        elapsed = time.time() - ts
        print(notebook_filename, 'took {0} seconds.'.format(elapsed))