Example #1
    def test_invertible_matrices(self):
        for cls in (LooseInvertibleMatrix, StrictInvertibleMatrix):
            for n in [1, 3, 5]:
                m = cls(np.random.randn(n, n))
                self.assertEqual(repr(m), f'{cls.__qualname__}(size={n})')
                self.assertEqual(m.size, n)

                m = tk.layers.jit_compile(m)

                # check the initial value is an orthogonal matrix
                matrix, _ = m(inverse=False, compute_log_det=False)
                inv_matrix, _ = m(inverse=True, compute_log_det=False)
                assert_allclose(np.eye(n), T.matmul(matrix, inv_matrix),
                                rtol=1e-4, atol=1e-6)
                assert_allclose(np.eye(n), T.matmul(inv_matrix, matrix),
                                rtol=1e-4, atol=1e-6)

                # check the invertibility
                check_invertible_matrix(self, m, n)

                # check the gradient
                matrix, log_det = m(inverse=False, compute_log_det=True)
                params = list(tk.layers.iter_parameters(m))
                grads = T.grad(
                    [T.reduce_sum(matrix), T.reduce_sum(log_det)], params)

                # update with gradient, then check the invertibility
                if cls is StrictInvertibleMatrix:
                    for param, grad in zip(params, grads):
                        with T.no_grad():
                            T.assign(param, param + 0.001 * grad)
                    check_invertible_matrix(self, m, n)
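
The helper check_invertible_matrix called above is not shown on this page. As a rough sketch, assuming it mirrors the inline identity check in the test (the actual helper in the tensorkit test suite may differ), it could look like:

def check_invertible_matrix(ctx, m, n):
    # hypothetical sketch: the forward and inverse matrices should multiply to
    # the identity, and their log-determinants should be negatives of each other
    matrix, log_det = m(inverse=False, compute_log_det=True)
    inv_matrix, inv_log_det = m(inverse=True, compute_log_det=True)
    assert_allclose(np.eye(n), T.matmul(matrix, inv_matrix), rtol=1e-4, atol=1e-6)
    assert_allclose(np.eye(n), T.matmul(inv_matrix, matrix), rtol=1e-4, atol=1e-6)
    assert_allclose(T.to_numpy(log_det), -T.to_numpy(inv_log_det), rtol=1e-4, atol=1e-6)
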
Example #2
def eval_step(x, n_z=exp.config.test_n_z):
    with tk.layers.scoped_eval_mode(vae), T.no_grad():
        chain = vae.get_chain(x, n_z=n_z)
        log_qz_given_x = T.reduce_mean(chain.q['z'].log_prob())
        log_pz = T.reduce_mean(chain.p['z'].log_prob())
        log_px_given_z = T.reduce_mean(chain.p['x'].log_prob())
        # ELBO = E[log p(x|z)] + E[log p(z)] - E[log q(z|x)]; note that `kl`
        # below is the negated KL(q(z|x) || p(z)) term of this decomposition
        kl = log_pz - log_qz_given_x
        elbo = log_px_given_z + kl
        nll = -chain.vi.evaluation.is_loglikelihood(reduction='mean')
    return {'elbo': elbo, 'nll': nll, 'kl': kl, 'log p(x|z)': log_px_given_z,
            'log q(z|x)': log_qz_given_x, 'log p(z)': log_pz}
Example #3
def plot_samples(epoch=None):
    epoch = epoch or loop.epoch
    with tk.layers.scoped_eval_mode(vae), T.no_grad():
        # sample 100 images from the model and map the decoder logits to
        # 8-bit grayscale pixel values
        logits = vae.p(n_z=100)['x'].distribution.logits
        images = T.reshape(
            T.cast(T.clip(T.nn.sigmoid(logits) * 255., 0., 255.), dtype=T.uint8),
            [-1, 28, 28],
        )
    utils.save_images_collection(
        images=T.to_numpy(images),
        filename=exp.abspath(f'plotting/{epoch}.png'),
        grid_size=(10, 10),
    )
Example #4
    def test_param_and_buffer(self):
        layer = BaseLayer()

        # add parameter & buffer
        w_initial = T.random.randn([5, 4])
        c_initial = T.random.randn([5, 3])
        add_parameter(layer, 'w', w_initial)
        add_parameter(layer, 'b', None)
        add_buffer(layer, 'c', c_initial)
        add_buffer(layer, 'd', None)

        # get parameter and buffer
        assert_allclose(get_parameter(layer, 'w'), w_initial)
        self.assertIsNone(get_parameter(layer, 'b'))
        assert_allclose(get_buffer(layer, 'c'), c_initial)
        self.assertIsNone(get_buffer(layer, 'd'))

        # assignment
        w_value = np.random.randn(5, 4)
        with T.no_grad():
            T.assign_data(get_parameter(layer, 'w'), w_value)
        assert_allclose(get_parameter(layer, 'w'), w_value)

        # get parameters and buffers
        add_parameter(layer, 'w2', T.as_tensor(w_initial, force_copy=True))
        add_buffer(layer, 'c2', T.as_tensor(c_initial, force_copy=True))

        w = get_parameter(layer, 'w')
        w2 = get_parameter(layer, 'w2')
        c = get_buffer(layer, 'c')
        c2 = get_buffer(layer, 'c2')

        self.assertListEqual(list(iter_parameters(layer)), [w, w2])
        self.assertListEqual(get_parameters(layer), [w, w2])
        self.assertDictEqual(dict(iter_named_parameters(layer)), {'w': w, 'w2': w2})
        self.assertListEqual(list(iter_buffers(layer)), [c, c2])
        self.assertListEqual(get_buffers(layer), [c, c2])
        self.assertDictEqual(dict(iter_named_buffers(layer)), {'c': c, 'c2': c2})

        seq = _MyWrapper(layer)
        self.assertListEqual(list(iter_parameters(seq)), [w, w2])
        self.assertListEqual(list(iter_parameters(seq, recursive=False)), [])
        self.assertListEqual(get_parameters(seq, recursive=False), [])
        self.assertDictEqual(dict(iter_named_parameters(seq)), {'wrapped.w': w, 'wrapped.w2': w2})
        self.assertDictEqual(dict(iter_named_parameters(seq, recursive=False)), {})
        self.assertListEqual(list(iter_buffers(seq)), [c, c2])
        self.assertListEqual(list(iter_buffers(seq, recursive=False)), [])
        self.assertListEqual(get_buffers(seq, recursive=False), [])
        self.assertDictEqual(dict(iter_named_buffers(seq)), {'wrapped.c': c, 'wrapped.c2': c2})
        self.assertDictEqual(dict(iter_named_buffers(seq, recursive=False)), {})
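
_MyWrapper is likewise not defined in this snippet. The 'wrapped.' prefixes in the named-parameter and named-buffer assertions suggest it is simply a layer that stores its child module under the attribute name wrapped; a hypothetical sketch:

class _MyWrapper(BaseLayer):
    # hypothetical sketch of the wrapper used above; only the attribute name
    # 'wrapped' matters for the prefixed names in the assertions
    def __init__(self, wrapped):
        super().__init__()
        self.wrapped = wrapped
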
Example #5
def eval_step(x, y):
    with tk.layers.scoped_eval_mode(net), T.no_grad():
        logits = net(x)
        acc = utils.calculate_acc(logits, y)
    return {'acc': acc}
Example #6
def eval_step(x, n_z=exp.config.test_n_z):
    with tk.layers.scoped_eval_mode(vae), T.no_grad():
        chain = vae.get_chain(x, n_z=n_z)
        loss = chain.vi.training.sgvb(reduction='mean')
        nll = -chain.vi.evaluation.is_loglikelihood(reduction='mean')
    return {'elbo': loss, 'nll': nll}
Example #7
def calculate_acc(logits: T.Tensor, y: T.Tensor) -> T.Tensor:
    with T.no_grad():
        out_y = T.argmax(logits, axis=-1)
        return T.reduce_mean(T.cast(T.equal(out_y, y), dtype=T.float32))
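
A quick usage sketch for this helper (illustrative only; it assumes the same fragment context as the examples above, i.e. np is NumPy, T.as_tensor accepts NumPy arrays, and T.to_numpy converts back). Three of the four predictions below match the labels, so the printed accuracy should be 0.75:

logits = T.as_tensor(
    np.array([[2.0, 0.1], [0.1, 2.0], [2.0, 0.1], [0.1, 2.0]], dtype=np.float32))
y = T.as_tensor(np.array([0, 1, 0, 0], dtype=np.int64))
acc = calculate_acc(logits, y)  # argmax predictions are [0, 1, 0, 1]
print(T.to_numpy(acc))          # expected: 0.75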