Example #1
        def h(cls, value_ndims, feature_axis, m_factory):
            in_shape = make_conv_shape([], 3, [6, 7, 8][:value_ndims - 1])
            inputs = [
                T.random.randn(shape)
                for shape in [[50] + in_shape, [50, 7] + in_shape]
            ]

            out_shape = list(in_shape)
            out_shape[feature_axis] = 4
            module = jit_compile(m_factory(3, 4, activation=tk.layers.Tanh))
            layer = jit_compile(cls(module))

            for adj in [None, make_random_adj_matrix(50)]:
                for x in inputs:
                    y = layer(x, adj)
                    self.assertEqual(T.shape(y),
                                     T.shape(x)[:-value_ndims] + out_shape)

                    # compute the expected output
                    expected, m_front = T.flatten_to_ndims(x, value_ndims + 1)
                    expected = module(expected)
                    expected = T.unflatten_from_ndims(expected, m_front)

                    # assert the output is expected
                    assert_allclose_(y, expected)
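
A note on the helper pair this test leans on: `T.flatten_to_ndims(x, ndims)` appears to merge the leading (batch) dimensions so the result has exactly `ndims` dimensions, and `T.unflatten_from_ndims` restores them. A minimal numpy sketch of that assumed contract:

import numpy as np

def flatten_to_ndims(x, ndims):
    # merge all leading dims so the result has exactly `ndims` dims;
    # also return the original front shape, needed to undo the merge
    front = x.shape[:x.ndim - (ndims - 1)]
    return x.reshape((-1,) + x.shape[x.ndim - (ndims - 1):]), front

def unflatten_from_ndims(x, front):
    # inverse of flatten_to_ndims
    return x.reshape(tuple(front) + x.shape[1:])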
Example #2
        def do_check(**kwargs):
            d = DiscretizedLogistic(mean, log_scale, **kwargs)
            event_ndims = kwargs.get('event_ndims', 0)
            value_shape = T.get_broadcast_shape(T.shape(mean), T.shape(log_scale))
            log_prob_fn_kwargs = copy.copy(kwargs)
            log_prob_fn_kwargs.pop('discretize_sample', None)
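            # the distribution's `discretize_given` argument corresponds to
            # the log-prob function's `discretize` argument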
            log_prob_fn_kwargs['discretize'] = \
                log_prob_fn_kwargs.pop('discretize_given', True)

            def log_prob_fn(t):
                return T.random.discretized_logistic_log_prob(
                    T.as_tensor(t), mean=mean, log_scale=log_scale,
                    **log_prob_fn_kwargs
                )

            check_distribution_instance(
                ctx=self,
                d=d,
                event_ndims=event_ndims,
                batch_shape=value_shape[: len(value_shape) - event_ndims],
                event_shape=value_shape[len(value_shape) - event_ndims:],
                min_event_ndims=0,
                max_event_ndims=len(value_shape),
                log_prob_fn=log_prob_fn,
                # other attributes,
                **kwargs
            )
Example #3
    def test_monte_carlo_objective(self):
        log_p, log_q = prepare_test_payload()

        obj = monte_carlo_objective(log_p, log_q, axis=[0])
        assert_allclose(
            T.reduce_mean(obj),
            monte_carlo_objective(log_p, log_q, axis=[0], reduction='mean'),
            rtol=1e-4, atol=1e-6
        )
        assert_allclose(
            T.reduce_sum(obj),
            monte_carlo_objective(log_p, log_q, axis=[0], reduction='sum'),
            rtol=1e-4, atol=1e-6
        )
        obj_shape = T.shape(obj)
        assert_allclose(obj, T.log_mean_exp(log_p - log_q, axis=[0]))

        obj_k = monte_carlo_objective(log_p, log_q, axis=[0], keepdims=True)
        assert_allclose(
            T.reduce_mean(obj_k),
            monte_carlo_objective(log_p, log_q, axis=[0], keepdims=True, reduction='mean')
        )
        assert_allclose(
            T.reduce_sum(obj_k),
            monte_carlo_objective(log_p, log_q, axis=[0], keepdims=True, reduction='sum')
        )
        self.assertListEqual([1] + obj_shape, T.shape(obj_k))
        assert_allclose(
            obj_k,
            T.log_mean_exp(log_p - log_q, axis=[0], keepdims=True)
        )
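
For reference, the identity this test pins down, written as a standalone numpy sketch (stand-in code, not the library's implementation):

import numpy as np

def log_mean_exp(x, axis=0):
    # numerically stable log(mean(exp(x), axis))
    m = np.max(x, axis=axis, keepdims=True)
    return np.squeeze(m, axis=axis) + \
        np.log(np.mean(np.exp(x - m), axis=axis))

def monte_carlo_objective_np(log_p, log_q, axis=0):
    # the Monte Carlo objective: log of the average importance ratio
    return log_mean_exp(log_p - log_q, axis=axis)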
Example #4
    def test_iwae(self):
        assert_allclose_ = functools.partial(assert_allclose,
                                             rtol=1e-5,
                                             atol=1e-6)

        x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
        wk_hat = f / T.reduce_sum(f, axis=[0], keepdims=True)
        cost = iwae_estimator(log_f, axis=[0])
        assert_allclose_(-cost, iwae_estimator(log_f, axis=[0], negative=True))
        assert_allclose_(T.reduce_mean(cost),
                         iwae_estimator(log_f, axis=[0], reduction='mean'))
        assert_allclose_(T.reduce_sum(cost),
                         iwae_estimator(log_f, axis=[0], reduction='sum'))
        cost_shape = T.shape(cost)
        assert_allclose_(
            T.grad([T.reduce_sum(cost)], [y])[0],
            T.reduce_sum(wk_hat * (2 * x * y), axis=[0]))

        x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
        wk_hat = f / T.reduce_sum(f, axis=[0], keepdims=True)
        cost_k = iwae_estimator(log_f, axis=[0], keepdims=True)
        assert_allclose_(
            T.reduce_mean(cost_k),
            iwae_estimator(log_f, axis=[0], keepdims=True, reduction='mean'))
        assert_allclose_(
            T.reduce_sum(cost_k),
            iwae_estimator(log_f, axis=[0], keepdims=True, reduction='sum'))
        assert_allclose_(
            -cost_k,
            T.to_numpy(
                iwae_estimator(log_f, axis=[0], keepdims=True, negative=True)))
        self.assertListEqual([1] + cost_shape, T.shape(cost_k))
        assert_allclose_(
            T.grad([T.reduce_sum(cost_k)], [y])[0],
            T.reduce_sum(wk_hat * (2 * x * y), axis=[0]))
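
The gradient assertion above is the self-normalized importance-weighting identity: for cost = log_mean_exp(log_f, axis=0), the derivative is sum_k w_hat_k * d(log_f_k)/dy with w_hat_k = f_k / sum_j f_j. A finite-difference sketch, under the assumption (suggested by the `2 * x * y` factor) that the payload satisfies log_f = x * y ** 2:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(7, 5)        # 7 importance samples, 5 batch entries
y = rng.randn(5)
f = np.exp(x * y ** 2)     # assumed payload: log_f = x * y ** 2
w_hat = f / np.sum(f, axis=0, keepdims=True)

# analytic gradient of sum(log_mean_exp(log_f, axis=0)) w.r.t. y
analytic = np.sum(w_hat * (2 * x * y), axis=0)

def cost(y_):
    return np.sum(np.log(np.mean(np.exp(x * y_ ** 2), axis=0)))

# central finite differences
eps = 1e-6
numeric = np.array([
    (cost(y + eps * np.eye(5)[i]) - cost(y - eps * np.eye(5)[i])) / (2 * eps)
    for i in range(5)
])
np.testing.assert_allclose(analytic, numeric, rtol=1e-4, atol=1e-6)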
Example #5
    def test_elbo(self):
        log_p, log_q = prepare_test_payload()

        obj = elbo_objective(log_p, log_q)
        assert_allclose(
            T.reduce_mean(obj),
            elbo_objective(log_p, log_q, reduction='mean')
        )
        assert_allclose(
            T.reduce_sum(obj),
            elbo_objective(log_p, log_q, reduction='sum')
        )
        obj_shape = T.shape(obj)
        assert_allclose(obj, log_p - log_q)

        obj_r = elbo_objective(log_p, log_q, axis=[0])
        self.assertListEqual(obj_shape[1:], T.shape(obj_r))
        assert_allclose(obj_r, T.reduce_mean(log_p - log_q, axis=[0]))

        obj_rk = elbo_objective(log_p, log_q, axis=[0], keepdims=True)
        assert_allclose(
            T.reduce_mean(obj_rk),
            elbo_objective(log_p, log_q, axis=[0], keepdims=True, reduction='mean')
        )
        assert_allclose(
            T.reduce_sum(obj_rk),
            elbo_objective(log_p, log_q, axis=[0], keepdims=True, reduction='sum')
        )
        self.assertListEqual([1] + obj_shape[1:], T.shape(obj_rk))
        assert_allclose(
            obj_rk,
            T.reduce_mean(log_p - log_q, axis=[0], keepdims=True)
        )
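
The assertions above fully determine the estimator; a numpy equivalent, as a sketch:

import numpy as np

def elbo_objective_np(log_p, log_q, axis=None, keepdims=False):
    # ELBO objective: log_p - log_q, optionally averaged over the
    # sampling axis
    obj = log_p - log_q
    if axis is None:
        return obj
    return np.mean(obj, axis=tuple(axis), keepdims=keepdims)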
Example #6
    def test_importance_sampling_log_likelihood(self):
        log_p, log_q = prepare_test_payload()
        # `assert_allclose_` is undefined in this excerpt; assume a
        # tolerance-bound partial, as in the IWAE example above:
        assert_allclose_ = functools.partial(assert_allclose,
                                             rtol=1e-4, atol=1e-6)

        ll = importance_sampling_log_likelihood(log_p, log_q, axis=[0])
        ll_shape = T.shape(ll)
        assert_allclose_(ll, T.log_mean_exp(log_p - log_q, axis=[0]))
        assert_allclose_(
            T.reduce_mean(ll),
            importance_sampling_log_likelihood(log_p,
                                               log_q,
                                               axis=[0],
                                               reduction='mean'))
        assert_allclose_(
            T.reduce_sum(ll),
            importance_sampling_log_likelihood(log_p,
                                               log_q,
                                               axis=[0],
                                               reduction='sum'))

        ll_k = importance_sampling_log_likelihood(log_p,
                                                  log_q,
                                                  axis=[0],
                                                  keepdims=True)
        self.assertListEqual([1] + ll_shape, T.shape(ll_k))
        assert_allclose_(
            ll_k, T.log_mean_exp(log_p - log_q, axis=[0], keepdims=True))
Example #7
    def test_Normal(self):
        mean = np.random.randn(3, 4)
        logstd = np.random.randn(2, 3, 4)
        mean_t = T.as_tensor(mean)
        logstd_t = T.as_tensor(logstd)

        normal = Normal(mean=mean_t, logstd=logstd_t, event_ndims=1)

        # copy()
        normal2 = normal.copy()
        self.assertIsInstance(normal2, Normal)
        self.assertIs(normal2.logstd, logstd_t)
        self.assertEqual(normal2.event_ndims, 1)

        # sample(n_samples=None)
        t = normal.sample()
        self.assertIsInstance(t, StochasticTensor)
        self.assertIs(t.distribution, normal)
        self.assertEqual(t.n_samples, None)
        self.assertEqual(t.group_ndims, 0)
        self.assertEqual(t.reparameterized, True)
        self.assertIsInstance(t.tensor, T.Tensor)
        self.assertEqual(T.shape(t.tensor), [2, 3, 4])

        for log_pdf in [t.log_prob(), normal.log_prob(t)]:
            assert_allclose(
                log_pdf,
                T.random.normal_log_pdf(given=t.tensor,
                                        mean=mean_t,
                                        logstd=logstd_t,
                                        group_ndims=1))

        # sample(n_samples=5)
        t = normal.sample(n_samples=5, group_ndims=-1, reparameterized=False)
        self.assertIsInstance(t, StochasticTensor)
        self.assertIs(t.distribution, normal)
        self.assertEqual(t.n_samples, 5)
        self.assertEqual(t.group_ndims, -1)
        self.assertEqual(t.reparameterized, False)
        self.assertIsInstance(t.tensor, T.Tensor)
        self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])

        for log_pdf in [t.log_prob(-1), normal.log_prob(t, -1)]:
            assert_allclose(
                log_pdf,
                T.random.normal_log_pdf(given=t.tensor,
                                        mean=mean_t,
                                        logstd=logstd_t,
                                        group_ndims=0))
Example #8
    def test_randint(self):
        for low, high in [(0, 5), (-3, 4)]:
            for dtype, device in product(number_dtypes, [None, T.CPU_DEVICE]):
                # test sample dtype and shape
                t = T.random.randint(low=low,
                                     high=high,
                                     shape=[n_samples, 2, 3, 4],
                                     dtype=dtype,
                                     device=device)
                self.assertEqual(T.get_dtype(t), dtype)
                self.assertEqual(T.get_device(t), device or T.current_device())
                self.assertEqual(T.shape(t), [n_samples, 2, 3, 4])
                x = T.to_numpy(t).astype(np.int32)

                # test sample value range
                r = list(range(low, high))
                self.assertTrue(
                    all((int(v) in r) for v in set(x.reshape([-1]).tolist())))

                # test the prob of each value
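                # (each value's empirical frequency should fall within five
                # binomial standard errors, sqrt(p * (1 - p) / size), of p)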
                p = 1. / len(r)
                size = 1. * np.size(x)
                for i in r:
                    self.assertLessEqual(
                        abs(np.sum(x == i) / size - p),
                        5. * np.sqrt(p * (1. - p)) / np.sqrt(size))

        with pytest.raises(Exception, match='`low` < `high` does not hold'):
            _ = T.random.randint(low=2, high=1, shape=[2, 3, 4])
Example #9
    def test_add(self):
        x_observed = T.as_tensor(
            np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
        net = BayesianNet({'x': x_observed})
        d = UnitNormal([3, 4])
        self.assertNotIn('x', net)
        self.assertNotIn('y', net)

        # add an observed node
        x = net.add('x', d, n_samples=2, group_ndims=1)
        self.assertIs(net.get('x'), x)
        self.assertIs(net['x'], x)
        self.assertIn('x', net)
        self.assertListEqual(list(net), ['x'])

        self.assertIsInstance(x, StochasticTensor)
        self.assertIs(x.distribution, d)
        self.assertEqual(x.n_samples, 2)
        self.assertEqual(x.group_ndims, 1)
        self.assertEqual(x.reparameterized, True)
        self.assertIs(x.tensor, x_observed)
        self.assertEqual(T.shape(x.tensor), [2, 3, 4])

        # add an unobserved node
        y = net.add('y', d, group_ndims=1, reparameterized=False)
        self.assertIs(net.get('y'), y)
        self.assertIs(net['y'], y)
        self.assertIn('y', net)
        self.assertListEqual(list(net), ['x', 'y'])

        self.assertIsInstance(y, StochasticTensor)
        self.assertIs(y.distribution, d)
        self.assertEqual(y.n_samples, None)
        self.assertEqual(y.group_ndims, 1)
        self.assertEqual(y.reparameterized, False)
        self.assertEqual(T.shape(y.tensor), [3, 4])

        # error adding the same variable
        with pytest.raises(ValueError,
                           match="Stochastic tensor 'x' already exists."):
            _ = net.add('x', d)

        # test remove
        net.remove('x')
        self.assertNotIn('x', net)
        del net['y']
        self.assertNotIn('y', net)
Example #10
    def test_normalize_adj(self):
        def D(t):
            return np.diag(1. / t)

        node_count = 50
        eps = 1e-6

        # directed
        def G(d, y):
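            # row normalization: D^{-1} A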
            return np.dot(D(d), y)

        empty_adj = T.sparse.from_dense(T.zeros([node_count, node_count]))
        self.assertEqual(
            T.shape(T.sparse.get_indices(empty_adj, coord_first=True))[1], 0)
        x_list = ([make_random_adj_matrix(node_count)
                   for _ in range(3)] + [empty_adj])
        y_list = [T.sparse.to_numpy(x) for x in x_list]
        d_list = [np.maximum(np.sum(y, axis=-1), eps) for y in y_list]
        d_sum = sum(d_list, 0.)

        for x, y, d in zip(x_list, y_list, d_list):
            assert_allclose(gnn.normalize_adj(x, epsilon=eps),
                            G(d, y),
                            atol=1e-4,
                            rtol=1e-6)

        out_list = gnn.normalize_partitioned_adj(x_list, epsilon=eps)
        for y, out in zip(y_list, out_list):
            assert_allclose(out, G(d_sum, y), atol=1e-4, rtol=1e-6)

        # undirected
        def G(d, y):
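            # symmetric normalization: D^{-1/2} A D^{-1/2}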
            d = D(np.sqrt(d))
            return np.dot(np.dot(d, y), d)

        x_list = [
            make_random_adj_matrix(node_count, directed=False)
            for _ in range(3)
        ]
        y_list = [T.sparse.to_numpy(x) for x in x_list]
        d_list = [np.maximum(np.sum(y, axis=-1), eps) for y in y_list]
        d_sum = sum(d_list, 0.)

        for x, y, d in zip(x_list, y_list, d_list):
            assert_allclose(gnn.normalize_adj(x, undirected=True, epsilon=eps),
                            G(d, y),
                            atol=1e-4,
                            rtol=1e-6)

        out_list = gnn.normalize_partitioned_adj(x_list,
                                                 undirected=True,
                                                 epsilon=eps)
        for y, out in zip(y_list, out_list):
            assert_allclose(out, G(d_sum, y), atol=1e-4, rtol=1e-6)

        # errors
        with pytest.raises(Exception,
                           match='`adj_matrices` must not be empty'):
            _ = gnn.normalize_partitioned_adj([])
Example #11
    def test_IdentityFlow(self):
        x = T.random.randn([2, 3, 4, 5])

        for event_ndims in (0, 1, 2):
            flow = tk.layers.jit_compile(tk.flows.IdentityFlow(event_ndims))
            log_det_shape = T.shape(x)[:4 - event_ndims]
            expected_log_det = T.zeros(log_det_shape)
            flow_standard_check(self, flow, x, x, expected_log_det,
                                T.random.randn(log_det_shape))
Example #12
def full_scan_average_check(ctx, factory, input_x, expected):
    weight = T.variable(T.shape(input_x)[1:], initializer=tk.init.zeros,
                        requires_grad=False)
    avg = factory([weight])
    for x in input_x:
        T.assign(weight, x)
        avg.update()
    avg.commit()
    assert_allclose(weight, expected, atol=1e-4, rtol=1e-6)
Example #13
    def test_sample_and_log_prob(self):
        logits = np.random.randn(2, 3, 4)
        logits_t = T.as_tensor(logits)

        for int_dtype in int_dtypes:
            bernoulli = Bernoulli(logits=logits_t,
                                  event_ndims=1,
                                  dtype=int_dtype)

            # n_samples is None
            t = bernoulli.sample()
            self.assertIsInstance(t, StochasticTensor)
            self.assertIs(t.distribution, bernoulli)
            self.assertEqual(T.get_dtype(t.tensor), int_dtype)
            self.assertEqual(t.n_samples, None)
            self.assertEqual(t.group_ndims, 0)
            self.assertEqual(t.reparameterized, False)
            self.assertIsInstance(t.tensor, T.Tensor)
            self.assertEqual(T.shape(t.tensor), [2, 3, 4])

            for log_pdf in [t.log_prob(), bernoulli.log_prob(t)]:
                assert_allclose(
                    log_pdf,
                    T.random.bernoulli_log_prob(given=t.tensor,
                                                logits=logits_t,
                                                group_ndims=1))

            # n_samples == 5
            t = bernoulli.sample(n_samples=5, group_ndims=-1)
            self.assertIsInstance(t, StochasticTensor)
            self.assertIs(t.distribution, bernoulli)
            self.assertEqual(T.get_dtype(t.tensor), int_dtype)
            self.assertEqual(t.n_samples, 5)
            self.assertEqual(t.group_ndims, -1)
            self.assertEqual(t.reparameterized, False)
            self.assertIsInstance(t.tensor, T.Tensor)
            self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])

            for log_pdf in [t.log_prob(-1), bernoulli.log_prob(t, -1)]:
                assert_allclose(
                    log_pdf,
                    T.random.bernoulli_log_prob(given=t.tensor,
                                                logits=logits_t,
                                                group_ndims=0))
Example #14
    def test_StdDataInit_for_Conv(self):
        in_channels = 7
        out_channels = 9
        data_init = tk.init.StdDataInit()

        for spatial_ndims in (1, 2, 3):
            for transpose, use_bias, kernel_size, stride, padding, dilation in product(
                (False, True),
                (True, False),
                (1, [3, 2, 1][:spatial_ndims]),
                (1, [2, 3, 1][:spatial_ndims]),
                (0, 'full'),
                (1, [1, 3, 2][:spatial_ndims]),
            ):
                if transpose:
                    cls_name = f'LinearConvTranspose{spatial_ndims}d'
                else:
                    cls_name = f'LinearConv{spatial_ndims}d'
                cls = getattr(tk.layers, cls_name)

                # prepare for the test
                x = T.random.randn(
                    make_conv_shape([11], in_channels, [16, 15,
                                                        14][:spatial_ndims]))

                def check_x(layer):
                    y = layer(x)
                    y_mean, y_var = T.calculate_mean_and_var(
                        y, axis=[-T.rank(x)] + get_spatial_axis(spatial_ndims))
                    if use_bias:
                        assert_allclose(y_mean,
                                        T.zeros_like(y_mean),
                                        atol=1e-6,
                                        rtol=1e-4)
                    assert_allclose(y_var,
                                    T.ones_like(y_var),
                                    atol=1e-6,
                                    rtol=1e-4)

                # construct the layer
                layer = cls(
                    in_channels,
                    out_channels,
                    data_init=data_init,
                    use_bias=use_bias,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                )

                # test initialize via data
                check_x(layer)

                # test new data will not cause it re-initialized
                _ = layer(T.random.randn(T.shape(x)))
                check_x(layer)
Example #15
    def _scale_and_log_scale(self,
                             pre_scale: Tensor,
                             inverse: bool,
                             compute_log_scale: bool
                             ) -> Tuple[Tensor, Optional[Tensor]]:
        # Apparently a test stub: `inverse` is accepted but ignored, and the
        # log-scale is random noise with an extra leading dimension.
        scale = pre_scale
        if compute_log_scale:
            log_scale: Optional[Tensor] = randn([1] + shape(pre_scale))
        else:
            log_scale: Optional[Tensor] = None
        return scale, log_scale
Example #16
def check_invertible_matrix(ctx, m, size):
    matrix, log_det = m(inverse=False, compute_log_det=False)
    ctx.assertIsNone(log_det)

    matrix, log_det = m(inverse=False, compute_log_det=True)
    ctx.assertEqual(T.shape(matrix), [size, size])
    assert_allclose(T.matrix_inverse(T.matrix_inverse(matrix)),
                    matrix, rtol=1e-4, atol=1e-6)
    assert_allclose(T.linalg.slogdet(matrix)[1], log_det,
                    rtol=1e-4, atol=1e-6)

    inv_matrix, inv_log_det = m(inverse=True, compute_log_det=True)
    ctx.assertEqual(T.shape(inv_matrix), [size, size])
    assert_allclose(T.matrix_inverse(inv_matrix),
                    matrix, rtol=1e-4, atol=1e-6)
    assert_allclose(T.matrix_inverse(T.matrix_inverse(inv_matrix)),
                    inv_matrix, rtol=1e-4, atol=1e-6)
    assert_allclose(inv_log_det, -log_det, rtol=1e-4, atol=1e-6)
    assert_allclose(T.linalg.slogdet(inv_matrix)[1], -log_det,
                    rtol=1e-4, atol=1e-6)
Example #17
    def test_ExpScale(self):
        x = T.random.randn([2, 3, 4])
        scale = ExpScale()
        scale = tk.layers.jit_compile(scale)

        for pre_scale in [T.random.randn([4]),
                          T.random.randn([3, 1]),
                          T.random.randn([2, 1, 1]),
                          T.random.randn([2, 3, 4])]:
            expected_y = x * T.exp(pre_scale)
            expected_log_det = T.strict_broadcast_to_shape(pre_scale, T.shape(x))
            check_scale(self, scale, x, pre_scale, expected_y, expected_log_det)
Example #18
    def test_sgvb(self):
        assert_allclose_ = functools.partial(assert_allclose,
                                             rtol=1e-5,
                                             atol=1e-6)

        # default
        x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
        cost = sgvb_estimator(f)
        assert_allclose_(-cost, sgvb_estimator(f, negative=True))
        assert_allclose_(T.reduce_mean(cost),
                         sgvb_estimator(f, reduction='mean'))
        assert_allclose_(T.reduce_sum(cost), sgvb_estimator(f,
                                                            reduction='sum'))
        cost_shape = T.shape(cost)
        assert_allclose_(
            T.grad([T.reduce_sum(cost)], [y])[0],
            T.reduce_sum(2 * x * y * f, axis=[0]))

        x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
        cost_r = sgvb_estimator(f, axis=[0])
        assert_allclose_(-cost_r, sgvb_estimator(f, axis=[0], negative=True))
        self.assertListEqual(cost_shape[1:], T.shape(cost_r))
        assert_allclose_(
            T.grad([T.reduce_sum(cost_r)], [y])[0],
            T.reduce_sum(2 * x * y * f, axis=[0]) / 7)

        x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
        cost_rk = sgvb_estimator(f, axis=[0], keepdims=True)
        assert_allclose_(T.reduce_mean(cost_rk),
                         sgvb_estimator(f, axis=[0], reduction='mean'))
        assert_allclose_(T.reduce_sum(cost_rk),
                         sgvb_estimator(f, axis=[0], reduction='sum'))
        assert_allclose_(
            -cost_rk, sgvb_estimator(f, axis=[0], keepdims=True,
                                     negative=True))
        self.assertListEqual([1] + cost_shape[1:], T.shape(cost_rk))
        assert_allclose_(
            T.grad([T.reduce_sum(cost_rk)], [y])[0],
            T.reduce_sum(2 * x * y * f, axis=[0]) / 7)
Example #19
    def test_LinearScale(self):
        x = T.random.randn([2, 3, 4])
        scale = LinearScale(epsilon=T.EPSILON)
        self.assertIn('epsilon=', repr(scale))
        scale = tk.layers.jit_compile(scale)

        for pre_scale in [T.random.randn([4]),
                          T.random.randn([3, 1]),
                          T.random.randn([2, 1, 1]),
                          T.random.randn([2, 3, 4])]:
            expected_y = x * pre_scale
            expected_log_det = T.strict_broadcast_to_shape(
                T.log(T.abs(pre_scale)), T.shape(x))
            check_scale(self, scale, x, pre_scale, expected_y, expected_log_det)
Example #20
    def test_sample_and_log_prob(self):
        for dtype in float_dtypes:
            normal = UnitNormal(shape=[2, 3, 4], event_ndims=1, dtype=dtype)

            # sample(n_samples=None)
            t = normal.sample()
            self.assertIsInstance(t, StochasticTensor)
            self.assertIs(t.distribution, normal)
            self.assertEqual(t.n_samples, None)
            self.assertEqual(t.group_ndims, 0)
            self.assertEqual(t.reparameterized, True)
            self.assertIsInstance(t.tensor, T.Tensor)
            self.assertEqual(T.get_dtype(t.tensor), dtype)
            self.assertEqual(T.shape(t.tensor), [2, 3, 4])

            for log_pdf in [t.log_prob(), normal.log_prob(t)]:
                assert_allclose(
                    log_pdf,
                    T.random.randn_log_pdf(given=t.tensor, group_ndims=1))

            # sample(n_samples=5)
            t = normal.sample(n_samples=5,
                              group_ndims=-1,
                              reparameterized=False)
            self.assertIsInstance(t, StochasticTensor)
            self.assertIs(t.distribution, normal)
            self.assertEqual(t.n_samples, 5)
            self.assertEqual(t.group_ndims, -1)
            self.assertEqual(t.reparameterized, False)
            self.assertIsInstance(t.tensor, T.Tensor)
            self.assertEqual(T.get_dtype(t.tensor), dtype)
            self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])

            for log_pdf in [t.log_prob(-1), normal.log_prob(t, -1)]:
                assert_allclose(
                    log_pdf,
                    T.random.randn_log_pdf(given=t.tensor, group_ndims=0))
Example #21
    def test_rand(self):
        for dtype, device in product(float_dtypes, [None, T.CPU_DEVICE]):
            # test sample dtype and shape
            t = T.random.rand([n_samples, 2, 3, 4], dtype=dtype, device=device)
            self.assertEqual(T.get_dtype(t), dtype)
            self.assertEqual(T.get_device(t), device or T.current_device())
            self.assertEqual(T.shape(t), [n_samples, 2, 3, 4])

            # test sample mean
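            # Uniform(0, 1) has mean 1/2 and stddev sqrt(1/12); allow three
            # standard errors of the sample mean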
            x = T.to_numpy(t)
            x_mean = np.mean(x, axis=0)
            np.testing.assert_array_less(
                np.abs(0.5 - x_mean),
                (3. * np.sqrt(1. / 12) / np.sqrt(n_samples) *
                 np.ones_like(x_mean)))
Example #22
        def get_samples(mean, log_scale, n_samples=None, **kwargs):
            seed = next_seed()
            kwargs.setdefault('epsilon', 1e-7)
            sample_shape = T.get_broadcast_shape(T.shape(mean),
                                                 T.shape(log_scale))
            if n_samples is not None:
                sample_shape = [n_samples] + sample_shape

            np.random.seed(seed)
            T.random.seed(seed)
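            # with both RNGs re-seeded identically, this reproduces exactly
            # the uniform noise that the sampler below will consume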
            u = T.random.uniform(shape=sample_shape,
                                 low=kwargs['epsilon'],
                                 high=1. - kwargs['epsilon'],
                                 dtype=T.get_dtype(mean))
            u = T.to_numpy(u)

            np.random.seed(seed)
            T.random.seed(seed)
            r = T.random.discretized_logistic(mean,
                                              log_scale,
                                              n_samples=n_samples,
                                              **kwargs)

            return u, r
Example #23
def make_random_adj_matrix(node_count: int,
                           p=0.1,
                           dtype=T.float_x(),
                           directed=True) -> T.SparseTensor:
    edge_count = int(node_count * node_count * p)
    indices = np.random.randint(0, node_count, size=[2, edge_count])
    if not directed:
        indices = np.concatenate(
            [indices, np.stack([indices[1], indices[0]], axis=0)], axis=1)
    indices = T.as_tensor(indices, dtype=T.int64)
    values = T.abs(T.random.randn([T.shape(indices)[1]], dtype=dtype)) + 1e-6
    return T.sparse.make_sparse(indices,
                                values,
                                shape=[node_count, node_count],
                                coord_first=True)
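
A quick usage sketch (names and calls as in the other examples here):

adj = make_random_adj_matrix(50, p=0.05, directed=False)
dense = T.sparse.to_numpy(adj)    # (50, 50) dense numpy copy
assert dense.shape == (50, 50)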
Example #24
    def test_layer_names_as_types(self):
        args = tk.layers.LayerArgs()
        args.set_args(['dense', 'conv2d'], activation=tk.layers.LeakyReLU)
        args.set_args(['conv2d'], kernel_size=3)

        self.assertEqual(args.get_kwargs('dense'),
                         {'activation': tk.layers.LeakyReLU})
        self.assertEqual(args.get_kwargs('conv2d'), {
            'activation': tk.layers.LeakyReLU,
            'kernel_size': 3,
        })

        l1 = args.build('dense', 4, 4)
        self.assertIsInstance(l1[1], tk.layers.LeakyReLU)
        l2 = args.build('conv2d', 4, 4)
        self.assertIsInstance(l2[1], tk.layers.LeakyReLU)
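        # conv kernel apparently stored as (out_channels, in_channels) +
        # kernel_size for this backend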
        self.assertEqual(T.shape(l2[0].weight_store()), [4, 4, 3, 3])
Example #25
        def do_test_sample(is_one_hot: bool, n_z: Optional[int],
                           dtype: Optional[str], float_dtype: str):
            probs_t = T.as_tensor(probs, dtype=float_dtype)
            logits_t = T.as_tensor(logits, dtype=float_dtype)
            value_shape = [n_classes] if is_one_hot else []

            if dtype is not None:
                expected_dtype = dtype
            else:
                expected_dtype = T.int32 if is_one_hot else T.categorical_dtype

            # sample
            sample_shape = [n_z] if n_z is not None else []
            kwargs = {'dtype': dtype} if dtype else {}
            t = (T.random.one_hot_categorical
                 if is_one_hot else T.random.categorical)(probs_t,
                                                          n_samples=n_z,
                                                          **kwargs)
            self.assertEqual(T.get_dtype(t), expected_dtype)
            self.assertEqual(T.get_device(t), T.current_device())
            self.assertEqual(T.shape(t),
                             sample_shape + [2, 3, 4] + value_shape)

            # check values
            x = T.to_numpy(t)
            if is_one_hot:
                self.assertEqual(set(x.flatten().tolist()), {0, 1})
            else:
                if n_z is None:
                    self.assertTrue(
                        set(x.flatten().tolist()).issubset(
                            set(range(n_classes))))
                else:
                    self.assertEqual(set(x.flatten().tolist()),
                                     set(range(n_classes)))

            # check log_prob
            do_check_log_prob(
                given=t,
                batch_ndims=len(t.shape) - int(is_one_hot),
                Z_log_prob_fn=partial(
                    (T.random.one_hot_categorical_log_prob
                     if is_one_hot else T.random.categorical_log_prob),
                    logits=logits_t),
                np_log_prob=log_prob(x, probs, n_classes, is_one_hot))
Example #26
        def g(output_size, kernel_size, stride, padding, dilation):
            # use conv to generate the `input_size`
            spatial_ndims = len(output_size)
            layer_cls = getattr(tk.layers, f'LinearConv{spatial_ndims}d')
            layer = layer_cls(
                in_channels=1,
                out_channels=1,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
            x = T.random.randn(make_conv_shape([1], 1, output_size))
            y = layer(x)
            y_shape = T.shape(y)
            input_size = [y_shape[a] for a in get_spatial_axis(spatial_ndims)]

            # do check
            f(input_size, output_size, kernel_size, stride, padding, dilation)
Example #27
        def do_test_sample(n_z, sample_shape, float_dtype, dtype):
            probs_t = T.as_tensor(probs, dtype=float_dtype)
            logits_t = T.as_tensor(logits, dtype=float_dtype)
            t = T.random.bernoulli(probs=probs_t, n_samples=n_z, dtype=dtype)
            self.assertEqual(T.get_dtype(t), dtype)
            self.assertEqual(T.get_device(t), T.current_device())
            self.assertEqual(T.shape(t), sample_shape + [2, 3, 4])

            # all values must be either 0 or 1
            x = T.to_numpy(t)
            self.assertEqual(set(x.flatten().tolist()), {0, 1})

            # check the log prob
            do_check_log_prob(
                given=t,
                batch_ndims=len(t.shape),
                Z_log_prob_fn=partial(T.random.bernoulli_log_prob,
                                      logits=logits_t),
                np_log_prob=log_prob(x))
Example #28
    def test_SigmoidScale(self):
        x = T.random.randn([2, 3, 4])

        for pre_scale_bias in [None, 0., 1.5]:
            scale = SigmoidScale(**(
                {'pre_scale_bias': pre_scale_bias}
                if pre_scale_bias is not None else {}
            ))
            if pre_scale_bias is None:
                pre_scale_bias = 0.
            self.assertIn(f'pre_scale_bias={pre_scale_bias}', repr(scale))
            scale = tk.layers.jit_compile(scale)

            for pre_scale in [T.random.randn([4]),
                              T.random.randn([3, 1]),
                              T.random.randn([2, 1, 1]),
                              T.random.randn([2, 3, 4])]:
                expected_y = x * T.nn.sigmoid(pre_scale + pre_scale_bias)
                expected_log_det = T.strict_broadcast_to_shape(
                    T.nn.log_sigmoid(pre_scale + pre_scale_bias), T.shape(x))
                check_scale(self, scale, x, pre_scale, expected_y, expected_log_det)
Example #29
def print_parameters_summary(params: List[T.Variable],
                             names: List[str],
                             printer: Optional[Callable[[str], Any]] = print):
    shapes = []
    sizes = []
    total_size = 0
    max_shape_len = 0
    max_size_len = 0
    right_pad = ' ' * 3

    for param in params:
        shape = T.shape(param)
        size = np.prod(shape)
        total_size += size
        shapes.append(str(shape))
        sizes.append(f'{size:,d}')
        max_shape_len = max(max_shape_len, len(shapes[-1]))
        max_size_len = max(max_size_len, len(sizes[-1]))

    total_size = f'{total_size:,d}'
    right_len = max(max_shape_len + len(right_pad) + max_size_len,
                    len(total_size))

    param_info = []
    max_name_len = 0
    for param, name, shape, size in zip(params, names, shapes, sizes):
        max_name_len = max(max_name_len, len(name))
        right = f'{shape:<{max_shape_len}s}{right_pad}{size:>{max_size_len}s}'
        right = f'{right:>{right_len}s}'
        param_info.append((name, right))

    if param_info:
        param_info.append(('Total', f'{total_size:>{right_len}s}'))
        lines = mltk.format_key_values(param_info,
                                       title='Parameters',
                                       formatter=str).strip().split('\n')
        # insert a separator line above the 'Total' row
        k = len(lines[-1])
        lines.insert(-1, '-' * k)

        if printer is not None:
            printer('\n'.join(lines))
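
A hypothetical call, using hand-made variables in place of a real model (the `T.variable` signature follows Example #12 above):

w = T.variable([128, 64], initializer=tk.init.zeros)
b = T.variable([64], initializer=tk.init.zeros)
print_parameters_summary([w, b], ['dense/weight', 'dense/bias'])
# prints a 'Parameters' table with per-variable shapes, sizes and a total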
Example #30
    def test_randn(self):
        for dtype, device in product(float_dtypes, [None, T.CPU_DEVICE]):
            # test sample dtype and shape
            t = T.random.randn([n_samples, 2, 3, 4],
                               dtype=dtype,
                               device=device)
            self.assertEqual(T.get_dtype(t), dtype)
            self.assertEqual(T.get_device(t), device or T.current_device())
            self.assertEqual(T.shape(t), [n_samples, 2, 3, 4])

            # test sample mean
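            # the sample mean of a standard normal should lie within three
            # standard errors (3 / sqrt(n_samples)) of zero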
            x = T.to_numpy(t)
            x_mean = np.mean(x, axis=0)
            np.testing.assert_array_less(
                np.abs(x_mean), 3. / np.sqrt(n_samples) * np.ones_like(x_mean))

            # test log_prob
            do_check_log_prob(given=t,
                              batch_ndims=len(x.shape),
                              Z_log_prob_fn=T.random.randn_log_pdf,
                              np_log_prob=np.log(
                                  np.exp(-x**2 / 2.) / np.sqrt(2 * np.pi)))