def test_NormedWeight(self):
    initial_value = np.random.randn(2, 3, 4)
    new_value = np.random.randn(2, 3, 4)

    for axis in [-3, -2, -1, 0, 1, 2]:
        store = NormedWeightStore([2, 3, 4], initializer=initial_value,
                                  axis=axis)
        self.assertEqual(repr(store), 'NormedWeightStore(shape=[2, 3, 4])')

        expected_value = T.as_tensor(initial_value) / T.norm_except_axis(
            T.as_tensor(initial_value), axis=axis, keepdims=True)
        assert_allclose(store.get(), expected_value, rtol=1e-4)
        assert_allclose(store(), expected_value, rtol=1e-4)
        assert_allclose(store.v, expected_value, rtol=1e-4)

        store.set(T.as_tensor(new_value))
        expected_value = T.as_tensor(new_value) / T.norm_except_axis(
            T.as_tensor(new_value), axis=axis, keepdims=True)
        assert_allclose(store.get(), expected_value, rtol=1e-4)
        assert_allclose(store(), expected_value, rtol=1e-4)
        assert_allclose(store.v, expected_value, rtol=1e-4)

    for axis in (-4, 3):
        with pytest.raises(ValueError, match='`axis` out of range.'):
            _ = NormedWeightStore([2, 3, 4], initializer=initial_value,
                                  axis=axis)
def test_coalesce_and_get_indices_values(self):
    row, col = np.array([0, 1, 2]), np.array([2, 0, 1])
    values = np.array([1., 2., 3.])
    shape = [3, 3]
    x = make_ndarray_by_coo(row, col, values, shape)

    for force_coalesced in (False, True):
        t = T.sparse.make_sparse(
            T.as_tensor([[0, 0, 2, 1], [2, 2, 1, 0]], dtype=T.int32),
            T.as_tensor([.5, .5, 3., 2.]),
            shape=shape,
            coord_first=True,
            force_coalesced=force_coalesced,
        )
        self.assertEqual(T.sparse.is_coalesced(t), force_coalesced)

        if force_coalesced:
            self.assertIs(T.sparse.coalesce(t), t)
        else:
            t2 = T.sparse.coalesce(t)
            self.assertIsNot(t2, t)
            t = t2

        assert_allclose(t, x)
        assert_equal(
            T.sparse.get_indices(t),
            np.stack(
                [row, col],
                axis=(0 if T.sparse.SPARSE_INDICES_DEFAULT_IS_COORD_FIRST
                      else 1)))
        assert_equal(T.sparse.get_indices(t, coord_first=True),
                     np.stack([row, col], axis=0))
        assert_equal(T.sparse.get_indices(t, coord_first=False),
                     np.stack([row, col], axis=1))
        assert_equal(T.sparse.get_values(t), values)
def prepare_test_payload(reparameterized):
    x = T.as_tensor(np.random.normal(size=[7, 13]))  # input
    y = T.requires_grad(T.as_tensor(np.random.normal(size=[13])))  # param
    if reparameterized:
        z = y * x  # sample
    else:
        z = T.stop_grad(y) * x
    f = T.exp(y * z)
    log_f = y * z
    log_q = (x ** 2 - 1) * (y ** 3)
    return x, y, z, f, log_f, log_q
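# Illustrative only: a minimal sketch of how the payload above can be
# consumed, assuming just the tensorkit ops already used in this file.
# With y of shape [13] and x of shape [7, 13], z = y * x broadcasts to
# [7, 13]; this hypothetical helper merely verifies the broadcast shapes.
def _check_payload_shapes():
    x, y, z, f, log_f, log_q = prepare_test_payload(reparameterized=True)
    assert T.shape(z) == [7, 13]      # y broadcast against the batch of x
    assert T.shape(log_q) == [7, 13]  # elementwise proposal term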
def test_param_and_buffer(self):
    layer = BaseLayer()

    # add parameter & buffer
    w_initial = T.random.randn([5, 4])
    c_initial = T.random.randn([5, 3])
    add_parameter(layer, 'w', w_initial)
    add_parameter(layer, 'b', None)
    add_buffer(layer, 'c', c_initial)
    add_buffer(layer, 'd', None)

    # get parameter and buffer
    assert_allclose(get_parameter(layer, 'w'), w_initial)
    self.assertIsNone(get_parameter(layer, 'b'))
    assert_allclose(get_buffer(layer, 'c'), c_initial)
    self.assertIsNone(get_buffer(layer, 'd'))

    # assignment ('w' is a parameter, so read it back via `get_parameter`)
    w_value = np.random.randn(5, 4)
    with T.no_grad():
        T.assign_data(get_parameter(layer, 'w'), w_value)
    assert_allclose(get_parameter(layer, 'w'), w_value)

    # get parameters and buffers
    add_parameter(layer, 'w2', T.as_tensor(w_initial, force_copy=True))
    add_buffer(layer, 'c2', T.as_tensor(c_initial, force_copy=True))
    w = get_parameter(layer, 'w')
    w2 = get_parameter(layer, 'w2')
    c = get_buffer(layer, 'c')
    c2 = get_buffer(layer, 'c2')

    self.assertListEqual(list(iter_parameters(layer)), [w, w2])
    self.assertListEqual(get_parameters(layer), [w, w2])
    self.assertDictEqual(dict(iter_named_parameters(layer)),
                         {'w': w, 'w2': w2})
    self.assertListEqual(list(iter_buffers(layer)), [c, c2])
    self.assertListEqual(get_buffers(layer), [c, c2])
    self.assertDictEqual(dict(iter_named_buffers(layer)),
                         {'c': c, 'c2': c2})

    seq = _MyWrapper(layer)
    self.assertListEqual(list(iter_parameters(seq)), [w, w2])
    self.assertListEqual(list(iter_parameters(seq, recursive=False)), [])
    self.assertListEqual(get_parameters(seq, recursive=False), [])
    self.assertDictEqual(dict(iter_named_parameters(seq)),
                         {'wrapped.w': w, 'wrapped.w2': w2})
    self.assertDictEqual(dict(iter_named_parameters(seq, recursive=False)),
                         {})
    self.assertListEqual(list(iter_buffers(seq)), [c, c2])
    self.assertListEqual(list(iter_buffers(seq, recursive=False)), [])
    self.assertListEqual(get_buffers(seq, recursive=False), [])
    self.assertDictEqual(dict(iter_named_buffers(seq)),
                         {'wrapped.c': c, 'wrapped.c2': c2})
    self.assertDictEqual(dict(iter_named_buffers(seq, recursive=False)), {})
def test_Normal(self):
    mean = np.random.randn(3, 4)
    logstd = np.random.randn(2, 3, 4)
    mean_t = T.as_tensor(mean)
    logstd_t = T.as_tensor(logstd)
    normal = Normal(mean=mean_t, logstd=logstd_t, event_ndims=1)

    # copy()
    normal2 = normal.copy()
    self.assertIsInstance(normal2, Normal)
    self.assertIs(normal2.logstd, logstd_t)
    self.assertEqual(normal2.event_ndims, 1)

    # sample(n_samples=None)
    t = normal.sample()
    self.assertIsInstance(t, StochasticTensor)
    self.assertIs(t.distribution, normal)
    self.assertEqual(t.n_samples, None)
    self.assertEqual(t.group_ndims, 0)
    self.assertEqual(t.reparameterized, True)
    self.assertIsInstance(t.tensor, T.Tensor)
    self.assertEqual(T.shape(t.tensor), [2, 3, 4])

    for log_pdf in [t.log_prob(), normal.log_prob(t)]:
        assert_allclose(
            log_pdf,
            T.random.normal_log_pdf(given=t.tensor, mean=mean_t,
                                    logstd=logstd_t, group_ndims=1))

    # sample(n_samples=5)
    t = normal.sample(n_samples=5, group_ndims=-1, reparameterized=False)
    self.assertIsInstance(t, StochasticTensor)
    self.assertIs(t.distribution, normal)
    self.assertEqual(t.n_samples, 5)
    self.assertEqual(t.group_ndims, -1)
    self.assertEqual(t.reparameterized, False)
    self.assertIsInstance(t.tensor, T.Tensor)
    self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])

    for log_pdf in [t.log_prob(-1), normal.log_prob(t, -1)]:
        assert_allclose(
            log_pdf,
            T.random.normal_log_pdf(given=t.tensor, mean=mean_t,
                                    logstd=logstd_t, group_ndims=0))
def test_construct(self):
    # no observation
    net = BayesianNet()
    self.assertEqual(len(net), 0)
    self.assertEqual(list(net), [])
    self.assertEqual(dict(net.observed), {})
    self.assertEqual(net._original_observed, {})
    self.assertEqual(net._stochastic_tensors, {})

    with pytest.raises(Exception):
        # `net.observed` should be read-only
        net.observed['x'] = T.zeros([])

    # with observation
    normal = UnitNormal([2, 3, 4])
    x = T.as_tensor(np.random.randn(3, 4))
    y = normal.sample()
    net = BayesianNet({'x': x, 'y': y})
    self.assertEqual(len(net), 0)
    self.assertEqual(list(net), [])
    self.assertEqual(list(net.observed), ['x', 'y'])
    self.assertIs(net.observed['x'], x)
    self.assertIs(net.observed['y'], y.tensor)
    self.assertIs(net._original_observed['x'], x)
    self.assertIs(net._original_observed['y'], y)
def test_qr(self):
    for k in [1, 5]:
        m = np.random.randn(k, k)
        q, r = T.linalg.qr(T.as_tensor(m))
        expected_q, expected_r = np.linalg.qr(m)
        assert_allclose(q, expected_q)
        assert_allclose(r, expected_r)
def test_slogdet(self):
    for k in [1, 5]:
        m = np.random.randn(k, k)
        sign, logdet = T.linalg.slogdet(T.as_tensor(m))
        expected_sign, expected_logdet = np.linalg.slogdet(m)
        assert_allclose(sign, expected_sign)
        assert_allclose(logdet, expected_logdet)
def test_copy(self):
    logits = np.random.randn(2, 3, 4)
    logits_t = T.as_tensor(logits)
    bernoulli = Bernoulli(logits=logits_t, event_ndims=1)

    with mock.patch('tensorkit.distributions.bernoulli.copy_distribution',
                    wraps=copy_distribution) as f_copy:
        bernoulli2 = bernoulli.copy(event_ndims=2)
        self.assertIsInstance(bernoulli2, Bernoulli)
        self.assertIs(bernoulli2.logits, bernoulli.logits)
        self.assertEqual(bernoulli2.event_ndims, 2)
        self.assertEqual(f_copy.call_args, ((), {
            'cls': Bernoulli,
            'base': bernoulli,
            'attrs': ('dtype', 'event_ndims', 'epsilon', 'device',
                      'validate_tensors'),
            'mutual_attrs': (('logits', 'probs'),),
            'compute_deps': {'logits': ('epsilon',)},
            'original_mutual_params': {'logits': bernoulli.logits},
            'overrided_params': {'event_ndims': 2},
        }))
def test_copy(self):
    logits = np.random.randn(2, 3, 4)
    logits_t = T.as_tensor(logits)
    cat = _MyBaseCategorical(logits=logits_t, probs=None, event_ndims=1,
                             dtype=T.int32)

    with mock.patch(
            'tensorkit.distributions.categorical.copy_distribution',
            wraps=copy_distribution) as f_copy:
        cat2 = cat.copy(event_ndims=2)
        self.assertIsInstance(cat2, _MyBaseCategorical)
        self.assertIs(cat2.logits, cat.logits)
        self.assertEqual(cat2.event_ndims, 2)
        self.assertEqual(f_copy.call_args, ((), {
            'cls': _MyBaseCategorical,
            'base': cat,
            'attrs': ('dtype', 'event_ndims', 'epsilon', 'device',
                      'validate_tensors'),
            'mutual_attrs': (('logits', 'probs'),),
            'compute_deps': {'logits': ('epsilon',)},
            'original_mutual_params': {'logits': cat.logits},
            'overrided_params': {'event_ndims': 2},
        }))
def test_query_pair(self):
    x_observed = T.as_tensor(
        np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
    net = BayesianNet({'x': x_observed})
    normal = UnitNormal([3, 4])
    x = net.add('x', normal)
    y = net.add('y', normal)

    # test single query
    x_out, x_log_prob = net.query_pair('x')
    self.assertIsInstance(x_out, T.Tensor)
    self.assertIsInstance(x_log_prob, T.Tensor)
    self.assertIs(x_out, x.tensor)
    assert_allclose(x_log_prob, normal.log_prob(x_observed))

    # test multiple queries
    [(x_out, x_log_prob), (y_out, y_log_prob)] = \
        net.query_pairs(iter(['x', 'y']))
    for o in [x_out, x_log_prob, y_out, y_log_prob]:
        self.assertIsInstance(o, T.Tensor)
    self.assertIs(x_out, x.tensor)
    self.assertIs(y_out, y.tensor)
    assert_allclose(x_log_prob, normal.log_prob(x_observed))
    assert_allclose(x_log_prob, normal.log_prob(x.tensor))
    assert_allclose(y_log_prob, normal.log_prob(y.tensor))
def test_construct(self):
    logits = np.random.randn(2, 3, 4)
    probs = sigmoid(logits)

    for int_dtype, float_dtype in product(int_dtypes, float_dtypes):
        logits_t = T.as_tensor(logits, dtype=float_dtype)
        probs_t = T.as_tensor(probs, dtype=float_dtype)
        mutual_params = {'logits': logits_t, 'probs': probs_t}

        # construct from logits or probs
        for key, val in mutual_params.items():
            other_key = [k for k in mutual_params if k != key][0]
            bernoulli = Bernoulli(event_ndims=1, dtype=int_dtype,
                                  epsilon=1e-6, **{key: val})
            self.assertEqual(bernoulli.continuous, False)
            self.assertEqual(bernoulli.reparameterized, False)
            self.assertEqual(bernoulli.min_event_ndims, 0)
            self.assertEqual(bernoulli.dtype, int_dtype)
            self.assertEqual(bernoulli.event_ndims, 1)
            self.assertEqual(bernoulli.epsilon, 1e-6)
            self.assertIs(getattr(bernoulli, key), val)
            assert_allclose(getattr(bernoulli, other_key),
                            mutual_params[other_key], rtol=1e-4)
            self.assertEqual(bernoulli._mutual_params, {key: val})

        # must specify either logits or probs, but not both
        with pytest.raises(ValueError,
                           match='Either `logits` or `probs` must be '
                                 'specified, but not both.'):
            _ = Bernoulli(logits=logits_t, probs=probs_t, dtype=int_dtype)
        with pytest.raises(ValueError,
                           match='Either `logits` or `probs` must be '
                                 'specified, but not both.'):
            _ = Bernoulli(logits=None, probs=None, dtype=int_dtype)

        # nan test
        for key, val in mutual_params.items():
            with pytest.raises(Exception,
                               match='Infinity or NaN value encountered'):
                _ = Bernoulli(
                    validate_tensors=True, dtype=int_dtype,
                    **{key: T.as_tensor(np.nan, dtype=float_dtype)})
def test_log_pdf_mask(self):
    x = np.random.randn(3, 4, 5)
    for dtype in float_dtypes:
        x_t = T.as_tensor(x, dtype=dtype)
        ret = log_pdf_mask(x_t >= 0., x_t ** 2, T.random.LOG_ZERO_VALUE)
        expected = np.where(x >= 0., x ** 2, T.random.LOG_ZERO_VALUE)
        assert_allclose(ret, expected, rtol=1e-4)
def f(t):
    # normalize any supported value type down to a plain numpy array
    if T.sparse.is_sparse_tensor(t):
        t = T.sparse.to_numpy(t)
    if isinstance(t, (T.Tensor, StochasticTensor)):
        t = T.to_numpy(T.as_tensor(t))
    if isinstance(t, sp.spmatrix):
        t = t.toarray()
    return t
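# Illustrative only: a sketch of how a normalizer like `f` lets
# heterogeneous values (backend tensors, scipy sparse matrices, ...) be
# compared directly as numpy arrays.  It assumes `sp` is the
# `scipy.sparse` module, as the isinstance check above suggests.
def _demo_normalize():
    a = T.as_tensor(np.eye(3))   # backend tensor -> ndarray
    b = sp.eye(3, format='coo')  # scipy sparse   -> ndarray
    np.testing.assert_allclose(f(a), f(b))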
def test_regularizations(self):
    tensors = [np.random.randn(2, 3, 4), np.random.randn(5, 6)]
    tensors_t = [T.as_tensor(t) for t in tensors]

    assert_allclose(T.nn.l1_regularization([]), 0.)
    assert_allclose(T.nn.l1_regularization(tensors_t), l1_reg(tensors))
    assert_allclose(T.nn.l2_regularization([]), 0.)
    assert_allclose(T.nn.l2_regularization(tensors_t), l2_reg(tensors))
def do_test_sample(is_one_hot: bool,
                   n_z: Optional[int],
                   dtype: Optional[str],
                   float_dtype: str):
    probs_t = T.as_tensor(probs, dtype=float_dtype)
    logits_t = T.as_tensor(logits, dtype=float_dtype)
    value_shape = [n_classes] if is_one_hot else []

    if dtype is not None:
        expected_dtype = dtype
    else:
        expected_dtype = T.int32 if is_one_hot else T.categorical_dtype

    # sample
    sample_shape = [n_z] if n_z is not None else []
    kwargs = {'dtype': dtype} if dtype else {}
    t = (T.random.one_hot_categorical if is_one_hot
         else T.random.categorical)(probs_t, n_samples=n_z, **kwargs)
    self.assertEqual(T.get_dtype(t), expected_dtype)
    self.assertEqual(T.get_device(t), T.current_device())
    self.assertEqual(T.shape(t), sample_shape + [2, 3, 4] + value_shape)

    # check values
    x = T.to_numpy(t)
    if is_one_hot:
        self.assertEqual(set(x.flatten().tolist()), {0, 1})
    else:
        if n_z is None:
            self.assertTrue(
                set(x.flatten().tolist()).issubset(set(range(n_classes))))
        else:
            self.assertEqual(set(x.flatten().tolist()),
                             set(range(n_classes)))

    # check log_prob
    do_check_log_prob(
        given=t,
        batch_ndims=len(t.shape) - int(is_one_hot),
        Z_log_prob_fn=partial(
            (T.random.one_hot_categorical_log_prob if is_one_hot
             else T.random.categorical_log_prob),
            logits=logits_t),
        np_log_prob=log_prob(x, probs, n_classes, is_one_hot))
def do_test_sample(n_z, sample_shape, float_dtype, dtype):
    probs_t = T.as_tensor(probs, dtype=float_dtype)
    logits_t = T.as_tensor(logits, dtype=float_dtype)
    t = T.random.bernoulli(probs=probs_t, n_samples=n_z, dtype=dtype)
    self.assertEqual(T.get_dtype(t), dtype)
    self.assertEqual(T.get_device(t), T.current_device())
    self.assertEqual(T.shape(t), sample_shape + [2, 3, 4])

    # all values must be either 0 or 1
    x = T.to_numpy(t)
    self.assertEqual(set(x.flatten().tolist()), {0, 1})

    # check the log prob
    do_check_log_prob(
        given=t,
        batch_ndims=len(t.shape),
        Z_log_prob_fn=partial(T.random.bernoulli_log_prob,
                              logits=logits_t),
        np_log_prob=log_prob(x))
def test_SimpleParamStore(self):
    initial_value = np.random.randn(2, 3, 4)
    store = SimpleParamStore([2, 3, 4], initializer=initial_value)
    self.assertEqual(repr(store), 'SimpleParamStore(shape=[2, 3, 4])')
    assert_allclose(store.get(), initial_value, rtol=1e-4)
    assert_allclose(store(), initial_value, rtol=1e-4)

    new_value = np.random.randn(2, 3, 4)
    store.set(T.as_tensor(new_value))
    assert_allclose(store.get(), new_value, rtol=1e-4)
    assert_allclose(store(), new_value, rtol=1e-4)
def test_assert_finite(self):
    d = Distribution(T.int32, [], continuous=True, reparameterized=True,
                     event_ndims=0, min_event_ndims=0)
    d.validate_tensors = False
    t = T.as_tensor(np.nan)
    self.assertIs(d._assert_finite(t, 't'), t)

    d.validate_tensors = True
    with pytest.raises(Exception,
                       match='Infinity or NaN value encountered'):
        _ = d._assert_finite(t, 't')
def test_prob(self):
    t00 = np.random.randn(2, 3)
    t0 = T.as_tensor(t00)
    d = Distribution(T.float32, [2, 3], continuous=True,
                     reparameterized=True, event_ndims=1,
                     min_event_ndims=0)
    d.log_prob = Mock(return_value=t0)

    given = T.random.randn([5, 2, 3])
    ret = d.prob(given, group_ndims=1)
    self.assertEqual(d.log_prob.call_args, ((given, 1), {}))
    assert_allclose(ret, np.exp(t00))
def test_activation_functions(self):
    x = np.random.randn(2, 3, 4)
    x = np.concatenate([x, np.zeros([2, 3, 1])], axis=-1)
    self.assertTrue(np.any(x < 0))
    self.assertTrue(np.any(x > 0))
    self.assertTrue(np.any(x == 0))
    x_t = T.as_tensor(x)

    # test relu
    assert_allclose(T.nn.relu(x_t), x * (x >= 0))

    # test leaky_relu
    assert_allclose(
        T.nn.leaky_relu(x_t),
        x * (x >= 0) + (T.nn.LEAKY_RELU_DEFAULT_SLOPE * x * (x < 0)))
    assert_allclose(T.nn.leaky_relu(x_t, negative_slope=0.2),
                    x * (x >= 0) + (0.2 * x * (x < 0)))

    # test sigmoid
    assert_allclose(
        T.nn.sigmoid(x_t),
        np.where(x >= 0, 1. / (1 + np.exp(-x)),
                 np.exp(x) / (1 + np.exp(x))))

    # test log_sigmoid
    assert_allclose(
        T.nn.log_sigmoid(x_t),
        np.where(x >= 0, -np.log1p(np.exp(-x)), x - np.log1p(np.exp(x))))

    # test softmax
    def softmax(x, axis):
        x_max = np.max(x, axis=axis, keepdims=True)
        x_exp = np.exp(x - x_max)
        return x_exp / np.sum(x_exp, axis=axis, keepdims=True)

    for axis in [-3, -2, -1, 0, 1, 2]:
        assert_allclose(T.nn.softmax(x_t, axis=axis),
                        softmax(x, axis=axis))

    # test log_softmax
    def log_softmax(x, axis):
        x_max = np.max(x, axis=axis, keepdims=True)
        x_diff = x - x_max
        return x_diff - np.log(
            np.sum(np.exp(x_diff), axis=axis, keepdims=True))

    for axis in [-3, -2, -1, 0, 1, 2]:
        assert_allclose(T.nn.log_softmax(x_t, axis=axis),
                        log_softmax(x, axis=axis))

    # test softplus
    assert_allclose(T.nn.softplus(x_t), np.log1p(np.exp(x)))
def test_seed(self):
    T.random.seed(1234)
    x = T.to_numpy(T.random.normal(T.as_tensor(0.), T.as_tensor(1.)))
    y = T.to_numpy(T.random.normal(T.as_tensor(0.), T.as_tensor(1.)))
    self.assertFalse(np.allclose(x, y))

    T.random.seed(1234)
    z = T.to_numpy(T.random.normal(T.as_tensor(0.), T.as_tensor(1.)))
    assert_allclose(x, z)
def test_copy(self):
    mean = np.random.randn(3, 4)
    logstd = np.random.randn(2, 3, 4)
    mean_t = T.as_tensor(mean)
    logstd_t = T.as_tensor(logstd)
    normal = _MyBaseNormal(mean=mean_t, logstd=logstd_t, event_ndims=1,
                           xyz=123, reparameterized=False)
    self.assertEqual(normal.xyz, 123)

    with mock.patch('tensorkit.distributions.normal.copy_distribution',
                    wraps=copy_distribution) as f_copy:
        normal2 = normal.copy(event_ndims=2)
        self.assertIsInstance(normal2, _MyBaseNormal)
        self.assertEqual(normal2.xyz, 123)
        self.assertIs(normal2.mean, normal.mean)
        self.assertIs(normal2.logstd, normal.logstd)
        self.assertEqual(normal2.event_ndims, 2)
        self.assertEqual(normal2.reparameterized, False)
        self.assertEqual(f_copy.call_args, ((), {
            'cls': _MyBaseNormal,
            'base': normal,
            'attrs': ('mean', 'reparameterized', 'event_ndims', 'device',
                      'validate_tensors', 'xyz'),
            'mutual_attrs': (('std', 'logstd'),),
            'original_mutual_params': {'logstd': normal.logstd},
            'overrided_params': {'event_ndims': 2},
        }))
def make_random_adj_matrix(node_count: int,
                           p=0.1,
                           dtype=T.float_x(),
                           directed=True) -> T.SparseTensor:
    edge_count = int(node_count * node_count * p)
    indices = np.random.randint(0, node_count, size=[2, edge_count])
    if not directed:
        # mirror each sampled edge to make the index pattern symmetric
        indices = np.concatenate(
            [indices, np.stack([indices[1], indices[0]], axis=0)], axis=1)
    indices = T.as_tensor(indices, dtype=T.int64)
    values = T.abs(T.random.randn([T.shape(indices)[1]], dtype=dtype)) + 1e-6
    return T.sparse.make_sparse(indices, values,
                                shape=[node_count, node_count],
                                coord_first=True)
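# Illustrative only: a short usage sketch for `make_random_adj_matrix`,
# relying solely on the tensorkit sparse API already used in this file.
# Note that with `directed=False` the index pattern is mirrored, but the
# edge weights are sampled independently, so only the sparsity pattern
# (not the weight matrix) is symmetric.
def _demo_adj_matrix():
    adj = make_random_adj_matrix(10, p=0.2, directed=False)
    assert T.sparse.is_sparse_tensor(adj)
    dense = T.sparse.to_numpy(adj)
    assert dense.shape == (10, 10)
    assert np.all(dense >= 0.)  # weights are abs(randn) + 1e-6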
def test_add(self):
    x_observed = T.as_tensor(
        np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
    net = BayesianNet({'x': x_observed})
    d = UnitNormal([3, 4])
    self.assertNotIn('x', net)
    self.assertNotIn('y', net)

    # add an observed node
    x = net.add('x', d, n_samples=2, group_ndims=1)
    self.assertIs(net.get('x'), x)
    self.assertIs(net['x'], x)
    self.assertIn('x', net)
    self.assertListEqual(list(net), ['x'])

    self.assertIsInstance(x, StochasticTensor)
    self.assertIs(x.distribution, d)
    self.assertEqual(x.n_samples, 2)
    self.assertEqual(x.group_ndims, 1)
    self.assertEqual(x.reparameterized, True)
    self.assertIs(x.tensor, x_observed)
    self.assertEqual(T.shape(x.tensor), [2, 3, 4])

    # add an unobserved node
    y = net.add('y', d, group_ndims=1, reparameterized=False)
    self.assertIs(net.get('y'), y)
    self.assertIs(net['y'], y)
    self.assertIn('y', net)
    self.assertListEqual(list(net), ['x', 'y'])

    self.assertIsInstance(y, StochasticTensor)
    self.assertIs(y.distribution, d)
    self.assertEqual(y.n_samples, None)
    self.assertEqual(y.group_ndims, 1)
    self.assertEqual(y.reparameterized, False)
    self.assertEqual(T.shape(y.tensor), [3, 4])

    # error adding the same variable
    with pytest.raises(ValueError,
                       match="Stochastic tensor 'x' already exists."):
        _ = net.add('x', d)

    # test remove
    net.remove('x')
    self.assertNotIn('x', net)
    del net['y']
    self.assertNotIn('y', net)
def test_matmul(self):
    indices = T.as_tensor(np.random.randint(0, 50, size=[2, 200]))
    values = T.random.randn([200])
    shape = [60, 50]
    y = T.random.randn([50, 30])

    for force_coalesced in [False, True]:
        x = T.sparse.make_sparse(indices, values, shape=shape,
                                 force_coalesced=force_coalesced)
        assert_allclose(
            T.sparse.matmul(x, y),
            np.dot(T.sparse.to_numpy(x), T.to_numpy(y)),
            rtol=1e-4, atol=1e-6,
        )
def test_reduce_sum(self):
    indices = T.as_tensor(np.random.randint(0, 50, size=[2, 200]))
    values = T.random.randn([200])
    shape = [60, 50]

    for force_coalesced in [False, True]:
        x = T.sparse.make_sparse(indices, values, shape=shape,
                                 force_coalesced=force_coalesced)
        y = T.sparse.to_numpy(x)
        for axis in (None, 0, 1, -1, -2):
            assert_allclose(
                T.sparse.reduce_sum(x, axis=axis),
                np.sum(y, axis=axis),
                rtol=1e-4, atol=1e-6,
            )
def test_sample_and_log_prob(self):
    logits = np.random.randn(2, 3, 4)
    logits_t = T.as_tensor(logits)

    for int_dtype in int_dtypes:
        bernoulli = Bernoulli(logits=logits_t, event_ndims=1,
                              dtype=int_dtype)

        # n_samples is None
        t = bernoulli.sample()
        self.assertIsInstance(t, StochasticTensor)
        self.assertIs(t.distribution, bernoulli)
        self.assertEqual(T.get_dtype(t.tensor), int_dtype)
        self.assertEqual(t.n_samples, None)
        self.assertEqual(t.group_ndims, 0)
        self.assertEqual(t.reparameterized, False)
        self.assertIsInstance(t.tensor, T.Tensor)
        self.assertEqual(T.shape(t.tensor), [2, 3, 4])

        for log_pdf in [t.log_prob(), bernoulli.log_prob(t)]:
            assert_allclose(
                log_pdf,
                T.random.bernoulli_log_prob(given=t.tensor,
                                            logits=logits_t,
                                            group_ndims=1))

        # n_samples == 5
        t = bernoulli.sample(n_samples=5, group_ndims=-1)
        self.assertIsInstance(t, StochasticTensor)
        self.assertIs(t.distribution, bernoulli)
        self.assertEqual(T.get_dtype(t.tensor), int_dtype)
        self.assertEqual(t.n_samples, 5)
        self.assertEqual(t.group_ndims, -1)
        self.assertEqual(t.reparameterized, False)
        self.assertIsInstance(t.tensor, T.Tensor)
        self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])

        for log_pdf in [t.log_prob(-1), bernoulli.log_prob(t, -1)]:
            assert_allclose(
                log_pdf,
                T.random.bernoulli_log_prob(given=t.tensor,
                                            logits=logits_t,
                                            group_ndims=0))
def test_outputs(self):
    x_observed = T.as_tensor(
        np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
    net = BayesianNet({'x': x_observed})
    normal = UnitNormal([3, 4])
    x = net.add('x', normal)
    y = net.add('y', normal)

    # test single query
    x_out = net.output('x')
    self.assertIs(x_out, x.tensor)
    self.assertIsInstance(x_out, T.Tensor)
    assert_equal(x_out, x_observed)

    # test multiple queries
    x_out, y_out = net.outputs(iter(['x', 'y']))
    self.assertIs(x_out, x.tensor)
    self.assertIs(y_out, y.tensor)
    self.assertIsInstance(x_out, T.Tensor)
    self.assertIsInstance(y_out, T.Tensor)
    assert_equal(x_out, x_observed)
def do_check(pool_type, spatial_ndims, x, kernel_size, stride, padding,
             count_padded_zeros):
    kwargs = {}
    kernel_size = validate_conv_size('kernel_size', kernel_size,
                                     spatial_ndims)
    stride = validate_conv_size('stride', stride, spatial_ndims)
    padding = validate_padding(padding, kernel_size, [1] * spatial_ndims,
                               spatial_ndims)
    padding = [p[0] for p in padding]

    if pool_type == 'avg':
        kwargs['count_padded_zeros'] = count_padded_zeros
    elif not count_padded_zeros:
        return

    assert_allclose(
        getattr(T.nn, f'{pool_type}_pool{spatial_ndims}d')(
            T.as_tensor(x), padding=padding, kernel_size=kernel_size,
            stride=stride, **kwargs),
        getattr(ops, f'{pool_type}_pool_nd')(
            spatial_ndims, x, padding=padding, kernel_size=kernel_size,
            stride=stride, **kwargs),
        atol=1e-6, rtol=1e-4,
        err_msg=f'pool_type={pool_type}, '
                f'spatial_ndims={spatial_ndims}, '
                f'kernel_size={kernel_size}, '
                f'stride={stride}, '
                f'padding={padding}, '
                f'count_padded_zeros={count_padded_zeros}')