def test_sparseblockouter(self):
    o = tensor.ftensor4()
    x = tensor.ftensor3()
    y = tensor.ftensor3()
    xIdx = tensor.imatrix()
    yIdx = tensor.imatrix()

    out = self.outer_op(o, x, y, xIdx, yIdx)

    f = aesara.function(
        [o, x, y, xIdx, yIdx], out, on_unused_input="warn", mode=self.mode
    )

    o_val, x_val, y_val, xIdx_val, yIdx_val = self.outer_data()

    th_out = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
    ref_out = self.outer_numpy(o_val, x_val, y_val, xIdx_val, yIdx_val)

    utt.assert_allclose(ref_out, th_out)
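# For reference, a minimal NumPy sketch of the update the outer op is assumed
# to compute (mirroring what `self.outer_numpy` presumably does; this helper
# is hypothetical and not part of the test class): for each batch b,
# o[xIdx[b, i], yIdx[b, j]] += outer(x[b, i], y[b, j]).
def _outer_numpy_sketch(o, x, y, xIdx, yIdx):
    o = o.copy()
    for b in range(x.shape[0]):
        for i in range(xIdx.shape[1]):
            for j in range(yIdx.shape[1]):
                o[xIdx[b, i], yIdx[b, j]] += np.outer(x[b, i], y[b, j])
    return o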
def test_sparseblockgemvF(self):
    # Test the Fortran order for W (which can happen in the grad for some
    # graphs).
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    o = self.gemv_op(
        b.take(oIdx, axis=0),
        tensor.DimShuffle((False, False, False, False), (0, 1, 3, 2))(
            tensor.as_tensor_variable(W)
        ),
        h,
        iIdx,
        oIdx,
    )

    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    th_out = f(np.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val, oIdx_val)
    ref_out = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )

    utt.assert_allclose(ref_out, th_out)
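# For reference, a minimal NumPy sketch of the block-sparse gemv being tested
# (mirroring what `self.gemv_numpy` is assumed to compute; this helper is
# hypothetical): starting from the bias rows o, each output window j
# accumulates dot(h[b, i], W[iIdx[b, i], oIdx[b, j]]) over the input windows.
def _gemv_numpy_sketch(o, W, h, iIdx, oIdx):
    o = o.copy()
    for b in range(o.shape[0]):
        for j in range(o.shape[1]):
            for i in range(h.shape[1]):
                o[b, j] += np.dot(h[b, i], W[iIdx[b, i], oIdx[b, j]])
    return o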
def test_outer_infershape(self):
    o = tensor.ftensor4()
    x = tensor.ftensor3()
    y = tensor.ftensor3()
    xIdx = tensor.imatrix()
    yIdx = tensor.imatrix()

    self._compile_and_check(
        [o, x, y, xIdx, yIdx],
        [self.outer_op(o, x, y, xIdx, yIdx)],
        self.outer_data(),
        self.outer_class,
    )
def test_gemv_infershape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)],
        self.gemv_data(),
        self.gemv_class,
    )
def test_dot_infershape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [sparse_block_dot(W, h, iIdx, b, oIdx)],
        self.gemv_data(),
        self.gemv_class,
    )
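# Hedged note: `sparse_block_dot(W, h, iIdx, b, oIdx)` is assumed to be the
# user-facing wrapper over the gemv op exercised above, i.e. roughly
# `self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)`, which is why this
# test can reuse gemv_data() and gemv_class.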
def test_rebuild_strict(self):
    # Test fix for error reported at
    # https://groups.google.com/d/topic/aesara-users/BRK0UEB72XA/discussion
    w = tensor.imatrix()
    x, y = tensor.ivectors("x", "y")
    z = x * y

    # The ivector `x` is substituted with the imatrix `w` via `givens`; this
    # change of ndim is only allowed because `rebuild_strict=False`.
    f = aesara.function([w, y], z, givens=[(x, w)], rebuild_strict=False)

    z_val = f(np.ones((3, 5), dtype="int32"), np.arange(5, dtype="int32"))
    assert z_val.ndim == 2
    assert np.all(z_val == np.ones((3, 5)) * np.arange(5))
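# A hedged companion sketch (hypothetical test, not part of the original
# suite, assuming that with the default `rebuild_strict=True` a `givens`
# replacement whose type differs in ndim from the variable it replaces is
# rejected with a TypeError):
def test_rebuild_strict_sketch(self):
    w = tensor.imatrix()
    x, y = tensor.ivectors("x", "y")
    z = x * y
    with pytest.raises(TypeError):
        aesara.function([w, y], z, givens=[(x, w)], rebuild_strict=True)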
def test_on_real_input(self):
    x = dvector()
    rng = np.random.RandomState(23)
    xval = rng.randn(10)
    # `imag` of a real input is zero; `real` is the identity.
    assert np.all(0 == aesara.function([x], imag(x))(xval))
    assert np.all(xval == aesara.function([x], real(x))(xval))

    x = imatrix()
    xval = np.asarray(rng.randn(3, 3) * 100, dtype="int32")
    assert np.all(0 == aesara.function([x], imag(x))(xval))
    assert np.all(xval == aesara.function([x], real(x))(xval))
def test_sparseblockgemv_grad_shape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    go = aesara.grad(o.sum(), [b, W, h])

    f = aesara.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    # Just make sure that it runs correctly and all the shapes are ok.
    b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

    assert b_g.shape == b_val.shape
    assert h_g.shape == h_val.shape
    assert W_g.shape == W_val.shape
def test_sparseblockgemv(self):
    # Compares the numpy and aesara versions of sparseblockgemv.
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    ref_out = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )

    utt.assert_allclose(ref_out, th_out)
def test_SwitchingProcess():
    np.random.seed(2023532)

    test_states = np.r_[2, 0, 1, 2, 0, 1]
    test_dists = [
        Constant.dist(0),
        pm.Poisson.dist(100.0),
        pm.Poisson.dist(1000.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0],)
    assert np.all(test_sample[test_states == 0] == 0)
    assert np.all(0 < test_sample[test_states == 1])
    assert np.all(test_sample[test_states == 1] < 1000)
    assert np.all(100 < test_sample[test_states == 2])

    test_mus = np.r_[100, 100, 500, 100, 100, 100]
    test_dists = [
        Constant.dist(0),
        pm.Poisson.dist(test_mus),
        pm.Poisson.dist(10000.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0],)
    assert np.all(200 < test_sample[2] < 600)
    assert np.all(0 < test_sample[5] < 200)
    assert np.all(5000 < test_sample[test_states == 2])

    test_dists = [
        Constant.dist(0),
        pm.Poisson.dist(100.0),
        pm.Poisson.dist(1000.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    for i in range(len(test_dists)):
        test_logp = test_dist.logp(
            np.tile(test_dists[i].mode.eval(), test_states.shape)
        ).eval()
        assert test_logp[test_states != i].max() < test_logp[test_states == i].min()

    # Try a continuous mixture
    test_states = np.r_[2, 0, 1, 2, 0, 1]
    test_dists = [
        pm.Normal.dist(0.0, 1.0),
        pm.Normal.dist(100.0, 1.0),
        pm.Normal.dist(1000.0, 1.0),
    ]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    test_sample = test_dist.random()
    assert test_sample.shape == (test_states.shape[0],)
    assert np.all(test_sample[test_states == 0] < 10)
    assert np.all(50 < test_sample[test_states == 1])
    assert np.all(test_sample[test_states == 1] < 150)
    assert np.all(900 < test_sample[test_states == 2])

    # Make sure we can use a large number of distributions in the mixture
    test_states = np.ones(50)
    test_dists = [Constant.dist(i) for i in range(50)]
    test_dist = SwitchingProcess.dist(test_dists, test_states)
    assert np.array_equal(test_dist.shape, test_states.shape)

    with pytest.raises(TypeError):
        SwitchingProcess.dist([1], test_states)

    with aesara.change_flags(compute_test_value="off"):
        # Test for the case when a default can't be computed
        test_dist = pm.Poisson.dist(at.scalar())

        # Confirm that there's no default
        with pytest.raises(AttributeError):
            test_dist.default()

        # Let it try to sample using `Distribution.random` and fail
        with pytest.raises(ValueError):
            SwitchingProcess.dist([test_dist], test_states)

    # Evaluate multiple observed state sequences in an extreme case
    test_states = at.imatrix("states")
    test_states.tag.test_value = np.zeros((10, 4)).astype("int32")
    test_dist = SwitchingProcess.dist(
        [Constant.dist(0), Constant.dist(1)], test_states
    )
    test_obs = np.tile(np.arange(4), (10, 1)).astype("int32")
    test_logp = test_dist.logp(test_obs)
    exp_logp = np.tile(
        np.array([0.0] + [-np.inf] * 3, dtype=aesara.config.floatX), (10, 1)
    )
    assert np.array_equal(test_logp.tag.test_value, exp_logp)
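# For reference, a minimal NumPy/SciPy sketch of the log-likelihood semantics
# exercised above (this helper and its signature are hypothetical; it assumes
# each observation's log-probability comes from the component distribution
# selected by its state, here Constant(0) for state 0 and Poisson(mus[k - 1])
# for state k > 0):
def _switching_logp_sketch(obs, states, mus=(100.0, 1000.0)):
    from scipy import stats

    logp = np.empty(obs.shape, dtype=float)

    # Constant.dist(0): probability one at 0, zero elsewhere
    mask = states == 0
    logp[mask] = np.where(obs[mask] == 0, 0.0, -np.inf)

    # Poisson components, one per non-zero state
    for k, mu in enumerate(mus, start=1):
        mask = states == k
        logp[mask] = stats.poisson.logpmf(obs[mask], mu)

    return logp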