Example 1
def test_filter_grad():
    def compare_grads(lds):
        init_params, pair_params, node_params = lds

        dotter = randn_like(
            natural_filter_forward_general(init_params, pair_params,
                                           node_params)[0])

        def messages_to_scalar(messages):
            return contract(dotter, messages)

        def py_fun(node_params):
            messages, lognorm = natural_filter_forward_general(
                init_params, pair_params, node_params)
            return np.cos(lognorm) + messages_to_scalar(messages)

        def cy_fun(node_params):
            dense_messages, lognorm = _natural_filter_forward_general(
                init_params, pair_params, node_params)
            messages = unpack_dense_messages(dense_messages)
            return np.cos(lognorm) + messages_to_scalar(messages)

        g_py = grad(py_fun)(node_params)
        g_cy = grad(cy_fun)(node_params)

        assert allclose(g_py, g_cy)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_grads, rand_lds(n, T)
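Example 1 compares the gradient of a pure-Python filter against its Cython counterpart by contracting the structured output with fixed random weights (dotter), reducing everything to a scalar that grad can differentiate. A minimal self-contained sketch of that pattern, with impl_a and impl_b as hypothetical stand-ins for the Python/Cython pair:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad

npr.seed(0)
x = npr.randn(5)
dotter = npr.randn(5)                   # fixed random weights, the analogue of randn_like

def impl_a(x):
    return np.cumsum(x)                 # reference implementation

def impl_b(x):
    L = np.tril(np.ones((x.shape[0], x.shape[0])))
    return np.dot(L, x)                 # equivalent reformulation of cumsum

def scalar_a(x):
    return np.dot(dotter, impl_a(x))    # contract structured output to a scalar

def scalar_b(x):
    return np.dot(dotter, impl_b(x))

# Agreeing implementations must have agreeing gradients.
assert np.allclose(grad(scalar_a)(x), grad(scalar_b)(x))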
Example 2
def helper(homog):
    n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10, 50)
    lds = rand_lds(n, p, None if homog else T)
    states, data = generate_data(T, *lds)
    natparam = lds_standard_to_natparam(*lds)
    E_stats = natural_lds_Estep(natparam, data)
    assert allclose(sub(add(natparam, E_stats), E_stats), natparam)
Example 3
def test_smoother_grads():
    def compare_smoother_grads(lds):
        init_params, pair_params, node_params = lds

        symmetrize = make_unop(lambda x: (x + x.T)/2. if np.ndim(x) == 2 else x, tuple)

        messages, _ = natural_filter_forward_general(*lds)
        dotter = randn_like(natural_smoother_general(messages, *lds))

        def py_fun(messages):
            result = natural_smoother_general(messages, *lds)
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        dense_messages, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        def cy_fun(messages):
            result = _natural_smoother_general(messages, pair_params)
            result = result[0][:3], result[1], result[2]
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        result_py = py_fun(messages)
        result_cy = cy_fun(dense_messages)
        assert np.isclose(result_py, result_cy)

        g_py = grad(py_fun)(messages)
        g_cy = unpack_dense_messages(grad(cy_fun)(dense_messages))

        assert allclose(g_py, g_cy)

    npr.seed(0)
    for _ in xrange(50):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_smoother_grads, rand_lds(n, T)
Example 4
def test_sampler_grads():
    def compare_sampler_grads(lds, num_samples, seed):
        init_params, pair_params, node_params = lds

        messages, _ = natural_filter_forward_general(init_params, pair_params,
                                                     node_params)

        def fun1(messages):
            npr.seed(seed)
            samples = natural_sample_backward_general(messages, pair_params,
                                                      num_samples)
            return np.sum(np.sin(samples))

        grads1 = grad(fun1)(messages)

        messages, _ = _natural_filter_forward_general(init_params, pair_params,
                                                      node_params)

        def fun2(messages):
            npr.seed(seed)
            samples = _natural_sample_backward(messages, pair_params,
                                               num_samples)
            return np.sum(np.sin(samples))

        grads2 = grad(fun2)(messages)

        unpack_dense_grads = lambda x: interleave(*map(lambda y: zip(*y), x))

        assert allclose(grads1, unpack_dense_grads(grads2))

    npr.seed(0)
    for i in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        num_samples = npr.randint(1, 10)
        yield compare_sampler_grads, rand_lds(n, T), num_samples, i
Example 5
def test_sampler_grads():
    def compare_sampler_grads(lds, num_samples, seed):
        init_params, pair_params, node_params = lds

        messages, _ = natural_filter_forward_general(
            init_params, pair_params, node_params)
        def fun1(messages):
            npr.seed(seed)
            samples = natural_sample_backward_general(messages, pair_params, num_samples)
            return np.sum(np.sin(samples))
        grads1 = grad(fun1)(messages)

        messages, _ = _natural_filter_forward_general(
                init_params, pair_params, node_params)
        def fun2(messages):
            npr.seed(seed)
            samples = _natural_sample_backward(messages, pair_params, num_samples)
            return np.sum(np.sin(samples))
        grads2 = grad(fun2)(messages)

        unpack_dense_grads = lambda x: interleave(*map(lambda y: zip(*y), x))

        assert allclose(grads1, unpack_dense_grads(grads2))

    npr.seed(0)
    for i in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        num_samples = npr.randint(1,10)
        yield compare_sampler_grads, rand_lds(n, T), num_samples, i
Example 6
def helper(homog):
    n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10, 50)
    lds = rand_lds(n, p, None if homog else T)
    states, data = generate_data(T, *lds)
    natparam = lds_standard_to_natparam(*lds)
    E_stats = natural_lds_Estep(natparam, data)
    assert allclose(sub(add(natparam, E_stats), E_stats), natparam)
Example 7
def test_filter_grad():
    def compare_grads(lds):
        init_params, pair_params, node_params = lds

        dotter = randn_like(natural_filter_forward_general(
            init_params, pair_params, node_params)[0])

        def messages_to_scalar(messages):
            return contract(dotter, messages)

        def py_fun(node_params):
            messages, lognorm = natural_filter_forward_general(
                init_params, pair_params, node_params)
            return np.cos(lognorm) + messages_to_scalar(messages)

        def cy_fun(node_params):
            dense_messages, lognorm = _natural_filter_forward_general(
                init_params, pair_params, node_params)
            messages = unpack_dense_messages(dense_messages)
            return np.cos(lognorm) + messages_to_scalar(messages)

        g_py = grad(py_fun)(node_params)
        g_cy = grad(cy_fun)(node_params)

        assert allclose(g_py, g_cy)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_grads, rand_lds(n, T)
Example 8
def test_pack_dense():
    npr.seed(0)

    def check_params(natparam):
        natparam2 = pack_dense(*unpack_dense(natparam))
        assert np.allclose(natparam, natparam2)

    for _ in xrange(5):
        n, k = npr.randint(1, 5), npr.randint(1, 3)
        yield check_params, rand_natparam(n, k)
Example 9
def test_param_conversion():
    npr.seed(0)

    def check_params(natparam):
        natparam2 = standard_to_natural(*natural_to_standard(natparam))
        assert np.allclose(natparam, natparam2)

    for _ in xrange(5):
        n, k = npr.randint(1, 5), npr.randint(1, 3)
        yield check_params, rand_natparam(n, k)
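test_pack_dense and test_param_conversion both check an inverse-pair property: a conversion composed with its inverse must reproduce the input. A minimal sketch of the same round-trip check, using the textbook Gaussian standard/natural conversion rather than the repo's pack_dense layout:

import numpy as np
import numpy.random as npr

def standard_to_natural(mu, Sigma):
    J = np.linalg.inv(Sigma)       # precision matrix
    h = np.dot(J, mu)              # precision-weighted mean
    return J, h

def natural_to_standard(J, h):
    Sigma = np.linalg.inv(J)
    mu = np.dot(Sigma, h)
    return mu, Sigma

npr.seed(0)
n = 4
A = npr.randn(n, n)
mu, Sigma = npr.randn(n), np.dot(A, A.T) + n * np.eye(n)   # SPD covariance

mu2, Sigma2 = natural_to_standard(*standard_to_natural(mu, Sigma))
assert np.allclose(mu, mu2) and np.allclose(Sigma, Sigma2)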
Example 12
def test_expectedstats_autograd():
    npr.seed(0)

    def check_expectedstats(natparam):
        E_stats1 = expectedstats(natparam)
        E_stats2 = grad(logZ)(natparam)
        assert np.allclose(E_stats1, E_stats2)

    for _ in xrange(20):
        n, k = npr.randint(1, 5), npr.randint(1, 3)
        yield check_expectedstats, rand_natparam(n, k)
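test_expectedstats_autograd exercises the exponential-family identity that expected sufficient statistics are the gradient of the log partition function, E[t(x)] = ∇_η log Z(η). A minimal sketch of the identity for a Bernoulli with natural parameter η, where log Z(η) = log(1 + e^η) and E[x] = σ(η):

import autograd.numpy as np
from autograd import grad

def logZ(eta):
    return np.log1p(np.exp(eta))      # Bernoulli log partition function

def expectedstats(eta):
    return 1. / (1. + np.exp(-eta))   # E[x] = sigmoid(eta)

eta = 0.3
assert np.isclose(expectedstats(eta), grad(logZ)(eta))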
Example 13
def test_lognorm_grad():
    def compare_lognorm_grads(hmm):
        dotter = npr.randn()
        py_grad = grad(lambda x: dotter * python_hmm_logZ(x))(hmm)
        cy_grad = grad(lambda x: dotter * cython_hmm_logZ(x))(hmm)
        assert allclose(py_grad, cy_grad)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 10), npr.randint(10, 50)
        yield compare_lognorm_grads, rand_hmm(n, T)
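The quantity whose gradients are compared above is the HMM log normalizer computed by the forward recursion. A minimal numerically-stable sketch, assuming the HMM is given as an initial distribution, a transition matrix, and per-step likelihoods (a simplification of whatever rand_hmm actually returns):

import numpy as np
import numpy.random as npr
from scipy.special import logsumexp

def hmm_logZ(log_pi, log_A, log_liks):
    # Forward recursion in log space: alpha_t(j) = sum_i alpha_{t-1}(i) A[i, j] lik_t(j)
    log_alpha = log_pi + log_liks[0]
    for log_lik in log_liks[1:]:
        log_alpha = logsumexp(log_alpha[:, None] + log_A, axis=0) + log_lik
    return logsumexp(log_alpha)

npr.seed(0)
n, T = 3, 10
pi = npr.dirichlet(np.ones(n))
A = npr.dirichlet(np.ones(n), size=n)    # rows sum to one
liks = npr.rand(T, n)
print(hmm_logZ(np.log(pi), np.log(A), np.log(liks)))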
Example 16
def test_lognorm():
    def compare_lognorms(hmm):
        py_logZ = python_hmm_logZ(hmm)
        cy_logZ = cython_hmm_logZ(hmm)
        cy_logZ2 = cython_hmm_logZ_normalized(hmm)[0]
        assert np.isclose(py_logZ, cy_logZ)
        assert np.isclose(py_logZ, cy_logZ2)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 10), npr.randint(10, 50)
        yield compare_lognorms, rand_hmm(n, T)
Example 18
def test_multivariate_normal_logpdf_shared_params(D=10):
    # Test broadcasting over datapoints with shared parameters
    leading_ndim = npr.randint(1, 4)
    shp = npr.randint(1, 10, size=leading_ndim)
    x = npr.randn(*shp, D)
    mu = npr.randn(D)
    L = npr.randn(D, D)
    Sigma = np.dot(L, L.T)

    ll1 = multivariate_normal_logpdf(x, mu, Sigma)
    ll2 = np.reshape(mvn.logpdf(x, mu, Sigma), shp)
    assert np.allclose(ll1, ll2)
Example 19
def test_multivariate_normal_logpdf_unique_params(D=10):
    # Test broadcasting over datapoints and corresponding parameters
    leading_ndim = npr.randint(1, 4)
    shp = npr.randint(1, 10, size=leading_ndim)
    x = npr.randn(*shp, D)
    mu = npr.randn(*shp, D)
    L = npr.randn(*shp, D, D)
    Sigma = np.matmul(L, np.swapaxes(L, -1, -2))

    ll1 = multivariate_normal_logpdf(x, mu, Sigma)
    ll2 = np.empty(shp)
    for inds in product(*[np.arange(s) for s in shp]):
        ll2[inds] = mvn.logpdf(x[inds], mu[inds], Sigma[inds])
    assert np.allclose(ll1, ll2)
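A broadcasting multivariate normal log density like the one under test can be written with batched Cholesky solves. A minimal sketch (batch_mvn_logpdf is a hypothetical helper, not the library's multivariate_normal_logpdf):

import numpy as np
import numpy.random as npr
from scipy.stats import multivariate_normal as mvn

def batch_mvn_logpdf(x, mu, Sigma):
    D = x.shape[-1]
    L = np.linalg.cholesky(Sigma)                         # batched Cholesky factor
    z = np.linalg.solve(L, (x - mu)[..., None])[..., 0]   # whitened residuals
    halflogdet = np.sum(np.log(np.diagonal(L, axis1=-2, axis2=-1)), axis=-1)
    return -0.5 * np.sum(z**2, axis=-1) - halflogdet - 0.5 * D * np.log(2 * np.pi)

npr.seed(0)
D = 3
x, mu = npr.randn(7, D), npr.randn(D)
A = npr.randn(D, D)
Sigma = np.dot(A, A.T) + D * np.eye(D)
assert np.allclose(batch_mvn_logpdf(x, mu, Sigma), mvn.logpdf(x, mu, Sigma))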
Example 20
def test_filters():
    def compare_filters(lds):
        init_params, pair_params, node_params = lds
        messages1, lognorm1 = natural_filter_forward_general(
            init_params, pair_params, node_params)
        dense_messages2, lognorm2 = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        messages2 = unpack_dense_messages(dense_messages2)

        assert allclose(messages1, messages2)
        assert np.isclose(lognorm1, lognorm2)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_filters, rand_lds(n, T)
Example 21
def test_trace_extradims():
    def fun(x): return to_scalar(np.trace(x, offset=offset))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    mat = npr.randn(5,6,4,3)
    offset = npr.randint(-5,6)
    check_grads(fun, mat)
    check_grads(d_fun, mat)
Example 22
def test_trace2():
    def fun(x): return np.trace(x, offset=offset)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    mat = npr.randn(11, 10)
    offset = npr.randint(-9,11)
    check_grads(fun, mat)
    check_grads(d_fun, mat)
Example 24
def test_E_step_inhomog():
    def compare_E_step(lds, data):
        natparam = lds_standard_to_natparam(*lds)
        E_init_stats, E_pairwise_stats, E_node_stats = natural_lds_Estep(natparam, data)
        E_init_stats2, E_pairwise_stats2, E_node_stats2 = pylds_E_step_inhomog(lds, data)

        assert all(map(np.allclose, E_init_stats, E_init_stats2))
        assert all(map(np.allclose, E_pairwise_stats, E_pairwise_stats2))
        assert all(map(np.allclose, E_node_stats, E_node_stats2))

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10,50)
        lds = rand_lds(n, p, T)
        states, data = generate_data(T, *lds)

        yield compare_E_step, lds, data
Example 26
def test_trace_extradims():
    def fun(x):
        return np.trace(x, offset=offset)

    mat = npr.randn(5, 6, 4, 3)
    offset = npr.randint(-5, 6)
    check_grads(fun)(mat)
Example 27
def test_trace2():
    def fun(x):
        return np.trace(x, offset=offset)

    mat = npr.randn(11, 10)
    offset = npr.randint(-9, 11)
    check_grads(fun)(mat)
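np.trace defaults to axis1=0 and axis2=1, so on a (5, 6, 4, 3) array it sums the (possibly offset) diagonal of the first two axes and returns a (4, 3) array; that is what makes the extra-dims test above nontrivial. A quick check of the shape semantics for the default offset:

import numpy as np
import numpy.random as npr

npr.seed(0)
mat = npr.randn(5, 6, 4, 3)
assert np.trace(mat).shape == (4, 3)
assert np.allclose(np.trace(mat), sum(mat[i, i] for i in range(5)))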
Example 29
    def testEinsumRepeatedOneHot(self):
        x = npr.randn(3, 2)
        y = npr.randn(3, 2)
        e = npr.randint(0, x.shape[0], 5)

        def fun(x, y, e):
            one_hot_e = tracers.one_hot(e, x.shape[0])
            return np.einsum('ab,bc,ad,dc->', one_hot_e, x, one_hot_e, y)

        expr = self._rewriter_test_helper(fun,
                                          rewrites.einsum_repeated_one_hot, x,
                                          y, e)
        self.assertEqual(len(expr.expr_node.args), 4)
        self.assertEqual(
            sum(node.fun == tracers.one_hot
                for node in expr.expr_node.parents), 1)

        def fun(x, y, e):
            one_hot_e = tracers.one_hot(e, x.shape[0])
            return np.einsum('ab,bc,ad,dc->ac', one_hot_e, x, one_hot_e, y)

        expr = self._rewriter_test_helper(fun,
                                          rewrites.einsum_repeated_one_hot, x,
                                          y, e)
        self.assertEqual(len(expr.expr_node.args), 4)
        self.assertEqual(
            sum(node.fun == tracers.one_hot
                for node in expr.expr_node.parents), 1)
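The rewrite under test relies on multiplication by a one-hot matrix being a gather: if E = one_hot(e, n), then E @ x == x[e]. A minimal numpy sketch of that identity (this one_hot is a stand-in for tracers.one_hot):

import numpy as np
import numpy.random as npr

def one_hot(e, n):
    return np.eye(n)[e]    # row e_i of the identity for each index

npr.seed(0)
x = npr.randn(3, 2)
e = npr.randint(0, 3, size=5)
assert np.allclose(np.einsum('ab,bc->ac', one_hot(e, 3), x), x[e])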
Example 30
def objective_grad_and_log_norm(var_param):
    seed = npr.randint(2**32)
    log_weights = compute_log_weights(var_param, seed)
    log_norm = np.max(log_weights)
    scaled_values = np.exp(log_weights - log_norm)**alpha
    obj_value = np.log(np.mean(scaled_values))/alpha + log_norm
    obj_grad = alpha*log_weights_vjp(var_param, seed, scaled_values) / scaled_values.size
    return (obj_value, obj_grad)
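The log_norm shift above is the usual log-sum-exp stabilization: since exp(log_weights - log_norm)**alpha equals exp(alpha * (log_weights - log_norm)), obj_value is just (logsumexp(alpha * log_weights) - log(n)) / alpha computed without overflow. A quick numerical check of that equivalence:

import numpy as np
from scipy.special import logsumexp

def stable_obj(log_weights, alpha):
    m = np.max(log_weights)
    scaled = np.exp(log_weights - m)**alpha
    return np.log(np.mean(scaled)) / alpha + m

lw, alpha = np.array([-1000., -1001., -1002.]), 2.
direct = (logsumexp(alpha * lw) - np.log(lw.size)) / alpha
assert np.isclose(stable_obj(lw, alpha), direct)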
Example 31
def test_filters():
    def compare_filters(lds, data):
        (filtered_mus, filtered_sigmas), loglike = filter_forward(data, *lds)

        messages, lognorm = natural_filter_forward(lds_standard_to_natparam(*lds), data)
        prediction_messages, filter_messages = uninterleave(messages)
        natural_filtered_mus, natural_filtered_sigmas = zip(*map(natural_to_mean, filter_messages))

        assert all(map(np.allclose, filtered_mus, natural_filtered_mus))
        assert all(map(np.allclose, filtered_sigmas, natural_filtered_sigmas))
        assert np.isclose(loglike, lognorm)

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10,50)
        lds = rand_lds(n, p)
        states, data = generate_data(T, *lds)

        yield compare_filters, lds, data
Example 32
    def generate_tagging_set(self, Xtr, size=20):
        indices = []
        for i in range(size):
            index = npr.randint(0, len(Xtr))
            if index in indices:
                continue
            indices.append(index)
            image = self.get_image(Xtr[index], index)
            image.save('tagging/decoy_mnist/' + str(index) + '.png')
Example 33
def test_E_step_inhomog():
    def compare_E_step(lds, data):
        natparam = lds_standard_to_natparam(*lds)
        E_init_stats, E_pairwise_stats, E_node_stats = natural_lds_Estep(
            natparam, data)
        E_init_stats2, E_pairwise_stats2, E_node_stats2 = pylds_E_step_inhomog(
            lds, data)

        assert all(map(np.allclose, E_init_stats, E_init_stats2))
        assert all(map(np.allclose, E_pairwise_stats, E_pairwise_stats2))
        assert all(map(np.allclose, E_node_stats, E_node_stats2))

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10, 50)
        lds = rand_lds(n, p, T)
        states, data = generate_data(T, *lds)

        yield compare_E_step, lds, data
Example 34
    def sample(self, total_steps=0):
        """Sample from the experience buffer by rank prioritization if specified.
        Otherwise sampling is done uniformly.

        Keyword arguments:
        total_steps -- number of steps taken in experiment (default: 0)
        """

        N = self.size
        num_samples = np.min(
            (self.batch_size * self.num_strata_samples, self.size))

        # Perform uniform sampling of experience buffer
        if not self.mem_priority:
            indices = npr.choice(range(N), replace=False, size=num_samples)
            exp_batch = np.array(self.exp_buffer)[indices]
            weights = np.ones(len(indices)) / (len(indices) * 1.0)
            return np.reshape(exp_batch, (num_samples, -1)), weights, indices
        # Perform prioritized sampling of experience buffer
        else:
            # Find the closest precomputed distribution by size
            dist_idx = math.floor(N / float(self.capacity) *
                                  self.num_partitions)
            distribution = self.distributions[int(dist_idx)]
            N = dist_idx * 100
            rank_indices_set = set()
            # Perform stratified sampling of priority queue
            for i_exp in range(num_samples)[::-1]:
                # To increase the training batch size we sample several times
                # from each stratum; repeated indices are eliminated
                stratum = i_exp / self.num_strata_samples
                rank_indices_set.add(
                    npr.randint(distribution['strata_ends'][stratum],
                                distribution['strata_ends'][stratum + 1]))
            rank_indices = list(rank_indices_set)
            exp_indices = self.pq.get_values_by_val(rank_indices)
            exp_batch = [
                self.exp_buffer[int(exp_idx)] for exp_idx in exp_indices
            ]

            # Compute importance sampling weights
            beta = np.min([
                self.beta_zero +
                (total_steps - self.num_init_train - 1) * self.beta_grad, 1
            ])
            IS_weights = np.power(N * distribution['pdf'][rank_indices],
                                  -1 * beta)

            # Normalize IS_weights by maximum weight, guarantees that IS weights only scale downwards
            w_max = np.max(IS_weights)
            IS_weights = IS_weights / float(w_max)
            return np.reshape(exp_batch,
                              (len(exp_indices), -1)), IS_weights, exp_indices
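The importance-sampling correction at the end of sample is the prioritized-replay formula w_i = (N * P(i))**(-beta), normalized by the largest weight so the correction can only shrink updates. The step in isolation, with pdf assumed to hold the sampling probabilities:

import numpy as np

def is_weights(pdf, indices, N, beta):
    # w_i = (N * P(i))**(-beta), then normalize by the max weight
    w = np.power(N * pdf[indices], -beta)
    return w / np.max(w)

pdf = np.array([0.4, 0.3, 0.2, 0.1])
print(is_weights(pdf, np.array([0, 3]), N=4, beta=0.5))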
Example 35
def test_filters():
    def compare_filters(lds, data):
        (filtered_mus, filtered_sigmas), loglike = filter_forward(data, *lds)

        messages, lognorm = natural_filter_forward(
            lds_standard_to_natparam(*lds), data)
        prediction_messages, filter_messages = uninterleave(messages)
        natural_filtered_mus, natural_filtered_sigmas = zip(
            *map(natural_to_mean, filter_messages))

        assert all(map(np.allclose, filtered_mus, natural_filtered_mus))
        assert all(map(np.allclose, filtered_sigmas, natural_filtered_sigmas))
        assert np.isclose(loglike, lognorm)

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10, 50)
        lds = rand_lds(n, p)
        states, data = generate_data(T, *lds)

        yield compare_filters, lds, data
Example 36
def test_samplers():
    def compare_samplers(lds, num_samples, seed):
        init_params, pair_params, node_params = lds

        npr.seed(seed)
        messages1, _ = natural_filter_forward_general(
            init_params, pair_params, node_params)
        samples1 = natural_sample_backward_general(messages1, pair_params, num_samples)

        npr.seed(seed)
        dense_messages2, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        samples2 = _natural_sample_backward(dense_messages2, pair_params, num_samples)

        assert np.allclose(samples1, samples2)

    npr.seed(0)
    for i in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        num_samples = npr.randint(1,10)
        yield compare_samplers, rand_lds(n, T), num_samples, i
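Reseeding the global generator immediately before each sampler is what makes the two stochastic code paths comparable draw-for-draw; it works only as long as both implementations consume the random stream identically. The pattern in isolation:

import numpy as np
import numpy.random as npr

seed = 42
npr.seed(seed); a = npr.randn(3)
npr.seed(seed); b = npr.randn(3)
assert np.array_equal(a, b)    # identical draws, so downstream outputs match exactly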
Example 37
def test_smoothers():
    def compare_smoothers(lds):
        init_params, pair_params, node_params = lds

        messages1, _ = natural_filter_forward_general(
            init_params, pair_params, node_params)
        E_init_stats1, E_pair_stats1, E_node_stats1 = \
            natural_smoother_general(messages1, *lds)

        dense_messages2, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        E_init_stats2, E_pair_stats2, E_node_stats2 = \
            _natural_smoother_general(dense_messages2, pair_params)

        assert allclose(E_init_stats1[:3], E_init_stats2[:3])
        assert allclose(E_pair_stats1, E_pair_stats2)
        assert allclose(E_node_stats1, E_node_stats2)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_smoothers, rand_lds(n, T)
Example 38
def test_smoothers():
    def compare_smoothers(lds):
        init_params, pair_params, node_params = lds

        messages1, _ = natural_filter_forward_general(init_params, pair_params,
                                                      node_params)
        E_init_stats1, E_pair_stats1, E_node_stats1 = \
            natural_smoother_general(messages1, *lds)

        dense_messages2, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        E_init_stats2, E_pair_stats2, E_node_stats2 = \
            _natural_smoother_general(dense_messages2, pair_params)

        assert allclose(E_init_stats1[:3], E_init_stats2[:3])
        assert allclose(E_pair_stats1, E_pair_stats2)
        assert allclose(E_node_stats1, E_node_stats2)

    npr.seed(0)
    for _ in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_smoothers, rand_lds(n, T)
Example 39
def test_general_inference():
    def get_general_node_params(x, lds):
        T, p = x.shape
        C, sigma_obs = lds[-2:]

        J, Jzx, Jxx, logZ = pair_mean_to_natural(C, sigma_obs)
        h = np.einsum('tzx,tx->tz', Jzx, x)
        logZ += np.einsum('ti,tij,tj->t', x, Jxx,
                          x) - p / 2. * np.log(2 * np.pi)

        return J, h, logZ

    def compare_E_step(lds, data):
        natparam = init_params, pair_params, node_params = lds_standard_to_natparam(
            *lds)
        general_node_params = get_general_node_params(data, lds)
        C, sigma_obs = lds[-2:]
        sample, E_stats, lognorm = natural_lds_inference(natparam, data)
        sample2, E_stats2, lognorm2 = natural_lds_inference_general(
            (init_params, pair_params), general_node_params)
        sample3, E_stats3, lognorm3 = natural_lds_inference_general_nosaving(
            (init_params, pair_params), general_node_params)
        sample4, E_stats4, lognorm4 = natural_lds_inference_general_autograd(
            (init_params, pair_params), general_node_params)

        assert allclose(E_stats[:-1], E_stats2[:-1])
        assert allclose(E_stats2, E_stats3)
        assert allclose(E_stats2, E_stats4)
        assert np.isclose(lognorm, lognorm2)
        assert np.isclose(lognorm, lognorm3)
        assert np.isclose(lognorm, lognorm4)

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10, 50)
        lds = rand_lds(n, p, T)
        states, data = generate_data(T, *lds)

        yield compare_E_step, lds, data
Example 40
def test_samplers():
    def compare_samplers(lds, num_samples, seed):
        init_params, pair_params, node_params = lds

        npr.seed(seed)
        messages1, _ = natural_filter_forward_general(init_params, pair_params,
                                                      node_params)
        samples1 = natural_sample_backward_general(messages1, pair_params,
                                                   num_samples)

        npr.seed(seed)
        dense_messages2, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        samples2 = _natural_sample_backward(dense_messages2, pair_params,
                                            num_samples)

        assert np.allclose(samples1, samples2)

    npr.seed(0)
    for i in xrange(25):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        num_samples = npr.randint(1, 10)
        yield compare_samplers, rand_lds(n, T), num_samples, i
Example 41
def test_smoother_grads():
    def compare_smoother_grads(lds):
        init_params, pair_params, node_params = lds

        symmetrize = make_unop(
            lambda x: (x + x.T) / 2. if np.ndim(x) == 2 else x, tuple)

        messages, _ = natural_filter_forward_general(*lds)
        dotter = randn_like(natural_smoother_general(messages, *lds))

        def py_fun(messages):
            result = natural_smoother_general(messages, *lds)
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        dense_messages, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)

        def cy_fun(messages):
            result = _natural_smoother_general(messages, pair_params)
            result = result[0][:3], result[1], result[2]
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        result_py = py_fun(messages)
        result_cy = cy_fun(dense_messages)
        assert np.isclose(result_py, result_cy)

        g_py = grad(py_fun)(messages)
        g_cy = unpack_dense_messages(grad(cy_fun)(dense_messages))

        assert allclose(g_py, g_cy)

    npr.seed(0)
    for _ in xrange(50):
        n, T = npr.randint(1, 5), npr.randint(10, 50)
        yield compare_smoother_grads, rand_lds(n, T)
Example 42
def test_categorical_logpdf(T=100, K=4, D=10, C=8):
    # Test single datapoint log pdf
    x = npr.randint(0, C, size=(T, D))
    logits = npr.randn(K, D, C)
    logits -= logsumexp(logits, axis=-1, keepdims=True)
    ps = np.exp(logits)
    log_ps = np.log(ps)

    ll1 = categorical_logpdf(x[:, None, :], logits)
    ll2 = np.zeros((T, K))
    for n in range(T):
        for k in range(K):
            for d in range(D):
                ll2[n, k] += log_ps[k, d, x[n, d]]
    assert np.allclose(ll1, ll2)
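The triple loop spells out ll2[n, k] = sum_d log_ps[k, d, x[n, d]]; the same gather can be done in one advanced-indexing step. A sketch of a vectorized equivalent (not necessarily how categorical_logpdf is implemented):

import numpy as np
import numpy.random as npr
from scipy.special import logsumexp

npr.seed(0)
T, K, D, C = 100, 4, 10, 8
x = npr.randint(0, C, size=(T, D))
logits = npr.randn(K, D, C)
log_ps = logits - logsumexp(logits, axis=-1, keepdims=True)

# Gather log_ps[k, d, x[n, d]] for all n, k, d at once -> shape (K, T, D)
ll = np.sum(log_ps[:, np.arange(D), x], axis=-1).T    # -> (T, K)
assert np.isclose(ll[0, 0], sum(log_ps[0, d, x[0, d]] for d in range(D)))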
Example 43
def test_general_inference():
    def get_general_node_params(x, lds):
        T, p = x.shape
        C, sigma_obs = lds[-2:]

        J, Jzx, Jxx, logZ = pair_mean_to_natural(C, sigma_obs)
        h = np.einsum('tzx,tx->tz', Jzx, x)
        logZ += np.einsum('ti,tij,tj->t', x, Jxx, x) - p/2.*np.log(2*np.pi)

        return J, h, logZ

    def compare_E_step(lds, data):
        natparam = init_params, pair_params, node_params = lds_standard_to_natparam(*lds)
        general_node_params = get_general_node_params(data, lds)
        C, sigma_obs = lds[-2:]
        sample, E_stats, lognorm = natural_lds_inference(natparam, data)
        sample2, E_stats2, lognorm2 = natural_lds_inference_general(
            (init_params, pair_params), general_node_params)
        sample3, E_stats3, lognorm3 = natural_lds_inference_general_nosaving(
            (init_params, pair_params), general_node_params)
        sample4, E_stats4, lognorm4 = natural_lds_inference_general_autograd(
            (init_params, pair_params), general_node_params)

        assert allclose(E_stats[:-1], E_stats2[:-1])
        assert allclose(E_stats2, E_stats3)
        assert allclose(E_stats2, E_stats4)
        assert np.isclose(lognorm, lognorm2)
        assert np.isclose(lognorm, lognorm3)
        assert np.isclose(lognorm, lognorm4)

    for _ in xrange(10):
        n, p, T = npr.randint(1, 5), npr.randint(1, 5), npr.randint(10,50)
        lds = rand_lds(n, p, T)
        states, data = generate_data(T, *lds)

        yield compare_E_step, lds, data
Example 44
    def __init__(self, K, D, M=0):
        super(NegativeBinomialSemiMarkovTransitions, self).__init__(K, D, M=M)

        # Initialize the super state transition probabilities
        self.Ps = npr.rand(K, K)
        np.fill_diagonal(self.Ps, 0)
        self.Ps /= self.Ps.sum(axis=1, keepdims=True)

        # Initialize the negative binomial duration probabilities
        self.rs = npr.randint(1, 11, size=K)
        # self.rs = np.ones(K, dtype=int)
        # self.ps = npr.rand(K)
        self.ps = 0.5 * np.ones(K)

        # Initialize the transition matrix
        self._trans_matrix = None
Example 45
    def __init__(self, K, D, M=0, r_min=1, r_max=20):
        assert K > 1, "Explicit duration models only work if num states > 1."
        super(NegativeBinomialSemiMarkovTransitions, self).__init__(K, D, M=M)

        # Initialize the super state transition probabilities
        self.Ps = npr.rand(K, K)
        np.fill_diagonal(self.Ps, 0)
        self.Ps /= self.Ps.sum(axis=1, keepdims=True)

        # Initialize the negative binomial duration probabilities
        self.r_min, self.r_max = r_min, r_max
        self.rs = npr.randint(r_min, r_max + 1, size=K)
        # self.rs = np.ones(K, dtype=int)
        # self.ps = npr.rand(K)
        self.ps = 0.5 * np.ones(K)

        # Initialize the transition matrix
        self._transition_matrix = None
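Both constructors build the same super-state transition structure: random positive rates with the diagonal zeroed (self-transitions are handled by the explicit duration model) and rows normalized to probabilities. The invariants in isolation:

import numpy as np
import numpy.random as npr

npr.seed(0)
K = 5
Ps = npr.rand(K, K)
np.fill_diagonal(Ps, 0)
Ps /= Ps.sum(axis=1, keepdims=True)

assert np.allclose(Ps.sum(axis=1), 1.0)    # rows are distributions
assert np.allclose(np.diag(Ps), 0.0)       # no super-state self-transitions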
Example 46
    # Initialize variational parameters
    rs = npr.RandomState(0)
    num_samples = 5
    init_mean = rs.randn(num_weights)
    init_log_std = -5 * np.ones(num_weights)
    variational_params = np.concatenate([init_mean, init_log_std])

    # Set up figure.
    fig = plt.figure(figsize=(8,8), facecolor='white')
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)

    for step in range(num_steps):
        # Grab a random datum
        datum_id = npr.randint(0, num_datums)

        # Assess expected reward across all possible actions (loop over context + action vectors)
        rewards = []
        contexts = np.zeros((num_actions, F))
        for aa in range(num_actions):
            contexts[aa,:] = np.hstack((x[datum_id, :], [aa]))
            outputs = generate_nn_output(variational_params,
                                         np.expand_dims(contexts[aa,:],0),
                                         num_weights,
                                         num_samples)
            rewards.append(np.mean(outputs))

        # Check which is greater and choose that [1,0] = eat | [0,1] do not eat
        # If argmax returns 0, then we eat, otherwise we don't
        action_chosen = np.argmax(rewards)
Example 47
def int_series(center, spread):
    return npr.randint(center - spread,
                       high=center + spread + 1,
                       size=count)
Example 48
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1.
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:, 0])
    rotations = np.stack(
        [np.cos(angles), -np.sin(angles),
         np.sin(angles),
         np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return 10 * npr.permutation(np.einsum('ti,tij->tj', features, rotations))


if __name__ == "__main__":
    seed_no = npr.randint(1000)
    print(seed_no)
    npr.seed(seed_no)
    #plt.ion()

    num_clusters = 5  # number of clusters in pinwheel data
    samples_per_cluster = 100  # number of samples per cluster in pinwheel
    T = 50  # Truncation level for number of components
    N = 2  # number of latent dimensions
    P = 2  # number of observation dimensions
    alpha = 1000  # scale parameter for DP
    niw_conc = 0.5  # concentration parameter for NIW prior

    # generate synthetic data
    #data = make_pinwheel_data(0.3, 0.05, num_clusters, samples_per_cluster, 0.25)
    filename = '/Users/ybansal/Documents/PhD/Courses/CS282/Project/Code/Data/xor.hkl'
Example 49
    def randn(self):
        # These arbitrary vectors are not analogous to randn in any meaningful way
        N = npr.randint(1, 3)
        return RKHSFun(self.kernel, dict(zip(npr.randn(N), npr.randn(N))))