Example #1
def test_vector_jacobian_product():
    # This function will have an asymmetric jacobian matrix.
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5)
    V = npr.randn(5)
    J = jacobian(fun)(a)
    check_equivalent(np.dot(V.T, J), vector_jacobian_product(fun)(a, V))
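A note on the example above: vector_jacobian_product(fun)(a, V) computes V^T J in a single reverse-mode pass, without materializing J. Depending on the autograd version, the same quantity is also available through make_vjp; the following is a minimal, self-contained sketch under that assumption.

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import make_vjp, jacobian

fun = lambda a: np.roll(np.sin(a), 1)
a, V = npr.randn(5), npr.randn(5)
vjp, value = make_vjp(fun)(a)   # value == fun(a); vjp maps V to V^T J
assert np.allclose(vjp(V), np.dot(V.T, jacobian(fun)(a)))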
Example #2
def test_hvp():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5)
    v = npr.randn(5)
    H = hessian(fun)(a)
    hvp = make_hvp(fun)(a)[0]
    check_equivalent(np.dot(H, v), hvp(v))
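The make_hvp call above returns a function that applies the Hessian to a vector without ever forming H. A minimal sketch of the same idea, using only grad, builds the HVP as the gradient of the directional derivative x -> grad(f)(x) . v:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad, hessian

fun = lambda a: np.sum(np.sin(a))
a, v = npr.randn(5), npr.randn(5)

# Sketch: differentiate the scalar map x -> <grad f(x), v> to obtain H(x) v.
hvp = grad(lambda x: np.dot(grad(fun)(x), v))
assert np.allclose(hvp(a), np.dot(hessian(fun)(a), v))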
Example #3
def test_items_values_keys():
    def fun(input_dict):
        A = 0.
        B = 0.
        for i, (k, v) in enumerate(sorted(input_dict.items(), key=op.itemgetter(0))):
            A = A + np.sum(np.sin(v)) * (i + 1.0)
            B = B + np.sum(np.cos(v))
        for v in input_dict.values():
            A = A + np.sum(np.sin(v))
        for k in sorted(input_dict.keys()):
            A = A + np.sum(np.cos(input_dict[k]))
        return A + B

    def d_fun(input_dict):
        g = grad(fun)(input_dict)
        A = np.sum(g['item_1'])
        B = np.sum(np.sin(g['item_1']))
        C = np.sum(np.sin(g['item_2']))
        return A + B + C

    input_dict = {'item_1' : npr.randn(5, 6),
                  'item_2' : npr.randn(4, 3),
                  'item_X' : npr.randn(2, 4)}

    check_grads(fun)(input_dict)
    check_grads(d_fun)(input_dict)
Example #4
def gen_prior(K_chol, sig2_omega, sig2_mu):
        th = np.zeros(parser.N)
        N = parser.idxs_and_shapes['mus'][1][0]
        parser.set(th, 'betas', K_chol.dot(npr.randn(len(lam0), K)).T)
        parser.set(th, 'omegas', np.sqrt(sig2_omega) * npr.randn(N, K))
        parser.set(th, 'mus', np.sqrt(sig2_mu) * npr.randn(N))
        return th
Example #5
def dot_equivalent():
    # MNIST-scale convolution operation
    import autograd.scipy.signal
    dat = npr.randn(256, 3, 24, 5, 24, 5)
    kernel = npr.randn(3, 5, 5)
    with tictoc():
        np.tensordot(dat, kernel, axes=[(1, 3, 5), (0, 1, 2)])
Example #6
def test_simple_concatenate():
    A = npr.randn(5, 6, 4)
    B = npr.randn(4, 6, 4)
    def fun(x): return to_scalar(np.concatenate((A, x)))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, B)
    check_grads(d_fun, B)
Example #7
def test_concatenate_axis_1():
    A = npr.randn(5, 6, 4)
    B = npr.randn(5, 6, 4)
    def fun(x): return to_scalar(np.concatenate((B, x, B), axis=1))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #8
def test_rts_backward_step():
    npr.seed(0)
    n = 3

    Jns = rand_psd(n)
    hns = npr.randn(n)
    mun = npr.randn(n)

    Jnp = rand_psd(n)
    hnp = npr.randn(n)

    Jf = rand_psd(n) + 10*np.eye(n)
    hf = npr.randn(n)

    bigJ = rand_psd(2*n)
    J11, J12, J22 = -1./2*bigJ[:n,:n], -bigJ[:n,n:], -1./2*bigJ[n:,n:]

    next_smooth = -1./2*Jns, hns, mun
    next_pred = -1./2*Jnp, hnp
    filtered = -1./2*Jf, hf

    pair_param = J11, J12, J22, 0.

    Js1, hs1, (mu1, ExxT1, ExxnT1) = natural_rts_backward_step(
        next_smooth, next_pred, filtered, pair_param)
    Js2, hs2, (mu2, ExxT2, ExnxT2) = rts_backward_step(
        next_smooth, next_pred, filtered, pair_param)

    assert np.allclose(Js1, Js2)
    assert np.allclose(hs1, hs2)

    assert np.allclose(mu1, mu2)
    assert np.allclose(ExxT1, ExxT2)
    assert np.allclose(ExxnT1, ExnxT2)
Example #9
def test_natural_predict_grad():
    npr.seed(0)
    n = 3

    J = rand_psd(n)
    h = npr.randn(n)
    bigJ = rand_psd(2*n)
    J11, J12, J22 = bigJ[:n,:n], bigJ[:n,n:], bigJ[n:,n:]
    logZ = npr.randn()
    J, J11, J12, J22 = -1./2*J, -1./2*J11, -J12, -1./2*J22

    ans = natural_predict((J, h), J11, J12, J22, logZ)
    dotter = (randn_like(J), randn_like(h)), randn_like(1.)

    def foo(*args):
        (J, h), logZ = natural_predict(*args)
        (a, b), c = dotter
        return np.sum(a*J) + np.sum(b*h) + c*logZ

    result1 = grad(foo)((J, h), J11, J12, J22, logZ)
    result2 = natural_predict_grad(dotter, ans, (J, h), J11, J12, J22, logZ)

    L, v, v2, temp, _, _ = natural_predict_forward_temps(J, J11, J12, h)
    result3 = _natural_predict_grad(dotter[0][0], dotter[0][1], dotter[1], -J12, L, v, v2, temp)

    for a, b in zip(result1, result2):
        check(a, b)

    for a, b in zip(result2, result3):
        check(a, b)
Example #10
def test_concatenate_axis_1_unnamed():
    """Tests whether you can specify the axis without saying "axis=1"."""
    A = npr.randn(5, 6, 4)
    B = npr.randn(5, 6, 4)
    def fun(x): return to_scalar(np.concatenate((B, x, B), 1))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #11
def test_solve_arg1_1d():
    D = 8
    A = npr.randn(D, D) + 10.0 * np.eye(D)
    B = npr.randn(D)
    def fun(a): return to_scalar(np.linalg.solve(a, B))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #12
def test_solve_arg1_3d_3d():
    D = 4
    A = npr.randn(D+1, D, D) + 5*np.eye(D)
    B = npr.randn(D+1, D, D+2)
    fun = lambda A: to_scalar(np.linalg.solve(A, B))
    d_fun = lambda A: to_scalar(grad(fun)(A))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #13
def test_solve_arg2():
    D = 6
    A = npr.randn(D, D) + 1.0 * np.eye(D)
    B = npr.randn(D, D - 1)
    def fun(b): return to_scalar(np.linalg.solve(A, b))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, B)
    check_grads(d_fun, B)
Example #14
def test_no_relation():
    c = npr.randn(3, 2)
    def fun(x):
        return to_scalar(c)
    A = npr.randn(3, 2)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #15
def test_norm_logpdf():
    x = npr.randn()
    l = npr.randn()
    scale = npr.rand()**2 + 1.1
    fun = autograd.scipy.stats.norm.logpdf
    d_fun = grad(fun)
    check_grads(fun, x, l, scale)
    check_grads(d_fun, x, l, scale)
Example #16
def convolution():
    # MNIST-scale convolution operation
    import autograd.scipy.signal
    convolve = autograd.scipy.signal.convolve
    dat = npr.randn(256, 3, 28, 28)
    kernel = npr.randn(3, 5, 5)
    with tictoc():
        convolve(dat, kernel, axes=([2, 3], [1, 2]), dot_axes=([1], [0]))
Example #17
def test_non_numpy_sum():
    def fun(x, y):
        return to_scalar(sum([x, y]))
    d_fun = lambda x, y : to_scalar(grad(fun)(x, y))
    mat1 = npr.randn(10, 11)
    mat2 = npr.randn(10, 11)
    check_grads(fun, mat1, mat2)
    check_grads(d_fun, mat1, mat2)
Example #18
def test_inv_3d():
    fun = lambda x: np.linalg.inv(x)

    D = 4
    mat = npr.randn(D, D, D) + 5*np.eye(D)
    check_grads(fun)(mat)

    mat = npr.randn(D, D, D, D) + 5*np.eye(D)
    check_grads(fun)(mat)
Example #19
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x, x)) + np.cos(np.dot(x, x))

    assert jacobian(fun)(npr.randn(3)).shape == (3, 3, 3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3, 3, 3, 3)
    assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3, 3, 3, 3, 3)

    check_grads(lambda x: np.sum(jacobian(fun)(x)), npr.randn(3))
    check_grads(lambda x: np.sum(jacobian(jacobian(fun))(x)), npr.randn(3))
Example #20
def test_solve_triangular_arg2_2d():
    D = 6
    A = npr.randn(D, D) + 10*np.eye(D)
    trans_options = ['T', 'N', 'C', 0, 1, 2]
    lower_options = [True, False]
    for trans, lower in itertools.product(trans_options, lower_options):
        def fun(B):
            return to_scalar(spla.solve_triangular(A, B, trans=trans, lower=lower))
        yield check_grads, fun, npr.randn(D, D-1)
Example #21
def test_make_ggnvp_broadcasting():
  A = npr.randn(4, 5)
  x = npr.randn(10, 4)
  v = npr.randn(10, 4)

  fun = lambda x: np.tanh(np.dot(x, A))
  res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
  res2 = make_ggnvp(fun)(x)(v)
  check_equivalent(res1, res2)
Example #22
def test_r_slicing():
    c = npr.randn(10)
    def fun(x):
        b = np.r_[x, c, 1:10]
        return to_scalar(b)
    A = npr.randn(10)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #23
def test_r_node_and_const():
    c = npr.randn(3, 2)
    def fun(x):
        b = np.r_[x, c]
        return to_scalar(b)
    A = npr.randn(3, 2)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #24
def test_c_mixed():
    c = npr.randn(3, 2)
    def fun(x):
        b = np.c_[x, c, x]
        return to_scalar(b)
    A = npr.randn(3, 2)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #25
def test_hessian():
    # Check Hessian of a quadratic function.
    D = 5
    H = npr.randn(D, D)
    def fun(x):
        return np.dot(np.dot(x, H),x)
    hess = hessian(fun)
    x = npr.randn(D)
    check_equivalent(hess(x), H + H.T)
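A quick derivation of the expected answer: for the quadratic f(x) = x^T H x, the gradient is (H + H^T) x, so the Hessian is the constant matrix H + H^T, independent of x; that is exactly what check_equivalent verifies above.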
Example #26
def test_no_relation():
    with warnings.catch_warnings(record=True) as w:
        c = npr.randn(3, 2)
        def fun(x):
            return to_scalar(c)
        A = npr.randn(3, 2)
        d_fun = lambda x : to_scalar(grad(fun)(x))
        check_grads(fun, A)
        check_grads(d_fun, A)
Example #27
def test_complex_separate_real_and_imaginary():
    def fun(a):
        r, i = np.real(a), np.imag(a)
        a = np.abs(r)**1.4 + np.abs(i)**1.3
        return np.sum(np.sin(a))
    d_fun = lambda x : grad(fun)(x)
    A = npr.randn(5, 3) + 0.1j*npr.randn(5, 3)
    check_grads(fun)(A)
    check_grads(d_fun)(A)
Example #28
def test_complex_separate_real_and_imaginary():
    def fun(a):
        r, i = np.real(a), np.imag(a)
        a = np.abs(r)**1.4 + np.abs(i)**1.3
        return to_scalar(a)
    d_fun = lambda x : to_scalar(grad(fun)(x))
    A = npr.randn(5, 3) + 0.1j*npr.randn(5, 3)
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #29
def test_where():
    def fun(x, y):
        b = np.where(C, x, y)
        return to_scalar(b)
    C = npr.randn(4, 5) > 0
    A = npr.randn(4, 5)
    B = npr.randn(4, 5)
    d_fun = lambda a, b : to_scalar(grad(fun)(a, b))
    check_grads(fun, A, B)
    check_grads(d_fun, A, B)
Example #30
def stat_check(fun):
    # Tests functions that compute statistics, like sum, mean, etc
    x = 3.5
    A = npr.randn()
    B = npr.randn(3)
    C = npr.randn(2, 3)
    D = npr.randn(1, 3)
    combo_check(fun, (0,), [x, A])
    combo_check(fun, (0,), [B, C, D], axis=[None, 0], keepdims=[True, False])
    combo_check(fun, (0,), [C, D], axis=[None, 0, 1], keepdims=[True, False])
        print("Epoch: %d ELBO: %e" % (epoch, elbo_est / np.ceil(N / batch_size)))

    # We save the trained params so we don't have to retrain each time
    np.save(os.path.join("trained_params", "data.npy"), flattened_current_params)

    # We obtain the final trained parameters
    flattened_current_params = np.load(os.path.join("trained_params", "data.npy"))

    gen_params, rec_params = unflat_params(flattened_current_params)

    ####---- Task 3.1 ---####
    
    # We generate 25 samples from the prior.
    # Note the prior P(z) is a standard Gaussian
    num_prior_images = 25
    z = npr.randn(num_prior_images, latent_dim)

    # Generate the images using the prior and gen params
    generated_images = neural_net_predict(gen_params, z)

    # Convert the logits to probabilities
    sigmoid_generated_images = sigmoid(generated_images)
    save_images(sigmoid_generated_images, os.path.join("saved_images", "gen_prior_25.png"))

    ####---- Task 3.2 ---####
    
    # Select specific number of test images
    num_test_images = 10
    test_images = test_images[0:num_test_images, :]

    # Generate encoded output for the test images
Example #32
def test_jacobian_scalar_to_vector():
    fun = lambda x: np.array([x, x**2, x**3])
    val = npr.randn()
    assert np.allclose(jacobian(fun)(val), np.array([1., 2 * val, 3 * val**2]))
Example #33
def test_jacobian_against_grad():
    fun = lambda x: np.sum(np.sin(x), axis=1, keepdims=True)
    A = npr.randn(1, 3)
    assert np.allclose(grad(fun)(A), jacobian(fun)(A))
Example #34
def test_laplace_em(T=100, N=15, K=3, D=10):
    # Check that laplace-em works for each transition and emission model
    # so long as the dynamics are linear-gaussian.
    for transitions in [
            "stationary",
            "sticky",
            "inputdriven",
            "recurrent",
            "recurrent_only",
    ]:

        for emissions in [
                "gaussian",
                "gaussian_orthog",
                "poisson",
                "poisson_orthog",
                "bernoulli",
                "bernoulli_orthog",
        ]:
            for input_dim in [0, 1]:
                true_slds = ssm.SLDS(N,
                                     K,
                                     D,
                                     M=input_dim,
                                     transitions=transitions,
                                     dynamics="gaussian",
                                     emissions=emissions)

                # Test with a random number of data arrays
                num_datas = npr.randint(1, 5)
                Ts = T + npr.randint(20, size=num_datas)
                us = [npr.randn(Ti, input_dim) for Ti in Ts]
                datas = [
                    true_slds.sample(Ti, input=u) for Ti, u in zip(Ts, us)
                ]
                zs, xs, ys = list(zip(*datas))

                # Fit an SLDS to the data
                fit_slds = ssm.SLDS(N,
                                    K,
                                    D,
                                    M=input_dim,
                                    transitions=transitions,
                                    dynamics="gaussian",
                                    emissions=emissions)
                try:
                    fit_slds.fit(ys,
                                 inputs=us,
                                 initialize=True,
                                 num_init_iters=2,
                                 num_iters=5)

                # So that we can still interrupt the test.
                except KeyboardInterrupt:
                    raise

                # So that we know which test case fails...
                except:
                    print("Error during fit with Laplace-EM. Failed with:")
                    print("Emissions = {}".format(emissions))
                    print("Transitions = {}".format(transitions))
                    raise
Example #35
 def test_solve_arg2(self):
     D = 6
     A = npr.randn(D, D) + 1.0 * np.eye(D)
     B = npr.randn(D, D - 1)
     def fun(b): return np.linalg.solve(A, b)
     check_grads(fun)(B)
Example #36
 def test_solve_arg1_3d_3d(self):
     D = 4
     A = npr.randn(D+1, D, D) + 5 * np.eye(D)
     B = npr.randn(D+1, D, D + 2)
     fun = lambda A: np.linalg.solve(A, B)
     check_grads(fun)(A)
Example #37
def rand_psd(D):
    mat = npr.randn(D, D)
    return np.dot(mat, mat.T)
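Note on rand_psd: for any real matrix A and vector x, x^T (A A^T) x = ||A^T x||^2 >= 0, so the returned matrix is symmetric positive semidefinite (and, for a Gaussian A, positive definite with probability one).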
Example #38
 def sample_y(self, z, x, input=None, tag=None):
     mus = self.compute_mus(x)
     etas = np.exp(self.inv_etas)
     return mus + np.sqrt(etas) * npr.randn(*mus.shape)
Example #39
 def obs(self, state):
     noise = self.noise
     vel, ang_vel = state[-2:]
     vel = vel + random.randn(1) * noise[5]
     ang_vel = ang_vel + random.randn(1) * noise[6]
     return np.array((vel, ang_vel)).reshape(-1)
Example #40
 def fun(x):
     c = npr.randn(3, 2)
     b = np.r_[x, x]
     return to_scalar(b)
Example #41
        psi_t = psi_solution(ix, net_out)

        grad_of_psi = psi_grad(ix, net_out)

        func = f(ix, psi_t)

        err_squared = np.abs(grad_of_psi - func)
        cost_sum += err_squared
    return cost_sum


'''
Main meat of the program.
'''

W = [npr.randn(1, nx), npr.randn(nx, 1)]
lmb = 0.0001

for i in range(5000):
    loss_grad = grad(cost_function)(W, x_space)

    W[0] -= lmb * loss_grad[0]
    W[1] -= lmb * loss_grad[1]

print(cost_function(W, x_space))
res = [psi_solution(ix, neural_network(W, ix)[0][0]) for ix in x_space]
#rk4_res = sio.loadmat('./ex1.mat')['w']
#rk4_res = rk4_res.flatten()

plt.figure()
plt.plot(x_space, analytic_solution(x_space))
Example #42
 def sample(self, z, x, input=None, tag=None):
     T = z.shape[0]
     z = np.zeros_like(z, dtype=int) if self.single_subspace else z
     mus = self.forward(x, input, tag)
     etas = np.exp(self.inv_etas)
     return mus[np.arange(T), z, :] + np.sqrt(etas[z]) * npr.randn(T, self.N)
Example #43
def rand_logistic_norm(N, K):
    """ Randomly sample N rows, each of K dimensions such that each row sums
    to one"""
    z = npr.randn(N, K-1)
    return logistic(z)
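The logistic helper is not defined in this snippet; a common choice that maps an N x (K-1) Gaussian draw to rows on the K-simplex is the additive logistic transform. A hypothetical sketch under that assumption:

import autograd.numpy as np

def logistic(z):
    # Hypothetical helper: additive logistic transform. Each output row is
    # (exp(z_1), ..., exp(z_{K-1}), 1) / (1 + sum_j exp(z_j)), so it sums to one.
    expz = np.exp(z)
    denom = 1.0 + np.sum(expz, axis=1, keepdims=True)
    return np.hstack([expz / denom, 1.0 / denom])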
Example #44
def logprior(var_par, draw):
	l = int(var_par.shape[0]/2)
	sample = var_par[:l]+np.exp(var_par[l:])*npr.randn(l)
	# Assume 0-centered MV-normal prior with covariance matrix I
	return np.sum(mvn.logpdf(sample, mean=np.zeros(sample.shape), cov=np.diag(np.ones(sample.shape))))
Example #45
 def test_solve_arg1_1d(self):
     D = 8
     A = npr.randn(D, D) + 10.0 * np.eye(D)
     B = npr.randn(D)
     def fun(a): return np.linalg.solve(a, B)
     check_grads(fun)(A)
Example #46
def test_flatten_unflatten():
    for vs in everything:
        v = npr.randn(vs.size)
        v2 = vs.flatten(vs.unflatten(v))
        assert np.all(v2 == v), \
            report_flatten_unflatten(vs, v, v2)
Example #47
def logl(var_par, draw, data):
	l = int(var_par.shape[0]/2)
	sample = var_par[:l]+np.exp(var_par[l:])*npr.randn(l)
	#Log likelihood for logistic regression
	x,y = data[:, :-1], data[:, -1]
	return -1*np.log(1+ np.exp(-y*np.dot(x,sample)))
Example #48
            print(training_text.replace(
                '\n', ' ') + "| " + predicted_text.replace('\n', ' '))

    def callback(weights):
        print "Train loss:", loss_fun(weights, train_inputs, train_targets)
        print_training_prediction(weights, train_inputs, train_targets)

    # Build gradient of loss function using autograd.
    loss_and_grad = grad(loss_fun, return_function_value=True)

    # Wrap function to only have one argument, for scipy.minimize.
    def training_loss_and_grad(weights):
        return loss_and_grad(weights, train_inputs, train_targets)

    init_weights = npr.randn(num_weights) * param_scale
    # Check the gradients numerically, just to be safe
    quick_grad_check(loss_fun, init_weights, (train_inputs, train_targets))

    print "Training LSTM..."
    result = minimize(training_loss_and_grad,
                      init_weights,
                      jac=True,
                      method='CG',
                      options={'maxiter': train_iters},
                      callback=callback)
    trained_weights = result.x

    print "\nGenerating text from LSTM model..."
    num_letters = 30
    for t in range(20):
    def helper(shape, axis):
        def fun(x):
            return to_scalar(np.linalg.norm(x, axis=axis))

        arr = npr.randn(*shape)
        check_grads(fun, arr)
def init_net_params(layer_sizes, scale=1e-2):
    """Build a (weights, biases) tuples for all layers."""

    return [(scale * npr.randn(m, n),  # weight matrix
             scale * npr.randn(n))  # bias vector
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
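For context, the (weights, biases) list built by init_net_params is typically consumed by a feedforward pass over the layer pairs. A minimal sketch; the function name and the tanh nonlinearity are illustrative assumptions, not taken from the surrounding script:

import autograd.numpy as np

def feedforward(params, inputs):
    # Sketch: apply each (W, b) pair; tanh on hidden layers, linear output layer.
    for W, b in params[:-1]:
        inputs = np.tanh(np.dot(inputs, W) + b)
    W, b = params[-1]
    return np.dot(inputs, W) + b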
Example #51
def draw_samples(var_par, n_samples):
	l = int(var_par.shape[0]/2)
	return var_par[:l] + np.exp(var_par[l:])*npr.randn(n_samples,l)
Example #52
N = 100  # number of observed dimensions

# In[3]:

# Make an SLDS with the true parameters
true_slds = ssm.SLDS(N,
                     K,
                     D,
                     emissions="poisson_orthog",
                     emission_kwargs=dict(link="softplus"))

# Set rotational dynamics
for k in range(K):
    true_slds.dynamics.As[k] = .95 * random_rotation(
        D, theta=(k + 1) * np.pi / 20)
    true_slds.dynamics.bs[k] = 3 * npr.randn(D)

# Set an offset to make the counts larger
# true_slds.emissions.ds += 10

# Sample data
z, x, y = true_slds.sample(T)

# Mask off some data
mask = npr.rand(T, N) < 0.95
y_masked = y * mask

# In[4]:

plt.imshow(y.T, aspect="auto", interpolation="none")
plt.xlabel("time")
Example #53
    def helper(size, ord):
        def fun(x):
            return np.linalg.norm(x, ord=ord)

        vec = npr.randn(size)
        check_grads(fun, vec)
Example #54
y_fd = np.zeros_like(y_analytic)
y_fd[0] = 0.  # Initial Condition

for i in range(1, len(x_space)):
    y_fd[i] = y_fd[i - 1] + B(x_space[i]) * dx - y_fd[i - 1] * A(x_space[i]) * dx
"""plt.figure()
plt.plot(x_space, y_analytic)
plt.plot(x_space, y_fd)
plt.xlabel('x', fontsize = 14)
plt.ylabel('y', fontsize = 14)
plt.title('Solution of the First Order ODE', fontsize = 14)
plt.legend(['Analytic Solution','Finite Difference Solution'])
plt.show()"""

W = [npr.randn(1, 10), npr.randn(10, 1)]  #Random initialization of weights

lmb = 0.01  # Learning rate of the Neural Network

for i in range(1000):

    # Gradient of loss function w.r.t. weights
    loss_grad = grad(loss_function)(W, x_space)

    W[0] = W[0] - lmb * loss_grad[0]  # Updating weights
    W[1] = W[1] - lmb * loss_grad[1]  # Updating weights

print("The minimized cost function is: {0:.4f} \n".format(
    loss_function(W, x_space)))

y_nn = [xi * neural_network(W, xi)[0][0] for xi in x_space]
Example #55
def test_multi_index():
    A = npr.randn(3)
    fun = lambda x: np.sum(x[[0, 0]])
    d_fun = lambda x: to_scalar(grad(fun)(x))
    check_grads(fun, A)
    check_grads(d_fun, A)
Example #56
def rand_psd(n):
    A = npr.randn(n, n)
    return np.dot(A, A.T)
Example #57
# import neuron 1
d = np.load("../analyses/neuron1.npz")
us = list(d["us"])
ys = list(d["ys"])
all_ys = [y.astype(int) for y in ys]
all_us = us
num_trials = len(us)

# setup parameters
numTrials = 100
bin_size = 0.01
# bin_size = 1.0
N = 1
beta = np.array([-0.01,-0.005,0.0,0.01,0.02])
latent_ddm = RampingHard(N, M=5, link="softplus", beta = beta, log_sigma_scale=np.log(1e-3), x0=0.5, bin_size=bin_size)
latent_ddm.emissions.Cs[0] = 40.0 + 3.0 * npr.randn(N,1)

ys = []
xs = []
zs = []
us = []
# sample from model
for tr in range(numTrials):

	u = all_us[tr]
	T = np.shape(u)[0]
	z, x, y = latent_ddm.sample(T, input=u)

	zs.append(z)
	xs.append(x)
	ys.append(y)
Example #58
def test_lds_log_probability_perf(T=1000, D=10, N_iter=10):
    """
    Compare performance of banded method vs message passing in pylds.
    """
    print("Comparing methods for T={} D={}".format(T, D))

    from pylds.lds_messages_interface import kalman_info_filter, kalman_filter

    # Convert LDS parameters into info form for pylds
    As, bs, Qi_sqrts, ms, Ri_sqrts = make_lds_parameters(T, D)
    Qis = np.matmul(Qi_sqrts, np.swapaxes(Qi_sqrts, -1, -2))
    Ris = np.matmul(Ri_sqrts, np.swapaxes(Ri_sqrts, -1, -2))
    x = npr.randn(T, D)

    print("Timing banded method")
    start = time.time()
    for itr in range(N_iter):
        lds_log_probability(x, As, bs, Qi_sqrts, ms, Ri_sqrts)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))

    # Compare to Kalman Filter
    mu_init = np.zeros(D)
    sigma_init = np.eye(D)
    Bs = np.ones((D, 1))
    sigma_states = np.linalg.inv(Qis)
    Cs = np.eye(D)
    Ds = np.zeros((D, 1))
    sigma_obs = np.linalg.inv(Ris)
    inputs = bs
    data = ms

    print("Timing PyLDS message passing (kalman_filter)")
    start = time.time()
    for itr in range(N_iter):
        kalman_filter(mu_init, sigma_init,
                      np.concatenate([As, np.eye(D)[None, :, :]]), Bs,
                      np.concatenate([sigma_states,
                                      np.eye(D)[None, :, :]]), Cs, Ds,
                      sigma_obs, inputs, data)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))

    # Info form comparison
    J_init = np.zeros((D, D))
    h_init = np.zeros(D)
    log_Z_init = 0

    J_diag, J_lower_diag, h = convert_lds_to_block_tridiag(
        As, bs, Qi_sqrts, ms, Ri_sqrts)
    J_pair_21 = J_lower_diag
    J_pair_22 = J_diag[1:]
    J_pair_11 = J_diag[:-1]
    J_pair_11[1:] = 0
    h_pair_2 = h[1:]
    h_pair_1 = h[:-1]
    h_pair_1[1:] = 0
    log_Z_pair = 0

    J_node = np.zeros((T, D, D))
    h_node = np.zeros((T, D))
    log_Z_node = 0

    print("Timing PyLDS message passing (kalman_info_filter)")
    start = time.time()
    for itr in range(N_iter):
        kalman_info_filter(J_init, h_init, log_Z_init, J_pair_11, J_pair_21,
                           J_pair_22, h_pair_1, h_pair_2, log_Z_pair, J_node,
                           h_node, log_Z_node)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))
Example #59
 def __init__(self, N, K, D, M=0, single_subspace=True, **kwargs):
     super(_GaussianEmissionsMixin, self).__init__(N, K, D, M=M, single_subspace=single_subspace, **kwargs)
     self.inv_etas = -4 + npr.randn(1, N) if single_subspace else npr.randn(K, N)
Example #60
    :return:
    """
    loss_sum = 0.
    for xi in x:
        net_out = neural_network(W, xi)[0][0]
        psi_t = psi_trial(xi, net_out)
        gradient_of_trial = psi_grad(xi, net_out)
        second_gradient_of_trial = psi_grad2(xi, net_out)
        func = f(xi, psi_t, gradient_of_trial)
        err_sqr = (second_gradient_of_trial - func)**2
        loss_sum += err_sqr
    return loss_sum


# Set the weights
W = [npr.randn(1, 10), npr.randn(10, 1)]
lmb = 0.001

x_space = np.linspace(0, 2, nx)
y_space = psi_analytic(x_space)

# Train a neural network
for i in range(1000):
    loss_grad = grad(loss_function)(W, x_space)
    W[0] = W[0] - lmb * loss_grad[0]
    W[1] = W[1] - lmb * loss_grad[1]

# Results
res = [psi_trial(xi, neural_network(W, xi)[0][0]) for xi in x_space]

error = 0