Example No. 1
def time_vspace_flatten():
    val = {'k':  npr.random((4, 4)),
           'k2': npr.random((3, 3)),
           'k3': 3.0,
           'k4': [1.0, 4.0, 7.0, 9.0],
           'k5': np.array([4., 5., 6.]),
           'k6': np.array([[7., 8.], [9., 10.]])}

    vspace_flatten(val)
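
These snippets are scraped from autograd benchmark and test files without their module-level setup, so names like np, npr, grad, flatten, and vspace_flatten are defined elsewhere in the source files. A plausible preamble for Examples No. 1 through 9, assuming a reasonably recent autograd (older releases exposed flatten as autograd.util.flatten, and vspace_flatten is not a public API, so a stand-in is sketched here):

import autograd.numpy as np          # autograd's thinly wrapped NumPy
import autograd.numpy.random as npr  # wrapped numpy.random
from autograd import grad
from autograd.misc.flatten import flatten  # value -> (1-D array, unflatten)

def vspace_flatten(value):
    # Stand-in for the benchmarks' private helper: flatten the value and
    # discard the unflatten closure.
    return flatten(value)[0]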
Example No. 2
def test_flatten_dict():
    val = {'k':  npr.random((4, 4)),
           'k2': npr.random((3, 3)),
           'k3': 3.0,
           'k4': [1.0, 4.0, 7.0, 9.0]}

    vect, unflatten = flatten(val)
    val_recovered = unflatten(vect)
    vect_2, _ = flatten(val_recovered)
    assert np.all(vect == vect_2)
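
The test above checks flatten's round-trip property: flatten returns a 1-D array together with an unflatten closure that rebuilds the original container, so flattening the recovered value must reproduce the vector exactly. A minimal illustration, with imports as in the preamble above:

val = {'a': np.ones((2, 2)), 'b': [1.0, 2.0]}
vect, unflatten = flatten(val)
print(vect.shape)            # (6,): four array entries plus two list entries
recovered = unflatten(vect)  # same nested structure as val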
Example No. 4
def time_vspace_flatten():
    val = {
        'k': npr.random((4, 4)),
        'k2': npr.random((3, 3)),
        'k3': 3.0,
        'k4': [1.0, 4.0, 7.0, 9.0],
        'k5': np.array([4., 5., 6.]),
        'k6': np.array([[7., 8.], [9., 10.]])
    }

    vspace_flatten(val)
Example No. 5
def time_flatten():
    val = {'k':  npr.random((4, 4)),
           'k2': npr.random((3, 3)),
           'k3': 3.0,
           'k4': [1.0, 4.0, 7.0, 9.0],
           'k5': np.array([4., 5., 6.]),
           'k6': np.array([[7., 8.], [9., 10.]])}

    vect, unflatten = flatten(val)
    val_recovered = unflatten(vect)
    vect_2, _ = flatten(val_recovered)
Example No. 6
def time_flatten():
    val = {
        'k': npr.random((4, 4)),
        'k2': npr.random((3, 3)),
        'k3': 3.0,
        'k4': [1.0, 4.0, 7.0, 9.0],
        'k5': np.array([4., 5., 6.]),
        'k6': np.array([[7., 8.], [9., 10.]])
    }

    vect, unflatten = flatten(val)
    val_recovered = unflatten(vect)
    vect_2, _ = flatten(val_recovered)
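
The time_* functions carry no assertions; they follow the convention of a benchmark harness (such as asv) that imports and calls them repeatedly. To time one standalone, a quick sketch with the standard library:

import timeit
# Total wall-clock seconds for 100 calls of time_flatten.
print(timeit.timeit(time_flatten, number=100))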
Example No. 7
def time_grad_flatten():
    val = {'k':  npr.random((4, 4)),
           'k2': npr.random((3, 3)),
           'k3': 3.0,
           'k4': [1.0, 4.0, 7.0, 9.0],
           'k5': np.array([4., 5., 6.]),
           'k6': np.array([[7., 8.], [9., 10.]])}

    vect, unflatten = flatten(val)
    def fun(vec):
        v = unflatten(vec)
        return np.sum(v['k5']) + np.sum(v['k6'])

    grad(fun)(vect)
Example No. 8
def time_grad_flatten():
    val = {
        'k': npr.random((4, 4)),
        'k2': npr.random((3, 3)),
        'k3': 3.0,
        'k4': [1.0, 4.0, 7.0, 9.0],
        'k5': np.array([4., 5., 6.]),
        'k6': np.array([[7., 8.], [9., 10.]])
    }

    vect, unflatten = flatten(val)

    def fun(vec):
        v = unflatten(vec)
        return np.sum(v['k5']) + np.sum(v['k6'])

    grad(fun)(vect)
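
This benchmark exercises a common autograd pattern: differentiate with respect to a structured parameter container by flattening it to a single vector and rebuilding the structure inside the objective. A self-contained sketch of the pattern (the names and loss here are illustrative, not from the benchmark):

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.misc.flatten import flatten

params = {'w': npr.randn(3, 3), 'b': npr.randn(3)}
flat, unflatten = flatten(params)

def loss(flat_params):
    p = unflatten(flat_params)  # rebuild the dict inside the loss
    return np.sum(np.dot(p['w'], p['b']) ** 2)

g = grad(loss)(flat)        # 1-D gradient, same length as flat
grad_dict = unflatten(g)    # gradient carrying the dict's structure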
Example No. 9
def test():
    # `a` and `fn` are module-level globals; Example No. 13 shows the full file.
    b = npr.random([8])

    def f(b):
        return fn(a, b)

    deriv = autograd.grad(f)
    print(f(b))
    print(deriv(b))
Example No. 10
def test_norm_pdf():    combo_check(stats.norm.pdf,    [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_cdf():    combo_check(stats.norm.cdf,    [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logpdf(): combo_check(stats.norm.logpdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logcdf(): combo_check(stats.norm.logcdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])

def test_norm_pdf_broadcast():    combo_check(stats.norm.pdf,    [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_cdf_broadcast():    combo_check(stats.norm.cdf,    [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logpdf_broadcast(): combo_check(stats.norm.logpdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logcdf_broadcast(): combo_check(stats.norm.logcdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])

def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0])
def test_mvn_pdf():    combo_check(mvn.pdf,    [0, 1, 2], [R(4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_logpdf(): combo_check(mvn.logpdf, [0, 1, 2], [R(4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_entropy(): combo_check(mvn.entropy, [0, 1],           [R(4)], [make_psd(R(4, 4))])

alpha = npr.random(4)**2 + 1.2
x = stats.dirichlet.rvs(alpha, size=1)[0,:]

# Normalize the input so the entries of x still sum to one after the perturbation used for the numeric gradient.
def normalize(x): return x / sum(x)
def normalized_dirichlet_pdf(   x, alpha): return stats.dirichlet.pdf(   normalize(x), alpha)
def normalized_dirichlet_logpdf(x, alpha): return stats.dirichlet.logpdf(normalize(x), alpha)

def test_dirichlet_pdf_x():        combo_check(normalized_dirichlet_pdf,    [0], [x], [alpha])
def test_dirichlet_pdf_alpha():    combo_check(stats.dirichlet.pdf,         [1], [x], [alpha])
def test_dirichlet_logpdf_x():     combo_check(normalized_dirichlet_logpdf, [0], [x], [alpha])
def test_dirichlet_logpdf_alpha(): combo_check(stats.dirichlet.logpdf,      [1], [x], [alpha])

### Misc ###
R = npr.randn
def test_logsumexp1(): combo_check(autograd.scipy.misc.logsumexp, [0], [1.1, R(4), R(3,4)],                axis=[None, 0],    keepdims=[True, False])
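
These test excerpts additionally assume autograd's scipy wrappers (import autograd.scipy.stats as stats and from autograd.scipy.stats import multivariate_normal as mvn), plus R = npr.randn, which the ### Misc ### line above defines only after its first use. combo_check itself is a helper from autograd's test suite (tests/numpy_utils.py in the repo), not a public API: it runs gradient checks over every combination of the candidate argument values, differentiating with respect to the listed argument indices. A single case can be checked directly with autograd.test_util.check_grads; the call below is a sketch assuming that module's current signature:

import autograd.numpy.random as npr
import autograd.scipy.stats as stats
from autograd.test_util import check_grads

x, loc, scale = npr.randn(4), npr.randn(4), npr.randn(4) ** 2 + 1.1
# Reverse-mode check of norm.logpdf's gradient w.r.t. x, loc, and scale.
check_grads(stats.norm.logpdf, modes=['rev'])(x, loc, scale)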
Example No. 11
def test_mvn_logpdf():
    combo_check(mvn.logpdf, [0, 1, 2], [R(4)], [R(4)], [make_psd(R(4, 4))])


def test_mvn_entropy():
    combo_check(mvn.entropy, [0, 1], [R(4)], [make_psd(R(4, 4))])


def test_mvn_pdf_broadcast():
    combo_check(mvn.pdf, [0, 1, 2], [R(5, 4)], [R(4)], [make_psd(R(4, 4))])


def test_mvn_logpdf_broadcast():
    combo_check(mvn.logpdf, [0, 1, 2], [R(5, 4)], [R(4)], [make_psd(R(4, 4))])


alpha = npr.random(4) ** 2 + 1.2
x = stats.dirichlet.rvs(alpha, size=1)[0, :]

# Normalize the input so the entries of x still sum to one after the perturbation used for the numeric gradient.
def normalize(x):
    return x / sum(x)


def normalized_dirichlet_pdf(x, alpha):
    return stats.dirichlet.pdf(normalize(x), alpha)


def normalized_dirichlet_logpdf(x, alpha):
    return stats.dirichlet.logpdf(normalize(x), alpha)

Example No. 12
    # -------------- LOADING DATASET ------------------------
    # load the images
    npr.seed(0)
    _, train_images, train_labels, test_images, test_labels = load_mnist()

    rand_idx = np.arange(train_images.shape[0])
    npr.shuffle(rand_idx)

    train_images = train_images[rand_idx]
    train_labels = train_labels[rand_idx]

    # uniformly sample classes: draw 30 random class ids, then one image per id
    cls_labels = train_labels.argmax(axis=1)
    cls_images = [train_images[cls_labels == i] for i in range(10)]
    rand_cls = np.int32(npr.random(30) / 0.1)  # 30 class ids, uniform over 0..9
    rand_idx = [npr.randint(cls_images[cls].shape[0]) for cls in rand_cls]

    train_images = np.vstack(
        [cls_images[rand_cls[i]][rand_idx[i]] for i in range(30)])
    train_labels = np.vstack([
        train_labels[cls_labels == rand_cls[i]][rand_idx[i]] for i in range(30)
    ])

    # binarize
    train_images = np.round(train_images)
    test_images = np.round(test_images)

    # -------------- LOADING DATASET ------------------------
    print('LOADED DATASET')
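
The excerpt above comes from inside a larger training function (hence the indentation), and load_mnist is assumed to return one-hot labels, which is why argmax(axis=1) recovers class ids. Because the class ids themselves are sampled at random, each class appears only 3 times in expectation; a hypothetical variant that draws exactly 3 images per class instead:

# Assumes cls_labels and the original train_images/train_labels as above.
per_class = 3
idx = np.concatenate([
    npr.choice(np.where(cls_labels == c)[0], per_class, replace=False)
    for c in range(10)
])
train_images, train_labels = train_images[idx], train_labels[idx]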
Example No. 13
import autograd
import autograd.numpy as np
import autograd.numpy.random as npr

a = npr.random([2, 2, 2, 2])


def fn(a, b):
    bs = np.reshape(b, [2, 2, 2])
    c = np.einsum("apqb,cqd->acpbd", a, bs)
    return np.sum(c)


def test():
    b = npr.random([8])

    def f(b):
        return fn(a, b)

    deriv = autograd.grad(f)
    print(f(b))
    print(deriv(b))
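
A quick way to sanity-check the einsum gradient is a central finite difference, sketched below (eps and atol are arbitrary but reasonable choices):

def fd_grad(f, x, eps=1e-6):
    # Central-difference gradient, one coordinate at a time.
    g = np.zeros_like(x)
    for i in range(x.size):
        dx = np.zeros_like(x)
        dx[i] = eps
        g[i] = (f(x + dx) - f(x - dx)) / (2 * eps)
    return g

b = npr.random([8])
f = lambda b: fn(a, b)
assert np.allclose(autograd.grad(f)(b), fd_grad(f, b), atol=1e-4)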