# Example 1
def test_elemexp_values_1():
    """ElemExp on a 5x6 matrix: shape is preserved and values match np.exp."""
    npr.seed(1)

    for _ in xrange(NUM_TRIALS):
        raw = npr.randn(5, 6)
        param = kayak.Parameter(raw)
        exp_node = kayak.ElemExp(param)

        # Elementwise exp must not change the shape, and every entry
        # should agree with numpy's exp up to float tolerance.
        assert exp_node.shape == raw.shape
        assert np.all(close_float(exp_node.value, np.exp(raw)))
# Example 2
def test_elemexp_values_2():
    """ElemExp on a length-1 vector: shape is preserved and values match np.exp."""
    npr.seed(2)

    for _ in xrange(NUM_TRIALS):
        raw = npr.randn(1)
        param = kayak.Parameter(raw)
        exp_node = kayak.ElemExp(param)

        # Same contract as the matrix case, exercised on the smallest input.
        assert exp_node.shape == raw.shape
        assert np.all(close_float(exp_node.value, np.exp(raw)))
# Example 3
def test_elemexp_grad_2():
    """Gradient of sum(exp(A)) for a length-1 A passes the numerical check."""
    npr.seed(9)

    for _ in xrange(NUM_TRIALS):
        param = kayak.Parameter(npr.randn(1))
        loss = kayak.MatSum(kayak.ElemExp(param))

        # Trigger a forward pass, then compare the analytic gradient
        # against finite differences.
        loss.value
        assert kayak.util.checkgrad(param, loss) < MAX_GRAD_DIFF
# Example 4
def test_elemexp_grad_1():
    """Gradient of sum(exp(A)) for a 5x6 A passes the numerical check."""
    npr.seed(8)

    for _ in xrange(NUM_TRIALS):
        param = kayak.Parameter(npr.randn(5, 6))
        loss = kayak.MatSum(kayak.ElemExp(param))

        # Trigger a forward pass, then compare the analytic gradient
        # against finite differences.
        loss.value
        assert kayak.util.checkgrad(param, loss) < MAX_GRAD_DIFF
# Example 5
# Fit a Poisson regression (log-linear model) by SGD using a kayak graph.
# NOTE(review): assumes X, Y, kyk_batcher, D, P, true_W, and learn are
# defined earlier in the file — confirm against the full script.

# Build network: batched design matrix X feeds the graph.
kyk_inputs = kayak.Inputs(X, kyk_batcher)

# Labels (observed counts), batched in lockstep with the inputs.
kyk_targets = kayak.Targets(Y, kyk_batcher)

# Weights, initialized to small random values (D features -> P outputs).
W = 0.01 * npr.randn(D, P)
kyk_W = kayak.Parameter(W)

# Linear layer.
kyk_activation = kayak.MatMult(kyk_inputs, kyk_W)

# Exponential inverse-link function: lam = exp(XW) keeps the rate positive.
kyk_lam = kayak.ElemExp(kyk_activation)

# Poisson negative log likelihood: lam - y*log(lam).
# (The log(y!) term is constant in W, so it is presumably dropped — verify.)
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets

# Sum the losses over the batch.
kyk_loss = kayak.MatSum(kyk_nll)

# Plain SGD: 100 epochs over the batcher.
for ii in xrange(100):

    for batch in kyk_batcher:
        loss = kyk_loss.value
        # Report the loss and the squared distance to the generating true_W.
        print loss, np.sum((kyk_W.value - true_W)**2)
        grad = kyk_loss.grad(kyk_W)
        # Gradient-descent step with fixed learning rate `learn`.
        kyk_W.value -= learn * grad