Example #1
import random

import numpy as np
import legate.numpy as lg  # assumed: `lg` is a NumPy-compatible drop-in (e.g. legate.numpy)


def test():
    anp = np.random.randn(4, 5)
    b = random.randint(1, 13)  # scalar integer operand
    a = lg.array(anp)

    # test power with scalar on rhs
    assert lg.allclose(lg.power(a, b), np.power(anp, b))

    # test power with scalar on lhs
    assert lg.allclose(lg.power(b, a), np.power(b, anp))

    return
Example #2
import numpy as np
import legate.numpy as lg  # assumed: `lg` is a NumPy-compatible drop-in (e.g. legate.numpy)


def test():
    bases_np = np.random.randn(4, 5)

    # avoid fractional exponents: a negative base raised to a non-integer
    # power yields NaN under real-valued power (see the note after this example)
    exponents_np = np.random.randint(10, size=(4, 5)).astype(np.float64)

    bases = lg.array(bases_np)
    exponents = lg.array(exponents_np)

    assert lg.allclose(lg.power(bases, exponents),
                       np.power(bases_np, exponents_np))
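As noted in the comment above, real-valued power of a negative base with a non-integer exponent is undefined. A quick standalone NumPy illustration of the case the test avoids (not part of the original snippet):

import numpy as np

with np.errstate(invalid="ignore"):       # silence the RuntimeWarning
    print(np.power(-2.0, 0.5))            # nan: no real square root of a negative base
    print(np.power(-2.0, 2.0))            # 4.0: integer-valued exponents are fine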
Example #3
import random

import numpy as np
import legate.numpy as lg  # assumed: `lg` is a NumPy-compatible drop-in (e.g. legate.numpy)


def test():
    anp = np.random.randn(4, 5)
    b = random.randint(1, 13)
    a = lg.array(anp)

    # test power with an explicit output array (out=); see the note after this example
    np.power(anp, b, out=anp)
    lg.power(a, b, out=a)

    assert lg.allclose(a, anp)

    return
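For reference, a NumPy ufunc called with out= writes the result into the given array in place and returns that same array. A small standalone illustration (not part of the original test):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.power(x, 2, out=x)
print(x)       # [1. 4. 9.] -- x was modified in place
print(y is x)  # True -- the returned array is the out array itself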
Example #4
import numpy as np


def check(A, x, b):
    """Verify that x solves the linear system A @ x = b (within tolerance)."""
    print("Checking result...")
    if np.allclose(A.dot(x), b):
        print("PASS!")
    else:
        print("FAIL!")
Example #5
import numpy as np

# `LSTM` is assumed to be the accompanying LSTM implementation, providing
# init(input_size, hidden_size), forward(X, WLSTM, c0, h0) and
# backward(dH, cache, dcnext, dhnext) as used below.


def checkSequentialMatchesBatch():
    """Check LSTM I/O forward/backward interactions: stepping through the
    sequence one timestep at a time must match a single batched call."""

    n, b, d = (5, 3, 4)  # sequence length, batch size, hidden size
    input_size = 10
    WLSTM = LSTM.init(input_size, d)  # input size, hidden size
    X = np.random.randn(n, b, input_size)
    h0 = np.random.randn(b, d)
    c0 = np.random.randn(b, d)

    # sequential forward
    cprev = c0
    hprev = h0
    caches = [{} for t in range(n)]
    Hcat = np.zeros((n, b, d))

    for t in range(n):
        xt = X[t:t + 1]
        _, cprev, hprev, cache = LSTM.forward(xt, WLSTM, cprev, hprev)
        caches[t] = cache
        Hcat[t] = hprev

    # sanity check: perform batch forward to check that we get the same thing
    H, _, _, batch_cache = LSTM.forward(X, WLSTM, c0, h0)

    assert np.allclose(H, Hcat), "Sequential and Batch forward don't match!"

    # eval loss
    wrand = np.random.randn(*Hcat.shape)
    # loss = np.sum(Hcat * wrand)
    dH = wrand

    # get the batched version gradients
    BdX, BdWLSTM, Bdc0, Bdh0 = LSTM.backward(dH, batch_cache)

    # now perform sequential backward
    dX = np.zeros_like(X)
    dWLSTM = np.zeros_like(WLSTM)
    dc0 = np.zeros_like(c0)
    dh0 = np.zeros_like(h0)
    dcnext = None
    dhnext = None
    for t in reversed(range(n)):
        dht = dH[t].reshape((1, b, d))
        # print("dht")
        # print(dht.shape)
        # print(dht[0])
        dx, dWLSTMt, dcprev, dhprev = LSTM.backward(dht, caches[t], dcnext,
                                                    dhnext)
        dhnext = dhprev
        dcnext = dcprev

        dWLSTM += dWLSTMt  # accumulate LSTM gradient
        dX[t] = dx[0]
        if t == 0:
            dc0 = dcprev
            dh0 = dhprev

    # and make sure the gradients match
    print(
        "Making sure batched version agrees with sequential version: (should "
        "all be True)")
    print(np.allclose(BdX, dX))
    print(np.allclose(BdWLSTM, dWLSTM))
    print(np.allclose(Bdc0, dc0))
    print(np.allclose(Bdh0, dh0))
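A minimal entry point, assuming the function is meant to be run as a standalone script (the guard below is not in the original snippet):

if __name__ == "__main__":
    checkSequentialMatchesBatch()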