Example #1
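A forward/backward sanity check for neon's LookupTable layer: fprop must gather the embedding row for every input index, and backpropagating a zero error signal must leave the weight gradient dW identically zero.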
import numpy as np

from neon import NervanaObject
from neon.initializers import GlorotUniform
from neon.layers import LookupTable


def test_lookuptable_zeros_error(backend_default, basic_linargs, deltas_buffer):
    # Sanity check: a zero error signal on random inputs must produce
    # all-zero weight gradients.
    nin, nout, batch_size, vocab_size = basic_linargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32

    init_glorot = GlorotUniform()
    layer = LookupTable(
        vocab_size=vocab_size, embedding_dim=nout, init=init_glorot)

    # np.random.random_integers is deprecated; randint's upper bound is
    # exclusive, so this draws the same range [0, vocab_size - 1].
    inp = np.random.randint(0, vocab_size, size=nin * batch_size)
    layer.configure(nin)
    layer.allocate()

    layer.prev_layer = True  # Hack to force delta buffer allocation
    layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    layer.set_deltas(deltas_buffer)

    inputs = layer.be.array(inp.reshape((nin, batch_size)))
    out = layer.fprop(inputs).get()
    W = layer.W.get()
    # fprop is a row gather: output column i must equal embedding row inp[i]
    for i in range(nin * batch_size):
        assert np.all(W[inp[i]].T == out[:, i])

    # Backprop a zero error signal; every accumulated gradient must be zero.
    err = dtypeu(np.zeros((nout, nin * batch_size)))
    layer.bprop(layer.be.array(err)).get()

    dw = layer.dW.get()
    assert np.min(dw) == 0.0 and np.max(dw) == 0.0

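Both tests lean on three pytest fixtures (backend_default, basic_linargs, deltas_buffer) that ship with neon's test suite and are not shown on this page. A minimal sketch of what they could look like; the concrete sizes and the DeltasTree-based buffer are illustrative assumptions, not neon's canonical conftest:

import pytest

from neon.backends import gen_backend
from neon.layers.layer import DeltasTree


@pytest.fixture
def backend_default():
    # CPU backend; each test overrides the batch size via NervanaObject.be.bsz
    return gen_backend(backend='cpu', batch_size=8)


@pytest.fixture
def basic_linargs():
    # (nin, nout, batch_size, vocab_size) -- illustrative sizes only
    return (4, 16, 8, 1000)


@pytest.fixture
def deltas_buffer():
    # Shared tree that layers register their delta buffers into;
    # allocate_buffers() later materializes the backing memory.
    return DeltasTree()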
Example #2
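The companion check with a random error signal: after bprop, the gradient row for each word id must equal the sum of the error columns at every position where that id occurred in the input.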
import numpy as np

from neon import NervanaObject
from neon.initializers import GlorotUniform
from neon.layers import LookupTable


def test_lookuptable_rand_error(backend_default, basic_linargs, deltas_buffer):
    # Check that bprop scatter-adds each error column into the gradient
    # row of the word id that produced it.
    nin, nout, batch_size, vocab_size = basic_linargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32

    init_glorot = GlorotUniform()
    layer = LookupTable(vocab_size=vocab_size,
                        embedding_dim=nout,
                        init=init_glorot)

    # np.random.random_integers is deprecated; randint's upper bound is
    # exclusive, so this draws the same range [0, vocab_size - 1].
    inp = np.random.randint(0, vocab_size, size=nin * batch_size)
    layer.configure(nin)
    layer.allocate()

    layer.prev_layer = True  # Hack to force delta buffer allocation
    layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    layer.set_deltas(deltas_buffer)

    inputs = layer.be.array(inp.reshape((nin, batch_size)))
    out = layer.fprop(inputs).get()
    W = layer.W.get()
    # fprop is a row gather: output column i must equal embedding row inp[i]
    for i in range(nin * batch_size):
        assert np.all(W[inp[i]].T == out[:, i])

    err = dtypeu(np.random.random((nout, nin * batch_size)))
    layer.bprop(layer.be.array(err)).get()

    dw = layer.dW.get()
    unqidx, count = np.unique(inp, return_counts=True)
    dw_exp = np.zeros((1, nout))
    for wrd_id, cnt in zip(unqidx, count):
        # Reference gradient: sum every error column whose input was wrd_id.
        dw_exp[:] = 0
        cnt_exp = 0
        for i, w_id in enumerate(inp):
            if w_id == wrd_id:
                dw_exp[:] = dw_exp[:] + err[:, i]
                cnt_exp += 1
        # Compare both argument orders: with atol=0, allclose's tolerance
        # is relative to its second argument only.
        assert np.allclose(dw[wrd_id, :], dw_exp, atol=0, rtol=1e-4)
        assert np.allclose(dw_exp, dw[wrd_id, :], atol=0, rtol=1e-4)
        assert cnt == cnt_exp

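The reference loop above rescans the whole input for every unique word id. The same expected gradient table can be built in a single pass with an unbuffered scatter-add; a NumPy-only sketch (expected_lookup_grad is a local helper for illustration, not part of neon):

import numpy as np


def expected_lookup_grad(inp, err, vocab_size):
    # inp: flat word ids, shape (nin * batch_size,)
    # err: error signal, shape (nout, nin * batch_size)
    dw_exp = np.zeros((vocab_size, err.shape[0]), dtype=err.dtype)
    # np.add.at is unbuffered, so repeated word ids accumulate
    # instead of overwriting one another.
    np.add.at(dw_exp, inp, err.T)
    return dw_exp

Row wrd_id of the returned table should then match dw[wrd_id, :] to the same tolerance the test uses.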