Example #1

import numpy as np

from neon import NervanaObject
from neon.initializers import Uniform
from neon.layers import Linear


def test_linear_zeros(backend_default, basic_linargs):
    # basic sanity check: zero weights, random inputs
    nin, nout, batch_size = basic_linargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32

    init_unif = Uniform(low=0.0, high=0.0)
    layer = Linear(nout=nout, init=init_unif)
    inp = layer.be.array(dtypeu(np.random.random((nin, batch_size))))
    layer.configure(nin)
    layer.prev_layer = True  # Hack to force delta buffer allocation
    layer.allocate()
    layer.set_deltas([layer.be.iobuf(nin)])
    out = layer.fprop(inp).get()

    assert np.min(out) == 0.0 and np.max(out) == 0.0

    err = dtypeu(np.zeros((nout, batch_size)))
    deltas = layer.bprop(layer.be.array(err)).get()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    dw = layer.dW.get()
    assert np.min(dw) == 0.0 and np.max(dw) == 0.0

    return
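For reference, the three assertions above follow directly from the linear layer's algebra: with W = 0, fprop computes out = W.dot(x) = 0, bprop computes deltas = W.T.dot(err) = 0, and the weight gradient is dW = err.dot(x.T) = 0 since err is all zeros. A minimal pure-NumPy sketch of the same three checks, independent of the neon backend (the shapes below are illustrative):

import numpy as np

nin, nout, batch_size = 32, 16, 8
W = np.zeros((nout, nin), dtype=np.float32)   # zero-initialized weights
x = np.random.random((nin, batch_size)).astype(np.float32)

out = W.dot(x)                                # fprop output, (nout, batch_size)
assert out.min() == 0.0 and out.max() == 0.0

err = np.zeros((nout, batch_size), dtype=np.float32)
deltas = W.T.dot(err)                         # bprop deltas, (nin, batch_size)
assert deltas.min() == 0.0 and deltas.max() == 0.0

dW = err.dot(x.T)                             # weight gradient, (nout, nin)
assert dW.min() == 0.0 and dW.max() == 0.0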
Example #2

def test_linear_zeros(backend_default, basic_linargs):
    # basic sanity check: zero weights, random inputs
    nin, nout, batch_size = basic_linargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32

    init_unif = Uniform(low=0.0, high=0.0)
    layer = Linear(nout=nout, init=init_unif)
    inp = layer.be.array(dtypeu(np.random.random((nin, batch_size))))
    layer.configure(nin)
    layer.prev_layer = True  # Hack to force delta buffer allocation
    layer.allocate()
    layer.set_deltas([layer.be.iobuf(nin)])
    out = layer.fprop(inp).get()

    assert np.min(out) == 0.0 and np.max(out) == 0.0

    err = dtypeu(np.zeros((nout, batch_size)))
    deltas = layer.bprop(layer.be.array(err)).get()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    dw = layer.dW.get()
    assert np.min(dw) == 0.0 and np.max(dw) == 0.0

    return
Example #3
def test_all_rand(backend_default, allrand_args, deltas_buffer):
    # test with random weights and random inputs
    dtypeu = np.float32
    w_rng, rngmax = allrand_args
    inp_rng = [0.0, rngmax]
    nin = 1024
    nout = 2048
    batch_size = 16
    NervanaObject.be.bsz = batch_size

    init_unif = Uniform(low=w_rng[0], high=w_rng[1])
    layer = Linear(nout=nout, init=init_unif)
    inp = np.random.random((nin, batch_size))
    inp *= inp_rng[1] - inp_rng[0]
    inp += inp_rng[0]
    inp = inp.astype(dtypeu)
    layer.configure(nin)
    layer.prev_layer = True  # Hack to force delta buffer allocation
    layer.allocate()

    layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    layer.set_deltas(deltas_buffer)

    out = layer.fprop(layer.be.array(inp)).get()
    w = layer.W.get()

    # the expected output using numpy
    out_exp = np.dot(w, inp)

    # for larger layers we need to estimate the achievable numerical
    # precision; est_mm_prec is a helper defined elsewhere in this test module
    atol = 2 * est_mm_prec(w, inp, ntrials=1)
    assert np.allclose(out_exp, out, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(out - out_exp)), atol)

    err = np.random.random((nout, batch_size))
    err = err * (inp_rng[1] - inp_rng[0]) + inp_rng[0]
    err = err.astype(dtypeu)
    deltas = layer.bprop(layer.be.array(err)).get()
    dw = layer.dW.get()

    deltas_exp = np.dot(w.T, err)
    atol = 2 * est_mm_prec(w.T, err, ntrials=1)
    assert np.allclose(deltas_exp, deltas, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(deltas_exp - deltas)), atol)

    dw_exp = np.dot(err, inp.T)
    atol = 2 * est_mm_prec(err, inp.T, ntrials=1)
    assert np.allclose(dw_exp, dw, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(dw_exp - dw)), atol)

    return
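The tolerance helper est_mm_prec used above is defined elsewhere in the test module and is not part of this listing. A plausible sketch, assuming it estimates the rounding error of a float32 matrix product against a float64 reference (the name, signature, and call sites match the test above; the body itself is an assumption, not neon's actual implementation):

import numpy as np

def est_mm_prec(a, b, ntrials=1):
    # Rough estimate of the float32 rounding error in a.dot(b), measured
    # against a float64 reference product. Trials beyond the first permute
    # the contraction axis so different summation orders are sampled; the
    # mathematical result is unchanged by the permutation.
    err = 0.0
    for trial in range(ntrials):
        if trial > 0:
            perm = np.random.permutation(a.shape[1])
            a, b = a[:, perm], b[perm, :]
        ref = np.dot(a.astype(np.float64), b.astype(np.float64))
        f32 = np.dot(a.astype(np.float32), b.astype(np.float32))
        err = max(err, float(np.max(np.abs(f32 - ref))))
    return err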
Example #4
def test_linear_zeros(backend, basic_linargs):
    # basic sanity check: zero weights, random inputs
    nin, nout, batch_size = basic_linargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size

    dtypeu = np.float32

    init_unif = Uniform(low=0.0, high=0.0)
    layer = Linear(nout=nout, init=init_unif)
    inp = layer.be.array(dtypeu(np.random.random((nin, batch_size))))
    out = layer.fprop(inp).get()

    assert np.min(out) == 0.0 and np.max(out) == 0.0

    err = dtypeu(np.zeros((nout, batch_size)))
    deltas = layer.bprop(layer.be.array(err)).get()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    dw = layer.dW.get()
    assert np.min(dw) == 0.0 and np.max(dw) == 0.0

    return
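All of these tests rely on pytest fixtures (backend_default / backend, basic_linargs, allrand_args, deltas_buffer) supplied by the suite's conftest.py, which is not shown in this listing. A minimal sketch of the two parameter fixtures, assuming basic_linargs yields (nin, nout, batch_size) triples and allrand_args yields (w_rng, rngmax) pairs, as the tuple unpacking in the tests implies; the concrete parameter values below are illustrative assumptions:

import pytest

@pytest.fixture(params=[(10, 20, 32), (100, 50, 64)])
def basic_linargs(request):
    # (nin, nout, batch_size) consumed by test_linear_zeros
    return request.param

@pytest.fixture(params=[([-1.0, 1.0], 1.0), ([-0.5, 0.5], 2.0)])
def allrand_args(request):
    # (w_rng, rngmax): weight-init range and input/error magnitude,
    # consumed by test_all_rand
    return request.param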