Example #1
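The snippets on this page are shown without their import preamble. A minimal header along the following lines is assumed (exact module paths vary across neon releases, and DeconvRefLayer and identity are local helpers from the test suite's reference implementation rather than public neon APIs):

# assumed imports for these snippets; adjust the paths to your neon version
import numpy as np
from neon import NervanaObject
from neon.initializers import Uniform
from neon.layers import Deconv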
def test_dconv_zeros(backend_default, zeros_convargs):
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size

    dtypeu = np.float32
    init_unif = Uniform(low=0.0, high=0.0)
    inshape = (64, 28, 28)
    insize = np.prod(inshape)
    neon_layer = Deconv(fshape=(fshape, fshape, nofm),
                        strides=1,
                        padding=0,
                        init=init_unif)
    inp_arr_shape = (insize, batch_size)
    inp = np.random.random(inp_arr_shape).astype(dtypeu)
    inp = neon_layer.be.array(inp)
    inp.lshape = inshape
    neon_layer.configure(inshape)
    neon_layer.allocate()

    # fprop with all-zero weights must produce an all-zero output
    outa = neon_layer.fprop(inp)
    out = outa.asnumpyarray()
    assert np.min(out) == 0.0 and np.max(out) == 0.0

    # bprop an all-zero error; the resulting deltas must also be all zeros
    err = dtypeu(np.zeros(outa.shape))
    deltas = neon_layer.bprop(NervanaObject.be.array(err)).asnumpyarray()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    # with zero error, the accumulated weight updates must be all zeros as well
    dw = neon_layer.dW.asnumpyarray()
    assert np.min(dw) == 0.0 and np.max(dw) == 0.0
    return
Example #2
def test_dconv_ones(backend_default, ones_convargs):
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dtypeu = np.float32

    # weights set to one
    init_unif = Uniform(low=1.0, high=1.0)

    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    neon_layer = Deconv(fshape=(fshape, fshape, nofm), strides=1,
                        padding=0, init=init_unif)
    inp = neon_layer.be.array(np.ones((insize, batch_size)).astype(dtypeu))
    inp.lshape = inshape
    # run fprop
    neon_layer.configure(inshape)
    neon_layer.allocate()
    out = neon_layer.fprop(inp).asnumpyarray()
    # with all-one inputs and weights, each output value equals nifm times the
    # number of filter taps covering it: nifm at the corners,
    # fshape * fshape * nifm in the interior
    out_exp_min = nifm
    out_exp_max = fshape * fshape * nifm
    assert np.min(out) == out_exp_min and np.max(out) == out_exp_max
    # generate err array
    err = np.ones(out.shape).astype(dtypeu)

    # run bprop
    neon_layer.bprop(NervanaObject.be.array(err)).asnumpyarray()
    dw = neon_layer.dW.asnumpyarray()

    # generate the reference layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0], inshape[1:3],
                               (fshape, fshape), nofm, 1, dtypeu)

    ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)

    # run bprop on the reference layer
    ref_layer.bprop(err)

    # expected output for updates is uniform matrix with
    # all elements == ofmsize*batch_size
    updates_exp = ref_layer.ofmsize * batch_size

    # check dw from neon layer
    assert np.max(dw) == updates_exp and np.min(dw) == updates_exp

    # no tolerance here, the result should be exact
    assert np.max(np.abs(ref_layer.y.T - neon_layer.deltas.get())) == 0.0

    return
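The expected fprop extrema asserted in Example #2 can be sanity-checked with a small pure-numpy model of a stride-1, zero-padding deconvolution. This sketch is independent of neon; the function name and toy shapes below are illustrative only.

# standalone sketch (not part of the neon test suite): scatter each input
# pixel through an all-one filter and check the output extrema
import numpy as np

def deconv_ones_extrema(indim, nifm, fshape, nofm):
    outdim = indim + fshape - 1          # stride 1, no padding
    inp = np.ones((nifm, indim, indim))
    w = np.ones((nifm, fshape, fshape, nofm))
    out = np.zeros((nofm, outdim, outdim))
    for y in range(indim):
        for x in range(indim):
            # each input pixel scatters its value through every filter tap
            out[:, y:y + fshape, x:x + fshape] += np.tensordot(
                inp[:, y, x], w, axes=([0], [0])).transpose(2, 0, 1)
    return out.min(), out.max()

mn, mx = deconv_ones_extrema(indim=4, nifm=2, fshape=3, nofm=5)
assert mn == 2 and mx == 3 * 3 * 2    # nifm and fshape * fshape * nifm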
Example #3
def test_dconv_rand(backend_default, rand_convargs):
    indim, nifm, fshape, nofm, batch_size, rngmax, w_rng = rand_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dtypeu = np.float32
    inp_rng = [0.0, rngmax]

    init_unif = Uniform(low=w_rng[0], high=w_rng[1])
    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    # generate neon deconv layer
    # need to switch to nofm here...
    neon_layer = Deconv(fshape=(fshape, fshape, nofm), strides=1,
                        padding=0, init=init_unif)

    # generate reference deconv layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0], inshape[1:3],
                               (fshape, fshape), nofm, 1, dtypeu)

    # setup input in range inp_rng
    inpa = np.random.random((insize, batch_size))
    inpa *= (inp_rng[1] - inp_rng[0])
    inpa += inp_rng[0]
    inpa = inpa.astype(dtypeu)
    inp = neon_layer.be.array(inpa)
    inp.lshape = inshape

    # run fprop on neon
    neon_layer.configure(inshape)
    neon_layer.allocate()
    neon_out = neon_layer.fprop(inp).asnumpyarray()
    # pull neon weights into ref layer weights
    ref_layer.weights = neon_layer.W.asnumpyarray().T
    # run fprop on the reference layer and keep its output (stored in berror)
    ref_layer.fprop(inpa.T)
    ref_out = np.copy(ref_layer.berror)

    # estimate the numerical precision
    ref_layer.fprop(inpa.T, permute=True)
    ref_out2 = ref_layer.berror
    atol = 10 * np.max(np.abs(ref_out - ref_out2))
    assert np.allclose(ref_out.T, neon_out, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(ref_out.T - neon_out)), atol)

    # generate err array
    erra = np.random.random(neon_out.shape)
    erra *= (inp_rng[1] - inp_rng[0])
    erra += inp_rng[0]
    erra = erra.astype(dtypeu)