Example #1
0
def test_dconv_ones(backend, ones_convargs):
    """Check Deconv fprop/bprop with all-ones weights against DeconvRefLayer.

    With unit weights and unit inputs, every fprop output element is a count
    of contributing filter taps, and every dW entry equals ofmsize*batch_size.
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dt = np.float32

    # a Uniform distribution collapsed to a single point yields all-ones weights
    ones_init = Uniform(low=1.0, high=1.0)

    in_shape = (nifm, indim, indim)
    n_in = np.prod(in_shape)

    layer = Deconv(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                   init=ones_init)
    x = layer.be.array(np.ones((n_in, batch_size)).astype(dt))
    x.lshape = in_shape

    # forward pass: outputs are tap counts, bounded below by nifm and
    # above by fshape * fshape * nifm
    fprop_out = layer.fprop(x).asnumpyarray()
    assert np.min(fprop_out) == nifm and np.max(fprop_out) == fshape * fshape * nifm

    # backward pass with an all-ones error signal
    delta_in = np.ones(fprop_out.shape).astype(dt)
    layer.bprop(NervanaObject.be.array(delta_in)).asnumpyarray()
    grad_w = layer.dW.asnumpyarray()

    # reference layer configured identically, with matching (transposed) weights
    ref = DeconvRefLayer(1, batch_size, identity, in_shape[0], in_shape[1:3],
                         (fshape, fshape), nofm, 1, dt)
    ref.weights = np.ones(layer.W.shape).T.astype(dt)
    ref.bprop(delta_in)

    # every weight-gradient entry should equal ofmsize * batch_size
    expected = ref.ofmsize * batch_size
    assert np.max(grad_w) == expected and np.min(grad_w) == expected

    # deltas must match the reference exactly — no tolerance here
    assert np.max(np.abs(ref.y.T - layer.deltas.get())) == 0.0
Example #2
0
def test_dconv_ones(backend_default, ones_convargs):
    """Validate Deconv fprop/bprop with unit weights against DeconvRefLayer.

    Unit weights and unit inputs make each fprop output a tap count; the
    weight gradient is then uniform and equal to ofmsize * batch_size.
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dt = np.float32

    # degenerate Uniform range => every weight initialized to 1.0
    ones_init = Uniform(low=1.0, high=1.0)

    in_shape = (nifm, indim, indim)
    n_in = np.prod(in_shape)

    layer = Deconv(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                   init=ones_init)
    x = layer.be.array(np.ones((n_in, batch_size)).astype(dt))
    x.lshape = in_shape

    # this API version requires explicit configure/allocate before fprop
    layer.configure(in_shape)
    layer.allocate()

    # forward pass: tap counts lie in [nifm, fshape*fshape*nifm]
    fprop_out = layer.fprop(x).asnumpyarray()
    assert np.min(fprop_out) == nifm and np.max(fprop_out) == fshape * fshape * nifm

    # backward pass with an all-ones error signal
    delta_in = np.ones(fprop_out.shape).astype(dt)
    layer.bprop(NervanaObject.be.array(delta_in)).asnumpyarray()
    grad_w = layer.dW.asnumpyarray()

    # build the reference layer and give it the same (transposed) unit weights
    ref = DeconvRefLayer(1, batch_size, identity, in_shape[0], in_shape[1:3],
                         (fshape, fshape), nofm, 1, dt)
    ref.weights = np.ones(layer.W.shape).T.astype(dt)
    ref.bprop(delta_in)

    # expected update: a uniform matrix, all entries == ofmsize * batch_size
    expected = ref.ofmsize * batch_size
    assert np.max(grad_w) == expected and np.min(grad_w) == expected

    # deltas must agree with the reference exactly — no tolerance
    assert np.max(np.abs(ref.y.T - layer.deltas.get())) == 0.0
Example #3
0
def test_dconv_zeros(backend, zeros_convargs):
    """With zero-initialized weights, fprop output, deltas and dW are all zero."""
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size

    dt = np.float32
    # degenerate Uniform range => every weight initialized to 0.0
    zeros_init = Uniform(low=0.0, high=0.0)
    in_shape = (64, 28, 28)
    n_in = np.prod(in_shape)

    layer = Deconv(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                   init=zeros_init)
    x = layer.be.array(np.random.random((n_in, batch_size)).astype(dt))
    x.lshape = in_shape

    # zero weights annihilate any (random) input
    out_tensor = layer.fprop(x)
    out = out_tensor.asnumpyarray()
    assert np.min(out) == 0.0 and np.max(out) == 0.0

    # a zero error signal must yield zero deltas and zero weight gradients
    delta_in = dt(np.zeros(out_tensor.shape))
    deltas = layer.bprop(NervanaObject.be.array(delta_in)).asnumpyarray()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    grad_w = layer.dW.asnumpyarray()
    assert np.min(grad_w) == 0.0 and np.max(grad_w) == 0.0
Example #4
0
def test_dconv_zeros(backend_default, zeros_convargs):
    """Zero-weight Deconv: fprop output, bprop deltas and dW must all be zero."""
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size

    dt = np.float32
    # Uniform(0, 0) collapses to all-zero weight initialization
    zeros_init = Uniform(low=0.0, high=0.0)
    in_shape = (64, 28, 28)
    n_in = np.prod(in_shape)

    layer = Deconv(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                   init=zeros_init)
    x = layer.be.array(np.random.random((n_in, batch_size)).astype(dt))
    x.lshape = in_shape

    # this API version requires explicit configure/allocate before fprop
    layer.configure(in_shape)
    layer.allocate()

    # zero weights annihilate the random input
    out_tensor = layer.fprop(x)
    out = out_tensor.asnumpyarray()
    assert np.min(out) == 0.0 and np.max(out) == 0.0

    # zero error in => zero deltas and zero weight gradients out
    delta_in = dt(np.zeros(out_tensor.shape))
    deltas = layer.bprop(NervanaObject.be.array(delta_in)).asnumpyarray()
    assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0

    grad_w = layer.dW.asnumpyarray()
    assert np.min(grad_w) == 0.0 and np.max(grad_w) == 0.0