Example n. 1
0
def test_dconv_ones(backend_default, ones_convargs, deltas_buffer):
    """Check Deconvolution fprop/bprop against the reference layer when
    all weights and inputs are ones.

    With unit weights and unit inputs every output element is an integer
    count of contributing filter taps, so all comparisons are exact
    (no tolerance).
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    if isinstance(NervanaObject.be, NervanaGPU) and NervanaObject.be.compute_capability < (5, 0):
        if nofm % 4 != 0:
            # BUG FIX: pytest.skip() takes the reason as its positional
            # argument; the `msg=` keyword is not accepted (removed in
            # pytest 7), so the old call raised TypeError instead of
            # skipping the test.
            pytest.skip("C dim must be a multiple of 4 for Kepler bprop kernel")
    NervanaObject.be.bsz = batch_size
    dtypeu = np.float32

    # weights set to one
    init_unif = Uniform(low=1.0, high=1.0)

    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    neon_layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=1,
                               padding=0, init=init_unif)
    inp = neon_layer.be.array(np.ones((insize, batch_size)).astype(dtypeu))
    inp.lshape = inshape
    # run fprop
    neon_layer.configure(inshape)
    neon_layer.prev_layer = True
    neon_layer.allocate()

    neon_layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    neon_layer.set_deltas(deltas_buffer)

    out = neon_layer.fprop(inp).get()
    # corner outputs see one tap per ifm; interior ones see the full kernel
    out_exp_min = nifm
    out_exp_max = fshape * fshape * nifm
    assert np.min(out) == out_exp_min and np.max(out) == out_exp_max
    # generate err array
    err = np.ones(out.shape).astype(dtypeu)

    # run bprop
    neon_layer.bprop(NervanaObject.be.array(err)).get()
    dw = neon_layer.dW.get()

    # generate the reference layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0], inshape[1:3],
                               (fshape, fshape), nofm, 1, dtypeu)

    ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)

    # run bprop on the reference implementation
    ref_layer.bprop(err)

    # expected output for updates is a uniform matrix with
    # all elements == ofmsize * batch_size
    updates_exp = ref_layer.ofmsize * batch_size

    # check dw from the neon layer
    assert np.max(dw) == updates_exp and np.min(dw) == updates_exp

    # no tolerance here, should be exact
    assert np.max(np.abs(ref_layer.y.T - neon_layer.deltas.get())) == 0.0
Example n. 2
0
def test_dconv_ones(backend_default, ones_convargs, deltas_buffer):
    """Check Deconvolution fprop/bprop against the reference layer when
    all weights and inputs are ones.

    With unit weights and unit inputs every output element is an integer
    count of contributing filter taps, so all comparisons are exact
    (no tolerance).
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    if isinstance(NervanaObject.be, NervanaGPU) and NervanaObject.be.compute_capability < (5, 0):
        if nofm % 4 != 0:
            # BUG FIX: pytest.skip() takes the reason as its positional
            # argument; the `msg=` keyword is not accepted (removed in
            # pytest 7), so the old call raised TypeError instead of
            # skipping the test.
            pytest.skip("C dim must be a multiple of 4 for Kepler bprop kernel")
    NervanaObject.be.bsz = batch_size
    dtypeu = np.float32

    # weights set to one
    init_unif = Uniform(low=1.0, high=1.0)

    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    neon_layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=1,
                               padding=0, init=init_unif)
    inp = neon_layer.be.array(np.ones((insize, batch_size)).astype(dtypeu))
    inp.lshape = inshape
    # run fprop
    neon_layer.configure(inshape)
    neon_layer.prev_layer = True
    neon_layer.allocate()

    neon_layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    neon_layer.set_deltas(deltas_buffer)

    out = neon_layer.fprop(inp).get()
    # corner outputs see one tap per ifm; interior ones see the full kernel
    out_exp_min = nifm
    out_exp_max = fshape * fshape * nifm
    assert np.min(out) == out_exp_min and np.max(out) == out_exp_max
    # generate err array
    err = np.ones(out.shape).astype(dtypeu)

    # run bprop
    neon_layer.bprop(NervanaObject.be.array(err)).get()
    dw = neon_layer.dW.get()

    # generate the reference layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0], inshape[1:3],
                               (fshape, fshape), nofm, 1, dtypeu)

    ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)

    # run bprop on the reference implementation
    ref_layer.bprop(err)

    # expected output for updates is a uniform matrix with
    # all elements == ofmsize * batch_size
    updates_exp = ref_layer.ofmsize * batch_size

    # check dw from the neon layer
    assert np.max(dw) == updates_exp and np.min(dw) == updates_exp

    # no tolerance here, should be exact
    assert np.max(np.abs(ref_layer.y.T - neon_layer.deltas.get())) == 0.0
Example n. 3
0
def test_dconv_ones(backend_default, ones_convargs):
    """Check Deconvolution fprop/bprop against the reference layer when
    all weights and inputs are ones.

    With unit weights and unit inputs every output element is an integer
    count of contributing filter taps, so all comparisons are exact.
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dtypeu = np.float32

    # weights set to one
    init_unif = Uniform(low=1.0, high=1.0)

    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    neon_layer = Deconvolution(fshape=(fshape, fshape, nofm),
                               strides=1,
                               padding=0,
                               init=init_unif)
    inp = neon_layer.be.array(np.ones((insize, batch_size)).astype(dtypeu))
    inp.lshape = inshape
    # run fprop
    neon_layer.configure(inshape)
    neon_layer.prev_layer = True
    neon_layer.allocate()
    neon_layer.set_deltas([neon_layer.be.iobuf(inshape)])
    out = neon_layer.fprop(inp).asnumpyarray()
    # corner outputs see one tap per ifm; interior ones see the full kernel
    out_exp_min = nifm
    out_exp_max = fshape * fshape * nifm
    assert np.min(out) == out_exp_min and np.max(out) == out_exp_max
    # generate err array
    err = np.ones(out.shape).astype(dtypeu)

    # run bprop
    neon_layer.bprop(NervanaObject.be.array(err)).asnumpyarray()
    dw = neon_layer.dW.asnumpyarray()

    # generate the reference layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0],
                               inshape[1:3], (fshape, fshape), nofm, 1, dtypeu)

    ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)

    # run bprop on the reference implementation
    ref_layer.bprop(err)

    # expected output for updates is a uniform matrix with
    # all elements == ofmsize * batch_size
    updates_exp = ref_layer.ofmsize * batch_size

    # check dw from the neon layer
    assert np.max(dw) == updates_exp and np.min(dw) == updates_exp

    # no tolerance here, should be exact
    # NOTE(review): mixes .asnumpyarray() above with .get() here —
    # presumably equivalent host-copy accessors; confirm against backend API
    assert np.max(np.abs(ref_layer.y.T - neon_layer.deltas.get())) == 0.0

    return
Example n. 4
0
def test_dconv_ones(backend_default, ones_convargs):
    """Check Deconvolution fprop/bprop against the reference layer when
    all weights and inputs are ones.

    With unit weights and unit inputs every output element is an integer
    count of contributing filter taps, so all comparisons are exact.
    """
    indim, nifm, fshape, nofm, batch_size = ones_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
    dtypeu = np.float32

    # weights set to one
    init_unif = Uniform(low=1.0, high=1.0)

    inshape = (nifm, indim, indim)
    insize = np.prod(inshape)

    neon_layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=1,
                               padding=0, init=init_unif)
    inp = neon_layer.be.array(np.ones((insize, batch_size)).astype(dtypeu))
    inp.lshape = inshape
    # run fprop
    neon_layer.configure(inshape)
    neon_layer.prev_layer = True
    neon_layer.allocate()
    neon_layer.set_deltas([neon_layer.be.iobuf(inshape)])
    out = neon_layer.fprop(inp).asnumpyarray()
    # corner outputs see one tap per ifm; interior ones see the full kernel
    out_exp_min = nifm
    out_exp_max = fshape * fshape * nifm
    assert np.min(out) == out_exp_min and np.max(out) == out_exp_max
    # generate err array
    err = np.ones(out.shape).astype(dtypeu)

    # run bprop
    neon_layer.bprop(NervanaObject.be.array(err)).asnumpyarray()
    dw = neon_layer.dW.asnumpyarray()

    # generate the reference layer
    ref_layer = DeconvRefLayer(1, batch_size, identity, inshape[0], inshape[1:3],
                               (fshape, fshape), nofm, 1, dtypeu)

    ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)

    # run bprop on the reference implementation
    ref_layer.bprop(err)

    # expected output for updates is a uniform matrix with
    # all elements == ofmsize * batch_size
    updates_exp = ref_layer.ofmsize * batch_size

    # check dw from the neon layer
    assert np.max(dw) == updates_exp and np.min(dw) == updates_exp

    # no tolerance here, should be exact
    # NOTE(review): mixes .asnumpyarray() above with .get() here —
    # presumably equivalent host-copy accessors; confirm against backend API
    assert np.max(np.abs(ref_layer.y.T - neon_layer.deltas.get())) == 0.0

    return
Example n. 5
0
def test_dconv_zeros(backend_default, zeros_convargs):
    """With all-zero weights, the Deconvolution layer's fprop output,
    bprop deltas, and weight updates must all be exactly zero.
    """
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = NervanaObject.be.bs = batch_size

    dtypeu = np.float32
    inshape = (64, 28, 28)
    insize = np.prod(inshape)
    # weights initialized to zero
    layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                          init=Uniform(low=0.0, high=0.0))
    host_inp = np.random.random((insize, batch_size)).astype(dtypeu)
    dev_inp = layer.be.array(host_inp)
    dev_inp.lshape = inshape
    layer.configure(inshape)
    layer.prev_layer = True
    layer.allocate()
    layer.set_deltas([layer.be.iobuf(inshape)])

    # forward pass: zero weights wipe out any input
    dev_out = layer.fprop(dev_inp)
    host_out = dev_out.asnumpyarray()
    assert np.min(host_out) == 0.0 and np.max(host_out) == 0.0

    # backward pass with a zero error signal
    zero_err = np.zeros(dev_out.shape, dtype=dtypeu)
    host_deltas = layer.bprop(NervanaObject.be.array(zero_err)).asnumpyarray()
    assert np.min(host_deltas) == 0.0 and np.max(host_deltas) == 0.0

    # weight gradient must also be all zeros
    host_dw = layer.dW.asnumpyarray()
    assert np.min(host_dw) == 0.0 and np.max(host_dw) == 0.0
Example n. 6
0
def test_dconv_zeros(backend_default, zeros_convargs):
    """With all-zero weights, the Deconvolution layer's fprop output,
    bprop deltas, and weight updates must all be exactly zero.
    """
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32
    inshape = (64, 28, 28)
    # weights initialized to zero
    dconv = Deconvolution(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                          init=Uniform(low=0.0, high=0.0))
    random_input = np.random.random((np.prod(inshape), batch_size))
    x = dconv.be.array(random_input.astype(dtypeu))
    x.lshape = inshape
    dconv.configure(inshape)
    dconv.prev_layer = True
    dconv.allocate()
    dconv.set_deltas([dconv.be.iobuf(inshape)])

    # forward pass: zero weights wipe out any input
    fprop_out = dconv.fprop(x)
    fprop_host = fprop_out.asnumpyarray()
    assert np.min(fprop_host) == 0.0 and np.max(fprop_host) == 0.0

    # backward pass with a zero error signal
    zero_err = np.zeros(fprop_out.shape, dtype=dtypeu)
    deltas_host = dconv.bprop(NervanaObject.be.array(zero_err)).asnumpyarray()
    assert np.min(deltas_host) == 0.0 and np.max(deltas_host) == 0.0

    # weight gradient must also be all zeros
    dw_host = dconv.dW.asnumpyarray()
    assert np.min(dw_host) == 0.0 and np.max(dw_host) == 0.0
Example n. 7
0
def test_dconv_zeros(backend_default, zeros_convargs, deltas_buffer):
    """With all-zero weights, the Deconvolution layer's fprop output,
    bprop deltas, and weight updates must all be exactly zero.
    """
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32
    zero_init = Uniform(low=0.0, high=0.0)
    on_kepler = (isinstance(NervanaObject.be, NervanaGPU)
                 and NervanaObject.be.compute_capability < (5, 0))
    # Kepler kernels do not support 3D yet, so fall back to a 2D input shape.
    inshape = (64, 28, 28) if on_kepler else (64, 28, 28, 28)
    dconv = Deconvolution(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                          init=zero_init)
    random_input = np.random.random((np.prod(inshape), batch_size))
    x = dconv.be.array(random_input.astype(dtypeu))
    x.lshape = inshape
    dconv.configure(inshape)
    dconv.prev_layer = True
    dconv.allocate()

    dconv.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    dconv.set_deltas(deltas_buffer)

    # forward pass: zero weights wipe out any input
    fprop_out = dconv.fprop(x)
    fprop_host = fprop_out.get()
    assert np.min(fprop_host) == 0.0 and np.max(fprop_host) == 0.0

    # backward pass with a zero error signal
    zero_err = np.zeros(fprop_out.shape, dtype=dtypeu)
    deltas_host = dconv.bprop(NervanaObject.be.array(zero_err)).get()
    assert np.min(deltas_host) == 0.0 and np.max(deltas_host) == 0.0

    # weight gradient must also be all zeros
    dw_host = dconv.dW.get()
    assert np.min(dw_host) == 0.0 and np.max(dw_host) == 0.0
Example n. 8
0
def test_dconv_zeros(backend_default, zeros_convargs, deltas_buffer):
    """With all-zero weights, the Deconvolution layer's fprop output,
    bprop deltas, and weight updates must all be exactly zero.
    """
    fshape, nofm, batch_size = zeros_convargs
    NervanaObject.be.bsz = batch_size

    dtypeu = np.float32
    zero_init = Uniform(low=0.0, high=0.0)
    is_kepler_gpu = (isinstance(NervanaObject.be, NervanaGPU)
                     and NervanaObject.be.compute_capability < (5, 0))
    if is_kepler_gpu:
        # Kepler kernels do not support 3D yet: use a 2D input shape.
        inshape = (64, 28, 28)
    else:
        inshape = (64, 28, 28, 28)
    layer = Deconvolution(fshape=(fshape, fshape, nofm), strides=1, padding=0,
                          init=zero_init)
    host_inp = np.random.random((np.prod(inshape), batch_size)).astype(dtypeu)
    dev_inp = layer.be.array(host_inp)
    dev_inp.lshape = inshape
    layer.configure(inshape)
    layer.prev_layer = True
    layer.allocate()

    layer.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    layer.set_deltas(deltas_buffer)

    # forward pass: zero weights wipe out any input
    dev_out = layer.fprop(dev_inp)
    host_out = dev_out.get()
    assert np.min(host_out) == 0.0 and np.max(host_out) == 0.0

    # backward pass with a zero error signal
    zero_err = np.zeros(dev_out.shape, dtype=dtypeu)
    host_deltas = layer.bprop(NervanaObject.be.array(zero_err)).get()
    assert np.min(host_deltas) == 0.0 and np.max(host_deltas) == 0.0

    # weight gradient must also be all zeros
    host_dw = layer.dW.get()
    assert np.min(host_dw) == 0.0 and np.max(host_dw) == 0.0