def test_lrnorm(backend_cpu64, lrnargs):
    """Numerical gradient check for the LRN layer on the 64-bit CPU backend."""
    nin, nifm, fshape, batch_size = lrnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    # Space the input values more than 2*eps apart so a perturbation can
    # never change which element is the max.
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))

    layer = LRNWithReset(depth=fshape, ascale=1.25e-4, bpower=0.75)
    lshape = (nifm, nin, nin)

    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=lshape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-6
# Example #2
# 0
def test_pooling(backend_cpu64, poolargs):
    """Numerical gradient check for the pooling layer on the 64-bit CPU backend."""
    nin, nifm, fshape, batch_size, op = poolargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    # Space the input values more than 2*eps apart so a perturbation can
    # never change which element is the max.
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))

    layer = PoolingWithReset(fshape, op=op)
    lshape = (nifm, nin, nin)

    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=lshape,
                                             pert_inds=pert_inds,
                                             pooling=True)
    assert max_abs < 1.0e-7
# Example #3
# 0
def test_conv(backend_cpu64, convargs):
    """Numerical gradient check for a dilated conv layer on the CPU backend."""
    nin, nifm, fside, batch_size, dil_h, dil_w = convargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    fshape = (fside, fside, fside)

    # Inputs spaced more than 2*eps apart.
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))

    layer = ConvWithReset(fshape,
                          strides=2,
                          padding=fside - 1,
                          dilation=dict(dil_d=1, dil_h=dil_h, dil_w=dil_w),
                          init=Gaussian())
    lshape = (nifm, nin, nin)

    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=lshape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
# Example #4
# 0
def test_conv_mkl(backend_mkl, convargs):
    """Numerical gradient check for a dilated conv layer on the MKL backend."""
    nin, nifm, fside, batch_size, dil_h, dil_w = convargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    fshape = (fside, fside, fside)

    # Inputs spaced more than 2*eps apart.
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))

    layer = ConvWithReset(fshape,
                          strides=2,
                          padding=fside - 1,
                          dilation=dict(dil_d=1, dil_h=dil_h, dil_w=dil_w),
                          init=Gaussian())
    lshape = (nifm, nin, nin)

    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=lshape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
# Example #5
# 0
def lrnorm(backend_cpu64, lrnargs):
    """Numerical gradient check for the LRN layer (duplicate of test_lrnorm).

    NOTE(review): the name lacks the ``test_`` prefix, so pytest will not
    collect this function — confirm whether that is intentional.
    """
    nin, nifm, fshape, batch_size = lrnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    # Space the input values more than 2*eps apart so a perturbation can
    # never change which element is the max.
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))

    layer = LRNWithReset(depth=fshape, ascale=1.25e-4, bpower=0.75)
    lshape = (nifm, nin, nin)

    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=lshape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-6
# Example #6
# 0
def test_bias_mkl(backend_mkl, biasargs):
    """Numerical gradient check for the bias layer on the MKL backend."""
    n, batch_size = biasargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    layer = BiasWithReset(init=Gaussian())
    inp = np.random.randn(n, batch_size)

    eps = 1.0e-5
    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=inp.shape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
# Example #7
# 0
def test_mlp_mkl(backend_mkl, mlpargs):
    """Numerical gradient check for a linear (MLP) layer on the MKL backend."""
    nin, nout, batch_size = mlpargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    layer = LinearWithReset(nout=nout, init=Gaussian())
    inp = np.random.randn(nin, batch_size)

    eps = 1.0e-5
    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
# Example #8
# 0
def test_mlp_mkl(backend_mkl, mlpargs):
    """Numerical gradient check for a linear (MLP) layer on the MKL backend.

    NOTE(review): this is an exact duplicate of an earlier ``test_mlp_mkl``
    definition in this file and shadows it — confirm whether one copy
    should be removed or renamed.
    """
    nin, nout, batch_size = mlpargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    layer = LinearWithReset(nout=nout, init=Gaussian())
    inp = np.random.randn(nin, batch_size)

    eps = 1.0e-5
    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
def test_batchnorm(backend_cpu64, bnargs):
    """Numerical gradient check for batch norm on the 64-bit CPU backend."""
    n, batch_size = bnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    layer = BatchNormWithReset()
    # `n` is either a tuple feature shape or a flat feature count.
    if isinstance(n, tuple):
        inp_shape, inp_size = n, np.prod(n)
    else:
        inp_shape, inp_size = None, n
    inp = np.random.randn(inp_size, batch_size)

    eps = 1.0e-5
    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=inp_shape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7
# Example #10
# 0
def test_batchnorm_mkl(backend_mkl, bnargs):
    """Numerical gradient check for batch norm on the MKL backend."""
    n, batch_size = bnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size

    layer = BatchNormWithReset()
    # `n` is either a tuple feature shape or a flat feature count.
    if isinstance(n, tuple):
        inp_shape, inp_size = n, np.prod(n)
    else:
        inp_shape, inp_size = None, n
    inp = np.random.randn(inp_size, batch_size)

    eps = 1.0e-5
    # Only perturb a random 10% subset of the inputs.
    frac = 0.1
    n_pert = int(np.ceil(inp.size * frac))
    pert_inds = np.random.permutation(inp.size)[:n_pert]

    max_abs, max_rel = general_gradient_comp(layer,
                                             inp,
                                             epsilon=eps,
                                             lshape=inp_shape,
                                             pert_inds=pert_inds)
    assert max_abs < 1.0e-7