def test_pooling(backend_cpu64, poolargs):
    """Numerical gradient check of a pooling layer on the 64-bit CPU backend.

    Uses inputs spaced far enough apart that a finite-difference perturbation
    can never change which element is the pooling max.
    """
    nin, nifm, fshape, batch_size, op = poolargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    sz = nin * nin * nifm * batch_size
    epsilon = 1.0e-5
    # Space values 2.5*epsilon apart so a perturbation can never change the
    # max element (keeps the max-pooling gradient well defined at every point).
    inp = np.arange(sz) * 2.5 * epsilon
    # Shuffle so the max locations are randomized across the feature map.
    np.random.shuffle(inp)
    inp = inp.reshape((nin * nin * nifm, batch_size))
    lshape = (nifm, nin, nin)
    # Pass op through the constructor for consistency with the sibling
    # pooling test rather than assigning the attribute after construction.
    layer = PoolingWithReset(fshape, op=op)
    pert_frac = 0.1  # test 10% of the inputs
    # select pert_frac fraction of inps to perturb
    pert_cnt = int(np.ceil(inp.size * pert_frac))
    pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
    (max_abs, max_rel) = general_gradient_comp(layer,
                                               inp,
                                               epsilon=epsilon,
                                               lshape=lshape,
                                               pert_inds=pert_inds)
    assert max_abs < 1.0e-7
def test_batchnorm(backend_cpu64, bnargs):
    """Numerical gradient check of a batch-norm layer on the 64-bit CPU backend.

    ``n`` may be an int (flat input size) or a tuple (layer shape); a tuple is
    flattened to its total element count for the input matrix.
    """
    n, batch_size = bnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    layer = BatchNormWithReset()
    inp_shape = None
    inp_size = n
    if isinstance(n, tuple):
        inp_shape = n
        # Was `reduce(mul, n)` — `reduce` is not a builtin on Python 3 and was
        # never imported here; np.prod gives the same product portably.
        inp_size = int(np.prod(n))
    inp = np.random.randn(inp_size, batch_size)
    epsilon = 1.0e-5
    pert_frac = 0.1  # test 10% of the inputs
    # select pert_frac fraction of inps to perturb
    pert_cnt = int(np.ceil(inp.size * pert_frac))
    pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
    (max_abs, max_rel) = general_gradient_comp(layer,
                                               inp,
                                               epsilon=epsilon,
                                               lshape=inp_shape,
                                               pert_inds=pert_inds)
    assert max_abs < 1.0e-7
def test_pooling(backend_cpu64, poolargs):
    """Gradient-check a pooling layer using max-stable, well-separated inputs."""
    nin, nifm, fshape, batch_size, op = poolargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    eps = 1.0e-5
    total = nin * nin * nifm * batch_size
    # Values 2.5*eps apart guarantee a perturbation cannot swap the max element.
    flat = np.arange(total) * 2.5 * eps
    np.random.shuffle(flat)
    inp = flat.reshape((nin * nin * nifm, batch_size))
    layer = PoolingWithReset(fshape, op=op)
    # Perturb a random 10% subset of the input entries.
    frac = 0.1
    count = int(np.ceil(inp.size * frac))
    inds = np.random.permutation(inp.size)[0:count]
    max_abs, max_rel = general_gradient_comp(layer, inp, epsilon=eps,
                                             lshape=(nifm, nin, nin),
                                             pert_inds=inds)
    assert max_abs < 1.0e-7
def test_batchnorm(cpu64_only, bnargs):
    """Numerical gradient check of a batch-norm layer (cpu64_only fixture).

    ``n`` may be an int (flat input size) or a tuple (layer shape); a tuple is
    flattened to its total element count for the input matrix.
    """
    n, batch_size = bnargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    layer = BatchNormWithReset()
    inp_shape = None
    inp_size = n
    if isinstance(n, tuple):
        inp_shape = n
        # Was `reduce(mul, n)` — `reduce` is not a builtin on Python 3 and was
        # never imported here; np.prod gives the same product portably.
        inp_size = int(np.prod(n))
    inp = np.random.randn(inp_size, batch_size)
    epsilon = 1.0e-5
    pert_frac = 0.1  # test 10% of the inputs
    # select pert_frac fraction of inps to perturb
    pert_cnt = int(np.ceil(inp.size * pert_frac))
    pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
    (max_abs, max_rel) = general_gradient_comp(layer,
                                               inp,
                                               epsilon=epsilon,
                                               lshape=inp_shape,
                                               pert_inds=pert_inds)
    assert max_abs < 1.0e-7
def test_pooling(cpu64_only, poolargs):
    """Gradient-check a pooling layer on Gaussian-random input (cpu64_only)."""
    nin, nifm, fshape, batch_size, op = poolargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    inp = np.random.randn(nin * nin * nifm, batch_size)
    layer = PoolingWithReset(fshape)
    layer.op = op
    eps = 1.0e-5
    # Perturb a random 10% subset of the input entries.
    frac = 0.1
    count = int(np.ceil(inp.size * frac))
    inds = np.random.permutation(inp.size)[0:count]
    max_abs, max_rel = general_gradient_comp(layer, inp, epsilon=eps,
                                             lshape=(nifm, nin, nin),
                                             pert_inds=inds)
    assert max_abs < 1.0e-7
def test_bias(backend_cpu64, biasargs):
    """Gradient-check a bias layer with Gaussian-initialized weights."""
    n, batch_size = biasargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    layer = BiasWithReset(init=Gaussian())
    inp = np.random.randn(n, batch_size)
    eps = 1.0e-5
    # Perturb a random 10% subset of the input entries.
    frac = 0.1
    count = int(np.ceil(inp.size * frac))
    inds = np.random.permutation(inp.size)[0:count]
    max_abs, max_rel = general_gradient_comp(layer, inp, epsilon=eps,
                                             lshape=inp.shape,
                                             pert_inds=inds)
    assert max_abs < 1.0e-7
def test_bias(cpu64_only, biasargs):
    """Gradient-check a bias layer with Gaussian-initialized weights (cpu64_only)."""
    n, batch_size = biasargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    layer = BiasWithReset(init=Gaussian())
    inp = np.random.randn(n, batch_size)
    eps = 1.0e-5
    # Perturb a random 10% subset of the input entries.
    frac = 0.1
    count = int(np.ceil(inp.size * frac))
    inds = np.random.permutation(inp.size)[0:count]
    max_abs, max_rel = general_gradient_comp(layer, inp, epsilon=eps,
                                             lshape=inp.shape,
                                             pert_inds=inds)
    assert max_abs < 1.0e-7
def test_mlp(backend_cpu64, mlpargs):
    """Numerical gradient check of a linear (MLP) layer on the 64-bit CPU backend."""
    nin, nout, batch_size = mlpargs
    # (dropped a no-op `batch_size = batch_size` self-assignment)
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    init = Gaussian()
    layer = LinearWithReset(nout=nout, init=init)
    inp = np.random.randn(nin, batch_size)
    epsilon = 1.0e-5
    pert_frac = 0.1  # test 10% of the inputs
    # select pert_frac fraction of inps to perturb
    pert_cnt = int(np.ceil(inp.size * pert_frac))
    pert_inds = np.random.permutation(inp.size)[0:pert_cnt]
    (max_abs, max_rel) = general_gradient_comp(layer,
                                               inp,
                                               epsilon=epsilon,
                                               pert_inds=pert_inds)
    assert max_abs < 1.0e-7