def test_concat_sequence_l1_l1(backend_default, allrand_args, deltas_buffer):
    """Two Affine branches merged along the time axis (merge="recurrent").

    Each branch receives a stream with a different number of time steps;
    fprop output and per-branch weight gradients are checked against a
    plain-numpy reference.
    """
    dtypeu = np.float32
    w_rng, rngmax = allrand_args
    # Streams share the feature size but differ in sequence length.
    nin = 128
    steps = [32, 64]
    nout = 256
    batch_size = 16
    NervanaObject.be.bsz = batch_size
    be = NervanaObject.be

    init_unif = Uniform(low=w_rng[0], high=w_rng[1])
    branches = [Sequential(Affine(nout=nout, init=init_unif)) for _ in range(2)]
    inputs = [be.array(dtypeu(np.random.random((nin, batch_size * nsteps))))
              for nsteps in steps]
    merge = MergeMultistream(branches, merge="recurrent")
    assert len(inputs) == len(branches)

    merge.configure(inputs)
    merge.allocate()
    merge.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    merge.set_deltas(deltas_buffer)

    out = merge.fprop(inputs).get()

    # Reference fprop: per-branch W @ x, concatenated along time (axis=1).
    affines = [seq.layers[0] for seq in branches]
    weights = [aff.W.get() for aff in affines]
    expected = np.concatenate(
        [np.dot(w, x.get()) for w, x in zip(weights, inputs)], axis=1)
    assert allclose_with_out(out, expected, atol=1e-3)

    # Reference bprop: dW for each branch is err @ x.T on that branch's slice.
    errors = [dtypeu(np.random.random((nout, batch_size * nsteps)))
              for nsteps in steps]
    merge.bprop(be.array(np.concatenate(errors, axis=1)))
    expected_dW = [np.dot(err, x.get().T) for err, x in zip(errors, inputs)]
    for aff, dW_ref in zip(affines, expected_dW):
        assert allclose_with_out(aff.dW.get(), dW_ref)
    return
def test_concat_sequence_l1_l1(backend_default, allrand_args, deltas_buffer):
    """Two Affine branches merged along the time axis (merge="recurrent").

    Checks fprop output and per-branch weight gradients against a numpy
    reference, with a different step count per input stream.

    NOTE(review): this redefines a function of the same name that appears
    earlier in the file with identical logic; the earlier definition is
    shadowed at import time and never collected — confirm and deduplicate.
    """
    dtypeu = np.float32
    w_rng, rngmax = allrand_args
    # Same feature size for both streams; sequence lengths differ.
    nin = 128
    steps = [32, 64]
    nout = 256
    batch_size = 16
    NervanaObject.be.bsz = batch_size
    be = NervanaObject.be

    init_unif = Uniform(low=w_rng[0], high=w_rng[1])
    paths = [Sequential(Affine(nout=nout, init=init_unif)) for _ in (0, 1)]
    stream_data = [dtypeu(np.random.random((nin, batch_size * n))) for n in steps]
    inputs = [be.array(arr) for arr in stream_data]
    merge = MergeMultistream(paths, merge="recurrent")
    assert len(inputs) == len(paths)

    merge.configure(inputs)
    merge.allocate()
    merge.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    merge.set_deltas(deltas_buffer)

    out = merge.fprop(inputs).get()

    # Expected forward result: concatenate each branch's W @ x over time.
    inner = [p.layers[0] for p in paths]
    ref_out = np.concatenate(
        [np.dot(layer.W.get(), x.get()) for layer, x in zip(inner, inputs)],
        axis=1)
    assert allclose_with_out(out, ref_out, atol=1e-3)

    # Backward: split error along time, expect dW = err @ x.T per branch.
    errs = [dtypeu(np.random.random((nout, batch_size * n))) for n in steps]
    merge.bprop(be.array(np.concatenate(errs, axis=1)))
    for layer, err, x in zip(inner, errs, inputs):
        assert allclose_with_out(layer.dW.get(), np.dot(err, x.get().T))
    return
def test_concat_l1_l1(backend_default, allrand_args, deltas_buffer):
    """Two Affine branches of different widths merged by stacking (merge="stack").

    fprop output and each branch's weight gradient are compared against a
    plain-numpy reference computed from the layers' own weights.
    """
    dtypeu = np.float32
    w_rng, rngmax = allrand_args
    # Branches differ in both input and output dimensionality.
    nins = [128, 1024]
    nouts = [64, 2048]
    batch_size = 16
    NervanaObject.be.bsz = batch_size
    be = NervanaObject.be

    init_unif = Uniform(low=w_rng[0], high=w_rng[1])
    branches = [Sequential(Affine(nout=width, init=init_unif)) for width in nouts]
    inputs = [be.array(dtypeu(np.random.random((nin, batch_size)))) for nin in nins]
    merge = MergeMultistream(branches, merge="stack")
    assert len(inputs) == len(branches)

    merge.configure(inputs)
    merge.allocate()
    merge.allocate_deltas(deltas_buffer)
    deltas_buffer.allocate_buffers()
    merge.set_deltas(deltas_buffer)

    out = merge.fprop(inputs).get()

    # Expected forward result: stack each branch's W @ x along axis 0.
    affines = [seq.layers[0] for seq in branches]
    ref_out = np.concatenate(
        [np.dot(aff.W.get(), x.get()) for aff, x in zip(affines, inputs)])
    assert np.allclose(out, ref_out, atol=1e-3)

    # Backward: split stacked error by output width; dW = err @ x.T per branch.
    errs = [dtypeu(np.random.random((width, batch_size))) for width in nouts]
    merge.bprop(be.array(np.concatenate(errs)))
    for aff, err, x in zip(affines, errs, inputs):
        assert np.allclose(aff.dW.get(), np.dot(err, x.get().T))
    return