def test_constant(backend, args):
    """Constant(v).fill must set every element of a backend buffer to v."""
    backend_obj = NervanaObject.be
    rows, cols = args
    fill_value = 3
    device_buf = backend_obj.empty((rows, cols))
    Constant(fill_value).fill(device_buf)
    host_vals = device_buf.get().flatten()
    assert all(v == fill_value for v in host_vals)
def test_ref_compare_ones(backend, refgruargs):
    """Compare the GRU layer against the reference implementation.

    Uses all-ones weight initialization so results are deterministic.
    """
    seq_len, input_size, hidden_size, batch_size = refgruargs
    be = NervanaObject.be
    be.bsz = be.batch_size = batch_size
    check_rnn(seq_len, input_size, hidden_size, batch_size,
              Constant(val=1.0), [1.0, 0.0])
def test_ref_compare_ones(backend_default, reflstmargs):
    """Compare the LSTM layer against the reference implementation.

    Uses all-ones weight initialization; numpy is seeded so any random
    data generated downstream is reproducible.
    """
    np.random.seed(seed=0)
    seq_len, input_size, hidden_size, batch_size = reflstmargs
    be = NervanaObject.be
    be.bsz = be.batch_size = batch_size
    check_lstm(seq_len, input_size, hidden_size, batch_size,
               Constant(val=1.0), [1.0, 0.0])
# See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Convolution layer tests """ import numpy as np from neon import NervanaObject from neon.backends import gen_backend from neon.layers import Sequential, Conv, Pooling, MergeBroadcast, Affine from neon.initializers.initializer import Gaussian, Constant from neon.transforms import Rectlin, Softmax init1 = Gaussian(scale=0.01) relu = Rectlin() bias = Constant(0) common = dict(activation=relu, init=init1, bias=bias) commonp1 = dict(activation=relu, init=init1, bias=bias, padding=1) commonp3s2 = dict(activation=relu, init=init1, bias=bias, padding=3, strides=2) pool3s1p1 = dict(fshape=3, padding=1, strides=1) batch_size = 64 def fshape(rs, k): return (rs, rs, k) def inception(kvals, name="i"): (p1, p2, p3) = kvals branch1 = [Sequential([Conv(fshape(1, p1[0]), **common)])] if p1[0] else []
def __init__(self, init=Constant(20.0), name=None):
    """Initialize the Normalize layer.

    Arguments:
        init (Initializer): weight initializer for the layer's scale
            parameters (default: Constant(20.0)).
        name (str, optional): layer name forwarded to the parent class.
    """
    super(Normalize, self).__init__(name=name, init=init)
    self.owns_outputs = True
    # Buffers filled in later; nothing is allocated at construction time.
    self.bottom_data = None
    self.norm_data = None