Example #1
    def build(self, d_init, w_init):
        # Symbolic variables
        self.u = T.dtensor3("u")  # (M, N, L)
        self.i = T.dmatrix("i")  # (M, N)
        self.p = T.dtensor3("p")  # (K, N, L)
        self.d = self.build_d(d_init)

        self.w = theano.shared(w_init, name="w")  # (N)

        # Compute distance scores
        s = T.batched_dot(self.u.dimshuffle((1, 0, 2)), self.d)  # (N, M, L)
        q = T.batched_dot(s, self.p.dimshuffle(
            (1, 2, 0)))  # (N, M, K) (order?)
        # s = T.nnet.sigmoid(s)

        if self.with_importance:
            s = (q * self.i.dimshuffle(1, 0, 'x')).sum(0)  # (M, K)
        elif self.with_weights:
            s = T.tensordot(q, self.w, axes=[[0], [0]])  # (M, K)
            # s = T.nnet.sigmoid(s)
        else:
            s = T.tensordot(q, np.ones(self.N), axes=[[0], [0]])  # (M, K)

        # Final outcome
        self.s = T.nnet.softmax(s)  # (M, K)

        inputs = [self.u, self.p]
        if self.with_importance:
            inputs += [self.i]

        self.predict = theano.function(inputs=inputs, outputs=self.s)
        self.get_latent = theano.function(inputs=inputs, outputs=q)
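Example #1's shape bookkeeping leans on T.batched_dot, which performs one matrix product per row of a shared leading batch axis. A self-contained sketch of that contraction rule, with made-up shapes:

import numpy as np
import theano
import theano.tensor as T

a = T.dtensor3('a')        # (B, M, K)
b = T.dtensor3('b')        # (B, K, N)
out = T.batched_dot(a, b)  # one matrix product per batch row -> (B, M, N)
f = theano.function([a, b], out)
print(f(np.ones((2, 3, 4)), np.ones((2, 4, 5))).shape)  # (2, 3, 5)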
Example #2
    def test_fail(self):
        # Test that conv2d fails for dimensions other than 2 or 3.

        with pytest.raises(Exception):
            conv.conv2d(T.dtensor4(), T.dtensor3())
        with pytest.raises(Exception):
            conv.conv2d(T.dtensor3(), T.dvector())
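These failures exercise Theano's signal-processing conv.conv2d, which only accepts 2-D or 3-D inputs. For contrast, a minimal passing call, assuming the theano.tensor.signal.conv module these tests appear to target:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import conv

img = T.dtensor3()   # (batch, rows, cols) -- a supported rank
kern = T.dmatrix()   # a single 2-D filter
f = theano.function([img, kern], conv.conv2d(img, kern))
print(f(np.ones((2, 8, 8)), np.ones((3, 3))).shape)  # 'valid' mode: (2, 6, 6)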
Example #3
    def variables(self):
        # Define parameters 'w'
        w = {}
        for i in ['wz', 'bz', 'logsd', 'wx', 'bx']:
            w[i] = T.dmatrix(i)

        # Define variables 'x' and 'z'
        z = {'eps': T.dtensor3('eps')}
        x = {'x': T.dtensor3('x')}

        return w, x, z
Example #4
	def variables(self):
		# Define parameters 'w'
		w = {}
		for i in ['wz','bz','logsd','wx','bx']:
			w[i] = T.dmatrix(i)
		
		# Define variables 'x' and 'z'
		z = {'eps':T.dtensor3('eps')}
		x = {'x':T.dtensor3('x')}
		
		return w, x, z
Example #5
def SimpleRNN_MKL():
    global x, h_init, w_x, w_h, b
    X = T.dtensor3('X')
    H_init = T.dmatrix('H_init')
    W_x = T.dmatrix('W_x')
    W_h = T.dmatrix('W_h')
    B = T.dtensor3('B') 
    o = SimpleRNN()(X, H_init, W_x, W_h, B)
    f = theano.function([X, H_init, W_x, W_h, B], o)

    o_mkl = f(x, h_init, w_x, w_h, b)
    return o_mkl
Example #6
 def test_fail(self):
     """
     Test that conv2d fails for dimensions other than 2 or 3.
     """
     try:
         conv.conv2d(T.dtensor4(), T.dtensor3())
         self.fail()
     except:
         pass
     try:
         conv.conv2d(T.dtensor3(), T.dvector())
         self.fail()
     except:
         pass
Example #7
 def test_fail(self):
     """
     Test that conv2d fails for dimensions other than 2 or 3.
     """
     try:
         conv.conv2d(T.dtensor4(), T.dtensor3())
         self.fail()
     except:
         pass
     try:
         conv.conv2d(T.dtensor3(), T.dvector())
         self.fail()
     except:
         pass
Example #8
    def build_model(self, train_x, train_mask_x, train_mask_out, train_target,
                    test_x, test_mask_x, test_mask_out, test_target):
        self.train_x = train_x
        self.train_mask_x = train_mask_x
        self.train_mask_out = train_mask_out
        self.train_target = train_target
        self.test_x = test_x
        self.test_mask_x = test_mask_x
        self.test_mask_out = test_mask_out
        self.test_target = test_target
        self.index = T.iscalar('index')
        self.num_batch_test = T.iscalar('num_batch_test')
        self.b_slice = slice(self.index * self.num_batch, (self.index + 1) * self.num_batch)

        sym_x = T.dtensor3()
        sym_mask_x = T.dmatrix()
        sym_target = T.dtensor3()
        sym_mask_out = T.dtensor3()
        # sym_mask_out = T.dtensor3() should not be needed since the output is still zero
        # TODO: verify whether that is true

        out = lasagne.layers.get_output(self.model, inputs={self.l_in: sym_x, self.mask_input: sym_mask_x})
        out_out = self.get_output_y(out)
        loss = T.mean(lasagne.objectives.squared_error(out_out, sym_target)) / self.num_batch

        out_test = lasagne.layers.get_output(self.model, inputs={self.l_in: sym_x, self.mask_input: sym_mask_x})
        out_out_test = self.get_output_y(out_test)
        loss_test = T.mean(lasagne.objectives.squared_error(out_out_test, sym_target)) / self.num_batch_test

        all_params = [self.W] + [self.b] + lasagne.layers.get_all_params(self.model)
        all_grads_target = [T.clip(g, -3, 3) for g in T.grad(loss, all_params)]
        all_grads_target = lasagne.updates.total_norm_constraint(all_grads_target, 3)
        updates_target = adam(all_grads_target, all_params)

        train_model = theano.function([self.index],
                                      [loss, out_out],
                                      givens={sym_x: self.train_x[self.b_slice],
                                              sym_mask_x: self.train_mask_x[self.b_slice],
                                              sym_target: self.train_target[self.b_slice],
                                              },
                                      updates=updates_target)
        test_model = theano.function([self.num_batch_test],
                                     [loss_test, out_out_test],
                                     givens={sym_x: self.test_x,
                                             sym_mask_x: self.test_mask_x,
                                             sym_target: self.test_target,
                                             })

        return train_model, test_model
Example #9
def format_algs_theano_bypart(hds, sms, total_parts=46, n_algs=9, max_hd=4):
    x = tt.dtensor3('x')
    y = tt.dtensor3('y')

    ass = np.array([i / (65 * 4 * 9) for i in xrange(9 * 65 * 4 * 46)])
    # for i in xrange(46):
    #  print i, list(ass).count(i)

    sms = sms[ass].reshape((46, 9, 65, 4))

    x = tt.pow(sms, hds)

    x = tt.sum(x, axis=3)

    return x
Example #10
    def build_model(self, train_x, train_mask_x, train_mask_out, train_target,
                    test_x, test_mask_x, test_mask_out, test_target):
        self.train_x = train_x
        self.train_mask_x = train_mask_x
        self.train_mask_out = train_mask_out
        self.train_target = train_target
        self.test_x = test_x
        self.test_mask_x = test_mask_x
        self.test_mask_out = test_mask_out
        self.test_target = test_target
        self.index = T.iscalar('index')
        self.num_batch_test = T.iscalar('num_batch_test')
        self.b_slice = slice(self.index * self.num_batch, (self.index + 1) * self.num_batch)

        sym_x = T.dtensor3()
        sym_mask_x = T.dmatrix()
        sym_target = T.dtensor3()
        # sym_mask_out = T.dtensor3() should not be needed since the output is still zero
        # TODO: verify whether that is true

        output = lasagne.layers.get_output(self.model, inputs={self.l_in: sym_x, self.mask_input: sym_mask_x})
        theta = self.get_output_y(output)
        log_px = self.get_log_x(sym_target, theta)
        log_px_sum_time = log_px.sum(axis=1, dtype=theano.config.floatX) # sum over time
        loss = - T.sum(log_px_sum_time) / self.num_batch # average over batch
        ##
        log_px_test = self.get_log_x(sym_target, theta)
        log_px_sum_time_test = log_px_test.sum(axis=1, dtype=theano.config.floatX) # sum over time
        loss_test = - T.sum(log_px_sum_time_test) / self.num_batch_test  # average over batch
        # loss = T.mean(lasagne.objectives.squared_error(mu, sym_target))
        all_params = [self.W_y_theta] + [self.b_y_theta] + lasagne.layers.get_all_params(self.model)
        all_grads_target = [T.clip(g, -3, 3) for g in T.grad(loss, all_params)]
        all_grads_target = lasagne.updates.total_norm_constraint(all_grads_target, 3)
        updates_target = adam(all_grads_target, all_params)

        train_model = theano.function([self.index],
                                      [loss, theta, log_px],
                                      givens={sym_x: self.train_x[self.b_slice],
                                              sym_mask_x: self.train_mask_x[self.b_slice],
                                              sym_target: self.train_target[self.b_slice]},
                                      updates=updates_target)
        test_model = theano.function([self.num_batch_test],
                                     [loss_test, theta],
                                     givens={sym_x: self.test_x,
                                             sym_mask_x: self.test_mask_x,
                                             sym_target: self.test_target})

        return train_model, test_model
Example #11
File: vision.py Project: jahuth/retina
 def __init__(self,retina=None,config=None,name=None,input_dependency=[],func=None,func_5=None,**kwargs): 
     self.model = retina 
     self.retina = retina # legacy naming, will be removed at some point
     self.config = config
     if self.config is None:
         self.config = {}
     if name is None:
         name = str(uuid.uuid4())
     self.name = self.config.get('name',name)
     self.input_dependency = input_dependency
     self.accept_dimensions = [3,5]
     self._updates = None
     self.collected_inputs = []
     self.node_type = 'Node'
     self.node_description = lambda: '- no computation -'
     self.parameter_variables = []
     self.state_variables = []
     self.initial_states = []
     self.updated_state_variables = []
     self.state = None
     self.compute = None
     self.update_variables = theano.updates.OrderedUpdates() ## TODO!
     self.__dict__.update(kwargs)
     if len(self.input_dependency) == 0:
         self.input_variable = T.dtensor3(self.name+"_input")
     else:
         self.input_variable = self.input_dependency[-1].output_variable
     self.output_variable = self.input_variable + 0.0  # define some computation here
     self.output_variable.name = self.name+'_output'
     if func is not None:
         d = func(self.input_variable,model=self.model,name=self.name,config=self.config)
         self.__dict__.update(d)
Example #12
def UV12_input(V1=Th.dmatrix(),
               STAs=Th.dmatrix(),
               STCs=Th.dtensor3(),
               N_spikes=Th.dvector(),
               **other):
    other.update(locals())
    return named(**other)
Example #13
    def test_max_pool_2d_3D(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = [(1, 2)]
        imval = rng.rand(2, 3, 4)
        images = tensor.dtensor3()

        for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                       [True, False],
                                                       ['max', 'sum',
                                                        'average_inc_pad',
                                                        'average_exc_pad']):
                # print 'maxpoolshp =', maxpoolshp
                # print 'ignore_border =', ignore_border
                numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                          ignore_border,
                                                          mode)
                output = max_pool_2d(images, maxpoolshp, ignore_border,
                                     mode=mode)
                output_val = function([images], output)(imval)
                assert numpy.all(output_val == numpy_output_val), (
                    "output_val is %s, numpy_output_val is %s"
                    % (output_val, numpy_output_val))
                c = tensor.sum(output)
                c_val = function([images], c)(imval)
                g = tensor.grad(c, images)
                g_val = function([images],
                                 [g.shape,
                                 tensor.min(g, axis=(0, 1, 2)),
                                 tensor.max(g, axis=(0, 1, 2))]
                                 )(imval)
Example #14
    def test_simple_3d(self):
        """Increments or sets part of a tensor by a scalar using full slice and
        a partial slice depending on a scalar.
        """
        a = tt.dtensor3()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)
        sl3 = 2

        for do_set in [True, False]:
            print "Set", do_set

            if do_set:
                result_var = tt.set_subtensor(a[sl1, sl3, sl2], increment)
            else:
                result_var = tt.inc_subtensor(a[sl1, sl3, sl2], increment)

            f = theano.function([a, increment, sl2_end], result_var)

            val_a = numpy.ones((5, 3, 4))
            val_inc = 2.3
            val_sl2_end = 2

            expected_result = numpy.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if do_set:
                expected_result[:, sl3, :val_sl2_end] = val_inc
            else:
                expected_result[:, sl3, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)
Example #15
def make_minimizer(Model):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'), T.dvector('eta')

    model = Model(L, y, mu, R, eta, eps)
    return theano.function([L, y, mu, R, eta, eps], model.minimize())
Example #16
    def testSlidingWindowL2MaxPooling(self):
        self.assertTrue(self.max_seq_len - self.filter_width > self.n_filters)

        self.setSeeds()

        input_shape = (self.batch_size, self.n_filters, self.max_seq_len,
                       self.filter_width)
        output_shape = (self.batch_size, self.n_filters, self.filter_width,
                        self.filter_width)

        x = np.zeros(shape=input_shape)
        expected = np.zeros(shape=output_shape)

        max_input_shape = (self.batch_size, self.filter_width,
                           self.filter_width)

        # For the i-th filter, make i the offset at which the maximum
        # L2 norm occurs.
        for i in np.arange(self.n_filters):
            start = i
            end = i + self.filter_width
            values = i + np.arange(np.prod(max_input_shape))
            values = values.reshape(max_input_shape)
            x[:, i, start:end, :] = values
            expected[:, i, :, :] = values

        it = T.iscalar()
        x3d = T.dtensor3('x3d')
        x4d = T.dtensor4('x4d')

        layer = SlidingWindowL2MaxPooling(self.batch_size, self.n_filters,
                                          self.filter_width, self.max_seq_len)
        '''
        Use the first sample and first filter to test `filter_dimension`.
        '''
        yt_filter_dim = layer.filter_dimension(it, x3d)
        f_filter_dim = theano.function(inputs=[it, x3d], outputs=yt_filter_dim)
        y_filter_dim_out = f_filter_dim(0, x[0])
        self.assertEquals((self.filter_width, self.filter_width),
                          y_filter_dim_out.shape)
        self.assertTrue(np.all(expected[0, 0, :, :] == y_filter_dim_out))
        '''
        Use the first sample to test `sample_dimension`.
        '''
        yt_sample_dim = layer.sample_dimension(it, x4d)
        f_sample_dim = theano.function(inputs=[it, x4d], outputs=yt_sample_dim)
        y_sample_dim_out = f_sample_dim(0, x)
        self.assertEquals(
            (self.n_filters, self.filter_width, self.filter_width),
            y_sample_dim_out.shape)
        self.assertTrue(np.all(expected[0, :, :, :] == y_sample_dim_out))
        '''
        Use all of `x` to test `_get_output`.
        '''
        yt_output = layer._get_output(x4d)
        f_output = theano.function(inputs=[x4d], outputs=yt_output)
        yt_out = f_output(x)
        self.assertEquals((self.batch_size, self.n_filters, self.filter_width,
                           self.filter_width), yt_out.shape)
        self.assertTrue(np.all(expected == yt_out))
Example #17
    def test_max_pool_2d_3D(self):
        rng = numpy.random.RandomState(utt.fetch_seed())

        maxpoolshps = [(1,2)]
        imval = rng.rand(2,3,4)
        images = tensor.dtensor3()

        for maxpoolshp in maxpoolshps:
            for ignore_border in [True,False]:
                #print 'maxpoolshp =', maxpoolshp
                #print 'ignore_border =', ignore_border
                numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp, ignore_border)

                output = max_pool_2d(images, maxpoolshp, ignore_border)
                output_val = function([images], output)(imval)
                assert numpy.all(output_val == numpy_output_val)

                c = tensor.sum(output)
                c_val = function([images], c)(imval)

                g = tensor.grad(c, images)
                g_val = function([images],
                        [g.shape,
                            tensor.min(g, axis=(0,1,2)),
                            tensor.max(g, axis=(0,1,2))]
                        )(imval)
Example #18
File: vision.py Project: jahuth/retina
    def __init__(self,retina=None,config=None,name=None,input_variable=None): 
        self.retina = retina 
        self.config = config
        self.state = None
        if name is None:
            name = str(uuid.uuid4())
        self.name = self.config.get('name',name)
        # 3d version
        self._I = T.dtensor3(self.name+"_I")
        self._preceding_V = T.dmatrix(self.name+"_preceding_V") # initial condition for sequence
        self._b_0 = T.dscalar(self.name+"_b_0")
        self._a_0 = T.dscalar(self.name+"_a_0")
        self._a_1 = T.dscalar(self.name+"_a_1")
        self._k = T.iscalar(self.name+"_k_bip") # number of iteration steps
        def bipolar_step(input_image,
                        preceding_V,b_0, a_0, a_1):
            V = (input_image * b_0 - preceding_V * a_1) / a_0
            return V

        # The order in theano.scan has to match the order of arguments in the function bipolar_step
        self._result, self._updates = theano.scan(fn=bipolar_step,
                                      outputs_info=[self._preceding_V],
                                      sequences = [self._I],
                                      non_sequences=[self._b_0, self._a_0, self._a_1],
                                      n_steps=self._k)
        self.output_variable = self._result[0]
        # The order of arguments presented here is arbitrary (will be inferred by the symbols provided),
        #  but function calls to compute_V_bip have to match this order!
        self.compute_V = theano.function(inputs=[self._I,self._preceding_V,
                                                      self._b_0, self._a_0, self._a_1,
                                                      self._k], 
                                              outputs=self._result, 
                                              updates=self._updates)
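The scanned bipolar_step is a first-order IIR recursion over the time axis of _I. A plain-numpy sketch of the same update, with hypothetical coefficients, makes the loop explicit:

import numpy as np

def bipolar_numpy(I, V0, b_0, a_0, a_1):
    # Same recursion as bipolar_step, iterated over the time axis of I.
    V, out = V0, []
    for frame in I:
        V = (frame * b_0 - V * a_1) / a_0
        out.append(V)
    return np.stack(out)

# e.g. bipolar_numpy(np.random.randn(20, 8, 8), np.zeros((8, 8)), 1.0, 1.0, 0.5)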
Example #19
File: test_pool.py Project: wgapl/Theano
    def test_max_pool_3d_3D(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1, 1), (3, 2, 1))
        imval = rng.rand(4, 5, 6)
        images = tensor.dtensor3()

        for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                       [True, False],
                                                       ['max', 'sum',
                                                        'average_inc_pad',
                                                        'average_exc_pad']):
                # print 'maxpoolshp =', maxpoolshp
                # print 'ignore_border =', ignore_border
                numpy_output_val = self.numpy_max_pool_nd(imval, maxpoolshp,
                                                          ignore_border,
                                                          mode=mode)
                output = pool_3d(images, maxpoolshp, ignore_border,
                                 mode=mode)
                output_val = function([images], output)(imval)
                utt.assert_allclose(output_val, numpy_output_val)

                def mp(input):
                    return pool_3d(input, maxpoolshp, ignore_border,
                                   mode=mode)
                utt.verify_grad(mp, [imval], rng=rng)
Example #20
def linear_parameterization( T  = Th.dtensor3() , u  = Th.dvector() , 
                                     **other ):
#                                b = Th.dvector() ,  ub = Th.dvector(), **other ): 
#    U = ( Th.sum( T*ub  , axis=2 ).T * b  ).T + Th.sum( T*u , axis=2 )
    U = Th.sum( T*u , axis=2 )    # U = Th.tensordot(T,u,axes=0)
    other.update(locals())
    return named( **other )
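The Th.sum(T*u, axis=2) form broadcasts u across the last axis and then reduces it, which matches a tensordot contraction of that axis; note the commented alternative would need axes=[[2],[0]] rather than axes=0 (an outer product). A numpy check of the identity:

import numpy as np

T3 = np.random.randn(4, 5, 6)   # stands in for the dtensor3 argument
u = np.random.randn(6)
lhs = (T3 * u).sum(axis=2)                  # broadcast, then reduce
rhs = np.tensordot(T3, u, axes=[[2], [0]])  # contract the last axis with u
assert np.allclose(lhs, rhs)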
Example #21
    def __init__(self):
        #return
        print "Hello from toponyms.__init__"
        #word2vec models
        self.eng_word2vec_model = word2vec.Word2Vec.load_word2vec_format(
            'c:/Scanex/Operative/Data/TEMP/news/data/wiki-100.model',
            binary=False)
        self.rus_word2vec_model = word2vec.Word2Vec.load_word2vec_format(
            'c:/Scanex/Operative/Data/TEMP/news/data/100-hs-sg-joint.model',
            binary=False)
        # trained models
        self.eng_model = 'c:/Scanex/Operative/Data/TEMP/news/data/model-eng.npz'
        self.rus_model = 'c:/Scanex/Operative/Data/TEMP/news/data/model-rus.npz'
        self.input_var = T.dtensor3('inputs')

        # building nets for russian and english
        self.rus_network = data.build_mlp(((None, 3, 201), 300),
                                          self.input_var)
        self.eng_network = data.build_mlp(((None, 3, 101), 150),
                                          self.input_var)
        with np.load(self.eng_model) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(self.eng_network, param_values)
        with np.load(self.rus_model) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(self.rus_network, param_values)
Example #22
def make_minimizer(Model):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'),  T.dvector('eta')

    model = Model(L, y, mu, R, eta, eps)
    return theano.function([L, y, mu, R, eta, eps], model.minimize())
Example #23
    def __build_eigens_t3(self):
        t3_covs = T.dtensor3('covs')

        # Get eigenvectors
        egmatrix_t3, _ = theano.scan(
            fn=lambda covariance: T.nlinalg.eig(covariance)[1],
            sequences=[t3_covs])

        # Transpose is equal to the inverse
        egmatrix_inv_t3, _ = theano.scan(fn=lambda egv: egv.dimshuffle(1, 0),
                                         sequences=[egmatrix_t3])

        # Get eigenvalues
        egvalues_t2, _ = theano.scan(
            fn=lambda covariance: T.nlinalg.eig(covariance)[0],
            sequences=[t3_covs])

        # We can't have zero when computing sqrt's because
        # it'll mess up theano
        egv_diag, _ = theano.scan(
            fn=lambda x: T.nlinalg.diag(T.maximum(x, .000001)),
            sequences=[egvalues_t2])

        egvalues_t3, _ = theano.scan(
            fn=lambda x: T.nlinalg.matrix_inverse(T.sqrt(x)),
            sequences=[egv_diag])

        return function(inputs=[t3_covs],
                        outputs=[egmatrix_t3, egvalues_t3, egmatrix_inv_t3])
Example #24
    def test_max_pool_2d_3D(self):
        rng = numpy.random.RandomState(utt.fetch_seed())

        maxpoolshps = [(1, 2)]
        imval = rng.rand(2, 3, 4)
        images = tensor.dtensor3()

        for maxpoolshp in maxpoolshps:
            for ignore_border in [True, False]:
                print 'maxpoolshp =', maxpoolshp
                print 'ignore_border =', ignore_border
                numpy_output_val = self.numpy_max_pool_2d(
                    imval, maxpoolshp, ignore_border)

                output = max_pool_2d(images, maxpoolshp, ignore_border)
                output_val = function([images], output)(imval)
                assert numpy.all(output_val == numpy_output_val)

                c = tensor.sum(output)
                c_val = function([images], c)(imval)

                g = tensor.grad(c, images)
                g_val = function([images], [
                    g.shape,
                    tensor.min(g, axis=(0, 1, 2)),
                    tensor.max(g, axis=(0, 1, 2))
                ])(imval)
Example #25
 def test_wrong_input(self):
     """
     Make sure errors are raised when image and kernel are not 4D tensors
     """
     try:
         self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                       'valid',
                       input=T.dmatrix())
         # should never reach here
         self.fail()
     except:
         pass
     try:
         self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                       'valid',
                       filters=T.dvector())
         # should never reach here
         self.fail()
     except:
         pass
     try:
         self.validate((3, 2, 8, 8), (4, 2, 5, 5),
                       'valid',
                       input=T.dtensor3())
         # should never reach here
         self.fail()
     except:
         pass
Example #26
    def test_max_pool_2d_3D(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = [(1, 2)]
        imval = rng.rand(2, 3, 4)
        images = tensor.dtensor3()

        for maxpoolshp, ignore_border, mode in product(
                maxpoolshps, [True, False],
            ['max', 'average_inc_pad', 'average_exc_pad']):
            # print 'maxpoolshp =', maxpoolshp
            # print 'ignore_border =', ignore_border
            numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                      ignore_border, mode)
            output = max_pool_2d(images, maxpoolshp, ignore_border, mode=mode)
            output_val = function([images], output)(imval)
            assert numpy.all(output_val == numpy_output_val), (
                "output_val is %s, numpy_output_val is %s" %
                (output_val, numpy_output_val))
            c = tensor.sum(output)
            c_val = function([images], c)(imval)
            g = tensor.grad(c, images)
            g_val = function([images], [
                g.shape,
                tensor.min(g, axis=(0, 1, 2)),
                tensor.max(g, axis=(0, 1, 2))
            ])(imval)
Example #27
def SimpleRNN_theano():
    X = T.dtensor3('X')
    W_x = T.dmatrix('W_x')
    W_h = T.dmatrix('W_h')
    B = T.dvector('B')
    Hid = T.dmatrix('hid')

    def step(x, h):
        h = T.tanh(T.dot(x, W_x) + T.dot(h, W_h))
        return h

    result, updates = theano.scan(step,
                                  sequences=[X],
                                  outputs_info=Hid,
                                  name="SimpleRNN_theano")
    f = theano.function([X, W_x, W_h, Hid], result)
    o_theano = f(x, w_x, w_h, h_init)
    """
    tic = time.time()
    for i in range(1000):
        o_theano = f(x, w_x, w_h, h_init)
    toc = time.time()
    print('Theano time: %.8f' %((toc - tic) / 1000))
    """
    return o_theano
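The call at the end of SimpleRNN_theano reads numeric globals defined elsewhere; a driver with hypothetical shapes that satisfies the matrix products inside step:

import numpy as np

# Hypothetical sizes: 10 time steps, batch of 4, input dim 6, hidden dim 8.
x = np.random.randn(10, 4, 6)   # scan iterates over the first (time) axis
w_x = np.random.randn(6, 8)     # so that dot(x_t, W_x) -> (4, 8)
w_h = np.random.randn(8, 8)     # so that dot(h, W_h)   -> (4, 8)
h_init = np.zeros((4, 8))       # initial hidden state, one row per batch item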
Example #28
    def test_simple_3d(self):
        """Increments or sets part of a tensor by a scalar using full slice and
        a partial slice depending on a scalar.
        """
        a = tt.dtensor3()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)
        sl3 = 2

        for do_set in [True, False]:
            print "Set", do_set

            if do_set:
                result_var = tt.set_subtensor(a[sl1, sl3, sl2], increment)
            else:
                result_var = tt.inc_subtensor(a[sl1, sl3, sl2], increment)

            f = theano.function([a, increment, sl2_end], result_var)

            val_a = numpy.ones((5, 3, 4))
            val_inc = 2.3
            val_sl2_end = 2

            expected_result = numpy.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if do_set:
                expected_result[:, sl3, :val_sl2_end] = val_inc
            else:
                expected_result[:, sl3, :val_sl2_end] += val_inc

            self.assertTrue(numpy.array_equal(result, expected_result))
Example #29
def normalize_theano_tensor(loss_tensor, lvl_acceptance):
    normalized_loss_tensor = (loss_tensor - np.min(loss_tensor, axis=0)) / \
                             np.max(loss_tensor - np.min(loss_tensor, axis=0), axis=0)
    Mshape = (np.shape(normalized_loss_tensor)[1],
              np.shape(normalized_loss_tensor)[2])
    mean_values = np.mean(normalized_loss_tensor, axis=2)
    cropx = np.sort(mean_values)[:, lvl_acceptance]

    normalized_tensor = T.dtensor3(
        'normalized_tensor'
    )  # theano.shared(name='normalized_tensor', value=normalized_loss_tensor.astype(theano.config.floatX))
    crops = T.dvector(
        'crops'
    )  #theano.shared(name='crops', value=crops.astype(theano.config.floatX))

    def crop_normalized_tensor(normalized_matrix, crop):
        # Symbolic tensors do not support boolean-mask assignment, so clamp
        # with T.switch instead, then rescale each matrix to [0, 1].
        cropped = T.switch(T.lt(normalized_matrix, crop), crop, normalized_matrix)
        return cropped / T.max(cropped)

    n_l_t, updates = theano.scan(
        crop_normalized_tensor,
        sequences=[normalized_tensor, crops])

    normalizexx_loss_tensor = theano.function([normalized_tensor, crops],
                                              n_l_t)

    normalized_loss_tensor = normalizexx_loss_tensor(normalized_loss_tensor,
                                                     cropx)
    return np.nan_to_num(normalized_loss_tensor)
Example #30
def trainer_tester(mapping,train_data,test_data):
	data = theano.shared(train_data)
	test_data = theano.shared(test_data)
	init_weights = 0.1*np.random.randn(len(mapping),2,100)
	W = theano.shared(init_weights)

	matches = T.wmatrix('matches')
	weights = T.dtensor3('weights')
	t_matches = T.wmatrix('t_matches')
	delta   = theano.shared(np.zeros(init_weights.shape))

	cost, accuracy = cost_fn(matches,weights)
	log_loss_fn = log_loss(t_matches,weights)
	grad = T.grad(cost,wrt=weights)
	train = theano.function(
			inputs = [],
			outputs = cost,
			givens  = { matches: data, weights: W },
			updates = [
				(W, W - 0.1*( grad + 0.5 * delta )),
				(delta, 0.1*( grad + 0.5 * delta ))
			]
		)
	test = theano.function(
			inputs = [],
			outputs = [log_loss_fn],
			givens  = { t_matches: test_data, weights: W }
		)
	return train,test,W
Example #31
def testPredictFunc():
    """
    Test the network predict function
    """
    network = LSTMP2H()

    symPremise = T.dtensor3("inputPremise")
    symHypothesis = T.dtensor3("inputHypothesis")
    premiseSent = np.random.randn(1,1,2)
    hypothesisSent = np.random.randn(1,1,2)

    predictFunc = network.predictFunc(symPremise, symHypothesis)
    labels = network.predict(premiseSent, hypothesisSent, predictFunc)

    for l in labels:
        print "Label: %s" %(l)
Example #32
    def test_max_pool_3d_3D_deprecated_interface(self):
        rng = np.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1, 1), (3, 2, 1))
        imval = rng.rand(4, 5, 6)
        images = tensor.dtensor3()

        for maxpoolshp, ignore_border, mode in product(
                maxpoolshps,
            [True, False],
            ["max", "sum", "average_inc_pad", "average_exc_pad"],
        ):
            # print 'maxpoolshp =', maxpoolshp
            # print 'ignore_border =', ignore_border
            numpy_output_val = self.numpy_max_pool_nd(imval,
                                                      maxpoolshp,
                                                      ignore_border,
                                                      mode=mode)
            output = pool_3d(
                input=images,
                ds=maxpoolshp,
                ignore_border=ignore_border,
                st=maxpoolshp,
                padding=(0, 0, 0),
                mode=mode,
            )
            output_val = function([images], output)(imval)
            utt.assert_allclose(output_val, numpy_output_val)

            def mp(input):
                return pool_3d(input, maxpoolshp, ignore_border, mode=mode)
Example #33
def course_grain(excitation_grid, cg_factor):
    """ excitation_grid should be list of 2d arrays in time order where each 2d array
    is the animation state of the system at time t. The excitation_grid
    of a system can be obtained using b = animator.Visual('file_name'), selecting your
    desired animation range and then exporting excitation_grid = b.animation_data.

    cg_factor is the unitless factor corresponding to the number of small original cells
    along each side of the new course grained cell.
    e.g. If a 200x200 array is processed with cg_factor = 5, the new course grained array
    will be shape 40x40 where each new cell corresponds to the net excitations from 5x5
    sections of the original array."""

    exc = np.array(excitation_grid).astype(
        'float')  #Asserts data type of imported excitation_grid
    filt = np.ones((cg_factor, cg_factor), dtype='float'
                   )  #Square matrix of ones in shape of course_grained cells.
    norm = cg_factor**2  #Number of original cells in each course grained cell
    a = T.dtensor3(
        'a'
    )  #Theano requires us to specify data types. dtensor3 is a 3d tensor of float64's
    b = T.dmatrix('b')  #Matrix of float64's
    z = conv2d(a, b, subsample=(
        cg_factor, cg_factor)) / norm  #This specifies the function to process.
    #               Convolution with subsample step length results in course grained matrices
    f = function(
        [a, b], z
    )  #Theano function definition where inputs ([a,b]) and outputs (z) are specified
    return f(exc,
             filt)  #Returns function with excitation_grid and filter as output
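A usage sketch with synthetic data, assuming the 'valid'-mode convolution arithmetic above: ten 200x200 frames coarse-grained by a factor of 5 should come back as ten 40x40 frames of block averages.

import numpy as np

frames = [np.random.randint(0, 2, (200, 200)) for _ in range(10)]
coarse = course_grain(frames, cg_factor=5)
print(coarse.shape)  # expected: (10, 40, 40)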
Example #34
    def preprocess_state(self, state): #TODO: Display to cross check.
        """
        Preprocess a sequence of frames that make up a state.

        Args:
        -----
            state: A sequence of frames.

        Returns:
        --------
            Preprocessed state    
        """
        N, m, n = self.agent_params['state_frames'], self.game_params['crop_hei'], self.game_params['crop_wid']
        factor = self.game_params['factor']
        maxed = np.zeros((N, m, n), dtype='float64')

        # max pool and downsample
        maxed[0] = state[0].reshape(m, n)
        for i in xrange(1, len(state)):
            maxed[i] = np.max(np.asarray(state[i - 1: i]), axis=0).reshape(m, n)

        x = tn.dtensor3('x')
        f = thn.function([x], downsample.max_pool_2d(x, factor))
        downsampled = f(maxed)

        if self.ale_params['display_state']:
            s = downsampled[-1].reshape(m / factor[0], n / factor[1])
            plt.figure(1)
            plt.clf()
            plt.imshow(s, 'gray')
            plt.pause(0.005)
        
        return downsampled.reshape(1, np.prod(downsampled.shape[0:])) #Stack
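downsample.max_pool_2d here is the old module layout; later Theano releases expose the same pooling as theano.tensor.signal.pool.pool_2d. A minimal sketch of the equivalent call:

import numpy as np
import theano
import theano.tensor as tn
from theano.tensor.signal.pool import pool_2d

x = tn.dtensor3('x')
f = theano.function([x], pool_2d(x, (2, 2), ignore_border=True))
print(f(np.arange(16.0).reshape(1, 4, 4)).shape)  # (1, 2, 2)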
Example #35
    def test_norm(self):

        x = tensor.dtensor3()
        a = np.random.rand(3, 2, 4).astype(theano.config.floatX)
        mode = theano.compile.Mode(optimizer="fast_compile", linker="py")

        for axis in [
                0, 1, 2, [0], [1], [2], None, [0, 1], [1, 2], [0, 1, 2], [-1],
            [-2], [-3], [-1, -2], [-1, -2, -3], [0, -2, 2]
        ]:

            f = function([x], [
                x.norm(L=1, axis=axis, keepdims=True),
                self.makeKeepDims_local(
                    x, x.norm(L=1, axis=axis, keepdims=False), axis)
            ],
                         mode=mode)

            ans1, ans2 = f(a)
            assert np.allclose(ans1, ans2)
            assert ans1.shape == ans2.shape

            g = function([x], [
                x.norm(L=2, axis=axis, keepdims=True),
                self.makeKeepDims_local(
                    x, x.norm(L=2, axis=axis, keepdims=False), axis)
            ],
                         mode=mode)

            ans1, ans2 = g(a)
            assert np.allclose(ans1, ans2)
            assert ans1.shape == ans2.shape
Example #36
 def UV( lU   = Th.dmatrix('lU')  , lV1  = Th.dmatrix('lV1') , V2 = Th.dvector('V2') ,
         STAs = Th.dmatrix('STAs'), STCs = Th.dtensor3('STCs'), **other):
     U  = Th.exp(lU + 1e-10)
     V1 = Th.exp(lV1+ 1e-10)
     return [{'theta': Th.dot( U.T , V1[i] ) ,
              'M'  :   Th.dot( V1[i] * U.T , (V2 * U.T).T ),
              'STA':   STAs[i,:],
              'STC':   STCs[i,:,:]} for i in range(N)]
Example #37
def make_theano_tensors(list_of_datasets):
    theano_tensor = [TT.dscalar(), TT.dvector(), TT.dmatrix(), TT.dtensor3()]
    res = []
    for d_set in list_of_datasets:
        dim = len(np.array(d_set).shape)
        theano_t = deepcopy(theano_tensor[dim])
        res.append(theano_t)
    return res
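A usage sketch: the helper hands back fresh symbolic variables whose rank matches each dataset, from 0-d scalars up to 3-d tensors.

import numpy as np

datasets = [3.0, [1.0, 2.0], [[1.0, 2.0]], np.zeros((2, 2, 2))]
sym_vars = make_theano_tensors(datasets)
print([v.ndim for v in sym_vars])  # [0, 1, 2, 3]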
Example #38
def linear_reparameterization( T  = Th.dtensor3('T') , u  = Th.dvector('u') , 
#                               V1 = Th.dmatrix('V1') , V2 = Th.dvector('V2') ,
#                            STAs = Th.dmatrix('STAs'), STCs = Th.dtensor3('STCs'),
#                            N_spikes = Th.dvector('N_spikes'), 
                            **other):
    other['U'] = Th.sum( T*u , axis=2 )
#    other[name] = Th.tensordot(T,u,axes=0)
    return other
Example #39
def make_theano_tensors(list_of_datasets):
    theano_tensor = [TT.dscalar(), TT.dvector(), TT.dmatrix(), TT.dtensor3()]
    res = []
    for d_set in list_of_datasets:
        dim = len(np.array(d_set).shape)
        theano_t = deepcopy(theano_tensor[dim])
        res.append(theano_t)
    return res
Example #40
def create_distance_matrix(displacement_matrix):
    X = T.dtensor3("d")
    dist_matrix = T.sqrt(T.sum(X * X, axis=-1))

    dist_func = theano.function([X], dist_matrix)

    distance_matrix = dist_func(displacement_matrix)

    return distance_matrix
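A usage sketch: for a grid of displacement vectors, the compiled function reduces the last axis to Euclidean lengths, matching numpy's norm.

import numpy as np

disp = np.random.randn(5, 5, 3)   # 5x5 grid of 3-d displacement vectors
dists = create_distance_matrix(disp)
assert np.allclose(dists, np.linalg.norm(disp, axis=-1))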
Example #41
 def __build_center(self):
     # We only want to compile our theano functions once
     imgv = T.dtensor3("imgv")
     # Get the mean
     u = T.mean(imgv, 0)
     # Get the standard deviation
     s = T.std(T.std(imgv, 0), 0)
     # Subtract our mean
     return function(inputs=[imgv], outputs=[(imgv - u) / s])
Example #42
 def __build_center(self):
     #We only want to compile our theano functions once
     imgv = T.dtensor3('imgv')
     # Get the mean
     u = T.mean(imgv, 0)
     # Get the standard deviation
     s = T.std(T.std(imgv, 0), 0)
     # Subtract our mean
     return function(inputs=[imgv], outputs=[(imgv - u) / s])
Example #43
    def test_wrong_input(self):
        # Make sure errors are raised when image and kernel are not 4D tensors

        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", input=tt.dmatrix())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", filters=tt.dvector())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", input=tt.dtensor3())
Example #44
    def test_wrong_input(self):
        # Make sure errors are raised when image and kernel are not 4D tensors

        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dmatrix())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', filters=T.dvector())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dtensor3())
Example #45
def test_REN_Cell():
    cell = Cell(31, 10)  # emb_dim, num_slots
    _input = np.random.randn(11, 100, 31)  # T,N,D
    x = T.dtensor3('x')
    H_init = theano.shared(np.zeros((100, 10, 31)), name='Initial Hidden')
    keys_init = theano.shared(np.zeros((100, 10, 31)), name='Initial keys')
    y, _ = cell(x, H_init, keys_init)
    f = theano.function(inputs=[x], outputs=[y])
    f(_input)
Example #46
 def UV( U    = Th.dmatrix('U')   , V1  = Th.dmatrix('V1') , V2 = Th.dvector('V2') ,
         STAs = Th.dmatrix('STAs'), STCs = Th.dtensor3('STCs'),
         N_spikes = Th.dvector('N_spikes'), **other):
    return [{'theta':    Th.dot( U.T , V1[i,:] ) ,
              'M'  :      Th.dot( V1[i,:] * U.T , (V2 * U.T).T ),
              'STA':      STAs[i,:],
              'STC':      STCs[i,:,:],
              'N_spike':  N_spikes[i]/(Th.sum(N_spikes)) ,
              'U' :       U,
              'logprior': 0. } for i in range(N)]
Example #47
def UVi(i , V1   = Th.dmatrix() , STAs = Th.dmatrix(), STCs = Th.dtensor3(), 
        N_spikes = Th.dvector(), **other):
    '''
    Reparameterize a list of N (theta,M) parameters as a function of a 
    common U,V2 and a matrix of N rows containing V1.
    '''
    return named( **{'v1'  :    V1[i,:] ,
                     'STA' :    STAs[i,:],
                     'STC' :    STCs[i,:,:],
                     'N_spike': N_spikes[i]/(Th.sum(N_spikes))} )
Example #48
def u2c_parameterization( T = Th.dtensor3() , V2 = Th.dvector() ,
                          u = Th.dvector()  , uc = Th.dvector() ,
                          c = Th.dvector()  , **other ):
#    Ub = Th.sum( T*ub , axis=2 )
#    Uc = Th.sum( T*uc , axis=2 )
    U  = Th.sum( T*u  , axis=2 ) + ( Th.sum( T*uc  , axis=2 ).T * c  ).T
#    U  = ( Th.sum( T*ub  , axis=2 ).T * b  ).T + Th.sum( T*u  , axis=2 )
#       + ( Th.sum( T*uc , axis=2 ).T * V2 ).T
    other.update(locals())
    return named( **other )
Example #49
 def test_wrong_input(self):
     """
     Make sure errors are raised when image and kernel are not 4D tensors
     """
     self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                       'valid', input=T.dmatrix())
     self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                       'valid', filters=T.dvector())
     self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                       'valid', input=T.dtensor3())
Example #50
File: test_mkl_gru.py Project: TaoLv/GRU
def GRU_MKL():
    X = T.dtensor3('X')
    W_x = T.dmatrix('W_x')
    W_h = T.dmatrix('W_h')
    B = T.dvector('b')
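    # Note: B is created here but never passed to GRU below; Example #51 shows the variant that also passes the bias.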
    Hid = T.dmatrix('Hid_init')

    Z = GRU(hid=1000, return_sequences=True, max_len=100)(X, W_x, W_h, Hid)
    f = theano.function([X, W_x, W_h, Hid], Z)

    return f
Example #51
def GRU_MKL():
    X = T.dtensor3('X')
    W_x = T.dmatrix('W_x')
    W_h = T.dmatrix('W_h')
    B = T.dvector('b')
    Hid = T.dmatrix('Hid_init')

    Z = GRU(hid=1000, return_sequences=True, max_len=100)(X, W_x, W_h, Hid, B)
    f = theano.function([X, W_x, W_h, Hid, B], Z)
    # theano.printing.pydotprint(f, outfile='gru.png', var_with_name_simple=True)
    return f
Example #52
 def test_infer_shape(self):
     z = tensor.dtensor3()
     x = tensor.dmatrix()
     y = tensor.dscalar()
     self._compile_and_check([x, y], [self.op(x, y)],
                             [numpy.random.rand(8, 5),
                              numpy.random.rand()], self.op_class)
     self._compile_and_check(
         [z, y], [self.op(z, y)],
         [numpy.random.rand(8, 8, 8),
          numpy.random.rand()], self.op_class)
Example #53
 def test_infer_shape(self):
     z = tensor.dtensor3()
     x = tensor.dmatrix()
     y = tensor.dscalar()
     self._compile_and_check([x, y], [self.op(x, y)],
                             [numpy.random.rand(8, 5),
                              numpy.random.rand()],
                             self.op_class)
     self._compile_and_check([z, y], [self.op(z, y)],
                             [numpy.random.rand(8, 8, 8),
                              numpy.random.rand()],
                             self.op_class)
Example #54
def main(argv):
    def formatter(prog):
        return argparse.HelpFormatter(prog, max_help_position=100, width=200)

    # Training labels, similarity matrix and weight of the regularization term
    f, R, mu, eps = T.dvector('f'), T.dtensor3('R'), T.dvector('mu'), T.dscalar('eps')
    sigma2 = T.dscalar('sigma2')

    # Indices of labeled examples
    l = T.ivector('l')

    f_star = propagate(f, l, R, mu, eps)
    ll = likelihood(f, l, R, mu, eps, sigma2)

    propagate_f = theano.function([f, l, R, mu, eps], f_star, on_unused_input='warn')
    likelihood_function = theano.function([f, l, R, mu, eps, sigma2], ll, on_unused_input='warn')

    ll_grad = T.grad(ll, [mu, eps, sigma2])
    likelihood_gradient_function = theano.function([f, l, R, mu, eps, sigma2], ll_grad, on_unused_input='warn')

    nb_nodes = 64

    R = np.zeros((nb_nodes, nb_nodes, 1))
    even_edges = [(i, i + 2) for i in range(0, nb_nodes, 2) if (i + 2) < nb_nodes]
    odd_edges = [(i, i + 2) for i in range(1, nb_nodes, 2) if (i + 2) < nb_nodes]

    for source, target in even_edges + odd_edges:
        R[source, target, 0], R[target, source, 0] = 1.0, 1.0

    mu = np.ones(1)
    eps = 1e-2
    sigma2 = 1e-6

    f = np.array([+ 1.0, - 1.0] + ([.0] * (nb_nodes - 2)))
    l = np.array(f != 0, dtype='int8')

    print(propagate_f(f, l, R, mu, eps))

    learning_rate = 1e-2

    for i in range(1024):
        ll_value = likelihood_function(f, l, R, mu, eps, sigma2)
        print('LL [%d]: %s' % (i, ll_value))

        grad_value = likelihood_gradient_function(f, l, R, mu, eps, sigma2)

        mu += learning_rate * grad_value[0]
        eps += max(1e-6, learning_rate * grad_value[1])
        sigma2 += max(1e-6, learning_rate * grad_value[2])

    print('Mu: %s' % str(mu))
    print('Eps: %s' % str(eps))
    print('Sigma^2: %s' % str(sigma2))
Example #55
def make_loss(Model, l1=0., l2=0.):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'),  T.dvector('eta')

    loss = Model.loss_symbolic(L, y, mu, R, eta, eps)

    L1 = abs(mu) + T.sum(abs(eta)) + abs(eps)
    L2 = mu ** 2 + T.sum(eta ** 2) + eps ** 2
    regularized_loss = loss + l1 * L1 + l2 * L2

    return theano.function([L, y, mu, R, eta, eps], regularized_loss)
Example #56
def pack_param_helper_maker():
    loose_cov_var = T.dtensor3('loose_cov')
    loose_rot_var = T.dtensor3('loose_rot')
    loose_hyd_var = T.dtensor3('loose_hyd')
    loose_hydpl_var = T.dmatrix('loose_hydpl')
    loose_rotpos_var = T.dmatrix('loose_rotpos')
    loose_rotscalar_var = T.dmatrix('loose_rotscalar')

    discrep_expr = (T.sum((unpack_rot_expr - loose_rot_var)**2) + T.sum(
        (unpack_cov_expr - loose_cov_var)**2) + T.sum(
            (unpack_hyd_expr - loose_hyd_var)**2) + T.sum(
                (unpack_hydpl_expr - loose_hydpl_var)**2) + T.sum(
                    (unpack_rotpos_expr - loose_rotpos_var)**2) + T.sum(
                        (unpack_rotscalar_expr - loose_rotscalar_var)**2))
    v = [
        lparam, loose_rot_var, loose_cov_var, loose_hyd_var, loose_hydpl_var,
        loose_rotpos_var, loose_rotscalar_var
    ]
    discrep = theano.function(v, discrep_expr)
    d_discrep = theano.function(v, T.grad(discrep_expr, lparam))
    return discrep, d_discrep
Example #57
 def UV( U    = Th.dmatrix('U')   , V1  = Th.dmatrix('V1') , V2 = Th.dvector('V2') ,
         STAs = Th.dmatrix('STAs'), STCs = Th.dtensor3('STCs'), 
         centers= Th.dvector('centers'), indices = Th.dmatrix('indices'), lam=Th.dscalar('lam'),
         lambdas= Th.dvector('lambdas') ,
         N_spikes = Th.dvector('N_spikes'),  Ncones = Th.dscalar('Ncones'), **other):
     return [{'theta':    Th.dot( U.T , V1[i,:] ) ,
              'M'  :      Th.dot( V1[i,:] * U.T , (V2 * U.T).T ),
              'STA':      STAs[i,:],
              'STC':      STCs[i,:,:],
              'N_spike':  N_spikes[i]/(Th.sum(N_spikes)) ,
              'U' :       U,
              'logprior': - Th.sum( Th.sqrt(Th.sum(V1**2.,axis=0) + 0.000001) * lambdas) } for i in range(N)]
Example #58
 def test_infer_shape(self):
     z = tensor.dtensor3()
     x = tensor.dmatrix()
     y = tensor.dscalar()
     self._compile_and_check([x, y], [self.op(x, y)], [numpy.random.rand(8, 5), numpy.random.rand()], self.op_class)
     self._compile_and_check(
         [z, y],
         [self.op(z, y)],
         # must be square when nd>2
         [numpy.random.rand(8, 8, 8), numpy.random.rand()],
         self.op_class,
         warn=False,
     )
Example #59
    def test_norm(self):

        x = tensor.dtensor3()
        a = numpy.random.rand(3, 2, 4).astype(theano.config.floatX)
        mode = theano.compile.Mode(optimizer="fast_compile", linker="py")

        for axis in [
            0,
            1,
            2,
            [0],
            [1],
            [2],
            None,
            [0, 1],
            [1, 2],
            [0, 1, 2],
            [-1],
            [-2],
            [-3],
            [-1, -2],
            [-1, -2, -3],
            [0, -2, 2],
        ]:

            f = function(
                [x],
                [
                    x.norm(L=1, axis=axis, keepdims=True),
                    self.makeKeepDims_local(x, x.norm(L=1, axis=axis, keepdims=False), axis),
                ],
                mode=mode,
            )

            ans1, ans2 = f(a)
            assert numpy.allclose(ans1, ans2)
            assert ans1.shape == ans2.shape

            g = function(
                [x],
                [
                    x.norm(L=2, axis=axis, keepdims=True),
                    self.makeKeepDims_local(x, x.norm(L=2, axis=axis, keepdims=False), axis),
                ],
                mode=mode,
            )

            ans1, ans2 = g(a)
            assert numpy.allclose(ans1, ans2)
            assert ans1.shape == ans2.shape
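The keepdims assertions mirror the numpy convention that keepdims only changes the shape of a reduction, never its values. A numpy-only sketch of the same invariant on an arbitrary test array:

import numpy as np

a = np.random.rand(3, 2, 4)
for axis in [0, 1, 2, None, (1, 2)]:
    kept = np.linalg.norm(a, axis=axis, keepdims=True)
    flat = np.linalg.norm(a, axis=axis, keepdims=False)
    assert np.allclose(kept.squeeze(), flat)  # same values, reduced shape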