Code Example #1
File: theano_extensions.py Project: hydercps/hred-qs
 def make_node(self, pvals):
     pvals = T.as_tensor_variable(pvals)
     if self.odtype == 'auto':
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     vals = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     indx = T.tensor(dtype='int32', broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals,], [vals, indx])
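The make_node methods collected on this page only build a graph node; in Theano they normally live inside a theano.gof.Op subclass that also defines perform. A minimal, self-contained sketch of that surrounding boilerplate follows; the DoubleOp name and its doubling behaviour are illustrative only and are not taken from any of the projects listed here.

import numpy
import theano
import theano.tensor as T
from theano.gof import Op, Apply


class DoubleOp(Op):
    """Toy element-wise Op that returns 2 * x."""

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        # The output keeps the input's dtype and broadcastable pattern,
        # the same pattern used by the make_node methods on this page.
        out = T.tensor(dtype=x.dtype, broadcastable=x.type.broadcastable)
        return Apply(self, [x], [out])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = numpy.asarray(2 * x, dtype=x.dtype)


x = T.dmatrix('x')
f = theano.function([x], DoubleOp()(x))
print(f(numpy.ones((2, 2))))  # -> a (2, 2) array of 2.0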
Code Example #2
File: archkit.py Project: abailoni/greedy_CNN
    def __init__(self, dim=2, issequence=False, inpshape=None):
        """
        :type dim: int
        :param dim: Dimension of the data (= 2 for images, = 3 for sequential or 3D)

        :type inpshape: list or tuple
        :param inpshape: Input shape
        :return:
        """
        super(idlayer, self).__init__()

        # Parse data dimensionality
        assert not (dim is None and inpshape is None), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = T.tensor('floatX', [False, ] * self.inpdim, name='y:' + str(id(self)))
Code Example #3
File: archkit.py Project: abailoni/greedy_CNN
    def __init__(self, activation, dim=2, issequence=False, inpshape=None):
        """
        :type activation: callable
        :param activation: Activation function (any element-wise symbolic function)

        :type dim: int
        :param dim: Dimensionality of the input data

        :type issequence: bool
        :param issequence: Whether the input is a sequence

        :type inpshape: list
        :param inpshape: Input shape
        """

        super(activationlayer, self).__init__()

        # Parse data dimensionality
        assert not (dim is None and inpshape is None), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.activation = activation
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = self.dim == 2 and len(inpshape) == 5 if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = T.tensor('floatX', [False, ] * self.inpdim, name='y:' + str(id(self)))
Code Example #4
File: sp.py Project: olivierverdier/Theano
    def make_node(self, x):
        ###
        # At least for small matrices (5x5), the .sum() method of a csc matrix returns a dense matrix
        # as the result whether axis is 0 or 1... weird!
        ###
        if self.axis is None:
            z = tensor.tensor(broadcastable=(), dtype=x.dtype)
        elif self.axis == 0:
            if x.format == 'csc':
                z = tensor.tensor(broadcastable=(False,), dtype=x.dtype)
            elif x.format == 'csr':
                #return SparseVector() #WRITEME!
                raise NotImplementedError()
            else:
                raise NotImplementedError()
        elif self.axis == 1:
            if x.format == 'csc':
                #return SparseVector() #WRITEME!
                raise NotImplementedError()
            elif x.format == 'csr':
                z = tensor.tensor(broadcastable=(False,), dtype=x.dtype)
            else:
                raise NotImplementedError()
        else:
            assert False #axis should have been verified by self.__init__

        return gof.Apply(self, [x], [z])
Code Example #5
File: archkit.py Project: abailoni/greedy_CNN
    def __init__(self, keepdims=False, inpshape=None):
        """
        :type keepdims: bool
        :param keepdims: Whether to keep the T dimension or to squeeze it away.

        :type inpshape: list or tuple
        :param inpshape: Shape of the input tensor
        """
        super(timeaveragelayer, self).__init__()

        # Meta
        self.keepdims = keepdims

        # Input must be a 5D sequence, i.e.:
        self.dim = 2
        self.inpdim = 5
        self.allowsequences = True
        self.issequence = True

        # Shape inference
        self.inpshape = list(inpshape) if inpshape is not None else [None, ] * self.inpdim

        self.layerinfo = "[Keep Dimensions: {}]".format(self.keepdims)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = T.tensor('floatX', [False, ] * (self.inpdim - (0 if not self.keepdims else 1)),
                          name='y:' + str(id(self)))
        self.xr = T.tensor('floatX', [False, ] * self.inpdim, name='xr:' + str(id(self)))
Code Example #6
    def __init__(self, splits, dim=None, issequence=None, inpshape=None):
        """
        :type splits: list or int
        :param splits: Index of the split (along the channel axis). E.g. split = 3 would result in the input tensor
                       split as: [inp[:, 0:3, ...], inp[:, 3:, ...]] for 2D inputs.

        :type issequence: bool
        :param issequence: Whether input is a sequence

        :type inpshape: list or tuple
        :param inpshape: Input shape
        :return:
        """

        super(splitlayer, self).__init__()

        # Parse
        dim = 2 if issequence else dim
        assert not (
            dim is None and inpshape is None
        ), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]
        self.dim = 2 if self.issequence else self.dim  # Correct dim if necessary

        self.splits = pyk.obj2list(splits)
        self.numsplits = len(self.splits) + 1

        # More meta for layertrainyard
        self.numinp = 1
        self.numout = self.numsplits

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = [T.tensor('floatX', [False, ] * self.inpdim, name='y{}:'.format(splitnum) + str(id(self)))
                  for splitnum in range(self.numsplits)]
Code Example #7
File: test_elemwise.py Project: souravsingh/Theano
def test_elemwise_grad_broadcast():
    # This crashed in the past.

    x = tensor.tensor(dtype="float32", broadcastable=(True, False, False, False))
    y = tensor.tensor(dtype="float32", broadcastable=(True, True, False, False))

    theano.grad(theano.tensor.tanh(x).sum(), x)
    theano.grad(theano.tensor.tanh(x + y).sum(), y)
    theano.grad(theano.tensor.tanh(x + y).sum(), [x, y])
Code Example #8
File: test_blas_c.py Project: daien/Theano
 def setUp(self, dtype='float64'):
     self.dtype = dtype
     self.mode = theano.compile.get_default_mode().including('fast_run')
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = numpy.ones((2,3), dtype=dtype)
     self.xval = numpy.asarray([1,2], dtype=dtype)
     self.yval = numpy.asarray([1.5,2.7,3.9], dtype=dtype)
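Several of the test fixtures on this page declare their symbolic inputs with explicit broadcastable patterns rather than the matrix()/vector() shortcuts: (False, False) is a matrix, (False,) a vector and () a scalar. A small sketch of how such variables are wired into a compiled function follows; the gemv-style expression is only an illustration, not the body of the tests above.

import numpy
import theano
import theano.tensor as tensor

dtype = 'float64'
A = tensor.tensor(dtype=dtype, broadcastable=(False, False))  # matrix
a = tensor.tensor(dtype=dtype, broadcastable=())              # scalar
x = tensor.tensor(dtype=dtype, broadcastable=(False,))        # vector

# Compile a * (A dot x), analogous to how the fixtures' Aval/xval are later fed in.
f = theano.function([A, a, x], a * tensor.dot(A, x))
print(f(numpy.ones((2, 3), dtype=dtype),
        numpy.asarray(0.5, dtype=dtype),
        numpy.asarray([1, 2, 3], dtype=dtype)))  # -> [3. 3.]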
Code Example #9
File: test_blas_c.py Project: amishtal/Theano
 def setUp(self, dtype='float64'):
     self.dtype = dtype
     self.mode = theano.compile.get_default_mode().including('fast_run')
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = numpy.ones((2, 3), dtype=dtype)
     self.xval = numpy.asarray([1, 2], dtype=dtype)
     self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #10
File: test_blas.py Project: gyenney/Tools
 def setUp(self):
     self.mode = mode_with_gpu
     dtype = self.dtype = 'float32'  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     # data on the gpu make the op always inplace
     self.ger = gpu_ger_inplace
     self.ger_destructive = gpu_ger_inplace
     self.gemm = tcn.blas.gpu_gemm_inplace
Code Example #11
 def manual_setup_method(self, dtype="float64"):
     # This test can run even when theano.config.blas.ldflags is empty.
     self.dtype = dtype
     self.mode = theano.compile.get_default_mode().including("fast_run")
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #12
 def setUp(self):
     self.mode = mode_with_gpu
     dtype = self.dtype = 'float32'  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     # data on the gpu make the op always inplace
     self.ger = gpu_ger_inplace
     self.ger_destructive = gpu_ger_inplace
     self.gemm = tcn.blas.gpu_gemm_inplace
Code Example #13
File: test_blas_c.py Project: lamblin/Theano
 def setUp(self, dtype='float64'):
     # This test can run even when theano.config.blas.ldflags is empty.
     self.dtype = dtype
     self.mode = theano.compile.get_default_mode().including('fast_run')
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #14
 def setUp(self, dtype='float64'):
     # This test can run even when theano.config.blas.ldflags is empty.
     self.dtype = dtype
     self.mode = theano.compile.get_default_mode().including('fast_run')
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = numpy.ones((2, 3), dtype=dtype)
     self.xval = numpy.asarray([1, 2], dtype=dtype)
     self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #15
 def setup_method(self):
     self.mode = theano.compile.get_default_mode()
     self.mode = self.mode.including("fast_run")
     self.mode = self.mode.excluding("c_blas")  # c_blas trumps scipy Ops
     dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #16
    def __init__(self, numreplicate, dim=2, issequence=False, inpshape=None):
        """
        :type numreplicate: int
        :param numreplicate: Number of times to replicate

        :type dim: int
        :param dim: Dimensionality of input data

        :type inpshape: list
        :param inpshape: Input shape
        :return:
        """
        super(replicatelayer, self).__init__()

        assert not (
            dim is None and inpshape is None
        ), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]
        self.dim = 2 if self.issequence else self.dim  # Correct dim if necessary

        self.numcopies = numreplicate

        # More meta for layertrainyard
        self.numinp = 1
        self.numout = self.numcopies

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = [T.tensor('floatX', [False, ] * self.inpdim, name='y{}:'.format(splitnum) + str(id(self)))
                  for splitnum in range(self.numcopies)]
Code Example #17
File: test_blas_scipy.py Project: delallea/Theano
 def setUp(self):
     self.mode = theano.compile.get_default_mode().including('fast_run')
     dtype = self.dtype = 'float64'  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = numpy.ones((2,3), dtype=dtype)
     self.xval = numpy.asarray([1,2], dtype=dtype)
     self.yval = numpy.asarray([1.5,2.7,3.9], dtype=dtype)
     if not theano.tensor.blas_scipy.optimizations_enabled:
         self.SkipTest()
Code Example #18
 def test_numpy_2d(self):
     for shp0 in [(2, 3)]:
         x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
         a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
         for shp1 in [(6, 7)]:
             if len(shp0) + len(shp1) == 2:
                 continue
             y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
             f = function([x, y], kron(x, y))
             b = self.rng.rand(*shp1).astype(config.floatX)
             out = f(a, b)
             assert numpy.allclose(out, numpy.kron(a, b))
Code Example #19
 def test_numpy_2d(self):
     for shp0 in [(2, 3)]:
         x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
         a = np.asarray(self.rng.rand(*shp0)).astype(config.floatX)
         for shp1 in [(6, 7)]:
             if len(shp0) + len(shp1) == 2:
                 continue
             y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
             f = function([x, y], kron(x, y))
             b = self.rng.rand(*shp1).astype(config.floatX)
             out = f(a, b)
             assert np.allclose(out, np.kron(a, b))
Code Example #20
 def setUp(self):
     self.mode = theano.compile.get_default_mode().including('fast_run')
     dtype = self.dtype = 'float64'  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = numpy.ones((2, 3), dtype=dtype)
     self.xval = numpy.asarray([1, 2], dtype=dtype)
     self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
     if not theano.tensor.blas_scipy.optimizations_enabled:
         self.SkipTest()
Code Example #21
File: test_blas_c.py Project: Jerryzcn/Theano
    def setUp(self, dtype="float64"):
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano" " is directly linked to blas.")

        self.dtype = dtype
        self.mode = theano.compile.get_default_mode().including("fast_run")
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor.tensor(dtype=dtype, broadcastable=())
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.Aval = numpy.ones((2, 3), dtype=dtype)
        self.xval = numpy.asarray([1, 2], dtype=dtype)
        self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #22
File: test_blas.py Project: luke14free/Theano-PyMC
    def setup_method(self):
        self.mode = mode_with_gpu
        dtype = self.dtype = "float32"  # optimization isn't dtype-dependent
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor.tensor(dtype=dtype, broadcastable=())
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.ger_destructive = gpuger_inplace

        # data on the gpu make the op always inplace
        self.ger = gpuger_inplace
        self.gemm = gpugemm_inplace
        super().setup_method()
Code Example #23
 def setUp(self):
     self.mode = theano.compile.get_default_mode()
     self.mode = self.mode.including("fast_run")
     self.mode = self.mode.excluding("c_blas")  # c_blas trumps scipy Ops
     dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = numpy.ones((2, 3), dtype=dtype)
     self.xval = numpy.asarray([1, 2], dtype=dtype)
     self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
     if not theano.tensor.blas_scipy.have_fblas:
         self.SkipTest()
Code Example #24
    def setUp(self, dtype='float64'):
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano"
                           " is directly linked to blas.")

        self.dtype = dtype
        self.mode = theano.compile.get_default_mode().including('fast_run')
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor.tensor(dtype=dtype, broadcastable=())
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.Aval = numpy.ones((2, 3), dtype=dtype)
        self.xval = numpy.asarray([1, 2], dtype=dtype)
        self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
Code Example #25
    def __init__(self, activation, dim=2, issequence=False, inpshape=None):
        """
        :type activation: callable
        :param activation: Activation function (any element-wise symbolic function)

        :type dim: int
        :param dim: Dimensionality of the input data

        :type issequence: bool
        :param issequence: Whether the input is a sequence

        :type inpshape: list
        :param inpshape: Input shape
        """

        super(activationlayer, self).__init__()

        # Parse data dimensionality
        assert not (
            dim is None and inpshape is None
        ), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.activation = activation
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = T.tensor('floatX', [False, ] * self.inpdim, name='y:' + str(id(self)))
Code Example #26
    def make_node(self, z, a, x, y, pattern):
        z = tensor.as_tensor_variable(z)
        a = tensor.as_tensor_variable(a)
        x = tensor.as_tensor_variable(x)
        y = tensor.as_tensor_variable(y)
        pattern = tensor.as_tensor_variable(pattern)

        assert z.ndim == a.ndim == x.ndim == y.ndim == pattern.ndim == 2
        assert a.type.broadcastable == (True, True)

        if x.type.dtype != y.type.dtype != z.type.dtype != a.type.dtype:
            raise TypeError(x)

        if _is_sparse_variable(x) or _is_sparse_variable(
                y) or _is_sparse_variable(pattern) or _is_sparse_variable(z):
            raise TypeError(x)

        dtype_out = scalar.upcast(z.type.dtype, a.type.dtype, x.type.dtype,
                                  y.type.dtype, pattern.type.dtype)

        # We call the BLAS ?axpy function, which only accepts parameters of the same type
        z = tensor.cast(z, dtype_out)
        a = tensor.cast(a, dtype_out)
        x = tensor.cast(x, dtype_out)
        y = tensor.cast(y, dtype_out)
        pattern = tensor.cast(pattern, dtype_out)

        if self.inplace:
            assert z.type.dtype == dtype_out

        return gof.Apply(
            self, [z, a, x, y, pattern],
            [tensor.tensor(dtype=dtype_out, broadcastable=(False, False))])
Code Example #27
 def make_node(self, pvals, indx):
     pvals = T.as_tensor_variable(pvals)
     indx = T.as_tensor_variable(indx)
     if self.odtype == 'auto':
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     vals = T.tensor(dtype=odtype, broadcastable=(pvals.broadcastable[0], pvals.broadcastable[1]))
     return Apply(self, [pvals, indx], [vals])
Code Example #28
File: archkit.py Project: abailoni/greedy_CNN
    def __init__(self, inpshape=None):
        super(temporalizelayer, self).__init__()

        # Input must be non-sequential
        self.dim = 2
        self.inpdim = 4
        self.issequence = False
        self.allowsequences = False

        # Shape inference
        self.inpshape = list(inpshape) if inpshape is not None else [None, ] * self.inpdim

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = T.tensor('floatX', [False, ] * (self.inpdim + 1), name='y:' + str(id(self)))
        self.xr = T.tensor('floatX', [False, ] * self.inpdim, name='xr:' + str(id(self)))
Code Example #29
 def make_node(self, x, gz):
     assert isinstance(x, Variable)
     assert isinstance(gz, Variable)
     gx = tensor(dtype=scal.upcast(gz.dtype, x.dtype),
                 broadcastable=x.broadcastable)
     op = self
     return Apply(op, [x, gz], [gx])
Code Example #30
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype='float16').reshape(shp) + 1
    yval = np.empty((2, ) + shp[1:], dtype='float16')
    yval[:] = 2
    x = shared(xval, name='x')
    y = tensor.tensor(dtype='float16',
                      broadcastable=(False, ) * len(shp),
                      name='y')
    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([
        isinstance(node.op, GpuAdvancedIncSubtensor1)
        for node in f.maker.fgraph.toposort()
    ]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[[0, 2]] += yval
    assert np.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([
        isinstance(node.op, GpuIncSubtensor)
        for node in f.maker.fgraph.toposort()
    ]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
Code Example #31
def test_advinc_subtensor1_dtype():
    # Test the mixed dtype case
    shp = (3, 4)
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("uint64", "int8"),
        ("int64", "uint8"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2,) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(
            dtype=yval.dtype, broadcastable=(False,) * len(yval.shape), name="y"
        )
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)
        assert (
            sum(
                [
                    isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
                    for node in f.maker.fgraph.toposort()
                ]
            )
            == 1
        )
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [[0, 2]], yval)
        assert np.allclose(rval, rep)
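A side note on the reference computations in the two examples above: for the index list [0, 2] the fancy-index form rep[[0, 2]] += yval and np.add.at(rep, [[0, 2]], yval) agree, but they are not interchangeable in general. Fancy-index += applies one update per unique index, while np.add.at accumulates every occurrence, which matches the accumulate-duplicates semantics of advanced_inc_subtensor1. A small illustrative sketch in plain NumPy, not part of the tests above:

import numpy as np

a = np.zeros(3)
a[[0, 0, 2]] += 1.0           # duplicate index 0 counted once -> [1. 0. 1.]

b = np.zeros(3)
np.add.at(b, [0, 0, 2], 1.0)  # duplicates accumulate          -> [2. 0. 1.]

print(a, b)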
Code Example #32
def test_advinc_subtensor1_vector_scalar():
    # Test the case where x is a vector and y a scalar
    shp = (3, )
    for dtype1, dtype2 in [('float32', 'int8'), ('float32', 'float64'),
                           ('float16', 'int8'), ('float16', 'float64'),
                           ('float16', 'float16')]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.asarray(10, dtype=dtype2)
        x = shared(xval, name='x')
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name='y')
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)
        assert sum([
            isinstance(
                node.op,
                (GpuAdvancedIncSubtensor1_dev20, GpuAdvancedIncSubtensor1))
            for node in f.maker.fgraph.toposort()
        ]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert np.allclose(rval, rep)
Code Example #33
 def make_node(self, A, b):
     A = T.as_tensor_variable(A)
     b = T.as_tensor_variable(b)
     assert A.ndim == 2
     assert b.ndim in [1, 2]
     otype = T.tensor(broadcastable=b.broadcastable, dtype=(A * b).dtype)
     return theano.gof.Apply(self, [A, b], [otype])
Code Example #34
def makelayerxy(inpdim, outdim, layerid):
    x = pyk.delist([T.tensor('floatX', [False, ] * indim, name='x{}:'.format(inpnum) + str(layerid))
                    for inpnum, indim in enumerate(pyk.obj2list(inpdim))])
    y = pyk.delist([T.tensor('floatX', [False, ] * oudim, name='y{}:'.format(outnum) + str(layerid))
                    for outnum, oudim in enumerate(pyk.obj2list(outdim))])
    return x, y
Code Example #35
File: test_subtensor.py Project: Thrandis/Theano
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype='float16').reshape(shp) + 1
    yval = np.empty((2,) + shp[1:], dtype='float16')
    yval[:] = 2
    x = shared(xval, name='x')
    y = tensor.tensor(dtype='float16',
                      broadcastable=(False,) * len(shp),
                      name='y')
    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    np.add.at(rep, [[0, 2]], yval)
    assert np.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, GpuIncSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
Code Example #36
File: ops.py Project: hamelphi/Theano
 def make_node(self, A, b):
     A = as_tensor_variable(A)
     b = as_tensor_variable(b)
     otype = tensor.tensor(
             broadcastable=b.broadcastable,
             dtype = (A*b).dtype)
     return Apply(self, [A,b], [otype])
Code Example #37
File: mkl_gru_seq_wx.py Project: TaoLv/GRUBenchmark
    def make_node(self, *inputs):
        """
        inputs: X, Wx, Wh, hid_init, bias. bias is optional.

        """
        if len(inputs) in (4, 5):
            inp = list(map(tensor.as_tensor_variable, inputs))
        else:
            raise ValueError('GRU: number of parameters is wrong.')

        if len(inputs) == 5:
            self.bias = True
            assert inp[-1].ndim == 1
        else:
            self.bias = False

        assert inp[0].ndim == 3
        assert inp[1].ndim == 2
        assert inp[2].ndim == 2
        assert inp[3].ndim == 2

        if self.return_sequences:
            out = [inp[0].type()]
        else:
            bcast = [
                inp[0].type.broadcastable[1], inp[0].type.broadcastable[2]
            ]
            out = [
                tensor.tensor(dtype=inp[0].type.dtype, broadcastable=bcast)()
            ]

        return gof.Apply(self, inp, out)
Code Example #38
File: test_slinalg.py Project: gyenney/Tools
    def test_perform(self):
        if not imported_scipy:
            raise SkipTest('kron tests need the scipy package to be installed')

        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                x = tensor.tensor(dtype='floatX',
                                  broadcastable=(False,) * len(shp0))
                y = tensor.tensor(dtype='floatX',
                                  broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                assert numpy.allclose(out, scipy.linalg.kron(a, b))
Code Example #39
    def make_node(self, A, b):
        A = ts.as_sparse_variable(A)
        b = ts.as_sparse_or_tensor_variable(b)
        assert A.ndim == 2
        assert b.ndim in [1, 2]

        x = tt.tensor(dtype=b.dtype)
        return Apply(self, [A, b], [x])
Code Example #40
File: ops.py Project: he-yunlong/Theano
 def make_node(self, A, b):
     assert imported_scipy, "Scipy not available. Scipy is needed for the Solve op"
     A = as_tensor_variable(A)
     b = as_tensor_variable(b)
     assert A.ndim == 2
     assert b.ndim in [1, 2]
     otype = tensor.tensor(broadcastable=b.broadcastable, dtype=(A * b).dtype)
     return Apply(self, [A, b], [otype])
Code Example #41
File: test_slinalg.py Project: rsk2327/Theano
    def test_perform(self):
        if not imported_scipy:
            raise SkipTest('kron tests need the scipy package to be installed')

        for shp0 in [(2, ), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            for shp1 in [(6, ), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                x = tensor.tensor(dtype='floatX',
                                  broadcastable=(False, ) * len(shp0))
                y = tensor.tensor(dtype='floatX',
                                  broadcastable=(False, ) * len(shp1))
                f = function([x, y], kron(x, y))
                a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                assert numpy.allclose(out, scipy.linalg.kron(a, b))
Code Example #42
 def make_node(self, A, B):
     A = T.as_tensor_variable(A)
     B = T.as_tensor_variable(B)
     assert A.ndim in [2, 3]
     assert B.ndim in [2, 3]
     assert A.ndim == B.ndim
     otype = T.tensor(broadcastable=B.broadcastable, dtype=(A * B).dtype)
     return theano.gof.Apply(self, [A, B], [otype])
Code Example #43
 def make_node(self, pvals, indx, gr):
     pvals = T.as_tensor_variable(pvals)
     indx = T.as_tensor_variable(indx)
     gr = T.as_tensor_variable(gr)
     if self.odtype == 'auto':
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     vals = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals, indx, gr], [vals])
Code Example #44
 def make_node(self, A, b):
     assert imported_scipy, (
         "Scipy not available. Scipy is needed for the Solve op")
     A = as_tensor_variable(A)
     b = as_tensor_variable(b)
     otype = tensor.tensor(broadcastable=b.broadcastable,
                           dtype=(A * b).dtype)
     return Apply(self, [A, b], [otype])
Code Example #45
def test_local_dimshuffle_subtensor():

    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    x = tensor.dtensor4("x")
    x = tensor.patternbroadcast(x, (False, True, False, False))
    i = tensor.iscalar("i")

    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that dimshuffle removes dimensions the subtensor doesn't "see".
    x = tensor.tensor(broadcastable=(False, True, False), dtype="float64")
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that dimshuffle removes dimensions the subtensor doesn't "see"
    # but that lie in between other dimensions.
    x = tensor.tensor(broadcastable=(False, True, False, True),
                      dtype="float64")
    out = x[i].dimshuffle(1)

    f = theano.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
    assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4, )

    # Test a corner case that used to trigger a bug in Theano.
    x = tensor.dtensor4("x")
    x = tensor.patternbroadcast(x, (False, True, False, False))

    assert x[:, :, 0:3, ::-1].dimshuffle(0, 2, 3).eval({
        x: np.ones((5, 1, 6, 7))
    }).shape == (5, 3, 7)
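A brief aside on the pattern exercised above: dimshuffle may drop an input axis only if that axis is broadcastable (length 1), which is exactly what the patternbroadcast/tensor(broadcastable=...) declarations set up. A tiny stand-alone sketch of that behaviour, independent of the rewrite being tested:

import numpy as np
import theano
import theano.tensor as tensor

x = tensor.tensor(broadcastable=(False, True, False), dtype='float64')
y = x.dimshuffle(0, 2)               # drop the broadcastable middle axis
f = theano.function([x], y)
print(f(np.ones((4, 1, 5))).shape)   # (4, 5)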
Code Example #46
File: test_downsample.py Project: hhoareau/Theano
    def test_infer_shape(self):
        image = tensor.dtensor4()
        maxout = tensor.dtensor4()
        gz = tensor.dtensor4()
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

        image_val = rng.rand(4, 6, 7, 9)
        out_shapes = [[[[4, 6, 7, 9], [4, 6, 7, 9]],
                       [[4, 6, 3, 4], [4, 6, 4, 5]],
                       [[4, 6, 2, 3], [4, 6, 3, 3]],
                       [[4, 6, 3, 3], [4, 6, 4, 3]],
                       [[4, 6, 2, 4], [4, 6, 3, 5]]],
                      [[None, None],
                       [[4, 6, 4, 5], None],
                       [[4, 6, 3, 3], None],
                       [[4, 6, 4, 3], None],
                       [[4, 6, 3, 5], None]],
                      [[None, None],
                       [None, None],
                       [[4, 6, 3, 4], None],
                       [[4, 6, 4, 4], None],
                       [None, None]]]

        for i, maxpoolshp in enumerate(maxpoolshps):
            for j, ignore_border in enumerate([True, False]):
                for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]):
                    if out_shapes[k][i][j] is None:
                        continue
                    # checking shapes generated by DownsampleFactorMax
                    self._compile_and_check([image],
                                            [DownsampleFactorMax(maxpoolshp,
                                                                 ignore_border=ignore_border,
                                                                 padding=padding)(image)],
                                            [image_val], DownsampleFactorMax)

                    # checking shapes generated by MaxPoolGrad
                    maxout_val = rng.rand(*out_shapes[k][i][j])
                    gz_val = rng.rand(*out_shapes[k][i][j])
                    self._compile_and_check([image, maxout, gz],
                                            [MaxPoolGrad(maxpoolshp,
                                                         ignore_border=ignore_border,
                                                         padding=padding)
                                            (image, maxout, gz)],
                                            [image_val, maxout_val, gz_val],
                                            MaxPoolGrad,
                                            warn=False)
        # checking with broadcastable input
        image = tensor.tensor(dtype='float64',
                              broadcastable=(False, False, True, True))
        image_val = rng.rand(4, 6, 1, 1)
        self._compile_and_check(
            [image],
            [DownsampleFactorMax((2, 2),
                                 ignore_border=True,
                                 padding=(0, 0))(image)],
            [image_val], DownsampleFactorMax)
Code Example #47
 def make_node(self):
     return gof.Apply(
         self,
         [],
         [
             theano.Variable(Generic()),
             tensor(self.dtype, broadcastable=self.broadcastable),
         ],
     )
Code Example #48
 def make_node(self, x):
     x = theano.tensor.as_tensor_variable(x)
     inputs = [x]
     broadcastable = [
         b for i, b in enumerate(x.type.broadcastable)
         if i not in [self.axis]
     ]
     outputs = [T.tensor('int64', broadcastable, name='argmax_unique')]
     return theano.Apply(self, inputs, outputs)
Code Example #49
File: test_pool.py Project: tariqdaouda/Theano
    def test_infer_shape(self):
        image = tensor.dtensor4()
        maxout = tensor.dtensor4()
        gz = tensor.dtensor4()
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

        image_val = rng.rand(4, 6, 7, 9)
        out_shapes = [[[[4, 6, 7, 9], [4, 6, 7, 9]],
                       [[4, 6, 3, 4], [4, 6, 4, 5]],
                       [[4, 6, 2, 3], [4, 6, 3, 3]],
                       [[4, 6, 3, 3], [4, 6, 4, 3]],
                       [[4, 6, 2, 4], [4, 6, 3, 5]]],
                      [[None, None],
                       [[4, 6, 4, 5], None],
                       [[4, 6, 3, 3], None],
                       [[4, 6, 4, 3], None],
                       [[4, 6, 3, 5], None]],
                      [[None, None],
                       [None, None],
                       [[4, 6, 3, 4], None],
                       [[4, 6, 4, 4], None],
                       [None, None]]]

        for i, maxpoolshp in enumerate(maxpoolshps):
            for j, ignore_border in enumerate([True, False]):
                for k, padding in enumerate([(0, 0), (1, 1), (1, 2)]):
                    if out_shapes[k][i][j] is None:
                        continue
                    # checking shapes generated by Pool
                    self._compile_and_check([image],
                                            [Pool(maxpoolshp,
                                                  ignore_border=ignore_border,
                                                  padding=padding)(image)],
                                            [image_val], Pool)

                    # checking shapes generated by MaxPoolGrad
                    maxout_val = rng.rand(*out_shapes[k][i][j])
                    gz_val = rng.rand(*out_shapes[k][i][j])
                    self._compile_and_check([image, maxout, gz],
                                            [MaxPoolGrad(maxpoolshp,
                                                         ignore_border=ignore_border,
                                                         padding=padding)
                                            (image, maxout, gz)],
                                            [image_val, maxout_val, gz_val],
                                            MaxPoolGrad,
                                            warn=False)
        # checking with broadcastable input
        image = tensor.tensor(dtype='float64',
                              broadcastable=(False, False, True, True))
        image_val = rng.rand(4, 6, 1, 1)
        self._compile_and_check(
            [image],
            [Pool((2, 2),
                  ignore_border=True,
                  padding=(0, 0))(image)],
            [image_val], Pool)
Code Example #50
 def make_node(self, input, axis=-1):
     input = theano.tensor.as_tensor_variable(input)
     if axis is None:
         axis = theano.Constant(theano.gof.generic, None)
         # axis=None flattens the array before sorting
         out_type = tensor(dtype=input.dtype, broadcastable=[False])
     else:
         axis = theano.tensor.as_tensor_variable(axis)
         out_type = input.type()
     return theano.Apply(self, [input, axis], [out_type])
Code Example #51
def test_local_dimshuffle_subtensor():

    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    x = tensor.dtensor4('x')
    x = tensor.patternbroadcast(x, (False, True, False, False))
    i = tensor.iscalar('i')

    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that dimshuffle removes dimensions the subtensor doesn't "see".
    x = tensor.tensor(broadcastable=(False, True, False), dtype='float64')
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test dimshuffle remove dimensions the subtensor don't "see" but
    # have in between dimensions.
    x = tensor.tensor(broadcastable=(False, True, False, True),
                      dtype='float64')
    out = x[i].dimshuffle(1)

    f = theano.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
    assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4,)

    # Test a corner case that used to trigger a bug in Theano.
    x = tensor.dtensor4('x')
    x = tensor.patternbroadcast(x, (False, True, False, False))

    assert x[:, :, 0:3, ::-1].dimshuffle(0, 2, 3).eval({
        x: np.ones((5, 1, 6, 7))
    }).shape == (5, 3, 7)
Code Example #52
File: sort.py Project: DeepLearningIndia/Theano
 def make_node(self, input, axis=-1):
     input = theano.tensor.as_tensor_variable(input)
     if axis is None:
         axis = theano.Constant(theano.gof.generic, None)
         # axis=None flattens the array before sorting
         out_type = tensor(dtype=input.dtype, broadcastable=[False])
     else:
         axis = theano.tensor.as_tensor_variable(axis)
         out_type = input.type()
     return theano.Apply(self, [input, axis], [out_type])
Code Example #53
File: sp.py Project: HaniAlmousli/Theano
 def make_node(self, x):
     ###
     # At least for small matrices (5x5), the .sum() method of a csc matrix returns a dense matrix
     # as the result whether axis is 0 or 1... weird!
     ###
     assert isinstance(x.type, theano.sparse.SparseType)
     b = ()
     if self.axis is not None:
         b = (False,)
     z = tensor.tensor(broadcastable=b, dtype=x.dtype)
     return gof.Apply(self, [x], [z])
Code Example #54
File: theano_extensions.py Project: hydercps/hred-qs
 def make_node(self, pvals, unis):
     pvals = T.as_tensor_variable(pvals)
     unis = T.as_tensor_variable(unis)
     if unis.ndim != 2:
         raise NotImplementedError('unis ndim should be 2', unis.ndim)
     if self.odtype == 'auto':
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     out = T.tensor(dtype=odtype, broadcastable=unis.type.broadcastable)
     return Apply(self, [pvals, unis], [out])
Code Example #55
File: test_basic.py Project: daien/Theano
    def check_format_ndim(format, ndim):
        x = tensor.tensor(dtype=config.floatX, broadcastable=([False] * ndim), name="x")

        s = SparseFromDense(format)(x)
        s_m = -s
        d = dense_from_sparse(s_m)
        c = d.sum()
        g = tensor.grad(c, x)
        f = theano.function([x], [s, g])
        f(numpy.array(0, dtype=config.floatX, ndmin=ndim))
        f(numpy.array(7, dtype=config.floatX, ndmin=ndim))
Code Example #56
File: sp2.py Project: jsalvatier/Theano-1
 def make_node(self, a_data, a_indices, a_indptr, b):
     b = tensor.as_tensor_variable(b)
     a_data = tensor.as_tensor_variable(a_data)
     a_indices = tensor.as_tensor_variable(a_indices)
     a_indptr = tensor.as_tensor_variable(a_indptr)
     assert a_data.type.ndim == 1
     assert a_indices.type.ndim == 1
     assert a_indptr.type.ndim == 1
     assert b.type.ndim == 1
     return gof.Apply(self, [a_data, a_indices, a_indptr, b],
                            [tensor.tensor(b.dtype, (False,))])
Code Example #57
File: archkit.py Project: abailoni/greedy_CNN
    def __init__(self, splits, dim=None, issequence=None, inpshape=None):
        """
        :type splits: list or int
        :param splits: Index of the split (along the channel axis). E.g. split = 3 would result in the input tensor
                       split as: [inp[:, 0:3, ...], inp[:, 3:, ...]] for 2D inputs.

        :type issequence: bool
        :param issequence: Whether input is a sequence

        :type inpshape: list or tuple
        :param inpshape: Input shape
        :return:
        """

        super(splitlayer, self).__init__()

        # Parse
        dim = 2 if issequence else dim
        assert not (dim is None and inpshape is None), "Data dimension can not be parsed. Provide dim or inpshape."

        # Meta
        self.dim = dim if dim is not None else {4: 2, 5: 3}[len(inpshape)]
        self.allowsequences = True
        self.issequence = (self.dim == 2 and len(inpshape) == 5) if issequence is None else issequence
        self.inpdim = len(inpshape) if inpshape is not None else 5 if self.issequence else {2: 4, 3: 5}[dim]
        self.dim = 2 if self.issequence else self.dim   # Correct dim if necessary

        self.splits = pyk.obj2list(splits)
        self.numsplits = len(self.splits) + 1

        # More meta for layertrainyard
        self.numinp = 1
        self.numout = self.numsplits

        # Shape inference
        self.inpshape = [None, ] * self.inpdim if inpshape is None else list(inpshape)

        # Containers for input and output
        self.x = T.tensor('floatX', [False, ] * self.inpdim, name='x:' + str(id(self)))
        self.y = [T.tensor('floatX', [False, ] * self.inpdim, name='y{}:'.format(splitnum) + str(id(self)))
                  for splitnum in range(self.numsplits)]
Code Example #58
    def test_perform(self):
        if not imported_scipy:
            raise SkipTest("kron tests need the scipy package to be installed")

        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
            a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                # Newer versions of scipy want 4 dimensions at least,
                # so we have to add a dimension to a and flatten the result.
                if len(shp0) + len(shp1) == 3:
                    scipy_val = scipy.linalg.kron(a[numpy.newaxis, :], b).flatten()
                else:
                    scipy_val = scipy.linalg.kron(a, b)
                utt.assert_allclose(out, scipy_val)