Exemple #1
0
 def make_node(self, C, alpha, A, B, beta):
     """Build the Apply node for the GPU variant of ``Gemm``."""
     # The parent call is kept for its checks; its returned node is unused.
     Gemm.make_node(self, C, alpha, A, B, beta)
     C, A, B = (as_gpuarray_variable(v) for v in (C, A, B))
     # All five operands must agree on dtype.
     assert A.dtype == B.dtype == C.dtype == alpha.dtype == beta.dtype
     return Apply(self, [C, alpha, A, B, beta], [C.type()])
Exemple #2
0
 def make_node(self, y, alpha, A, x, beta):
     """Build the Apply node for the GPU variant of ``Gemv``."""
     # Parent make_node is invoked for validation; its result is discarded.
     Gemv.make_node(self, y, alpha, A, x, beta)
     y, A, x = (as_gpuarray_variable(v) for v in (y, A, x))
     # Every operand must share a single dtype.
     assert A.dtype == x.dtype == y.dtype == alpha.dtype == beta.dtype
     return Apply(self, [y, alpha, A, x, beta], [y.type()])
Exemple #3
0
 def make_node(self, A, alpha, x, y):
     """Build the Apply node for the GPU variant of ``Ger``."""
     # Parent make_node is invoked for validation; its result is discarded.
     Ger.make_node(self, A, alpha, x, y)
     A, x, y = (as_gpuarray_variable(v) for v in (A, x, y))
     # A, x and y must share a dtype (alpha is not checked here).
     assert A.dtype == x.dtype == y.dtype
     return Apply(self, [A, alpha, x, y], [A.type()])
Exemple #4
0
 def make_node(self, C, alpha, A, B, beta):
     """Construct the Apply node for this GPU gemm op."""
     # Delegate to the base class purely for its validation; the node it
     # builds is discarded and rebuilt with GPU-typed inputs below.
     Gemm.make_node(self, C, alpha, A, B, beta)
     gpu_A = as_gpuarray_variable(A)
     gpu_B = as_gpuarray_variable(B)
     gpu_C = as_gpuarray_variable(C)
     assert gpu_A.dtype == gpu_B.dtype == gpu_C.dtype == alpha.dtype == beta.dtype
     return Apply(self, [gpu_C, alpha, gpu_A, gpu_B, beta], [gpu_C.type()])
Exemple #5
0
 def make_node(self, y, alpha, A, x, beta):
     """Construct the Apply node for this GPU gemv op."""
     # Base-class call is kept only for its validation; its node is rebuilt
     # below with GPU-typed variables.
     Gemv.make_node(self, y, alpha, A, x, beta)
     gpu_A = as_gpuarray_variable(A)
     gpu_x = as_gpuarray_variable(x)
     gpu_y = as_gpuarray_variable(y)
     assert gpu_A.dtype == gpu_x.dtype == gpu_y.dtype == alpha.dtype == beta.dtype
     return Apply(self, [gpu_y, alpha, gpu_A, gpu_x, beta], [gpu_y.type()])
Exemple #6
0
 def make_node(self, x, y, *inputs):
     # GPU version of IncSubtensor.make_node: x and y are converted to GPU
     # variables, the parent's handling of the index inputs is reused, and
     # the resulting node is registered with a copied op instance.
     x = as_gpuarray_variable(x)
     y = as_gpuarray_variable(y)
     rval = tensor.IncSubtensor.make_node(self, x, y, *inputs)
     # NOTE(review): self is copied, presumably so create_iadd_node can
     # attach per-node state without mutating the shared op — confirm.
     op = copy.copy(self)
     ret = gof.Apply(op, [x, y] + rval.inputs[2:], [x.type()])
     op.create_iadd_node(ret)
     return ret
Exemple #7
0
 def make_node(self, x, y, *inputs):
     # Convert the destination (x) and increment (y) to GPU variables; the
     # remaining index inputs are validated by the CPU parent class.
     x = as_gpuarray_variable(x)
     y = as_gpuarray_variable(y)
     rval = tensor.IncSubtensor.make_node(self, x, y, *inputs)
     # A shallow copy of the op owns the new Apply node; create_iadd_node
     # is then called on that copy — presumably to record node-specific
     # state (TODO confirm against create_iadd_node's definition).
     op = copy.copy(self)
     ret = gof.Apply(op, [x, y] + rval.inputs[2:], [x.type()])
     op.create_iadd_node(ret)
     return ret
Exemple #8
0
 def make_node(self, x, b, y_idx):
     """Build the Apply node; the op has three outputs (nll, sm, am)."""
     # N.B. won't work when we don't cast y_idx to float anymore
     x, b, y_idx = [as_gpuarray_variable(v) for v in (x, b, y_idx)]
     # First output combines x's dtype with y_idx's broadcastable pattern.
     nll = GpuArrayType(x.type.dtype, y_idx.type.broadcastable)()
     return Apply(self, [x, b, y_idx], [nll, x.type(), y_idx.type()])
Exemple #9
0
 def make_node(self, x, b, y_idx):
     """Make the Apply node for this op (GPU inputs, three outputs)."""
     # N.B. won't work when we don't cast y_idx to float anymore
     x = as_gpuarray_variable(x)
     b = as_gpuarray_variable(b)
     y_idx = as_gpuarray_variable(y_idx)
     sm = x.type()
     am = y_idx.type()
     # nll mixes x's dtype with y_idx's broadcastable pattern.
     nll = GpuArrayType(x.type.dtype, y_idx.type.broadcastable)()
     return Apply(self, [x, b, y_idx], [nll, sm, am])
Exemple #10
0
 def make_node(self, img, kern):
     """Validate img/kern and build the Apply node for this GPU convolution.

     Both inputs must be 4D float32 tensors.
     """
     # Only single precision is supported by the conv implementation.
     if img.dtype != "float32" or kern.dtype != "float32":
         raise NotImplementedError("GpuConv currently only work"
                                   " with float32 dtype")
     for name, var in (('img', img), ('kern', kern)):
         if var.type.ndim != 4:
             raise TypeError('%s must be 4D tensor' % name)
     img = as_gpuarray_variable(img)
     kern = as_gpuarray_variable(kern)
     # Only the two leading output dimensions may be broadcastable; they
     # are inherited from img and kern respectively (presumably batch and
     # filter counts — confirm against the conv implementation).
     broadcastable = [img.type.broadcastable[0],
                      kern.type.broadcastable[0]] + [False, False]
     out = GpuArrayType(img.dtype, broadcastable)()
     return gof.Apply(self, [img, kern], [out])
Exemple #11
0
 def make_node(self, img, kern):
     """Check inputs and construct the Apply node for the GPU convolution."""
     # The underlying kernels only accept single-precision data.
     if img.dtype != "float32" or kern.dtype != "float32":
         raise NotImplementedError("GpuConv currently only work"
                                   " with float32 dtype")
     if img.type.ndim != 4:
         raise TypeError('img must be 4D tensor')
     if kern.type.ndim != 4:
         raise TypeError('kern must be 4D tensor')
     img = as_gpuarray_variable(img)
     kern = as_gpuarray_variable(kern)
     # The trailing two output dims are never broadcastable; the leading
     # two follow img and kern.
     bcast = [img.type.broadcastable[0], kern.type.broadcastable[0],
              False, False]
     return gof.Apply(self, [img, kern],
                      [GpuArrayType(img.dtype, bcast)()])
Exemple #12
0
    def make_node(self, *inputs):
        """Build the Apply node for the GPU elemwise.

        The base ``Elemwise.make_node`` determines the outputs' dtypes and
        broadcastable patterns; GPU-typed variables are then substituted for
        both inputs and outputs.  Before returning, the scalar op's C
        support code is probed so that unsupported ops fail here (with
        ``SupportCodeError``) rather than at compile time.
        """
        res = Elemwise.make_node(self, *inputs)
        outputs = [GpuArrayType(broadcastable=o.type.broadcastable,
                                dtype=o.type.dtype)() for o in res.outputs]
        inputs = [as_gpuarray_variable(i) for i in inputs]
        node = Apply(self, inputs, outputs)

        # Try to generate the kernel to catch SupportCodeErrors
        try:
            inps = [make_argument(i, 'i%d' % (n,)) for n, i in
                    enumerate(node.inputs)]
            scal_ins = [scalar.Scalar(i.dtype) for i in node.inputs]

            # Outputs that alias an input via inplace_pattern get no
            # argument of their own.
            outs = [make_argument(o, 'o%d' % (n,)) for n, o in
                    enumerate(node.outputs) if not n in self.inplace_pattern]
            scal_out = [scalar.Scalar(o.dtype) for o in node.outputs]

            # A throwaway scalar-level node lets us ask the scalar op for
            # its per-apply C support code without building a real graph.
            fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
                              [o() for o in scal_out])
            code = self.scalar_op.c_support_code_apply(fake_node, "test")
            if code:
                raise SupportCodeError(code)
        except MethodNotDefined:
            # No per-apply support code at all: nothing to reject.
            pass
        try:
            support_code = self.scalar_op.c_support_code()
            # Only the THEANO_MACRO_MOD define (or nothing) is tolerated.
            if (support_code.strip() != "#define THEANO_MACRO_MOD(x,y) (x % y)" and
                support_code.strip() != ""):
                # The macro is fine, the C++ struct is not.
                raise SupportCodeError(support_code)
        except MethodNotDefined:
            pass

        return node
Exemple #13
0
    def make_node(self, *inputs):
        """Create the GPU Apply node mirroring ``Elemwise.make_node``.

        Raises ``SupportCodeError`` when the scalar op relies on C support
        code that the GPU backend cannot translate.
        """
        res = Elemwise.make_node(self, *inputs)
        # Rebuild each output with a GPU array type of identical
        # dtype/broadcastable pattern.
        outputs = [GpuArrayType(broadcastable=o.type.broadcastable,
                                dtype=o.type.dtype)() for o in res.outputs]
        inputs = [as_gpuarray_variable(i) for i in inputs]
        node = Apply(self, inputs, outputs)

        # Try to generate the kernel to catch SupportCodeErrors
        try:
            inps = [make_argument(i, 'i%d' % (n,)) for n, i in
                    enumerate(node.inputs)]
            scal_ins = [scalar.Scalar(i.dtype) for i in node.inputs]

            # Skip outputs that are computed in place over an input.
            outs = [make_argument(o, 'o%d' % (n,)) for n, o in
                    enumerate(node.outputs) if not n in self.inplace_pattern]
            scal_out = [scalar.Scalar(o.dtype) for o in node.outputs]

            # Probe the scalar op through a disposable scalar-typed node.
            fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
                              [o() for o in scal_out])
            code = self.scalar_op.c_support_code_apply(fake_node, "test")
            if code:
                raise SupportCodeError(code)
        except MethodNotDefined:
            pass
        try:
            support_code = self.scalar_op.c_support_code()
            # Anything beyond the known-safe macro (or empty string) means
            # C++ support code we cannot handle on the GPU.
            if (support_code.strip() != "#define THEANO_MACRO_MOD(x,y) (x % y)" and
                support_code.strip() != ""):
                # The macro is fine, the C++ struct is not.
                raise SupportCodeError(support_code)
        except MethodNotDefined:
            pass

        return node
Exemple #14
0
 def make_node(self, *inputs):
     """Build the GPU Apply node, mirroring ``Elemwise.make_node``."""
     base = Elemwise.make_node(self, *inputs)
     # Recreate each output with a GPU array type of the same
     # dtype/broadcastable pattern.
     gpu_outs = [GpuArrayType(dtype=o.type.dtype,
                              broadcastable=o.type.broadcastable)()
                 for o in base.outputs]
     gpu_ins = [as_gpuarray_variable(i) for i in inputs]
     node = Apply(self, gpu_ins, gpu_outs)
     # Try to generate the kernel now so SupportCodeErrors surface early.
     self.generate_kernel(node, 'test')
     return node
Exemple #15
0
    def make_node(self, x, y, ilist):
        """Validate and build the Apply node; x and y are moved to the GPU
        while the index list stays a CPU tensor variable.
        """
        xg = as_gpuarray_variable(x)
        yg = as_gpuarray_variable(y)
        idx = tensor.as_tensor_variable(ilist)

        assert xg.type.dtype == yg.type.dtype
        assert xg.type.ndim >= yg.type.ndim

        # The index must be an integer vector indexing a non-scalar,
        # non-broadcastable first dimension of x.
        if idx.type.dtype[:3] not in ('int', 'uin'):
            raise TypeError('index must be integers')
        if idx.type.broadcastable != (False, ):
            raise TypeError('index must be vector')
        if xg.type.ndim == 0:
            raise TypeError('cannot index into a scalar')
        if xg.type.broadcastable[0]:
            # the caller should have made a copy of x len(ilist) times
            raise TypeError('cannot index into a broadcastable dimension')

        return gof.Apply(self, [xg, yg, idx], [xg.type()])
Exemple #16
0
    def make_node(self, x, y, ilist):
        """Build the Apply node; the index list remains a CPU tensor."""
        x_var = as_gpuarray_variable(x)
        y_var = as_gpuarray_variable(y)
        idx_var = tensor.as_tensor_variable(ilist)

        # x and y must agree on dtype, and y may not out-rank x.
        assert x_var.type.dtype == y_var.type.dtype
        assert x_var.type.ndim >= y_var.type.ndim

        if idx_var.type.dtype[:3] not in ('int', 'uin'):
            raise TypeError('index must be integers')
        if idx_var.type.broadcastable != (False,):
            raise TypeError('index must be vector')
        if x_var.type.ndim == 0:
            raise TypeError('cannot index into a scalar')
        if x_var.type.broadcastable[0]:
            # the caller should have made a copy of x len(ilist) times
            raise TypeError('cannot index into a broadcastable dimension')

        return gof.Apply(self, [x_var, y_var, idx_var], [x_var.type()])
 def make_node(self, *inputs):
     """Build the GPU Apply node and eagerly try generating its kernel."""
     parent = Elemwise.make_node(self, *inputs)
     outputs = []
     for o in parent.outputs:
         # Same dtype and broadcast pattern, but as a GPU array type.
         outputs.append(GpuArrayType(broadcastable=o.type.broadcastable,
                                     dtype=o.type.dtype)())
     converted = list(map(as_gpuarray_variable, inputs))
     res = Apply(self, converted, outputs)
     # Generating the kernel here lets SupportCodeErrors be caught early.
     self.generate_kernel(res, 'test')
     return res
Exemple #18
0
    def make_node(self, x, y, ilist):
        """Like GpuAdvancedIncSubtensor1.make_node, but it makes sure the
        indices are of type long.
        """
        x_ = as_gpuarray_variable(x)
        y_ = as_gpuarray_variable(y)
        # Unlike the base variant, the index list is also converted to a
        # GPU variable instead of a CPU tensor variable.
        ilist_ = as_gpuarray_variable(ilist)

        assert x_.type.dtype == y_.type.dtype
        assert x_.type.ndim >= y_.type.ndim

        # The index must be an integer vector over a non-scalar,
        # non-broadcastable first dimension of x.
        if ilist_.type.dtype[:3] not in ("int", "uin"):
            raise TypeError("index must be integers")
        if ilist_.type.broadcastable != (False,):
            raise TypeError("index must be vector")
        if x_.type.ndim == 0:
            raise TypeError("cannot index into a scalar")
        if x_.type.broadcastable[0]:
            # the caller should have made a copy of x len(ilist) times
            raise TypeError("cannot index into a broadcastable dimension")

        return gof.Apply(self, [x_, y_, ilist_], [x_.type()])
Exemple #19
0
    def make_node(self, ten4, neib_shape, neib_step):
        """Build the Apply node; ten4 goes to the GPU while the shape and
        step descriptors stay CPU tensor variables.
        """
        ten4 = as_gpuarray_variable(ten4)
        neib_shape = T.as_tensor_variable(neib_shape)
        neib_step = T.as_tensor_variable(neib_step)

        # ten4 is 4D; shape and step are 1-D integer vectors.
        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1
        assert "int" in neib_shape.dtype
        assert "int" in neib_step.dtype

        out = GpuArrayType(broadcastable=(False, False),
                           dtype=ten4.type.dtype)()
        return Apply(self, [ten4, neib_shape, neib_step], [out])
Exemple #20
0
    def make_node(self, ten4, neib_shape, neib_step):
        """Make the Apply node; the 2-D output matches ten4's dtype."""
        ten4 = as_gpuarray_variable(ten4)
        neib_shape = T.as_tensor_variable(neib_shape)
        neib_step = T.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        for v in (neib_shape, neib_step):
            # Both descriptors are 1-D integer vectors.
            assert v.ndim == 1
            assert "int" in v.dtype

        otype = GpuArrayType(dtype=ten4.type.dtype,
                             broadcastable=(False, False))
        return Apply(self, [ten4, neib_shape, neib_step], [otype()])
    def make_node(self, input):
        """Build the GPU reduction node, recording which axes are reduced."""
        res = CAReduceDtype.make_node(self, input)
        input = as_gpuarray_variable(input)
        # Output type mirrors what the base class computed.
        otype = GpuArrayType(dtype=res.outputs[0].dtype,
                             broadcastable=res.outputs[0].broadcastable)

        if res.op.axis is not None:
            # redux[i] is True iff dimension i is reduced; it is merely an
            # alternate encoding of `axis`, so it takes no part in
            # __eq__ or __hash__.
            res.op.redux = [i in res.op.axis
                            for i in range(len(input.type.broadcastable))]

        return Apply(res.op, [input], [otype()])
Exemple #22
0
    def make_node(self, input):
        """Build the Apply node for the GPU reduction.

        The base ``CAReduceDtype.make_node`` works out the output dtype and
        broadcastable pattern; the input is then converted to a GPU
        variable and a matching GPU output type is used.
        """
        res = CAReduceDtype.make_node(self, input)
        input = as_gpuarray_variable(input)
        otype = GpuArrayType(dtype=res.outputs[0].dtype,
                             broadcastable=res.outputs[0].broadcastable)

        if res.op.axis is not None:
            # Build a per-dimension boolean mask of the reduced axes.
            redux = []
            for i in range(len(input.type.broadcastable)):
                redux.append(i in res.op.axis)
                # since redux is just another way to describe what is in axis
                # it doesn't need to be compared in __eq__ or __hash__
            res.op.redux = redux

        return Apply(res.op, [input], [otype()])
 def make_node(self, input):
     """Make the Apply node for the GPU dimshuffle."""
     # Let the base DimShuffle compute the output type metadata.
     res = DimShuffle.make_node(self, input)
     out0 = res.outputs[0].type
     otype = GpuArrayType(dtype=out0.dtype, broadcastable=out0.broadcastable)
     return Apply(self, [as_gpuarray_variable(input)], [otype()])
Exemple #24
0
 def make_node(self, y, alpha, A, x, beta):
     """Make the Apply node for the GPU gemv (no dtype assertion here)."""
     # Parent call is retained for its checks; its result is unused.
     Gemv.make_node(self, y, alpha, A, x, beta)
     y, A, x = [as_gpuarray_variable(v) for v in (y, A, x)]
     return Apply(self, [y, alpha, A, x, beta], [y.type()])
Exemple #25
0
 def make_node(self, x, *inputs):
     """Build the Apply node for the GPU subtensor."""
     rval = tensor.Subtensor.make_node(self, x, *inputs)
     # Mirror the CPU output type on the GPU.
     otype = GpuArrayType(dtype=rval.outputs[0].type.dtype,
                          broadcastable=rval.outputs[0].type.broadcastable)
     return gof.Apply(self, [as_gpuarray_variable(x)] + rval.inputs[1:],
                      [otype()])
Exemple #26
0
 def make_node(self, x, y):
     """Make the Apply node for the GPU dot22."""
     # Parent validation only; its returned node is discarded.
     Dot22.make_node(self, x, y)
     x, y = as_gpuarray_variable(x), as_gpuarray_variable(y)
     assert x.dtype == y.dtype
     return Apply(self, [x, y], [x.type()])
Exemple #27
0
 def make_node(self, C, alpha, A, B, beta):
     """Make the Apply node for the GPU gemm (no dtype assertion here)."""
     # Parent call is kept for its checks; its result is unused.
     Gemm.make_node(self, C, alpha, A, B, beta)
     C, A, B = (as_gpuarray_variable(v) for v in (C, A, B))
     return Apply(self, [C, alpha, A, B, beta], [C.type()])
Exemple #28
0
 def make_node(self, x, b):
     """Make the Apply node; the output mirrors x's type."""
     x, b = as_gpuarray_variable(x), as_gpuarray_variable(b)
     return Apply(self, [x, b], [x.type()])
Exemple #29
0
 def make_node(self, x):
     """Unary GPU op: the output has the same type as the input."""
     xg = as_gpuarray_variable(x)
     return Apply(self, [xg], [xg.type()])
Exemple #30
0
 def make_node(self, C, alpha, A, B, beta):
     """Construct the Apply node for the GPU gemm (no dtype assertion)."""
     # The base-class node is thrown away; the call is kept for validation.
     Gemm.make_node(self, C, alpha, A, B, beta)
     gC = as_gpuarray_variable(C)
     gA = as_gpuarray_variable(A)
     gB = as_gpuarray_variable(B)
     return Apply(self, [gC, alpha, gA, gB, beta], [gC.type()])
Exemple #31
0
 def make_node(self, dnll, sm, y_idx):
     """Make the Apply node; the single output shares sm's type."""
     dnll, sm, y_idx = [as_gpuarray_variable(v) for v in (dnll, sm, y_idx)]
     return Apply(self, [dnll, sm, y_idx], [sm.type()])
Exemple #32
0
 def make_node(self, input):
     """Build the Apply node for the GPU dimshuffle."""
     base = DimShuffle.make_node(self, input)
     base_out = base.outputs[0].type
     # Same dtype/broadcast pattern as the CPU result, but as a GPU type.
     otype = GpuArrayType(dtype=base_out.dtype,
                          broadcastable=base_out.broadcastable)
     input = as_gpuarray_variable(input)
     return Apply(self, [input], [otype()])
Exemple #33
0
 def make_node(self, x):
     """Unary GPU op: output type equals the (converted) input's type."""
     x = as_gpuarray_variable(x)
     out = x.type()
     return Apply(self, [x], [out])
Exemple #34
0
 def make_node(self, x, y):
     """Build the Apply node for the GPU dot22 product."""
     # Parent call is retained for its validation; its result is unused.
     Dot22.make_node(self, x, y)
     xg = as_gpuarray_variable(x)
     yg = as_gpuarray_variable(y)
     assert xg.dtype == yg.dtype
     return Apply(self, [xg, yg], [xg.type()])
Exemple #35
0
 def make_node(self, dnll, sm, y_idx):
     """Make the Apply node; the output copies sm's type."""
     gpu_vars = [as_gpuarray_variable(v) for v in (dnll, sm, y_idx)]
     return Apply(self, gpu_vars, [gpu_vars[1].type()])
Exemple #36
0
 def make_node(self, x, *inputs):
     """Make the Apply node for the GPU subtensor."""
     rval = tensor.Subtensor.make_node(self, x, *inputs)
     out_t = rval.outputs[0].type
     # GPU analogue of the CPU output type computed by Subtensor.
     otype = GpuArrayType(dtype=out_t.dtype,
                          broadcastable=out_t.broadcastable)
     x = as_gpuarray_variable(x)
     return gof.Apply(self, [x] + rval.inputs[1:], [otype()])
Exemple #37
0
 def make_node(self, x, b):
     """Binary node: the output variable copies x's type."""
     xg = as_gpuarray_variable(x)
     bg = as_gpuarray_variable(b)
     return Apply(self, [xg, bg], [xg.type()])
Exemple #38
0
 def make_node(self, y, alpha, A, x, beta):
     """Construct the Apply node for the GPU gemv (no dtype assertion)."""
     # The parent node is discarded; the call is kept for its checks only.
     Gemv.make_node(self, y, alpha, A, x, beta)
     gy = as_gpuarray_variable(y)
     gA = as_gpuarray_variable(A)
     gx = as_gpuarray_variable(x)
     return Apply(self, [gy, alpha, gA, gx, beta], [gy.type()])