Example #1
    def make_node(self, x):
        # Accept only variables already typed as CUDA ndarrays.
        from theano.sandbox.cuda import CudaNdarrayType
        if not isinstance(x.type, CudaNdarrayType):
            raise TypeError(x)
        # The output keeps the input's dtype and broadcastable pattern,
        # but lives in a gpuarray type.
        return Apply(
            self, [x],
            [GpuArrayType(broadcastable=x.broadcastable, dtype=x.dtype)()])
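This appears to be the make_node of a CUDA-to-gpuarray transfer op from the old theano.sandbox backends (an assumption; the owning class is not shown). For context, calling an Op instance routes through make_node, so a minimal usage sketch, with `op` standing for a hypothetical instance of that class, is:

    # Op.__call__ delegates to make_node and returns the output
    # variable(s) of the resulting Apply node.
    y = op(x)  # equivalent to op.make_node(x).outputs[0]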
Example #2
    def make_node(self, x, axis, splits):
        node = Split.make_node(self, x, axis, splits)
        x = as_gpuarray_variable(x)
        outs = [
            GpuArrayType(dtype=o.dtype, broadcastable=o.broadcastable)()
            for o in node.outputs
        ]
        return Apply(self, [x] + node.inputs[1:], outs)
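Examples 2, 3, and 7 share one pattern: delegate to the CPU op's make_node (Split here; Join and Alloc below) so dtype and broadcastable inference stays in one place, then rebuild the Apply node with GPU-typed inputs and outputs. A standalone sketch of the retyping step (my naming; the GPU type class is passed in as a parameter):

    def retype_outputs(cpu_node, gpu_type_cls):
        # Keep the inferred dtype/broadcastable; swap in the GPU type.
        return [gpu_type_cls(dtype=o.dtype, broadcastable=o.broadcastable)()
                for o in cpu_node.outputs]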
Example #3
    def make_node(self, axis, *tensors):
        node = Join.make_node(self, axis, *tensors)

        # A list comprehension (rather than a bare map) keeps the
        # concatenation valid under Python 3, where map is an iterator.
        return Apply(
            self,
            [node.inputs[0]] + [as_gpuarray_variable(t) for t in tensors],
            [
                GpuArrayType(broadcastable=node.outputs[0].broadcastable,
                             dtype=node.outputs[0].dtype)()
            ])
Example #4
    def make_node(self, n, m, k):
        n = tensor.as_tensor_variable(n)
        m = tensor.as_tensor_variable(m)
        k = tensor.as_tensor_variable(k)
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        otype = GpuArrayType(dtype=self.dtype, broadcastable=(False, False))

        # k != 0 isn't implemented on the GPU yet.
        assert tensor.get_scalar_constant_value(k) == 0
        return Apply(self, [n, m], [otype()])
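The assert restricts k to values Theano can prove are the constant 0 when the graph is built. A small sketch of that check (assumes a working Theano installation):

    import theano.tensor as tensor

    k = tensor.constant(0)
    assert tensor.get_scalar_constant_value(k) == 0   # accepted
    # A symbolic k such as tensor.iscalar('k') makes
    # get_scalar_constant_value raise tensor.NotScalarConstantError,
    # so the op rejects it with an exception rather than a failed assert.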
Example #5
    def make_node(self, value, *shape):
        v = as_gpuarray_variable(value)
        sh = [tensor.as_tensor_variable(s) for s in shape]
        bcast = []
        if v.ndim > len(shape):
            raise TypeError(
                'GpuAlloc value has more dimensions than arguments',
                v.ndim, len(shape))
        for s in sh:
            # Accept any signed or unsigned integer dtype.
            if not s.type.dtype.startswith(('int', 'uint')):
                raise TypeError('Shape arguments must be integers', s)
            try:
                const_shp = tensor.get_scalar_constant_value(s)
            except tensor.NotScalarConstantError:
                const_shp = None
            # Broadcastable only when the shape is the constant 1.
            bcast.append(numpy.all(1 == const_shp))
        otype = GpuArrayType(dtype=v.dtype, broadcastable=bcast)
        return Apply(self, [v] + sh, [otype()])
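The loop encodes a simple inference rule; the same logic as a standalone pure-Python sketch (my naming):

    def bcast_from_shapes(const_shapes):
        # Entries are an int constant, or None when the shape is not
        # known at graph-construction time.
        return [s == 1 for s in const_shapes]

    assert bcast_from_shapes([1, None, 5]) == [True, False, False]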
Example #6
    def make_node(self, x, shp):
        x = as_gpuarray_variable(x)
        res = host_from_gpu(x).reshape(shp, ndim=self.ndim)
        otype = GpuArrayType(dtype=res.dtype, broadcastable=res.broadcastable)
        return Apply(self, [x, shp], [otype()])
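The reshape of host_from_gpu(x) here is purely symbolic and never executed: it exists only so the output's dtype and broadcastable pattern can be read off res, after which the host-side graph is discarded (the returned Apply uses [x, shp] directly).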
Example #7
    def make_node(self, value, *shape):
        res = Alloc.make_node(self, value, *shape)
        value = as_gpuarray_variable(value)
        otype = GpuArrayType(dtype=res.outputs[0].dtype,
                             broadcastable=res.outputs[0].broadcastable)
        return Apply(self, [value] + res.inputs[1:], [otype()])
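Unlike Example #5, which re-implements the shape checks by hand, this variant lets Alloc.make_node do the validation and broadcastable inference, then keeps its already-normalized shape inputs (res.inputs[1:]).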
Example #8
    def make_node(self, x):
        if not isinstance(x.type, tensor.TensorType):
            raise TypeError(x)
        return Apply(
            self, [x],
            [GpuArrayType(broadcastable=x.broadcastable, dtype=x.dtype)()])
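This is the host-to-GPU counterpart of Example #1: the accepted input type is tensor.TensorType instead of CudaNdarrayType, while the output type construction is identical.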
Example #9
    def make_node(self, *inputs):
        _inputs = [as_gpuarray_variable(i) for i in inputs]
        if self.nin > 0 and len(_inputs) != self.nin:
            raise TypeError("Wrong argument count", (self.nin, len(_inputs)))
        for i in _inputs[1:]:
            if i.type.ndim != _inputs[0].type.ndim:
                raise TypeError('mismatched rank amongst inputs')

        # An output dimension is broadcastable only if every input is
        # broadcastable along it.
        broadcastable = []
        for d in range(_inputs[0].type.ndim):
            bcast_d = True
            for i in _inputs:
                if not i.type.broadcastable[d]:
                    bcast_d = False
                    break
            broadcastable.append(bcast_d)
        assert len(broadcastable) == _inputs[0].type.ndim

        assert self.nout > 0
        inps = [make_argument(i, 'i%d' % (n, )) for n, i in enumerate(inputs)]
        scal_ins = [scalar.Scalar(i.dtype) for i in inputs]

        res = Apply(self, _inputs, [
            GpuArrayType(o.dtype, broadcastable)()
            for o in self.scalar_op.output_types(scal_ins)
        ])

        outs = [
            make_argument(o, 'o%d' % (n, )) for n, o in enumerate(res.outputs)
        ]
        scal_out = [scalar.Scalar(o.dtype) for o in res.outputs]

        # Render the scalar op's C code against placeholder scalar
        # variables; the generated snippet becomes the kernel body.
        fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
                          [o() for o in scal_out])

        kcode = self.scalar_op.c_code(fake_node,
                                      'kcode', [i.expr() for i in inps],
                                      [o.expr() for o in outs],
                                      sub=dict(fail='return;'))
        res.tag.kcode = kcode

        # Per-apply C support code cannot be carried over into the GPU
        # kernel, so any scalar op that defines it is rejected.
        try:
            code = self.scalar_op.c_support_code_apply(fake_node, 'kcode')
            if code:
                raise SupportCodeError()
        except MethodNotDefined:
            pass

        support_code = ""
        try:
            support_code += self.scalar_op.c_support_code()
        except MethodNotDefined:
            pass

        if support_code != "#define THEANO_MACRO_MOD(x,y) (x % y)":
            # Avoid the C++ complex struct
            raise SupportCodeError()

        k = ElemwiseKernel(None, inps + outs, kcode, preamble=support_code)
        res.tag.kernel = k

        return res
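The broadcastable loop near the top of this example implements the standard elemwise rule; as a standalone sketch (pure Python, my naming):

    # An output dimension of an elemwise op is broadcastable only if it
    # is broadcastable in every input.
    def elemwise_bcast(input_bcasts):
        return [all(dims) for dims in zip(*input_bcasts)]

    assert elemwise_bcast([(True, False), (True, True)]) == [True, False]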