Example #1
    def make_node(self, inp, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, ctx_name)
        nd = self.ndim
        assert inp.ndim == nd + 2
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise ValueError("Padding works only with ignore_border=True")
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise ValueError("Padding must be smaller than strides")

        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, ws, stride, pad], [inp.type()])
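
A recurring pattern in the pooling examples on this page: stride defaults to the window shape, padding defaults to zeros, and nonzero padding must stay strictly below the window size. A minimal framework-free sketch of that rule (the helper name is illustrative, not part of the Aesara API):

    def normalize_pool_params(ws, stride=None, pad=None, ignore_border=True):
        # Mirror the defaults applied by make_node above.
        nd = len(ws)
        stride = tuple(ws) if stride is None else tuple(stride)
        pad = (0,) * nd if pad is None else tuple(pad)
        if max(pad) != 0 and not ignore_border:
            raise ValueError("Padding works only with ignore_border=True")
        if any(pad[i] >= ws[i] for i in range(nd)):
            raise ValueError("Padding must be smaller than the window shape")
        return tuple(ws), stride, pad

    # A (3, 3) window inherits stride (3, 3) and padding (0, 0).
    assert normalize_pool_params((3, 3)) == ((3, 3), (3, 3), (0, 0))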
Example #2
    def make_node(self, inp, out, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out = as_gpuarray_variable(out, ctx_name)
        assert out.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim
        assert inp.ndim == out.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
Example #3
    def make_node(self, inp, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and self.mode != "average_exc_pad":
                raise ValueError("Nonzero padding requires mode='average_exc_pad'")
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in aesara.tensor.int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()])
Example #4
    def make_node(self, ten4, neib_shape, neib_step=None):
        ten4 = as_gpuarray_variable(ten4, infer_context_name(ten4))
        neib_shape = tt.as_tensor_variable(neib_shape)
        if neib_step is None:
            neib_step = neib_shape
        else:
            neib_step = tt.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1
        assert neib_shape.dtype in tt.integer_dtypes
        assert neib_step.dtype in tt.integer_dtypes

        return Apply(
            self,
            [ten4, neib_shape, neib_step],
            [
                GpuArrayType(
                    broadcastable=(False, False),
                    dtype=ten4.type.dtype,
                    context_name=ten4.type.context_name,
                )()
            ],
        )
Example #5
 def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
     ctx = infer_context_name(o, x, y)
     one = tensor.constant(np.asarray(1.0, dtype="float32"))
     o = as_gpuarray_variable(o, ctx)
     x = as_gpuarray_variable(x, ctx)
     y = as_gpuarray_variable(y, ctx)
     xIdx = as_tensor_variable(xIdx)
     yIdx = as_tensor_variable(yIdx)
     if alpha is None:
         alpha = one
     return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])
Example #6
 def make_node(self, rstate, size):
     # The error checking here is slightly redundant, since this op
     # should not be called directly; call through MRG_RandomStreams instead.
     broad = []
     for i in range(self.output_type.ndim):
         broad.append(tensor.extract_constant(size[i]) == 1)
     output_type = self.output_type.clone(broadcastable=broad)()
     rstate = as_gpuarray_variable(rstate, infer_context_name(rstate))
     return Apply(self, [rstate, size], [rstate.type(), output_type])
Example #7
 def make_node(self, x, b, y_idx):
     ctx_name = infer_context_name(x, b, y_idx)
     x = as_gpuarray_variable(x, ctx_name)
     b = as_gpuarray_variable(b, ctx_name)
     y_idx = as_gpuarray_variable(y_idx, ctx_name)
     nll = GpuArrayType(x.type.dtype,
                        y_idx.type.broadcastable,
                        context_name=ctx_name)()
     sm = x.type()
     am = y_idx.type()
     return Apply(self, [x, b, y_idx], [nll, sm, am])
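
The output names suggest the fused crossentropy-softmax-argmax op: nll is the per-row negative log-likelihood (shaped like y_idx), sm the row-wise softmax of x + b (shaped like x), and am the row-wise argmax. A NumPy sketch of those semantics under that assumption (an orientation aid, not the GPU kernel):

    import numpy as np

    def crossentropy_softmax_argmax_ref(x, b, y_idx):
        # x: (batch, classes), b: (classes,), y_idx: (batch,) integer labels.
        z = x + b
        z = z - z.max(axis=1, keepdims=True)  # shift for numerical stability
        sm = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
        nll = -np.log(sm[np.arange(len(y_idx)), y_idx])
        am = sm.argmax(axis=1)
        return nll, sm, am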
Example #8
    def make_node(self, n, m):
        n = tensor.as_tensor_variable(n)
        m = tensor.as_tensor_variable(m)
        assert n.ndim == 0
        assert m.ndim == 0
        otype = GpuArrayType(
            dtype=self.dtype,
            broadcastable=(False, False),
            context_name=self.context_name,
        )

        return Apply(self, [n, m], [otype()])
Example #9
 def make_node(self, pvals, unis, n=1):
     pvals = tt.as_tensor_variable(pvals)
     unis = tt.as_tensor_variable(unis)
     if pvals.ndim != 2:
         raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
     if unis.ndim != 1:
         raise NotImplementedError("unis ndim should be 1", unis.ndim)
     if self.odtype == "auto":
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     out = tt.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #10
 def make_node(self, inp, kth):
     ctx_name = infer_context_name(inp)
     inp = as_gpuarray_variable(inp, ctx_name)
     kth = as_tensor_variable(kth)
     bcast = inp.type.broadcastable
     outs = []
     if self.return_values:
         outs.append(inp.type())
     if self.return_indices:
         outs.append(
             GpuArrayType(dtype=self.idx_dtype,
                          broadcastable=bcast,
                          context_name=ctx_name)())
     return Apply(self, [inp, kth], outs)
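
The (inp, kth) inputs plus the optional value/index outputs point at a top-k style op. A rough NumPy sketch of plausible semantics along the last axis (an assumption for illustration; the real op's ordering guarantees and handling of negative kth may differ):

    import numpy as np

    def topk_ref(inp, kth):
        # Indices of the kth largest entries along the last axis (assumed
        # semantics, not the op's documented contract).
        idx = np.argsort(inp, axis=-1)[..., -kth:]
        return np.take_along_axis(inp, idx, axis=-1), idx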
Example #11
    def make_node(self, x):
        assert x.type.dtype == "float32", "Only float32 supported for GpuCumOp"

        context_name = infer_context_name(x)

        x = as_gpuarray_variable(x, context_name)

        if x.ndim > GpuCumOp.SUPPORTED_NDIMS:
            raise NotImplementedError("Only cum op on 1D, 2D and\
                                       3D arrays are supported right now!")

        if self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError("axis(={}) out of bounds".format(self.axis))
        return Apply(self, [x], [x.type()])
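
The axis test accepts Python-style negative axes: for an ndim-dimensional input the valid range is -ndim .. ndim - 1. A small sketch of the equivalent check plus normalization (illustrative; the op above only validates and keeps the axis as given):

    def check_axis(axis, ndim):
        # Same bounds as above; returns the equivalent non-negative axis.
        if axis >= ndim or axis < -ndim:
            raise ValueError(f"axis(={axis}) out of bounds")
        return axis % ndim

    assert check_axis(-1, 3) == 2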
Example #12
    def make_node(self, ten4, neib_shape, neib_step=None):
        """
        Parameters
        ----------
        ten4 : a list of lists of images
            ten4 is of shape (list 1 dim, list 2 dim, row, col).
        neib_shape
            (r,c) where r is the height of the neighborhood in rows and c is
            the width of the neighborhood in columns.
        neib_step
            (dr,dc) where dr is the number of rows to skip between patches and
            dc is the number of columns to skip. When None, this is the same as
            neib_shape (patches are disjoint).

        Returns
        -------
        matrix
            A 2D matrix, written using the following pattern::

                idx = 0
                for i in range(list 1 dim)
                    for j in range(list 2 dim)
                        for k in <image column coordinates>
                            for l in <image row coordinates>
                                output[idx,:]
                                     = flattened version of ten4[i,j,l:l+r,k:k+c]
                                idx += 1

            .. note:: The op isn't necessarily implemented internally with these
                for loops, they're just the easiest way to describe the output
                pattern.

        """
        ten4 = tt.as_tensor_variable(ten4)
        neib_shape = tt.as_tensor_variable(neib_shape)
        if neib_step is None:
            neib_step = neib_shape
        else:
            neib_step = tt.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1

        return Apply(self, [ten4, neib_shape, neib_step],
                     [tt.matrix(dtype=ten4.type.dtype)])
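
The docstring's output pattern can be reproduced with a short NumPy reference (a readability aid only; as the docstring notes, the op itself need not be implemented with these loops):

    import numpy as np

    def neibs_ref(ten4, neib_shape, neib_step=None):
        # Direct transcription of the docstring's loop pattern.
        r, c = neib_shape
        dr, dc = neib_step if neib_step is not None else neib_shape
        d1, d2, nrow, ncol = ten4.shape
        rows = []
        for i in range(d1):
            for j in range(d2):
                for k in range(0, ncol - c + 1, dc):
                    for l in range(0, nrow - r + 1, dr):
                        rows.append(ten4[i, j, l:l + r, k:k + c].ravel())
        return np.stack(rows)

    x = np.arange(16).reshape(1, 1, 4, 4)
    assert neibs_ref(x, (2, 2)).shape == (4, 4)  # four disjoint 2x2 patches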
Example #13
    def make_node(self, o, W, h, inputIdx, outputIdx):
        ctx = infer_context_name(o, W, h)
        o = as_gpuarray_variable(o, ctx)
        W = as_gpuarray_variable(W, ctx)
        h = as_gpuarray_variable(h, ctx)
        inputIdx = as_tensor_variable(inputIdx)
        outputIdx = as_tensor_variable(outputIdx)
        assert o.ndim == 3
        assert W.ndim == 4
        assert h.ndim == 3
        assert inputIdx.ndim == 2
        assert outputIdx.ndim == 2

        assert inputIdx.type.dtype in discrete_dtypes
        assert outputIdx.type.dtype in discrete_dtypes

        return Apply(self, [o, W, h, inputIdx, outputIdx], [o.type()])
Example #14
    def make_node(self, pvals, unis):
        ctx_name = infer_context_name(pvals, unis)
        pvals = as_gpuarray_variable(pvals, ctx_name)
        unis = as_gpuarray_variable(unis, ctx_name)
        assert pvals.dtype in ["float32", "float16", "float64"]
        assert unis.dtype in ["float32", "float16", "float64"]

        if pvals.ndim != 2:
            raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
        if unis.ndim != 1:
            raise NotImplementedError("unis ndim should be 1", unis.ndim)
        if self.odtype == "auto":
            odtype = pvals.dtype
        else:
            odtype = self.odtype
        br = (pvals.broadcastable[1], pvals.broadcastable[0])
        out = GpuArrayType(broadcastable=br,
                           dtype=odtype,
                           context_name=ctx_name)()

        return Apply(self, [pvals, unis], [out])
Example #15
 def make_node(self, c):
     return Apply(self, [c], [TensorType("float32", (False,))()])
Example #16
 def make_node(self):
     return Apply(self, [], [scalar.uint32()])
Example #17
 def make_node(self, i):
     return Apply(self, [i], [CDataType("void *", "py_decref")()])
Example #18
 def make_node(self, a, b):
     return Apply(
         self,
         [scalar.as_scalar(a), scalar.as_scalar(b)], [scalar.float64()])
Example #19
 def make_node(self, dnll, sm, y_idx):
     ctx_name = infer_context_name(dnll, sm, y_idx)
     dnll = as_gpuarray_variable(dnll, ctx_name)
     sm = as_gpuarray_variable(sm, ctx_name)
     y_idx = as_gpuarray_variable(y_idx, ctx_name)
     return Apply(self, [dnll, sm, y_idx], [sm.type()])
Example #20
 def make_node(self, x):
     x = as_gpuarray_variable(x, infer_context_name(x))
     return Apply(self, [x], [x.type()])
Example #21
 def make_node(self, x, b):
     ctx_name = infer_context_name(x, b)
     x = as_gpuarray_variable(x, ctx_name)
     b = as_gpuarray_variable(b, ctx_name)
     return Apply(self, [x, b], [x.type()])
Example #22
 def make_node(self, A, s, m, A2, s2, m2):
     return Apply(self, [A, s, m, A2, s2, m2], [s.type()])
Example #23
    def make_node(self, val):
        from aesara import Apply
        from aesara.scalar import as_scalar

        val = as_scalar(val).astype("uint64")
        return Apply(self, [val], [self.rtype()])