Example #1
    def make_node(self, x, shape):
        if not isinstance(x, Variable):
            x = at.as_tensor_variable(x)

        shape = at.as_tensor_variable(shape, ndim=1)

        if isinstance(shape, Constant):
            shape = tuple(shape.data)
        else:
            shape = tuple(at.as_tensor_variable(s, ndim=0) for s in shape)

        if any(s.dtype not in aesara.tensor.type.integer_dtypes
               for s in shape):
            raise TypeError("Shape values must be integer types")

        if len(shape) != x.ndim:
            raise ValueError(
                f"Input `x` is {x.ndim}-dimensional and will never match a shape of length {len(shape)}."
            )

        if isinstance(x.type, TensorType) and all(
                isinstance(s, Number) for s in shape):
            out_var = TensorType(x.type.dtype, shape)()
        else:
            out_var = x.type()

        in_shape = at.as_tensor_variable(shape, ndim=1)
        return Apply(self, [x, in_shape], [out_var])
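The node above is the SpecifyShape pattern: the asserted shape travels through the graph alongside the input. A minimal usage sketch through the public aesara.tensor.specify_shape wrapper (assuming an Aesara installation; the exact exception raised on a mismatch depends on the version):

import numpy as np
import aesara.tensor as at

x = at.dmatrix("x")
y = at.specify_shape(x, (2, 3))       # asserts at runtime that x has shape (2, 3)
print(y.eval({x: np.zeros((2, 3))}))  # shape matches, so the value passes through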
Example #2
    def make_node(self, inp, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and self.mode != "average_exc_pad":
                raise ValueError("Padding must be zero for average_exc_pad")
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()])
Example #3
    def make_node(self, inp, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, ctx_name)
        nd = self.ndim
        assert inp.ndim == nd + 2
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise ValueError("Padding works only with ignore_border=True")
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise ValueError("Padding must be smaller than strides")

        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, ws, stride, pad], [inp.type()])
Example #4
    def make_node(self, x, *shape):
        from aesara.tensor.basic import get_scalar_constant_value

        x = at.as_tensor_variable(x)

        shape = tuple(
            NoneConst if (s is None or NoneConst.equals(s)) else at.as_tensor_variable(s, ndim=0)
            for s in shape
        )

        if any(s.dtype not in aesara.tensor.type.integer_dtypes for s in shape
               if hasattr(s, "dtype")):
            raise TypeError("Shape values must be integer types")

        if len(shape) != x.type.ndim:
            raise ValueError(
                f"Input `x` is {x.type.ndim}-dimensional and will never match a shape of length {len(shape)}."
            )

        type_shape = [None] * x.ndim
        for i, (xts, s) in enumerate(zip(x.type.shape, shape)):
            if xts is not None:
                type_shape[i] = xts
            else:
                try:
                    type_s = get_scalar_constant_value(s)
                    if type_s is not None:
                        type_shape[i] = int(type_s)
                except NotScalarConstantError:
                    pass

        out_var = x.type.clone(shape=type_shape)()

        return Apply(self, [x, *shape], [out_var])
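This variant also folds constant shape entries into the output's static type (the type_shape loop above). A hedged sketch of that effect, assuming an Aesara version that tracks static shapes via x.type.shape, as the snippet itself does:

import aesara.tensor as at

x = at.dmatrix("x")
y = at.specify_shape(x, (None, 3))  # only the second dimension is pinned
print(y.type.shape)                 # expected: (None, 3) when static shapes are tracked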
Example #5
    def make_node(self, inp, out, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out = as_gpuarray_variable(out, ctx_name)
        assert out.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim
        assert inp.ndim == out.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
Example #6
def test_jax_compile_ops():

    x = DeepCopyOp()(aet.as_tensor_variable(1.1))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    x_np = np.zeros((20, 1, 1))
    x = aet.Rebroadcast((0, False), (1, True),
                        (2, False))(aet.as_tensor_variable(x_np))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    with config.change_flags(compute_test_value="off"):
        x = aet.Rebroadcast((0, True), (1, False),
                            (2, False))(aet.as_tensor_variable(x_np))
        x_fg = FunctionGraph([], [x])

        with pytest.raises(ValueError):
            compare_jax_and_py(x_fg, [])

    x = ViewOp()(aet.as_tensor_variable(x_np))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])
Example #7
 def make_node(self, x, shp):
     x = aet.as_tensor_variable(x)
     shp_orig = shp
     shp = aet.as_tensor_variable(shp, ndim=1)
     if not (shp.dtype in int_dtypes or
             (isinstance(shp, TensorConstant) and shp.data.size == 0)):
         # It raises an error if shp is not of integer type,
         # except when shp is constant and empty
         # (in this case, shp.dtype does not matter anymore).
         raise TypeError("Shape must be integers", shp, shp.dtype)
     assert shp.ndim == 1
     if isinstance(shp, TensorConstant):
         bcast = [s == 1 for s in shp.data]
         return Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
     else:
         bcasts = [False] * self.ndim
         shp_list = shp_orig
         if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
             shp_list = [shp_orig]
         for index in range(self.ndim):
             y = shp_list[index]
             y = aet.as_tensor_variable(y)
             # Try to see if we can infer that y has a constant value of 1.
             # If so, that dimension should be broadcastable.
             try:
                 bcasts[index] = (hasattr(y, "get_scalar_constant_value")
                                  and y.get_scalar_constant_value() == 1)
             except NotScalarConstantError:
                 pass
         return Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
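This make_node matches the Reshape op; a brief usage sketch through the public aesara.tensor.reshape (assuming an Aesara installation):

import numpy as np
import aesara.tensor as at

x = at.dvector("x")
y = at.reshape(x, (2, 3))  # builds a Reshape node like the one constructed above
print(y.eval({x: np.arange(6, dtype="float64")}))  # -> [[0. 1. 2.] [3. 4. 5.]]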
Example #8
    def make_node(self, a, val, offset):
        a = aet.as_tensor_variable(a)
        val = aet.as_tensor_variable(val)
        offset = aet.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError(
                f"{self.__class__.__name__}: first parameter must have exactly"
                " two dimensions"
            )
        elif val.ndim != 0:
            raise TypeError(
                f"{self.__class__.__name__}: second parameter must be a scalar"
            )
        elif offset.ndim != 0:
            raise TypeError(
                f"{self.__class__.__name__}: third parameter must be a scalar"
            )
        val = aet.cast(val, dtype=upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError(
                f"{self.__class__.__name__}: type of second parameter must be the same"
                " as the first's"
            )
        elif offset.dtype not in integer_dtypes:
            raise TypeError(
                f"{self.__class__.__name__}: type of third parameter must be an integer;"
                " use aesara.tensor.cast(input, 'int32/int64')"
            )

        return Apply(self, [a, val, offset], [a.type()])
Example #9
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if kern.type.ndim != 5:
            raise TypeError("kern must be 5D tensor")
        if topgrad.type.ndim != 5:
            raise TypeError("topgrad must be 5D tensor")
        if shape is None:
            if self.subsample != (1, 1, 1):
                raise ValueError("shape must be given if subsample != (1, 1, 1)")
            height_width_depth = []
        else:
            height_width_depth = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
                as_tensor_variable(shape[2]).astype("int64"),
            ]

        if self.num_groups > 1:
            broadcastable = [topgrad.type.broadcastable[0], False, False, False, False]
        else:
            broadcastable = [
                topgrad.type.broadcastable[0],
                kern.type.broadcastable[1],
                False,
                False,
                False,
            ]
        dtype = kern.type.dtype
        return Apply(
            self,
            [kern, topgrad] + height_width_depth,
            [TensorType(dtype, broadcastable)()],
        )
Example #10
    def make_node(self, img, topgrad, shape=None):
        img = as_tensor_variable(img)
        topgrad = as_tensor_variable(topgrad)
        img, topgrad = self.as_common_dtype(img, topgrad)
        if img.type.ndim != 5:
            raise TypeError("img must be 5D tensor")
        if topgrad.type.ndim != 5:
            raise TypeError("topgrad must be 5D tensor")
        if shape is None:
            if self.subsample != (1, 1, 1) or self.border_mode == "half":
                raise ValueError(
                    "shape must be given if subsample != (1, 1, 1)"
                    ' or border_mode == "half"')
            height_width_depth = []
        else:
            height_width_depth = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
                as_tensor_variable(shape[2]).astype("int64"),
            ]

        broadcastable = [
            topgrad.type.broadcastable[1],
            img.type.broadcastable[1],
            False,
            False,
            False,
        ]
        dtype = img.type.dtype
        return Apply(
            self,
            [img, topgrad] + height_width_depth,
            [TensorType(dtype, broadcastable)()],
        )
Example #11
 def make_node(self, input, axis=-1):
     input = as_tensor_variable(input)
     axis = as_tensor_variable(axis)
     return Apply(
         self,
         [input, axis],
         [TensorType(dtype="int64", shape=input.type.shape)()],
     )
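The (input, axis) signature with an int64 output of the same shape is the argsort pattern; assuming this snippet belongs to such an op, a usage sketch via the public aesara.tensor.argsort:

import numpy as np
import aesara.tensor as at

x = at.as_tensor_variable(np.array([[3.0, 1.0, 2.0]]))
print(at.argsort(x, axis=-1).eval())  # -> [[1 2 0]], dtype int64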
Example #12
    def make_node(self, a, *shape):
        a = aet.as_tensor_variable(a)
        shape = aet.as_tensor_variable(shape, ndim=1)

        shape, bcast = aet.alloc_validate_shape(shape)

        out = type(a.type)(dtype=a.type.dtype, broadcastable=bcast)()

        return Apply(self, [a] + shape, [out])
Example #13
 def make_node(self, input, axis=-1):
     input = as_tensor_variable(input)
     axis = as_tensor_variable(axis)
     bcast = input.type.broadcastable
     return Apply(
         self,
         [input, axis],
         [TensorType(dtype="int64", broadcastable=bcast)()],
     )
Example #14
def test_jax_shape_ops():
    x_np = np.zeros((20, 3))
    x = Shape()(aet.as_tensor_variable(x_np))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [], must_be_device_array=False)

    x = Shape_i(1)(aet.as_tensor_variable(x_np))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [], must_be_device_array=False)
Example #15
 def make_node(self, x, shape):
     if not isinstance(x, Variable):
         x = aet.as_tensor_variable(x)
     shape = aet.as_tensor_variable(shape)
     if shape.ndim > 1:
         raise AssertionError()
     if shape.dtype not in aesara.tensor.type.integer_dtypes:
         raise AssertionError()
     if isinstance(shape, TensorConstant) and shape.data.size != x.ndim:
         raise AssertionError()
     return Apply(self, [x, shape], [x.type()])
Example #16
 def make_node(self, a, val):
     a = basic.as_tensor_variable(a)
     val = basic.as_tensor_variable(val)
     if a.ndim < 2:
         raise TypeError("%s: first parameter must have at least"
                         " two dimensions" % self.__class__.__name__)
     elif val.ndim != 0:
         raise TypeError("%s: second parameter must be a scalar" %
                         self.__class__.__name__)
     val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
     if val.dtype != a.dtype:
         raise TypeError("%s: type of second parameter must be the same as"
                         " the first's" % self.__class__.__name__)
     return Apply(self, [a, val], [a.type()])
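This is the fill-diagonal pattern: a tensor with at least two dimensions plus a scalar fill value. A usage sketch; the import path below is an assumption based on where the Theano-era helper lived, so adjust it if your version exports fill_diagonal elsewhere:

import numpy as np
from aesara.tensor.extra_ops import fill_diagonal  # assumed location of the helper
import aesara.tensor as at

a = at.as_tensor_variable(np.zeros((3, 3)))
print(fill_diagonal(a, 5.0).eval())  # zeros everywhere except 5.0 on the main diagonal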
Example #17
def test_jax_specify_shape():
    x_np = np.zeros((20, 3))
    x = SpecifyShape()(aet.as_tensor_variable(x_np), (20, 3))
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    with config.change_flags(compute_test_value="off"):

        x = SpecifyShape()(aet.as_tensor_variable(x_np), (2, 3))
        x_fg = FunctionGraph([], [x])

        with pytest.raises(AssertionError):
            compare_jax_and_py(x_fg, [])
Example #18
def shape(x: Union[np.ndarray, Number, Variable]) -> Variable:
    """Return the shape of `x`."""
    if not isinstance(x, Variable):
        x = at.as_tensor_variable(x)

    x_type = x.type

    if isinstance(x_type, TensorType) and all(s is not None
                                              for s in x_type.shape):
        res = at.as_tensor_variable(x_type.shape, ndim=1, dtype=np.int64)
    else:
        res = _shape(x)

    return res
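A small usage sketch of the two branches (assuming an Aesara installation): a constant input has a fully known static shape and yields a constant vector, while a generic matrix falls back to the symbolic _shape op:

import numpy as np
import aesara.tensor as at

c = at.as_tensor_variable(np.zeros((2, 3)))  # static shape fully known
print(at.shape(c).eval())                    # -> [2 3]

m = at.dmatrix("m")                          # static shape unknown
print(at.shape(m))                           # a symbolic Shape result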
Example #19
 def make_node(self, x, y, rcond):
     x = as_tensor_variable(x)
     y = as_tensor_variable(y)
     rcond = as_tensor_variable(rcond)
     return Apply(
         self,
         [x, y, rcond],
         [
             matrix(),
             dvector(),
             lscalar(),
             dvector(),
         ],
     )
Example #20
    def make_node(self, activations, labels, input_lengths):
        context_name = infer_context_name(activations)
        t_activations = as_gpuarray_variable(activations,
                                             context_name=context_name)
        # Ensure activations array is C-contiguous
        t_activations = gpu_contiguous(t_activations)

        # Labels and input lengths are always on the CPU
        t_labels = as_tensor_variable(labels)
        t_input_lengths = as_tensor_variable(input_lengths)

        if t_activations.type.dtype != "float32":
            raise TypeError("activations must use the float32 type.")

        if t_activations.ndim != 3:
            raise ValueError("activations must have 3 dimensions.")

        if t_labels.type.dtype != "int32":
            raise TypeError("labels must use the int32 type.")

        if t_labels.ndim != 2:
            raise ValueError("labels must have 2 dimensions.")

        if t_input_lengths.type.dtype != "int32":
            raise TypeError("input_lengths must use the int32 type.")

        if t_input_lengths.ndim != 1:
            raise ValueError("input_lengths must have 1 dimension.")

        costs = GpuArrayType(dtype="float32",
                             broadcastable=(False, ),
                             context_name=context_name)()
        outputs = [costs]

        if self.compute_grad:
            gradients = GpuArrayType(
                dtype="float32",
                broadcastable=(
                    False,
                    False,
                    False,
                ),
                context_name=context_name,
            )()
            outputs += [gradients]

        return Apply(self,
                     inputs=[t_activations, t_labels, t_input_lengths],
                     outputs=outputs)
Example #21
def test_sd_csc():

    A = sp.sparse.rand(4, 5, density=0.60, format="csc", dtype=np.float32)
    b = np.random.rand(5, 2).astype(np.float32)
    target = A * b

    a_val = as_tensor_variable(A.data)
    a_ind = as_tensor_variable(A.indices)
    a_ptr = as_tensor_variable(A.indptr)
    nrows = as_tensor_variable(np.int32(A.shape[0]))
    b = as_tensor_variable(b)

    res = aesara.sparse.opt.sd_csc(a_val, a_ind, a_ptr, nrows, b).eval()

    utt.assert_allclose(res, target)
Example #22
 def make_node(self, x):
     x = aet.as_tensor_variable(x)
     self_axis = self.axis
     if self_axis is None:
         broadcastable = [False]
     else:
         if self_axis < 0:
             self_axis += len(x.broadcastable)
         if self_axis < 0 or self_axis >= len(x.broadcastable):
             raise RuntimeError(
                 "Unique axis `{}` is outside of input ndim = "
                 "{}.".format(self.axis, len(x.broadcastable)))
         broadcastable = [
             b if axis != self_axis else False
             for axis, b in enumerate(x.broadcastable)
         ]
     outputs = [TensorType(broadcastable=broadcastable, dtype=x.dtype)()]
     typ = TensorType(broadcastable=[False], dtype="int64")
     if self.return_index:
         outputs.append(typ())
     if self.return_inverse:
         outputs.append(typ())
     if self.return_counts:
         outputs.append(typ())
     return Apply(self, [x], outputs)
Example #23
def shape_padaxis(t, axis):
    """Reshape `t` by inserting 1 at the dimension `axis`.

    Examples
    --------
    >>> tensor = aesara.tensor.type.tensor3()
    >>> aesara.tensor.shape_padaxis(tensor, axis=0)
    DimShuffle{x,0,1,2}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=1)
    DimShuffle{0,x,1,2}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=3)
    DimShuffle{0,1,2,x}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=-1)
    DimShuffle{0,1,2,x}.0

    See Also
    --------
    shape_padleft
    shape_padright
    Dimshuffle

    """
    _t = aet.as_tensor_variable(t)

    ndim = _t.ndim + 1
    if not -ndim <= axis < ndim:
        msg = "axis {0} is out of bounds [-{1}, {1})".format(axis, ndim)
        raise IndexError(msg)
    if axis < 0:
        axis += ndim

    pattern = [i for i in range(_t.type.ndim)]
    pattern.insert(axis, "x")
    return _t.dimshuffle(pattern)
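Beyond the docstring examples, a quick runnable check (assumes an Aesara installation); negative axes count from the end, matching the bounds check above:

import numpy as np
import aesara.tensor as at

x = at.as_tensor_variable(np.arange(6).reshape(2, 3))
print(at.shape_padaxis(x, axis=1).shape.eval())   # -> [2 1 3]
print(at.shape_padaxis(x, axis=-1).shape.eval())  # -> [2 3 1]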
Example #24
def normalize_size_param(size):
    """Create an Aesara value for a ``RandomVariable`` ``size`` parameter."""
    if size is None:
        size = constant([], dtype="int64")
    elif isinstance(size, int):
        size = as_tensor_variable([size], ndim=1)
    elif not isinstance(size, (np.ndarray, Variable, Sequence)):
        raise TypeError(
            "Parameter size must be None, an integer, or a sequence with integers."
        )
    else:
        size = cast(as_tensor_variable(size, ndim=1), "int64")

    assert size.dtype in int_dtypes

    return size
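A sketch of the three accepted forms, re-running the same conversions inline rather than importing the helper (its module path is not shown above, so the import is avoided):

from aesara.tensor import as_tensor_variable, cast, constant

none_size = constant([], dtype="int64")                       # size=None
int_size = as_tensor_variable([4], ndim=1)                    # size=4
seq_size = cast(as_tensor_variable((2, 3), ndim=1), "int64")  # size=(2, 3)
assert all(s.ndim == 1 for s in (none_size, int_size, seq_size))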
Example #25
def norm(x, ord):
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tm.sum(x**2)**0.5
        elif ord == "inf":
            return tm.max(abs(x))
        elif ord == "-inf":
            return tm.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                z = tm.sum(abs(x**ord))**(1.0 / ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == "fro":
            return tm.sum(abs(x**2))**(0.5)
        elif ord == "inf":
            return tm.max(tm.sum(abs(x), 1))
        elif ord == "-inf":
            return tm.min(tm.sum(abs(x), 1))
        elif ord == 1:
            return tm.max(tm.sum(abs(x), 0))
        elif ord == -1:
            return tm.min(tm.sum(abs(x), 0))
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
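Assuming this is the norm helper from aesara.tensor.nlinalg (suggested by the tm.-prefixed math calls), two evaluated cases as a sketch:

import numpy as np
from aesara.tensor import as_tensor_variable
from aesara.tensor.nlinalg import norm  # assumed location of the function above

v = as_tensor_variable(np.array([3.0, 4.0]))
print(norm(v, None).eval())   # Euclidean vector norm -> 5.0
m = as_tensor_variable(np.eye(2))
print(norm(m, "fro").eval())  # Frobenius norm -> ~1.414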
Example #26
    def make_node(self, rng, size, dtype, *dist_params):
        """Create a random variable node.

        XXX: Unnamed/non-keyword arguments are considered distribution
        parameters!  If you want to set `size`, `rng`, and/or `name`, use their
        keywords.

        Parameters
        ----------
        rng: RandomStateType
            Existing Aesara `RandomState` object to be used.  Creates a
            new one, if `None`.
        size: int or Sequence
            Numpy-like size of the output (i.e. replications).
        dtype: str
            The dtype of the sampled output.  If the value ``"floatX"`` is
            given, then ``dtype`` is set to ``aesara.config.floatX``.  This
            value is only used when `self.dtype` isn't set.
        dist_params: list
            Distribution parameters.

        Returns
        -------
        out: `Apply`
            A node with inputs `(rng, size, dtype_idx) + dist_params` and outputs
            `(rng_var, out_var)`.

        """
        size = normalize_size_param(size)

        dist_params = tuple(
            as_tensor_variable(p) if not isinstance(p, Variable) else p
            for p in dist_params
        )

        if rng is None:
            rng = aesara.shared(np.random.RandomState())
        elif not isinstance(rng.type, RandomStateType):
            raise TypeError("The type of rng should be an instance of RandomStateType")

        bcast = self.compute_bcast(dist_params, size)
        dtype = self.dtype or dtype

        if dtype == "floatX":
            dtype = config.floatX
        elif dtype is None or (isinstance(dtype, str) and dtype not in all_dtypes):
            raise TypeError("dtype is unspecified")

        if isinstance(dtype, str):
            dtype_idx = constant(all_dtypes.index(dtype), dtype="int64")
        else:
            dtype_idx = constant(dtype, dtype="int64")
            dtype = all_dtypes[dtype_idx.data]

        outtype = TensorType(dtype=dtype, broadcastable=bcast)
        out_var = outtype()
        inputs = (rng, size, dtype_idx) + dist_params
        outputs = (rng.type(), out_var)

        return Apply(self, inputs, outputs)
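A short usage sketch of the node this builds (assumes an Aesara version with the RandomState-based API shown above; normal is taken from aesara.tensor.random.basic):

from aesara.tensor.random.basic import normal  # a NormalRV instance

z = normal(0.0, 1.0, size=(2, 3))  # builds the Apply node described in the docstring
print(z.owner.inputs)              # rng, size, dtype index, then mu and sigma
print(z.type)                      # the TensorType of the drawn values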
Example #27
    def make_node(self, condition, *monitored_vars):

        # Ensure that condition is an Aesara tensor
        if not isinstance(condition, Variable):
            condition = as_tensor_variable(condition)

        # Validate that the condition is a scalar (else it is not obvious how
        # it should be evaluated)
        assert condition.ndim == 0

        # Because the user might be tempted to instantiate PdbBreakpoint only
        # once and apply it many times on different number of inputs, we must
        # create a new instance of the op here, define the instance attributes
        # (view_map and var_types) in that instance and then apply it on the
        # inputs.
        new_op = PdbBreakpoint(name=self.name)
        new_op.view_map = {}
        new_op.inp_types = []
        for i in range(len(monitored_vars)):
            # Every output i is a view of the input i+1 because of the input
            # condition.
            new_op.view_map[i] = [i + 1]
            new_op.inp_types.append(monitored_vars[i].type)

        # Build the Apply node
        inputs = [condition] + list(monitored_vars)
        outputs = [inp.type() for inp in monitored_vars]
        return Apply(op=new_op, inputs=inputs, outputs=outputs)
Example #28
def test_jax_scan_tap_output():

    a_aet = scalar("a")

    def input_step_fn(y_tm1, y_tm3, a):
        y_tm1.name = "y_tm1"
        y_tm3.name = "y_tm3"
        res = (y_tm1 + y_tm3) * a
        res.name = "y_t"
        return res

    y_scan_aet, _ = scan(
        fn=input_step_fn,
        outputs_info=[
            {
                "initial":
                aet.as_tensor_variable(np.r_[-1.0, 1.3,
                                             0.0].astype(config.floatX)),
                "taps": [-1, -3],
            },
        ],
        non_sequences=[a_aet],
        n_steps=10,
        name="y_scan",
    )
    y_scan_aet.name = "y"
    y_scan_aet.owner.inputs[0].name = "y_all"

    out_fg = FunctionGraph([a_aet], [y_scan_aet])

    test_input_vals = [np.array(10.0).astype(config.floatX)]
    compare_jax_and_py(out_fg, test_input_vals)
Example #29
    def make_node(self, inp, s=None):
        # A shape parameter s can be provided as an input. For now this is used to
        # manage odd transform sizes.
        # Later this could be extended to handle padding and truncation,
        # following numpy's interface. However, cuFFT expects arrays that match
        # the shape given to the plan, so padding will have to be done in the op.
        # The effect of padding on gradients has yet to be investigated.

        if not skcuda_available:
            raise RuntimeError("skcuda is needed for CuFFTOp")

        if not pygpu_available:
            raise RuntimeError("pygpu is needed for CuFFTOp")

        if not pycuda_available:
            raise RuntimeError("pycuda is needed for CuFFTOp")

        inp = gpu_contiguous(as_gpuarray_variable(inp, infer_context_name(inp)))

        # If no shape is provided as input, default to input data shape.
        if s is None:
            s = inp.shape[1:]
        s = as_tensor_variable(s)

        assert inp.dtype == "float32"
        assert s.ndim == 1
        assert s.dtype in integer_dtypes

        return Apply(self, [inp, s], [self.output_type(inp)()])
Example #30
    def make_node(self, a, s=None):
        a = as_tensor_variable(a)
        if a.ndim < 2:
            raise TypeError(
                "%s: input must have dimension > 2, with first dimension batches"
                % self.__class__.__name__)

        if s is None:
            s = a.shape[1:]
            s = as_tensor_variable(s)
        else:
            s = as_tensor_variable(s)
            if s.dtype not in integer_dtypes:
                raise TypeError("%s: length of the transformed axis must be"
                                " of type integer" % self.__class__.__name__)
        return Apply(self, [a, s], [self.output_type(a)()])