Example #1
    def make_node(self, inp1, inp2):
        if not cublas_available:
            raise RuntimeError("CUBLAS is not available and "
                               "GpuCublasTriangularSolve Op "
                               "can not be constructed.")
        context_name = infer_context_name(inp1, inp2)

        inp1 = as_gpuarray_variable(inp1, context_name)
        inp2 = as_gpuarray_variable(inp2, context_name)

        inp1 = gpu_contiguous(inp1)
        inp2 = gpu_contiguous(inp2)

        assert inp1.ndim == 2
        assert inp2.ndim in (1, 2)
        assert inp1.dtype == inp2.dtype

        return Apply(
            self,
            [inp1, inp2],
            [
                GpuArrayType(
                    inp1.dtype,
                    broadcastable=inp2.broadcastable,
                    context_name=context_name,
                )()
            ],
        )
Example #2
 def make_node(self, x, index):
     assert isinstance(x.type, TypedListType)
     if not isinstance(index, Variable):
         if isinstance(index, slice):
             index = Constant(SliceType(), index)
             return Apply(self, [x, index], [x.type()])
         else:
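             # A scalar index selects a single element, so the output uses the
             # typed list's element type (`ttype`), not the list type itself.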
             index = at.constant(index, ndim=0, dtype="int64")
             return Apply(self, [x, index], [x.ttype()])
     if isinstance(index.type, SliceType):
         return Apply(self, [x, index], [x.type()])
     elif isinstance(index, TensorVariable) and index.ndim == 0:
         assert index.dtype == "int64"
         return Apply(self, [x, index], [x.ttype()])
     else:
         raise TypeError("Expected scalar or slice as index.")
Example #3
    def make_node(self, inp1, inp2):
        if not cusolver_available:
            raise RuntimeError("CUSOLVER is not available and "
                               "GpuCusolverSolve Op can not be constructed.")
        if skcuda.__version__ <= "0.5.1":
            warnings.warn(
                "The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8"
            )
        context_name = infer_context_name(inp1, inp2)

        inp1 = as_gpuarray_variable(inp1, context_name)
        inp2 = as_gpuarray_variable(inp2, context_name)

        inp1 = gpu_contiguous(inp1)
        inp2 = gpu_contiguous(inp2)

        assert inp1.ndim == 2
        assert inp2.ndim == 2
        assert inp1.dtype == inp2.dtype

        return Apply(
            self,
            [inp1, inp2],
            [
                GpuArrayType(
                    inp1.dtype,
                    broadcastable=inp1.broadcastable,
                    context_name=context_name,
                )()
            ],
        )
Example #4
    def make_node(self, condition, *monitored_vars):

        # Ensure that condition is an Aesara tensor
        if not isinstance(condition, Variable):
            condition = as_tensor_variable(condition)

        # Validate that the condition is a scalar (else it is not obvious how
        # it should be evaluated)
        assert condition.ndim == 0

        # Because the user might be tempted to instantiate PdbBreakpoint only
        # once and apply it many times on different number of inputs, we must
        # create a new instance of the op here, define the instance attributes
        # (view_map and var_types) in that instance and then apply it on the
        # inputs.
        new_op = PdbBreakpoint(name=self.name)
        new_op.view_map = {}
        new_op.inp_types = []
        for i in range(len(monitored_vars)):
            # Every output i is a view of the input i+1 because of the input
            # condition.
            new_op.view_map[i] = [i + 1]
            new_op.inp_types.append(monitored_vars[i].type)

        # Build the Apply node
        inputs = [condition] + list(monitored_vars)
        outputs = [inp.type() for inp in monitored_vars]
        return Apply(op=new_op, inputs=inputs, outputs=outputs)
Example #5
    def make_node(self, *inputs: Variable) -> Apply:
        """Construct an `Apply` node that represent the application of this operation to the given inputs.

        This must be implemented by sub-classes.

        Returns
        -------
        node: Apply
            The constructed `Apply` node.

        """
        if not hasattr(self, "itypes"):
            raise NotImplementedError(
                "You can either define itypes and otypes,\
             or implement make_node")

        if not hasattr(self, "otypes"):
            raise NotImplementedError(
                "You can either define itypes and otypes,\
             or implement make_node")

        if len(inputs) != len(self.itypes):
            raise ValueError(
                f"We expected {len(self.itypes)} inputs but got {len(inputs)}."
            )
        if not all(inp.type == it for inp, it in zip(inputs, self.itypes)):
            raise TypeError(
                f"We expected inputs of types '{str(self.itypes)}' but got types '{str([inp.type for inp in inputs])}'"
            )
        return Apply(self, inputs, [o() for o in self.otypes])
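A minimal sketch of an Op that relies on this default itypes/otypes path, following Aesara's documented custom-Op API (the DoubleOp name and its doubling behavior are illustrative only):

import aesara
import aesara.tensor as at
from aesara.graph.op import Op

class DoubleOp(Op):
    # With itypes/otypes declared, the default make_node above builds the Apply node.
    itypes = [at.dvector]
    otypes = [at.dvector]

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = 2 * x

x = at.dvector("x")
f = aesara.function([x], DoubleOp()(x))
# f([1.0, 2.0]) is expected to return array([2., 4.])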
Example #6
    def make_node(self, c, *args):
        if len(args) != 2 * self.n_outs:
            raise ValueError(
                f"Wrong number of arguments to make_node: expected "
                f"{int(2 * self.n_outs)}, got {len(args)}")
        c = aet.basic.as_tensor_variable(c)
        if not self.gpu:
            # When gpu is true, we are given only gpuarrays, and we want
            # to keep them as gpuarrays
            nw_args = []
            for x in args:
                if isinstance(x, Variable):
                    nw_args.append(x)
                else:
                    nw_args.append(aet.as_tensor_variable(x))
            args = nw_args
        aes = args[:self.n_outs]
        fs = args[self.n_outs:]

        for t, f in zip(aes, fs):
            # TODO: Attempt to convert types so that they match?
            # new_f = t.type.filter_variable(f)

            if t.type != f.type:
                raise TypeError(
                    "IfElse requires same types for true and false return values: "
                    f"true_branch={t.type}, false_branch={f.type}")
        if c.ndim > 0:
            raise TypeError("Condition given to the op has to be a scalar "
                            "with 0 standing for False, anything else "
                            "for True")
        return Apply(self, [c] + list(args), [t.type() for t in aes])
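A hedged usage sketch, assuming this is the IfElse Op exposed through aesara.ifelse.ifelse (both branches must have identical types, as the check above enforces):

import aesara
import aesara.tensor as at
from aesara.ifelse import ifelse

cond = at.scalar("cond")        # 0 stands for False, anything else for True
a = at.vector("a")
b = at.vector("b")
z = ifelse(cond, a ** 2, b + 1.0)   # both branches are floatX vectors of the same type
f = aesara.function([cond, a, b], z)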
Example #7
    def make_node(self, ten4, neib_shape, neib_step=None):
        ten4 = as_gpuarray_variable(ten4, infer_context_name(ten4))
        neib_shape = at.as_tensor_variable(neib_shape)
        if neib_step is None:
            neib_step = neib_shape
        else:
            neib_step = at.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1
        assert neib_shape.dtype in integer_dtypes
        assert neib_step.dtype in integer_dtypes

        return Apply(
            self,
            [ten4, neib_shape, neib_step],
            [
                GpuArrayType(
                    broadcastable=(False, False),
                    dtype=ten4.type.dtype,
                    context_name=ten4.type.context_name,
                )()
            ],
        )
Example #8
    def make_node(self, inp, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, ctx_name)
        nd = self.ndim
        assert inp.ndim == nd + 2
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise ValueError("Padding works only with ignore_border=True")
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise ValueError("Padding must be smaller than the window shape")

        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, ws, stride, pad], [inp.type()])
Example #9
 def make_node(self, x):
     if not isinstance(x, Variable):
         raise TypeError("x must be Variable with ndim attribute", x)
     if x.ndim <= self.i:
         raise TypeError("x has too few dimensions for Shape_i",
                         (x, self.i))
     return Apply(self, [x], [aesara.tensor.type.lscalar()])
Example #10
    def make_node(self, inp, out, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out = as_gpuarray_variable(out, ctx_name)
        assert out.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim
        assert inp.ndim == out.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
Example #11
    def make_node(self, inp, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert inp.ndim == nd + 2
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert out_grad.ndim == nd + 2

        assert out_grad.ndim == inp.ndim

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0, ) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and self.mode == "average_exc_pad":
                raise ValueError("Padding must be zero for average_exc_pad")
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in int_dtypes:
            raise TypeError("Window shape parameters must be ints.")
        if stride.dtype not in int_dtypes:
            raise TypeError("Stride parameters must be ints.")
        if pad.dtype not in int_dtypes:
            raise TypeError("Padding parameters must be ints.")

        ws = aesara.tensor.cast(ws, "int64")
        stride = aesara.tensor.cast(stride, "int64")
        pad = aesara.tensor.cast(pad, "int64")

        return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()])
Example #12
    def make_node(self, x, *shape):
        from aesara.tensor.basic import get_scalar_constant_value

        x = at.as_tensor_variable(x)

        shape = tuple(
            NoneConst if (s is None or NoneConst.equals(s)) else at.as_tensor_variable(s, ndim=0)
            for s in shape
        )

        if any(s.dtype not in aesara.tensor.type.integer_dtypes for s in shape
               if hasattr(s, "dtype")):
            raise TypeError("Shape values must be integer types")

        if len(shape) != x.type.ndim:
            raise ValueError(
                f"Input `x` is {x.type.ndim}-dimensional and will never match a shape of length {len(shape)}."
            )

        type_shape = [None] * x.ndim
        for i, (xts, s) in enumerate(zip(x.type.shape, shape)):
            if xts is not None:
                type_shape[i] = xts
            else:
                try:
                    type_s = get_scalar_constant_value(s)
                    if type_s is not None:
                        type_shape[i] = int(type_s)
                except NotScalarConstantError:
                    pass

        out_var = x.type.clone(shape=type_shape)()

        return Apply(self, [x, *shape], [out_var])
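A hedged usage sketch, assuming this is the SpecifyShape Op behind aesara.tensor.specify_shape (the exact import location is an assumption):

import aesara
import aesara.tensor as at

x = at.matrix("x")
# Assert a static shape at graph-construction time; None leaves a dimension
# unconstrained, which the make_node above maps to NoneConst.
y = at.specify_shape(x, (3, None))
f = aesara.function([x], y)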
Example #13
 def make_node(self, x, y):
     x = aet.as_tensor_variable(x)
     y = aet.as_tensor_variable(y)
     outdim = x.ndim
     output = TensorType(dtype=aesara.scalar.upcast(x.dtype, y.dtype),
                         broadcastable=[False] * outdim)()
     return Apply(self, inputs=[x, y], outputs=[output])
Example #14
 def make_node(self, *inputs):
     inputs = list(map(is_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             raise Exception("Error 1")
     outputs = [MyType()()]
     return Apply(self, inputs, outputs)
Example #15
    def make_node(self, value: Variable, *conds: Tuple[Variable]):
        """

        Parameters
        ==========
        value
            The value to return if `conds` all evaluate to ``True``; otherwise,
            `self.exc_type` is raised.
        conds
            The conditions to evaluate.
        """
        import aesara.tensor as at

        if not isinstance(value, Variable):
            value = at.as_tensor_variable(value)

        conds = [at.as_tensor_variable(c) for c in conds]

        assert all(c.type.ndim == 0 for c in conds)

        return Apply(
            self,
            [value] + conds,
            [value.type()],
        )
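A hedged usage sketch, assuming this make_node belongs to Aesara's CheckAndRaise/Assert Op (the aesara.raise_op import path is an assumption):

import aesara
import aesara.tensor as at
from aesara.raise_op import Assert  # assumed location of the Assert Op

x = at.vector("x")
# The value is passed through unchanged; the scalar conditions are checked at run time.
checked = Assert("x must be non-empty")(x, x.shape[0] > 0)
f = aesara.function([x], checked)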
Example #16
 def __init__(
     self,
     data,
     batch_size=128,
     dtype=None,
     broadcastable=None,
     name="Minibatch",
     random_seed=42,
     update_shared_f=None,
     in_memory_size=None,
 ):
     if dtype is None:
         data = pm.smartfloatX(np.asarray(data))
     else:
         data = np.asarray(data, dtype)
     in_memory_slc = self.make_static_slices(in_memory_size)
     self.shared = aesara.shared(data[tuple(in_memory_slc)])
     self.update_shared_f = update_shared_f
     self.random_slc = self.make_random_slices(self.shared.shape,
                                               batch_size, random_seed)
     minibatch = self.shared[self.random_slc]
     if broadcastable is None:
         broadcastable = (False, ) * minibatch.ndim
     minibatch = at.patternbroadcast(minibatch, broadcastable)
     self.minibatch = minibatch
     super().__init__(self.minibatch.type, None, None, name=name)
     Apply(aesara.compile.view_op, inputs=[self.minibatch], outputs=[self])
     self.tag.test_value = copy(self.minibatch.tag.test_value)
Example #17
 def make_node(self, x):
     x = aet.as_tensor_variable(x)
     self_axis = self.axis
     if self_axis is None:
         broadcastable = [False]
     else:
         if self_axis < 0:
             self_axis += len(x.broadcastable)
         if self_axis < 0 or self_axis >= len(x.broadcastable):
             raise RuntimeError(
                 "Unique axis `{}` is outside of input ndim = "
                 "{}.".format(self.axis, len(x.broadcastable)))
         broadcastable = [
             b if axis != self_axis else False
             for axis, b in enumerate(x.broadcastable)
         ]
     outputs = [TensorType(broadcastable=broadcastable, dtype=x.dtype)()]
     typ = TensorType(broadcastable=[False], dtype="int64")
     if self.return_index:
         outputs.append(typ())
     if self.return_inverse:
         outputs.append(typ())
     if self.return_counts:
         outputs.append(typ())
     return Apply(self, [x], outputs)
Example #18
    def make_node(self, activations, labels, input_lengths):
        t_activations = aet.as_tensor_variable(activations)
        # Ensure activations array is C-contiguous
        t_activations = cpu_contiguous(t_activations)

        t_labels = aet.as_tensor_variable(labels)
        t_input_lengths = aet.as_tensor_variable(input_lengths)

        if t_activations.type.dtype != "float32":
            raise TypeError("activations must use the float32 type!")

        if t_activations.ndim != 3:
            raise ValueError("activations must have 3 dimensions.")

        if t_labels.type.dtype != "int32":
            raise TypeError("labels must use the int32 type!")

        if t_labels.ndim != 2:
            raise ValueError("labels must have 2 dimensions.")

        if t_input_lengths.type.dtype != "int32":
            raise TypeError("input_lengths must use the int32 type!")

        if t_input_lengths.ndim != 1:
            raise ValueError("input_lengths must have 1 dimension.")

        costs = fvector(name="ctc_cost")
        outputs = [costs]
        if self.compute_grad:
            gradients = ftensor3(name="ctc_grad")
            outputs += [gradients]

        return Apply(self,
                     inputs=[t_activations, t_labels, t_input_lengths],
                     outputs=outputs)
Example #19
    def make_node(self,
                  x,
                  scale,
                  bias,
                  estimated_mean,
                  estimated_variance,
                  epsilon=1e-4):
        x = as_tensor_variable(x)
        scale = as_tensor_variable(scale)
        bias = as_tensor_variable(bias)
        estimated_mean = as_tensor_variable(estimated_mean)
        estimated_variance = as_tensor_variable(estimated_variance)
        epsilon = as_tensor_variable(epsilon)
        # Upcast to common dtype on the non-scalar
        # Keep as is dtype of scalar (epsilon)
        x, scale, bias, estimated_mean, estimated_variance = as_common_dtype(
            x, scale, bias, estimated_mean, estimated_variance)
        assert (x.ndim == scale.ndim == bias.ndim == estimated_mean.ndim ==
                estimated_variance.ndim)

        return Apply(
            self,
            [x, scale, bias, estimated_mean, estimated_variance, epsilon],
            [x.type()],
        )
Example #20
 def make_node(self, x, v, sorter=None):
     x = aet.as_tensor(x, ndim=1)
     v = aet.as_tensor(v)
     out_type = v.type.clone(dtype="int64")
     if sorter is None:
         return Apply(self, [x, v], [out_type()])
     else:
         sorter = aet.as_tensor(sorter, ndim=1)
         if PYTHON_INT_BITWIDTH == 32 and sorter.dtype == "int64":
             raise TypeError(
                 "numpy.searchsorted with Python 32bit do not support a"
                 " sorter of int64."
             )
         if sorter.type not in int_vector_types:
             raise TypeError("sorter must be an integer vector", sorter.type)
         return Apply(self, [x, v, sorter], [out_type()])
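A hedged usage sketch, assuming this is the SearchsortedOp behind aesara.tensor.extra_ops.searchsorted (the helper name and location are assumptions):

import aesara
import aesara.tensor as at
from aesara.tensor.extra_ops import searchsorted  # assumed helper wrapping this Op

x = at.vector("x")   # must be 1-d and sorted
v = at.vector("v")
idx = searchsorted(x, v)          # int64 indices, as in the make_node above
f = aesara.function([x, v], idx)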
Example #21
    def make_node(self, a, val, offset):
        a = aet.as_tensor_variable(a)
        val = aet.as_tensor_variable(val)
        offset = aet.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError(
                "%s: first parameter must have exactly"
                " two dimensions" % self.__class__.__name__
            )
        elif val.ndim != 0:
            raise TypeError(
                f"{self.__class__.__name__}: second parameter must be a scalar"
            )
        elif offset.ndim != 0:
            raise TypeError(
                f"{self.__class__.__name__}: third parameter must be a scalar"
            )
        val = aet.cast(val, dtype=upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError(
                "%s: type of second parameter must be the same"
                " as the first's" % self.__class__.__name__
            )
        elif offset.dtype not in integer_dtypes:
            raise TypeError(
                f"{self.__class__.__name__}: type of third parameter must be an integer;"
                " use aesara.tensor.cast(input, 'int32'/'int64')"
            )

        return Apply(self, [a, val, offset], [a.type()])
Example #22
    def make_node(self, rng, size, dtype, *dist_params):
        """Create a random variable node.

        Parameters
        ----------
        rng: RandomGeneratorType or RandomStateType
            Existing Aesara `Generator` or `RandomState` object to be used.  A new
            one is created if `None`.
        size: int or Sequence
            Numpy-like size of the output (i.e. replications).
        dtype: str
            The dtype of the sampled output.  If the value ``"floatX"`` is
            given, then ``dtype`` is set to ``aesara.config.floatX``.  This
            value is only used when `self.dtype` isn't set.
        dist_params: list
            Distribution parameters.

        Returns
        -------
        out: `Apply`
            A node with inputs `(rng, size, dtype) + dist_args` and outputs
            `(rng_var, out_var)`.

        """
        size = normalize_size_param(size)

        dist_params = tuple(
            as_tensor_variable(p) if not isinstance(p, Variable) else p
            for p in dist_params)

        if rng is None:
            rng = aesara.shared(np.random.default_rng())
        elif not isinstance(rng.type, RandomType):
            raise TypeError(
                "The type of rng should be an instance of either RandomGeneratorType or RandomStateType"
            )

        shape = self._infer_shape(size, dist_params)
        _, bcast = infer_broadcastable(shape)
        dtype = self.dtype or dtype

        if dtype == "floatX":
            dtype = config.floatX
        elif dtype is None or (isinstance(dtype, str)
                               and dtype not in all_dtypes):
            raise TypeError("dtype is unspecified")

        if isinstance(dtype, str):
            dtype_idx = constant(all_dtypes.index(dtype), dtype="int64")
        else:
            dtype_idx = constant(dtype, dtype="int64")
            dtype = all_dtypes[dtype_idx.data]

        outtype = TensorType(dtype=dtype, broadcastable=bcast)
        out_var = outtype()
        inputs = (rng, size, dtype_idx) + dist_params
        outputs = (rng.type(), out_var)

        return Apply(self, inputs, outputs)
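A hedged usage sketch, assuming normal from aesara.tensor.random.basic is one RandomVariable instance whose call goes through this make_node:

import numpy as np
import aesara
from aesara.tensor.random.basic import normal  # assumed RandomVariable instance

rng = aesara.shared(np.random.default_rng(123))
# The inputs of the resulting Apply node are (rng, size, dtype, loc, scale).
x = normal(0.0, 1.0, size=(3,), rng=rng)
f = aesara.function([], x)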
Example #23
def numba_funcify_MaxAndArgmax(op, node, **kwargs):
    axis = op.axis
    x_at = node.inputs[0]
    x_dtype = x_at.type.numpy_dtype
    x_dtype = numba.np.numpy_support.from_dtype(x_dtype)
    x_ndim = x_at.ndim

    if x_ndim == 0:

        @numba_basic.numba_njit(inline="always")
        def maxandargmax(x):
            return x, 0

    else:

        axes = tuple(int(ax) for ax in axis)

        # NumPy does not support multiple axes for argmax; this is a
        # work-around
        keep_axes = tuple(i for i in range(x_ndim) if i not in axes)

        reduce_max_py_fn = create_multiaxis_reducer(
            scalar_maximum, -np.inf, axes, x_ndim, x_dtype
        )
        reduce_max = jit_compile_reducer(
            Apply(node.op, node.inputs, [node.outputs[0].clone()]), reduce_max_py_fn
        )

        reduced_x_ndim = x_ndim - len(axes) + 1
        argmax_axis = create_axis_apply_fn(
            np.argmax, reduced_x_ndim - 1, reduced_x_ndim, np.int64
        )

        reaxis_order = keep_axes + axes
        sl1 = slice(None, len(keep_axes))
        sl2 = slice(len(keep_axes), None)

        @numba_basic.numba_njit
        def maxandargmax(x):
            max_res = reduce_max(x)

            # Not-reduced axes in front
            transposed_x = np.ascontiguousarray(np.transpose(x, reaxis_order))
            kept_shape = transposed_x.shape[sl1]
            reduced_shape = transposed_x.shape[sl2]
            reduced_size = 1
            for s in reduced_shape:
                reduced_size *= s

            # np.prod returns 1.0 when its argument is empty, so the product is
            # computed manually as an integer; otherwise reshape would complain
            # about a float argument.
            new_shape = kept_shape + (reduced_size,)
            reshaped_x = transposed_x.reshape(new_shape)

            max_idx_res = argmax_axis(reshaped_x)

            return max_res, max_idx_res

    return maxandargmax
Example #24
 def make_node(self, *inputs):
     assert len(inputs) == self.nin
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if input.type is not tdouble:
             raise Exception("Error 1")
     outputs = [double(self.name + "_R")]
     return Apply(self, inputs, outputs)
Example #25
 def make_node(self, v):
     if not isinstance(v, Variable):
         v = aet.as_tensor_variable(v)
     assert v.type.ndim == 1
     type_class = type(v.type)
     out_r_type = type_class(dtype=v.dtype, broadcastable=(True, False))
     out_c_type = type_class(dtype=v.dtype, broadcastable=(False, True))
     return Apply(self, [v], [out_r_type(), out_c_type()])
Example #26
 def make_node(self, a, b):
     a = aet.as_tensor_variable(a)
     b = aet.as_tensor_variable(b)
     assert a.type.dtype == "float32"
     assert a.type.dtype == b.type.dtype
     assert a.type.ndim == 2
     r = Apply(self, [a, b], [a.type()])
     return r
Example #27
    def make_node(self, value, *conds):
        from aesara.tensor import as_tensor_variable

        if not isinstance(value, Variable):
            value = as_tensor_variable(value)
        cond = [as_tensor_variable(c) for c in conds]
        assert np.all([c.type.ndim == 0 for c in cond])
        return Apply(self, [value] + cond, [value.type()])
Example #28
 def make_node(self, input, axis=-1):
     input = as_tensor_variable(input)
     axis = as_tensor_variable(axis)
     return Apply(
         self,
         [input, axis],
         [TensorType(dtype="int64", shape=input.type.shape)()],
     )
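A hedged usage sketch, assuming this is the ArgSortOp behind aesara.tensor.sort.argsort (the helper location is an assumption):

import aesara
import aesara.tensor as at
from aesara.tensor.sort import argsort  # assumed helper wrapping this Op

x = at.vector("x")
idx = argsort(x, axis=-1)     # int64 indices, matching the output type above
f = aesara.function([x], idx)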
Example #29
    def make_node(self, a, b):
        if b == aesara.tensor.type_other.NoneConst:
            a = as_tensor_variable(a)
            assert a.ndim == 2

            out_dtype = aesara.scalar.upcast(a.dtype)
            w = vector(dtype=out_dtype)
            return Apply(self, [a], [w])
        else:
            a = as_tensor_variable(a)
            b = as_tensor_variable(b)
            assert a.ndim == 2
            assert b.ndim == 2

            out_dtype = aesara.scalar.upcast(a.dtype, b.dtype)
            w = vector(dtype=out_dtype)
            return Apply(self, [a, b], [w])
Example #30
 def make_node(self, *inputs):
     num_expected_inps = len(self.local_inputs) - len(self.shared_inputs)
     if len(inputs) != num_expected_inps:
         raise ValueError(
             f"Expected {int(num_expected_inps)} inputs, got {len(inputs)}")
     inputs = [
         inp_t.filter_variable(inp)
         for inp, inp_t in zip(inputs, self.input_types)
     ]
     apply_node = Apply(
         self,
         list(inputs) + self.shared_inputs,
         [type() for type in self.output_types],
     )
     apply_node.local_inputs = self.local_inputs
     apply_node.local_outputs = self.local_outputs
     return apply_node