Example #1
class Pooling(Operation):
    """
    Pooling Op Superclass
    """
    input_spec = InputSpec(
        x=TensorInputType(),
        kernel_sizes=IntTensorInputType(const=True),
        strides=IntTensorInputType(const=True, optional=True),
        pad_type=StringInputType(const=True),
        pad=IntTensorInputType(const=True, optional=True),
        ceil_mode=BoolInputType(const=True, optional=True),
    )

    def default_inputs(self):
        num_spatial_dims = self.x.rank - 2
        return DefaultInputs(
            strides=[1]*num_spatial_dims,
            pad=[0]*2*num_spatial_dims,
            ceil_mode=False,
            )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        ksize = self.kernel_sizes.val
        x_shape = self.x.shape
        D_in_rank = len(x_shape) - 2

        strides = [1] * D_in_rank if self.strides is None else self.strides.val
        pad_type = "valid" if self.pad_type is None else self.pad_type.val.lower()
        if pad_type not in ["valid", "same", "custom"]:
            raise ValueError("Unrecognized value of pad_type : {}".format(pad_type))
        pad = None if self.pad is None else self.pad.val
        D_in = x_shape[2:]  # spatial dimensions

        if self.ceil_mode.val:
            if D_in_rank > 2:
                raise ValueError('pool: ceil_mode only supported for 1D or 2D pool')
            if pad_type == "same":
                raise ValueError("ceil_mode must be False when pad_type==same")
            if pad is not None:
                for i in range(D_in_rank):
                    if pad[2*i] != pad[2*i+1]:
                        raise ValueError("Padding must be symmetric if ceil_mode is True")

        D_out_shape = spatial_dimensions_out_shape(
            pad_type=pad_type,
            input_shape=D_in,
            kernel_shape=ksize,
            strides=strides,
            custom_pad=pad,
            ceil_mode=self.ceil_mode.val,
        )
        ret_shape = list(x_shape[:2]) + D_out_shape
        return types.tensor(self.x.dtype, tuple(ret_shape))
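For intuition, ``spatial_dimensions_out_shape`` (imported from the op utilities, not shown here) computes the per-dimension output size. A minimal sketch of the standard pooling arithmetic it is expected to follow, assuming concrete (non-symbolic) dimensions:

import math

def pool_out_size(d_in, k, s, pad_total=0, pad_type="custom", ceil_mode=False):
    # Sketch of the usual pooling arithmetic; the real helper also
    # handles symbolic dimensions and per-side custom padding.
    if pad_type == "same":
        # "same" padding: the output covers the input with stride s.
        return math.ceil(d_in / s)
    rounding = math.ceil if ceil_mode else math.floor
    return rounding((d_in + pad_total - k) / s) + 1

# Example: 1-D pool over length 10 with kernel 3 and stride 2.
assert pool_out_size(10, 3, 2, pad_type="valid") == 4   # floor((10-3)/2)+1
assert pool_out_size(10, 3, 2, pad_type="valid", ceil_mode=True) == 5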
Example #2
class scatter_nd(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices``.

    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a
    slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``
    has rank ``rank(data) - indices.shape[-1]``.

    * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
      the op updates ``output`` as follows:

    .. math::
       output[indices[i_0, ..., i_{K-2}]] = updates[i_0, ..., i_{K-2}]

    * Example: ``mode == add``. The update rule is:

    .. math::
       output[indices[i_0, ..., i_{K-2}]] += updates[i_0, ..., i_{K-2}]

    Parameters
    ----------
    data: tensor<\*D,T> (Required)
    indices: tensor<\*K,i32> (Required)
    updates: tensor<\*K, T> (Required)
        * Must have shape ``K[:-1] + data.shape[K[-1]:]``.
    mode: const string (Optional)
        * Default to ``add``.
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.

    Returns
    -------
    tensor<\*D,T>
        * A tensor with the same shape and type as ``data``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(mode="add", )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        assert self.indices.shape[-1] <= self.data.rank
        expected_updates_shape = (self.indices.shape[:-1] +
                                  self.data.shape[self.indices.shape[-1]:])
        assert is_compatible_symbolic_vector(self.updates.shape,
                                             tuple(expected_updates_shape))
        return self.data.sym_type
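The update rule above can be checked with a small numpy reference (a hypothetical helper, covering only the ``update`` and ``add`` modes):

import numpy as np

def scatter_nd_ref(data, indices, updates, mode="add"):
    # Each indices[i_0,...,i_{K-2}] selects a slice of data; the matching
    # updates[i_0,...,i_{K-2}] is written to or accumulated into it.
    out = np.copy(data)
    idx = indices.reshape(-1, indices.shape[-1])
    upd = updates.reshape(idx.shape[0], *updates.shape[indices.ndim - 1:])
    for i, index in enumerate(idx):
        if mode == "update":
            out[tuple(index)] = upd[i]
        else:  # "add"
            out[tuple(index)] += upd[i]
    return out

data = np.zeros((3, 3), dtype=np.int32)
indices = np.array([[0, 1], [2, 2]])
updates = np.array([5, 7], dtype=np.int32)
print(scatter_nd_ref(data, indices, updates))
# [[0 5 0]
#  [0 0 0]
#  [0 0 7]]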
Example #3
class list_scatter(Operation):
    """
    Scatter ``values`` to ``ls`` at locations ``indices``.

    Parameters
    ----------
    ls: List[*] (Required)

    indices: tensor<num_updates, i32> (Required)
        * Indices of ``ls`` to scatter to.
        * Elements of ``indices`` must be in ``[0, ls.length)`` at runtime.
        * If indices are greater than or equal to the list length, the list is
          dynamically resized.

    value: <*,T> (Optional)
        * Element value to write, which must match the element shape of ``ls``.
        * Default is ``None``.

    Returns
    -------
    List[*]
        * Updated list.

    Attributes
    ----------
    T: fp32, i32, bool
    """

    input_spec = InputSpec(
        ls=ListInputType(),
        indices=IntTensorInputType(),
        value=TensorInputType(),
    )

    def __init__(self, **kwargs):
        super(list_scatter, self).__init__(**kwargs)

    def type_inference(self):
        num_indices = self.indices.shape[0]
        num_values = self.value.shape[0]
        if num_values != num_indices:
            raise ValueError("Cannot scatter {} values to {} indices".format(
                num_values, num_indices))
        list_elem_type = self.ls.elem_type
        value_type = self.value.sym_type
        dynamic_length = self.ls.dynamic_length
        init_length = self.ls.init_length

        elem_type = types.tensor(value_type.get_primitive(),
                                 value_type.get_shape()[1:])
        if list_elem_type == types.unknown:
            # fill in the elem type using value's type info.
            return types.list(elem_type,
                              dynamic_length=dynamic_length,
                              init_length=init_length)
        if not types.is_subtype(elem_type, list_elem_type):
            msg = "Elem type mismatch: ls elem type {} vs " + "value type {}"
            raise ValueError(msg.format(list_elem_type, elem_type))
        return self.ls.sym_type
Example #4
class random_normal(RandomDistribution):
    r"""
    Returns a tensor with the specified shape, with random values from a normal
    distribution.
    
    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    mean: const<f32> (Optional)
        * The mean (center) of the normal distribution. Defaults to ``0.0``.
    stddev: const<f32> (Optional)
        * The standard deviation (width) of the normal distribution. Defaults to ``1.0``.
    seed: const<i32> (Optional)
        * Seed to create a reproducible sequence of values across multiple invokes.
    
    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_bernoulli, random_uniform
    """
    
    input_spec = (
        InputSpec(
            shape=IntTensorInputType(),
            mean=FloatInputType(const=True, optional=True),
            stddev=FloatInputType(const=True, optional=True),
            seed=IntInputType(const=True, optional=True),
        )
        + RandomDistribution.input_spec
    )

    def default_inputs(self):
        return super().default_inputs() + \
            DefaultInputs(
                mean=0.,
                stddev=1.,
                seed=-1,
            )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if self.mean.dtype != self.stddev.dtype:
            raise ValueError("Incompatible primitive types in random_normal operation")
        self.out_dtype = self.mean.dtype
        return super().type_inference()
class reverse_sequence(Operation):
    """
    Reverse variable-length slices for specified axes / dimensions of the input
    tensor. This op first slices the input tensor along the ``batch_axis`` dimension,
    then partially reverses each slice along the ``seq_axis``, reversing only its
    first ``lengths[i]`` elements.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Input tensor.
    lengths: tensor<L, i32> (Required)
        * 1-dimensional tensor of length ``x.shape[batch_axis]`` specifying the length
          of the sequence to reverse.
        * Values must be in range ``[0, x.shape[seq_axis]]``.
    seq_axis: const<i32> (Optional)
        * The dimension to reverse.
        * Defaults to ``0``.
    batch_axis: const<i32> (Optional)
        * Dimension for slicing.
        * Defaults to ``0``.

    Returns
    -------
    tensor<\*?, T>
        * Same type and shape as the input tensor.

    Attributes
    ----------
    T: fp32

    References
    ----------
    `tf.reverse_sequence <https://www.tensorflow.org/api_docs/python/tf/reverse_sequence>`_

    """

    input_spec = InputSpec(
        x=TensorInputType(),
        lengths=IntTensorInputType(),
        seq_axis=IntInputType(const=True, optional=True),
        batch_axis=IntInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            seq_axis=0,
            batch_axis=0)

    def __init__(self, **kwargs):
        super(reverse_sequence, self).__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        raise NotImplementedError("TODO")
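``value_inference`` is left unimplemented above. A numpy reference for the op's semantics, assuming concrete values, could look like this:

import numpy as np

def reverse_sequence_ref(x, lengths, seq_axis=0, batch_axis=0):
    # For each slice i along batch_axis, reverse its first lengths[i]
    # elements along seq_axis (mirrors tf.reverse_sequence).
    out = np.copy(x)
    for i, n in enumerate(lengths):
        fwd = [slice(None)] * x.ndim
        fwd[batch_axis] = i
        rev = list(fwd)
        fwd[seq_axis] = slice(0, n)
        rev[seq_axis] = slice(n - 1, None, -1) if n > 0 else slice(0, 0)
        out[tuple(fwd)] = x[tuple(rev)]
    return out

x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(reverse_sequence_ref(x, lengths=[3, 2], seq_axis=1, batch_axis=0))
# [[3 2 1 4]
#  [6 5 7 8]]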
Example #6
class random_bernoulli(RandomDistribution):
    r"""
    Returns a tensor with the specified shape, with random values from a Bernoulli
    distribution.
    
    .. math::
       f(k) = \begin{cases}1-p  &\text{if } k = 0\\
                        p    &\text{if } k = 1\end{cases}

    for :math:`k` in :math:`\{0, 1\}`.
    
    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    prob: const<f32> (Optional)
        * The probability of sampling ``1``. Defaults to ``0.5``.
    seed: const<i32> (Optional)
        * Seed to create a reproducible sequence of values across multiple invokes.
    
    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_normal, random_uniform
    """
    
    input_spec = (
        InputSpec(
            shape=IntTensorInputType(),
            prob=FloatInputType(const=True, optional=True),
            seed=IntInputType(const=True, optional=True),
        )
        + RandomDistribution.input_spec
    )

    def default_inputs(self):
        return super().default_inputs() + \
            DefaultInputs(
                seed=-1,
                prob=0.5,
            )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        self.out_dtype = self.prob.dtype
        return super().type_inference()
Example #7
class ReductionAxes(Operation):
    """
    Reduction Op Superclass
    """
    input_spec = InputSpec(
        x=TensorInputType(),
        axes=IntTensorInputType(const=True, optional=True),
        keep_dims=BoolInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axes=None,
            keep_dims=False,
        )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_type = self.x.dtype
        x_shape = self.x.shape
        axes = self.axes.val if self.axes is not None else None
        if axes is None:
            axes = range(self.x.rank)
        keep_dims = self.keep_dims.val

        reduced_shape = list(x_shape)
        if keep_dims:
            for i in axes:
                reduced_shape[i] = 1
        else:
            # sort reverse so we can delete shape elements back to front
            axes = [
                axis if axis >= 0 else axis + len(reduced_shape)
                for axis in axes
            ]
            for i in sorted(axes)[::-1]:
                reduced_shape.pop(i)
        if len(reduced_shape) == 0:
            return x_type  # scalar

        return types.tensor(x_type, tuple(reduced_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        axes = tuple(self.axes.val) if self.axes is not None else None
        return self.get_operator()(self.x.val,
                                   axis=axes,
                                   keepdims=self.keep_dims.val)

    def get_operator(self):
        raise NotImplementedError()
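Concrete reduction ops are expected to subclass this and supply a numpy reducer via ``get_operator``; a hypothetical minimal subclass, plus the shape arithmetic of ``type_inference`` in plain numpy terms:

import numpy as np

class reduce_sum(ReductionAxes):
    # Hypothetical subclass: only the reducer needs to be named;
    # type_inference and value_inference above do the rest.
    def get_operator(self):
        return np.sum

x = np.ones((2, 3, 4))
print(np.sum(x, axis=(0, 2), keepdims=True).shape)   # (1, 3, 1)
print(np.sum(x, axis=(0, 2), keepdims=False).shape)  # (3,)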
Example #8
class fill(Operation):
    """
    Returns a tensor with a given shape filled with a constant value.

    Parameters
    ----------
    shape: tensor<[K], i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor. ``shape[k] > 0`` for ``k = 0,..., K-1``.
    value: const<T> (Optional)
        * Constant value to fill in.
        * Default to ``0.0``.

    Returns
    -------
    tensor<\*?, T>
        * Tensor with shape determined by the input shape.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        shape=IntTensorInputType(),
        value=IntOrFloatOrBoolInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(value=0.)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # We can't infer any shape if shape has variable length.
            return types.tensor(self.value.dtype,
                                (get_new_variadic_symbol(), ))

        # shape has fixed length here.
        if self.shape.sym_val is None:
            ret_shape = tuple(
                [get_new_symbol() for _ in range(self.shape.shape[0])])
            return types.tensor(self.value.dtype, ret_shape)

        return types.tensor(self.value.dtype,
                            tuple(self.shape.sym_val.tolist()))

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.full(shape=self.shape.val, fill_value=self.value.val)
class transpose(Operation):
    """
    Permute tensor ``x`` dimensions according to ``perm``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Must be at least 1-D. ``x`` may have a symbolic shape.
    perm: const<[rank(x)], i32> (Required)
        * Permutation order. ``-rank(x) <= perm[i] < rank(x)`` for all perm entries.

    Returns
    -------
    tensor<\*?,T>
        * Tensor with same rank and type as ``x``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool

    References
    ----------
    `torch.Tensor.permute <https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute>`_
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        perm=IntTensorInputType(const=True),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_type = self.x.dtype
        perm = self.perm.val
        x_shape = np.array(self.x.shape)
        if len(perm) != self.x.rank:
            msg = "perm should have the same length as rank(x): {} != {}"
            raise ValueError(msg.format(len(perm), self.x.rank))
        if self.x.rank == 0:
            return self.x.sym_type  # scalar cannot be transposed
        if any_variadic(self.x.shape):
            ret_shape = get_new_variadic_symbol()
        else:
            ret_shape = x_shape[perm]
        return types.tensor(x_type, tuple(ret_shape))

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        return np.transpose(self.x.val, axes=self.perm.val)
class reverse(Operation):
    """
    Reverse the order of the input tensor ``x`` along specified ``axes`` (dimensions).

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Input tensor.

    axes: const<D, i32> (Optional)
        * Dimension(s) to reverse. Each axis must be in the range ``[-rank(x), rank(x))``.
        * Defaults to None (reverse on all dimensions).

    Returns
    -------
    tensor<\*?, T>
        * Same type and shape as the input tensor.

    Attributes
    ----------
    T: fp32

    References
    ----------
    See `tf.reverse <https://www.tensorflow.org/api_docs/python/tf/reverse>`_
    and `TORCH <https://pytorch.org/docs/stable/torch.html#torch.flip>`_.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        axes=IntTensorInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axes=None,
            )

    def __init__(self, **kwargs):
        super(reverse, self).__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        res = self.x.val
        axes = self.axes.val if self.axes is not None else range(self.x.rank)
        for axis in axes:
            res = np.flip(res, axis=axis)
        return res
Example #11
class gather_nd(Operation):
    """
    Gather slices from ``x`` according to ``indices``, similar to `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.

    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice
    of ``x``:

    .. math::
       output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]

    Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank
    ``rank(x) - indices.shape[-1]``.

    Parameters
    ----------
    x: tensor<\*D,T> (Required)
    indices: tensor<\*K,i32> (Required)

    Returns
    -------
    tensor<\*V,T>
        * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.

    Attributes
    ----------
    T: fp16, fp32, i32

    References
    ----------
    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        indices=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        assert self.indices.shape[-1] <= self.x.rank
        out_type = self.x.dtype
        out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1]:]
        return types.tensor(out_type, out_shape)
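The shape rule ``V = K[:-1] + D[K[-1]:]`` can be exercised with a small numpy reference (hypothetical helper):

import numpy as np

def gather_nd_ref(x, indices):
    # Each indices[i_0,...,i_{K-2}] is a partial index into x; the selected
    # slices are packed into shape K[:-1] + D[K[-1]:].
    k = indices.shape[-1]
    flat = indices.reshape(-1, k)
    slices = np.stack([x[tuple(idx)] for idx in flat])
    return slices.reshape(indices.shape[:-1] + x.shape[k:])

x = np.arange(12).reshape(3, 4)
print(gather_nd_ref(x, np.array([[0], [2]])).shape)   # (2, 4): two rows of x
print(gather_nd_ref(x, np.array([[0, 1], [2, 3]])))   # [ 1 11]: two elements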
Example #12
class RandomDistribution(Operation):
    """
    Random Op Superclass
    """
    input_spec = InputSpec(shape=IntTensorInputType(),)
    out_dtype = types.fp32

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # We can't infer any shape if shape has variable length.
            return types.tensor(self.out_dtype, (get_new_variadic_symbol(),))

        # shape has fixed length here.
        if self.shape.sym_val is None:
            shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
            return types.tensor(self.out_dtype, shape)

        return types.tensor(self.out_dtype, tuple(self.shape.sym_val.tolist()))
Example #13
class list_gather(Operation):
    """
    Return selected values in ``ls`` as a packed ``Tensor``.

    Parameters
    ----------
    ls: List[\*] (Required)

    indices: <K,i32> (Required)
        * Indices to gather from; each element must be in ``[0, ls.length)`` at runtime.

    Returns
    -------
    <\*K,T>
        * Selected tensors packed into a ``len(ls.elem_shape)+1`` rank tensor.
        * ``K[0] == len(indices)``.

    Attributes
    ----------
    T: fp32, i32, bool
    """

    input_spec = InputSpec(
        ls=ListInputType(),
        indices=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super(list_gather, self).__init__(**kwargs)

    def type_inference(self):
        list_elem_type = self.ls.elem_type
        if list_elem_type == types.unknown:
            msg = ("Unknown element type. The List might not have been " +
                   "written to ({})")
            raise ValueError(msg.format(self.name))
        elem_shape = list_elem_type.get_shape()
        dtype = list_elem_type.get_primitive()
        ret_shape = [self.indices.shape[0]] + list(elem_shape)
        return types.tensor(dtype, tuple(ret_shape))
Example #14
class torch_tensor_assign(Operation):
    """
    Method for tensor value assignment via indexing and slicing.
    Suppose we have a tensor ``x``; this method achieves:
    ``x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] = updates``

    Parameters
    ----------
    data: tensor<*?, T> (Required)
        * Input tensor
    updates: tensor<\*K, T> (Required)
        * Value tensor to be inserted
        * The shape of the updates tensor must match the slicing result of the input data.
    begin: tensor<[rank(x)], i32> (Required)
        * Starting index for the dimension of slicing.
    end: tensor<[rank(x)], i32> (Required)
        * Ending index for the dimension of slicing.
    stride: tensor<[rank(x)], i32> (Optional)
        * Default as all ``1``s.
        * Stride for the dimension of slicing.
    begin_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``begin_mask[i]==True``, neglect ``begin[i]``, and set ``begin[i]`` to ``0``.
    end_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``end_mask[i]==True``, neglect ``end[i]``, and set ``end[i]`` to ``x.shape[i]``.
    squeeze_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``squeeze_mask[i]==True``, neglect ``end[i]``, and do a pure index at ``begin[i]``.

    Returns
    -------
    tensor<*?, T>
        - Scalar or tensor.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        updates=IntOrFloatInputType(),
        begin=IntTensorInputType(const=True),
        end=IntTensorInputType(const=True),
        stride=IntTensorInputType(const=True, optional=True),
        begin_mask=BoolTensorInputType(const=True, optional=True),
        end_mask=BoolTensorInputType(const=True, optional=True),
        squeeze_mask=BoolTensorInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            stride=None,
            begin_mask=None,
            end_mask=None,
            squeeze_mask=None,
        )

    def __init__(self, **kwargs):
        super(torch_tensor_assign, self).__init__(**kwargs)

    def type_inference(self):
        # Verify the updates and the data slicing have the same shape
        begin = self.begin.val
        end = self.end.val
        data_rank = self.data.rank
        stride = self.stride.val if self.stride is not None else [1] * data_rank
        begin_mask = (
            self.begin_mask.val if self.begin_mask is not None else [False] * data_rank
        )
        end_mask = self.end_mask.val if self.end_mask is not None else [False] * data_rank
        squeeze_mask = (
            self.squeeze_mask.val if self.squeeze_mask is not None else [False] * data_rank
        )
        data_shape = self.data.shape
        expected_updates_shape = tuple(_solve_slice_by_index_shape(data_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask))
        if not is_compatible_symbolic_vector(expected_updates_shape, self.updates.shape):
            raise ValueError("The updates tensor should have shape {}. Got {}".format(expected_updates_shape, self.updates.shape))
        return self.data.sym_type
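In plain numpy terms the op is a strided-slice assignment, and the shape constraint that ``type_inference`` enforces is that ``updates`` must match the selected slice:

import numpy as np

x = np.zeros((4, 4), dtype=np.float32)
updates = np.array([[1., 2.], [3., 4.]], dtype=np.float32)

# begin=[0, 0], end=[4, 4], stride=[2, 2] selects a (2, 2) view of x,
# so updates must have shape (2, 2).
x[0:4:2, 0:4:2] = updates
print(x)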
Example #15
class split(Operation):
    """
    Split a tensor into a tuple of tensors.

    Parameters
    ----------
    x: <\*?,T>  (Required)
        * The tensor to split.
        * The tensors may be variadic, but the number of tensors must be determined
          at compile time (i.e. a tuple).

    num_splits: <i32> (Optional)
        If specified, divide ``x`` into ``num_splits`` tensors along ``axis``.
        Its behavior depends on ``split_sizes``:

            * If ``split_sizes`` is defined, ``num_splits == S``, and the output
              sizes may be uneven.
            * If ``split_sizes`` is not defined, ``value.shape[axis]`` must be
              divisible by ``num_splits``, and the output sizes must be even.

        At least one of ``num_splits`` or ``split_sizes`` must be provided.
        If ``split_sizes`` length ``S`` cannot be determined at compile time,
        ``num_splits`` must be supplied to determine the number of outputs.

    split_sizes: const<S,i32> (Optional)
        * Sizes to split to. The sum of ``split_sizes`` must equal to
          ``value.shape[axis]``.

    axis: const<i32> (Required)
        * The dimension along which to split. Must be in the
          range ``[-rank(x), rank(x))``.

    Returns
    -------
    Tuple[tensor<\*?,T>]
        * Where the length of the tuple is the number of splits (determined
          from ``num_splits`` or ``split_sizes``).

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        num_splits=IntInputType(const=True, optional=True),
        split_sizes=IntTensorInputType(const=True, optional=True),
        axis=IntInputType(const=True),
    )

    def __init__(self, **kwargs):
        super(split, self).__init__(**kwargs)

    def type_inference(self):
        num_splits, sizes = self._get_num_splits_and_sizes()
        x_shape = list(self.x.shape)
        ret_shapes = [x_shape[:] for _ in range(num_splits)]
        axis = self.axis.val
        for i, d in enumerate(sizes):
            ret_shapes[i][axis] = d
        self.sizes = sizes
        return tuple([types.tensor(self.x.dtype, s) for s in ret_shapes])

    def _get_num_splits_and_sizes(self):
        """
        Return:
        - num_splits: int
        - sizes: list of int/symbols. Of length num_splits

        Raise ValueError if num_splits cannot be determined.
        """
        if self.num_splits is None and self.split_sizes is None:
            msg = (
                "At least one of num_splits and split_sizes "
                + "must be specified in split op {}"
            )
            raise ValueError(msg.format(self.name))

        axis = self.axis.val

        if self.num_splits is not None:
            num_splits = self.num_splits.val
            if self.split_sizes is None:
                # Even split
                if (
                    not is_symbolic(self.x.shape[axis])
                    and self.x.shape[axis] % num_splits != 0
                ):
                    msg = "num_split {} does not divide split " + "dim (length = {})"
                    raise ValueError(msg.format(num_splits, self.x.shape[axis]))
                size = self.x.shape[axis] // num_splits
                return num_splits, [size] * num_splits

            # self.split_sizes is not None
            if self.split_sizes.sym_val is not None:
                return num_splits, self.split_sizes.sym_val

            # self.split_size.sym_val is None.
            sizes = [get_new_symbol() for _ in range(num_splits)]
            return num_splits, sizes

        # self.num_splits is None, self.split_sizes is not None
        if self.split_sizes.sym_val is not None:
            return len(self.split_sizes.sym_val), self.split_sizes.sym_val

        # self.num_splits is None, self.split_sizes is not None
        # self.split_sizes.sym_val is None
        if any_symbolic(self.split_sizes.shape):
            raise ValueError("Unable to determine number of splits")

        num_splits = self.split_sizes.shape[0]
        sizes = [get_new_symbol() for _ in range(num_splits)]
        return num_splits, sizes

    @precondition(allow=VALUE | SYMBOL | NONE)
    def value_inference(self):
        num_splits, sizes = self._get_num_splits_and_sizes()
        if self.x.sym_val is None or any_symbolic(sizes):
            raise NotImplementedError()

        if num_splits == 1:
            # No split_indices possible.
            return self.x.sym_val

        split_indices = np.cumsum(sizes).astype(np.int64)
        return tuple(np.split(self.x.sym_val, split_indices[:-1], axis=self.axis.val))
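The two ways of specifying a split map directly onto numpy: ``np.split`` takes either a count or a list of cut points, which is why ``value_inference`` above converts ``sizes`` with ``cumsum``:

import numpy as np

x = np.arange(12).reshape(6, 2)

# num_splits only: x.shape[axis] must divide evenly.
even = np.split(x, 3, axis=0)        # three (2, 2) tensors

# split_sizes: convert sizes into cut indices, as value_inference does.
sizes = [1, 2, 3]
cuts = np.cumsum(sizes)[:-1]         # [1, 3]
uneven = np.split(x, cuts, axis=0)   # shapes (1, 2), (2, 2), (3, 2)
print([t.shape for t in even], [t.shape for t in uneven])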
Example #16
class scatter(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
    by operation ``mode``.
    
    Example: ``mode == update``.
    
    * For ``i`` in ``[0, len(indices)]``:
    
    .. math::
       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
    
    * For ``j != i``:
    
    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
    
    Example: ``mode == add``.
    
    * For ``i`` in ``[0, len(indices)]``:
    
    .. math::
       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
    .. math::
       data[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
    
    * For ``j != i``:
    
    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
    
    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<[C], i32> (Required)
        * 1-D tensor.
    updates: tensor<\*K, T> (Required)
        * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
    axis: const i32 (Optional)
        * Default to ``0``.
    mode: const string (Optional)
        * Default to ``add``.
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.
    
    Returns
    -------
    tensor<\*D, T>
        * With the same type and shape as input ``data``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        axis=IntInputType(const=True, optional=True),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def __init__(self, **kwargs):
        super(scatter, self).__init__(**kwargs)

    def type_inference(self):
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name))

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank
        expected_updates_shape = (self.data.shape[:axis] + self.indices.shape +
                                  self.data.shape[axis + 1:])
        np.testing.assert_equal(self.updates.shape,
                                np.array(expected_updates_shape))

        return self.data.sym_type
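A numpy reference for the default ``add`` mode along ``axis = 0`` (``np.add.at`` provides the accumulate-at-indices behavior):

import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6]])
indices = np.array([1, 0])
updates = np.array([[5, 6, 7], [8, 9, 10]])

out = np.copy(data)
np.add.at(out, indices, updates)   # out[indices[i]] += updates[i]
print(out)
# [[ 9 11 13]
#  [ 9 11 13]]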
Example #17
class gather_along_axis(Operation):
    """
    Take the values along ``axis`` at locations ``indices``.

    .. math::
       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
    .. math::
       output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    x: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
        * ``rank(indices) == rank(x)``.
    axis: const i32 (Optional)
        * Default to ``0``.

    Returns
    -------
    tensor<\*K, T>
        * Output tensor has the same shape as ``indices``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        indices=IntTensorInputType(),
        axis=IntInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(axis=0, )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        x = self.x.val
        indices = self.indices.val
        axis = self.axis.val
        return np.take_along_axis(x, indices, axis)

    def type_inference(self):
        if self.x.rank != self.indices.rank:
            raise ValueError(
                "Rank mismatch between input and indices. "
                "Input rank: {}, indices rank: {}".format(
                    self.x.rank, self.indices.rank))

        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name))

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.x.rank

        for i in range(self.x.rank):
            if i != axis:
                assert self.x.shape[i] == self.indices.shape[i]

        return types.tensor(self.x.dtype, self.indices.shape)
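``value_inference`` above delegates to ``np.take_along_axis``; for example:

import numpy as np

x = np.array([[10, 20], [30, 40]])
indices = np.array([[0, 1], [1, 0]])
# Along axis 0: output[i, j] = x[indices[i, j], j]
print(np.take_along_axis(x, indices, axis=0))
# [[10 40]
#  [30 20]]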
Example #18
class scatter_along_axis(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices`` along ``axis`` dimension
    using ``mode`` operation.

    Example: ``mode == update``.

    * For ``i`` in ``[0, len(indices)]``:

    .. math::
       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
    .. math::
       output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]

    * For ``j != i``:

    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Example: ``mode == add``.

    * For ``i`` in ``[0, len(indices)]``:

    .. math::
       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
    .. math::
       output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
    .. math::
       data[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]

    * For ``j != i``:

    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
        * ``rank(indices) == rank(data)``.
    updates: tensor<\*K, T> (Required)
        * Must be the same shape as ``indices``.
    axis: const i32 (Optional)
        * Default to ``0``.
    mode: const string (Optional)
        * Default to ``add``.
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.

    Returns
    -------
    tensor<\*D, T>
        * With the same type and shape as input ``data``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        axis=IntInputType(const=True, optional=True),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        data = np.copy(self.data.val)
        indices = self.indices.val
        updates = self.updates.val
        axis = self.axis.val
        np.put_along_axis(data, indices, updates, axis=axis)
        return data

    def type_inference(self):
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name))

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank

        assert is_compatible_symbolic_vector(self.indices.shape,
                                             self.updates.shape)
        assert self.data.rank == self.indices.rank
        for i in range(self.data.rank):
            if i != axis:
                assert self.data.shape[i] == self.indices.shape[i]

        return self.data.sym_type
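``value_inference`` above delegates to ``np.put_along_axis``, which writes (rather than accumulates) ``updates`` along the axis, i.e. the ``update`` rule. A small example of that call:

import numpy as np

data = np.zeros((2, 3), dtype=np.int32)
indices = np.array([[0, 2, 1], [1, 0, 2]])
updates = np.array([[1, 2, 3], [4, 5, 6]])

# Along axis 1: data[i, indices[i, j]] = updates[i, j]
np.put_along_axis(data, indices, updates, axis=1)
print(data)
# [[1 3 2]
#  [5 4 6]]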
Example #19
class one_hot(Operation):
    """
    Returns one-hot vectors in which the locations given by ``indices`` take the
    ``on_value``, while all other locations take the ``off_value``.

    Parameters
    ----------
    indices: tensor<[D],T> (Required)
        * Tensor, values indicate the locations for each one-hot vector to take the ``on_value``.
    one_hot_vector_size: i32 (Required)
        * Indicates the size of each returned one-hot vector.
    axis: const i32 (Optional)
        * Indicates which dimension to append the new axis.
        * If the input indices is rank ``D``, the output tensor will have rank ``D+1``.
        * Default to ``-1`` (the last dimension).
    on_value: const i32 (Optional)
        * Value for the locations specified by ``indices``.
        * Default to ``1``.
    off_value: const i32 (Optional)
        * Default to ``0``.

    Returns
    -------
    tensor<\*?,T>
        * A tensor that contains one-hot vectors.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        indices=IntTensorInputType(),
        one_hot_vector_size=IntInputType(),
        axis=IntInputType(const=True, optional=True),
        on_value=IntOrFloatInputType(const=True, optional=True),
        off_value=IntOrFloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=-1,
            on_value=1,
            off_value=0,
            )

    def __init__(self, **kwargs):
        super(one_hot, self).__init__(**kwargs)

    def type_inference(self):
        on_type = self.on_value.dtype
        off_type = self.off_value.dtype

        if on_type != off_type:
            raise TypeError(
                "Parameters on_value and off_value must have same input types."
            )

        if self.axis.val < -self.indices.rank - 1 or self.axis.val > self.indices.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        indices_shape = list(self.indices.shape)

        depth_value = self.one_hot_vector_size.val
        if depth_value is None:
            depth_value = get_new_symbol()
        elif depth_value < 0:
            raise ValueError("Parameter one_hot_vector_size must be non-negative")

        retshape = indices_shape

        if self.axis.val < 0:
            cut = len(retshape) + self.axis.val + 1
        else:
            cut = self.axis.val
        retshape = retshape[0:cut] + [depth_value] + retshape[cut:]

        return types.tensor(on_type, retshape)
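A numpy sketch of the result, assuming the default ``axis == -1`` (hypothetical helper):

import numpy as np

def one_hot_ref(indices, depth, on_value=1, off_value=0):
    # Append a new axis of size `depth`; entry i is on_value at indices[i].
    out = np.full(indices.shape + (depth,), off_value)
    np.put_along_axis(out, indices[..., None], on_value, axis=-1)
    return out

print(one_hot_ref(np.array([0, 2, 1]), depth=3))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]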
Example #20
class scatter(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
    by operation ``mode``.

    Example: ``mode == update``.

    * For ``i`` in ``[0, len(indices)]``:

    .. math::
       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]

    * For ``j != i``:

    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Example: ``mode == add``.

    * For ``i`` in ``[0, len(indices)]``:

    .. math::
       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
    .. math::
       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
    .. math::
       data[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]

    * For ``j != i``:

    .. math::
       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
    .. math::
       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<[C], i32> (Required)
        * 1-D tensor.
    updates: tensor<\*K, T> (Required)
        * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
    axis: const i32 (Optional)
        * Default to ``0``.
    mode: const string (Optional)
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.
        * Default value is ``add``.

    Returns
    -------
    tensor<\*D, T>
        * With the same type and shape as input ``data``.

    Attributes
    ----------
    T: fp32

    For example:
        data = [[1, 2, 3], [4, 5, 6]]
        indices = [1, 0]
        updates = [[5, 6, 7], [8, 9, 10]]
        axis = 0
        mode = "update"

    produces:
       [[9, 11, 13], [9, 11, 13]]
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        axis=IntInputType(const=True, optional=True),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name))

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank
        expected_updates_shape = (self.data.shape[:axis] + self.indices.shape +
                                  self.data.shape[axis + 1:])

        err = "Updates shape {} is incorrect. It should be {}.".format(
            self.updates.shape, expected_updates_shape)
        assert is_compatible_symbolic_vector(
            self.updates.shape, tuple(expected_updates_shape)), err

        return self.data.sym_type
class expand_dims(Operation):
    """
    Insert a single-dimension axis into a 1-D or higher tensor at each axis in ``axes``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Scalar or tensor.
    axes: const tensor<[K], i32> (Required)
        * ``K`` is the number of dimensions expanded.
        * A single dimension is inserted at each dimension index in ``axes``.
        * Negative values index from the end: ``-d-1 <= axis <= d``,
          where ``d`` is the rank of ``x``.

    Returns
    -------
    tensor<\*(rank(x)+K), T>
        * Same type as the input ``x`` with rank ``rank(x)+K``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        axes=IntTensorInputType(const=True),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_rank = self.x.rank
        x_type = self.x.dtype
        x_shape = list(self.x.shape)
        axes = self.axes.val
        out_rank = x_rank + len(axes)

        for axis in axes:
            if axis <= -out_rank - 1 or axis >= out_rank:
                msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}'
                raise IndexError(
                    msg.format(axis, self.op_type, self.name, self.x.shape))

        ret_shape = x_shape
        axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
        for axis in axes:
            ret_shape.insert(axis, 1)

        return types.tensor(x_type, tuple(ret_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        axes = self.axes.val
        out_rank = self.x.rank + len(axes)

        for axis in axes:
            if axis <= -out_rank - 1 or axis >= out_rank:
                msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}'
                raise IndexError(
                    msg.format(axis, self.op_type, self.name, self.x.shape))

        axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
        ret_shape = list(self.x.shape)
        for axis in axes:
            ret_shape.insert(axis, 1)
        return np.reshape(self.x.val, ret_shape)
class squeeze(Operation):
    """
    Remove dimensions of size ``1`` from a 1-D or higher tensor.

    Parameters
    ----------
    x: tensor<\*?,T> (Required)
        * Must be at least 1-D.
    axes: const<K,i32> (Optional)
        * Axes to squeeze out.
        * Default is to remove all dimensions of size ``1``.

    Returns
    -------
    tensor<\*(rank(x)-K),T>
        * Tensor with same type as input ``x`` and rank ``rank(x)-K``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        axes=IntTensorInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(axes=None, )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_type = self.x.dtype
        x_shape = self.x.shape
        squeezed_shape = list(x_shape)
        if self.axes is None:
            # Squeeze all single-dim, assuming symbolic dims != 1
            squeezed_shape = [s for s in squeezed_shape if s != 1]
        else:
            axes = self.axes.val
            axes = [axis if axis >= 0 else axis + self.x.rank for axis in axes]
            for i in sorted(axes)[::-1]:  # descending order
                if len(squeezed_shape) <= i:
                    raise ValueError(
                        "Cannot squeeze dim {} for shape {}".format(
                            i, squeezed_shape))
                squeezed_shape.pop(i)

        return types.tensor(
            x_type,
            tuple(squeezed_shape)) if len(squeezed_shape) != 0 else x_type

    @precondition(allow=VALUE)
    def value_inference(self):
        if self.x.val is None:
            return None
        if self.axes is None:
            val = np.squeeze(self.x.val)
        else:
            val = np.squeeze(self.x.val, axis=tuple(self.axes.val))
        return val if val.shape != () else self.x.val[0]
class slice_by_size(Operation):
    """
    Slice input tensor starting from the given ``begin`` index and by
    the amount specified by the ``size`` input, for each dimension.

    Parameters
    ----------
    x: tensor<*?, T> (Required)
        * Input tensor.
    begin: tensor<[rank(x)], i32> (Required)
        * The begin index for the slice.
    size: tensor<[rank(x)], i32> (Required)
        * The size of the slice in each dimension. If ``size[i]`` is ``-1``,
          all the remaining elements starting at ``begin[i]`` are sliced.

    Returns
    -------
    tensor<\*?, T>
        * Scalar or tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        begin=IntTensorInputType(),
        size=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if self.begin.rank != 1:
            raise ValueError(
                "begin should be 1-D tensor, got {}-D tensor instead".format(
                    self.begin.rank))
        if self.size.rank != 1:
            raise ValueError(
                "size should be 1-D tensor, got {}-D tensor instead".format(
                    self.size.rank))
        if self.x.rank != self.begin.shape[0]:
            raise ValueError(
                "Length of begin {} doesn't equal to input rank {}.".format(
                    self.begin.shape[0], self.x.rank))
        if self.x.rank != self.size.shape[0]:
            raise ValueError(
                "Length of size {} doesn't equal to input rank {}.".format(
                    self.size.shape[0], self.x.rank))

        x_shape = self.x.shape
        ret_shape = []
        if self.size.sym_val is None:
            ret_shape = [get_new_symbol() for _ in range(self.x.rank)]
            return types.tensor(self.x.dtype, tuple(ret_shape))

        for idx, s in enumerate(self.size.sym_val):
            if is_symbolic(s):
                ret_shape.append(s)
            elif s != -1:
                ret_shape.append(s)
            elif self.begin.sym_val is not None:
                ret_shape.append(x_shape[idx] - self.begin.sym_val[idx])
            else:
                ret_shape.append(get_new_symbol())

        return types.tensor(self.x.dtype, tuple(ret_shape))

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        if any_symbolic(self.begin.sym_val):
            return None
        if any_symbolic(self.size.sym_val):
            return None
        if self.x.val is None:
            return None
        slices = []
        for i in range(self.x.rank):
            begin_val = self.begin.val[i]
            if begin_val < 0:
                if is_symbolic(self.x.shape[i]):
                    return None
                begin_val += self.x.shape[i]
            if self.size.val[i] > 0:
                slices.append(slice(begin_val, begin_val + self.size.val[i]))
            else:
                slices.append(slice(begin_val, None, None))
        return self.x.val[tuple(slices)]
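A numpy illustration of the ``size`` semantics, including ``-1`` (values are hypothetical):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
begin, size = [0, 1, 1], [-1, 2, 3]
# size -1 keeps everything from begin onward in that dimension.
slices = tuple(
    slice(b, None) if s == -1 else slice(b, b + s)
    for b, s in zip(begin, size)
)
print(x[slices].shape)   # (2, 2, 3)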
class slice_by_index(Operation):
    """
    Method for numpy style indexing and slicing.
    With a tensor ``x``, this method achieves the following:
    
    ``result = x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...]``

    Note: This method does not support pure indexing. You would need to do a 
    squeeze if indexing is intended.

    Parameters
    ----------
    x: tensor<*?, T> (Required)
        * Input tensor
    begin: tensor<[rank(x)], i32> (Required)
        * Starting index for the dimension of slicing.
    end: tensor<[rank(x)], i32> (Required)
        * Ending index for the dimension of slicing.
    stride: tensor<[rank(x)], i32> (Optional)
        * Default is all ``1``.
        * Stride for the dimension of slicing.
    begin_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``begin_mask[i]==True``, neglect ``begin[i]``, and set ``begin[i]`` to ``0``.
    end_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``end_mask[i]==True``, neglect ``end[i]``, and set ``end[i]`` to ``x.shape[i]``.
    squeeze_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``squeeze_mask[i]==True``, neglect ``end[i]``, and do a pure index at ``begin[i]``.

    Returns
    -------
    tensor<\*?, T>
        - Scalar or tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool

    """

    input_spec = InputSpec(
        x=TensorInputType(),
        begin=IntTensorInputType(),
        end=IntTensorInputType(),
        stride=IntTensorInputType(const=True, optional=True),
        begin_mask=BoolTensorInputType(const=True, optional=True),
        end_mask=BoolTensorInputType(const=True, optional=True),
        squeeze_mask=BoolTensorInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            stride=None,
            begin_mask=None,
            end_mask=None,
            squeeze_mask=None,
        )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):

        # get tensor and set default value
        begin = self.begin.val
        end = self.end.val
        x_rank = self.x.rank
        stride = self.stride.val if self.stride is not None else [1] * x_rank
        begin_mask = (self.begin_mask.val
                      if self.begin_mask is not None else [False] * x_rank)
        end_mask = (self.end_mask.val
                    if self.end_mask is not None else [False] * x_rank)
        squeeze_mask = (self.squeeze_mask.val
                        if self.squeeze_mask is not None else [False] * x_rank)

        # solve shape
        x_shape = self.x.shape
        ret_shape = solve_slice_by_index_shape(x_shape, begin, end, stride,
                                               begin_mask, end_mask,
                                               squeeze_mask)

        if len(ret_shape) == 0:
            # Scalar case.
            return self.x.dtype
        else:
            return types.tensor(self.x.dtype, tuple(ret_shape))

    def value_inference(self):
        if self.x.sym_val is None or self.begin.val is None or self.end.val is None:
            return None
        begin = [int(i) for i in list(self.begin.val[:])]
        end = [int(i) for i in list(self.end.val[:])]
        stride = [1] * self.x.rank if self.stride is None else self.stride.val
        begin_mask = ([False] * self.x.rank
                      if self.begin_mask is None else self.begin_mask.val)
        end_mask = ([False] * self.x.rank
                    if self.end_mask is None else self.end_mask.val)
        squeeze_mask = ([False] * self.x.rank if self.squeeze_mask is None else
                        self.squeeze_mask.val)

        slices = []
        for idx, mask in enumerate(begin_mask):
            if mask:
                begin[idx] = None
        for idx, mask in enumerate(end_mask):
            if mask:
                end[idx] = None
        squeeze_axes = []
        for idx, mask in enumerate(squeeze_mask):
            if mask:
                end[idx] = None
                # Slice out only one element by setting the stride to INT32_MAX.
                stride[idx] = 2147483647
                squeeze_axes.append(idx)
        for idx in range(self.x.rank):
            slices.append(slice(begin[idx], end[idx], stride[idx]))

        slices = tuple(slices)
        res = self.x.sym_val[slices]

        # remove squeezed axes
        if len(squeeze_axes) > 0:
            if len(squeeze_axes) == len(res.shape):
                if len(res) == 0:
                    logging.warning("%s seems to be a 0 sized tensor",
                                    self.name)
                    return np.array([])
                res = res.tolist()[0]
                if is_symbolic(res):
                    return res
                elif self.x.dtype == types.int32 or self.x.dtype == types.int64:
                    res = np.int32(res)
                elif self.x.dtype == types.float or self.x.dtype == types.double:
                    res = np.float32(res)
                else:
                    raise ValueError("Unable to convert type {}".format(
                        self.x.sym_val.dtype))
            else:
                res = np.squeeze(res, axis=tuple(squeeze_axes))
        return res
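The three masks map onto the components of a Python ``slice``; for instance, ``begin_mask`` drops the start bound and ``end_mask`` drops the stop bound, exactly as the mask handling above constructs them:

import numpy as np

x = np.arange(16).reshape(4, 4)
# begin=[1, 1], end=[3, 3], begin_mask=[True, False], end_mask=[False, True]
# reduces to x[:3, 1:].
print(x[slice(None, 3), slice(1, None)])
# [[ 1  2  3]
#  [ 5  6  7]
#  [ 9 10 11]]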
class reshape(Operation):
    """
    Return a tensor that has the same values as ``x`` with shape ``shape``.
    ``shape`` must have the same volume (number of elements) as ``x``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)

        * An n-D tensor or a scalar.
        * If ``x`` is fixed rank (and possibly contains symbolic dimension),
          shape may contain elements that are not positive integers (see below).
        * If ``x`` is variadic rank, shape can only contain positive integers.

    shape: tensor<[K], i32> (Required)

        A 1-D tensor, with elements from the following:

            * Positive integers.
            * Symbols: All but one symbol in shape must be present in ``x.shape``.
              The new symbol that is not present in ``x.shape`` represents a dimension
              such that the total size remains constant. Symbols are illegal
              if ``x`` is variadic rank.
            * ``-1``: ``-1`` introduces a new symbol (see Symbols). Therefore, ``-1`` is
              allowed if all symbols in the shape appear in ``x.shape``. ``-1`` is illegal
              if ``x`` is variadic rank.
            * ``0``: If ``K == rank(x)`` then ``0`` means inheriting from the corresponding
              dimension in ``x.shape``. ``0`` is illegal if ``x`` is variadic rank.

    Returns
    -------
    tensor<\*?, T>
        * Tensor with shape determined by the input shape.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        shape=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # We can't infer any shape if shape has variable length.
            return types.tensor(self.x.dtype, (get_new_variadic_symbol(), ))

        # shape has fixed length here.
        if self.shape.sym_val is None:
            shape = tuple(
                [get_new_symbol() for _ in range(self.shape.shape[0])])
            return types.tensor(self.x.dtype, shape)
        t, _ = self._get_type_val()
        return t

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        _, val = self._get_type_val()
        return val

    def _get_type_val(self):
        x_type = self.x.dtype
        x_shape = self.x.shape
        x_vol = np.prod(x_shape)
        # shape is const, and thus sym_val is not None
        sym_shape = self.shape.sym_val
        sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape]
        try:
            ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape)
        except Exception:
            # fall back to the symbolic shape if the volume constraint
            # cannot be resolved
            ret_shape = sym_shape
        ret_val = None
        if self.x.val is not None and all(
                isscalar(a) and not is_symbolic(a) for a in ret_shape):
            ret_val = reshape_with_symbol(self.x.val, ret_shape)
        return types.tensor(x_type, tuple(ret_shape)), ret_val

    @staticmethod
    def enforce_volumetric_constraint(left_volume, inshape):
        left_symbols = set()
        if is_symbolic(left_volume):
            left_symbols = left_volume.free_symbols
        # Generally, we want to solve for the right-hand side in terms of the
        # left, but a full symbolic solve is awkward; handle common cases below.
        shape = list(inshape)

        # Example: input tensor shape [4, 3, 2] with reshape [0, -1] yields
        # output tensor shape [4, 6] (the 0 inherits the input dimension).
        if shape.count(-1) > 1:
            raise ValueError(
                "Reshape op supports only one dimension to be -1. Given {}".
                format(shape.count(-1)))

        infer_dim_index = shape.index(-1) if -1 in shape else None
        right_volume = 1
        for i in shape:
            if i != -1:
                right_volume = right_volume * i

        if infer_dim_index is not None:  # `is not None`: the -1 may sit at index 0
            shape[infer_dim_index] = left_volume // right_volume

        if not is_symbolic(right_volume):
            return shape

        constraints = [left_volume - right_volume]
        solve_for = [s for s in shape if is_symbolic(s)]

        for rightsym in solve_for:
            sol = sm.solve(constraints, [rightsym], dict=True)
            if not isinstance(sol, list):
                sol = [sol]
            # look for an acceptable solution
            for s in sol:
                if 0 in s.values():
                    continue
                for i in range(len(shape)):
                    if shape[i] in s:
                        v = s[shape[i]]
                        if len(v.free_symbols - left_symbols) > 0:
                            continue
                        try:
                            shape[i] = int(v)
                        except Exception:
                            # the solution is still symbolic; keep it as-is
                            shape[i] = v
        return shape
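
When everything is concrete, the ``0``/``-1`` resolution above reduces to simple arithmetic. A minimal sketch of that rule with a hypothetical helper (``resolve_shape`` is illustrative only, not part of the op):

import numpy as np

def resolve_shape(x_shape, target):
    # 0 inherits the corresponding input dimension (ranks assumed compatible)
    shape = [x_shape[i] if d == 0 else d for i, d in enumerate(target)]
    vol = int(np.prod(x_shape))
    known = int(np.prod([d for d in shape if d != -1]))
    # -1 absorbs whatever is left so the total volume is preserved
    return [vol // known if d == -1 else d for d in shape]

print(resolve_shape((4, 3, 2), [0, -1]))  # [4, 6], as in the comment above
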
Example #26
0
class pad(Operation):
    """
    Pad a tensor.

    Parameters
    ----------
    
    x: tensor<[\*D_in],T>  (Required)

    pad: tensor<[2\*N],i32> (Required)
        ``N <= D_in``. Last ``N`` dimensions of ``x`` are padded as follows:
        
        * For each dimension ``i`` of ``x`` with ``i >= D_in - N``:
            * pad ``pad[2*i]`` elements before ``x[..,i,..]``
            * pad ``pad[2*i+1]`` elements after ``x[..,i,..]``
        * If mode is "reflect" then ``pad[2*i]`` and ``pad[2*i+1]`` can be at
          most ``D[i]-1``.
        * If mode is "replicate" then ``pad[2*i]`` and ``pad[2*i+1]`` can be
          at most ``D[i]``.

    mode: const<str> (Optional)
        * Default to ``constant``.
        * Must be one of the following values:
          ``constant``, ``reflect``, or ``replicate``.

    constant_val: const<T> (Optional)
        * Default to ``0``.
        * Constant value to pad. Ignored if ``mode != constant``.

    Returns
    -------
    tensor<[\*D_out],T>
        * Tensor with same type as the input.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        pad=IntTensorInputType(),
        mode=StringInputType(const=True, optional=True),
        constant_val=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            mode="constant",
            constant_val=0.,
            )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        in_shape = self.x.shape
        ret_shape = list(in_shape)
        pad = self.pad
        if len(pad.shape) != 1:
            raise ValueError("Pad should be a 1D tensor!")
        if self.mode is not None and self.mode.val not in {'constant', 'reflect', 'replicate'}:
            raise ValueError("Pad mode should be one of {'constant', 'reflect', 'replicate'}")

        if pad.val is None:
            # pad amounts are unknown at compile time, so each padded
            # dimension becomes a fresh symbol
            for i in range(self.pad.shape[0] // 2):
                ret_shape[-self.pad.shape[0] // 2 + i] = get_new_symbol()
        else:
            pad = pad.val
            pad = pad.copy()

            if len(pad) % 2 != 0:
                raise ValueError("Number of elements in the argument Pad must be divisible by 2.")

            pad = pad.reshape(-1, 2)

            if pad.shape[0] > len(ret_shape):
                raise ValueError("Number of dimensions specified through pad must be less than or equal to the rank of input x")

            for i in range(len(pad)):
                ret_shape[-len(pad) + i] = ret_shape[-len(pad) + i] + pad[i][0] + pad[i][1]

        return types.tensor(self.x.dtype, tuple(ret_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        # NumPy `edge` mode is equivalent to `replicate` mode of PyTorch and CoreML
        mode = "edge" if self.mode.val == "replicate" else self.mode.val
        pad_val = self.pad.val

        if pad_val is None:
            return None

        if len(self.x.val.shape) > (pad_val.shape[0] // 2):
            updated_pad = np.zeros(len(self.x.val.shape) * 2)
            updated_pad[-pad_val.shape[0] :] = pad_val
            pad_val = updated_pad
        pad_val = pad_val.reshape(-1, 2).astype(np.int32)
        if mode == "constant":
            return np.pad(
                self.x.val, pad_val, mode, constant_values=self.constant_val.val
            )
        # NumPy does not support non-constant mode and constant_values argument
        return np.pad(self.x.val, pad_val, mode)
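
The output-shape rule in ``type_inference`` (each padded dimension grows by ``pad[2*i] + pad[2*i+1]``) can be checked directly against NumPy. A small sketch with made-up values:

import numpy as np

x = np.ones((2, 3, 4), dtype=np.float32)
pad = np.array([1, 1, 2, 0], dtype=np.int32)  # pads the last N = 2 dimensions

pad_pairs = pad.reshape(-1, 2)
full = np.zeros((x.ndim, 2), dtype=np.int32)
full[-len(pad_pairs):] = pad_pairs  # leading dimensions stay unpadded

out = np.pad(x, full, mode="constant", constant_values=0.0)
print(out.shape)  # (2, 5, 6): 3 + 1 + 1 and 4 + 2 + 0
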
Example #27
0
class random_uniform(RandomDistribution):
    r"""
    Returns a tensor with the specified shape with random values from a uniform
    distribution. Samples are uniformly distributed over the half-open interval
    ``[low, high)`` (includes low, but excludes high).
    
    .. math::
       p(x) = \frac{1}{high - low}
    
    For a real number :math:`x`.
    
    When ``high == low``, values of ``low`` will be returned. If ``high < low``,
    the results are officially undefined and may eventually raise an error.
    
    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    low: const<f32> (Optional)
        * Lower boundary of the output interval (inclusive). Defaults to ``0.0``.
    high: const<f32> (Optional)
        * Upper boundary of the output interval (exclusive). Defaults to ``1.0``.
    seed: const<i32> (Optional)
        * Seed to create a reproducible sequence of values across multiple invokes.
    
    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_bernoulli, random_normal
    """
    
    input_spec = (
        InputSpec(
            shape=IntTensorInputType(),
            low=FloatInputType(const=True, optional=True),
            high=FloatInputType(const=True, optional=True),
            seed=IntInputType(const=True, optional=True),
        )
        + RandomDistribution.input_spec
    )

    def default_inputs(self):
        return super().default_inputs() + \
            DefaultInputs(
                low=0.,
                high=1.,
                seed=-1,
            )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if self.low.dtype != self.high.dtype:
            raise ValueError("Incompatible primitive types in random_uniform operation")
        self.out_dtype = self.low.dtype
        return super().type_inference()
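
For intuition, the half-open sampling semantics documented above match NumPy's uniform draw. A hedged NumPy stand-in (an illustration of the documented behavior, not the runtime kernel):

import numpy as np

rng = np.random.default_rng(seed=42)  # a seed yields a reproducible sequence
shape, low, high = (2, 3), -1.0, 1.0
sample = low + (high - low) * rng.random(shape, dtype=np.float32)
assert np.all((sample >= low) & (sample < high))  # half-open [low, high)
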