Example #1
class thresholded_relu(Operation):
    """
    Return ``x`` if ``x >= alpha``, otherwise return ``0``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Optional)
        * Default is ``1``.

    Returns
    -------
    tensor<\*?, T>
        * A tensor of the same shape and type as ``x``.
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(alpha=1., )

    def __init__(self, **kwargs):
        super(thresholded_relu, self).__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        y = np.copy(self.x.val)  # copy so the input's cached value is not mutated
        y[y < self.alpha.val] = 0
        return y
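
A minimal NumPy sketch of the same elementwise rule, on illustrative inputs (not part of the op itself):

import numpy as np

x = np.array([-2.0, 0.5, 1.0, 3.0], dtype=np.float32)
alpha = 1.0
y = np.where(x >= alpha, x, 0.0)  # keep x where x >= alpha, zero elsewhere
print(y)  # [0. 0. 1. 3.]
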
Example #2
class threshold(Operation):
    """
    Set a lower bound ``alpha`` to the values in the input ``x``, element-wise.
    Any values less than ``alpha`` are set to ``alpha``.

    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    alpha: const fp32 (Required)

    Returns
    -------
    tensor<[\*d], T>
        * A tensor of the same shape as ``x``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.maximum(self.x.val, self.alpha.val)
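
The same lower-bounding rule as a standalone NumPy sketch (values chosen for illustration):

import numpy as np

x = np.array([-1.0, 0.0, 2.5])
alpha = 0.5
print(np.maximum(x, alpha))  # [0.5 0.5 2.5]
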
Example #3
class elementwise_unary(Operation):
    """
    Elementwise unary op superclass.
    """
    input_spec = InputSpec(x=ScalarOrTensorInputType(), )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type
Example #4
class sigmoid_hard(Operation):
    """
    Return ``min( max( alpha * x + beta, 0 ), 1 )`` elementwise.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Optional)
        * Default is ``0.2``.
    beta: const fp32 (Optional)
        * Default is ``0.5``.

    Returns
    -------
    tensor<\*?, fp32>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True, optional=True),
        beta=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            alpha=0.2,
            beta=0.5,
            )

    def __init__(self, **kwargs):
        super(sigmoid_hard, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.minimum(
            np.maximum((self.alpha.val * self.x.val) + self.beta.val, 0), 1
        )

    def type_inference(self):
        return self.x.sym_type
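
A standalone NumPy check of the formula above, using the documented defaults:

import numpy as np

x = np.array([-5.0, -2.5, 0.0, 2.5, 5.0])
alpha, beta = 0.2, 0.5  # the documented defaults
y = np.minimum(np.maximum(alpha * x + beta, 0.0), 1.0)
print(y)  # [0.  0.  0.5 1.  1. ]
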
Example #5
class scaled_tanh(Operation):
    """
    Return ``alpha * tanh(beta * x)`` elementwise.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Input range is ``(-inf, inf)``.
    alpha: const fp32 (Optional)
        * Default is ``1``.
    beta: const fp32 (Optional)
        * Default is ``1``.

    Returns
    -------
    tensor<\*?, fp32>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True, optional=True),
        beta=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            alpha=1,
            beta=1,
        )

    def __init__(self, **kwargs):
        super(scaled_tanh, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        return self.alpha.val * np.tanh(self.x.val * self.beta.val)

    def type_inference(self):
        return self.x.sym_type
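
The formula as a quick NumPy sketch (illustrative constants):

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
alpha, beta = 2.0, 0.5
print(alpha * np.tanh(beta * x))  # ~[-0.924  0.  0.924]
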
Example #6
class resample(_resample_iOS15):
    """
    iOS 16 version of ``resample``, which additionally supports float16 coordinates.
    """
    input_spec = InputSpec(
        x=TensorInputType(),
        coordinates=ScalarOrTensorInputType(type_domain=(np.int32, np.float32, np.float16)),
        sampling_mode=StringInputType(const=True),
        padding_mode=StringInputType(const=True),
        padding_value=FloatInputType(const=True),
        coordinates_mode=StringInputType(const=True),
        align_corners=BoolInputType(const=True),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        return super().type_inference()
Example #7
class pixel_unshuffle(Operation):
    """
    Rearrange elements in a tensor from the spatial dimensions into depth (channels).
    It is the inverse of ``pixel_shuffle`` and is equivalent to PyTorch's ``PixelUnshuffle``.

    Parameters
    ----------
    x: tensor<[n, C, H, W], T> (Required)
        * Input tensor of rank ``4``.
    downscale_factor: const<i32>
        * Factor by which to decrease the spatial resolution.

    Returns
    -------
    tensor<[n, C * f^2, H / f, W / f], T>
        * Where ``f`` is the downscale factor.

    Attributes
    ----------
    T: fp32

    References
    ----------
    `torch.nn.PixelUnshuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html>`_
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        downscale_factor=ScalarOrTensorInputType(const=True,
                                                 type_domain=(np.uint32, )),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_type = self.x.dtype
        n, c, h, w = self.x.shape
        f = self.downscale_factor.val
        # use floor division so the inferred dimensions stay integral
        ret_shape = (n, c * f * f, h // f, w // f)
        return types.tensor(x_type, ret_shape)
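
For intuition, a pure-NumPy sketch of the same rearrangement via reshape and transpose (the usual pixel-unshuffle trick, not the Core ML kernel itself):

import numpy as np

n, c, h, w, f = 1, 1, 4, 4, 2
x = np.arange(n * c * h * w, dtype=np.float32).reshape(n, c, h, w)
# split each spatial dim into (output, f), then fold the two f-blocks into channels
y = (x.reshape(n, c, h // f, f, w // f, f)
      .transpose(0, 1, 3, 5, 2, 4)
      .reshape(n, c * f * f, h // f, w // f))
print(y.shape)  # (1, 4, 2, 2)
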
Example #8
class leaky_relu(Operation):
    """
    If ``x >= 0``, return ``x`` elementwise; otherwise, return ``alpha * x`` elementwise.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Optional)
        * Default is ``0.01``.

    Returns
    -------
    tensor<\*?, fp32>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            alpha=0.01,
            )

    def __init__(self, **kwargs):
        super(leaky_relu, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        b = np.copy(self.x.val)
        b[b < 0] *= self.alpha.val
        return b

    def type_inference(self):
        return self.x.sym_type
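
A small NumPy illustration of the two branches:

import numpy as np

x = np.array([-4.0, -1.0, 0.0, 2.0])
alpha = 0.01
print(np.where(x >= 0, x, alpha * x))  # [-0.04 -0.01  0.    2.  ]
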
Example #9
class elu(Operation):
    """
    If ``x > 0`` return elementwise ``x``, otherwise return ``alpha * (e^x - 1)``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Optional)
        * Default is ``1``.

    Returns
    -------
    tensor<\*?, T>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            alpha=1.,
            )

    def __init__(self, **kwargs):
        super(elu, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        b = np.copy(self.x.val)
        b[b < 0] = self.alpha.val * (np.exp(b[b < 0]) - 1)
        return b

    def type_inference(self):
        return self.x.sym_type
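
The same rule as a standalone NumPy sketch:

import numpy as np

x = np.array([-1.0, 0.0, 2.0])
alpha = 1.0
print(np.where(x > 0, x, alpha * (np.exp(x) - 1.0)))  # ~[-0.632  0.  2.]
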
Example #10
class linear_activation(Operation):
    """
    Apply elementwise ``x * alpha + beta``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Required)
    beta: const fp32 (Optional)
        * Default is ``0``.

    Returns
    -------
    tensor<\*?, T>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True),
        beta=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            beta=0.,
            )

    def __init__(self, **kwargs):
        super(linear_activation, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        return self.alpha.val * self.x.val + self.beta.val

    def type_inference(self):
        return self.x.sym_type
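
A one-line NumPy illustration (arbitrary constants):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
alpha, beta = 2.0, 0.5
print(alpha * x + beta)  # [2.5 4.5 6.5]
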
Example #11
class rsqrt(Operation):
    """
    Return the reciprocal value of the square root of the input ``x``, element-wise.

    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    epsilon: const fp32 (Optional, default=1e-12)
        * This is a small constant that is added to the input, before applying the
          ``rsqrt`` function, for stability.
        * ``y = 1 / sqrt(x + epsilon)``.

    Returns
    -------
    tensor<[\*d], T>
        * A tensor of the same shape as ``x``.

    Attributes
    ----------
    T: fp16, fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        epsilon=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(epsilon=1e-12, )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        result = 1.0 / np.sqrt(self.x.val + self.epsilon.val)
        return _maintain_shape(self.x.val, result)
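
A quick NumPy check of ``y = 1 / sqrt(x + epsilon)``:

import numpy as np

x = np.array([0.25, 1.0, 4.0])
epsilon = 1e-12
print(1.0 / np.sqrt(x + epsilon))  # ~[2.  1.  0.5]
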
Example #12
class inverse(Operation):
    """
    Return the reciprocal value of the input ``x``, element-wise.

    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    epsilon: const fp32 (Optional, default=1e-4)
        * This is a small constant that is added to the input, before taking its
          inverse, for stability.
        * ``y = 1 / (x + epsilon)``.

    Returns
    -------
    tensor<[\*d], f32>
        * A tensor of the same shape as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        epsilon=FloatInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(epsilon=1e-4, )

    def __init__(self, **kwargs):
        super(inverse, self).__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.reciprocal(self.x.val + self.epsilon.val)
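
The same stabilized reciprocal as a NumPy one-liner:

import numpy as np

x = np.array([0.5, 2.0, -4.0])
epsilon = 1e-4
print(np.reciprocal(x + epsilon))  # ~[1.9996  0.5  -0.25]
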
Example #13
class clamped_relu(Operation):
    """
    If ``x >= 0`` return elementwise ``min(beta, x)``, otherwise return
    ``min(beta, alpha * x)``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    alpha: const fp32 (Required)
    beta: const fp32 (Required)

    Returns
    -------
    tensor<\*?, T>
        * A tensor of the same type and shape as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True),
        beta=FloatInputType(const=True),
    )

    def __init__(self, **kwargs):
        super(clamped_relu, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        x = np.minimum(np.maximum(self.x.val, 0), self.beta.val)
        y = np.minimum(
            np.minimum(self.x.val, 0) * self.alpha.val, self.beta.val)
        return x + y

    def type_inference(self):
        return self.x.sym_type
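
A NumPy sketch of the documented formula (illustrative constants; it agrees with the two-branch sum in ``value_inference`` whenever ``beta >= 0``):

import numpy as np

x = np.array([-2.0, -0.5, 1.0, 5.0])
alpha, beta = 0.1, 2.0
print(np.minimum(np.where(x >= 0, x, alpha * x), beta))  # [-0.2  -0.05  1.    2.  ]
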
Example #14
class clip(Operation):
    """
    Clip the values in the input ``x`` to ``[alpha, beta]``, element-wise.
    Any values less than ``alpha`` are set to ``alpha``, and any values greater
    than ``beta`` are set to ``beta``.
    
    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    alpha: const f32 (Required)
    beta: const f32 (Required)
    
    Returns
    -------
    tensor<[\*d], f32>
        * A tensor of the same shape as ``x``.
    
    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        alpha=FloatInputType(const=True),
        beta=FloatInputType(const=True),
    )

    def __init__(self, **kwargs):
        super(clip, self).__init__(**kwargs)

    def type_inference(self):
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.minimum(np.maximum(self.x.val, self.alpha.val),
                          self.beta.val)
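
NumPy's ``clip`` implements the same bound; a quick check:

import numpy as np

x = np.array([-3.0, 0.5, 7.0])
alpha, beta = -1.0, 2.0
print(np.clip(x, alpha, beta))  # [-1.   0.5  2. ]
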
Example #15
class shape(Operation):
    """
    Return a 1-dimensional tensor containing the shape of the input tensor.

    Parameters
    ----------
    x: tensor<[*?], T> (Required)
        * Input tensor.

    Returns
    -------
    tensor<K, i32>
        * Shape of the input tensor.
        * ``K = x.rank``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(x=ScalarOrTensorInputType())

    def __init__(self, **kwargs):
        super(shape, self).__init__(**kwargs)

    def type_inference(self):
        input_rank = self.x.rank
        return types.tensor(types.int32, tuple([input_rank]))

    def value_inference(self):
        if any_symbolic(self.x.shape):
            # convert elements in shape to int32
            res = [x if is_symbolic(x) else np.int32(x) for x in self.x.shape]
            return np.array(res)
        else:
            return np.array(self.x.shape).astype(np.int32)
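
A standalone sketch of the non-symbolic branch of ``value_inference``:

import numpy as np

x = np.zeros((2, 3, 5), dtype=np.float32)
print(np.array(x.shape).astype(np.int32))  # [2 3 5]
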
Example #16
class cast(Operation):
    """
    Cast the input ``x`` to the new type ``dtype``.

    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    dtype: const str (Required)
        * Can be one of the following types: ``int32``, ``int64``, ``fp16``, ``fp32``, ``fp64``, or ``bool``.

    Returns
    -------
    tensor<[\*d], dtype>
        * A tensor of the same shape as ``x``, with type ``dtype``.

    Attributes
    ----------
    T: i32, i64, fp16, fp32, fp64, bool.
    """

    input_spec = InputSpec(x=ScalarOrTensorInputType(),
                           dtype=StringInputType(const=True))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        type_map = {
            "int32": types.int32,
            "int64": types.int64,
            "fp16": types.fp16,
            "fp32": types.fp32,
            "fp64": types.fp64,
            "bool": types.bool,
        }

        if self.dtype.val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), self.dtype.val))

        if not types.is_tensor(self.x.sym_type):
            return type_map[self.dtype.val]

        ret_shape = self.x.shape
        return types.tensor(type_map[self.dtype.val], ret_shape)

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        return self.get_cast_value(self.x, self.dtype.val)

    @staticmethod
    def get_cast_value(input_var, dtype_val):
        type_map = {
            "int32": np.int32,
            "int64": np.int64,
            "fp16": np.float16,
            "fp32": np.float32,
            "fp64": np.float64,
            "bool": np.bool,
        }

        if dtype_val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), dtype_val))

        if input_var.val is None:
            if input_var.sym_val is not None and not is_symbolic(
                    input_var.sym_val) and len(input_var.sym_val.shape) == 1:
                result = [
                    np.array(val).astype(dtype=type_map[dtype_val]).item()
                    if not is_symbolic(val) else val
                    for val in input_var.sym_val
                ]
                return np.array(result)
            return None

        if not types.is_tensor(input_var.sym_type):
            return input_var.val.astype(dtype=type_map[dtype_val])
        else:
            return np.array(input_var.val).astype(dtype=type_map[dtype_val])
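
For a concrete, non-symbolic input the cast reduces to a plain ``astype``; a quick illustration:

import numpy as np

x = np.array([1.7, -2.3, 3.9])
print(x.astype(np.int32))  # [ 1 -2  3]  (truncates toward zero)
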
Example #17
class reshape(Operation):
    """
    Return a tensor that has the same values as ``x`` with shape ``shape``.
    ``shape`` must have the same volume (number of elements) as ``x``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)

        * An n-D tensor or a scalar.
        * If ``x`` is of fixed rank (and possibly contains symbolic dimensions),
          ``shape`` may contain elements that are not positive integers (see below).
        * If ``x`` is of variadic rank, ``shape`` can only contain positive integers.

    shape: tensor<[K], i32> (Required)

        A 1-D tensor, with elements from the following:

            * Positive integers.
            * Symbols: All but one symbol in ``shape`` must be present in ``x.shape``.
              The new symbol that is not present in ``x.shape`` represents a dimension
              sized such that the total volume remains constant. Symbols are illegal
              if ``x`` is of variadic rank.
            * ``-1``: ``-1`` introduces a new symbol (see Symbols). Therefore, ``-1`` is
              allowed if all symbols in the shape appear in ``x.shape``. ``-1`` is illegal
              if ``x`` is variadic rank.
            * ``0``: If ``K == rank(x)`` then ``0`` means inheriting from the corresponding
              dimension in ``x.shape``. ``0`` is illegal if ``x`` is variadic rank.

    Returns
    -------
    tensor<\*?, T>
        * Tensor with shape determined by the input shape.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        shape=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # We can't infer any shape if shape has variable length.
            return types.tensor(self.x.dtype, (get_new_variadic_symbol(), ))

        # shape has fixed length here.
        if self.shape.sym_val is None:
            shape = tuple(
                [get_new_symbol() for _ in range(self.shape.shape[0])])
            return types.tensor(self.x.dtype, shape)
        t, _ = self._get_type_val()
        return t

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        _, val = self._get_type_val()
        return val

    def _get_type_val(self):
        x_type = self.x.dtype
        x_shape = self.x.shape
        x_vol = np.prod(x_shape)
        # shape is const, and thus sym_val is not None
        sym_shape = self.shape.sym_val
        sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape]
        try:
            ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape)
        except Exception:
            ret_shape = sym_shape
        ret_val = None
        if self.x.val is not None and all(
                isscalar(a) and not is_symbolic(a) for a in ret_shape):
            ret_val = reshape_with_symbol(self.x.val, ret_shape)
        return types.tensor(x_type, tuple(ret_shape)), ret_val

    @staticmethod
    def enforce_volumetric_constraint(left_volume, inshape):
        left_symbols = set()
        if is_symbolic(left_volume):
            left_symbols = left_volume.free_symbols
        # Generally, we want to solve for right in terms of left. But this
        # is kinda annoying actually.
        shape = list(inshape)

        # Handling when reshape is given 0 instead of actual input
        # input tensor shape: [4, 3, 2], reshape:[0, -1], output tensor shape: [4, 6]
        if shape.count(-1) > 1:
            raise ValueError(
                "Reshape op supports only one dimension to be -1. Given {}".
                format(shape.count(-1)))

        infer_dim_index = shape.index(-1) if -1 in shape else None
        right_volume = 1
        for i in shape:
            if i != -1:
                right_volume = right_volume * i

        if infer_dim_index is not None:  # -1 may legitimately sit at index 0
            shape[infer_dim_index] = left_volume // right_volume

        if not is_symbolic(right_volume):
            return shape

        constraints = [left_volume - right_volume]
        solve_for = [s for s in shape if is_symbolic(s)]

        for rightsym in solve_for:
            sol = sm.solve(constraints, [rightsym], dict=True)
            if not isinstance(sol, list):
                sol = [sol]
            # look for an acceptable solution
            for s in sol:
                if 0 in s.values():
                    continue
                for i in range(len(shape)):
                    if shape[i] in s:
                        v = s[shape[i]]
                        if len(v.free_symbols - left_symbols) > 0:
                            continue
                        try:
                            shape[i] = int(v)
                        except Exception:
                            shape[i] = v
        return shape
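
A standalone NumPy sketch of the ``0``/``-1`` semantics described above (``resolve_shape`` is a hypothetical helper; NumPy resolves ``-1`` itself but has no inherit-from-input ``0``):

import numpy as np

def resolve_shape(x_shape, shape):
    # 0 inherits the corresponding input dimension (requires K == rank(x))
    return [x_shape[i] if d == 0 else d for i, d in enumerate(shape)]

x = np.arange(24).reshape(4, 3, 2)
print(x.reshape(resolve_shape(x.shape, [0, -1])).shape)  # (4, 6)
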
Example #18
class expand_dims(Operation):
    """
    Insert a singleton dimension into a 1-D or higher tensor at each axis in ``axes``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Scalar or tensor.
    axes: const tensor<[K], i32> (Required)
        * ``K`` is the number of dimensions expanded.
        * A singleton dimension is inserted at each index given in ``axes``.
        * Negative values index from the end: ``-d-1 <= axis <= d``,
          where ``d`` is the rank of ``x``.

    Returns
    -------
    tensor<\*(rank(x)+K), T>
        * Same type as the input ``x`` with rank ``rank(x)+K``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        axes=IntTensorInputType(const=True),
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def type_inference(self):
        x_rank = self.x.rank
        x_type = self.x.dtype
        x_shape = list(self.x.shape)
        axes = self.axes.val
        out_rank = x_rank + len(axes)

        for axis in axes:
            if axis <= -out_rank - 1 or axis >= out_rank:
                msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}'
                raise IndexError(
                    msg.format(axis, self.op_type, self.name, self.x.shape))

        ret_shape = x_shape
        axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
        for axis in axes:
            ret_shape.insert(axis, 1)

        return types.tensor(x_type, tuple(ret_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        axes = self.axes.val
        out_rank = self.x.rank + len(axes)

        for axis in axes:
            if axis <= -out_rank - 1 or axis >= out_rank:
                msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}'
                raise IndexError(
                    msg.format(axis, self.op_type, self.name, self.x.shape))

        axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
        ret_shape = list(self.x.shape)
        for axis in axes:
            ret_shape.insert(axis, 1)
        return np.reshape(self.x.val, ret_shape)
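
A standalone sketch of the axis normalization and insertion performed above (illustrative axes):

import numpy as np

x = np.zeros((2, 3))
axes = (0, -1)
out_rank = x.ndim + len(axes)                              # 4
norm = sorted(a + out_rank if a < 0 else a for a in axes)  # [0, 3]
shape = list(x.shape)
for a in norm:
    shape.insert(a, 1)
print(np.reshape(x, shape).shape)  # (1, 2, 3, 1)
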
Example #19
class cast(Operation):
    """
    Cast the input ``x`` to the new type ``dtype``.
    
    Parameters
    ----------
    x: tensor<[\*d], T> (Required)
    dtype: const str (Required)
        * Can be one of the following types: ``int32``, ``int64``, ``fp32``, ``fp64``, or ``bool``.
    
    Returns
    -------
    tensor<[\*d], dtype>
        * A tensor of the same shape as ``x``, with type ``dtype``.
    
    Attributes
    ----------
    T: i32, i64, fp32, fp64, bool.
    """

    input_spec = InputSpec(x=ScalarOrTensorInputType(),
                           dtype=StringInputType(const=True))

    def __init__(self, **kwargs):
        super(cast, self).__init__(**kwargs)

    def type_inference(self):
        type_map = {
            "int32": types.int32,
            "int64": types.int64,
            "fp32": types.fp32,
            "fp64": types.fp64,
            "bool": types.bool,
        }

        if self.dtype.val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), self.dtype.val))

        if not types.is_tensor(self.x.sym_type):
            return type_map[self.dtype.val]

        ret_shape = self.x.shape
        return types.tensor(type_map[self.dtype.val], ret_shape)

    @precondition(allow=VALUE)
    def value_inference(self):
        type_map = {
            "int32": np.int32,
            "int64": np.int64,
            "fp32": np.float32,
            "fp64": np.float64,
            "bool": np.bool,
        }

        if self.dtype.val not in type_map.keys():
            raise NotImplementedError(
                "Parameter dtype of the cast operation can be one of the {}. "
                "Provided {}".format(type_map.keys(), self.dtype.val))

        if not types.is_tensor(self.x.sym_type):
            return self.x.val.astype(dtype=type_map[self.dtype.val])
        else:
            return np.array(self.x.val).astype(dtype=type_map[self.dtype.val])
Example #20
class gelu(Operation):
    """
    Return the elementwise Gaussian error linear unit activation function for ``x``.
    
    You can use ``EXACT``, ``TANH_APPROXIMATION``, or ``SIGMOID_APPROXIMATION`` values
    based on the following formulas:
    
    * ``EXACT``:
    
    .. math::
       f(x) = 0.5x\\left ( 1+\\rm{erf}\\left ( \\frac{x}{\\sqrt{2}} \\right ) \\right )
    
    * ``TANH_APPROXIMATION``:
    
    .. math::
       f(x) = 0.5x\\left ( 1+\\rm{tanh}\\left ( \\sqrt{2/\\pi}\\left ( x + 0.044715x^3 \\right ) \\right ) \\right )
    
    * ``SIGMOID_APPROXIMATION``:
    
    .. math::
       f(x) = x*\\rm{sigmoid}(1.702x)

    
    Parameters
    ----------
    x: tensor<\*?, T> (Required)
    
    mode: const str (Optional)
        * One of ``'EXACT'``, ``'TANH_APPROXIMATION'``, or ``'SIGMOID_APPROXIMATION'``.
        * Default is ``'EXACT'``.

    Returns
    -------
    tensor<\*?, T>
        * A tensor of the same shape and type as ``x``.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=ScalarOrTensorInputType(),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(mode="EXACT", )

    def __init__(self, **kwargs):
        super(gelu, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        if self.mode.val == "TANH_APPROXIMATION":
            a = np.sqrt(
                2 / np.pi) * (self.x.val + 0.044715 * np.power(self.x.val, 3))
            return 0.5 * self.x.val * (1 + np.tanh(a))
        elif self.mode.val == "SIGMOID_APPROXIMATION":
            return self.x.val * (1 / (1 + np.exp(-(1.702 * self.x.val))))
        else:
            square_root_of_2 = np.sqrt(2)
            vfunc = np.vectorize(lambda x: 0.5 * x *
                                 (1 + math.erf(x / square_root_of_2)))
            return vfunc(self.x.val)

    def type_inference(self):
        allowed_values = {
            "EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"
        }
        if self.mode.val not in allowed_values:
            msg = '"gelu" op: unrecognized value of mode: "{}". Allowed values are {}'
            raise ValueError(msg.format(self.mode.val, allowed_values))
        return self.x.sym_type
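
A quick NumPy comparison of the exact form against the tanh approximation, on illustrative inputs:

import math

import numpy as np

x = np.linspace(-2.0, 2.0, 5)
exact = 0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))
tanh_approx = 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x ** 3)))
print(np.max(np.abs(exact - tanh_approx)))  # on the order of 1e-4 over this range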