Example #1
    def __call__(self, x, training, mask):
        # Sequence length
        seq_len = x.shape[1]

        # Embedding
        with flow.scope.namespace("Encoder_Embedding"):
            x = EmbeddingLayer(x,
                               vocab_size=self.vocab_size,
                               embedding_size=self.d_model)
            d_model_constant = flow.constant_scalar(value=self.d_model,
                                                    dtype=flow.float32,
                                                    name="d_model_constant")
            x *= flow.math.sqrt(d_model_constant)

        # Position encoding
        with flow.scope.namespace("Encoder_Position_encoding"):
            # equal to self.pos_encoding[:, :seq_len, :]
            pos_encoding = flow.slice(self.pos_encoding,
                                      begin=[None, 0, None],
                                      size=[None, seq_len, None])
            x += pos_encoding
            if training:
                x = flow.nn.dropout(x, rate=self.rate)

        # Encoding
        with flow.scope.namespace("Encoder_Multi_encoder"):
            for i in range(self.num_layers):
                with flow.scope.namespace('encoder_{}'.format(i)):
                    x = self.enc_layers[i](x, training, mask)

        return x
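
For orientation, a hedged sketch of how this encoder `__call__` might be driven; the constructor arguments and helper names here are assumptions, not taken from the source:

# Hypothetical usage (all names and values are illustrative):
# encoder = Encoder(num_layers=4, d_model=128, vocab_size=8000, rate=0.1)
# mask = create_padding_mask(token_ids)               # see Example #5 below
# out = encoder(token_ids, training=True, mask=mask)  # (batch, seq_len, d_model)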
Example #2
def loss_function(real, pred):
    mask = flow.math.not_equal(
        real, flow.constant_scalar(0, dtype=flow.int64, name="zero_constant"))

    real = flow.cast(real, dtype=flow.int32, name="cast_to_int32")
    loss_ = flow.nn.sparse_softmax_cross_entropy_with_logits(labels=real,
                                                             logits=pred)

    mask = flow.cast(mask, dtype=loss_.dtype)
    loss_ *= mask

    return flow.math.reduce_mean(loss_)
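
A minimal numpy sketch of the masking arithmetic above, with illustrative token ids and loss values (note that reduce_mean averages over every position, padded ones included):

import numpy as np

real = np.array([3, 7, 0, 0])                      # 0 marks padding positions
per_token_loss = np.array([0.2, 0.5, 1.3, 1.1])    # illustrative values

mask = (real != 0).astype(per_token_loss.dtype)    # [1., 1., 0., 0.]
masked = per_token_loss * mask                     # [0.2, 0.5, 0., 0.]
print(masked.mean())                               # 0.175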
Example #3
def reduce_any(
    x: remote_blob_util.BlobDef,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    name = _gen_unique_name_if_need(name, "ReduceAny_")
    axis = _check_axis(axis, x.shape)
    if len(axis) == 0:
        return flow.math.not_equal(
            x, flow.constant_scalar(value=0.0, dtype=x.dtype))
    return _do_reduce(x, name, "reduce_any", keepdims, axis)
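
reduce_any ships without the docstring example that reduce_all carries (see Example #4); a matching sketch, assuming the same job-function setup:

import oneflow as flow
import numpy as np
import oneflow.typing as tp


@flow.global_function()
def reduce_any_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int8)
) -> tp.Numpy:
    return flow.math.reduce_any(x, axis=1, keepdims=True)


x = np.array([[1, 0, 0], [0, 0, 0], [1, 1, 1]]).astype(np.int8)
out = reduce_any_Job(x)

# output [[1]
#         [0]
#         [1]]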
Example #4
def reduce_all(
    x: remote_blob_util.BlobDef,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """This operator computes the `logical and` of input Blob along the specified axis

    Args:
        x (remote_blob_util.BlobDef): A Blob
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the logical and value is computed. Defaults to None.
        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        remote_blob_util.BlobDef: The result of logical and value on the specified axis of input Blob
    
    Note: 

        The input Blob dtype is int8
    
    For example: 

    .. code-block:: python 

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp


        @flow.global_function()
        def reduce_all_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int8)
        ) -> tp.Numpy:
            return flow.math.reduce_all(x, axis=1, keepdims=True)


        x = np.array([[1, 0, 0], [0, 0, 0], [1, 1, 1]]).astype(np.int8)
        out = reduce_all_Job(x)

        # output [[0]
        #         [0]
        #         [1]]

    """
    name = _gen_unique_name_if_need(name, "ReduceAll_")
    axis = _check_axis(axis, x.shape)
    if len(axis) == 0:
        return flow.math.not_equal(
            x, flow.constant_scalar(value=0.0, dtype=x.dtype))
    return _do_reduce(x, name, "reduce_all", keepdims, axis)
Example #5
def create_padding_mask(seq, name="CreatePad"):
    """
    Create padding mask
    :param seq: input sequence, shape=(batch, seq_lenth)
    :return:
    """
    with flow.scope.namespace(name):
        seq = flow.cast(
            flow.math.equal(
                seq,
                flow.constant_scalar(0,
                                     dtype=flow.int64,
                                     name="zero_mask_scalar")), flow.float32)
        # Expand dims from (a, b) -> (a, 1, 1, b)
        seq = flow.expand_dims(seq, axis=1)
        seq = flow.expand_dims(seq, axis=1)

    return seq
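
A numpy mirror of the mask construction, to make the shape change concrete (input values are illustrative):

import numpy as np

def create_padding_mask_np(seq):
    # 1.0 where the token id is 0 (padding), else 0.0
    mask = (seq == 0).astype(np.float32)
    # (batch, seq_length) -> (batch, 1, 1, seq_length)
    return mask[:, np.newaxis, np.newaxis, :]

seq = np.array([[5, 2, 0, 0], [3, 0, 0, 0]], dtype=np.int64)
print(create_padding_mask_np(seq).shape)  # (2, 1, 1, 4)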
Example #6
def get_angles(pos, i, d_model):
    """
    Compute angles

    The equation is  1 / 10000^(2i / d_model)
    :param pos: The position dims, shape=(position, 1)
    :param i: The d_model index, shape = (1, d_model)
    :param d_model: The hidden dims, int value
    :return:
    """
    # Constant blob holding d_model
    d_model_constant = flow.constant(d_model, dtype=flow.float32, shape=(1,), name="d_model_constant")

    constant_10000 = flow.constant(10000, dtype=flow.float32, shape=(1, d_model), name="constant_10000")

    constant_2 = flow.constant_scalar(2, dtype=flow.float32)

    # Compute angle_rates = 1 / 10000^(2 * floor(i / 2) / d_model)

    angle_rates = 1 / flow.math.pow(constant_10000,
                                    (constant_2 * flow.math.floor(i / constant_2)) / d_model_constant)

    return pos * angle_rates
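
In the standard Transformer recipe these angles feed a sin/cos positional encoding; a numpy sketch under that assumption (get_angles_np simply mirrors the formula above):

import numpy as np

def get_angles_np(pos, i, d_model):
    # angle = pos / 10000^(2 * floor(i / 2) / d_model)
    return pos / np.power(10000, (2 * np.floor(i / 2)) / np.float32(d_model))

pos = np.arange(50)[:, np.newaxis]   # (position, 1)
i = np.arange(128)[np.newaxis, :]    # (1, d_model)
angles = get_angles_np(pos, i, 128)

# sin on even dimension indices, cos on odd ones
pos_encoding = np.where(i % 2 == 0, np.sin(angles), np.cos(angles))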
Example #7
def meanshift(x, rgb_range, rgb_mean, rgb_std, sign=-1, name="Meanshift"):
    # Concat the rgb_std
    _new_constant_std_0 = flow.constant_scalar(rgb_std[0],
                                               dtype=flow.float32,
                                               name=name + "_std_0")
    _new_constant_std_1 = flow.constant_scalar(rgb_std[1],
                                               dtype=flow.float32,
                                               name=name + "_std_1")
    _new_constant_std_2 = flow.constant_scalar(rgb_std[2],
                                               dtype=flow.float32,
                                               name=name + "_std_2")
    _std = flow.concat(
        inputs=[_new_constant_std_0, _new_constant_std_1, _new_constant_std_2],
        axis=0,
    )

    _reshaped_std = flow.reshape(_std, (3, 1, 1, 1), name=name + "_reshape_std")

    # Concat the rgb_mean
    _new_constant_mean_0 = flow.constant_scalar(rgb_mean[0],
                                                dtype=flow.float32,
                                                name=name + "_mean_0")
    _new_constant_mean_1 = flow.constant_scalar(rgb_mean[1],
                                                dtype=flow.float32,
                                                name=name + "_mean_1")
    _new_constant_mean_2 = flow.constant_scalar(rgb_mean[2],
                                                dtype=flow.float32,
                                                name=name + "_mean_2")

    _mean = flow.concat(
        inputs=[
            _new_constant_mean_0, _new_constant_mean_1, _new_constant_mean_2
        ],
        axis=0,
    )

    _weight_ones = flow.constant(1.0,
                                 dtype=flow.float32,
                                 shape=(3, 3),
                                 name=name + "_ones")

    # Generate a 3x3 identity (eye) matrix as the difference of two
    # lower-triangular matrices:

    # [[1, 0, 0],    [[0, 0, 0],    [[1, 0, 0],
    #  [1, 1, 0], -   [1, 0, 0], =   [0, 1, 0],
    #  [1, 1, 1]]     [1, 1, 0]]     [0, 0, 1]]

    weight = flow.math.tril(_weight_ones, 0) - flow.math.tril(_weight_ones, -1)
    weight = flow.reshape(weight,
                          shape=(3, 3, 1, 1),
                          name=name + "_reshaped_weight")
    weight = flow.math.divide(weight, _reshaped_std)

    bias = sign * rgb_range * _mean
    bias = flow.math.divide(bias, _std)

    _conv = flow.nn.conv2d(x,
                           filters=weight,
                           strides=1,
                           padding="SAME",
                           name=name + "_mean_shift_conv")
    output = flow.nn.bias_add(_conv,
                              bias,
                              data_format="NCHW",
                              name=name + "_addbias")
    return output
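
A usage note, hedged: a mean-shift layer like this typically brackets a super-resolution network, subtracting the dataset RGB mean on the way in and adding it back on the way out. The values below are illustrative assumptions, not from the source:

# Hypothetical usage (all values are illustrative):
#   x: NCHW blob, shape (N, 3, H, W), pixels in [0, 255]
#   shifted = meanshift(x, 255, (0.4488, 0.4371, 0.4040), (1.0, 1.0, 1.0),
#                       sign=-1, name="SubMean")   # subtract the mean
#   ... network body ...
#   restored = meanshift(net_out, 255, (0.4488, 0.4371, 0.4040),
#                        (1.0, 1.0, 1.0), sign=1, name="AddMean")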