Example #1
 def _get_noise_shape(self, inputs):
   # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
   # which will override `self.noise_shape`, and allows for custom noise
   # shapes with dynamically sized inputs.
   if self.noise_shape is None:
     return self.noise_shape
   return nn_ops._get_noise_shape(inputs, self.noise_shape)  # pylint: disable=protected-access
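This hook is the extension point for sharing one dropout mask across an axis. Below is a minimal sketch (not from the TensorFlow source) of such an override for 3-D `(batch, time, features)` inputs, mirroring what `SpatialDropout1D`-style layers do; the class name is hypothetical.

import tensorflow as tf

class TimeSharedDropout(tf.keras.layers.Dropout):  # hypothetical name
    def _get_noise_shape(self, inputs):
        # Draw one mask entry per (batch, feature) and broadcast it over
        # the time axis, so every timestep is dropped identically.
        input_shape = tf.shape(inputs)
        return (input_shape[0], 1, input_shape[2])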
Example #2
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
    """The gradient for `gelu`.

    Args:
        x: A tensor with type is float.
        keep_prob: A tensor, float, rate of every element reserved.
        noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random
            generated.
        seed: Random seed.
        name: Layer name.

    Returns:
        A tensor.
    """
    if context.executing_eagerly():
        raise RuntimeError("npu_ops.dropout() is not compatible with "
                           "eager execution.")
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
        raise ValueError("x must be a floating point tensor."
                         " Got a %s tensor instead." % x.dtype)
    if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1.0:
        raise ValueError("keep_prob must be a float value or a scalar tensor in the "
                         "range (0, 1], got %g" % keep_prob)
    if isinstance(keep_prob, float) and keep_prob == 1.0:
        return x
    seed, seed2 = random_seed.get_seed(seed)
    noise_shape = _get_noise_shape(x, noise_shape)
    gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name)
    result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name)
    return result
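A hedged usage sketch (not from the source): the eager check above means the op is graph-mode only, so it is driven through a TF 1.x session here. The `npu_bridge` import path is an assumption about the package layout.

import tensorflow as tf
from npu_bridge.estimator import npu_ops  # assumed import path

x = tf.random_uniform([32, 128], dtype=tf.float32)
y = npu_ops.dropout(x, keep_prob=0.9)  # each element kept with probability 0.9
with tf.Session() as sess:
    out = sess.run(y)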
Example #3
File: __init__.py  Project: SachsLab/indl
        def dropped_inputs():
            rate = self.rate
            noise_shape = self.noise_shape
            seed = self.seed
            with ops.name_scope(None, "coordinated_dropout", [inputs]) as name:
                is_rate_number = isinstance(rate, numbers.Real)
                if is_rate_number and (rate < 0 or rate >= 1):
                    raise ValueError(
                        "rate must be a scalar tensor or a float in the "
                        "range [0, 1), got %g" % rate)
                x = ops.convert_to_tensor(inputs, name="x")
                x_dtype = x.dtype
                if not x_dtype.is_floating:
                    raise ValueError(
                        "x has to be a floating point tensor since it's going "
                        "to be scaled. Got a %s tensor instead." % x_dtype)
                is_executing_eagerly = context.executing_eagerly()
                if not tensor_util.is_tensor(rate):
                    if is_rate_number:
                        keep_prob = 1 - rate
                        scale = 1 / keep_prob
                        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
                        ret = gen_math_ops.mul(x, scale)
                    else:
                        raise ValueError(
                            "rate is neither scalar nor scalar tensor %r" %
                            rate)
                else:
                    rate.get_shape().assert_has_rank(0)
                    rate_dtype = rate.dtype
                    if rate_dtype != x_dtype:
                        if not rate_dtype.is_compatible_with(x_dtype):
                            raise ValueError(
                                "Tensor dtype %s is incomptaible with Tensor dtype %s: %r"
                                % (x_dtype.name, rate_dtype.name, rate))
                        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
                    one_tensor = constant_op.constant(1, dtype=x_dtype)
                    ret = gen_math_ops.real_div(
                        x, gen_math_ops.sub(one_tensor, rate))

                noise_shape = nn_ops._get_noise_shape(x, noise_shape)
                # Sample a uniform distribution on [0.0, 1.0) and select values larger
                # than rate.
                #
                # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
                # and subtract 1.0.
                random_tensor = random_ops.random_uniform(noise_shape,
                                                          seed=seed,
                                                          dtype=x_dtype)
                # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
                # hence a >= comparison is used.
                keep_mask = random_tensor >= rate
                ret = gen_math_ops.mul(ret,
                                       gen_math_ops.cast(keep_mask, x_dtype))
                if not is_executing_eagerly:
                    ret.set_shape(x.get_shape())
                return ret, keep_mask
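The twist over stock `tf.nn.dropout` is the second return value: the closure hands back `keep_mask` so callers can drop a second tensor at exactly the same positions, which is the "coordinated" part. A hedged sketch of that reuse (`other` and `rate` are hypothetical caller-side names, not from the source):

ret, keep_mask = dropped_inputs()
# Zero `other` at the same positions, with the same inverted-dropout rescaling.
other_kept = gen_math_ops.mul(other / (1.0 - rate),
                              gen_math_ops.cast(keep_mask, other.dtype))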
Example #4
def complex_dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
    '''
    Implementation of complex dropout based on tf.nn.dropout.
    '''
    with tf.name_scope(name, "complex_dropout", [x]) as name:
        # Early return if nothing needs to be dropped.
        if isinstance(keep_prob, float) and keep_prob == 1:
            return x

        noise_shape = _get_noise_shape(x, noise_shape)
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(
            noise_shape, seed=seed, dtype=tf.float32)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        ret = tf.complex(tf.div(tf.real(x), keep_prob) * binary_tensor,
                         tf.div(tf.imag(x), keep_prob) * binary_tensor)
    return ret
Example #5
def complex_dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
    '''
    Implementation of complex dropout based on tf.nn.dropout.
    The idea is straightforward: just as in the real-valued case,
    a dropped complex number is set to zero, and the remaining
    numbers are scaled according to the keep probability.
    '''
    with tf.name_scope(name, "complex_dropout", [x]) as name:
        # Early return if nothing needs to be dropped.
        if isinstance(keep_prob, float) and keep_prob == 1:
            return x

        noise_shape = _get_noise_shape(x, noise_shape)
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape,
                                           seed=seed,
                                           dtype=tf.float32)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        ret = tf.complex(
            tf.div(tf.real(x), keep_prob) * binary_tensor,
            tf.div(tf.imag(x), keep_prob) * binary_tensor)
    return ret
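The floor trick works because a uniform draw u in [0, 1) puts keep_prob + u in [keep_prob, 1.0 + keep_prob): it lands below 1.0 (floor 0, element dropped) with probability 1 - keep_prob, and at or above 1.0 (floor 1, element kept) with probability keep_prob. A hedged TF 1.x usage sketch, matching the tf.div / tf.random_uniform era of the code above:

import tensorflow as tf  # assumes TF 1.x

z = tf.complex(tf.random_uniform([4, 8]), tf.random_uniform([4, 8]))
z_drop = complex_dropout(z, keep_prob=0.8, seed=42)
with tf.Session() as sess:
    # Roughly 20% of the complex entries are zeroed; the rest scale by 1/0.8.
    print(sess.run(z_drop))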
Example #6
 def _get_noise_shape(self, inputs):
     if self.noise_shape is None:
         return self.noise_shape
     return nn_ops._get_noise_shape(inputs, self.noise_shape)