Example #1
 def test_unary_cwise_real_ops_2(self):
   real_ops = [
       math_ops.neg,
       math_ops.negative,
       math_ops.reciprocal,
       math_ops.rint,
       math_ops.round,
       math_ops.rsqrt,
       math_ops.sigmoid,
       math_ops.sign,
       math_ops.sin,
       math_ops.sinh,
       math_ops.sqrt,
       math_ops.square,
       math_ops.tan,
       math_ops.tanh,
       nn.elu,
       nn.relu,
       nn.relu6,
       lambda t: nn.leaky_relu(t, alpha=0.1),
       nn.selu,
       nn.softplus,
       nn.softsign,
   ]
   self._test_unary_cwise_ops(real_ops, False)
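The helper _test_unary_cwise_ops belongs to the surrounding test class and is not shown here; it exercises these element-wise ops under pfor vectorization. As a rough, hedged illustration of the property being exercised (not the actual helper), the sketch below uses the public tf.vectorized_map API with the same leaky_relu lambda; all shapes and values are illustrative.

import tensorflow as tf

x = tf.random.normal([4, 3])
# apply the op row by row via vectorized_map ...
per_row = tf.vectorized_map(lambda t: tf.nn.leaky_relu(t, alpha=0.1), x)
# ... and to the whole batch at once; an element-wise op should agree
whole_batch = tf.nn.leaky_relu(x, alpha=0.1)
tf.debugging.assert_near(per_row, whole_batch)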
Example #2
        def dropped_inputs_training():

            with ops.name_scope("random_relu_training"):
                x = ops.convert_to_tensor(inputs, name="x")
                if not x.dtype.is_floating:
                    raise ValueError(
                        "x has to be a floating point tensor since it's going to"
                        " be scaled. Got a %s tensor instead." % x.dtype)
                if isinstance(self.a, numbers.Real) and not 0 < self.a:
                    raise ValueError(
                        "a must be a scalar tensor or a positive float, "
                        "got %g" % self.a)

                if isinstance(self.b, numbers.Real) and not 0 < self.b:
                    raise ValueError(
                        "b must be a scalar tensor or a positive float, "
                        "got %g" % self.b)

                if isinstance(self.b, numbers.Real) and \
                        isinstance(self.a, numbers.Real) and \
                        not self.a < self.b:
                    raise ValueError(
                        "a and b must be scalar tensors or floats such that"
                        " a < b, got {} and {}".format(self.a, self.b))

                else:
                    a = ops.convert_to_tensor(self.a, dtype=x.dtype, name="a")
                    b = ops.convert_to_tensor(self.b, dtype=x.dtype, name="b")
                    a.get_shape().assert_is_compatible_with(
                        tensor_shape.scalar())
                    b.get_shape().assert_is_compatible_with(
                        tensor_shape.scalar())

                    # TODO: confirm this noise shape.
                    noise_shape = array_ops.shape(x)
                    random_tensor = math_ops.divide(1, b)
                    random_tensor += math_ops.divide(
                        b - a, a * b) * random_ops.random_uniform(
                            noise_shape, seed=self.seed, dtype=x.dtype)

                    ret = nn.leaky_relu(x, alpha=random_tensor)
                    if not context.executing_eagerly():
                        ret.set_shape(x.get_shape())

                    return ret
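For reference, the random_tensor expression above draws the leaky-ReLU slope as alpha = 1/b + (b - a)/(a*b) * u with u ~ U(0, 1), so alpha ranges from 1/b (at u = 0) up to 1/a (as u approaches 1). A minimal NumPy sketch of that range, assuming scalar bounds 0 < a < b (the values 3 and 8 are illustrative, not taken from this code):

import numpy as np

a, b = 3.0, 8.0                                  # illustrative bounds, 0 < a < b
u = np.random.uniform(0.0, 1.0, size=10000)
alpha = 1.0 / b + (b - a) / (a * b) * u          # same formula as random_tensor above
assert alpha.min() >= 1.0 / b and alpha.max() <= 1.0 / a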
Example #3
    def test_two_conv2d_fusions(self):
        """Test two Conv2D patterns and only the second is fusable."""
        if not test_util.is_gpu_available(cuda_only=True,
                                          min_cuda_compute_capability=(8, 0)):
            self.skipTest('No GPU with compute capability >= 8.0 available')

        N, H, W, C = (5, 3, 3, 8)  # pylint: disable=invalid-name

        ops.reset_default_graph()
        x_shape = [N, C, H, W]
        x_format, b_format = ('NCHW', 'NC..')

        x = _input(x_shape)
        w = _weight([2, 2, C, C])
        b = _bias([C])

        y = nn_ops.conv2d(x,
                          w,
                          strides=(1, 1),
                          padding='SAME',
                          data_format=x_format)
        y = nn.bias_add(y, b, data_format=b_format)
        y = nn.leaky_relu(y)
        y = nn_ops.conv2d(y,
                          w,
                          strides=(1, 1),
                          padding='SAME',
                          data_format=x_format)
        y = nn.bias_add(y, b, data_format=b_format)
        y = nn.relu(y)
        out = array_ops.identity(y)

        # The first Conv-BiasAdd-LeakyRelu is not fusable because cuDNN requires
        # fp16 for this pattern. The second Conv-BiasAdd-Relu is fusable.
        epilog_ops = [b'BiasAdd', b'Relu']
        fused_op = ['_FusedConv2D']
        self._VerifyValues(out, False, fused_op, epilog_ops)
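As a rough, standalone illustration of the Conv2D-BiasAdd-Relu chain that the test expects the remapper to fuse into _FusedConv2D, the sketch below builds the same pattern with the public TF API on a tiny NHWC tensor; it does not reproduce the grappler rewrite or the _VerifyValues helper, and the shapes are illustrative only.

import tensorflow as tf

x = tf.random.normal([1, 4, 4, 2])               # NHWC input
w = tf.random.normal([2, 2, 2, 2])               # HWIO filter
b = tf.zeros([2])

y = tf.nn.conv2d(x, w, strides=1, padding='SAME')
y = tf.nn.bias_add(y, b)
y = tf.nn.relu(y)                                # Conv -> BiasAdd -> Relu chain
print(y.shape)                                   # (1, 4, 4, 2)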
Example #4
 def dropped_inputs_testing():
     with ops.name_scope("drop_activation_testing"):
         # in testing phase, deterministic activation function
         # leaky ReLU with slope = 1. - p
         return nn.leaky_relu(inputs, alpha=1 - self.p)
Example #5
 def dropped_inputs_testing():
     with ops.name_scope("random_relu_testing"):
         # in testing phase, deterministic activation function:
         # leaky ReLU with slope 1 / (0.5 * (a + b)) = 2 / (a + b)
         return nn.leaky_relu(inputs,
                              alpha=1 / (0.5 * (self.a + self.b)))
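The test-time slope above is just the reciprocal of the midpoint of [a, b]. For example, assuming illustrative bounds a = 3 and b = 8 (not defaults taken from this code), the deterministic slope works out as follows:

a, b = 3.0, 8.0                       # illustrative bounds, not defaults from this code
alpha_test = 1.0 / (0.5 * (a + b))    # == 2 / (a + b) ≈ 0.1818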
Example #6
def leaky_relu(x, alpha=0.1):
    return nn.leaky_relu(x, alpha=alpha)
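A quick usage sketch of such a wrapper, written against the public tf.nn.leaky_relu (which the internal nn.leaky_relu used above mirrors); values are illustrative:

import tensorflow as tf

def leaky_relu(x, alpha=0.1):
    # negative inputs are scaled by alpha, non-negative inputs pass through
    return tf.nn.leaky_relu(x, alpha=alpha)

print(leaky_relu(tf.constant([-2.0, 0.0, 3.0])))  # [-0.2, 0., 3.]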