Example #1
 def testInvalidKeepProb(self):
     x_dim = 40
     y_dim = 30
     t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
     with self.assertRaises(ValueError):
         nn.dropout(t, -1.0)
     with self.assertRaises(ValueError):
         nn.dropout(t, 1.1)
     with self.assertRaises(ValueError):
         nn.dropout(t, [0.0, 1.0])
     with self.assertRaises(ValueError):
         nn.dropout(t, array_ops.placeholder(dtypes.float64))
     with self.assertRaises(ValueError):
         nn.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))
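A note on what the test above implies is accepted: `keep_prob` must be a scalar in (0, 1] whose dtype matches the float32 input, so calls like the following (a sketch reusing `t` from the test) pass validation:

valid = nn.dropout(t, 0.5)  # Python float scalar in (0, 1]
valid = nn.dropout(t, array_ops.placeholder(dtypes.float32))  # scalar placeholder, dtype matches t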
Example #2
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
    """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for op_scope.

  Returns:
    a tensor representing the output of the operation.
  """
    with ops.op_scope([inputs], scope, 'Dropout') as sc:
        is_training = ops.convert_to_tensor(is_training)
        outputs = control_flow_ops.cond(
            is_training, lambda: nn.dropout(inputs, keep_prob, noise_shape),
            lambda: inputs)
        return utils.collect_named_outputs(outputs_collections, sc, outputs)
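A minimal usage sketch for this wrapper, assuming TF1 graph mode, numpy, and that `array_ops`, `dtypes`, and `session` (tensorflow.python.client.session) are imported alongside the modules above; the placeholder names are illustrative. The same graph serves both phases, and the cond picks a branch per run:

import numpy as np

x_in = array_ops.placeholder(dtypes.float32, shape=[None, 128])
training = array_ops.placeholder(dtypes.bool, name='is_training')
# Dropout fires only when the is_training feed is True; otherwise the
# cond built inside the wrapper returns the inputs unchanged.
y_out = dropout(x_in, keep_prob=0.8, is_training=training)

with session.Session() as sess:
    x = np.ones((4, 128), dtype=np.float32)
    train_out = sess.run(y_out, {x_in: x, training: True})   # entries are 0 or 1/0.8
    eval_out = sess.run(y_out, {x_in: x, training: False})   # identical to x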
Example #3
 def testShapedDropoutUnknownShape(self):
     x_dim = 40
     y_dim = 30
     keep_prob = 0.5
     x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
     dropout_x = nn.dropout(x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
     self.assertEqual(x.get_shape(), dropout_x.get_shape())
Example #4
  def test_conv_bn_dropout(self, mode):
    """Test dropout precision of convolution batch norm graph."""
    self._maybe_skip(mode)
    random_seed.set_random_seed(0)
    x = _input([2, 8, 8, 1])
    y = _conv_bn(x)
    y = nn.dropout(y, rate=0.5)
    y = math_ops.add(y, 1, name='addition')
    y = _conv_bn(y)
    y = array_ops.identity(y)
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01)
    g = optimizer.compute_gradients(y, [x])
    output = (y, g)

    output_val_ref, output_val, cost_graph = self._run(mode, output)
    node_map = _build_node_map(cost_graph.node)
    self._assert_output_f16(mode, node_map, 'Conv2D')
    self._assert_output_f16(mode, node_map, 'FusedBatchNormV3')
    # We do not assert dropout's dtype because we do not want to rely on the
    # node names of dropout's internal implementation.
    self._assert_output_f16(mode, node_map, 'addition')
    self._assert_output_f16(mode, node_map, 'Conv2D_1')

    output_val_ref, output_val, cost_graph = self._run(mode, output)
    # Bump up the tolerance for the ROCm platform.
    # The default tolerance (1e-3) results in a tiny fraction (<1%) of
    # miscompares on the ROCm platform, hence the tolerance bump.
    tol = 2e-3 if test.is_built_with_rocm() else 1e-3
    self.assertAllClose(output_val_ref, output_val, atol=tol, rtol=tol)
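`self._run` and the `mode` plumbing are test scaffolding around TensorFlow's auto mixed precision graph rewrite. Outside the harness, the rewrite that produces these float16 Conv2D/FusedBatchNormV3 nodes is enabled roughly as follows (a sketch for TF 1.14+; `loss` is an illustrative stand-in):

opt = gradient_descent.GradientDescentOptimizer(learning_rate=0.01)
# Wraps the optimizer so the grappler pass casts eligible ops to float16
# and adds loss scaling to protect small gradient magnitudes.
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
train_op = opt.minimize(loss)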
Example #5
 def testShapedDropout(self):
   # Runs dropout with a 0-1 tensor 10 times, sums the number of ones, and
   # validates that it produces approximately the right number of ones over
   # a large number of samples, based on the keep probability. This time
   # with shaped noise.
   x_dim = 40 * 30
   y_dim = 3
   num_iter = 10
   for keep_prob in [0.1, 0.5, 0.8]:
     with self.test_session():
       t = constant_op.constant(1.0,
                                shape=[x_dim, y_dim],
                                dtype=types.float32)
       dropout = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       final_count = 0
       for _ in xrange(0, num_iter):
         value = dropout.eval()
         final_count += np.count_nonzero(value)
         # Verifies that there are only two values: 0 and 1/keep_prob.
         sorted_value = np.unique(np.sort(value))
         self.assertEqual(0, sorted_value[0])
         self.assertAllClose(1 / keep_prob, sorted_value[1])
     # Check that we are in the 15% error range
     expected_count = x_dim * y_dim * keep_prob * num_iter
     rel_error = math.fabs(final_count - expected_count) / expected_count
     print(rel_error)
     self.assertTrue(rel_error < 0.15)
Example #6
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.
  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the expected
  sum is unchanged.
  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for op_scope.
  Returns:
    a tensor representing the output of the operation.
  """
  with ops.op_scope([inputs], scope, 'Dropout') as sc:
    is_training = ops.convert_to_tensor(is_training)
    outputs = control_flow_ops.cond(
        is_training,
        lambda: nn.dropout(inputs, keep_prob, noise_shape),
        lambda: inputs)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
Example #7
    def test_conv_bn_dropout(self):
        """Test dropout precision of convolution batch norm graph."""
        with compat.forward_compatibility_horizon(2019, 6, 7):
            if test.is_gpu_available(cuda_only=True):
                random_seed.set_random_seed(0)
                x = _input([2, 8, 8, 1])
                y = _conv_bn(x)
                y = nn.dropout(y, rate=0.5)
                y = _conv_bn(y)
                y = array_ops.identity(y)
                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.01)
                g = optimizer.compute_gradients(y, [x])
                output = (y, g)

                output_val_ref, output_val, cost_graph = self._run(output)
                node_map = _build_node_map(cost_graph.node)
                self._assert_output_fp16(node_map, 'Conv2D')
                self._assert_output_fp16(node_map, 'FusedBatchNormV3')
                self._assert_output_fp16(node_map, 'dropout/mul')
                self._assert_output_fp16(node_map, 'Conv2D_1')

                output_val_ref, output_val, cost_graph = self._run(output)
                self.assertAllClose(output_val_ref,
                                    output_val,
                                    atol=1e-3,
                                    rtol=1e-3)
Example #8
 def testShapedDropout(self):
   # Runs dropout with a 0-1 tensor 10 times, sums the number of ones, and
   # validates that it produces approximately the right number of ones over
   # a large number of samples, based on the keep probability. This time
   # with shaped noise.
   x_dim = 40 * 30
   y_dim = 3
   num_iter = 10
   for keep_prob in [0.1, 0.5, 0.8]:
     with self.test_session():
       t = constant_op.constant(1.0,
                                shape=[x_dim, y_dim],
                                dtype=types.float32)
       dropout = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       final_count = 0
       for _ in xrange(0, num_iter):
         value = dropout.eval()
         final_count += np.count_nonzero(value)
         # Verifies that there are only two values: 0 and 1/keep_prob.
         sorted_value = np.unique(np.sort(value))
         self.assertEqual(0, sorted_value[0])
         self.assertAllClose(1 / keep_prob, sorted_value[1])
     # Check that we are in the 15% error range
     expected_count = x_dim * y_dim * keep_prob * num_iter
     rel_error = math.fabs(final_count - expected_count) / expected_count
     print(rel_error)
     self.assertTrue(rel_error < 0.15)
Example #9
    def test_conv_bn_dropout(self):
        """Test dropout precision of convolution batch norm graph."""
        with compat.forward_compatibility_horizon(2019, 6, 7):
            if test.is_gpu_available(cuda_only=True):
                random_seed.set_random_seed(0)
                x = _input([2, 8, 8, 1])
                y = _conv_bn(x)
                y = nn.dropout(y, rate=0.5)
                y = math_ops.add(y, 1, name='addition')
                y = _conv_bn(y)
                y = array_ops.identity(y)
                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.01)
                g = optimizer.compute_gradients(y, [x])
                output = (y, g)

                output_val_ref, output_val, cost_graph = self._run(output)
                node_map = _build_node_map(cost_graph.node)
                self._assert_output_fp16(node_map, 'Conv2D')
                self._assert_output_fp16(node_map, 'FusedBatchNormV3')
                # We do not assert dropout's dtype because we do not want to rely on the
                # node names of dropout's internal implementation.
                self._assert_output_fp16(node_map, 'addition')
                self._assert_output_fp16(node_map, 'Conv2D_1')

                output_val_ref, output_val, cost_graph = self._run(output)
                self.assertAllClose(output_val_ref,
                                    output_val,
                                    atol=2e-3,
                                    rtol=2e-3)
Example #10
 def testInvalidKeepProb(self):
     x_dim = 40
     y_dim = 30
     t = constant_op.constant(1.0,
                              shape=[x_dim, y_dim],
                              dtype=types.float32)
     with self.assertRaises(ValueError):
         nn.dropout(t, -1.0)
     with self.assertRaises(ValueError):
         nn.dropout(t, 1.1)
     with self.assertRaises(ValueError):
         nn.dropout(t, [0.0, 1.0])
     with self.assertRaises(ValueError):
         nn.dropout(t, array_ops.placeholder(types.float64))
     with self.assertRaises(ValueError):
         nn.dropout(t, array_ops.placeholder(types.float32, shape=[2]))
Example #11
    def call(self, inputs, training=None):
        if 0. < self.rate < 1.:
            return nn.dropout(inputs,
                              noise_shape=self._get_noise_shape(inputs),
                              seed=self.seed,
                              rate=self.rate)
        else:
            return inputs
Example #12
 def testShapedDropoutUnknownShape(self):
     x_dim = 40
     y_dim = 30
     keep_prob = 0.5
     x = constant_op.constant(1.0,
                              shape=[x_dim, y_dim],
                              dtype=types.float32)
     dropout_x = nn.dropout(x,
                            keep_prob,
                            noise_shape=array_ops.placeholder(types.int32))
     self.assertEqual(x.get_shape(), dropout_x.get_shape())
Example #13
 def call(self, inputs, training=False):
     if isinstance(training, bool):
         training_bool = training
     else:
         training_bool = tensor_util.constant_value(training)
     if training_bool is False:
         return array_ops.identity(inputs)
     dropped_inputs = nn.dropout(inputs, 1 - self.rate, noise_shape=self.noise_shape, seed=self.seed)
     if training_bool is True:
         return dropped_inputs
     return control_flow_ops.cond(training, lambda: dropped_inputs, lambda: inputs)
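The `tensor_util.constant_value` call is what lets this `call` avoid building a `cond` when `training` is statically known; only a genuinely dynamic tensor pays for the conditional. Schematically (the calls are illustrative):

layer.call(x, training=False)  # constant_value -> False: identity(x), no cond built
layer.call(x, training=True)   # constant_value -> True: dropout applied unconditionally
flag = array_ops.placeholder(dtypes.bool)
layer.call(x, training=flag)   # unknown at graph time: cond(flag, dropout, identity)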
Example #14
 def _dropped_inputs():
     if input_type == 'real':
         _inputs = real_inputs
     elif input_type == 'imag':
         _inputs = imag_inputs
     else:
         raise ValueError("Invalid input type. "
                          "Available values are 'real' and 'imag'")
     return nn.dropout(_inputs,
                       noise_shape=self._get_noise_shape(_inputs),
                       seed=self.seed,
                       rate=self.rate)
Example #15
 def call(self, inputs, training=False):
   if isinstance(training, bool):
     training_bool = training
   else:
     training_bool = tensor_util.constant_value(training)
   if training_bool is False:
     return array_ops.identity(inputs)
   dropped_inputs = nn.dropout(inputs, 1 - self.rate,
                               noise_shape=self.noise_shape,
                               seed=self.seed)
   if training_bool is True:
     return dropped_inputs
   return control_flow_ops.cond(training,
                                lambda: dropped_inputs,
                                lambda: inputs)
Example #16
 def testShapedDropoutCorrelation(self):
     # Runs a shaped dropout and tests that the correlations are correct.
     x_dim = 40
     y_dim = 30
     num_iter = 10
     for keep_prob in [0.1, 0.5, 0.8]:
         with self.test_session():
             t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
             dropout = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
             self.assertEqual([x_dim, y_dim], dropout.get_shape())
             for _ in xrange(0, num_iter):
                 value = dropout.eval()
                 # Verifies that each y column has only one type of activation.
                 for i in xrange(x_dim):
                     sorted_value = np.unique(np.sort(value[i, :]))
                     self.assertEqual(sorted_value.size, 1)
Example #17
 def testShapedDropoutShapeError(self):
     # Runs shaped dropout and verifies an error is thrown on misshapen noise.
     x_dim = 40
     y_dim = 30
     keep_prob = 0.5
     t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim + 3])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim])
     # test that broadcasting proceeds
     _ = nn.dropout(t, keep_prob, noise_shape=[y_dim])
     _ = nn.dropout(t, keep_prob, noise_shape=[1, y_dim])
     _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
     _ = nn.dropout(t, keep_prob, noise_shape=[1, 1])
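The rule these assertions probe: each `noise_shape` dimension must either equal the corresponding input dimension or be 1, in which case a single keep/drop decision is broadcast along that axis. A small illustration reusing the imports above:

t2 = constant_op.constant(1.0, shape=[4, 3], dtype=dtypes.float32)
rows = nn.dropout(t2, keep_prob, noise_shape=[4, 1])  # each row kept or dropped as a unit
cols = nn.dropout(t2, keep_prob, noise_shape=[1, 3])  # each column kept or dropped as a unit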
Example #18
    def __init__(self, layer, keep_prob=0.2, seed=None):
        super().__init__(layer, layer.n_units, layer.shape, layer.dtype,
                         layer.name + "_dropout")

        self.seed = seed
        self.keep_prob = keep_prob

        if keep_prob == 0:
            self._forward(layer)
        else:
            with name_scope(self.name):
                if layer.is_sparse():
                    self.tensor = transform.sparse_dropout(
                        layer.tensor, self.keep_prob, seed)
                else:
                    self.tensor = dropout(layer.tensor,
                                          self.keep_prob,
                                          seed=seed)
Example #19
 def testShapedDropoutCorrelation(self):
     # Runs a shaped dropout and tests that the correlations are correct.
     x_dim = 40
     y_dim = 30
     num_iter = 10
     for keep_prob in [0.1, 0.5, 0.8]:
         with self.test_session():
             t = constant_op.constant(1.0,
                                      shape=[x_dim, y_dim],
                                      dtype=types.float32)
             dropout = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
             self.assertEqual([x_dim, y_dim], dropout.get_shape())
             for _ in xrange(0, num_iter):
                 value = dropout.eval()
                 # Verifies that each y column has only one type of activation.
                 for i in xrange(x_dim):
                     sorted_value = np.unique(np.sort(value[i, :]))
                     self.assertEqual(sorted_value.size, 1)
Example #20
 def testShapedDropoutShapeError(self):
     # Runs shaped dropout and verifies an error is thrown on misshapen noise.
     x_dim = 40
     y_dim = 30
     keep_prob = 0.5
     t = constant_op.constant(1.0,
                              shape=[x_dim, y_dim],
                              dtype=types.float32)
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim + 3])
     with self.assertRaises(ValueError):
         _ = nn.dropout(t, keep_prob, noise_shape=[x_dim])
     # test that broadcasting proceeds
     _ = nn.dropout(t, keep_prob, noise_shape=[y_dim])
     _ = nn.dropout(t, keep_prob, noise_shape=[1, y_dim])
     _ = nn.dropout(t, keep_prob, noise_shape=[x_dim, 1])
     _ = nn.dropout(t, keep_prob, noise_shape=[1, 1])
Example #21
def dropout(tensor_in, prob, name=None):
    """Adds dropout node and stores probability tensor into graph collection.

    Args:
        tensor_in: Input tensor.
        prob: Float or Tensor.
        name: Operation name.

    Returns:
        Tensor of the same shape of `tensor_in`.

    Raises:
        ValueError: If `keep_prob` is not in `(0, 1]`.
    """
    with ops.op_scope([tensor_in], name, "dropout") as name:
        if isinstance(prob, float):
            prob = vs.get_variable("prob", [],
                                   initializer=init_ops.constant_initializer(prob),
                                   trainable=False)
        ops.add_to_collection(DROPOUTS, prob)
        return nn.dropout(tensor_in, prob)
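Because the probability is stored in a non-trainable variable registered under DROPOUTS, a training harness can turn dropout off for inference without rebuilding the graph. A hedged sketch, assuming a live session `sess` and the same `ops`/`DROPOUTS` names as above:

for prob in ops.get_collection(DROPOUTS):
    sess.run(prob.assign(1.0))  # keep_prob = 1.0 effectively disables dropout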
Example #22
def dropout(tensor_in, prob, name=None):
  """Adds dropout node and stores probability tensor into graph collection.

  Args:
    tensor_in: Input tensor.
    prob: Float or Tensor.
    name: Operation name.

  Returns:
    Tensor of the same shape of `tensor_in`.

  Raises:
    ValueError: If `keep_prob` is not in `(0, 1]`.
  """
  with ops.op_scope([tensor_in], name, "dropout") as name:
    if isinstance(prob, float):
      prob = vs.get_variable("prob", [],
                             initializer=init_ops.constant_initializer(prob),
                             trainable=False)
    ops.add_to_collection(DROPOUTS, prob)
    return nn.dropout(tensor_in, prob)
Example #23
  def test_conv_bn_dropout(self):
    """Test dropout precision of convolution batch norm graph."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = _input([2, 8, 8, 1])
      y = _conv_bn(x)
      y = nn.dropout(y, rate=0.5)
      y = _conv_bn(y)
      y = array_ops.identity(y)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01)
      g = optimizer.compute_gradients(y, [x])
      output = (y, g)

      output_val_ref, output_val, cost_graph = self._run(output)
      node_map = _build_node_map(cost_graph.node)
      self._assert_output_fp16(node_map, 'Conv2D')
      self._assert_output_fp16(node_map, 'FusedBatchNorm')
      self._assert_output_fp16(node_map, 'dropout/mul')
      self._assert_output_fp16(node_map, 'Conv2D_1')

      output_val_ref, output_val, cost_graph = self._run(output)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3)
Example #24
 def dropped_inputs():
   return nn.dropout(inputs, 1 - self.rate,
                     noise_shape=self._get_noise_shape(inputs),
                     seed=self.seed)
Example #25
 def dropped_inputs():
   return nn.dropout(
       inputs,
       noise_shape=self._get_noise_shape(inputs),
       seed=self.seed,
       rate=self.rate)
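Examples #24 and #25 are the same Keras-style closure on either side of the nn.dropout signature change: the old positional argument is a keep probability, while the newer `rate` keyword is a drop probability, so rate = 1 - keep_prob. Side by side (`x` is an illustrative tensor):

y_old = nn.dropout(x, 0.8)       # pre-1.13 form: keep ~80% of units (later deprecated)
y_new = nn.dropout(x, rate=0.2)  # 1.13+ form: drop ~20% of units (equivalent)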
Example #26
 def _dropout():
   return nn.dropout(inputs, keep_prob, noise_shape)
Example #27
 def _dropout():
     return nn.dropout(inputs, keep_prob, noise_shape)
Example #28
 def dropped_inputs():
     return nn.dropout(inputs,
                       1 - self.rate,
                       noise_shape=self.noise_shape,
                       seed=self.seed)
Example #29
 def dropped_weights():
   return nn.dropout(weights, rate=self.dropout)
Example #30
 def call(self, inputs):
     return nn.dropout(inputs,
                       noise_shape=self._get_noise_shape(inputs),
                       seed=self.seed,
                       rate=self.rate)
Example #31
 def fnc(x):
     rate = self.random_rate()
     from tensorflow.python.ops import nn
     return nn.dropout(x * 1., rate=rate, seed=seed)