Example #1
    def getRotatePoint(self, map_shape, rotate_center, rotate_theta,
                       origin_point):
        """
        Compute the coordinates obtained by rotating a point about the
        rotation center by theta.
        :param map_shape: shape of the original map; since the Image origin is
            the top-left corner, the coordinate system must be flipped.
            Tensor-[height, width, channel]
        :param rotate_center: rotation center   Tensor-[loc_x, loc_y]
        :param rotate_theta: rotation angle   Tensor-[theta]
        :param origin_point: point(s) to rotate   Tensor-[loc_x, loc_y]
        :return: rotate_point_list: Tensor-[loc_x, loc_y]
        """
        row = map_shape[0]
        center_x = rotate_center[0]
        center_y = row - rotate_center[1]
        point_x = origin_point[0]
        point_y = row - origin_point[1]

        after_rotate_x = math_ops.round(
            (point_x - center_x) * math_ops.cos(rotate_theta) -
            (point_y - center_y) * math_ops.sin(rotate_theta) + center_x)
        after_rotate_y = row - math_ops.round(
            (point_x - center_x) * math_ops.sin(rotate_theta) +
            (point_y - center_y) * math_ops.cos(rotate_theta) + center_y)
        rotate_point = [after_rotate_x, after_rotate_y]
        rotate_point = tf.reshape(rotate_point, [2])
        return rotate_point
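A quick way to sanity-check the coordinate handling above: because image coordinates put the origin at the top-left with y pointing down, the method flips y before and after the rotation. A minimal NumPy sketch of the same arithmetic (the helper name and values are illustrative, not from the original class):

import numpy as np

def rotate_point(map_height, center, theta, point):
    # Flip y so the origin moves from top-left (image) to bottom-left (math).
    cx, cy = center[0], map_height - center[1]
    px, py = point[0], map_height - point[1]
    rx = np.round((px - cx) * np.cos(theta) - (py - cy) * np.sin(theta) + cx)
    # Rotate in math coordinates, then flip y back to image coordinates.
    ry = map_height - np.round((px - cx) * np.sin(theta) +
                               (py - cy) * np.cos(theta) + cy)
    return rx, ry

# Quarter turn about (2, 2) in a 4-pixel-tall map: (2, 1) lands on (1, 2).
print(rotate_point(4, (2, 2), np.pi / 2, (2, 1)))  # (1.0, 2.0)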
Example #2
def flip(x: tf.Tensor) -> tf.Tensor:
    """Flip augmentation

    Args:
        x: Image to flip

    Returns:
        Augmented image
    """
    batch_size = array_ops.shape(x)[0]
    uniform_random_left_right = random_ops.random_uniform([batch_size], 0, 1.0)
    flips_left_right = math_ops.round(
        array_ops.reshape(uniform_random_left_right, [batch_size, 1, 1, 1, 1]))
    flips_left_right = math_ops.cast(flips_left_right, x.dtype)
    flipped_input_left_right = array_ops.reverse(x, [3])
    output_image = flips_left_right * flipped_input_left_right + (
        1 - flips_left_right) * x

    uniform_random_up_down = random_ops.random_uniform([batch_size], 0, 1.0)
    flips_up_down = math_ops.round(
        array_ops.reshape(uniform_random_up_down, [batch_size, 1, 1, 1, 1]))
    flips_up_down = math_ops.cast(flips_up_down, x.dtype)
    flipped_input_up_down = array_ops.reverse(output_image, [2])

    x = flips_up_down * flipped_input_up_down + (1 -
                                                 flips_up_down) * output_image

    #x = tf.image.random_flip_left_right(x)
    #x = tf.image.random_flip_up_down(x)

    return x
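The pattern here replaces `tf.cond`-style control flow with arithmetic: `math_ops.round` of a uniform sample in [0, 1) is a fair 0/1 coin per batch element, and the blend `mask * flipped + (1 - mask) * x` applies the flip only where the coin came up 1. A condensed TF 2.x sketch of the same idea for ordinary 4-D image batches (the function name is illustrative):

import tensorflow as tf

def random_flip_left_right(x: tf.Tensor) -> tf.Tensor:
    batch_size = tf.shape(x)[0]
    # round(uniform[0, 1)) is a per-sample 0/1 draw with probability ~0.5.
    mask = tf.round(tf.random.uniform([batch_size], 0, 1.0))
    mask = tf.cast(tf.reshape(mask, [batch_size, 1, 1, 1]), x.dtype)
    # Blend: samples with mask == 1 are flipped along the width axis.
    return mask * tf.reverse(x, [2]) + (1 - mask) * x

images = tf.random.uniform([8, 32, 32, 3])
print(random_flip_left_right(images).shape)  # (8, 32, 32, 3)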
Example #3
  def call_gauss(self, inputs, input_stddev, training):
      """Pass a tensor through the bottleneck.

      Args:
        inputs: The tensor to be passed through the bottleneck.
        input_stddev: Tensor of standard deviations used as the scale of the
          modeled Gaussian densities; must be broadcast-compatible with
          `inputs`.
        training: Boolean. If `True`, returns a differentiable approximation of
          the inputs, and their likelihoods under the modeled probability
          densities. If `False`, returns the quantized inputs and their
          likelihoods under the corresponding probability mass function. These
          quantities can't be used for training, as they are not differentiable,
          but represent actual compression more closely.

      Returns:
        values: `Tensor` with the same shape as `inputs` containing the perturbed
          or quantized input values.
        likelihood: `Tensor` with the same shape as `inputs` containing the
          likelihood of `values` under the modeled probability distributions.

      Raises:
        ValueError: if `inputs` has different `dtype` or number of channels than
          a previous set of inputs the model was invoked with earlier.
      """
      inputs = ops.convert_to_tensor(inputs)
      input_stddev = ops.convert_to_tensor(input_stddev)
      inputs = array_ops.expand_dims(inputs, axis=4)
      input_stddev = array_ops.expand_dims(input_stddev, axis=4)
      #self.build_gauss(input_stddev)
      half = constant_op.constant(.5, dtype=self.dtype)

      values = inputs
      stddev = input_stddev

      # Add noise or quantize.
      if training:
        noise = random_ops.random_uniform(array_ops.shape(values), -half, half)
        values = math_ops.add_n([values, noise])
      elif self.optimize_integer_offset:
        values = math_ops.round(values - self._medians) + self._medians
      else:
        values = math_ops.round(values)

      mean = constant_op.constant(0., dtype=self.dtype, shape=(self.n, self.h, self.w, self.c, 1))
      norm_dist = tfd.Normal(loc=mean, scale=stddev)
      likelihood = abs(norm_dist.cdf(values + half) - norm_dist.cdf(values - half))
      if self.likelihood_bound > 0:
        likelihood_bound = constant_op.constant(
          self.likelihood_bound, dtype=self.dtype)
        likelihood = tfc_math_ops.lower_bound(likelihood, likelihood_bound)

      if not context.executing_eagerly():
        values_shape, likelihood_shape = self.compute_output_shape(inputs.shape)
        values.set_shape(values_shape)
        likelihood.set_shape(likelihood_shape)

      values = array_ops.squeeze(values, [-1])
      likelihood = array_ops.squeeze(likelihood, [-1])

      return values, likelihood
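The likelihood computation in `call_gauss` assigns each value the Gaussian probability mass of a unit-width quantization bin around it: CDF(v + ½) − CDF(v − ½). A back-of-the-envelope check with SciPy (independent of the class above; assumes a zero-mean Gaussian, matching the `mean` constant):

import numpy as np
from scipy.stats import norm

sigma = 2.0
v = np.arange(-2.0, 3.0)  # bin centers -2 .. 2
likelihood = norm.cdf(v + 0.5, scale=sigma) - norm.cdf(v - 0.5, scale=sigma)
print(likelihood)        # symmetric, peaked at v = 0
print(likelihood.sum())  # mass covered by the five central bins (< 1)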
Example #4
    def call(self, inputs, state):
        add = math_ops.add
        sub = math_ops.subtract
        mult = math_ops.multiply

        # computing m_t
        m_t = add(math_ops.matmul(state, self._u),
                  math_ops.matmul(inputs, self._v))
        m_t = nn_ops.bias_add(m_t, self._b)
        m_t = math_ops.sigmoid(m_t)

        # add L1 loss
        ops.add_to_collection('L1 loss', math_ops.abs(m_t - self._m_target))

        # computing e_t (= thr)
        i = gen_math_ops._range(1, self._num_units + 1, 1)
        i = math_ops.cast(i, dtype=dtypes.float32)
        mtD = gen_array_ops.tile(mult(m_t[1], self._num_units),
                                 [self._num_units])
        thr = math_ops.sigmoid(mult(self._sharpness, sub(mtD, i)))
        thr = math_ops.round(add(thr, sub(0.5, self._epsilon)))
        ones = array_ops.ones_like(thr)
        thr_inv = sub(ones, thr)

        # computing h_t
        gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1),
                                      self._kernel)
        gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
        output = self._activation(gate_inputs)
        # Blend the activated candidate with the previous state via the gate.
        output = add(mult(output, thr), mult(state, thr_inv))

        return output, output
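The gate construction is the interesting part: `sigmoid(sharpness * (mtD - i))` is a soft step over the unit indices i = 1..num_units, and adding 0.5 − epsilon before rounding turns it into a hard threshold (soft values above roughly epsilon round to 1, the rest to 0). A standalone NumPy illustration with made-up constants:

import numpy as np

num_units, sharpness, epsilon = 8, 50.0, 0.01
i = np.arange(1, num_units + 1, dtype=np.float64)
mtD = 4.5  # hypothetical "active width" produced by the m_t gate
soft = 1.0 / (1.0 + np.exp(-sharpness * (mtD - i)))  # soft step function
thr = np.round(soft + (0.5 - epsilon))               # hardened 0/1 mask
print(thr)  # [1. 1. 1. 1. 0. 0. 0. 0.]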
Example #5
    def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
        with self.test_session() as sess:
            m_neg = array_ops.placeholder(dtype=dtypes.float32)
            m_pos = array_ops.placeholder(dtype=dtypes.float32)
            s = array_ops.placeholder(dtype=dtypes.float32)

            neg = random_ops.random_normal([20], mean=m_neg, stddev=s, dtype=dtypes.float32)
            pos = random_ops.random_normal([20], mean=m_pos, stddev=s, dtype=dtypes.float32)

            data = array_ops.concat([neg, pos], 0)
            data = math_ops.cast(math_ops.round(data), tf_dtype)
            data = math_ops.minimum(math_ops.maximum(data, 0), 1)
            lab = array_ops.concat([array_ops.zeros([20], dtype=tf_dtype), array_ops.ones([20], dtype=tf_dtype)], 0)

            cm = confusion_matrix.confusion_matrix(lab, data, dtype=tf_dtype, num_classes=2)

            d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})

            truth = np.zeros([2, 2], dtype=np_dtype)
            try:
                range_builder = xrange
            except NameError:  # In Python 3.
                range_builder = range
            for i in range_builder(len(d)):
                truth[l[i], d[i]] += 1

            self.assertEqual(cm_out.dtype, np_dtype)
            self.assertAllClose(cm_out, truth, atol=1e-10)
Example #6
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_once')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metrics.accuracy(predictions=predictions,
                                               labels=labels)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation._StopAfterNEvalsHook(1),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
Example #7
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_repeated')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metric_ops.streaming_accuracy(
            predictions, labels)

        final_values = evaluation.evaluate_repeatedly(
            checkpoint_dir=checkpoint_dir,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ],
            max_number_of_evaluations=1)
        self.assertTrue(final_values['accuracy'] > .99)
Example #8
def assert_integer_form(x,
                        data=None,
                        summarize=None,
                        message=None,
                        name="assert_integer_form"):
    """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

    message = message or "x has non-integer components"
    x = ops.convert_to_tensor(x, name="x")
    casted_x = math_ops.to_int64(x)
    return check_ops.assert_equal(x,
                                  math_ops.cast(math_ops.round(casted_x),
                                                x.dtype),
                                  data=data,
                                  summarize=summarize,
                                  message=message,
                                  name=name)
Example #9
    def testEvaluatePerfectModel(self):
        checkpoint_dir = tempfile.mkdtemp('evaluate_perfect_model_once')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metrics.accuracy(
            predictions=predictions, labels=labels)

        checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

        final_ops_values = evaluation.evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ])
        self.assertGreater(final_ops_values['accuracy'], .99)
Example #10
  def testEvaluatePerfectModel(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_once')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)

    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ])
    self.assertTrue(final_ops_values['accuracy'] > .99)
Example #11
  def testEvaluateWithFiniteInputs(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_with_finite_inputs')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run evaluation. Inputs are fed through input producer for one epoch.
    all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    single_input, single_label = training.slice_input_producer(
        [all_inputs, all_labels], num_epochs=1)
    inputs, labels = training.batch([single_input, single_label], batch_size=6,
                                    allow_smaller_final_batch=True)

    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metrics.accuracy(
        predictions=predictions, labels=labels)

    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy,
                   'eval_steps': evaluation._get_or_create_eval_step()},
        hooks=[evaluation._StopAfterNEvalsHook(None),])
    self.assertTrue(final_ops_values['accuracy'] > .99)
    # Runs evaluation for 4 iterations. First 2 evaluate full batch of 6 inputs
    # each; the 3rd iter evaluates the remaining 4 inputs, and the last one
    # triggers an error which stops evaluation.
    self.assertEqual(final_ops_values['eval_steps'], 4)
Example #12
  def testEvaluationLoopTimeoutWithTimeoutFn(self):
    checkpoint_dir = tempfile.mkdtemp('evaluation_loop_timeout_with_timeout_fn')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metrics.accuracy(
        predictions=predictions, labels=labels)

    timeout_fn_calls = [0]
    def timeout_fn():
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ],
        eval_interval_secs=1,
        max_number_of_evaluations=2,
        timeout=0.1,
        timeout_fn=timeout_fn)
    # We should have evaluated once.
    self.assertGreater(final_values['accuracy'], .99)
    # And the timeout fn should have been called 4 times.
    self.assertEqual(4, timeout_fn_calls[0])
Example #13
  def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
    with self.cached_session() as sess:
      m_neg = array_ops.placeholder(dtype=dtypes.float32)
      m_pos = array_ops.placeholder(dtype=dtypes.float32)
      s = array_ops.placeholder(dtype=dtypes.float32)

      neg = random_ops.random_normal(
          [20], mean=m_neg, stddev=s, dtype=dtypes.float32)
      pos = random_ops.random_normal(
          [20], mean=m_pos, stddev=s, dtype=dtypes.float32)

      data = array_ops.concat([neg, pos], 0)
      data = math_ops.cast(math_ops.round(data), tf_dtype)
      data = math_ops.minimum(math_ops.maximum(data, 0), 1)
      lab = array_ops.concat(
          [
              array_ops.zeros(
                  [20], dtype=tf_dtype), array_ops.ones(
                      [20], dtype=tf_dtype)
          ],
          0)

      cm = confusion_matrix.confusion_matrix(
          lab, data, dtype=tf_dtype, num_classes=2)

      d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})

      truth = np.zeros([2, 2], dtype=np_dtype)
      for i in xrange(len(d)):
        truth[l[i], d[i]] += 1

      self.assertEqual(cm_out.dtype, np_dtype)
      self.assertAllClose(cm_out, truth, atol=1e-10)
Example #14
  def testEvaluationLoopTimeoutWithTimeoutFn(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluation_loop_timeout_with_timeout_fn')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)

    timeout_fn_calls = [0]
    def timeout_fn():
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ],
        eval_interval_secs=1,
        max_number_of_evaluations=2,
        timeout=0.1,
        timeout_fn=timeout_fn)
    # We should have evaluated once.
    self.assertTrue(final_values['accuracy'] > .99)
    # And the timeout fn should have been called 4 times.
    self.assertEqual(4, timeout_fn_calls[0])
Example #15
def specificity(y_true, y_pred):
    y_pred = math_ops.round(y_pred)
    TP = tf.count_nonzero(y_pred * y_true)
    TN = tf.count_nonzero((y_pred - 1) * (y_true - 1))
    FP = tf.count_nonzero(y_pred * (y_true - 1))
    FN = tf.count_nonzero((y_pred - 1) * y_true)
    metric = tf.divide(TN, TN + FP)
    return metric
Example #16
def sensitivity(y_true, y_pred):
    y_pred = math_ops.round(y_pred)
    TP = tf.count_nonzero(y_pred * y_true)
    TN = tf.count_nonzero((y_pred - 1) * (y_true - 1))
    FP = tf.count_nonzero(y_pred * (y_true - 1))
    FN = tf.count_nonzero((y_pred - 1) * y_true)
    metric = tf.divide(TP, TP + FN)
    return metric
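Both metrics above rely on a compact trick: with hard 0/1 labels, `y_pred - 1` is nonzero exactly where the prediction is 0, so elementwise products of the shifted and unshifted vectors isolate each confusion-matrix cell. A quick NumPy sanity check with toy labels:

import numpy as np

y_true = np.array([1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1, 0])
TP = np.count_nonzero(y_pred * y_true)              # both 1         -> 2
TN = np.count_nonzero((y_pred - 1) * (y_true - 1))  # both 0         -> 2
FP = np.count_nonzero(y_pred * (y_true - 1))        # pred 1, true 0 -> 1
FN = np.count_nonzero((y_pred - 1) * y_true)        # pred 0, true 1 -> 1
print(TN / (TN + FP), TP / (TP + FN))  # specificity 0.667, sensitivity 0.667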
Example #17
 def testRounding(self):
   x = [0.49, 0.7, -0.3, -0.8]
   for dtype in [np.float32, np.double]:
     x_np = np.array(x, dtype=dtype)
     with self.test_session(use_gpu=True):
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y_tf = math_ops.round(x_tf)
       y_tf_np = y_tf.eval()
       y_np = np.round(x_np)
       self.assertAllClose(y_tf_np, y_np, atol=1e-2)
Example #18
    def call(self, y_true, y_pred):
        y_pred = ops.convert_to_tensor_v2(y_pred)
        y_true = math_ops.cast(y_true, y_pred.dtype)

        y_pred = math_ops.round(y_pred)
        y_true = math_ops.round(y_true)
        y_pred_mean = math_ops.reduce_mean(y_pred, keepdims=True)
        y_true_mean = math_ops.reduce_mean(y_true, keepdims=True)
        pred_std = math_ops.reduce_std(y_pred, keepdims=True)
        true_std = math_ops.reduce_std(y_true, keepdims=True)

        pearson = self.pcc(y_true, y_pred)
        ccc_n = (2.0 * pearson * pred_std * true_std)

        ccc_d = (math_ops.square(pred_std) + math_ops.square(true_std) +
                 math_ops.square(y_pred_mean - y_true_mean))

        ccc = ccc_n / (ccc_d + 1e-25)
        return ccc
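What `call` returns is Lin's concordance correlation coefficient, CCC = 2·ρ·σ_pred·σ_true / (σ_pred² + σ_true² + (μ_pred − μ_true)²), which penalizes both decorrelation and bias. A standalone NumPy sketch (`self.pcc` above is assumed to be a Pearson correlation helper; here np.corrcoef stands in for it):

import numpy as np

def ccc(y_true, y_pred):
    rho = np.corrcoef(y_true, y_pred)[0, 1]  # Pearson correlation
    sx, sy = y_true.std(), y_pred.std()
    mx, my = y_true.mean(), y_pred.mean()
    return 2.0 * rho * sx * sy / (sx**2 + sy**2 + (mx - my)**2 + 1e-25)

a = np.array([1.0, 2.0, 3.0, 4.0])
print(ccc(a, a))        # 1.0: perfect agreement
print(ccc(a, a + 1.0))  # ~0.71: perfectly correlated but biased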
Example #19
 def testRounding(self):
   x = np.arange(-5.0, 5.0, .25)
   for dtype in [np.float32, np.double, np.int32]:
     x_np = np.array(x, dtype=dtype)
     with test_util.device(use_gpu=True):
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y_tf = math_ops.round(x_tf)
       y_tf_np = self.evaluate(y_tf)
       y_np = np.round(x_np)
       self.assertAllClose(y_tf_np, y_np, atol=1e-2)
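These tests compare against `np.round` because both implementations use banker's rounding: exact halves go to the nearest even integer rather than always away from zero. A short TF 2.x demonstration:

import numpy as np
import tensorflow as tf

halves = np.array([-1.5, -0.5, 0.5, 1.5, 2.5], dtype=np.float32)
print(tf.round(halves).numpy())  # [-2. -0.  0.  2.  2.]
print(np.round(halves))          # [-2. -0.  0.  2.  2.]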
Example #20
 def testRounding(self):
     x = np.arange(-5.0, 5.0, .25)
     for dtype in [np.float32, np.double, np.int32]:
         x_np = np.array(x, dtype=dtype)
         with test_util.device(use_gpu=True):
             x_tf = constant_op.constant(x_np, shape=x_np.shape)
             y_tf = math_ops.round(x_tf)
             y_tf_np = self.evaluate(y_tf)
             y_np = np.round(x_np)
             self.assertAllClose(y_tf_np, y_np, atol=1e-2)
Example #21
    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = ops.convert_to_tensor_v2(y_pred)
        y_true = math_ops.cast(y_true, y_pred.dtype)

        y_pred = math_ops.round(y_pred)
        y_true = math_ops.round(y_true)
        y_pred_mean = math_ops.reduce_mean(y_pred, keepdims=True)
        y_true_mean = math_ops.reduce_mean(y_true, keepdims=True)
        pred_std = math_ops.reduce_std(y_pred, keepdims=True)
        true_std = math_ops.reduce_std(y_true, keepdims=True)

        pearson = self.pcc(y_true, y_pred)
        ccc_n = (2.0 * pearson * pred_std * true_std)

        ccc_d = (math_ops.square(pred_std) + math_ops.square(true_std) +
                 math_ops.square(y_pred_mean - y_true_mean))

        ccc = ccc_n / (ccc_d + 1e-25)
        self.ccc_r.assign_add(tf.reduce_sum(ccc))
        self.total_count.assign_add(len(ccc))
Example #22
def f1_score(y_true, y_pred):
    y_pred = math_ops.round(y_pred)
    TP = tf.count_nonzero(y_pred * y_true)
    TN = tf.count_nonzero((y_pred - 1) * (y_true - 1))
    FP = tf.count_nonzero(y_pred * (y_true - 1))
    FN = tf.count_nonzero((y_pred - 1) * y_true)
    precision = tf.divide(TP, TP + FP)
    sensitivity = tf.divide(TP, TP + FN)
    metric = tf.divide(tf.multiply(2 * precision, sensitivity),
                       precision + sensitivity)
    return metric
Example #23
 def testRounding(self):
     x = [0.49, 0.7, -0.3, -0.8]
     # TODO(nolivia): Remove this when RoundOp is forwards compatible
     # x = np.arange(-5.0, 5.0, .25)
     for dtype in [np.float32, np.double, np.int32]:
         x_np = np.array(x, dtype=dtype)
         with self.test_session(use_gpu=True):
             x_tf = constant_op.constant(x_np, shape=x_np.shape)
             y_tf = math_ops.round(x_tf)
             y_tf_np = y_tf.eval()
             y_np = np.round(x_np)
             self.assertAllClose(y_tf_np, y_np, atol=1e-2)
Example #24
def adjust_contrast(images, contrast_factor, min_value=None, max_value=None):
  """Adjust contrast of RGB or grayscale images.

  `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
  interpreted as `[height, width, channels]`.  The other dimensions only
  represent a collection of images, such as `[batch, height, width, channels].`

  Contrast is adjusted independently for each channel of each image.

  For each channel, this Op first computes the mean of the image pixels in the
  channel and then adjusts each component `x` of each pixel to
  `(x - mean) * contrast_factor + mean`.

  The adjusted values are then clipped to fit in the `[min_value, max_value]`
  interval. If `min_value` or `max_value` is not given, it is replaced with the
  minimum and maximum values for the data type of `images` respectively.

  The contrast-adjusted image is always computed as `float`, and it is
  cast back to its original type after clipping.

  Args:
    images: Images to adjust.  At least 3-D.
    contrast_factor: A float multiplier for adjusting contrast.
    min_value: Minimum value for clipping the adjusted pixels.
    max_value: Maximum value for clipping the adjusted pixels.

  Returns:
    The contrast-adjusted image or images.

  Raises:
    ValueError: if the arguments are invalid.
  """
  _CheckAtLeast3DImage(images)

  # If these are None, the min/max should be a nop, but still prevent overflows
  # from the cast back to images.dtype at the end of adjust_contrast.
  if min_value is None:
    min_value = images.dtype.min
  if max_value is None:
    max_value = images.dtype.max

  with ops.op_scope(
      [images, contrast_factor, min_value,
       max_value], None, 'adjust_contrast') as name:
    adjusted = gen_image_ops.adjust_contrast(images,
                                             contrast_factor=contrast_factor,
                                             min_value=min_value,
                                             max_value=max_value,
                                             name=name)
    if images.dtype.is_integer:
      return math_ops.cast(math_ops.round(adjusted), images.dtype)
    else:
      return math_ops.cast(adjusted, images.dtype)
Example #25
 def testRounding(self):
   x = [0.49, 0.7, -0.3, -0.8]
   # TODO(nolivia): Remove this when RoundOp is forwards compatible
   # x = np.arange(-5.0, 5.0, .25)
   for dtype in [np.float32, np.double, np.int32]:
     x_np = np.array(x, dtype=dtype)
     with self.test_session(use_gpu=True):
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y_tf = math_ops.round(x_tf)
       y_tf_np = y_tf.eval()
       y_np = np.round(x_np)
       self.assertAllClose(y_tf_np, y_np, atol=1e-2)
Example #26
def adjust_contrast(images, contrast_factor, min_value=None, max_value=None):
    """Adjust contrast of RGB or grayscale images.

  `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
  interpreted as `[height, width, channels]`.  The other dimensions only
  represent a collection of images, such as `[batch, height, width, channels].`

  Contrast is adjusted independently for each channel of each image.

  For each channel, this Op first computes the mean of the image pixels in the
  channel and then adjusts each component `x` of each pixel to
  `(x - mean) * contrast_factor + mean`.

  The adjusted values are then clipped to fit in the `[min_value, max_value]`
  interval. If `min_value` or `max_value` is not given, it is replaced with the
  minimum and maximum values for the data type of `images` respectively.

  The contrast-adjusted image is always computed as `float`, and it is
  cast back to its original type after clipping.

  Args:
    images: Images to adjust.  At least 3-D.
    contrast_factor: A float multiplier for adjusting contrast.
    min_value: Minimum value for clipping the adjusted pixels.
    max_value: Maximum value for clipping the adjusted pixels.

  Returns:
    The contrast-adjusted image or images.

  Raises:
    ValueError: if the arguments are invalid.
  """
    _CheckAtLeast3DImage(images)

    # If these are None, the min/max should be a nop, but still prevent overflows
    # from the cast back to images.dtype at the end of adjust_contrast.
    if min_value is None:
        min_value = images.dtype.min
    if max_value is None:
        max_value = images.dtype.max

    with ops.op_scope([images, contrast_factor, min_value, max_value], None,
                      'adjust_contrast') as name:
        adjusted = gen_image_ops.adjust_contrast(
            images,
            contrast_factor=contrast_factor,
            min_value=min_value,
            max_value=max_value,
            name=name)
        if images.dtype.is_integer:
            return math_ops.cast(math_ops.round(adjusted), images.dtype)
        else:
            return math_ops.cast(adjusted, images.dtype)
Example #27
def around(a, decimals=0):  # pylint: disable=missing-docstring
    a = asarray(a)
    dtype = a.dtype
    factor = math.pow(10, decimals)
    # Use float as the working dtype instead of a.dtype, because a.dtype can be
    # integer and `decimals` can be negative.
    float_dtype = np_dtypes.default_float_type()
    a = a.astype(float_dtype).data
    factor = math_ops.cast(factor, float_dtype)
    a = math_ops.multiply(a, factor)
    a = math_ops.round(a)
    a = math_ops.divide(a, factor)
    return np_utils.tensor_to_ndarray(a).astype(dtype)
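`around` reduces rounding at arbitrary precision to `math_ops.round` via the scale-round-unscale identity round(a, d) = round(a · 10^d) / 10^d. A plain NumPy check that this matches `np.around`, including negative `decimals`:

import numpy as np

a = np.array([3.14159, 2.71828, 1234.5])
for decimals in (2, 0, -2):
    factor = 10.0 ** decimals
    manual = np.round(a * factor) / factor
    print(decimals, np.allclose(manual, np.around(a, decimals)))  # all True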
Example #28
    def _update_mask(self, weights, threshold):
        """Updates the mask for a given weight tensor.

    This function first computes the cdf of the weight tensor, and estimates
    the threshold value such that 'desired_sparsity' fraction of weights
    have magnitude less than the threshold.

    Args:
      weights: The weight tensor that needs to be masked.
      threshold: The current threshold value. The function will compute a new
        threshold and return the exponential moving average using the current
        value of threshold

    Returns:
      new_threshold: The new value of the threshold based on weights, and
        sparsity at the current global_step
      new_mask: A numpy array of the same size and shape as weights containing
        0 or 1 to indicate which of the values in weights falls below
        the threshold

    Raises:
      ValueError: if sparsity is not defined
    """
        if self._sparsity is None:
            raise ValueError('Sparsity variable undefined')

        sparsity = self._get_sparsity(weights.op.name)
        with ops.name_scope(weights.op.name + '_pruning_ops'):
            abs_weights = math_ops.abs(weights)
            k = math_ops.cast(
                math_ops.round(
                    math_ops.cast(array_ops.size(abs_weights), dtypes.float32)
                    * (1 - sparsity)), dtypes.int32)
            # Sort the entire array
            values, _ = nn_ops.top_k(array_ops.reshape(abs_weights, [-1]),
                                     k=array_ops.size(abs_weights))
            # Grab the (k-1) th value
            current_threshold = array_ops.gather(values, k - 1)
            smoothed_threshold = math_ops.add_n([
                math_ops.multiply(current_threshold,
                                  1 - self._spec.threshold_decay),
                math_ops.multiply(threshold, self._spec.threshold_decay)
            ])

            new_mask = math_ops.cast(
                math_ops.greater_equal(abs_weights, smoothed_threshold),
                dtypes.float32)

        return smoothed_threshold, new_mask
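The threshold estimate is an order statistic: with n weights and target sparsity s, k = round(n · (1 − s)) weights are kept, and the k-th largest magnitude becomes the cut-off (here additionally smoothed by an exponential moving average). A standalone NumPy sketch of the un-smoothed computation:

import numpy as np

weights = np.array([0.05, -0.6, 0.3, -0.01, 0.9, 0.2])
sparsity = 0.5
abs_w = np.abs(weights)
k = int(np.round(abs_w.size * (1 - sparsity)))  # number of weights to keep
threshold = np.sort(abs_w)[::-1][k - 1]         # k-th largest magnitude
mask = (abs_w >= threshold).astype(np.float32)
print(threshold, mask)  # 0.3 [0. 1. 1. 0. 1. 0.]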
Example #29
def _random_flip(image, flip_index, seed, scope_name, flip_3D_together=False):
    """Randomly (50% chance) flip an image along axis `flip_index`.

    Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    flip_index: Dimension along which to flip the image.
      Vertical: 0, Horizontal: 1
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.
    scope_name: Name of the scope in which the ops are added.

    Returns:
    A tensor of the same type and shape as `image`.

    Raises:
    ValueError: if the shape of `image` not supported.
    """
    with ops.name_scope(None, scope_name, [image]) as scope:
        image = ops.convert_to_tensor(image, name='image')
        shape = image.get_shape()
        if shape.ndims == 3 or shape.ndims is None:
            uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
            mirror_cond = math_ops.less(uniform_random, .5)
            result = control_flow_ops.cond(
                mirror_cond,
                lambda: array_ops.reverse(image, [flip_index]),
                lambda: image,
                name=scope)
            return fix_image_flip_shape(image, result)
        elif shape.ndims == 4:
            batch_size = array_ops.shape(image)[0]
            if flip_3D_together:
                uniform_random = array_ops.repeat(
                    random_ops.random_uniform([1], 0, 1.0, seed=seed),
                    batch_size)
            else:
                uniform_random = random_ops.random_uniform([batch_size],
                                                           0,
                                                           1.0,
                                                           seed=seed)
            flips = math_ops.round(
                array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]))
            flips = math_ops.cast(flips, image.dtype)
            flipped_input = array_ops.reverse(image, [flip_index + 1])
            return flips * flipped_input + (1 - flips) * image
        else:
            raise ValueError('\'image\' must have either 3 or 4 dimensions.')
Example #30
  def _update_mask(self, weights, threshold):
    """Updates the mask for a given weight tensor.

    This function first computes the cdf of the weight tensor, and estimates
    the threshold value such that 'desired_sparsity' fraction of weights
    have magnitude less than the threshold.

    Args:
      weights: The weight tensor that needs to be masked.
      threshold: The current threshold value. The function will compute a new
        threshold and return the exponential moving average using the current
        value of threshold

    Returns:
      new_threshold: The new value of the threshold based on weights, and
        sparsity at the current global_step
      new_mask: A numpy array of the same size and shape as weights containing
        0 or 1 to indicate which of the values in weights falls below
        the threshold

    Raises:
      ValueError: if sparsity is not defined
    """
    if self._sparsity is None:
      raise ValueError('Sparsity variable undefined')

    sparsity = self._get_sparsity(weights.op.name)
    with ops.name_scope(weights.op.name + '_pruning_ops'):
      abs_weights = math_ops.abs(weights)
      k = math_ops.cast(
          math_ops.round(
              math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
              (1 - sparsity)), dtypes.int32)
      # Sort the entire array
      values, _ = nn_ops.top_k(
          array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
      # Grab the (k-1) th value
      current_threshold = array_ops.gather(values, k - 1)
      smoothed_threshold = math_ops.add_n([
          math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
          math_ops.multiply(threshold, self._spec.threshold_decay)
      ])

      new_mask = math_ops.cast(
          math_ops.greater_equal(abs_weights, smoothed_threshold),
          dtypes.float32)

    return smoothed_threshold, new_mask
Example #31
 def testRounding(self):
   try:
     x = [0.49, 0.7, -0.3, -0.8]
     for dtype in [np.float32, np.double]:
       x_np = np.array(x, dtype=dtype)
       for use_gpu in [True, False]:
         with self.test_session(use_gpu=use_gpu):
           x_tf = constant_op.constant(x_np, shape=x_np.shape)
           y_tf = math_ops.round(x_tf)
           y_tf_np = y_tf.eval()
           y_np = np.round(x_np)
           self.assertAllClose(y_tf_np, y_np, atol=1e-2)
   except:
     import sys, pdb, traceback
     type, value, tb = sys.exc_info()
     traceback.print_exc()
     pdb.post_mortem(tb)
Example #32
        def __call__(self, y_true, y_pred):
          """Computes the number of true positives in a batch.

          Args:
              y_true: Tensor, batch_wise labels
              y_pred: Tensor, batch_wise predictions

          Returns:
              The total number of true positives seen this epoch at the
                  completion of the batch.
          """
          y_true = math_ops.cast(y_true, 'int32')
          y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
          correct_preds = math_ops.cast(math_ops.equal(y_pred, y_true), 'int32')
          true_pos = math_ops.cast(
              math_ops.reduce_sum(correct_preds * y_true), 'int32')
          current_true_pos = self.true_positives * 1
          self.add_update(
              state_ops.assign_add(self.true_positives, true_pos),
              inputs=[y_true, y_pred])
          return current_true_pos + true_pos
Example #33
        def __call__(self, y_true, y_pred):
          """Computes the number of true positives in a batch.

          Args:
              y_true: Tensor, batch_wise labels
              y_pred: Tensor, batch_wise predictions

          Returns:
              The total number of true positives seen this epoch at the
                  completion of the batch.
          """
          y_true = math_ops.cast(y_true, 'int32')
          y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
          correct_preds = math_ops.cast(math_ops.equal(y_pred, y_true), 'int32')
          true_pos = math_ops.cast(
              math_ops.reduce_sum(correct_preds * y_true), 'int32')
          current_true_pos = self.true_positives * 1
          self.add_update(
              state_ops.assign_add(self.true_positives, true_pos),
              inputs=[y_true, y_pred])
          return current_true_pos + true_pos
Example #34
    def testEvaluateWithFiniteInputs(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_with_finite_inputs')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run evaluation. Inputs are fed through input producer for one epoch.
        all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        single_input, single_label = training.slice_input_producer(
            [all_inputs, all_labels], num_epochs=1)
        inputs, labels = training.batch([single_input, single_label],
                                        batch_size=6,
                                        allow_smaller_final_batch=True)

        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metrics.accuracy(predictions=predictions,
                                               labels=labels)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={
                'accuracy': accuracy,
                'eval_steps': evaluation._get_or_create_eval_step()
            },
            hooks=[
                evaluation._StopAfterNEvalsHook(None),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
        # Runs evaluation for 4 iterations. First 2 evaluate full batch of 6 inputs
        # each; the 3rd iter evaluates the remaining 4 inputs, and the last one
        # triggers an error which stops evaluation.
        self.assertEqual(final_ops_values['eval_steps'], 4)
Example #35
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name)
Example #36
  def testMultiThreadedEstimateDataDistribution(self):
    num_classes = 10

    # Set up graph.
    random_seed.set_random_seed(1234)
    label = math_ops.cast(
        math_ops.round(random_ops.random_uniform([1]) * num_classes),
        dtypes_lib.int32)

    prob_estimate = sampling_ops._estimate_data_distribution(  # pylint: disable=protected-access
        label, num_classes)
    # Check that prob_estimate is well-behaved in a multithreaded context.
    _, _, [prob_estimate] = sampling_ops._verify_input(  # pylint: disable=protected-access
        [], label, [prob_estimate])

    # Use queues to run multiple threads over the graph, each of which
    # fetches `prob_estimate`.
    queue = data_flow_ops.FIFOQueue(
        capacity=25,
        dtypes=[prob_estimate.dtype],
        shapes=[prob_estimate.get_shape()])
    enqueue_op = queue.enqueue([prob_estimate])
    queue_runner_impl.add_queue_runner(
        queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
    out_tensor = queue.dequeue()

    # Run the multi-threaded session.
    with self.cached_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      variables.global_variables_initializer().run()

      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      for _ in range(25):
        sess.run([out_tensor])

      coord.request_stop()
      coord.join(threads)
Example #37
def adjust_brightness(image, delta, min_value=None, max_value=None):
  """Adjust the brightness of RGB or Grayscale images.

  The value `delta` is added to all components of the tensor `image`. `image`
  and `delta` are cast to `float` before adding, and the resulting values are
  clamped to `[min_value, max_value]`. Finally, the result is cast back to
  `images.dtype`.

  If `min_value` or `max_value` are not given, they are set to the minimum and
  maximum allowed values for `image.dtype` respectively.

  Args:
    image: A tensor.
    delta: A scalar. Amount to add to the pixel values.
    min_value: Minimum value for output.
    max_value: Maximum value for output.

  Returns:
    A tensor of the same shape and type as `image`.
  """
  if min_value is None:
    min_value = image.dtype.min
  if max_value is None:
    max_value = image.dtype.max

  with ops.op_scope([image, delta, min_value, max_value], None,
                    'adjust_brightness') as name:
    adjusted = math_ops.add(
        math_ops.cast(image, dtypes.float32),
        math_ops.cast(delta, dtypes.float32),
        name=name)
    if image.dtype.is_integer:
      rounded = math_ops.round(adjusted)
    else:
      rounded = adjusted
    clipped = clip_ops.clip_by_value(rounded, float(min_value),
                                     float(max_value))
    output = math_ops.cast(clipped, image.dtype)
    return output
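For integer image dtypes the order of operations matters: add in float, round, clip to the representable range, and only then cast back, so that out-of-range sums saturate instead of wrapping around. A minimal TF 2.x sketch for uint8 pixels (independent of the legacy `op_scope` code above):

import tensorflow as tf

image = tf.constant([[10, 250]], dtype=tf.uint8)
delta = 10.0
adjusted = tf.cast(image, tf.float32) + delta
rounded = tf.round(adjusted)                     # integer dtype: round first
clipped = tf.clip_by_value(rounded, 0.0, 255.0)  # saturate instead of wrap
print(tf.cast(clipped, tf.uint8).numpy())        # [[ 20 255]]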
Example #38
    def testMultiThreadedEstimateDataDistribution(self):
        num_classes = 10

        # Set up graph.
        random_seed.set_random_seed(1234)
        label = math_ops.cast(
            math_ops.round(random_ops.random_uniform([1]) * num_classes),
            dtypes_lib.int32)

        prob_estimate = sampling_ops._estimate_data_distribution(  # pylint: disable=protected-access
            label, num_classes)
        # Check that prob_estimate is well-behaved in a multithreaded context.
        _, _, [prob_estimate] = sampling_ops._verify_input(  # pylint: disable=protected-access
            [], label, [prob_estimate])

        # Use queues to run multiple threads over the graph, each of which
        # fetches `prob_estimate`.
        queue = data_flow_ops.FIFOQueue(capacity=25,
                                        dtypes=[prob_estimate.dtype],
                                        shapes=[prob_estimate.get_shape()])
        enqueue_op = queue.enqueue([prob_estimate])
        queue_runner_impl.add_queue_runner(
            queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
        out_tensor = queue.dequeue()

        # Run the multi-threaded session.
        with self.cached_session() as sess:
            # Need to initialize variables that keep running total of classes seen.
            variables.global_variables_initializer().run()

            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(coord=coord)

            for _ in range(25):
                sess.run([out_tensor])

            coord.request_stop()
            coord.join(threads)
Example #39
def adjust_brightness(image, delta, min_value=None, max_value=None):
    """Adjust the brightness of RGB or Grayscale images.

  The value `delta` is added to all components of the tensor `image`. `image`
  and `delta` are cast to `float` before adding, and the resulting values are
  clamped to `[min_value, max_value]`. Finally, the result is cast back to
  `images.dtype`.

  If `min_value` or `max_value` are not given, they are set to the minimum and
  maximum allowed values for `image.dtype` respectively.

  Args:
    image: A tensor.
    delta: A scalar. Amount to add to the pixel values.
    min_value: Minimum value for output.
    max_value: Maximum value for output.

  Returns:
    A tensor of the same shape and type as `image`.
  """
    if min_value is None:
        min_value = image.dtype.min
    if max_value is None:
        max_value = image.dtype.max

    with ops.op_scope([image, delta, min_value, max_value], None,
                      'adjust_brightness') as name:
        adjusted = math_ops.add(math_ops.cast(image, dtypes.float32),
                                math_ops.cast(delta, dtypes.float32),
                                name=name)
        if image.dtype.is_integer:
            rounded = math_ops.round(adjusted)
        else:
            rounded = adjusted
        clipped = clip_ops.clip_by_value(rounded, float(min_value),
                                         float(max_value))
        output = math_ops.cast(clipped, image.dtype)
        return output
Example #40
    def _update_mask(self, weights):
        """Updates the mask for a given weight tensor.

    This function first estimates the threshold value such that
    a given fraction of weights have magnitude less than
    the threshold.

    Args:
      weights: The weight tensor that needs to be masked.

    Returns:
      new_threshold: The new value of the threshold based on weights, and
        sparsity at the current global_step
      new_mask: A numpy array of the same size and shape as weights containing
        0 or 1 to indicate which of the values in weights falls below
        the threshold

    Raises:
      ValueError: if sparsity is not defined
    """
        sparsity = self._pruning_schedule(self._step_fn())[1]
        with ops.name_scope('pruning_ops'):
            abs_weights = math_ops.abs(weights)
            k = math_ops.cast(
                math_ops.round(
                    math_ops.cast(array_ops.size(abs_weights), dtypes.float32)
                    * (1 - sparsity)), dtypes.int32)
            # Sort the entire array
            values, _ = nn_ops.top_k(array_ops.reshape(abs_weights, [-1]),
                                     k=array_ops.size(abs_weights))
            # Grab the (k-1)th value

            current_threshold = array_ops.gather(values, k - 1)
            new_mask = math_ops.cast(
                math_ops.greater_equal(abs_weights, current_threshold),
                dtypes.float32)
        return current_threshold, new_mask
Example #41
 def safe_polygamma(x, y):
     return math_ops.polygamma(
         math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
Example #42
def _assert_integer_form(x):
  """Check x for integer components (or floats that are equal to integers)."""
  x = ops.convert_to_tensor(x, name='x')
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(x, math_ops.cast(
      math_ops.round(casted_x), x.dtype))
Example #43
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.


  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar `Tensor` in `[0, 100]`. The percentile.
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axes that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {"lower", "higher", "nearest"}.  Default: "nearest"
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity.
      If False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is "percentile"

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
      `axis` is `None`, a scalar.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s.  Found %s" %
                       (allowed_interpolations, interpolation))

  with ops.name_scope(name, [x, q]):
    x = ops.convert_to_tensor(x, name="x")
    q = math_ops.to_float(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          check_ops.assert_rank(q, 0), check_ops.assert_greater_equal(q, 0.),
          check_ops.assert_less_equal(q, 100.)
      ], q)

    if axis is None:
      y = array_ops.reshape(x, [-1])
    else:
      axis = ops.convert_to_tensor(axis, name="axis")
      check_ops.assert_integer(axis)
      axis_ndims = _get_static_ndims(
          axis, expect_static=True, expect_ndims_no_more_than=1)
      axis_const = tensor_util.constant_value(axis)
      if axis_const is None:
        raise ValueError(
            "Expected argument 'axis' to be statically available.  Found: %s" %
            axis)
      axis = axis_const
      if axis_ndims == 0:
        axis = [axis]
      axis = [int(a) for a in axis]
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative(axis, x_ndims)
      y = _move_dims_to_flat_end(x, axis, x_ndims)

    frac_at_q_or_above = 1. - q / 100.
    d = math_ops.to_float(array_ops.shape(y)[-1])

    if interpolation == "lower":
      index = math_ops.ceil((d - 1) * frac_at_q_or_above)
    elif interpolation == "higher":
      index = math_ops.floor((d - 1) * frac_at_q_or_above)
    elif interpolation == "nearest":
      index = math_ops.round((d - 1) * frac_at_q_or_above)

    # Sort everything, not just the top 'k' entries, which allows multiple calls
    # to sort only once (under the hood) and use CSE.
    sorted_y = _sort_tensor(y)

    # result.shape = B
    result = sorted_y[..., math_ops.to_int32(index)]
    result.set_shape(y.get_shape()[:-1])

    if keep_dims:
      if axis is None:
        # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
        ones_vec = array_ops.ones(
            shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
        result *= array_ops.ones(ones_vec, dtype=x.dtype)
      else:
        result = _insert_back_keep_dims(result, axis)

    return result
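For the 'nearest' case the whole computation reduces to index arithmetic on a sorted copy: the result is the element round((n − 1) · (1 − q/100)) positions from the top. A NumPy cross-check against the docstring example (a sketch, not the implementation above):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
q = 30.0
index = int(np.round((x.size - 1) * (1.0 - q / 100.0)))  # positions from top
print(np.sort(x)[::-1][index])  # 2.0, matching the 30th-percentile example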
Example #44
  def build(self, input_shape):
    """Builds the layer.

    Creates the variables for the network modeling the densities, creates the
    auxiliary loss estimating the median and tail quantiles of the densities,
    and then uses that to create the probability mass functions and the update
    op that produces the discrete cumulative density functions used by the range
    coder.

    Args:
      input_shape: Shape of the input tensor, used to get the number of
        channels.

    Raises:
      ValueError: if `input_shape` doesn't specify the length of the channel
        dimension.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = self._channel_axis(input_shape.ndims)
    channels = input_shape[channel_axis].value
    if channels is None:
      raise ValueError("The channel dimension of the inputs must be defined.")
    self.input_spec = base_layer.InputSpec(
        ndim=input_shape.ndims, axes={channel_axis: channels})
    filters = (1,) + self.filters + (1,)
    scale = self.init_scale ** (1 / (len(self.filters) + 1))

    # Create variables.
    self._matrices = []
    self._biases = []
    self._factors = []
    for i in range(len(self.filters) + 1):
      init = np.log(np.expm1(1 / scale / filters[i + 1]))
      matrix = self.add_variable(
          "matrix_{}".format(i), dtype=self.dtype,
          shape=(channels, filters[i + 1], filters[i]),
          initializer=init_ops.Constant(init))
      matrix = nn.softplus(matrix)
      self._matrices.append(matrix)

      bias = self.add_variable(
          "bias_{}".format(i), dtype=self.dtype,
          shape=(channels, filters[i + 1], 1),
          initializer=init_ops.RandomUniform(-.5, .5))
      self._biases.append(bias)

      if i < len(self.filters):
        factor = self.add_variable(
            "factor_{}".format(i), dtype=self.dtype,
            shape=(channels, filters[i + 1], 1),
            initializer=init_ops.Zeros())
        factor = math_ops.tanh(factor)
        self._factors.append(factor)

    # To figure out what range of the densities to sample, we need to compute
    # the quantiles given by `tail_mass / 2` and `1 - tail_mass / 2`. Since we
    # can't take inverses of the cumulative directly, we make it an optimization
    # problem:
    # `quantiles = argmin(|logit(cumulative) - target|)`
    # where `target` is `logit(tail_mass / 2)` or `logit(1 - tail_mass / 2)`.
    # Taking the logit (inverse of sigmoid) of the cumulative makes the
    # representation of the right target more numerically stable.

    # Numerically stable way of computing logits of `tail_mass / 2`
    # and `1 - tail_mass / 2`.
    target = np.log(2 / self.tail_mass - 1)
    # Compute lower and upper tail quantile as well as median.
    target = constant_op.constant([-target, 0, target], dtype=self.dtype)

    def quantiles_initializer(shape, dtype=None, partition_info=None):
      del partition_info  # unused
      assert tuple(shape[1:]) == (1, 3)
      init = constant_op.constant(
          [[[-self.init_scale, 0, self.init_scale]]], dtype=dtype)
      return array_ops.tile(init, (shape[0], 1, 1))

    quantiles = self.add_variable(
        "quantiles", shape=(channels, 1, 3), dtype=self.dtype,
        initializer=quantiles_initializer)
    logits = self._logits_cumulative(quantiles, stop_gradient=True)
    loss = math_ops.reduce_sum(abs(logits - target))
    self.add_loss(loss, inputs=None)

    # Save medians for `call`, `compress`, and `decompress`.
    self._medians = quantiles[:, :, 1:2]
    if not self.optimize_integer_offset:
      self._medians = math_ops.round(self._medians)

    # Largest distance observed between lower tail quantile and median,
    # or between median and upper tail quantile.
    minima = math_ops.reduce_max(self._medians - quantiles[:, :, 0:1])
    maxima = math_ops.reduce_max(quantiles[:, :, 2:3] - self._medians)
    minmax = math_ops.maximum(minima, maxima)
    minmax = math_ops.ceil(minmax)
    minmax = math_ops.maximum(minmax, 1)

    # Sample the density up to `minmax` around the median.
    samples = math_ops.range(-minmax, minmax + 1, dtype=self.dtype)
    samples += self._medians

    half = constant_op.constant(.5, dtype=self.dtype)
    # We strip the sigmoid from the end here, so we can use the special rule
    # below to only compute differences in the left tail of the sigmoid.
    # This increases numerical stability (see explanation in `call`).
    lower = self._logits_cumulative(samples - half, stop_gradient=True)
    upper = self._logits_cumulative(samples + half, stop_gradient=True)
    # Flip signs if we can move more towards the left tail of the sigmoid.
    sign = -math_ops.sign(math_ops.add_n([lower, upper]))
    pmf = abs(math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
    # Add tail masses to first and last bin of pmf, as we clip values for
    # compression, meaning that out-of-range values get mapped to these bins.
    pmf = array_ops.concat([
        math_ops.add_n([pmf[:, 0, :1], math_ops.sigmoid(lower[:, 0, :1])]),
        pmf[:, 0, 1:-1],
        math_ops.add_n([pmf[:, 0, -1:], math_ops.sigmoid(-upper[:, 0, -1:])]),
        ], axis=-1)
    self._pmf = pmf

    cdf = coder_ops.pmf_to_quantized_cdf(
        pmf, precision=self.range_coder_precision)
    def cdf_getter(*args, **kwargs):
      del args, kwargs  # ignored
      return variable_scope.get_variable(
          "quantized_cdf", dtype=dtypes.int32, initializer=cdf,
          trainable=False, validate_shape=False, collections=())
    # Need to provide a fake shape here since add_variable insists on it.
    self._quantized_cdf = self.add_variable(
        "quantized_cdf", shape=(channels, 1), dtype=dtypes.int32,
        getter=cdf_getter, trainable=False)

    update_op = state_ops.assign(
        self._quantized_cdf, cdf, validate_shape=False)
    self.add_update(update_op, inputs=None)

    super(EntropyBottleneck, self).build(input_shape)
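
The tail-folding and `pmf_to_quantized_cdf` steps at the end of `build` can be illustrated with a small self-contained sketch. Here the learned cumulative is replaced by a plain standard-logistic sigmoid, and `toy_quantized_cdf` is a hypothetical, simplified stand-in for the real `coder_ops` kernel (which additionally rescales so the total is exactly `2 ** precision` and guards against empty bins):

import numpy as np

def sigmoid(x):
  return 1 / (1 + np.exp(-x))

def toy_quantized_cdf(median, minmax, precision=16):
  # Integer sample grid of width 2 * minmax + 1 around the median.
  samples = np.arange(-minmax, minmax + 1, dtype=np.float64) + median
  # Per-bin mass: CDF(x + .5) - CDF(x - .5).
  pmf = sigmoid(samples + .5) - sigmoid(samples - .5)
  # Fold the tail masses into the first and last bins, mirroring the
  # clipping of out-of-range values at compression time.
  pmf[0] += sigmoid(samples[0] - .5)
  pmf[-1] += 1 - sigmoid(samples[-1] + .5)
  # Quantize to integer frequencies and accumulate into a CDF table.
  freq = np.maximum(np.round(pmf * 2 ** precision).astype(np.int64), 1)
  return np.concatenate([[0], np.cumsum(freq)])

print(toy_quantized_cdf(median=0.0, minmax=4))
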
Ejemplo n.º 45
0
  def call(self, inputs, training):
    """Pass a tensor through the bottleneck.

    Args:
      inputs: The tensor to be passed through the bottleneck.
      training: Boolean. If `True`, returns a differentiable approximation of
        the inputs, and their likelihoods under the modeled probability
        densities. If `False`, returns the quantized inputs and their
        likelihoods under the corresponding probability mass function. These
        quantities can't be used for training, as they are not differentiable,
        but represent actual compression more closely.

    Returns:
      values: `Tensor` with the same shape as `inputs` containing the perturbed
        or quantized input values.
      likelihood: `Tensor` with the same shape as `inputs` containing the
        likelihood of `values` under the modeled probability distributions.

    Raises:
      ValueError: if `inputs` has different `dtype` or number of channels than
        a previous set of inputs the model was invoked with earlier.
    """
    inputs = ops.convert_to_tensor(inputs)
    ndim = self.input_spec.ndim
    channel_axis = self._channel_axis(ndim)
    half = constant_op.constant(.5, dtype=self.dtype)

    # Convert to (channels, 1, batch) format by commuting channels to front
    # and then collapsing.
    order = list(range(ndim))
    order.pop(channel_axis)
    order.insert(0, channel_axis)
    values = array_ops.transpose(inputs, order)
    shape = array_ops.shape(values)
    values = array_ops.reshape(values, (shape[0], 1, -1))

    # Add noise or quantize.
    if training:
      noise = random_ops.random_uniform(array_ops.shape(values), -half, half)
      values = math_ops.add_n([values, noise])
    elif self.optimize_integer_offset:
      values = math_ops.round(values - self._medians) + self._medians
    else:
      values = math_ops.round(values)

    # Evaluate densities.
    # We can use the special rule below to only compute differences in the left
    # tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
    # for large x, 0 for small x. Subtracting two numbers close to 0 can be done
    # with much higher precision than subtracting two numbers close to 1.
    lower = self._logits_cumulative(values - half, stop_gradient=False)
    upper = self._logits_cumulative(values + half, stop_gradient=False)
    # Flip signs if we can move more towards the left tail of the sigmoid.
    sign = -math_ops.sign(math_ops.add_n([lower, upper]))
    sign = array_ops.stop_gradient(sign)
    likelihood = abs(
        math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
    if self.likelihood_bound > 0:
      likelihood_bound = constant_op.constant(
          self.likelihood_bound, dtype=self.dtype)
      # TODO(jballe): Override gradients.
      likelihood = math_ops.maximum(likelihood, likelihood_bound)

    # Convert back to input tensor shape.
    order = list(range(1, ndim))
    order.insert(channel_axis, 0)
    values = array_ops.reshape(values, shape)
    values = array_ops.transpose(values, order)
    likelihood = array_ops.reshape(likelihood, shape)
    likelihood = array_ops.transpose(likelihood, order)

    if not context.executing_eagerly():
      values_shape, likelihood_shape = self.compute_output_shape(inputs.shape)
      values.set_shape(values_shape)
      likelihood.set_shape(likelihood_shape)

    return values, likelihood
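
The training branch above is the standard additive-noise relaxation of quantization: adding uniform noise in [-.5, .5) stands in for rounding while keeping gradients intact. A minimal standalone sketch of just that branch, assuming eager TensorFlow 2.x and ignoring the median offset (`soft_quantize` is a hypothetical helper, not part of this layer):

import tensorflow as tf

def soft_quantize(values, training):
  if training:
    # Differentiable surrogate: quantization error modeled as U(-.5, .5).
    noise = tf.random.uniform(tf.shape(values), -.5, .5)
    return values + noise
  # Hard rounding, as used for actual compression.
  return tf.round(values)

x = tf.constant([0.2, 1.7, -0.6])
print(soft_quantize(x, training=True))   # x plus uniform noise
print(soft_quantize(x, training=False))  # [0., 2., -1.]
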
Ejemplo n.º 46
0
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.

  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar `Tensor` in `[0, 100]`. The percentile.
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axes that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation:  {"lower", "higher", "nearest"}.  Default: "nearest".
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity.
      If False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is "percentile"

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of the same dtype as `x`, or, if
      `axis` is `None`, a scalar.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed value.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s.  Found %s" %
                       (allowed_interpolations, interpolation))

  with ops.name_scope(name, [x, q]):
    x = ops.convert_to_tensor(x, name="x")
    # Double is needed here and below, else we get the wrong index if the array
    # is huge along axis.
    q = math_ops.to_double(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          check_ops.assert_rank(q, 0),
          check_ops.assert_greater_equal(q, math_ops.to_double(0.)),
          check_ops.assert_less_equal(q, math_ops.to_double(100.))
      ], q)

    if axis is None:
      y = array_ops.reshape(x, [-1])
    else:
      axis = ops.convert_to_tensor(axis, name="axis")
      check_ops.assert_integer(axis)
      axis_ndims = _get_static_ndims(
          axis, expect_static=True, expect_ndims_no_more_than=1)
      axis_const = tensor_util.constant_value(axis)
      if axis_const is None:
        raise ValueError(
            "Expected argument 'axis' to be statically available.  Found: %s" %
            axis)
      axis = axis_const
      if axis_ndims == 0:
        axis = [axis]
      axis = [int(a) for a in axis]
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative(axis, x_ndims)
      y = _move_dims_to_flat_end(x, axis, x_ndims)

    frac_at_q_or_above = 1. - q / 100.
    d = math_ops.to_double(array_ops.shape(y)[-1])

    if interpolation == "lower":
      index = math_ops.ceil((d - 1) * frac_at_q_or_above)
    elif interpolation == "higher":
      index = math_ops.floor((d - 1) * frac_at_q_or_above)
    elif interpolation == "nearest":
      index = math_ops.round((d - 1) * frac_at_q_or_above)

    # If d is gigantic, (d - 1) can be indistinguishable from d even in double
    # precision, so clip the index into [0, d - 1] to avoid out-of-bounds
    # errors.
    d = array_ops.shape(y)[-1]
    # In int32, d - 1 is always distinct from d.
    index = clip_ops.clip_by_value(math_ops.to_int32(index), 0, d - 1)

    # Sort the full tensor rather than just the top 'k' entries, so that
    # repeated calls need only one underlying sort op (shared via CSE).
    sorted_y = _sort_tensor(y)

    # result.shape = B
    result = sorted_y[..., index]
    result.set_shape(y.get_shape()[:-1])

    if keep_dims:
      if axis is None:
        # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
        ones_vec = array_ops.ones(
            shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
        result *= array_ops.ones(ones_vec, dtype=x.dtype)
      else:
        result = _insert_back_keep_dims(result, axis)

    return result
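
The index arithmetic is easy to sanity-check in plain NumPy. The docstring examples imply that `_sort_tensor` sorts in descending order (for q=30 with 'lower' interpolation, index ceil(2.1) = 3 must select 1.0), which is why 'lower' takes the ceiling of the fractional index and 'higher' the floor. A 1-D sketch using the hypothetical helper `percentile_1d`:

import numpy as np

def percentile_1d(x, q, interpolation="nearest"):
  y = np.sort(np.asarray(x, dtype=np.float64))[::-1]  # descending sort
  pos = (len(y) - 1) * (1.0 - q / 100.0)              # frac_at_q_or_above
  index = {"lower": np.ceil, "higher": np.floor,
           "nearest": np.round}[interpolation](pos)
  return y[int(np.clip(index, 0, len(y) - 1))]

assert percentile_1d([1., 2., 3., 4.], 30.) == 2.0
assert percentile_1d([1., 2., 3., 4.], 30., "lower") == 1.0
assert percentile_1d([1., 2., 3., 4.], 100.) == 4.0
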
Ejemplo n.º 47
0
def binary_accuracy(y_true, y_pred):
  # Mean fraction of predictions matching the binary labels after rounding
  # the predicted probabilities to {0, 1} at a 0.5 threshold.
  return K.mean(math_ops.equal(y_true, math_ops.round(y_pred)), axis=-1)
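
A standalone NumPy check of the same computation (hypothetical inputs, with the rounding threshold fixed at 0.5 as in `math_ops.round`):

import numpy as np

y_true = np.array([[1., 0., 1., 1.]])
y_pred = np.array([[0.9, 0.4, 0.3, 0.8]])  # one miss at index 2
print(np.mean(np.equal(y_true, np.round(y_pred)), axis=-1))  # [0.75]
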
Ejemplo n.º 48
0
def safe_polygamma(x, y):
  # Round and clip the order into [1, 10] so `polygamma` always receives a
  # valid small integer order; `x * x + 1` keeps the argument positive.
  return math_ops.polygamma(
      math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
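
`polygamma(n, x)` expects a non-negative integer order, so the wrapper rounds and clips the order into [1, 10] and keeps the second argument strictly positive via `x * x + 1`. A purely illustrative SciPy sketch of the same guard (`safe_polygamma_np` is a hypothetical name):

import numpy as np
from scipy.special import polygamma

def safe_polygamma_np(x, y):
  order = int(np.round(np.clip(y, 1, 10)))  # valid small integer order
  return polygamma(order, x * x + 1)        # argument always >= 1

print(safe_polygamma_np(0.5, 3.7))  # polygamma(4, 1.25)
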