def _check_images_and_flow(self, images, flow):
  """Assert that warping image2 by the flow reduces the error vs. image1.

  Args:
    images: stacked pair of images; unstacked along axis 0 into two frames.
    flow: flow field mapping image1 coordinates into image2 (presumably;
      confirm against uflow_utils.flow_to_warp).
  """
  first_frame, second_frame = tf.unstack(images)
  # Add a leading batch dimension so the warp/resample utilities apply.
  first_frame = tf.expand_dims(first_frame, axis=0)
  second_frame = tf.expand_dims(second_frame, axis=0)
  batched_flow = tf.expand_dims(flow, axis=0)
  # Baseline: mean absolute difference between the raw, unwarped frames.
  unwarped_error = np.mean(np.abs(first_frame - second_frame))
  # Warp the second frame toward the first and re-measure the difference.
  warp_coords = uflow_utils.flow_to_warp(batched_flow)
  second_to_first = uflow_utils.resample(second_frame, warp_coords)
  warped_error = np.mean(np.abs(second_to_first - first_frame))
  # A sensible flow must explain the motion better than no warp at all.
  self.assertLess(warped_error, unwarped_error)
 def _check_images_and_flow(self,
                            image1,
                            image2,
                            flow,
                            save_images=False,
                            plot_dir='/tmp/flow_images'):
   self.assertGreaterEqual(np.min(image1), 0.)
   self.assertLessEqual(np.max(image1), 1.)
   # Check that the image2 warped by flow1 into image1 has lower pixelwise
   # error than the unwarped image
   mean_unwarped_diff = np.mean(np.abs(image1 - image2))
   warp = uflow_utils.flow_to_warp(flow)
   image2_to_image1 = uflow_utils.resample(image2, warp)
   mean_warped_diff = np.mean(np.abs(image2_to_image1 - image1))
   if save_images:
     plot_images(image1, image2, flow, image2_to_image1, plot_dir=plot_dir)
   # Check that the warped image has lower pixelwise error than the unwarped.
   self.assertLess(mean_warped_diff, mean_unwarped_diff)
# Esempio n. 3 ("Example no. 3" — snippet separator and score artifact left
# over from the original web listing; not part of the code itself).
  def call(self, feature_pyramid1, feature_pyramid2, training=False):
    """Run the model.

    Estimates flow coarse-to-fine (PWC-Net style): starting from the
    coarsest pyramid level, each level warps features2 by the upsampled
    flow from the level above, builds a cost volume against features1,
    decodes a flow residual, and upsamples for the next finer level.
    Level 0 of the pyramids is skipped; the finest estimated flow is
    additionally refined by ``self._refine_model``.

    Args:
      feature_pyramid1: per-level feature tensors for the first image,
        indexed fine-to-coarse (level 0 first) — presumed [b, h, w, c];
        TODO confirm against the feature extractor.
      feature_pyramid2: matching per-level features for the second image.
      training: if True (and a dropout rate is set), stochastically zeroes
        the whole context/flow of a level.

    Returns:
      List of flow tensors (cast to float32), finest level first;
      element 0 is the refined flow.
    """
    context = None
    flow = None
    flow_up = None
    context_up = None
    flows = []

    # Go top down through the levels to the second to last one to estimate
    # flow; [1:] drops level 0, reversed() starts at the coarsest level.
    for level, (features1, features2) in reversed(
        list(enumerate(zip(feature_pyramid1, feature_pyramid2)))[1:]):

      # init flows with zeros for coarsest level if needed
      # (a shared decoder needs a fixed input signature at every level)
      if self._shared_flow_decoder and flow_up is None:
        batch_size, height, width, _ = features1.shape.as_list()
        flow_up = tf.zeros(
            [batch_size, height, width, 2],
            dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)
        if self._num_context_up_channels:
          num_channels = int(self._num_context_up_channels *
                             self._channel_multiplier)
          context_up = tf.zeros(
              [batch_size, height, width, num_channels],
              dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)

      # Warp features2 with upsampled flow from higher level.
      if flow_up is None or not self._use_feature_warp:
        warped2 = features2
      else:
        warp_up = uflow_utils.flow_to_warp(flow_up)
        warped2 = uflow_utils.resample(features2, warp_up)

      # Compute cost volume by comparing features1 and warped features2.
      features1_normalized, warped2_normalized = normalize_features(
          [features1, warped2],
          normalize=self._normalize_before_cost_volume,
          center=self._normalize_before_cost_volume,
          moments_across_channels=True,
          moments_across_images=True)

      if self._use_cost_volume:
        cost_volume = compute_cost_volume(
            features1_normalized, warped2_normalized, max_displacement=4)
      else:
        # Surrogate: a learned conv over the concatenated features replaces
        # the explicit correlation cost volume.
        concat_features = Concatenate(axis=-1)(
            [features1_normalized, warped2_normalized])
        cost_volume = self._cost_volume_surrogate_convs[level](concat_features)

      cost_volume = LeakyReLU(
          alpha=self._leaky_relu_alpha, dtype=self._dtype_policy)(
              cost_volume)

      if self._shared_flow_decoder:
        # This will ensure to work for arbitrary feature sizes per level:
        # a per-level 1x1 conv maps features to the decoder's channel count.
        conv_1x1 = self._1x1_shared_decoder[level]
        features1 = conv_1x1(features1)

      # Compute context and flow from previous flow, cost volume, and
      # features1. flow_up/context_up are None only at the coarsest level
      # (unless zero-initialized above for the shared decoder).
      if flow_up is None:
        x_in = Concatenate(axis=-1)([cost_volume, features1])
      else:
        if context_up is None:
          x_in = Concatenate(axis=-1)([flow_up, cost_volume, features1])
        else:
          x_in = Concatenate(axis=-1)(
              [context_up, flow_up, cost_volume, features1])

      # Use dense-net connections: every layer's output is concatenated
      # back onto its input before the next layer.
      x_out = None
      if self._shared_flow_decoder:
        # reuse the same flow decoder on all levels
        flow_layers = self._flow_layers
      else:
        flow_layers = self._flow_layers[level]
      for layer in flow_layers[:-1]:
        x_out = layer(x_in)
        x_in = Concatenate(axis=-1)([x_in, x_out])
      # The last hidden activation doubles as the "context" for refinement.
      context = x_out

      flow = flow_layers[-1](context)

      if (training and self._drop_out_rate):
        # Level dropout: with probability _drop_out_rate, zero out this
        # level's entire context and flow (single Bernoulli draw per level).
        maybe_dropout = tf.cast(
            tf.math.greater(tf.random.uniform([]), self._drop_out_rate),
            tf.bfloat16 if self._use_bfloat16 else tf.float32)
        context *= maybe_dropout
        flow *= maybe_dropout

      if flow_up is not None and self._accumulate_flow:
        # Decoder output is a residual on top of the upsampled coarser flow.
        flow += flow_up

      # Upsample flow for the next lower level.
      flow_up = uflow_utils.upsample(flow, is_flow=True)
      if self._num_context_up_channels:
        context_up = self._context_up_layers[level](context)

      # Append results to list (prepend, so flows ends up finest-first).
      flows.insert(0, flow)

    # Refine flow at level 1 (the finest estimated level) with a residual
    # predicted from the final context and flow.
    refinement = self._refine_model([context, flow])
    if (training and self._drop_out_rate):
      refinement *= tf.cast(
          tf.math.greater(tf.random.uniform([]), self._drop_out_rate),
          tf.bfloat16 if self._use_bfloat16 else tf.float32)
    refined_flow = flow + refinement
    flows[0] = refined_flow
    return [tf.cast(flow, tf.float32) for flow in flows]