Example #1
def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, dtype=tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # set up lower/upper bounds and clamp each scaled mean into [0.05, 0.15]
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [utils.batch_size, 1, 1])

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
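The peak-picking step can be sanity-checked in isolation; a minimal sketch (TF 1.x, toy 1x6x1 input; note that tf.manip.roll wraps around, so the edges compare against wrapped neighbours):

import tensorflow as tf

x = tf.constant([[[0.], [2.], [1.], [3.], [0.], [1.]]])  # shape [1, 6, 1]
left = tf.cast(tf.greater(x, tf.manip.roll(x, shift=1, axis=1)), tf.float32)
right = tf.cast(tf.greater(x, tf.manip.roll(x, shift=-1, axis=1)), tf.float32)
peaks = left * right  # 1.0 exactly where x[i-1] < x[i] > x[i+1]

with tf.Session() as sess:
    print(sess.run(peaks)[0, :, 0])  # [0. 1. 0. 1. 0. 1.] -- the last peak comes from the wrap-around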
Example #2
 def IoULoss(self, pd, gt):
     mask = tf.cast(
         tf.greater(tf.reduce_sum(
             tf.cast(tf.greater(gt, 0), tf.int8), 3), 3),
         tf.float32
     )
     npd = tf.transpose(pd, [3, 0, 1, 2])
     ngt = tf.transpose(gt, [3, 0, 1, 2])
     area_x = tf.multiply(
         tf.add(tf.gather(npd, 0), tf.gather(npd, 2)),
         tf.add(tf.gather(npd, 1), tf.gather(npd, 3)),
     )
     area_g = tf.multiply(
         tf.add(tf.gather(ngt, 0), tf.gather(ngt, 2)),
         tf.add(tf.gather(ngt, 1), tf.gather(ngt, 3)),
     )
     w_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 0), tf.gather(ngt, 0)),
         tf.minimum(tf.gather(npd, 2), tf.gather(ngt, 2)),
     ))
     h_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 1), tf.gather(ngt, 1)),
         tf.minimum(tf.gather(npd, 3), tf.gather(ngt, 3)),
     ))
     area_overlap = tf.multiply(w_overlap, h_overlap)
     area_u = tf.subtract(tf.add(area_x, area_g), area_overlap)
     iou = tf.divide(area_overlap, tf.add(area_u, tf.constant(1, tf.float32)))
     iou = tf.maximum(iou, tf.constant(1e-4, tf.float32))
     cost = -tf.log(iou)
     cost = tf.multiply(cost, mask)
     cost = tf.reduce_sum(cost)
     return cost
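The four channels are, presumably, per-pixel distances to the left/top/right/bottom box edges (as in UnitBox-style IoU losses), so width = left + right and height = top + bottom. A quick NumPy check of the same arithmetic for a single pixel:

import numpy as np

pd = np.array([3., 2., 3., 2.])  # predicted (left, top, right, bottom) distances
gt = np.array([2., 2., 4., 2.])  # ground truth; both boxes have area 6 * 4 = 24
area_p = (pd[0] + pd[2]) * (pd[1] + pd[3])
area_g = (gt[0] + gt[2]) * (gt[1] + gt[3])
w_ov = max(0., min(pd[0], gt[0]) + min(pd[2], gt[2]))  # 2 + 3 = 5
h_ov = max(0., min(pd[1], gt[1]) + min(pd[3], gt[3]))  # 2 + 2 = 4
iou = w_ov * h_ov / (area_p + area_g - w_ov * h_ov)    # 20 / 28 ~= 0.714
print(-np.log(max(iou, 1e-4)))                         # per-pixel cost ~= 0.336

(The TF version adds 1 to the union purely for numerical stability.)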
Example #3
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to the allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.where(is_inside_bounds, logp, tf.fill(tf.shape(X), config.dtype(-np.inf)))

    return logp
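A hedged usage sketch (plain (lower, upper) pairs unpack the same way the Region objects do; this assumes the module's config.dtype is float32):

import numpy as np
import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[3])
logp = tf.zeros_like(X)
bounds = [(0.0, np.inf), (-5.0, -1.0)]  # allowed: (0, inf) union (-5, -1)
bounded = set_logp_to_neg_inf(X, logp, bounds)

with tf.Session() as sess:
    print(sess.run(bounded, {X: [-6., -2., 1.]}))  # [-inf, 0., 0.]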
Example #4
                def get_losses(obj_mask):
                  """Get motion constraint loss."""
                  # Find height of segment.
                  coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
                      obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
                  y_max = tf.reduce_max(coords[:, 0])
                  y_min = tf.reduce_min(coords[:, 0])
                  seg_height = y_max - y_min
                  f_y = self.intrinsic_mat[i, 0, 1, 1]
                  approx_depth = ((f_y * self.global_scale_var) /
                                  tf.to_float(seg_height))
                  reference_pred = tf.boolean_mask(
                      depth_pred, tf.greater(
                          tf.reshape(obj_mask[:, :, 0],
                                     (self.img_height, self.img_width, 1)),
                          tf.constant(0.5, dtype=tf.float32)))

                  # Establish loss on approx_depth, a scalar, and
                  # reference_pred, our dense prediction. Normalize both to
                  # prevent degenerative depth shrinking.
                  global_mean_depth_pred = tf.reduce_mean(depth_pred)
                  reference_pred /= global_mean_depth_pred
                  approx_depth /= global_mean_depth_pred
                  spatial_err = tf.abs(reference_pred - approx_depth)
                  mean_spatial_err = tf.reduce_mean(spatial_err)
                  return mean_spatial_err
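The approx_depth line is the pinhole-camera relation depth ~= f_y * H / h, where the assumed metric object height H is folded into global_scale_var and h is the segment height in pixels; for example, f_y = 1000 px and H = 1.6 m give a depth of roughly 20 m for an 80-pixel-tall segment.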
Example #5
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
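A hedged usage sketch against a unit window (BoxList and gather are assumed to come from the object_detection box_list / box_list_ops modules this function lives in):

boxes = tf.constant([[0.1, 0.1, 0.4, 0.4],   # fully inside -> kept
                     [-0.2, 0.3, 0.5, 0.9],  # ymin < win_y_min -> pruned
                     [0.5, 0.5, 1.2, 0.8]])  # ymax > win_y_max -> pruned
window = tf.constant([0.0, 0.0, 1.0, 1.0])
pruned_boxlist, valid = prune_outside_window(box_list.BoxList(boxes), window)
# valid evaluates to [0]; only the first box survives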
Example #6
  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = tf.where(tf.greater(self.df, 2.),
                     self.df - 2.,
                     tf.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (tf.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           tf.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = tf.where(
        self.df > tf.fill(self.batch_shape_tensor(), 2.),
        var,
        tf.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return tf.where(
          tf.greater(
              self.df,
              tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          tf.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              tf.assert_less(
                  tf.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Example #7
def _compute_precision_recall(input_layer, labels, threshold,
                              per_example_weights):
  """Returns the numerator of both, the denominator of precision and recall."""

  # To apply per_example_weights, we need to collapse each row to a scalar, but
  # we really want the sum.
  labels.get_shape().assert_is_compatible_with(input_layer.get_shape())
  relevant = tf.to_float(tf.greater(labels, 0))
  retrieved = tf.to_float(tf.greater(input_layer, threshold))
  selected = relevant * retrieved

  if per_example_weights:
    per_example_weights = tf.convert_to_tensor(per_example_weights,
                                               name='per_example_weights')
    if selected.get_shape().dims:
      per_example_weights.get_shape().assert_is_compatible_with(
          [selected.get_shape().dims[0]])
    else:
      per_example_weights.get_shape().assert_is_compatible_with([None])
    per_example_weights = tf.to_float(tf.greater(per_example_weights, 0))
    selected = functions.reduce_batch_sum(selected) * per_example_weights
    relevant = functions.reduce_batch_sum(relevant) * per_example_weights
    retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights
  sum_relevant = tf.reduce_sum(relevant)
  sum_retrieved = tf.reduce_sum(retrieved)
  selected = tf.reduce_sum(selected)
  return selected, sum_retrieved, sum_relevant
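The three sums combine into the usual metrics downstream; a minimal sketch of that arithmetic (the epsilon guard is an assumption, not part of the original):

precision = selected / (sum_retrieved + 1e-8)  # fraction of retrieved items that are relevant
recall = selected / (sum_relevant + 1e-8)      # fraction of relevant items that were retrieved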
Example #8
  def sanitize(self, x, eps_delta, sigma=None,
               option=ClipOption(None, None), tensor_name=None,
               num_examples=None, add_noise=True):
    """Sanitize the given tensor.

    This sanitizes a given tensor by first applying l2 norm clipping and then
    adding Gaussian noise. It calls the privacy accountant to update the
    privacy spending.

    Args:
      x: the tensor to sanitize.
      eps_delta: a pair of eps, delta for (eps,delta)-DP. Use it to
        compute sigma if sigma is None.
      sigma: if sigma is not None, use sigma.
      option: a ClipOption which, if supplied, used for
        clipping and adding noise.
      tensor_name: the name of the tensor.
      num_examples: if None, use the number of "rows" of x.
      add_noise: if True, then add noise, else just clip.
    Returns:
      a pair of sanitized tensor and the operation to accumulate privacy
      spending.
    """

    if sigma is None:
      # pylint: disable=unpacking-non-sequence
      eps, delta = eps_delta
      with tf.control_dependencies(
          [tf.Assert(tf.greater(eps, 0),
                     ["eps needs to be greater than 0"]),
           tf.Assert(tf.greater(delta, 0),
                     ["delta needs to be greater than 0"])]):
        # The following formula is taken from
        #   Dwork and Roth, The Algorithmic Foundations of Differential
        #   Privacy, Appendix A.
        #   http://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
        sigma = tf.sqrt(2.0 * tf.log(1.25 / delta)) / eps

    l2norm_bound, clip = option
    if l2norm_bound is None:
      l2norm_bound, clip = self._default_option
      if ((tensor_name is not None) and
          (tensor_name in self._options)):
        l2norm_bound, clip = self._options[tensor_name]
    if clip:
      x = utils.BatchClipByL2norm(x, l2norm_bound)

    if add_noise:
      if num_examples is None:
        num_examples = tf.slice(tf.shape(x), [0], [1])
      privacy_accum_op = self._accountant.accumulate_privacy_spending(
          eps_delta, sigma, num_examples)
      with tf.control_dependencies([privacy_accum_op]):
        saned_x = utils.AddGaussianNoise(tf.reduce_sum(x, 0),
                                         sigma * l2norm_bound)
    else:
      saned_x = tf.reduce_sum(x, 0)
    return saned_x
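For a concrete feel of the Dwork-Roth formula above: eps = 1.0 and delta = 1e-5 give sigma = sqrt(2 ln(1.25 / 1e-5)) / 1.0 ~= 4.84, i.e. the added Gaussian noise has a standard deviation of roughly 4.84 times the clipping bound.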
Example #9
def _get_valid_sample_fraction(labels, p=0):
    """Return the fraction of non-negative examples; ignored examples are marked negative."""
    num_valid = tf.reduce_sum(tf.cast(tf.greater_equal(labels, p), tf.float32))
    num_example = tf.cast(tf.size(labels), tf.float32)
    frac = tf.cond(tf.greater(num_example, 0), lambda: num_valid / num_example,
                   lambda: tf.cast(0, tf.float32))
    frac_ = tf.cond(tf.greater(num_valid, 0), lambda: num_example / num_valid,
                    lambda: tf.cast(0, tf.float32))
    return frac, frac_
Example #10
def style_loss(CNN_structure, const_layers, var_layers, content_segs, style_segs, weight):
    loss_styles = []
    layer_count = float(len(const_layers))
    layer_index = 0

    _, content_seg_height, content_seg_width, _ = content_segs[0].get_shape().as_list()
    _, style_seg_height, style_seg_width, _ = style_segs[0].get_shape().as_list()
    for layer_name in CNN_structure:
        layer_name = layer_name[layer_name.find("/") + 1:]

        # downsampling segmentation
        if "pool" in layer_name:
            content_seg_width, content_seg_height = int(math.ceil(content_seg_width / 2)), int(math.ceil(content_seg_height / 2))
            style_seg_width, style_seg_height = int(math.ceil(style_seg_width / 2)), int(math.ceil(style_seg_height / 2))

            for i in xrange(len(content_segs)):
                content_segs[i] = tf.image.resize_bilinear(content_segs[i], tf.constant((content_seg_height, content_seg_width)))
                style_segs[i] = tf.image.resize_bilinear(style_segs[i], tf.constant((style_seg_height, style_seg_width)))

        elif "conv" in layer_name:
            for i in xrange(len(content_segs)):
                # borders differ slightly from the torch implementation
                content_segs[i] = tf.nn.avg_pool(
                    tf.pad(content_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"),
                    ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')
                style_segs[i] = tf.nn.avg_pool(
                    tf.pad(style_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"),
                    ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

        if layer_name == var_layers[layer_index].name[var_layers[layer_index].name.find("/") + 1:]:
            print("Setting up style layer: <{}>".format(layer_name))
            const_layer = const_layers[layer_index]
            var_layer = var_layers[layer_index]

            layer_index = layer_index + 1

            layer_style_loss = 0.0
            for content_seg, style_seg in zip(content_segs, style_segs):
                gram_matrix_const = gram_matrix(tf.multiply(const_layer, style_seg))
                style_mask_mean   = tf.reduce_mean(style_seg)
                gram_matrix_const = tf.cond(tf.greater(style_mask_mean, 0.),
                                        lambda: gram_matrix_const / (tf.to_float(tf.size(const_layer)) * style_mask_mean),
                                        lambda: gram_matrix_const
                                    )

                gram_matrix_var   = gram_matrix(tf.multiply(var_layer, content_seg))
                content_mask_mean = tf.reduce_mean(content_seg)
                gram_matrix_var   = tf.cond(tf.greater(content_mask_mean, 0.),
                                        lambda: gram_matrix_var / (tf.to_float(tf.size(var_layer)) * content_mask_mean),
                                        lambda: gram_matrix_var
                                    )

                diff_style_sum    = tf.reduce_mean(tf.squared_difference(gram_matrix_const, gram_matrix_var)) * content_mask_mean

                layer_style_loss += diff_style_sum

            loss_styles.append(layer_style_loss * weight)
    return loss_styles
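style_loss relies on a gram_matrix helper that is not shown here; a common definition, offered as a sketch under the assumption of NHWC activations:

def gram_matrix(activations):
    # flatten spatial dimensions: [N, H, W, C] -> [N*H*W, C]
    shape = tf.shape(activations)
    flat = tf.reshape(activations, [-1, shape[3]])
    # C x C feature co-occurrence matrix
    return tf.matmul(flat, flat, transpose_a=True)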
Example #11
def get_position_cross_clf_label(positions, seq_length, ans_avg_len):
  start_labels = tf.reshape(positions[:, 0], [-1])  # positions shape: [batch_size, 2] => [batch_size]
  end_labels = tf.reshape(positions[:, 1], [-1])
  ans_len = end_labels - start_labels + 1  # [batch_size]; spans longer than ans_avg_len are clamped to ans_avg_len

  mask = tf.cast(tf.greater(ans_len, ans_avg_len), tf.int32) * (
            tf.zeros_like(ans_len, dtype=tf.int32) + ans_avg_len)

  ans_len = ans_len * (1 - tf.cast(tf.greater(ans_len, ans_avg_len), tf.int32)) + mask
  return start_labels * ans_avg_len + ans_len - 1
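The return value packs (start, clamped length) into a single class id: with ans_avg_len = 5, a span starting at position 7 with length 3 maps to 7 * 5 + 3 - 1 = 37, and any span longer than 5 starting at 7 maps to 7 * 5 + 5 - 1 = 39.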
Example #12
    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
            similarity_matrix)
        force_match_column_ids = tf.argmax(similarity_matrix, 1,
                                           output_type=tf.int32)
        force_match_column_indicators = (
            tf.one_hot(
                force_match_column_ids, depth=similarity_matrix_shape[1]) *
            tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32))
        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
                                        output_type=tf.int32)
        force_match_column_mask = tf.cast(
            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
        final_matches = tf.where(force_match_column_mask,
                                 force_match_row_ids, matches)
        return final_matches
      else:
        return matches
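Concretely, with matched_threshold = 0.7, unmatched_threshold = 0.3 and negatives_lower_than_unmatched set to True: a column whose best similarity is 0.8 keeps its argmax row, one at 0.5 becomes -2 (ignored), and one at 0.2 becomes -1 (negative).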
Example #13
def _compute_alpha(x):
    """
    Computing the scale parameter.
    """
    threshold = _compute_threshold(x)
    alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))
    alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))
    alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)
    alpha_array_abs = tf.abs(alpha_array)
    alpha_array_abs1 = tf.where(tf.greater(alpha_array_abs, 0),
                                tf.ones_like(alpha_array_abs, tf.float32),
                                tf.zeros_like(alpha_array_abs, tf.float32))
    alpha_sum = tf.reduce_sum(alpha_array_abs)
    n = tf.reduce_sum(alpha_array_abs1)
    alpha = tf.div(alpha_sum, n)
    return alpha
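This is the scale term of ternary-weight quantization: alpha is the mean absolute value of the entries that survive the threshold. For x = [0.1, -0.8, 0.6, 0.05] and a threshold of 0.35, only -0.8 and 0.6 survive, so alpha = (0.8 + 0.6) / 2 = 0.7. (The companion _compute_threshold is commonly 0.7 * mean|x|, but it is not shown here.)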
Example #14
    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        forced_matches_ids = tf.cast(tf.argmax(similarity_matrix, 1), tf.int32)

        # Set matches[forced_matches_ids] = [0, ..., R], R is number of rows.
        row_range = tf.range(tf.shape(similarity_matrix)[0])
        col_range = tf.range(tf.shape(similarity_matrix)[1])
        forced_matches_values = tf.cast(row_range, matches.dtype)
        keep_matches_ids, _ = tf.setdiff1d(col_range, forced_matches_ids)
        keep_matches_values = tf.gather(matches, keep_matches_ids)
        matches = tf.dynamic_stitch(
            [forced_matches_ids,
             keep_matches_ids], [forced_matches_values, keep_matches_values])

      return tf.cast(matches, tf.int32)
Example #15
def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
  """Converts absolute box coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(images)[1],
                                                     tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates are
  already normalized). The value 1.01 is to deal with small rounding errors.

  Args:
    boxlist: BoxList with coordinates in terms of pixel-locations.
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with normalized coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, 1 / height, 1 / width)
Example #16
 def __imagenet_data_process_function(self, x, y):
     with tf.name_scope("imagenet_data_aug") as scope:
         #random scale
         #apparently, this works better than what we have:
         #https://github.com/facebook/fb.resnet.torch
         #but let's use the 'original' formulation for now
         #randomly sample a size in specified range
         random_size = tf.squeeze(tf.random_uniform((1, 1), 256, 480, dtype=tf.int32, name="random_scale_size"))
         #rescale so the smaller side equals random_size, preserving aspect ratio
         x = tf.cond(tf.greater(tf.shape(x)[0], tf.shape(x)[1]),
             lambda: tf.image.resize_images(x, [tf.shape(x)[0] * random_size // tf.shape(x)[1], random_size]),
             lambda: tf.image.resize_images(x, [random_size, tf.shape(x)[1] * random_size // tf.shape(x)[0]]))
         x = tf.image.resize_images(x, [224, 224])
         #random horizontal flip
         x = tf.image.random_flip_left_right(x)
         #random crop
         x = tf.random_crop(x, [224, 224, 3])
         #colour augmentation
         #this is a little more involved than I first thought
         #lets pick the inception colour distortion
         #https://github.com/tensorflow/models/blob/master/inception/inception/image_processing.py
         x = tf.image.random_brightness(x, max_delta=32. / 255.)
         x = tf.image.random_saturation(x, lower=0.5, upper=1.5)
         x = tf.image.random_hue(x, max_delta=0.2)
         x = tf.image.random_contrast(x, lower=0.5, upper=1.5)
         x = tf.clip_by_value(x, 0.0, 1.0)
         #normalisation
         x = tf.image.per_image_standardization(x)
     return [x, y]
Example #17
  def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using the privacy amplification via sampling
    bound; 2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)])
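To see the amplification bound in action: with per-step eps = 1.0 and a sampling ratio q = num_examples / total_examples = 0.01, the amortized epsilon is log(1 + 0.01 * (e^1 - 1)) ~= 0.017, far below the unamplified 1.0; the accountant then accumulates its square so total spending can be composed later.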
Example #18
    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      cross_entropy],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
                        numpy.logical_or(
                            numpy.all(r[0] == r[1], axis=1),
                            numpy.logical_and(r[2] < 0.5,
                                              r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print "{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb))
        num_p_correct = numpy.sum(r[2] == r[3])

        print ("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
               "(digits: {}, presence: {}) |{}|").format(
            batch_idx,
            100. * num_correct / (len(r[0])),
            100. * num_p_correct / len(r[2]),
            r[6],
            r[4],
            r[5],
            "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                                           for b, c, pb, pc in zip(*r_short)))
Example #19
    def loss(self, logits, labels, regularization):
        """Adds to the inference model the layers required to generate loss."""
        with tf.name_scope('loss'):
            with tf.name_scope('var_loss'):
                labels = tf.cast(labels, tf.float32)
                shape = labels.get_shape()

                same_class = tf.boolean_mask(logits, tf.equal(labels, tf.ones(shape)))
                diff_class = tf.boolean_mask(logits, tf.not_equal(labels, tf.ones(shape)))
                same_mean, same_var = tf.nn.moments(same_class, [0])
                diff_mean, diff_var = tf.nn.moments(diff_class, [0])
                var_loss = same_var + diff_var

            with tf.name_scope('mean_loss'):
                mean_loss = self.lamda * tf.where(tf.greater(self.mu - (same_mean - diff_mean), 0.),
                                                  self.mu - (same_mean - diff_mean), 0.)

            with tf.name_scope('regularization'):
                regularization *= tf.add_n(self.regularizers)

            loss = var_loss + mean_loss + regularization

            # Summaries for TensorBoard.
            tf.summary.scalar('loss/total', loss)
            with tf.name_scope('averages'):
                averages = tf.train.ExponentialMovingAverage(0.9)
                op_averages = averages.apply([var_loss, mean_loss, regularization, loss])
                tf.summary.scalar('loss/avg/var_loss', averages.average(var_loss))
                tf.summary.scalar('loss/avg/mean_loss', averages.average(mean_loss))
                tf.summary.scalar('loss/avg/regularization', averages.average(regularization))
                tf.summary.scalar('loss/avg/total', averages.average(loss))
                with tf.control_dependencies([op_averages]):
                    loss_average = tf.identity(averages.average(loss), name='control')
            return loss, loss_average
Example #20
def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis of which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ph_correct = tf.where(idx, tf.zeros_like(ph_correct), ph_correct)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)

  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
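A hedged usage check against NumPy's reference implementation (this assumes the module's diff helper, a tf analogue of np.diff, is in scope):

import numpy as np
import tensorflow as tf

phase = np.array([0.0, 2.5, -2.5, 1.0], dtype=np.float32)  # jumps by -5.0 across the wrap
p = tf.constant(phase[None, :])
with tf.Session() as sess:
    print(sess.run(unwrap(p))[0])  # ~[0. 2.5 3.783 1.0]
print(np.unwrap(phase))            # the two rows should agree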
Example #21
  def _decode_png_instance_masks(self, keys_to_tensors):
    """Decode PNG instance segmentation masks and stack into dense tensor.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """

    def decode_png_mask(image_buffer):
      image = tf.squeeze(
          tf.image.decode_image(image_buffer, channels=1), axis=2)
      image.set_shape([None, None])
      image = tf.to_float(tf.greater(image, 0))
      return image

    png_masks = keys_to_tensors['image/object/mask']
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    if isinstance(png_masks, tf.SparseTensor):
      png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
    return tf.cond(
        tf.greater(tf.size(png_masks), 0),
        lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
Example #22
 def loss(self, logits, labels, regularization):
     """Adds to the inference model the layers required to generate loss."""
     with tf.name_scope('loss'):
         with tf.name_scope('hinge_loss'):
             labels = tf.cast(labels, tf.float32)
             zeros = tf.zeros(labels.get_shape())
             output = tf.ones(labels.get_shape()) - tf.multiply(labels, logits)
             hinge_loss = tf.where(tf.greater(output, zeros), output, zeros)
             hinge_loss = tf.reduce_mean(hinge_loss)
         with tf.name_scope('regularization'):
             regularization *= tf.add_n(self.regularizers)
         loss = hinge_loss + regularization
         
         # Summaries for TensorBoard.
         tf.summary.scalar('loss/hinge_loss', hinge_loss)
         tf.summary.scalar('loss/regularization', regularization)
         tf.summary.scalar('loss/total', loss)
         with tf.name_scope('averages'):
             averages = tf.train.ExponentialMovingAverage(0.9)
             op_averages = averages.apply([hinge_loss, regularization, loss])
             tf.summary.scalar('loss/avg/hinge_loss', averages.average(hinge_loss))
             tf.summary.scalar('loss/avg/regularization', averages.average(regularization))
             tf.summary.scalar('loss/avg/total', averages.average(loss))
             with tf.control_dependencies([op_averages]):
                 loss_average = tf.identity(averages.average(loss), name='control')
         return loss, loss_average
Example #23
    def testProbabilitiesCanBeChanged(self):
        # Set up graph.
        tf.set_random_seed(1234)
        lbl1 = 0
        lbl2 = 3
        # This cond allows the necessary class queues to be populated.
        label = tf.cond(tf.greater(0.5, tf.random_uniform([])),
                        lambda: tf.constant(lbl1), lambda: tf.constant(lbl2))
        val = [np.array([1, 4]) * label]
        probs = tf.placeholder(tf.float32, shape=[5])
        batch_size = 2

        data_batch, labels = tf.contrib.training.stratified_sample_unknown_dist(val, label, probs, batch_size)

        with self.test_session() as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for _ in range(5):
                [data], lbls = sess.run([data_batch, labels], feed_dict={probs: [1, 0, 0, 0, 0]})
                for data_example in data:
                    self.assertListEqual([0, 0], list(data_example))
                self.assertListEqual([0, 0], list(lbls))

            # Now change distribution and expect different output.
            for _ in range(5):
                [data], lbls = sess.run([data_batch, labels], feed_dict={probs: [0, 0, 0, 1, 0]})
                for data_example in data:
                    self.assertListEqual([3, 12], list(data_example))
                self.assertListEqual([3, 3], list(lbls))

            coord.request_stop()
            coord.join(threads)
Example #24
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
      height: an int32 scalar tensor indicating the current height.
      width: an int32 scalar tensor indicating the current width.
      smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.

    Returns:
      new_height: an int32 scalar tensor indicating the new height.
      new_width: an int32 scalar tensor indicating the new width.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)

    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(height * scale)
    new_width = tf.to_int32(width * scale)
    return new_height, new_width
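For example, height = 480, width = 640, smallest_side = 256: since height is the smaller side, scale = 256 / 480 ~= 0.533, giving (new_height, new_width) = (256, 341) while preserving the 4:3 aspect ratio.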
Example #25
def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
  """Get KL multiplier (beta) based on the schedule."""
  if decay_start > decay_end:
    raise ValueError("decay_end is smaller than decay_start.")

  # Since some of the TF schedules do not support incrementing a value,
  # in all of the schedules, we anneal the beta from final_beta to zero
  # and then reverse it at the bottom.
  if schedule == "constant":
    decayed_value = 0.0
  elif schedule == "linear":
    decayed_value = tf.train.polynomial_decay(
        learning_rate=final_beta,
        global_step=global_step - decay_start,
        decay_steps=decay_end - decay_start,
        end_learning_rate=0.0)
  elif schedule == "noisy_linear_cosine_decay":
    decayed_value = tf.train.noisy_linear_cosine_decay(
        learning_rate=final_beta,
        global_step=global_step - decay_start,
        decay_steps=decay_end - decay_start)
  # TODO(mechcoder): Add log_annealing schedule.
  else:
    raise ValueError("Unknown beta schedule.")

  increased_value = final_beta - decayed_value
  increased_value = tf.maximum(0.0, increased_value)

  beta = tf.case(
      pred_fn_pairs={
          tf.less(global_step, decay_start): lambda: 0.0,
          tf.greater(global_step, decay_end): lambda: final_beta},
      default=lambda: increased_value)
  return beta
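As a worked case for the linear schedule: final_beta = 1.0, decay_start = 1000, decay_end = 5000. At global_step = 3000 the polynomial decay is halfway through, so decayed_value = 0.5 and beta = 1.0 - 0.5 = 0.5; before step 1000 the tf.case returns 0.0, and after step 5000 it returns final_beta.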
Example #26
  def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
    """Counts the number of positives and negatives numbers to be sampled.

    Args:
      sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
        the signed indices of the examples where the sign is based on the label
        value. The examples that cannot be sampled are set to 0. It samples
        atmost sample_size*positive_fraction positive examples and remaining
        from negative examples.
      sample_size: Size of subsamples.

    Returns:
      A tuple containing the number of positive and negative labels in the
      subsample.
    """
    input_length = tf.shape(sorted_indices_tensor)[0]
    valid_positive_index = tf.greater(sorted_indices_tensor,
                                      tf.zeros(input_length, tf.int32))
    num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
    max_num_positive_samples = tf.constant(
        int(sample_size * self._positive_fraction), tf.int32)
    num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
    num_negative_samples = tf.constant(sample_size,
                                       tf.int32) - num_positive_samples

    return num_positive_samples, num_negative_samples
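E.g. with sample_size = 64 and positive_fraction = 0.25: if 10 of the signed indices are positive, the subsample takes all 10 positives and 54 negatives; if 30 are positive, positives are capped at 16 and 48 negatives fill the rest.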
Example #27
    def process_single_frame(prev_outputs, inputs):
      """Process a single frame of the video."""
      cur_image, input_reward, action = inputs
      time_step, prev_image, prev_reward, frame_buf, lstm_states = prev_outputs

      # sample from softmax (by argmax); this is a no-op for non-softmax losses.
      prev_image = self.get_sampled_frame(prev_image)

      generated_items = [prev_image]
      groundtruth_items = [cur_image]
      done_warm_start = tf.greater(time_step, context_frames - 1)
      input_image, = self.get_scheduled_sample_inputs(
          done_warm_start, groundtruth_items, generated_items, ss_func)

      # Prediction
      pred_image, lstm_states, _ = self.construct_predictive_tower(
          input_image, None, action, lstm_states, latent)

      if self.hparams.reward_prediction:
        reward_input_image = self.get_sampled_frame(pred_image)
        if self.hparams.reward_prediction_stop_gradient:
          reward_input_image = tf.stop_gradient(reward_input_image)
        with tf.control_dependencies([time_step]):
          frame_buf = [reward_input_image] + frame_buf[:-1]
        pred_reward = self.reward_prediction(frame_buf, None, action, latent)
        pred_reward = common_video.decode_to_shape(
            pred_reward, common_layers.shape_list(input_reward), "reward_dec")
      else:
        pred_reward = prev_reward

      time_step += 1
      outputs = (time_step, pred_image, pred_reward, frame_buf, lstm_states)

      return outputs
Example #28
def binary_accuracy_op(predictions, targets):
    """ binary_accuracy_op.

    An op that calculates mean accuracy, assuming predictions are logits, and
    targets are binary encoded (and represented as int32).

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        acc_op = binary_accuracy_op(y_pred, y_true)

        # Calculate accuracy by feeding data X and labels Y
        binary_accuracy = sess.run(acc_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor` of `float` type.
        targets: `Tensor` of `float` type.

    Returns:
        `Float`. The mean accuracy.

    """
    if not isinstance(targets, tf.Tensor):
        raise ValueError("binary_accuracy_op 'targets' argument only accepts type "
                         "Tensor, '" + str(type(targets)) + "' given.")

    with tf.name_scope('BinaryAccuracy'):
        predictions = tf.cast(tf.greater(predictions, 0), tf.float32)
        correct_pred = tf.equal(predictions, tf.cast(targets, tf.float32))
        acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return acc
Example #29
    def custom_rnn_autodecoder(decoder_inputs, initial_input, initial_state, cell, scope=None):
      # customized rnn_decoder for the task of dealing with end of character
      with tf.variable_scope(scope or "rnn_decoder"):
        states = [initial_state]
        outputs = []
        prev = None

        for i in xrange(len(decoder_inputs)):
          inp = decoder_inputs[i]
          if i > 0:
            tf.get_variable_scope().reuse_variables()
          output, new_state = cell(inp, states[-1])

          num_batches = self.args.batch_size # new_state.get_shape()[0].value
          num_state = new_state.get_shape()[1].value

          # if the input has an end-of-character signal, have to zero out the state

          #to do:  test this code.

          eoc_detection = inp[:,3]
          eoc_detection = tf.reshape(eoc_detection, [num_batches, 1])

          eoc_detection_state = tfrepeat(eoc_detection, num_state)

          eoc_detection_state = tf.greater(eoc_detection_state, tf.zeros_like(eoc_detection_state, dtype=tf.float32))

          new_state = tf.where(eoc_detection_state, initial_state, new_state)

          outputs.append(output)
          states.append(new_state)
      return outputs, states
Example #30
def logistic_loss(prediction, label):
	""" Logistic loss function averaged over pixels in the breast area.
	
	Pixels in the background are ignored.
	
	Args:
		prediction: A 2D tensor of floats. The predicted heatmap of logits.
		label: A 2D tensor of integers. Possible labels are 0 (background), 127
			(breast tissue) and 255 (breast mass).

	Returns:
		A float. The loss.
	"""
	with tf.name_scope('logistic_loss'):
		# Generate binary masks.
		mass = tf.to_float(tf.equal(label, 255))
		breast_area = tf.to_float(tf.greater(label, 0))

		# Compute loss per pixel
		pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=mass, logits=prediction)

		# Weight the errors (1 for pixels in breast area, zero otherwise)
		weighted_loss = tf.multiply(pixel_loss, breast_area)
	
		# Average over pixels in the breast area
		loss = tf.reduce_sum(weighted_loss)/tf.reduce_sum(breast_area)

	return loss
Example #31
def run_inference_for_single_image(image, graph):
    with graph.as_default():
        # `sess` is assumed to be a tf.Session created elsewhere in the module
        with sess.as_default():
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {
                output.name
                for op in ops for output in op.outputs
            }
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph(
                    ).get_tensor_by_name(tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'],
                                             [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'],
                                             [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0],
                                             tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0],
                    image.shape[1])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name(
                'image_tensor:0')

            # Run inference
            output_dict = sess.run(
                tensor_dict,
                feed_dict={image_tensor: np.expand_dims(image, 0)})

            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(
                output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][
                0]
            #      print(output_dict['num_detections'])
            #      print(output_dict['detection_classes'])
            #print(output_dict['detection_scores'])
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict[
                    'detection_masks'][0]
    return output_dict
Example #32
    def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
                nsteps, ent_coef, vf_coef, max_grad_norm, microbatch_size=None):
        self.sess = sess = get_session()

        with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
            # CREATE OUR TWO MODELS
            # act_model that is used for sampling
            act_model = policy(nbatch_act, 1, sess)

            # Train model for training
            if microbatch_size is None:
                train_model = policy(nbatch_train, nsteps, sess)
            else:
                train_model = policy(microbatch_size, nsteps, sess)

        # CREATE THE PLACEHOLDERS
        self.A = A = train_model.pdtype.sample_placeholder([None])
        self.ADV = ADV = tf.placeholder(tf.float32, [None])
        self.R = R = tf.placeholder(tf.float32, [None])
        # Keep track of old actor
        self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
        # Keep track of old critic
        self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
        self.LR = LR = tf.placeholder(tf.float32, [])
        # Cliprange
        self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])

        neglogpac = train_model.pd.neglogp(A)

        # Calculate the entropy
        # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
        entropy = tf.reduce_mean(train_model.pd.entropy())

        # CALCULATE THE LOSS
        # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss

        # Clip the value to reduce variability during Critic training
        # Get the predicted value
        vpred = train_model.vf
        vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
        # Unclipped value
        vf_losses1 = tf.square(vpred - R)
        # Clipped value
        vf_losses2 = tf.square(vpredclipped - R)

        vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))

        # Calculate ratio (pi current policy / pi old policy)
        ratio = tf.exp(OLDNEGLOGPAC - neglogpac)

        # Defining Loss = - J is equivalent to max J
        pg_losses = -ADV * ratio

        pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)

        # Final PG loss
        pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
        approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
        clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))

        # Total loss
        loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef

        # UPDATE THE PARAMETERS USING LOSS
        # 1. Get the model parameters
        params = tf.trainable_variables('ppo2_model')
        # 2. Build our trainer
        if MPI is not None:
            self.trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
        else:
            self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
        # 3. Calculate the gradients
        grads_and_var = self.trainer.compute_gradients(loss, params)
        grads, var = zip(*grads_and_var)

        if max_grad_norm is not None:
            # Clip the gradients (normalize)
            grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads_and_var = list(zip(grads, var))
        # zip aggregate each gradient with parameters associated
        # For instance zip(ABCD, xyza) => Ax, By, Cz, Da

        self.grads = grads
        self.var = var
        self._train_op = self.trainer.apply_gradients(grads_and_var)
        self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
        self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac]


        self.train_model = train_model
        self.act_model = act_model
        self.step = act_model.step
        self.value = act_model.value
        self.initial_state = act_model.initial_state
        self.cal_neglogp = act_model.cal_neglogp

        self.save = functools.partial(save_variables, sess=sess)
        self.load = functools.partial(load_variables, sess=sess)

        initialize()
        global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
        if MPI is not None:
            sync_from_root(sess, global_variables) #pylint: disable=E1101
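The clipping arithmetic above in one concrete case: with CLIPRANGE = 0.2 and a positive advantage, a probability ratio of 1.5 contributes the clipped term -ADV * 1.2 rather than -ADV * 1.5 (tf.maximum over the two negated losses picks the more pessimistic one), and that sample counts toward clipfrac since |1.5 - 1.0| > 0.2.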
Example #33
batch_size = 8

# two input nodes
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
# a regression problem has a single output node
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

# forward pass of a single-layer network: just a simple weighted sum
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

# define the costs of over-predicting and under-predicting
loss_less = 1
loss_more = 10
loss = tf.reduce_sum(
    tf.where(tf.greater(y, y_), loss_more*(y-y_), loss_less*(y_-y)))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

# define a second loss: mean squared error
loss_2 = tf.reduce_mean(tf.square(y_ - y))
train_step_2 = tf.train.AdamOptimizer(0.001).minimize(loss_2)

# generate a synthetic dataset from random numbers
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
# the regression target is the sum of the two inputs plus small random noise
Y = [[x1 + x2 + rdm.rand()/10.0-0.05] for (x1, x2) in X]

# train the network
with tf.Session() as sess:
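With loss_more = 10 and loss_less = 1, over-predicting by 0.1 costs 1.0 while under-predicting by 0.1 costs only 0.1, so minimizing the custom loss drives the learned weights slightly below the MSE solution of w1 ~= (1, 1).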
Example #34
        def loop_step(batch_index, ts, stop_decoder, states, alphas, cand_seqs,
                      cand_scores, completed_scores, completed_scores_scaled,
                      completed_seqs, completed_lens):
            """
            Args:
              ts (int): time step
              stop_decoder (bool): stop decoding
              ys (?): [beam_size]
              states (float): [beam_size, state_size]
              alphas (float): [beam_size, alpha_size]
              cand_scores: [beam_size], sequence score
              cand_seqs: [beam_size, ts], ts increases over time

            Returns:
              logits shape: [beam_size, output_dim]
              state: [beam_size, state_size]
              alpha: [beam_size, alpha_size]

            """
            # 1. get score from one step decoder
            # logits = tf.one_hot(ts, depth=num_symbols, off_value=0.0, dtype=tf.float32)
            if DEBUG: ts = tf.Print(ts, [ts], message='ts: ')
            ys = cand_seqs[:, ts]
            if DEBUG: ys = tf.Print(ys, [ys], message='Y(t-1): ')
            logits, states, alphas = self.step(ys, states, alphas, batch_index)
            if DEBUG: logits = tf.Print(logits, [logits], message='logits: ')
            Z = tf.reduce_logsumexp(logits, 1, keep_dims=True)
            if DEBUG: Z = tf.Print(Z, [Z], message='Z: ')
            logprobs = tf.subtract(logits, Z)  # [beam_size, num_symbols]
            new_scores = tf.add(logprobs,
                                tf.expand_dims(cand_scores,
                                               1))  # [beam_size, num_symbols]
            if DEBUG:
                new_scores = tf.Print(new_scores, [new_scores],
                                      message='new_scores: ')
            num_unstop_symbols = tf.shape(new_scores)[1] - 1
            new_uncompleted_scores, new_completed_scores = tf.split(
                new_scores, [num_unstop_symbols, 1], 1)
            if DEBUG:
                new_uncompleted_scores = tf.Print(
                    new_uncompleted_scores, [new_uncompleted_scores],
                    message='new_uncompleted_scores: ')

            # 2. Update completed seqs  --------------------------------------
            # 2.1 update scores
            new_completed_scores = tf.squeeze(new_completed_scores,
                                              -1)  # [beam_size]
            all_completed_scores = tf.concat(
                [completed_scores, new_completed_scores], 0)  # [2*beam_size]

            # 2.2 choose top K from scaled_scores
            new_completed_scores_scaled = tf.div(new_completed_scores,
                                                 tf.to_float(ts + 1))
            all_scores_scaled = tf.concat(
                [completed_scores_scaled, new_completed_scores_scaled], 0)
            completed_scores_scaled, indices = tf.nn.top_k(all_scores_scaled,
                                                           k=beam_size,
                                                           sorted=False)
            if DEBUG:
                indices = tf.Print(indices, [indices],
                                   message='top K completed indices: ')

            # 2.2 update len
            new_completed_lens = tf.fill([beam_size], tf.add(ts,
                                                             1))  # [beam_size]
            all_lens = tf.concat([completed_lens, new_completed_lens],
                                 0)  # [2*beam_size]
            completed_lens = tf.gather(all_lens,
                                       indices,
                                       validate_indices=True,
                                       axis=0)  # [beam_size]
            if DEBUG:
                completed_lens = tf.Print(completed_lens, [completed_lens],
                                          message='completed lens',
                                          summarize=5)

            # 2.3 update seqs
            all_completed = tf.concat([completed_seqs, cand_seqs], 0)
            completed_seqs = tf.gather(all_completed,
                                       indices,
                                       validate_indices=True,
                                       axis=0)  # [beam_size, ts]
            if DEBUG:
                completed_seqs = tf.Print(completed_seqs, [completed_seqs],
                                          message='completed seqs: ',
                                          summarize=MAX_STEPS + 2)

            # 2.4 stop decoding loop
            max_uncompleted = tf.reduce_max(new_uncompleted_scores)
            completed_scores = tf.gather(all_completed_scores,
                                         indices,
                                         validate_indices=True,
                                         axis=0)
            min_completed = tf.reduce_min(completed_scores)
            stop_decoder = tf.greater(min_completed, max_uncompleted)

            # 2. Update completed seqs  --------------------------------------

            # 3. Update uncompleted sequences --------------------------------
            # new_uncompleted_scores: [beam_size, num_symbols-1]
            # top_k: [beam_size]. indices of top k scores
            def f0():
                return new_uncompleted_scores[0, :]

            def f1():
                return new_uncompleted_scores

            un_scores = tf.cond(tf.equal(ts, 0), f0, f1)
            new_flat = tf.reshape(un_scores,
                                  [-1])  # [beam_size*num_unstop_symbols]

            # get top K symbols
            cand_scores, flat_indices = tf.nn.top_k(new_flat,
                                                    k=beam_size,
                                                    sorted=False)
            cand_parents = tf.div(flat_indices, num_unstop_symbols)
            _ys = tf.mod(flat_indices,
                         num_unstop_symbols)  # [beam_size], y(t) for next step
            A = tf.gather(cand_seqs[:, 0:ts + 1],
                          cand_parents)  #[beam_size, ts+1]
            B = tf.expand_dims(_ys, -1)  # [beam_size, 1]
            C = tf.fill([beam_size, MAX_STEPS + 2 - ts - 2], stop_symbol)
            cand_seqs = tf.concat([A, B, C], 1)  # [beam_size, MAX_STEPS + 2]
            if DEBUG:
                cand_seqs = tf.Print(cand_seqs, [cand_seqs],
                                     message='cand seqs: ',
                                     summarize=MAX_STEPS + 2)
            # cand_seqs.set_shape([beam_size, MAX_STEPS+2])
            cand_seqs = tf.reshape(cand_seqs, [beam_size, MAX_STEPS + 2])
            cand_scores.set_shape([beam_size])
            # completed_seqs.set_shape([beam_size, MAX_STEPS+2])
            completed_seqs = tf.reshape(completed_seqs,
                                        [beam_size, MAX_STEPS + 2])

            s1_shape = [beam_size, self.attention_cell.state_size]
            s2_shape = [beam_size, self.decoder_cell.state_size]
            s3_shape = [beam_size, self.attn_context.context_size]

            # prepare data for next step
            # states = tf.gather(states, cand_parents, axis=0)
            # states = self.select_states(states, cand_parents)
            states = tuple(tf.gather(el, cand_parents) for el in states)
            states[0].set_shape(s1_shape)
            states[1].set_shape(s2_shape)
            states[2].set_shape(s3_shape)
            alphas = tf.gather(alphas, cand_parents, axis=1)
            alphas_shape = [self.attn_context.num_encoder_states, beam_size]
            alphas = tf.reshape(alphas, alphas_shape)
            # alphas.set_shape(alphas_shape)
            # 3. Update uncompleted sequences --------------------------------

            ts = tf.add(ts, 1)
            return ts, stop_decoder, states, alphas, cand_seqs, \
                cand_scores, completed_scores, completed_scores_scaled, \
                completed_seqs, completed_lens
Example #35
0
    def __init__(self, data, args, embed):
        # the inputs here are the same as in the earlier seq2seq model
        self.posts = tf.placeholder(tf.int32, (None, None),
                                    'enc_inps')  # batch*len
        self.posts_length = tf.placeholder(tf.int32, (None, ),
                                           'enc_lens')  # batch
        self.prevs_length = tf.placeholder(tf.int32, (None, ),
                                           'enc_lens_prev')  # batch
        self.origin_responses = tf.placeholder(tf.int32, (None, None),
                                               'dec_inps')  # batch*len
        self.origin_responses_length = tf.placeholder(tf.int32, (None, ),
                                                      'dec_lens')  # batch

        # kgs: all knowledge triples in the dialogue this example comes from: [batch, max_kg_nums, max_kg_length]
        # kgs_h_length: length of the head entity in each triple: [batch, max_kg_nums]
        # kgs_hr_length: combined length of head entity and relation in each triple: [batch, max_kg_nums]
        # kgs_hrt_length: combined length of h, r, t in each triple: [batch, max_kg_nums]
        # kgs_index: indicator matrix of the kg actually used by the current utterance: [batch, max_kg_nums]
        #            (1 where the knowledge is used, 0 otherwise)
        self.kgs = tf.placeholder(tf.int32, (None, None, None), 'kg_inps')
        self.kgs_h_length = tf.placeholder(tf.int32, (None, None), 'kg_h_lens')
        self.kgs_hr_length = tf.placeholder(tf.int32, (None, None),
                                            'kg_hr_lens')
        self.kgs_hrt_length = tf.placeholder(tf.int32, (None, None),
                                             'kg_hrt_lens')
        self.kgs_index = tf.placeholder(tf.float32, (None, None), 'kg_indices')

        # hyperparameter that balances the decoding loss against the kg loss
        self.lamb = tf.placeholder(tf.float32, name='lamb')
        self.is_train = tf.placeholder(tf.bool)

        # deal with original data to adapt encoder and decoder
        # build the decoder inputs and outputs
        batch_size, decoder_len = tf.shape(self.origin_responses)[0], tf.shape(
            self.origin_responses)[1]
        self.responses = tf.split(self.origin_responses, [1, decoder_len - 1],
                                  1)[1]  # no go_id
        self.responses_length = self.origin_responses_length - 1
        self.responses_input = tf.split(self.origin_responses,
                                        [decoder_len - 1, 1],
                                        1)[0]  # no eos_id
        self.responses_target = self.responses
        decoder_len = decoder_len - 1
        # build the encoder input
        self.posts_input = self.posts  # batch*len
        # decoder mask matrix (masks out the padding)
        self.decoder_mask = tf.reshape(
            tf.cumsum(tf.one_hot(self.responses_length - 1, decoder_len),
                      reverse=True,
                      axis=1), [-1, decoder_len])
        kg_len = tf.shape(self.kgs)[2]
        #kg_len = tf.Print(kg_len, [batch_size, kg_len, decoder_len, self.kgs_length])
        # kg_h_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_h_length-1,
        # 	kg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])
        # build the mask for the key (i.e. h+r): [batch_size, max_kg_nums, max_kg_length, 1]
        # and the mask for the value (i.e. t): [batch_size, max_kg_nums, max_kg_length, 1]
        kg_hr_mask = tf.reshape(
            tf.cumsum(tf.one_hot(self.kgs_hr_length - 1, kg_len),
                      reverse=True,
                      axis=2), [batch_size, -1, kg_len, 1])
        kg_hrt_mask = tf.reshape(
            tf.cumsum(tf.one_hot(self.kgs_hrt_length - 1, kg_len),
                      reverse=True,
                      axis=2), [batch_size, -1, kg_len, 1])
        kg_key_mask = kg_hr_mask
        kg_value_mask = kg_hrt_mask - kg_hr_mask

        # initialize the training process
        self.learning_rate = tf.Variable(float(args.lr),
                                         trainable=False,
                                         dtype=tf.float32)
        self.learning_rate_decay_op = self.learning_rate.assign(
            self.learning_rate * args.lr_decay)
        self.global_step = tf.Variable(0, trainable=False)

        # build the embedding table and embedding input
        if embed is None:
            # initialize the embedding randomly
            self.embed = tf.get_variable(
                'embed', [data.vocab_size, args.embedding_size], tf.float32)
        else:
            # initialize the embedding by pre-trained word vectors
            self.embed = tf.get_variable('embed',
                                         dtype=tf.float32,
                                         initializer=embed)
        # encoder_input: [batch, encoder_len, embed_size]
        self.encoder_input = tf.nn.embedding_lookup(self.embed, self.posts)
        # decoder_input: [batch, decoder_len, embed_size]
        self.decoder_input = tf.nn.embedding_lookup(self.embed,
                                                    self.responses_input)
        # kg_input: [batch, max_kg_nums, max_kg_length, embed_size]
        self.kg_input = tf.nn.embedding_lookup(self.embed, self.kgs)
        #self.encoder_input = tf.cond(self.is_train,
        #							 lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.posts_input), 0.8),
        #							 lambda: tf.nn.embedding_lookup(self.embed, self.posts_input)) #batch*len*unit
        #self.decoder_input = tf.cond(self.is_train,
        #							 lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.responses_input), 0.8),
        #							 lambda: tf.nn.embedding_lookup(self.embed, self.responses_input))

        # build rnn_cell
        cell_enc = tf.nn.rnn_cell.GRUCell(args.eh_size)
        cell_dec = tf.nn.rnn_cell.GRUCell(args.dh_size)

        # build encoder
        with tf.variable_scope('encoder'):
            encoder_output, encoder_state = tf.nn.dynamic_rnn(
                cell_enc,
                self.encoder_input,
                self.posts_length,
                dtype=tf.float32,
                scope="encoder_rnn")
        # key: mean of the word embeddings of h and r in a triple [batch, max_kg_nums, embed_size]
        # value: mean of the word embeddings of t in a triple [batch, max_kg_nums, embed_size]
        self.kg_key_avg = tf.reduce_sum(
            self.kg_input * kg_key_mask, axis=2) / tf.maximum(
                tf.reduce_sum(kg_key_mask, axis=2),
                tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1),
                             dtype=tf.float32))
        self.kg_value_avg = tf.reduce_sum(
            self.kg_input * kg_value_mask, axis=2) / tf.maximum(
                tf.reduce_sum(kg_value_mask, axis=2),
                tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1),
                             dtype=tf.float32))
        # project the encoder output state down to embed_size
        # query: [batch, 1, embed_size]
        with tf.variable_scope('knowledge'):
            query = tf.reshape(
                tf.layers.dense(tf.concat(encoder_state, axis=-1),
                                args.embedding_size,
                                use_bias=False),
                [batch_size, 1, args.embedding_size])
        # [batch, max_kg_nums]
        kg_score = tf.reduce_sum(query * self.kg_key_avg, axis=2)
        # where hrt length > 0 (i.e. a triple exists), keep kg_score; otherwise set that position to -inf
        kg_score = tf.where(tf.greater(self.kgs_hrt_length, 0), kg_score,
                            -tf.ones_like(kg_score) * np.inf)
        # attention weight of each triple [batch, max_kg_nums]
        kg_alignment = tf.nn.softmax(kg_score)

        # use the kg attention weights to compute the attended-kg accuracy and loss
        kg_max = tf.argmax(kg_alignment, axis=-1)
        kg_max_onehot = tf.one_hot(kg_max,
                                   tf.shape(kg_alignment)[1],
                                   dtype=tf.float32)
        self.kg_acc = tf.reduce_sum(
            kg_max_onehot * self.kgs_index) / tf.maximum(
                tf.reduce_sum(tf.reduce_max(self.kgs_index, axis=-1)),
                tf.constant(1.0))
        self.kg_loss = tf.reduce_sum(
            -tf.log(tf.clip_by_value(kg_alignment, 1e-12, 1.0)) *
            self.kgs_index,
            axis=1) / tf.maximum(tf.reduce_sum(self.kgs_index, axis=1),
                                 tf.ones([batch_size], dtype=tf.float32))
        self.kg_loss = tf.reduce_mean(self.kg_loss)
        # knowledge embedding after attention: [batch, embed_size]
        self.knowledge_embed = tf.reduce_sum(
            tf.expand_dims(kg_alignment, axis=-1) * self.kg_value_avg, axis=1)
        # tile to [batch, decoder_len, embed_size]
        knowledge_embed_extend = tf.tile(
            tf.expand_dims(self.knowledge_embed, axis=1), [1, decoder_len, 1])
        # concatenate the knowledge with the original decoder input as the new decoder input [batch, decoder_len, 2*embed_size]
        self.decoder_input = tf.concat(
            [self.decoder_input, knowledge_embed_extend], axis=2)

        # get output projection function
        output_fn = MyDense(data.vocab_size, use_bias=True)
        sampled_sequence_loss = output_projection_layer(
            args.dh_size, data.vocab_size, args.softmax_samples)

        encoder_len = tf.shape(encoder_output)[1]
        posts_mask = tf.sequence_mask(self.posts_length, encoder_len)
        prevs_mask = tf.sequence_mask(self.prevs_length, encoder_len)
        attention_mask = tf.reshape(tf.logical_xor(posts_mask, prevs_mask),
                                    [batch_size, encoder_len])

        # construct helper and attention
        train_helper = tf.contrib.seq2seq.TrainingHelper(
            self.decoder_input, self.responses_length)
        #infer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(self.embed, tf.fill([batch_size], data.go_id), data.eos_id)
        # at inference time, each step's input is the previous output concatenated with the knowledge
        infer_helper = MyInferenceHelper(self.embed,
                                         tf.fill([batch_size], data.go_id),
                                         data.eos_id, self.knowledge_embed)
        #attn_mechanism = tf.contrib.seq2seq.BahdanauAttention(args.dh_size, encoder_output,
        #  memory_sequence_length=self.posts_length)
        # MyAttention works around BahdanauAttention only accepting encoder sequence lengths (a full mask is needed here)
        attn_mechanism = MyAttention(args.dh_size, encoder_output,
                                     attention_mask)
        cell_dec_attn = tf.contrib.seq2seq.AttentionWrapper(
            cell_dec, attn_mechanism, attention_layer_size=args.dh_size)
        enc_state_shaping = tf.layers.dense(encoder_state,
                                            args.dh_size,
                                            activation=None)
        dec_start = cell_dec_attn.zero_state(
            batch_size, dtype=tf.float32).clone(cell_state=enc_state_shaping)

        # build decoder (train)
        with tf.variable_scope('decoder'):
            decoder_train = tf.contrib.seq2seq.BasicDecoder(
                cell_dec_attn, train_helper, dec_start)
            train_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder_train, impute_finished=True, scope="decoder_rnn")
            self.decoder_output = train_outputs.rnn_output
            #self.decoder_output = tf.nn.dropout(self.decoder_output, 0.8)
            # output distribution and decoding loss
            self.decoder_distribution_teacher, self.decoder_loss, self.decoder_all_loss = \
             sampled_sequence_loss(self.decoder_output, self.responses_target, self.decoder_mask)

        # build decoder (test)
        with tf.variable_scope('decoder', reuse=True):
            decoder_infer = tf.contrib.seq2seq.BasicDecoder(
                cell_dec_attn, infer_helper, dec_start, output_layer=output_fn)
            infer_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder_infer,
                impute_finished=True,
                maximum_iterations=args.max_decoder_length,
                scope="decoder_rnn")
            # output decoding distribution
            self.decoder_distribution = infer_outputs.rnn_output
            self.generation_index = tf.argmax(
                tf.split(self.decoder_distribution, [2, data.vocab_size - 2],
                         2)[1], 2) + 2  # for removing UNK

        # calculate the gradient of parameters and update
        self.params = [
            k for k in tf.trainable_variables() if args.name in k.name
        ]
        opt = tf.train.AdamOptimizer(self.learning_rate)
        # add the decoding loss and the kg loss
        self.loss = self.decoder_loss + self.lamb * self.kg_loss
        gradients = tf.gradients(self.loss, self.params)
        clipped_gradients, self.gradient_norm = tf.clip_by_global_norm(
            gradients, args.grad_clip)
        self.update = opt.apply_gradients(zip(clipped_gradients, self.params),
                                          global_step=self.global_step)

        # save checkpoint
        self.latest_saver = tf.train.Saver(
            write_version=tf.train.SaverDef.V2,
            max_to_keep=args.checkpoint_max_to_keep,
            pad_step_number=True,
            keep_checkpoint_every_n_hours=1.0)
        self.best_saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,
                                         max_to_keep=1,
                                         pad_step_number=True,
                                         keep_checkpoint_every_n_hours=1.0)

        # create summary for tensorboard
        self.create_summary(args)
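The reversed-cumsum-over-one-hot pattern used above for `decoder_mask`, `kg_hr_mask` and `kg_hrt_mask` turns a vector of lengths into a 1/0 prefix mask, and the difference of two such masks (as in `kg_value_mask`) isolates just the tail tokens. A small NumPy sketch of the trick, with toy lengths:

import numpy as np

def prefix_mask(lengths, max_len):
    # one-hot the last valid index, then reverse-cumsum: every position at or
    # before that index becomes 1, the padding stays 0
    one_hot = np.eye(max_len)[lengths - 1]              # [batch, max_len]
    return np.cumsum(one_hot[:, ::-1], axis=1)[:, ::-1]

hr_len, hrt_len = np.array([2]), np.array([4])
print(prefix_mask(hr_len, 5))                            # [[1. 1. 0. 0. 0.]]
print(prefix_mask(hrt_len, 5) - prefix_mask(hr_len, 5))  # [[0. 0. 1. 1. 0.]], the "t" tokens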
Example #36
0
    def rpn_find_positive_negative_samples(self, anchors):
        '''
        Assign anchor targets: object or background.
        :param anchors: [valid_num_of_anchors, 5]. Use N to denote valid_num_of_anchors.

        :return: labels, anchors_matched_gtboxes, object_mask

        labels: shape [N, ]. Positive is 1, negative is 0, ignored is -1.
        anchors_matched_gtboxes: each anchor's gtbox (only positive anchors have a real gtbox). Shape is [N, 5].
        object_mask: tf.float32. 1.0 means the box is an object, 0.0 otherwise. Shape is [N, ].
        '''
        with tf.variable_scope('rpn_find_positive_negative_samples'):
            gtboxes = tf.reshape(self.gtboxes_and_label[:, :-1], [-1, 5])
            gtboxes = tf.cast(gtboxes, tf.float32)

            # [N, M]
            ious = tf_wrapper.get_iou_matrix_tf(anchors,
                                                gtboxes,
                                                use_gpu=cfgs.IOU_USE_GPU,
                                                gpu_id=0)
            # ious = tf.reshape(ious, [tf.shape(anchors)[0], tf.shape(gtboxes)[0]])

            max_iou_each_row = tf.reduce_max(ious, axis=1)

            labels = tf.ones(shape=[
                tf.shape(anchors)[0],
            ], dtype=tf.float32) * (-1)  # [N, ] # ignored is -1

            matchs = tf.cast(tf.argmax(ious, axis=1), tf.int32)
            # matchs = matchs * tf.cast(positives, dtype=matchs.dtype)  # remove background and ignored
            anchors_matched_gtboxes = tf.gather(gtboxes, matchs)  # [N, 5]

            negatives = tf.less(max_iou_each_row,
                                self.rpn_iou_negative_threshold)
            negatives = tf.logical_and(negatives,
                                       tf.greater(max_iou_each_row, 0.1))
            # anchors whose IoU with the gtboxes is between 0.1 and 0.3 are treated as negatives

            if self.use_angles_condition:
                # an anchor that has an IoU overlap higher than 0.7 with any ground-truth box
                cond1 = tf.greater_equal(
                    max_iou_each_row,
                    self.rpn_iou_positive_threshold)  # iou >= 0.7 is positive

                # angle condition
                gtboxes_angles = anchors_matched_gtboxes[:,
                                                         -1]  # tf.unstack(anchors_matched_gtboxes, axis=1)
                anchors_angles = anchors[:, -1]  # tf.unstack(anchors, axis=1)
                cond2 = tf.less_equal(tf.abs(gtboxes_angles - anchors_angles),
                                      self.anchor_angle_threshold)
                cond3 = tf.greater(tf.abs(gtboxes_angles - anchors_angles),
                                   self.anchor_angle_threshold)

                positives1 = tf.logical_and(cond1, cond2)
                negatives = tf.logical_or(negatives,
                                          tf.logical_and(cond1, cond3))
            else:
                positives1 = tf.greater_equal(max_iou_each_row,
                                              self.rpn_iou_positive_threshold)

            # in case no box has IoU >= 0.7, take the max-IoU boxes as positives
            max_iou_each_column = tf.reduce_max(ious, 0)
            # the anchor/anchors with the highest Intersection-over-Union (IoU) overlap with a ground-truth box
            positives2 = tf.reduce_sum(tf.cast(
                tf.equal(ious, max_iou_each_column), tf.float32),
                                       axis=1)

            positives = tf.logical_or(positives1, tf.cast(positives2, tf.bool))

            labels += 2 * tf.cast(
                positives,
                tf.float32)  # Now, positive is 1, ignored and background is -1

            # object_mask = tf.cast(positives, tf.float32)  # 1.0 is object, 0.0 is others
            # background's gtboxes are temporarily set to the first gtbox; it does not matter, because object_mask will ignore them

            labels += tf.cast(
                negatives, tf.float32
            )  # [N, ] positive is >=1.0, negative is 0, ignored is -1.0
            '''
            Note: when positive, labels may be >= 1.0.
            When every IoU is < 0.7, we take the anchors with the max IoU in each
            column as positives, and those anchors may themselves have IoU < 0.3.
            For such an anchor (IoU < 0.3 but max IoU in some column):
            1. In the beginning, labels is [-1, -1, ..., -1]
            2. It becomes 1, because its IoU is the max in some column (labels += 2 * positives)
            3. It becomes 2, because its IoU is < 0.3 (labels += negatives)
            So the final value is 2.0.

            Hence, when positive, labels may be 1.0 or 2.0, i.e. labels >= 1.0.

            The following code fixes that.
            '''
            positives = tf.cast(tf.greater_equal(labels, 1.0), tf.float32)
            ignored = tf.cast(tf.equal(labels, -1.0), tf.float32) * -1

            labels = positives + ignored
            object_mask = tf.cast(positives,
                                  tf.float32)  # 1.0 is object, 0.0 is others

            return labels, anchors_matched_gtboxes, object_mask
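The label arithmetic documented inside `rpn_find_positive_negative_samples` is easier to follow with concrete numbers. A NumPy walkthrough under assumed flags for four anchors (a max-IoU positive that is also low-IoU, a plain positive, a negative, an ignored one):

import numpy as np

labels = -np.ones(4)                     # everything starts as ignored (-1)
positives = np.array([1., 1., 0., 0.])   # anchor 0 is positive only via the max-IoU rule
negatives = np.array([1., 0., 1., 0.])   # ...and its own IoU is < 0.3

labels += 2 * positives                  # [ 1.,  1., -1., -1.]
labels += negatives                      # [ 2.,  1.,  0., -1.]  anchor 0 ended up at 2.0

# the clean-up step from the code collapses everything >= 1 back to exactly 1
positives = (labels >= 1.0).astype(np.float32)
ignored = (labels == -1.0).astype(np.float32) * -1
print(positives + ignored)               # [ 1.  1.  0. -1.]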
Example #37
0
            detection_boxes = tf.slice(detection_boxes, [0, 0],
                                       [real_num_detection, -1])
            detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                       [real_num_detection, -1, -1])
            # detection_masks = tf.expand_dims(detection_masks, 0)

            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')

            image_shape = tf.shape(image_tensor)
            detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                detection_masks, detection_boxes, image_shape[1],
                image_shape[2])
            detection_masks_reframed = tf.cast(
                tf.greater(detection_masks_reframed, 0.5), tf.uint8)

            detection_masks_reframed = tf.expand_dims(detection_masks_reframed,
                                                      0)

            # image_shape = tf.shape(image)
            # import pdb;
            # pdb.set_trace()
            # shuffle(all_classes)

            count = 0
            for index, row in df.iterrows():
                print(str(index) + ' / ' + str(count) + ' / ' + str(len(df)))

                image_path = test_data_path + '/' + row['ImageId']
Example #38
0
#############################################
# Check if anchors' centers are in boxes area

ycenter2, xcenter2, _, _ = BoxList.get_center_coordinates_and_sizes(anchors)

gt_boxes_tensor = tf.convert_to_tensor(gt_boxes_array)
gt_boxes_broadcast_ymin = tf.squeeze(
    tf.slice(gt_boxes_tensor, (0, 0), (gt_boxes_tensor.shape[0], 1)))
gt_boxes_broadcast_xmin = tf.squeeze(
    tf.slice(gt_boxes_tensor, (0, 1), (gt_boxes_tensor.shape[0], 1)))
gt_boxes_broadcast_ymax = tf.squeeze(
    tf.slice(gt_boxes_tensor, (0, 2), (gt_boxes_tensor.shape[0], 1)))
gt_boxes_broadcast_xmax = tf.squeeze(
    tf.slice(gt_boxes_tensor, (0, 3), (gt_boxes_tensor.shape[0], 1)))

is_in_xmin = tf.greater(xcenter2 - tf.transpose([gt_boxes_broadcast_xmin]), 0)
is_in_ymin = tf.greater(ycenter2 - tf.transpose([gt_boxes_broadcast_ymin]), 0)
is_in_xmax = tf.less(xcenter2 - tf.transpose([gt_boxes_broadcast_xmax]), 0)
is_in_ymax = tf.less(ycenter2 - tf.transpose([gt_boxes_broadcast_ymax]), 0)
selected_anchors_by_center_in_area = tf.logical_and(
    tf.logical_and(is_in_xmin, is_in_ymin),
    tf.logical_and(is_in_xmax, is_in_ymax))

# Mask similarly to selected_anchors_by_threshold or selected_anchors_by_distance
selected_anchors_by_center_in_area = tf.cast(
    selected_anchors_by_center_in_area, tf.int32)

#############################################

# Rest of the process
selected_anchors_by_distance = tf.cast(
    selected_anchors_by_distance, tf.int32)
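The center-in-box test above relies on broadcasting each anchor center against a column vector of box edges. A NumPy sketch of the same broadcast with toy values (names are illustrative only):

import numpy as np

xcenter = np.array([0.5, 2.0, 3.5])        # anchor x-centers, shape [A]
gt = np.array([[0., 0., 1., 1.],           # each row: ymin, xmin, ymax, xmax
               [3., 3., 4., 4.]])          # shape [G, 4]

# [G, 1] - [A] broadcasts to [G, A]: entry (g, a) tests anchor a against box g
is_in_xmin = xcenter - gt[:, 1][:, None] > 0
is_in_xmax = xcenter - gt[:, 3][:, None] < 0
print(np.logical_and(is_in_xmin, is_in_xmax))
# [[ True False False]
#  [False False  True]]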
Example #39
0
def dXdt(X, t): # X is the state vector
    
    V_p   = X[0   : p_n] # Voltage(PN)
    V_l   = X[p_n : n_n] # Voltage(LN)
    
    n_K   = X[n_n : 2*n_n] # K-gating(ALL)
    
    m_Na  = X[2*n_n : 2*n_n + p_n] # Na-activation-gating(PN)
    h_Na  = X[2*n_n + p_n : 2*n_n + 2*p_n] # Na-inactivation-gating(PN)

    m_A   = X[2*n_n + 2*p_n : 2*n_n + 3*p_n] # Transient-K-activation-gating(PN)
    h_A   = X[2*n_n + 3*p_n : 2*n_n + 4*p_n] # Transient-K-inactivation-gating(PN)
    
    m_Ca  = X[2*n_n + 4*p_n : 2*n_n + 4*p_n + l_n] # Ca-activation-gating(LN)
    h_Ca  = X[2*n_n + 4*p_n + l_n: 2*n_n + 4*p_n + 2*l_n] # Ca-inactivation-gating(LN)
    
    m_KCa = X[2*n_n + 4*p_n + 2*l_n : 2*n_n + 4*p_n + 3*l_n] # K(Ca)-gating(LN)
    Ca    = X[2*n_n + 4*p_n + 3*l_n: 2*n_n + 4*p_n + 4*l_n] # Ca-concentration(LN)

    o_ach = X[6*n_n : 6*n_n + n_syn_ach] # Acetylcholine Open Fraction
    o_fgaba = X[6*n_n + n_syn_ach : 6*n_n + n_syn_ach + n_syn_fgaba] # GABAa Open Fraction
    r_sgaba = X[6*n_n + n_syn_ach + n_syn_fgaba : 6*n_n + n_syn_ach + n_syn_fgaba + n_syn_sgaba] # Slow GABA (GABA_B) Receptor Fraction
    g_sgaba = X[6*n_n + n_syn_ach + n_syn_fgaba + n_syn_sgaba : 6*n_n + n_syn_ach + n_syn_fgaba + 2*n_syn_sgaba] # Slow GABA (GABA_B) Conductance Gate
    fire_t = X[-n_n:] # Fire-times
    
    V = X[:n_n] # Overall Voltage (PN + LN)
    
    
    # Evaluate Differentials for Gating variables and Ca concentration
    
    n0,tn = K_prop(V)
    
    dn_k = - (1.0/tn)*(n_K-n0)
    
    m0,tm,h0,th = Na_prop(V_p)
    
    dm_Na = - (1.0/tm)*(m_Na-m0)
    dh_Na = - (1.0/th)*(h_Na-h0)
    
    m0,tm,h0,th = A_prop(V_p)
    
    dm_A = - (1.0/tm)*(m_A-m0)
    dh_A = - (1.0/th)*(h_A-h0)
    
    m0,tm,h0,th = Ca_prop(V_l)
    
    dm_Ca = - (1.0/tm)*(m_Ca-m0)
    dh_Ca = - (1.0/th)*(h_Ca-h0)
    
    m0,tm = KCa_prop(Ca)
    
    dm_KCa = - (1.0/tm)*(m_KCa-m0)
    
    dCa = - A_Ca*I_Ca(V_l,m_Ca,h_Ca) - (Ca - Ca0)/t_Ca
    
    # Evaluate differential for Voltage
    
    CmdV_p = - I_Na(V_p, m_Na, h_Na) - I_A(V_p, m_A, h_A)
    CmdV_l = - I_Ca(V_l, m_Ca, h_Ca) - I_KCa(V_l, m_KCa)
    
    # Once we have that, we merge the two into a single 120-vector.
    
    CmdV = tf.concat([CmdV_p,CmdV_l],0)
    
    # Finally we add the common currents and divide by Cm to get dV/dt.
    
    dV = (I_inj_t(t) + CmdV - I_K(V, n_K) - I_L(V) - I_ach(o_ach,V) - I_fgaba(o_fgaba,V) - I_sgaba(g_sgaba,V)) / C_m

    
    # Evaluate dynamics in synapses
    
    A_ = tf.constant(A,dtype=tf.float64)
    T_ach = tf.where(tf.logical_and(tf.greater(t,fire_t+t_delay),tf.less(t,fire_t+t_max+t_delay)),A_,tf.zeros(tf.shape(A_),dtype=A_.dtype))
    T_ach = tf.multiply(tf.constant(ach_mat,dtype=tf.float64),T_ach)
    T_ach = tf.boolean_mask(tf.reshape(T_ach,(-1,)),ach_mat.reshape(-1) == 1)
    do_achdt = alp_ach*(1.0-o_ach)*T_ach - bet_ach*o_ach
    
    T_fgaba = 1.0/(1.0+tf.exp(-(V-V0)/sigma))
    T_fgaba = tf.multiply(tf.constant(fgaba_mat,dtype=tf.float64),T_fgaba)
    T_fgaba = tf.boolean_mask(tf.reshape(T_fgaba,(-1,)),fgaba_mat.reshape(-1) == 1)
    do_fgabadt = alp_fgaba*(1.0-o_fgaba)*T_fgaba - bet_fgaba*o_fgaba
    
    dg_sgabadt = - np.array(r4_sgaba)*g_sgaba + np.array(r3_sgaba)*r_sgaba 
    
    A_ = tf.constant(A,dtype=tf.float64)
    T_sgaba = tf.where(tf.logical_and(tf.greater(t,fire_t+t_delay),tf.less(t,fire_t+t_max+t_delay)),A_,tf.zeros(tf.shape(A_),dtype=A_.dtype))
    T_sgaba = tf.multiply(tf.constant(sgaba_mat,dtype=tf.float64),T_sgaba)
    T_sgaba = tf.boolean_mask(tf.reshape(T_sgaba,(-1,)),sgaba_mat.reshape(-1) == 1)
    dr_sgabadt = r1_sgaba*(1.0-r_sgaba)*T_sgaba - r2_sgaba*r_sgaba
    
    # Set change in fire-times as zero
    
    dfdt = tf.zeros(tf.shape(fire_t),dtype=fire_t.dtype)

    # Combine to a single vector
    
    out = tf.concat([dV,         dn_k,
                     dm_Na,      dh_Na,
                     dm_A,       dh_A,
                     dm_Ca,      dh_Ca,
                     dm_KCa,     
                     dCa,        do_achdt,
                     do_fgabadt, dr_sgabadt,
                     dg_sgabadt, dfdt   ],0)
    return out
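Each gating equation above is the same first-order relaxation dX/dt = -(X - X0)/tau toward a voltage-dependent steady state. A minimal forward-Euler sketch of that relaxation (made-up x0 and tau, purely illustrative):

import numpy as np

def relax(x, x0, tau, dt=0.01, steps=500):
    # forward-Euler integration of dx/dt = -(x - x0) / tau
    for _ in range(steps):
        x += dt * (-(x - x0) / tau)
    return x

print(relax(x=0.0, x0=1.0, tau=1.0))  # approaches the steady state x0 = 1.0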
Example #40
0
    def setup_model(self):
        with SetVerbosity(self.verbose):

            assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
                                                               "an instance of common.policies.ActorCriticPolicy."

            self.n_batch = self.n_envs * self.n_steps

            n_cpu = multiprocessing.cpu_count()
            if sys.platform == 'darwin':
                n_cpu //= 2

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.sess = tf_util.make_session(num_cpu=n_cpu, graph=self.graph)

                n_batch_step = None
                n_batch_train = None
                if issubclass(self.policy, LstmPolicy):
                    assert self.n_envs % self.nminibatches == 0, "For recurrent policies, "\
                        "the number of environments run in parallel should be a multiple of nminibatches."
                    n_batch_step = self.n_envs
                    n_batch_train = self.n_batch // self.nminibatches

                act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                        n_batch_step, reuse=False)
                with tf.variable_scope("train_model", reuse=True,
                                       custom_getter=tf_util.outer_scope_getter("train_model")):
                    train_model = self.policy(self.sess, self.observation_space, self.action_space,
                                              self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
                                              reuse=True)

                with tf.variable_scope("loss", reuse=False):
                    self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
                    self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
                    self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
                    self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
                    self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
                    self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
                    self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")

                    neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
                    self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())

                    vpred = train_model.value_fn
                    vpredclipped = self.old_vpred_ph + tf.clip_by_value(
                        train_model.value_fn - self.old_vpred_ph, - self.clip_range_ph, self.clip_range_ph)
                    vf_losses1 = tf.square(vpred - self.rewards_ph)
                    vf_losses2 = tf.square(vpredclipped - self.rewards_ph)
                    self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
                    ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
                    pg_losses = -self.advs_ph * ratio
                    pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
                                                                  self.clip_range_ph)
                    self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
                    self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
                    self.clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), self.clip_range_ph)))
                    loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef

                    tf.summary.scalar('entropy_loss', self.entropy)
                    tf.summary.scalar('policy_gradient_loss', self.pg_loss)
                    tf.summary.scalar('value_function_loss', self.vf_loss)
                    tf.summary.scalar('approximate_kullback-leibler', self.approxkl)
                    tf.summary.scalar('clip_factor', self.clipfrac)
                    tf.summary.scalar('loss', loss)

                    with tf.variable_scope('model'):
                        self.params = tf.trainable_variables()
                    grads = tf.gradients(loss, self.params)
                    if self.max_grad_norm is not None:
                        grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
                    grads = list(zip(grads, self.params))
                trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
                self._train = trainer.apply_gradients(grads)

                self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
                    tf.summary.histogram('discounted_rewards', self.rewards_ph)
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
                    tf.summary.histogram('learning_rate', self.learning_rate_ph)
                    tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
                    tf.summary.histogram('advantage', self.advs_ph)
                    tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
                    tf.summary.histogram('clip_range', self.clip_range_ph)
                    tf.summary.scalar('old_neglog_action_probability', tf.reduce_mean(self.old_neglog_pac_ph))
                    tf.summary.histogram('old_neglog_action_probability', self.old_neglog_pac_ph)
                    tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))
                    tf.summary.histogram('old_value_pred', self.old_vpred_ph)
                    if len(self.observation_space.shape) == 3:
                        tf.summary.image('observation', train_model.obs_ph)
                    else:
                        tf.summary.histogram('observation', train_model.obs_ph)

                self.train_model = train_model
                self.act_model = act_model
                self.step = act_model.step
                self.proba_step = act_model.proba_step
                self.value = act_model.value
                self.initial_state = act_model.initial_state
                tf.global_variables_initializer().run(session=self.sess)  # pylint: disable=E1101

                self.summary = tf.summary.merge_all()
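The surrogate and value losses assembled in `setup_model` can be checked numerically. A NumPy sketch of the same clipped-objective formulas, with made-up advantages, ratios and value predictions:

import numpy as np

clip_range = 0.2
advs = np.array([1.0, -1.0])
ratio = np.array([1.5, 0.5])            # exp(old_neglogp - new_neglogp)

pg1 = -advs * ratio
pg2 = -advs * np.clip(ratio, 1.0 - clip_range, 1.0 + clip_range)
pg_loss = np.mean(np.maximum(pg1, pg2))  # pessimistic (clipped) surrogate

rewards = np.array([1.0, 0.0])
old_vpred = np.array([0.5, 0.5])
vpred = np.array([1.2, -0.4])
vpred_clipped = old_vpred + np.clip(vpred - old_vpred, -clip_range, clip_range)
vf_loss = 0.5 * np.mean(np.maximum((vpred - rewards) ** 2,
                                   (vpred_clipped - rewards) ** 2))
print(pg_loss, vf_loss)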
Example #41
0
vgg16_Features = tf.reshape(net, (-1, 4096))
# Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)

RNN_inputs = tf.reshape(vgg16_Features[0, :], (-1, feature_size))

h_1, curr_state1 = RNNCell(W_RNN1, b_RNN1, RNN_inputs, curr_state1)

# h_1, curr_state1 = lstm_cell(W_lstm1, b_lstm1, 1.0, inputs, curr_state1)
#curr_state1 = tf.reshape(curr_state1[0], (-1,n_hidden1))

fc1 = tf.matmul(h_1, W_fc1) + b_fc1
print(fc1[0, :].shape, vgg16_Features[1, :].shape)
sseLoss1 = tf.square(tf.subtract(fc1[0, :], vgg16_Features[1, :]))
mask = tf.greater(sseLoss1, learnError * tf.ones_like(sseLoss1))
sseLoss1 = tf.multiply(sseLoss1, tf.cast(mask, tf.float32))
sseLoss = tf.reduce_mean(sseLoss1)
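# Note: the mask above keeps only squared errors that exceed learnError, so
# predictions already within that margin contribute nothing to sseLoss.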

#####################
### Training loop ###
#####################

init = tf.global_variables_initializer()

saver = tf.train.Saver(max_to_keep=0)

with tf.Session() as sess:
    # Initialize parameters
    sess.run(init)
    saver.restore(sess, modelPath)
Example #42
0
def leaky_relu(x):
    return tf.where(tf.greater(x, 0), x, 0.01 * x)
Example #43
0
    def rpn_losses(self):
        with tf.variable_scope('rpn_losses'):
            minibatch_indices, minibatch_anchor_matched_gtboxes, \
            object_mask, minibatch_labels_one_hot = self.make_minibatch(self.anchors)

            minibatch_anchors = tf.gather(self.anchors, minibatch_indices)
            minibatch_encode_boxes = tf.gather(self.rpn_encode_boxes,
                                               minibatch_indices)
            minibatch_boxes_scores = tf.gather(self.rpn_scores,
                                               minibatch_indices)

            # encode gtboxes
            minibatch_encode_gtboxes = encode_and_decode.encode_boxes(
                unencode_boxes=minibatch_anchor_matched_gtboxes,
                reference_boxes=minibatch_anchors,
                scale_factors=self.scale_factors)

            positive_anchors_in_img = draw_box_with_color(
                self.img_batch,
                minibatch_anchors * tf.expand_dims(object_mask, 1),
                text=tf.shape(tf.where(tf.equal(object_mask, 1.0)))[0])

            negative_mask = tf.cast(
                tf.logical_not(tf.cast(object_mask, tf.bool)), tf.float32)
            negative_anchors_in_img = draw_box_with_color(
                self.img_batch,
                minibatch_anchors * tf.expand_dims(negative_mask, 1),
                text=tf.shape(tf.where(tf.equal(object_mask, 0.0)))[0])

            minibatch_decode_boxes = encode_and_decode.decode_boxes(
                encode_boxes=minibatch_encode_boxes,
                reference_boxes=minibatch_anchors,
                scale_factors=self.scale_factors)

            tf.summary.image('/positive_anchors', positive_anchors_in_img)
            tf.summary.image('/negative_anchors', negative_anchors_in_img)

            minibatch_boxes_softmax_scores = tf.gather(
                slim.softmax(self.rpn_scores), minibatch_indices)
            top_k_scores, top_k_indices = tf.nn.top_k(
                minibatch_boxes_softmax_scores[:, 1], k=20)

            top_k_boxes = tf.gather(minibatch_decode_boxes, top_k_indices)
            top_detections_in_img = draw_boxes_with_scores(self.img_batch,
                                                           boxes=top_k_boxes,
                                                           scores=top_k_scores)

            tf.summary.image('/top_20', top_detections_in_img)

            temp_indices = tf.reshape(
                tf.where(tf.greater(top_k_scores, cfgs.FINAL_SCORE_THRESHOLD)),
                [-1])
            rpn_predict_boxes = tf.gather(top_k_boxes, temp_indices)
            rpn_predict_scores = tf.gather(top_k_scores, temp_indices)

            # losses
            with tf.variable_scope('rpn_location_loss'):
                location_loss = losses.l1_smooth_losses(
                    predict_boxes=minibatch_encode_boxes,
                    gtboxes=minibatch_encode_gtboxes,
                    object_weights=object_mask)
                slim.losses.add_loss(
                    location_loss)  # add smooth l1 loss to losses collection

            with tf.variable_scope('rpn_classification_loss'):
                classification_loss = slim.losses.softmax_cross_entropy(
                    logits=minibatch_boxes_scores,
                    onehot_labels=minibatch_labels_one_hot)

            return location_loss, classification_loss, rpn_predict_boxes, rpn_predict_scores
Example #44
0
    def build_train_op(
        self
    ):  # Set computing the gradient wrt for the group assignment variables
        # Learning rate
        tf.summary.scalar(
            (self._name + "/" if self._name else "") + 'learning_rate', self.lr)

        # Optimizer and gradients for each GPU
        opt = tf.train.MomentumOptimizer(self.lr, self._hp.momentum)
        all_vars = tf.trainable_variables()
        group_vars = [v for v in all_vars if v.name.startswith('group/')]
        model_vars = [v for v in all_vars if not v.name.startswith('group/')]

        self._gv_list_task_loss = []
        self._gv_group_loss = []

        # Compute gradients for each GPU
        for i in range(self._hp.num_gpus):
            with tf.device('/GPU:%d' % i), tf.variable_scope(
                    tf.get_variable_scope()):
                with tf.name_scope('tower_%d/' % i) as scope:
                    print('Compute gradients of tower: %s' % scope)
                    if self._reuse_weights or i > 0:
                        tf.get_variable_scope().reuse_variables()

                    losses = []

                    # Add l2 loss
                    if self._hp.weight_decay > 0.0:
                        with tf.variable_scope('l2_loss'):
                            costs = [
                                tf.nn.l2_loss(var) for var in
                                tf.get_collection(utils.WEIGHT_DECAY_KEY)
                            ]
                            l2_loss = tf.multiply(self._hp.weight_decay,
                                                  tf.add_n(costs))
                            losses.append(l2_loss)

                    # Add group split loss
                    with tf.variable_scope('group'):
                        if tf.get_collection(
                                'OVERLAP_LOSS') and self._hp.gamma1 > 0.0:
                            cost1 = tf.reduce_mean(
                                tf.get_collection('OVERLAP_LOSS'))
                            cost1 = cost1 * self._hp.gamma1
                            if i == 0:
                                tf.summary.scalar('group/overlap_loss/', cost1)
                            losses.append(cost1)

                        if tf.get_collection(
                                'WEIGHT_SPLIT') and self._hp.gamma2 > 0.0:
                            if self._hp.weighted_group_loss:
                                reg_weights = [
                                    tf.stop_gradient(x)
                                    for x in tf.get_collection('WEIGHT_SPLIT')
                                ]
                                regs = [
                                    tf.stop_gradient(x) * x
                                    for x in tf.get_collection('WEIGHT_SPLIT')
                                ]
                                cost2 = tf.reduce_sum(regs) / tf.reduce_sum(
                                    reg_weights)
                            else:
                                cost2 = tf.reduce_mean(
                                    tf.get_collection('WEIGHT_SPLIT'))
                            cost2 = cost2 * self._hp.gamma2
                            if i == 0:
                                tf.summary.scalar('group/weight_split_loss/',
                                                  cost2)
                            losses.append(cost2)

                        if tf.get_collection(
                                'UNIFORM_LOSS') and self._hp.gamma3 > 0.0:
                            cost3 = tf.reduce_mean(
                                tf.get_collection('UNIFORM_LOSS'))
                            cost3 = cost3 * self._hp.gamma3
                            if i == 0:
                                tf.summary.scalar('group/group_uniform_loss/',
                                                  cost3)
                            losses.append(cost3)

                    if losses:
                        total_loss = self._loss_list[i] + tf.add_n(losses)

                    # Compute gradients of the per-GPU task loss; the group
                    # regularization losses are handled by the group tower below
                    grads_and_vars = opt.compute_gradients(
                        self._loss_list[i], model_vars)

                    # Append gradients and vars
                    self._gv_list_task_loss.append(grads_and_vars)

        # Compute gradients of the regularization loss
        # (this needs one extra GPU)
        with tf.device('/GPU:%d' % self._hp.num_gpus), tf.variable_scope(
                tf.get_variable_scope()):
            with tf.name_scope('tower_group/') as scope:
                print('Compute gradients of regularization loss: %s' % scope)
                if self._reuse_weights:
                    tf.get_variable_scope().reuse_variables()

                losses = []

                # Add l2 loss
                if self._hp.weight_decay > 0.0:
                    with tf.variable_scope('l2_loss'):
                        costs = [
                            tf.nn.l2_loss(var) for var in tf.get_collection(
                                utils.WEIGHT_DECAY_KEY)
                        ]
                        l2_loss = tf.multiply(self._hp.weight_decay,
                                              tf.add_n(costs))
                        losses.append(l2_loss)

                # Add group split loss
                with tf.variable_scope('group'):
                    if tf.get_collection(
                            'OVERLAP_LOSS') and self._hp.gamma1 > 0.0:
                        cost1 = tf.reduce_mean(
                            tf.get_collection('OVERLAP_LOSS'))
                        cost1 = cost1 * self._hp.gamma1
                        # this block runs once (outside the GPU loop), so no `i == 0` guard
                        tf.summary.scalar('group/overlap_loss/', cost1)
                        losses.append(cost1)

                    if tf.get_collection(
                            'WEIGHT_SPLIT') and self._hp.gamma2 > 0.0:
                        if self._hp.weighted_group_loss:
                            reg_weights = [
                                tf.stop_gradient(x)
                                for x in tf.get_collection('WEIGHT_SPLIT')
                            ]
                            regs = [
                                tf.stop_gradient(x) * x
                                for x in tf.get_collection('WEIGHT_SPLIT')
                            ]
                            cost2 = tf.reduce_sum(regs) / tf.reduce_sum(
                                reg_weights)
                        else:
                            cost2 = tf.reduce_mean(
                                tf.get_collection('WEIGHT_SPLIT'))
                        cost2 = cost2 * self._hp.gamma2
                        tf.summary.scalar('group/weight_split_loss/', cost2)
                        losses.append(cost2)

                    if tf.get_collection(
                            'UNIFORM_LOSS') and self._hp.gamma3 > 0.0:
                        cost3 = tf.reduce_mean(
                            tf.get_collection('UNIFORM_LOSS'))
                        cost3 = cost3 * self._hp.gamma3
                        tf.summary.scalar('group/group_uniform_loss/', cost3)
                        losses.append(cost3)

                if losses:
                    # Compute gradients of total loss
                    total_loss = tf.add_n(losses)
                    self._gv_group_loss = opt.compute_gradients(
                        total_loss, all_vars)

        # Merge gradients
        print('Average gradients')
        with tf.device('/CPU:0'):
            grads_and_vars = self._average_gradients2(self._gv_list_task_loss,
                                                      self._gv_group_loss)

            # Finetuning
            if self._hp.finetune:
                for idx, (grad, var) in enumerate(grads_and_vars):
                    if "group" in var.op.name or \
                            (("conv3_" in var.op.name) and self._hp.ngroups3 > 1) or \
                            (("conv4_" in var.op.name) and self._hp.ngroups2 > 1) or \
                            ("conv5_" in var.op.name) or \
                            "logits" in var.op.name:
                        print('\tScale up learning rate of %s by 10.0' %
                              var.op.name)
                        grad = 10.0 * grad
                        grads_and_vars[idx] = (grad, var)

            # Reduced gradient
            eps = 1e-5
            for idx, (grad, var) in enumerate(grads_and_vars):
                if "group" in var.name:
                    print('\tApply reduced gradient on ' + var.name)
                    ngroups, dim = var.get_shape().as_list()
                    zeros = tf.zeros((ngroups, dim), dtype=tf.float32)
                    zeros_col = tf.zeros((ngroups, ), dtype=tf.float32)
                    zeros_row = tf.zeros((dim, ), dtype=tf.float32)
                    ones = tf.ones((ngroups, dim), dtype=tf.float32)
                    ones_col = tf.ones((ngroups, ), dtype=tf.float32)
                    ones_row = tf.ones((dim, ), dtype=tf.float32)

                    mu = tf.cast(tf.argmax(var, 0), dtype=tf.int32)
                    offset = tf.constant([ngroups * i for i in range(dim)],
                                         dtype=tf.int32)
                    mask = tf.cast(tf.transpose(tf.one_hot(mu, ngroups)),
                                   dtype=tf.bool)

                    grad_mu = tf.gather(tf.reshape(tf.transpose(grad), [-1]),
                                        mu + offset)
                    grad_mu_tile = tf.tile(tf.reshape(grad_mu, [1, -1]),
                                           [ngroups, 1])
                    grad_1 = grad - grad_mu_tile
                    grad_2 = tf.where(
                        tf.logical_and(tf.less_equal(var, ones * eps),
                                       tf.greater(grad_1, zeros)), zeros,
                        grad_1)

                    grad_red_mu = -tf.reduce_sum(grad_2, 0)
                    grad_red_mu = tf.tile(tf.reshape(grad_red_mu, [1, -1]),
                                          [ngroups, 1])
                    grad_red = tf.where(mask, grad_red_mu, grad_2)

                    max_step_size = tf.where(tf.greater(grad_red, zeros),
                                             var / grad_red, ones)
                    max_step_size = tf.reduce_min(max_step_size, 0)
                    lr_mult = tf.where(tf.less(max_step_size, self.lr),
                                       max_step_size / self.lr, ones_row)
                    # lr_mult = tf.where(tf.less(max_step_size, self.lr*10.0), max_step_size/self.lr/10.0, ones_row)
                    grad_red = grad_red * lr_mult
                    grads_and_vars[idx] = (grad_red, var)

                    tf.summary.scalar(var.op.name + "/lr_mult_min",
                                      tf.reduce_min(lr_mult))
                    tf.summary.scalar(var.op.name + "/lr_mult_avg",
                                      tf.reduce_mean(lr_mult))
                    tf.summary.histogram(var.op.name + "/group_sum",
                                         tf.reduce_sum(var, 0))
                    tf.summary.scalar(var.op.name + "/sparsity",
                                      tf.nn.zero_fraction(var - eps))
                    tf.summary.histogram(var.op.name + "/grad", grad_red)

            # Apply gradient
            apply_grad_op = opt.apply_gradients(grads_and_vars,
                                                global_step=self._global_step)

            # Batch normalization moving average update
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.train_op = tf.group(*(update_ops + [apply_grad_op]),
                                     name="train_op")

        # Build validity op
        print('Build validity op')
        with tf.device('/CPU:0'):
            # Force the group variable to be non-negative and sum-to-one
            validity_op_list = []
            with tf.name_scope("sum-to-one"):
                for var in tf.trainable_variables():
                    if "group" in var.name:
                        ngroups, dim = var.get_shape().as_list()
                        ones = tf.ones((ngroups, dim), dtype=tf.float32)
                        zeros = tf.zeros((ngroups, dim), dtype=tf.float32)
                        var_temp = tf.where(tf.less(var, ones * eps),
                                            ones * eps, var)  # non-negativity
                        var_temp = var_temp / tf.reduce_sum(var_temp,
                                                            0)  # sum-to-one
                        assign_op = var.assign(var_temp)
                        validity_op_list.append(assign_op)
            self.validity_op = tf.group(*validity_op_list,
                                        name="group_validity")
Example #45
0
def drelu(x):
    zero = tf.zeros(x.get_shape())
    one = tf.ones(x.get_shape())
    return (tf.where(tf.greater(x, zero), one, zero))
Example #46
0
        with tf.name_scope('layer1'):
            zy = zm + tf.sqrt(zv) * z
            h1 = custom_layer(zy, reuse)
        h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)
        h3 = Dense(h2, 512, 'layer3', tf.nn.relu, reuse=reuse)
        h4 = Dense(h3, 512, 'layer4', tf.nn.relu, reuse=reuse)
        px_logit = Dense(h2, 784, 'logit', reuse=reuse)
    return px_logit


tf.reset_default_graph()
x = Placeholder((None, 784), 'x')

# binarize data and create a y "placeholder"
with tf.name_scope('x_binarized'):
    xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)),
                 tf.float32)
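# Comparing x against fresh uniform noise samples each pixel as Bernoulli(x),
# i.e. stochastic binarization rather than a fixed 0.5 threshold.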
with tf.name_scope('y_'):
    y_ = tf.fill(tf.pack([tf.shape(x)[0], 10]), 0.0)

# propose distribution over y
qy_logit, qy = qy_graph(xb)

# for each proposed y, infer z and reconstruct x
z, zm, zv, px_logit = [[None] * 10 for i in xrange(4)]
for i in xrange(10):
    with tf.name_scope('graphs/hot_at{:d}'.format(i)):
        y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))
        z[i], zm[i], zv[i] = qz_graph(xb, y)
        px_logit[i] = px_graph(z[i], y)
Example #47
0
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
    """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.
  
    Performs box voting as described in 'Object detection via a multi-region &
    semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
    each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
    with iou overlap >= iou_thresh. The location of B is set to the weighted
    average location of boxes in S (scores are used for weighting). And the score
    of B is set to the average score of boxes in S.
  
    Args:
      selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
        boxes are usually selected from pool_boxes using non max suppression.
      pool_boxes: BoxList containing a set of (possibly redundant) boxes.
      iou_thresh: (float scalar) iou threshold for matching boxes in
        selected_boxes and pool_boxes.
  
    Returns:
      BoxList containing averaged locations and scores for each box in
      selected_boxes.
  
    Raises:
      ValueError: if
        a) selected_boxes or pool_boxes is not a BoxList.
        b) if iou_thresh is not in [0, 1].
        c) pool_boxes does not have a scores field.
    """
    if not 0.0 <= iou_thresh <= 1.0:
        raise ValueError('iou_thresh must be between 0 and 1')
    if not isinstance(selected_boxes, box_list.BoxList):
        raise ValueError('selected_boxes must be a BoxList')
    if not isinstance(pool_boxes, box_list.BoxList):
        raise ValueError('pool_boxes must be a BoxList')
    if not pool_boxes.has_field('scores'):
        raise ValueError('pool_boxes must have a \'scores\' field')

    iou_ = iou(selected_boxes, pool_boxes)
    match_indicator = tf.to_float(tf.greater(iou_, iou_thresh))
    num_matches = tf.reduce_sum(match_indicator, 1)
    # TODO: Handle the case where some boxes in selected_boxes do not
    # match to any boxes in pool_boxes. For such boxes without any matches, we
    # should return the original boxes without voting.
    match_assert = tf.Assert(tf.reduce_all(tf.greater(num_matches, 0)), [
        'Each box in selected_boxes must match with at least one box '
        'in pool_boxes.'
    ])

    scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
    scores_assert = tf.Assert(tf.reduce_all(tf.greater_equal(scores, 0)),
                              ['Scores must be non negative.'])

    with tf.control_dependencies([scores_assert, match_assert]):
        sum_scores = tf.matmul(match_indicator, scores)
    averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches

    box_locations = tf.matmul(match_indicator,
                              pool_boxes.get() * scores) / sum_scores
    averaged_boxes = box_list.BoxList(box_locations)
    _copy_extra_fields(averaged_boxes, selected_boxes)
    averaged_boxes.add_field('scores', averaged_scores)
    return averaged_boxes
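To make the voting arithmetic concrete, here is a NumPy mirror of the core computation with a hand-made IoU matrix, sidestepping the BoxList dependency (all values are illustrative):

import numpy as np

# iou: [num_selected, num_pool]; scores: [num_pool, 1]
iou = np.array([[0.9, 0.6, 0.1],
                [0.2, 0.4, 0.8]])
scores = np.array([[0.9], [0.5], [0.7]])
pool = np.array([[0., 0., 10., 10.],
                 [1., 1., 11., 11.],
                 [5., 5., 15., 15.]])

match = (iou > 0.5).astype(np.float32)      # tf.greater + tf.to_float
num_matches = match.sum(axis=1)
sum_scores = match @ scores                 # tf.matmul(match_indicator, scores)
avg_scores = sum_scores.ravel() / num_matches
voted_boxes = (match @ (pool * scores)) / sum_scores
print(avg_scores)    # mean score of each selected box's matches
print(voted_boxes)   # score-weighted average of the matched box coordinates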
Example #48
0
pdepth = proj_depth_p
prgb = tf.to_float(proj_rgb_p)
psift = tf.to_float(proj_sift_p)

# Randomly keep pct_3D_points% of the projected points. Multiplying the
# dropout output by `keep` undoes dropout's 1/keep rescaling, leaving a
# plain random keep-mask over the spatial grid.
keep = prm.pct_3D_points / 100.
pdepth = tf.nn.dropout(
    pdepth, keep, noise_shape=[1, prm.crop_size, prm.crop_size, 1],
    seed=0) * keep
prgb = tf.nn.dropout(
    prgb, keep, noise_shape=[1, prm.crop_size, prm.crop_size, 1],
    seed=0) * keep
psift = tf.nn.dropout(
    psift, keep, noise_shape=[1, prm.crop_size, prm.crop_size, 1],
    seed=0) * keep
valid = tf.greater(pdepth, 0.)

# set up visibnet
if prm.input_attr == 'depth':
    vinp = pdepth
elif prm.input_attr == 'depth_rgb':
    vinp = tf.concat((pdepth, prgb / 127.5 - 1.), axis=3)
elif prm.input_attr == 'depth_sift':
    vinp = tf.concat((pdepth, psift / 127.5 - 1.), axis=3)
elif prm.input_attr == 'depth_sift_rgb':
    vinp = tf.concat((pdepth, psift / 127.5 - 1., prgb / 127.5 - 1.), axis=3)
vnet = VisibNet(vinp, bn='test')
vpred = tf.logical_and(tf.greater(vnet.pred, .5), valid)
# NOTE: `* 0. + 1.` overrides the thresholded prediction with all-ones.
vpredf = tf.to_float(vpred) * 0. + 1.

# set up coarsenet
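The `tf.nn.dropout(x, keep, ...) * keep` pattern above deserves a note: TF1 dropout scales surviving entries by 1/keep, so the trailing multiplication undoes that, and what remains is a plain random keep-mask that zeroes roughly (1 - keep) of the spatial locations. A small sketch of the effect, assuming the same TF1 session API as these snippets:

import tensorflow as tf

x = tf.ones([1, 4, 4, 1])
keep = 0.25
# Survivors of dropout are scaled by 1/keep; the final * keep undoes that,
# so kept entries stay at 1.0 and the rest are exactly 0.
masked = tf.nn.dropout(x, keep, noise_shape=[1, 4, 4, 1], seed=0) * keep

with tf.Session() as sess:
    print(sess.run(masked)[0, :, :, 0])  # roughly 25% ones, the rest zeros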
Example #49
0
def main():
    args = parser.parse_args()

    # Data augmentation
    global seq_geo
    global seq_img
    seq_geo = iaa.SomeOf(
        (0, 5),
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of the images
            iaa.PerspectiveTransform(scale=(0, 0.075)),
            iaa.Affine(
                scale={
                    "x": (0.8, 1.0),
                    "y": (0.8, 1.0)
                },
                rotate=(-5, 5),
                translate_percent={
                    "x": (-0.1, 0.1),
                    "y": (-0.1, 0.1)
                },
            ),  # rotate by -5 to +5 degrees
            iaa.Crop(percent=(
                0, 0.125
            )),  # crop images from each side by 0 to 12.5% (randomly chosen)
            iaa.CoarsePepper(p=0.01, size_percent=0.1)
        ],
        random_order=False)
    # Content transformation
    seq_img = iaa.SomeOf(
        (0, 3),
        [
            iaa.GaussianBlur(
                sigma=(0, 1.0)),  # blur images with a sigma of 0 to 1.0
            iaa.ContrastNormalization(alpha=(0.9, 1.1)),
            iaa.Grayscale(alpha=(0, 0.2)),
            iaa.Multiply((0.9, 1.1))
        ])

    # We store all arguments in a json file. This has two advantages:
    # 1. We can always get back and see what exactly that experiment was
    # 2. We can resume an experiment as-is without needing to remember all flags.
    args_file = os.path.join(args.experiment_root, 'args.json')
    if args.resume:
        if not os.path.isfile(args_file):
            raise IOError('`args.json` not found in {}'.format(args_file))

        print('Loading args from {}.'.format(args_file))
        with open(args_file, 'r') as f:
            args_resumed = json.load(f)
        args_resumed['resume'] = True  # This would be overwritten.

        # When resuming, we not only want to populate the args object with the
        # values from the file, but we also want to check for some possible
        # conflicts between loaded and given arguments.
        for key, value in args.__dict__.items():
            if key in args_resumed:
                resumed_value = args_resumed[key]
                if resumed_value != value:
                    print('Warning: For the argument `{}` we are using the'
                          ' loaded value `{}`. The provided value was `{}`'
                          '.'.format(key, resumed_value, value))
                    args.__dict__[key] = resumed_value
            else:
                print('Warning: A new argument was added since the last run:'
                      ' `{}`. Using the new value: `{}`.'.format(key, value))

    else:
        # If the experiment directory exists already, we bail in fear.
        if os.path.exists(args.experiment_root):
            if os.listdir(args.experiment_root):
                print('The directory {} already exists and is not empty.'
                      ' If you want to resume training, append --resume to'
                      ' your call.'.format(args.experiment_root))
                exit(1)
        else:
            os.makedirs(args.experiment_root)

        # Store the passed arguments for later resuming and grepping in a nice
        # and readable format.
        with open(args_file, 'w') as f:
            json.dump(vars(args),
                      f,
                      ensure_ascii=False,
                      indent=2,
                      sort_keys=True)

    log_file = os.path.join(args.experiment_root, "train")
    logging.config.dictConfig(common.get_logging_dict(log_file))
    log = logging.getLogger('train')

    # Also show all parameter values at the start, for ease of reading logs.
    log.info('Training using the following parameters:')
    for key, value in sorted(vars(args).items()):
        log.info('{}: {}'.format(key, value))

    # Check them here, so they are not required when --resume-ing.
    if not args.train_set:
        parser.print_help()
        log.error("You did not specify the `train_set` argument!")
        sys.exit(1)
    if not args.image_root:
        parser.print_help()
        log.error("You did not specify the required `image_root` argument!")
        sys.exit(1)

    # Load the data from the CSV file.
    pids, fids = common.load_dataset(args.train_set, args.image_root)
    max_fid_len = max(map(len, fids))  # We'll need this later for logfiles.

    # Load feature embeddings
    if args.hard_pool_size > 0:
        with h5py.File(args.train_embeddings, 'r') as f_train:
            train_embs = np.array(f_train['emb'])
            f_dists = scipy.spatial.distance.cdist(train_embs, train_embs)
            hard_ids = get_hard_id_pool(pids, f_dists, args.hard_pool_size)

    # Setup a tf.Dataset where one "epoch" loops over all PIDS.
    # PIDS are shuffled after every epoch and continue indefinitely.
    unique_pids = np.unique(pids)
    dataset = tf.data.Dataset.from_tensor_slices(unique_pids)
    dataset = dataset.shuffle(len(unique_pids))

    # Constrain the dataset size to a multiple of the batch-size, so that
    # we don't get overlap at the end of each epoch.
    if args.hard_pool_size == 0:
        dataset = dataset.take(
            (len(unique_pids) // args.batch_p) * args.batch_p)
        dataset = dataset.repeat(
            None)  # Repeat forever. Funny way of stating it.

    else:
        dataset = dataset.repeat(
            None)  # Repeat forever. Funny way of stating it.
        dataset = dataset.map(lambda pid: sample_batch_ids_for_pid(
            pid, all_pids=pids, batch_p=args.batch_p, all_hard_pids=hard_ids))
        # Unbatch the P PIDs
        dataset = dataset.apply(tf.contrib.data.unbatch())

    # For every PID, get K images.
    dataset = dataset.map(lambda pid: sample_k_fids_for_pid(
        pid, all_fids=fids, all_pids=pids, batch_k=args.batch_k))

    # Ungroup/flatten the batches for easy loading of the files.
    dataset = dataset.apply(tf.contrib.data.unbatch())

    # Convert filenames to actual image tensors.
    net_input_size = (args.net_input_height, args.net_input_width)
    pre_crop_size = (args.pre_crop_height, args.pre_crop_width)
    dataset = dataset.map(lambda fid, pid: common.fid_to_image(
        fid,
        pid,
        image_root=args.image_root,
        image_size=pre_crop_size if args.crop_augment else net_input_size),
                          num_parallel_calls=args.loading_threads)

    # Augment the data if specified by the arguments.
    if not args.augment:
        dataset = dataset.map(
            lambda im, fid, pid: common.fid_to_image(
                fid,
                pid,
                image_root=args.image_root,
                image_size=pre_crop_size
                if args.crop_augment else net_input_size),  #Ergys
            num_parallel_calls=args.loading_threads)

        if args.flip_augment:
            dataset = dataset.map(lambda im, fid, pid: (
                tf.image.random_flip_left_right(im), fid, pid))
        if args.crop_augment:
            dataset = dataset.map(lambda im, fid, pid: (tf.random_crop(
                im, net_input_size + (3, )), fid, pid))
    else:
        dataset = dataset.map(lambda im, fid, pid: common.fid_to_image(
            fid, pid, image_root=args.image_root, image_size=net_input_size),
                              num_parallel_calls=args.loading_threads)

        dataset = dataset.map(lambda im, fid, pid: (tf.py_func(
            augment_images, [im], [tf.float32]), fid, pid))
        dataset = dataset.map(lambda im, fid, pid: (tf.reshape(
            im[0],
            (args.net_input_height, args.net_input_width, 3)), fid, pid))

    # Group it back into PK batches.
    batch_size = args.batch_p * args.batch_k
    dataset = dataset.batch(batch_size)

    # Overlap producing and consuming for parallelism.
    dataset = dataset.prefetch(batch_size * 2)

    # Since we repeat the data infinitely, we only need a one-shot iterator.
    images, fids, pids = dataset.make_one_shot_iterator().get_next()

    # Create the model and an embedding head.
    model = import_module('nets.' + args.model_name)
    head = import_module('heads.' + args.head_name)

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images, is_training=True)
    with tf.name_scope('head'):
        endpoints = head.head(endpoints, args.embedding_dim, is_training=True)

    # Create the loss in two steps:
    # 1. Compute all pairwise distances according to the specified metric.
    # 2. For each anchor along the first dimension, compute its loss.
    dists = loss.cdist(endpoints['emb'], endpoints['emb'], metric=args.metric)
    losses, train_top1, prec_at_k, _, neg_dists, pos_dists = loss.LOSS_CHOICES[
        args.loss](dists,
                   pids,
                   args.margin,
                   batch_precision_at_k=args.batch_k - 1)

    # Count the number of active entries, and compute the total batch loss.
    num_active = tf.reduce_sum(tf.cast(tf.greater(losses, 1e-5), tf.float32))
    loss_mean = tf.reduce_mean(losses)

    # Some logging for tensorboard.
    tf.summary.histogram('loss_distribution', losses)
    tf.summary.scalar('loss', loss_mean)
    tf.summary.scalar('batch_top1', train_top1)
    tf.summary.scalar('batch_prec_at_{}'.format(args.batch_k - 1), prec_at_k)
    tf.summary.scalar('active_count', num_active)
    tf.summary.histogram('embedding_dists', dists)
    tf.summary.histogram('embedding_pos_dists', pos_dists)
    tf.summary.histogram('embedding_neg_dists', neg_dists)
    tf.summary.histogram('embedding_lengths',
                         tf.norm(endpoints['emb_raw'], axis=1))

    # Create the mem-mapped arrays in which we'll log all training details in
    # addition to tensorboard, because tensorboard is annoying for detailed
    # inspection and actually discards data in histogram summaries.
    if args.detailed_logs:
        log_embs = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'embeddings'),
            dtype=np.float32,
            shape=(args.train_iterations, batch_size, args.embedding_dim))
        log_loss = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'losses'),
            dtype=np.float32,
            shape=(args.train_iterations, batch_size))
        log_fids = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'fids'),
            dtype='S' + str(max_fid_len),
            shape=(args.train_iterations, batch_size))

    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        body_prefix)

    # Define the optimizer and the learning-rate schedule.
    # Unfortunately, we get NaNs if we don't handle no-decay separately.
    # modified by ha (default decay rate 0.001)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    if 0 <= args.decay_start_iteration < args.train_iterations:
        learning_rate = tf.train.exponential_decay(
            args.learning_rate,
            tf.maximum(0, global_step - args.decay_start_iteration),
            args.train_iterations - args.decay_start_iteration, 0.001)
    else:
        learning_rate = args.learning_rate
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # Feel free to try others!
    # optimizer = tf.train.AdadeltaOptimizer(learning_rate)

    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_op = optimizer.minimize(loss_mean, global_step=global_step)

    # Define a saver for the complete model.
    checkpoint_saver = tf.train.Saver(max_to_keep=0)

    with tf.Session() as sess:
        if args.resume:
            # In case we're resuming, simply load the full checkpoint to init.
            last_checkpoint = tf.train.latest_checkpoint(args.experiment_root)
            log.info('Restoring from checkpoint: {}'.format(last_checkpoint))
            checkpoint_saver.restore(sess, last_checkpoint)
        else:
            # But if we're starting from scratch, we may need to load some
            # variables from the pre-trained weights, and random init others.
            sess.run(tf.global_variables_initializer())
            if args.initial_checkpoint is not None:
                # saver = tf.train.Saver(model_variables)
                saver = tf.train.import_meta_graph(
                    './mobilenet/mobilenet_v2_1.4_224.ckpt.meta')
                saver.restore(sess, args.initial_checkpoint)

            # In any case, we also store this initialization as a checkpoint,
            # such that we could run exactly reproducible experiments.
            checkpoint_saver.save(sess,
                                  os.path.join(args.experiment_root,
                                               'checkpoint'),
                                  global_step=0)

        merged_summary = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(args.experiment_root,
                                               sess.graph)

        start_step = sess.run(global_step)
        log.info('Starting training from iteration {}.'.format(start_step))

        # Finally, here comes the main-loop. This `Uninterrupt` is a handy
        # utility such that an iteration still finishes on Ctrl+C and we can
        # stop the training cleanly.
        with lb.Uninterrupt(sigs=[SIGINT, SIGTERM], verbose=True) as u:
            for i in range(start_step, args.train_iterations):

                # Compute gradients, update weights, store logs!
                start_time = time.time()
                _, summary, step, b_prec_at_k, b_embs, b_loss, b_fids = \
                    sess.run([train_op, merged_summary, global_step,
                              prec_at_k, endpoints['emb'], losses, fids])
                elapsed_time = time.time() - start_time

                # Compute the iteration speed and add it to the summary.
                # We did observe some weird spikes that we couldn't track down.
                summary2 = tf.Summary()
                summary2.value.add(tag='secs_per_iter',
                                   simple_value=elapsed_time)
                summary_writer.add_summary(summary2, step)
                summary_writer.add_summary(summary, step)

                if args.detailed_logs:
                    log_embs[i], log_loss[i], log_fids[
                        i] = b_embs, b_loss, b_fids

                # Do a huge print out of the current progress.
                seconds_todo = (args.train_iterations - step) * elapsed_time
                log.info(
                    'iter:{:6d}, loss min|avg|max: {:.3f}|{:.3f}|{:6.3f}, '
                    'batch-p@{}: {:.2%}, ETA: {} ({:.2f}s/it)'.format(
                        step, float(np.min(b_loss)), float(np.mean(b_loss)),
                        float(np.max(b_loss)), args.batch_k - 1,
                        float(b_prec_at_k),
                        timedelta(seconds=int(seconds_todo)), elapsed_time))
                sys.stdout.flush()
                sys.stderr.flush()

                # Save a checkpoint of training every so often.
                if (args.checkpoint_frequency > 0
                        and step % args.checkpoint_frequency == 0):
                    checkpoint_saver.save(sess,
                                          os.path.join(args.experiment_root,
                                                       'checkpoint'),
                                          global_step=step)

                # Stop the main-loop at the end of the step, if requested.
                if u.interrupted:
                    log.info("Interrupted on request!")
                    break

        # Store one final checkpoint. This might be redundant, but it is
        # crucial in case intermediate storing was disabled, and it ensures a
        # checkpoint exists if the process was interrupted.
        checkpoint_saver.save(sess,
                              os.path.join(args.experiment_root, 'checkpoint'),
                              global_step=step)
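The per-anchor loss returned by loss.LOSS_CHOICES is not shown here; for the common "batch hard" variant used with PK batches, each anchor is compared against its hardest positive and hardest negative within the batch. A minimal sketch of that reduction with a hinge margin (illustrative only, not the repo's implementation):

import tensorflow as tf

def batch_hard_sketch(dists, pids, margin):
    # dists: [B, B] pairwise distances; pids: [B] identity labels.
    same = tf.cast(tf.equal(tf.expand_dims(pids, 0),
                            tf.expand_dims(pids, 1)), tf.float32)
    pos_mask = same - tf.eye(tf.shape(dists)[0])        # positives, excluding self
    hardest_pos = tf.reduce_max(dists * pos_mask, axis=1)
    # Push same-identity entries (and the diagonal) out of the min.
    hardest_neg = tf.reduce_min(dists + 1e9 * same, axis=1)
    return tf.maximum(hardest_pos - hardest_neg + margin, 0.0)

This is also why num_active above counts entries greater than a small epsilon: with a hinge, anchors whose hardest negative is already margin further away than their hardest positive contribute exactly zero.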
Example #50
0
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from numpy.random import RandomState

batch_size = 8

x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")

w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

loss_less = 10
loss_more = 1

loss = tf.reduce_sum(tf.where(tf.greater(y, y_), loss_more*(y-y_), loss_less*(y_-y)))

train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[x1 + x2 + rdm.rand()/10.0 -0.05] for (x1, x2) in X]

xx = np.zeros([91], dtype=float)
yy = np.ones([91], dtype=float)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEP = 9000
    for i in range(STEP):
        start = (i * batch_size) % dataset_size
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
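With loss_less = 10 and loss_more = 1, the tf.where above charges 1 per unit of over-prediction and 10 per unit of under-prediction, so the fitted weights get biased upward. A scalar check of the selection logic:

y_pred, y_true = 1.2, 1.0
loss_more, loss_less = 1, 10
loss = loss_more * (y_pred - y_true) if y_pred > y_true else loss_less * (y_true - y_pred)
print(loss)  # 0.2; under-predicting by the same 0.2 would cost 2.0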
Example #51
0
    def forward(self, logits, targets, target_mask, training):
        with tf.name_scope(self.name, values=[logits, targets, target_mask]):
            # Get smoothing parameters (no smoothing/ normalization at test time)
            high_confidence, low_confidence, normalizing_factor = \
                tf.cond(tf.logical_and(training, tf.greater(self.label_smoothing_discount, 0.0)),
                        self._get_smoothing_parameters,
                        lambda: (1.0, 0.0, 0.0))

            # If necessary, pad the label and the label-mask to match the length of decoder output
            # Not sure if that's a sensible thing to do
            targets_shape = tf.shape(targets)
            logits_shape = tf.shape(logits)
            targets_length = targets_shape[self.time_dim]
            logits_length = logits_shape[self.time_dim]

            def _get_pad_shape(shape_to_pad, shape_to_match):
                """ Calculates the shape of the padding to be applied to the logits or targets. """
                time_steps_to_pad = shape_to_match[
                    self.time_dim] - shape_to_pad[self.time_dim]
                if self.time_dim == 0:
                    pad_shape = [time_steps_to_pad, shape_to_pad[1]]
                else:
                    pad_shape = [shape_to_pad[0], time_steps_to_pad]
                return pad_shape

            def _pad_targets(targets, target_mask, logits):
                """ Pads the targets to match the size of the model-generated logits. """
                pad_shape = _get_pad_shape(targets_shape, logits_shape)
                targets = tf.concat(
                    [targets,
                     tf.zeros(pad_shape, dtype=self.int_dtype)],
                    axis=self.time_dim)
                target_mask = tf.concat(
                    [target_mask,
                     tf.zeros(pad_shape, dtype=self.float_dtype)],
                    axis=self.time_dim)
                return targets, target_mask, logits

            def _pad_logits(targets, target_mask, logits):
                """ Pads the logits to match the size of the ground-truth targets. """
                pad_shape = _get_pad_shape(logits_shape, targets_shape)
                logits = tf.concat(
                    [logits,
                     tf.zeros(pad_shape + [logits_shape[-1]],
                              dtype=self.float_dtype)],
                    axis=self.time_dim)
                return targets, target_mask, logits

            # For teacher-forcing with RNN models
            targets, target_mask, logits = tf.cond(
                tf.equal(targets_length, logits_length),
                lambda: (targets, target_mask, logits),
                lambda: tf.cond(
                    tf.less(targets_length, logits_length),
                    lambda: _pad_targets(targets, target_mask, logits),
                    lambda: _pad_logits(targets, target_mask, logits)))

            # Project and optionally smooth target token ids
            projected_targets = tf.one_hot(targets,
                                           depth=self.vocab_size,
                                           on_value=high_confidence,
                                           off_value=low_confidence,
                                           dtype=self.float_dtype)

            # Compute token-level loss
            flat_logits = tf.reshape(logits, [-1, self.vocab_size])
            flat_targets = tf.reshape(projected_targets, [-1, self.vocab_size])
            flat_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=flat_logits, labels=flat_targets)
            flat_normalized_loss = flat_loss - normalizing_factor
            # Compute sentence- and batch-level losses (i.e. mean token-loss per sentence/ batch)
            normalized_loss = tf.reshape(flat_normalized_loss,
                                         tf.shape(targets))
            masked_loss = normalized_loss * target_mask
            sentence_lengths = tf.reduce_sum(target_mask,
                                             axis=self.time_dim,
                                             keepdims=False)
            sentence_loss = tf.div(
                tf.reduce_sum(masked_loss, axis=self.time_dim, keepdims=False),
                sentence_lengths)
            batch_loss = tf.reduce_mean(sentence_loss, keepdims=False)
        return masked_loss, sentence_loss, batch_loss
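_get_smoothing_parameters is not shown; presumably it follows the usual uniform label-smoothing scheme, where with discount d and vocabulary size V the target token gets probability 1 - d, every other token gets d / (V - 1), and the normalizing factor is the constant that makes a perfectly smoothed prediction score zero. A hedged sketch of those formulas:

import numpy as np

def smoothing_parameters(discount, vocab_size):
    high = 1.0 - discount
    low = discount / (vocab_size - 1)
    # Cross-entropy of the smoothed distribution against itself; subtracting
    # it shifts the minimum achievable loss to zero.
    normalizing = -(high * np.log(high) +
                    (vocab_size - 1) * low * np.log(low + 1e-20))
    return high, low, normalizing

print(smoothing_parameters(0.1, 32000))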
Example #52
0
    def setup_model(self):
      print("setup_model : ppo_imitation.py -> PPOImitation.setup_model()")
      traceback.print_stack()
      with SetVerbosity(self.verbose):

        self.graph = tf.Graph()
        with self.graph.as_default():
          self.set_random_seed(self.seed)
          self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

          # Construct network for new policy
          self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                       None, reuse=False, **self.policy_kwargs)

          # Network for old policy
          with tf.variable_scope("oldpi", reuse=False):
            old_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                 None, reuse=False, **self.policy_kwargs)

          with tf.variable_scope("loss", reuse=False):
            # Target advantage function (if applicable)
            atarg = tf.placeholder(dtype=tf.float32, shape=[None])

            # Empirical return
            ret = tf.placeholder(dtype=tf.float32, shape=[None])

            # learning rate multiplier, updated with schedule
            lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])

            # Annealed clipping parameter epsilon
            clip_param = self.clip_param * lrmult

            obs_ph = self.policy_pi.obs_ph
            action_ph = self.policy_pi.pdtype.sample_placeholder([None])

            kloldnew = old_pi.proba_distribution.kl(self.policy_pi.proba_distribution)
            ent = self.policy_pi.proba_distribution.entropy()
            meankl = tf.reduce_mean(kloldnew)
            meanent = tf.reduce_mean(ent)
            pol_entpen = (-self.entcoeff) * meanent

            # pnew / pold
            ratio = tf.exp(self.policy_pi.proba_distribution.logp(action_ph) -
                           old_pi.proba_distribution.logp(action_ph))

            # surrogate from conservative policy iteration
            surr1 = ratio * atarg
            surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg

            clip_frac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), clip_param)))

            # PPO's pessimistic surrogate (L^CLIP)
            pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))
            vf_loss = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
            total_loss = pol_surr + pol_entpen + vf_loss
            losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
            self.loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]

            tf.summary.scalar('entropy_loss', pol_entpen)
            tf.summary.scalar('policy_gradient_loss', pol_surr)
            tf.summary.scalar('value_function_loss', vf_loss)
            tf.summary.scalar('approximate_kullback-leibler', meankl)
            tf.summary.scalar('clip_factor', clip_param)
            tf.summary.scalar('loss', total_loss)
            tf.summary.scalar('clip_frac', clip_frac)

            self.params = tf_util.get_trainable_vars("model")

            self.assign_old_eq_new = tf_util.function(
                [], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                 zipsame(tf_util.get_globals_vars("oldpi"), tf_util.get_globals_vars("model"))])

          with tf.variable_scope("Adam_mpi", reuse=False):
            self.adam = MpiAdam(self.params, epsilon=self.adam_epsilon, sess=self.sess)

          with tf.variable_scope("input_info", reuse=False):
            tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
            tf.summary.scalar('learning_rate', tf.reduce_mean(self.optim_stepsize))
            tf.summary.scalar('advantage', tf.reduce_mean(atarg))
            tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_param))

            if self.full_tensorboard_log:
              tf.summary.histogram('discounted_rewards', ret)
              tf.summary.histogram('learning_rate', self.optim_stepsize)
              tf.summary.histogram('advantage', atarg)
              tf.summary.histogram('clip_range', self.clip_param)
              if tf_util.is_image(self.observation_space):
                tf.summary.image('observation', obs_ph)
              else:
                tf.summary.histogram('observation', obs_ph)

          self.step = self.policy_pi.step
          self.proba_step = self.policy_pi.proba_step
          self.initial_state = self.policy_pi.initial_state

          tf_util.initialize(sess=self.sess)

          self.summary = tf.summary.merge_all()

          self.lossandgrad = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                              [self.summary, tf_util.flatgrad(total_loss, self.params)] + losses)
          self.compute_losses = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                 losses)

      return
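clip_frac above simply measures how often the probability ratio leaves the trust region [1 - clip_param, 1 + clip_param]; a quick numeric check of the same expression (illustrative values):

import numpy as np

ratio = np.array([0.7, 0.95, 1.05, 1.5])
clip_param = 0.2
clip_frac = np.mean(np.abs(ratio - 1.0) > clip_param)  # mirrors the tf.greater above
print(clip_frac)  # 0.5: two of the four ratios fall outside [0.8, 1.2]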
Example #53
0
    mask_hard_pred = tf.argmax(
        output, axis=3, name="mask_pred_class"
    )  # [batch_size, patch_size, patch_size]. Each element (pixel) is the predicted class: 0, 1, ..., classes-1
    mask_soft_pred_resh = tf.reshape(
        output, [-1, classes]
    )  # [batch_size*patch_size*patch_size, classes] Each row (pixel) is the predicted logits for the pixel

else:
    """ classes = 1 and We have binary segmentation
        1. mask_hard_true: True mask hard coded with shape [batch_size, patch_size, patch_size]. The same as mask
    a = [random.randint(0, 2) + random.random() for i in range(5)]
    """

    # True labels
    mask_hard_true = tf.cast(
        tf.greater(mask, threshold), 'float'
    )  # [batch_size, patch_size, patch_size]. Each element (pixel) is 0 or 1, the class of the pixel
    mask_hard_true_resh = tf.reshape(mask_hard_true, [
        -1
    ])  # [batch_size*patch_size*patch_size]. Each entry is 0 or 1 for the pixel

    # Predictions
    mask_hard_pred = tf.cast(
        tf.greater(output, threshold), 'float'
    )  # [batch_size, patch_size, patch_size]. Each element (pixel) is the predicted class, 0 or 1
    mask_soft_pred_resh = tf.reshape(
        output, [-1]
    )  # [batch_size*patch_size*patch_size]. Each entry (pixel) is the computed logit

# Loss
entropy_loss = cross_entropy_loss(mask_hard_true_resh, mask_soft_pred_resh)
Example #54
0
image_pool_input = tf.placeholder(shape=[None, height, width, channel],
                                  dtype=tf.float32,
                                  name='image_pool_input')
u_thres = tf.placeholder(shape=[], dtype=tf.float32, name='u_thres')
l_thres = tf.placeholder(shape=[], dtype=tf.float32, name='l_thres')
lr = tf.placeholder(shape=[], dtype=tf.float32, name='learning_rate')

label_feat = ConvNetwork(image_pool_input,
                         num_cluster,
                         name='ConvNetwork',
                         reuse=False)
label_feat_norm = tf.nn.l2_normalize(label_feat, dim=1)
sim_mat = tf.matmul(label_feat_norm, label_feat_norm, transpose_b=True)

pos_loc = tf.greater(sim_mat, u_thres, name='greater')
neg_loc = tf.less(sim_mat, l_thres, name='less')
pos_loc_mask = tf.cast(pos_loc, dtype=tf.float32)
neg_loc_mask = tf.cast(neg_loc, dtype=tf.float32)

pred_label = tf.argmax(label_feat, axis=1)

# Deep Adaptive Image Clustering Cost Function Optimize
pos_entropy = tf.multiply(-tf.log(tf.clip_by_value(sim_mat, eps, 1.0)),
                          pos_loc_mask)
neg_entropy = tf.multiply(-tf.log(tf.clip_by_value(1 - sim_mat, eps, 1.0)),
                          neg_loc_mask)

loss_sum = tf.reduce_mean(pos_entropy) + tf.reduce_mean(neg_entropy)
train_op = tf.train.RMSPropOptimizer(lr).minimize(loss_sum)
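The two thresholds split the cosine-similarity matrix into confident positive pairs (sim > u_thres), confident negative pairs (sim < l_thres), and an ignored middle band that contributes to neither entropy term. A tiny numeric sketch (values are illustrative):

import numpy as np

sim = np.array([[1.0, 0.97, 0.4],
                [0.97, 1.0, 0.1],
                [0.4, 0.1, 1.0]])
u_thres, l_thres = 0.95, 0.3

pos_mask = (sim > u_thres).astype(np.float32)  # confident "same cluster" pairs
neg_mask = (sim < l_thres).astype(np.float32)  # confident "different cluster" pairs
# Pairs with similarity in [l_thres, u_thres] get zero weight in both masks.
print(pos_mask)
print(neg_mask)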
Example #55
0
"""
Simple TensorFlow exercises
You should thoroughly test your code
"""

import tensorflow as tf

###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################

x = tf.random_uniform([])  # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))

###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################

x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
               tf.greater(x, y): lambda: tf.subtract(x, y)},
              default=lambda: tf.constant(0.0), exclusive=True)

###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]] 
Example #56
0
    def _parse_train_data(self, data):
        """Parses data for training.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      image: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
      labels: a dictionary of tensors used for training. The following describes
        {key: value} pairs in the dictionary.
        image_info: a 2D `Tensor` that encodes the information of the image and
          the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
        anchor_boxes: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, 4] representing anchor boxes at each level.
        rpn_score_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location]. The height_l and
          width_l represent the dimension of class logits at l-th level.
        rpn_box_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location * 4]. The height_l and
          width_l represent the dimension of bounding box regression output at
          l-th level.
        gt_boxes: Groundtruth bounding box annotations. The box is represented
           in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
           image that is fed to the network. The tensor is padded with -1 to
           the fixed dimension [self._max_num_instances, 4].
        gt_classes: Groundtruth classes annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        gt_masks: Groundtruth masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size.
    """
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        if self._include_mask:
            masks = data['groundtruth_instance_masks']

        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            if self._include_mask:
                masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if self._include_mask:
            masks = tf.gather(masks, indices)
            # Transfer boxes to the original image space and do normalization.
            cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                            [1, 2])
            cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                     [1, 2])
            cropped_boxes = box_utils.normalize_boxes(cropped_boxes,
                                                      image_shape)
            num_masks = tf.shape(masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method='bilinear')
            masks = tf.squeeze(masks, axis=-1)

        # Class manipulation.
        # Filter out novel split classes from training.
        if self._train_class != 'all':
            valid_classes = tf.cast(class_utils.coco_split_class_ids(
                self._train_class),
                                    dtype=classes.dtype)
            match = tf.reduce_any(
                tf.equal(tf.expand_dims(valid_classes, 1),
                         tf.expand_dims(classes, 0)), 0)
            # kill novel split classes and boxes.
            boxes = tf.gather(boxes, tf.where(match)[:, 0])
            classes = tf.gather(classes, tf.where(match)[:, 0])
            if self._include_mask:
                masks = tf.gather(masks, tf.where(match)[:, 0])

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.OlnAnchorLabeler(
            input_anchor,
            self._rpn_match_threshold,
            self._rpn_unmatched_threshold,
            self._rpn_batch_size_per_im,
            self._rpn_fg_fraction,
            # for centerness target.
            self._has_centerness,
            self._rpn_center_match_iou_threshold,
            self._rpn_center_unmatched_iou_threshold,
            self._rpn_num_center_samples_per_im,
        )

        if self._has_centerness:
            rpn_score_targets, _, rpn_lrtb_targets, rpn_center_targets = (
                anchor_labeler.label_anchors_lrtb(gt_boxes=boxes,
                                                  gt_labels=tf.cast(
                                                      tf.expand_dims(classes,
                                                                     axis=-1),
                                                      dtype=tf.float32)))
        else:
            rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
                boxes,
                tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))
            # For base rpn, dummy placeholder for centerness target.
            rpn_center_targets = rpn_score_targets.copy()

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        inputs = {
            'image': image,
            'image_info': image_info,
        }
        # Packs labels for model_fn outputs.
        labels = {
            'anchor_boxes':
            input_anchor.multilevel_boxes,
            'image_info':
            image_info,
            'rpn_score_targets':
            rpn_score_targets,
            'rpn_box_targets':
            (rpn_lrtb_targets if self._has_centerness else rpn_box_targets),
            'rpn_center_targets':
            rpn_center_targets,
        }
        # If class_agnostic, convert to binary classes.
        if self._class_agnostic:
            classes = tf.where(tf.greater(classes, 0), tf.ones_like(classes),
                               tf.zeros_like(classes))

        inputs['gt_boxes'] = input_utils.pad_to_fixed_size(
            boxes, self._max_num_instances, -1)
        inputs['gt_classes'] = input_utils.pad_to_fixed_size(
            classes, self._max_num_instances, -1)
        if self._include_mask:
            inputs['gt_masks'] = input_utils.pad_to_fixed_size(
                masks, self._max_num_instances, -1)

        return inputs, labels
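The tf.cond guard around the is_crowd filter earlier in this parser is a reusable pattern: keep only non-crowd annotations, but fall back to keeping everything when the field is empty. A standalone sketch with illustrative tensors:

import tensorflow as tf

is_crowds = tf.constant([False, True, False])
classes = tf.constant([3, 5, 7], dtype=tf.int64)

indices = tf.cond(
    tf.greater(tf.size(is_crowds), 0),
    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
    lambda: tf.cast(tf.range(tf.shape(classes)[0]), tf.int64))
kept = tf.gather(classes, indices)  # -> [3, 7]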
Example #57
0
COST = 1
PROFIT = 9

rdm = np.random.RandomState(SEED)
X = rdm.rand(32,2)
Y = [[x1+x2+(rdm.rand()/10.0-0.05)] for (x1, x2) in X]

#1 Define the network inputs, parameters and outputs, and the forward pass.
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
w1= tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

#2 Define the loss function and the backpropagation method.
# The loss makes under-prediction more expensive than over-prediction, so the model should lean toward predicting more.
loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_)*COST, (y_ - y)*PROFIT))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

#3 Create a session and train for STEPS rounds.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 3000
    for i in range(STEPS):
        start = (i*BATCH_SIZE) % 32
        end = (i*BATCH_SIZE) % 32 + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 500 == 0:
            print("After %d training steps, w1 is:" % i)
            print(sess.run(w1), "\n")
    print("Final w1 is:\n", sess.run(w1))
Example #58
0
# print(tf.greater(v1, v2).eval())
# print(tf.where(tf.greater(v1, v2), v1, v2).eval())
# sess.close()

batch_size = 8
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

loss_less = 10
loss_more = 1

loss = tf.reduce_sum(
    tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))

train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
rdm = RandomState(1)
dataset_size = 128

X = rdm.rand(dataset_size, 2)
Y = [[x1 + x2 + rdm.rand() / 10.0 - 0.05] for (x1, x2) in X]

# Start training
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        start = (i * batch_size) % dataset_size
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
Example #59
0
    def top_k_top_p(
        self,
        tokenized_input_dict,
        max_iterations,
        top_k=0,
        top_p=0,
        temperature=1.0,
        do_sample=True,
        num_return_sequences=1,
        eos_id=-100,
    ):

        # Keep the original input ids so we can return them unchanged
        input_ids_original = tokenized_input_dict["input_ids"]
        batch_size = tf.shape(input_ids_original)[0]
        max_sequence_length = tf.shape(input_ids_original)[1]

        # Repeat each input once per requested return sequence
        tokenized_input_dict_ragged = {}
        for input_key, input_value in tokenized_input_dict.items():
            tokenized_input_dict_ragged[input_key] = tf.repeat(
                input_value, [num_return_sequences], axis=0)

        # Batch size after the repetition (batch_size * num_return_sequences)
        batch_size_updated = tokenized_input_dict_ragged["input_ids"].shape[0]

        # Initialize with zeros
        zero_entry = tf.zeros((
            self.num_hidden_layers,
            batch_size_updated,
            self.num_attention_heads,
            max_sequence_length,
            self.attention_state,
        ))
        all_cache_key = zero_entry
        all_cache_value = zero_entry
        past_length = tf.expand_dims(
            tf.zeros(batch_size_updated, dtype=tf.int32), 0)

        # Inputs ready
        tokenized_input_dict_ragged["all_cache_key"] = all_cache_key
        tokenized_input_dict_ragged["all_cache_value"] = all_cache_value
        tokenized_input_dict_ragged["past_length"] = past_length

        if self.input_type_ids > -1:
            tokenized_input_dict_ragged["input_type_ids"] = (
                tf.ones_like(tokenized_input_dict_ragged["input_ids"]) *
                self.input_type_ids)
        if self.input_mask_ids > -1:
            tokenized_input_dict_ragged["input_mask"] = (
                tf.ones_like(tokenized_input_dict_ragged["input_ids"]) *
                self.input_mask_ids)

        all_predictions = []
        matched_positions = tf.constant([-1] * batch_size_updated)

        # Iterate Over
        for i in range(max_iterations):
            result = self.model_fn(tokenized_input_dict_ragged)
            model_logits = result["last_token_logits"]
            all_cache_key = result["all_cache_key"]
            all_cache_value = result["all_cache_value"]
            past_length = result["past_length"]

            if top_k > 0:
                model_logits = top_k_logits(model_logits, k=top_k)
            if top_p > 0:
                model_logits = top_p_logits(model_logits, p=top_p)

            if do_sample:
                prediction_ids = tf.random.categorical(model_logits,
                                                       num_samples=1)
                input_ids = tf.cast(prediction_ids, tf.int32)
            else:
                prediction_ids = tf.argmax(model_logits, axis=1)
                input_ids = tf.cast(tf.expand_dims(prediction_ids, axis=1),
                                    tf.int32)
            all_predictions.append(input_ids)
            if i == 0:
                # all_cache_key = assign_zeros_to_K_V(all_cache_key, \
                # input_ids_copy, batch_size, max_sequence_length)
                # all_cache_value = assign_zeros_to_K_V(all_cache_value, \
                # input_ids_copy, batch_size, max_sequence_length)

                masks = tf.cast(
                    tf.not_equal(tokenized_input_dict_ragged["input_ids"], -1),
                    tf.float32,
                )
                masks = tf.reshape(
                    masks, (1, batch_size_updated, 1, max_sequence_length, 1))
                all_cache_key = all_cache_key * masks
                all_cache_value = all_cache_value * masks

            tokenized_input_dict_ragged["input_ids"] = tf.cast(
                input_ids, tf.int32)
            tokenized_input_dict_ragged["all_cache_key"] = all_cache_key
            tokenized_input_dict_ragged["all_cache_value"] = all_cache_value
            tokenized_input_dict_ragged["past_length"] = past_length

            if self.input_type_ids > -1:
                tokenized_input_dict_ragged["input_type_ids"] = (
                    tf.ones_like(tokenized_input_dict_ragged["input_ids"]) *
                    self.input_type_ids)
            if self.input_mask_ids > -1:
                tokenized_input_dict_ragged["input_mask"] = (
                    tf.ones_like(tokenized_input_dict_ragged["input_ids"]) *
                    self.input_mask_ids)

            if eos_id:
                temp_m = tf.concat(all_predictions, axis=1)
                eos_check = tf.greater(
                    tf.reduce_prod(
                        tf.reduce_sum(tf.cast(tf.equal(temp_m, eos_id),
                                              tf.int32),
                                      axis=[1])),
                    0,
                )
                if eos_check:
                    matched_positions = tf.argmax(tf.cast(
                        tf.equal(eos_id, temp_m), tf.int32),
                                                  axis=1)
                    # matched_positions += 1
                    break

        matched_positions = (tf.reshape(
            tf.argmax(
                tf.cast(tf.equal(eos_id, tf.concat(all_predictions, axis=1)),
                        tf.int32),
                axis=1,
            ),
            -1,
        ) - 1)
        # no eos matched positions will be 0, replace with -1
        eos_pos_mask = tf.cast(tf.equal(matched_positions, 0), tf.int32) * -1
        matched_positions = tf.cast(matched_positions, tf.int32) + eos_pos_mask

        all_predictions = tf.reshape(tf.concat(all_predictions, axis=1),
                                     (batch_size, num_return_sequences, -1))
        return {
            "iterations": i + 1,
            "input_ids": input_ids_original,
            "predicted_ids": all_predictions,
            "matched_eos_pos": matched_positions,
        }
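top_k_logits and top_p_logits are not shown in this snippet; the usual top-k filter keeps the k largest logits per row and pushes everything else to a large negative value so it receives (near-)zero probability after softmax. A hedged sketch of that filter:

import tensorflow as tf

def top_k_logits_sketch(logits, k):
    # logits: [batch, vocab]. Keep the k largest per row, mask the rest.
    values, _ = tf.math.top_k(logits, k=k)
    min_kept = values[:, -1, tf.newaxis]           # k-th largest logit per row
    neg_inf = tf.fill(tf.shape(logits), -1e10)     # stands in for -inf
    return tf.where(logits < min_kept, neg_inf, logits)

logits = tf.constant([[1.0, 3.0, 2.0, 0.5]])
print(top_k_logits_sketch(logits, k=2))  # keeps 3.0 and 2.0, masks the rest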
Example #60
0
    def _parse_train_data(self, data):
        """Generates images and labels that are usable for model training.

    Args:
      data: a dict of Tensors produced by the decoder.
    Returns:
      images: the image tensor.
      labels: a dict of Tensors that contains labels.
    """

        shape = tf.shape(data['image'])
        image = data['image'] / 255
        boxes = data['groundtruth_boxes']
        width = shape[0]
        height = shape[1]

        image, boxes = yolo_preprocess_ops.fit_preserve_aspect_ratio(
            image,
            boxes,
            width=width,
            height=height,
            target_dim=self._max_process_size)

        image_shape = tf.shape(image)[:2]

        if self._random_flip:
            image, boxes, _ = preprocess_ops.random_horizontal_flip(
                image, boxes, seed=self._seed)

        randscale = self._image_w // self._net_down_scale

        if not self._fixed_size:
            do_scale = tf.greater(
                tf.random.uniform([], minval=0, maxval=1, seed=self._seed),
                0.5)
            if do_scale:
                # This scales the image to a random multiple of net_down_scale
                # between 320 and 608
                randscale = tf.random.uniform(
                    [],
                    minval=self._min_process_size // self._net_down_scale,
                    maxval=self._max_process_size // self._net_down_scale,
                    seed=self._seed,
                    dtype=tf.int32) * self._net_down_scale

        if self._jitter_boxes != 0.0:
            boxes = box_ops.denormalize_boxes(boxes, image_shape)
            boxes = box_ops.jitter_boxes(boxes, 0.025)
            boxes = box_ops.normalize_boxes(boxes, image_shape)

        # YOLO loss function uses x-center, y-center format
        boxes = yolo_box_ops.yxyx_to_xcycwh(boxes)

        if self._jitter_im != 0.0:
            image, boxes = yolo_preprocess_ops.random_translate(
                image, boxes, self._jitter_im, seed=self._seed)

        if self._aug_rand_zoom:
            image, boxes = yolo_preprocess_ops.resize_crop_filter(
                image,
                boxes,
                default_width=self._image_w,
                default_height=self._image_h,
                target_width=randscale,
                target_height=randscale)
        image = tf.image.resize(image, (416, 416), preserve_aspect_ratio=False)

        if self._aug_rand_brightness:
            image = tf.image.random_brightness(image=image,
                                               max_delta=.1)  # Brightness
        if self._aug_rand_saturation:
            image = tf.image.random_saturation(image=image,
                                               lower=0.75,
                                               upper=1.25)  # Saturation
        if self._aug_rand_hue:
            image = tf.image.random_hue(image=image, max_delta=.3)  # Hue
        image = tf.clip_by_value(image, 0.0, 1.0)
        # Find the best anchor for the ground truth labels to maximize the iou
        best_anchors = yolo_preprocess_ops.get_best_anchor(
            boxes, self._anchors, width=self._image_w, height=self._image_h)

        # Padding
        boxes = preprocess_ops.clip_or_pad_to_fixed_size(
            boxes, self._max_num_instances, 0)
        classes = preprocess_ops.clip_or_pad_to_fixed_size(
            data['groundtruth_classes'], self._max_num_instances, -1)
        best_anchors = preprocess_ops.clip_or_pad_to_fixed_size(
            best_anchors, self._max_num_instances, 0)
        area = preprocess_ops.clip_or_pad_to_fixed_size(
            data['groundtruth_area'], self._max_num_instances, 0)
        is_crowd = preprocess_ops.clip_or_pad_to_fixed_size(
            tf.cast(data['groundtruth_is_crowd'], tf.int32),
            self._max_num_instances, 0)

        labels = {
            'source_id': data['source_id'],
            'bbox': tf.cast(boxes, self._dtype),
            'classes': tf.cast(classes, self._dtype),
            'area': tf.cast(area, self._dtype),
            'is_crowd': is_crowd,
            'best_anchors': tf.cast(best_anchors, self._dtype),
            'width': width,
            'height': height,
            'num_detections': tf.shape(data['groundtruth_classes'])[0],
        }

        if self._fixed_size:
            grid = self._build_grid(labels,
                                    self._image_w,
                                    use_tie_breaker=self._use_tie_breaker)
            labels.update({'grid_form': grid})

        return image, labels
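preprocess_ops.clip_or_pad_to_fixed_size is not shown; the usual implementation truncates to max_num_instances and pads the remainder with a constant (0 or -1 above) so every record has the same leading dimension and can be batched. A hedged sketch:

import tensorflow as tf

def clip_or_pad_to_fixed_size_sketch(values, size, pad_value=0):
    # values: [num_instances, ...] -> [size, ...]
    values = values[:size]                       # clip if there are too many
    pad_len = size - tf.shape(values)[0]         # pad if there are too few
    paddings = [[0, pad_len]] + [[0, 0]] * (len(values.shape) - 1)
    return tf.pad(values, paddings, constant_values=pad_value)

boxes = tf.constant([[1., 2., 3., 4.]])
print(clip_or_pad_to_fixed_size_sketch(boxes, 3))  # one real row, two zero rows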