def _compute_sparse_average_correct(input_, labels, per_example_weights, topk=1):
    """Returns the numerator and denominator of classifier accuracy."""
    labels = tf.to_int64(labels)
    labels.get_shape().assert_is_compatible_with([input_.get_shape()[0], None])
    if topk == 1:
        predictions = tf.reshape(tf.argmax(input_, 1), [-1, 1])
        in_topk = tf.reduce_any(tf.equal(labels, predictions), reduction_indices=[1])
    else:
        # Use broadcasting to check if ANY of the predictions are in the top k.
        # TODO(eiderman): For a multi-label top k, what does accuracy mean?
        predictions = tf.reshape(tf.nn.top_k(input_, topk)[1], [-1, 1, topk])
        labels = tf.expand_dims(labels, [-1])

        in_topk = tf.reduce_any(tf.equal(tf.cast(labels, predictions.dtype), predictions), reduction_indices=[1, 2])
    correct_predictions = tf.to_float(in_topk)

    # If individual examples are weighted, then we want to normalize by that.
    if per_example_weights is not None:
        per_example_weights = _convert_and_assert_per_example_weights_compatible(
            input_, per_example_weights, dtype=None
        )
        float_weights = tf.to_float(per_example_weights)
        # TODO(eiderman): This should use an op that doesn't support broadcasting.
        correct_predictions *= float_weights
        num_examples = tf.reduce_sum(float_weights)
    else:
        # shape only holds ints, but we want to always return the same type
        # for num_examples to make everything compatible.
        num_examples = tf.to_float(tf.gather(tf.shape(input_), 0))
    return tf.reduce_sum(correct_predictions), num_examples
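A minimal usage sketch for the helper above (illustrative toy tensors; assumes a TF 1.x graph and that the function is in scope). Accuracy is the returned numerator divided by the denominator:

example_logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 0.3, 3.0]])  # [batch, classes]
example_labels = tf.constant([[0], [2]], dtype=tf.int64)           # [batch, 1]
num_correct, num_examples = _compute_sparse_average_correct(
    example_logits, example_labels, per_example_weights=None, topk=1)
accuracy = num_correct / num_examples  # evaluates to 1.0 for this toy batch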
def _compute_precision_recall(input_layer, labels, threshold,
                              per_example_weights):
  """Returns the numerator of both, the denominator of precision and recall."""

  # To apply per_example_weights, we need to collapse each row to a scalar, but
  # we really want the sum.
  labels.get_shape().assert_is_compatible_with(input_layer.get_shape())
  relevant = tf.to_float(tf.greater(labels, 0))
  retrieved = tf.to_float(tf.greater(input_layer, threshold))
  selected = relevant * retrieved

  if per_example_weights:
    per_example_weights = tf.convert_to_tensor(per_example_weights,
                                               name='per_example_weights')
    if selected.get_shape().dims:
      per_example_weights.get_shape().assert_is_compatible_with(
          [selected.get_shape().dims[0]])
    else:
      per_example_weights.get_shape().assert_is_compatible_with([None])
    per_example_weights = tf.to_float(tf.greater(per_example_weights, 0))
    selected = functions.reduce_batch_sum(selected) * per_example_weights
    relevant = functions.reduce_batch_sum(relevant) * per_example_weights
    retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights
  sum_relevant = tf.reduce_sum(relevant)
  sum_retrieved = tf.reduce_sum(retrieved)
  selected = tf.reduce_sum(selected)
  return selected, sum_retrieved, sum_relevant
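From the three sums returned above, precision and recall follow directly. A hedged sketch with toy tensors (TF 1.x assumed; with per_example_weights=None only the unweighted branch runs, so functions.reduce_batch_sum is not needed):

scores = tf.constant([[0.9, 0.2], [0.6, 0.7]])
binary_labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
tp, n_retrieved, n_relevant = _compute_precision_recall(
    scores, binary_labels, threshold=0.5, per_example_weights=None)
precision = tp / n_retrieved  # 2 / 3 for this toy batch
recall = tp / n_relevant      # 2 / 2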
def get_idx_map(shape):
    """Get index map for a image.
    Args:
        shape: [B, T, H, W] or [B, H, W]
    Returns:
        idx: [B, T, H, W, 2], or [B, H, W, 2]
    """
    s = shape
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])

    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')

    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    idx = tf.concat(ndims[0], [idx_y, idx_x])

    return idx
Example #4
def ctrl_rewards(states,
                 actions,
                 rewards,
                 next_states,
                 contexts,
                 reward_scales=1.0):
  """Returns the negative control cost.

  Args:
    states: A [batch_size, num_state_dims] Tensor representing a batch
        of states.
    actions: A [batch_size, num_action_dims] Tensor representing a batch
      of actions.
    rewards: A [batch_size] Tensor representing a batch of rewards.
    next_states: A [batch_size, num_state_dims] Tensor representing a batch
      of next states.
    contexts: A list of [batch_size, num_context_dims] Tensors representing
      a batch of contexts.
    reward_scales: multiplicative scale for rewards. A scalar or 1D tensor,
      must be broadcastable to number of reward dimensions.

  Returns:
    A new tf.float32 [batch_size] rewards Tensor, and
      tf.float32 [batch_size] discounts tensor.
  """
  del states, rewards, contexts  # Unused
  if actions is None:
    rewards = tf.to_float(tf.zeros(shape=next_states.shape[:1]))
  else:
    rewards = -tf.reduce_sum(tf.square(actions), axis=1)
    rewards *= reward_scales
    rewards = tf.to_float(rewards)
  return rewards, tf.ones_like(rewards)
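A toy check of the control cost (hedged; TF 1.x session assumed). The reward is the negative squared action norm per example:

toy_actions = tf.constant([[0.5, -0.5], [1.0, 0.0]])
toy_next_states = tf.zeros([2, 3])
r, discounts = ctrl_rewards(None, toy_actions, None, toy_next_states, None)
# r evaluates to [-0.5, -1.0]; discounts to [1.0, 1.0]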
def compute_IOU(bboxA, bboxB):
    """Compute the Intersection Over Union.
    Args:
        bboxA: [N X 4 tensor] format = [left, top, right, bottom]
        bboxB: [N X 4 tensor] 

    Return:
        IOU: [N X 1 tensor]
    """

    x1A, y1A, x2A, y2A = tf.split(1, 4, bboxA)
    x1B, y1B, x2B, y2B = tf.split(1, 4, bboxB)

    # compute intersection
    x1_max = tf.maximum(x1A, x1B)
    y1_max = tf.maximum(y1A, y1B)
    x2_min = tf.minimum(x2A, x2B)
    y2_min = tf.minimum(y2A, y2B)

    # overlap_flag = tf.logical_and( tf.less(x1_max, x2_min), tf.less(y1_max, y2_min))

    overlap_flag = tf.to_float(tf.less(x1_max, x2_min)) * \
        tf.to_float(tf.less(y1_max, y2_min))

    overlap_area = tf.mul(overlap_flag, tf.mul(
        x2_min - x1_max, y2_min - y1_max))

    # compute union
    areaA = tf.mul(x2A - x1A, y2A - y1A)
    areaB = tf.mul(x2B - x1B, y2B - y1B)
    union_area = areaA + areaB - overlap_area

    return tf.div(overlap_area, union_area)
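The same IoU arithmetic for a single pair of boxes, written as a small NumPy check (illustrative, not part of the original snippet):

import numpy as np

box_a = np.array([0., 0., 4., 4.])  # [left, top, right, bottom], area 16
box_b = np.array([2., 2., 6., 6.])  # area 16
x1, y1 = np.maximum(box_a[:2], box_b[:2])
x2, y2 = np.minimum(box_a[2:], box_b[2:])
inter = max(x2 - x1, 0.0) * max(y2 - y1, 0.0)  # 4.0
union = 16.0 + 16.0 - inter                    # 28.0
print(inter / union)                           # ~0.1429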
Example #6
    def _init_training(self, optimizer):
        with self.model_graph.as_default():
            # counter of processed batches
            self.batches_processed = tf.Variable(
                initial_value=0, trainable=False
            )

            increment_batches = self.batches_processed.assign_add(1)

            # accumulator for the running average of the loss
            self.average_loss = tf.Variable(
                initial_value=0.0, trainable=False
            )

            # recursively recompute the running average of the loss
            updated_loss = tf.truediv(
                tf.add(
                    self.average_loss * tf.to_float(self.batches_processed),
                    self.loss
                ),
                tf.to_float(self.batches_processed) + 1.0
            )
            update_average_loss = self.average_loss.assign(updated_loss)

            opt_op = optimizer.minimize(self.loss)

            # group the optimization op and the counter updates into a single op
            with tf.control_dependencies([opt_op]):
                self.train_op = tf.group(
                    update_average_loss, increment_batches
                )
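The running-average update above follows the standard incremental-mean recurrence avg_{n+1} = (avg_n * n + loss) / (n + 1); a plain-Python check of the same arithmetic:

avg, n = 0.0, 0
for loss in [4.0, 2.0, 6.0]:
    avg = (avg * n + loss) / (n + 1)
    n += 1
print(avg)  # 4.0, the mean of the three losses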
 def testPaddingCrossEntropyFactored(self):
   vocab_size = 19
   rows = 5
   cols = 4
   depth = 11
   label_smoothing = 0.1
   features = np.random.rand(rows, cols, depth)
   weights = np.random.rand(vocab_size, depth)
   labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
   with self.test_session() as session:
     features = tf.to_float(features)
     weights = tf.to_float(weights)
     labels = tf.to_int32(labels)
     logits = tf.matmul(
         tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
     logits = tf.reshape(logits, [rows, cols, vocab_size])
     loss_num, loss_den = common_layers.padded_cross_entropy(
         logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
     factored_logits = common_layers.FactoredTensor(features, weights)
     loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
         factored_logits,
         labels=labels,
         label_smoothing=label_smoothing,
         reduce_sum=False)
     num, den, num_f, den_f = session.run(
         [loss_num, loss_den, loss_num_f, loss_den_f])
   self.assertEqual(num.shape, (rows, cols))
   self.assertEqual(den.shape, (rows, cols))
   self.assertEqual(num_f.shape, (rows, cols))
   self.assertEqual(den_f.shape, (rows, cols))
   self.assertAllClose(num, num_f)
   self.assertAllClose(den, den_f)
    def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
Example #9
def top_1_and_5(predictions, labels):
    #test_size = FLAGS.test_size #tf.shape(predictions)[0]
    in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=1))
    in_top5 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=5))
    num_correct_1 = tf.reduce_sum(in_top1, name ="top1")
    num_correct_5 = tf.reduce_sum(in_top5, name ="top5")
    return num_correct_1, num_correct_5
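Typical usage of the counts returned above (hedged sketch with random toy inputs, TF 1.x assumed): divide by the batch size to get top-1 and top-5 accuracy.

batch_logits = tf.random_uniform([32, 1000])
batch_labels = tf.random_uniform([32], maxval=1000, dtype=tf.int32)
n_top1, n_top5 = top_1_and_5(batch_logits, batch_labels)
top1_accuracy = n_top1 / 32.0
top5_accuracy = n_top5 / 32.0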
Example #10
def char_accuracy(predictions, targets, rej_char, streaming=False):
  """Computes character level accuracy.

  Both predictions and targets should have the same shape
  [batch_size x seq_length].

  Args:
    predictions: predicted characters ids.
    targets: ground truth character ids.
    rej_char: the character id used to mark an empty element (end of sequence).
    streaming: if True, uses the streaming mean from the slim.metric module.

  Returns:
    an update op for execution and a value tensor whose evaluation returns
    the total character accuracy.
  """
  with tf.variable_scope('CharAccuracy'):
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())

    targets = tf.to_int32(targets)
    const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
    weights = tf.to_float(tf.not_equal(targets, const_rej_char))
    correct_chars = tf.to_float(tf.equal(predictions, targets))
    accuracy_per_example = tf.div(
        tf.reduce_sum(tf.multiply(correct_chars, weights), 1),
        tf.reduce_sum(weights, 1))
    if streaming:
      return tf.contrib.metrics.streaming_mean(accuracy_per_example)
    else:
      return tf.reduce_mean(accuracy_per_example)
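A toy example of the masking behaviour (hedged; TF 1.x session assumed, with rej_char=0 marking padding):

pred_ids = tf.constant([[3, 5, 0], [7, 2, 0]], dtype=tf.int32)
true_ids = tf.constant([[3, 4, 0], [7, 2, 0]], dtype=tf.int32)
acc = char_accuracy(pred_ids, true_ids, rej_char=0, streaming=False)
# Per-example accuracies over non-padded characters are 0.5 and 1.0 -> mean 0.75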
Example #11
def crop_or_pad(waves, length, channels):
  """Crop or pad wave to have shape [N, length, channels].

  Args:
    waves: A 3D `Tensor` of NLC format.
    length: A Python scalar. The output wave size.
    channels: Number of output wave channels.

  Returns:
    A 3D `Tensor` of NLC format with shape [N, length, channels].
  """
  waves = tf.convert_to_tensor(waves)
  batch_size = waves.shape[0].value
  waves_shape = tf.shape(waves)

  # Force audio length.
  pad = tf.maximum(0, length - waves_shape[1])
  right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
  left_pad = pad - right_pad
  waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
  waves = waves[:, :length, :]

  # Force number of channels.
  num_repeats = tf.to_int32(
      tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
  waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]

  waves.set_shape([batch_size, length, channels])
  return waves
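Illustrative call (hedged; TF 1.x assumed): a 1-channel, 16000-sample batch is padded to the requested length and tiled to the requested channel count.

waves_in = tf.zeros([4, 16000, 1])                        # [N, L, C]
waves_out = crop_or_pad(waves_in, length=64000, channels=2)
# waves_out has static shape [4, 64000, 2]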
def attentive_pooling_weights(U_AP, raw_question_rep, raw_answer_rep, tokens_question, tokens_answer,
                              apply_softmax=True):
    """Calculates the attentive pooling weights for question and answer

    :param U_AP: the soft-attention similarity matrix (to learn)
    :param raw_question_rep:
    :param raw_answer_rep:
    :param tokens_question: The raw token indices of the question. Used to detect not-set tokens
    :param tokens_answer: The raw token indices of the answer. Used to detect not-set tokens
    :param Q_PW: Positional weighting matrix for the question
    :param A_PW: Positional weighting matrix for the answer
    :param apply_softmax:
    :return: question weights, answer weights
    """
    tokens_question_float = tf.to_float(tokens_question)
    tokens_answer_float = tf.to_float(tokens_answer)
    tokens_question_non_zero = non_zero_tokens(tokens_question_float)
    tokens_answer_non_zero = non_zero_tokens(tokens_answer_float)

    G = soft_alignment(U_AP, raw_question_rep, raw_answer_rep, tokens_question_non_zero, tokens_answer_non_zero)

    maxpool_GQ = tf.reduce_max(G, [2], keep_dims=False)
    maxpool_GA = tf.reduce_max(G, [1], keep_dims=False)

    if apply_softmax:
        attention_Q = attention_softmax(maxpool_GQ, tokens_question_non_zero)
        attention_A = attention_softmax(maxpool_GA, tokens_answer_non_zero)
    else:
        attention_Q = maxpool_GQ
        attention_A = maxpool_GA

    return attention_Q, attention_A
  def _summarize_input(self, groundtruth_boxes_list, match_list):
    """Creates tensorflow summaries for the input boxes and anchors.

    This function creates four summaries corresponding to the average
    number (over images in a batch) of (1) groundtruth boxes, (2) anchors
    marked as positive, (3) anchors marked as negative, and (4) anchors marked
    as ignored.

    Args:
      groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
        containing corners of the groundtruth boxes.
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.
    """
    num_boxes_per_image = tf.stack(
        [tf.shape(x)[0] for x in groundtruth_boxes_list])
    pos_anchors_per_image = tf.stack(
        [match.num_matched_columns() for match in match_list])
    neg_anchors_per_image = tf.stack(
        [match.num_unmatched_columns() for match in match_list])
    ignored_anchors_per_image = tf.stack(
        [match.num_ignored_columns() for match in match_list])
    tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                      tf.reduce_mean(tf.to_float(num_boxes_per_image)))
    tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
Example #14
def log_loss(labels, predictions, epsilon=1e-7, scope=None, weights=None):
  """Calculate log losses.

  Same as tf.losses.log_loss except that this returns the individual losses
  instead of passing them into compute_weighted_loss and returning their
  weighted mean. This is useful for eval jobs that report the mean loss. By
  returning individual losses, that mean loss can be the same regardless of
  batch size.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    weights: Weights to apply to labels.

  Returns:
    A `Tensor` representing the loss values.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`.
  """
  with tf.name_scope(scope, "log_loss", (predictions, labels)):
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = -tf.multiply(labels, tf.log(predictions + epsilon)) - tf.multiply(
        (1 - labels), tf.log(1 - predictions + epsilon))
    if weights is not None:
      losses = tf.multiply(losses, weights)

    return losses
Example #15
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
      height: an int32 scalar tensor indicating the current height.
      width: an int32 scalar tensor indicating the current width.
      smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.

    Returns:
      new_height: an int32 scalar tensor indicating the new height.
      new_width: an int32 scalar tensor indicating the new width.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)

    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(height * scale)
    new_width = tf.to_int32(width * scale)
    return new_height, new_width
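A worked example of the resize computation above (hedged; TF 1.x session assumed): a 480x640 image with smallest_side=256 scales by 256/480.

new_h, new_w = _smallest_size_at_least(tf.constant(480), tf.constant(640), 256)
# new_h evaluates to 256 and new_w to 341 (aspect ratio preserved)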
Example #16
 def while_step(t, rnn_state, tas, accs):
   """Implements one timestep of FIVO computation."""
   log_weights_acc, log_p_hat_acc, kl_acc = accs
   cur_inputs, cur_mask = nested.read_tas([inputs_ta, mask_ta], t)
   # Run the cell for one step.
   log_q_z, log_p_z, log_p_x_given_z, kl, new_state = cell(
       cur_inputs,
       rnn_state,
       cur_mask,
   )
   # Compute the incremental weight and use it to update the current
   # accumulated weight.
   kl_acc += kl * cur_mask
   log_alpha = (log_p_x_given_z + log_p_z - log_q_z) * cur_mask
   log_alpha = tf.reshape(log_alpha, [num_samples, batch_size])
   log_weights_acc += log_alpha
   # Calculate the effective sample size.
   ess_num = 2 * tf.reduce_logsumexp(log_weights_acc, axis=0)
   ess_denom = tf.reduce_logsumexp(2 * log_weights_acc, axis=0)
   log_ess = ess_num - ess_denom
   # Calculate the ancestor indices via resampling. Because we maintain the
   # log unnormalized weights, we pass the weights in as logits, allowing
   # the distribution object to apply a softmax and normalize them.
   resampling_dist = tf.contrib.distributions.Categorical(
       logits=tf.transpose(log_weights_acc, perm=[1, 0]))
   ancestor_inds = tf.stop_gradient(
       resampling_dist.sample(sample_shape=num_samples, seed=random_seed))
   # Because the batch is flattened and laid out as discussed
   # above, we must modify ancestor_inds to index the proper samples.
   # The particles in the ith filter are distributed every batch_size rows
   # in the batch, and offset i rows from the top. So, to correct the indices
   # we multiply by the batch_size and add the proper offset. Crucially,
   # when ancestor_inds is flattened the layout of the batch is maintained.
   offset = tf.expand_dims(tf.range(batch_size), 0)
   ancestor_inds = tf.reshape(ancestor_inds * batch_size + offset, [-1])
   noresample_inds = tf.range(num_samples * batch_size)
   # Decide whether or not we should resample; don't resample if we are past
   # the end of a sequence.
   should_resample = resampling_criterion(num_samples, log_ess, t)
   should_resample = tf.logical_and(should_resample,
                                    cur_mask[:batch_size] > 0.)
   float_should_resample = tf.to_float(should_resample)
   ancestor_inds = tf.where(
       tf.tile(should_resample, [num_samples]),
       ancestor_inds,
       noresample_inds)
   new_state = nested.gather_tensors(new_state, ancestor_inds)
   # Update the TensorArrays before we reset the weights so that we capture
   # the incremental weights and not zeros.
   ta_updates = [log_weights_acc, log_ess, float_should_resample]
   new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
   # For the particle filters that resampled, update log_p_hat and
   # reset weights to zero.
   log_p_hat_update = tf.reduce_logsumexp(
       log_weights_acc, axis=0) - tf.log(tf.to_float(num_samples))
   log_p_hat_acc += log_p_hat_update * float_should_resample
   log_weights_acc *= (1. - tf.tile(float_should_resample[tf.newaxis, :],
                                    [num_samples, 1]))
   new_accs = (log_weights_acc, log_p_hat_acc, kl_acc)
   return t + 1, new_state, new_tas, new_accs
Example #17
 def summarize(self):
   """Summarize the number of positives and negatives after mining."""
   if self._num_positives_list and self._num_negatives_list:
     avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list))
     avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list))
     tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
     tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives)
Example #18
def preprocess_for_test(image, gt_boxes, gt_masks):


    ih, iw = tf.shape(image)[0], tf.shape(image)[1]

    ## min size resizing
    new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
    image = tf.squeeze(image, axis=[0])

    gt_masks = tf.expand_dims(gt_masks, -1)
    gt_masks = tf.cast(gt_masks, tf.float32)
    gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
    gt_masks = tf.cast(gt_masks, tf.int32)
    gt_masks = tf.squeeze(gt_masks, axis=[-1])

    scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
    gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
    
    ## zero mean image
    image = tf.cast(image, tf.float32)
    image = image / 256.0
    image = (image - 0.5) * 2.0
    image = tf.expand_dims(image, axis=0)

    ## rgb to bgr
    image = tf.reverse(image, axis=[-1])

    return image, gt_boxes, gt_masks 
Example #19
def mask_probs(probs, eos_token, finished):
  """Masks log probabilities such that finished beams
  allocate all probability mass to eos. Unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to
    finished: A boolean tensor of shape `[beam_width]` that specifies which
      elements in the beam are finished already.

  Returns:
    A tensor of shape `[beam_width, vocab_size]`, where unfinished beams
    stay unchanged and finished beams are replaced with a tensor that has all
    probability on the EOS token.
  """
  vocab_size = tf.shape(probs)[1]
  finished_mask = tf.expand_dims(tf.to_float(1. - tf.to_float(finished)), 1)
  # These examples are not finished and we leave them
  non_finished_examples = finished_mask * probs
  # All finished examples are replaced with a vector that has all
  # probability on EOS
  finished_row = tf.one_hot(
      eos_token,
      vocab_size,
      dtype=tf.float32,
      on_value=0.,
      off_value=tf.float32.min)
  finished_examples = (1. - finished_mask) * finished_row
  return finished_examples + non_finished_examples
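A small check of the masking (hedged; TF 1.x session assumed): the unfinished row is left alone, while the finished row collapses onto the EOS token.

log_probs = tf.log(tf.constant([[0.7, 0.2, 0.1],
                                [0.1, 0.1, 0.8]]))
finished = tf.constant([False, True])
masked = mask_probs(log_probs, eos_token=2, finished=finished)
# Row 0 is unchanged; row 1 becomes [tf.float32.min, tf.float32.min, 0.0]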
Example #20
 def __init__(self, env, hidden_size, entcoeff=0.001, lr_rate=1e-3, scope="adversary"):
     self.scope = scope
     self.observation_shape = env.observation_space.shape
     self.actions_shape = env.action_space.shape
     self.input_shape = tuple([o+a for o, a in zip(self.observation_shape, self.actions_shape)])
     self.num_actions = env.action_space.shape[0]
     self.hidden_size = hidden_size
     self.build_ph()
      # Build graph
     generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)
     expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)
     # Build accuracy
     generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))
     expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))
     # Build regression loss
     # let x = logits, z = targets.
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits))
     generator_loss = tf.reduce_mean(generator_loss)
     expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))
     expert_loss = tf.reduce_mean(expert_loss)
     # Build entropy loss
     logits = tf.concat([generator_logits, expert_logits], 0)
     entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
     entropy_loss = -entcoeff*entropy
     # Loss + Accuracy terms
     self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
     self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
     self.total_loss = generator_loss + expert_loss + entropy_loss
     # Build Reward for policy
     self.reward_op = -tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8)
     var_list = self.get_trainable_variables()
     self.lossandgrad = U.function([self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph],
                                   self.losses + [U.flatgrad(self.total_loss, var_list)])
Example #21
def logistic_loss(prediction, label):
	""" Logistic loss function averaged over pixels in the breast area.
	
	Pixels in the background are ignored.
	
	Args:
		prediction: A 2D tensor of floats. The predicted heatmap of logits.
		label: A 2D tensor of integers. Possible labels are 0 (background), 127
			(breast tissue) and 255 (breast mass).

	Returns:
		A float. The loss.
	"""
	with tf.name_scope('logistic_loss'):
		# Generate binary masks.
		mass = tf.to_float(tf.equal(label, 255))
		breast_area = tf.to_float(tf.greater(label, 0))

		# Compute loss per pixel
		pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(prediction, mass)
	
		# Weight the errors (1 for pixels in breast area, zero otherwise)
		weighted_loss = tf.mul(pixel_loss, breast_area)
	
		# Average over pixels in the breast area
		loss = tf.reduce_sum(weighted_loss)/tf.reduce_sum(breast_area)

	return loss
Example #22
 def testLSTMCellReparameterization(
     self, kernel_initializer, recurrent_initializer, bias_initializer,
     all_close):
   batch_size, timesteps, dim = 5, 3, 12
   hidden_size = 10
   inputs = tf.to_float(np.random.rand(batch_size, timesteps, dim))
   cell = bayes.LSTMCellReparameterization(
       hidden_size, kernel_initializer=kernel_initializer,
       recurrent_initializer=recurrent_initializer,
       bias_initializer=bias_initializer)
   noise = tf.to_float(np.random.rand(1, hidden_size))
   h0, c0 = cell.get_initial_state(inputs)
   state = (h0 + noise, c0)
   outputs1, _ = cell(inputs[:, 0, :], state)
   outputs2, _ = cell(inputs[:, 0, :], state)
   cell.sample_weights()
   outputs3, _ = cell(inputs[:, 0, :], state)
   self.evaluate(tf.global_variables_initializer())
   res1, res2, res3 = self.evaluate([outputs1, outputs2, outputs3])
   self.assertEqual(res1.shape, (batch_size, hidden_size))
   self.assertAllClose(res1, res2)
   if all_close:
     self.assertAllClose(res1, res3)
   else:
     self.assertNotAllClose(res1, res3)
   cell.get_config()
 def _scale(x):
   min_x_valuef = tf.to_float(min_x_value)
   max_x_valuef = tf.to_float(max_x_value)
   output_minf = tf.to_float(output_min)
   output_maxf = tf.to_float(output_max)
   return ((((tf.to_float(x) - min_x_valuef) * (output_maxf - output_minf)) /
           (max_x_valuef - min_x_valuef)) + output_minf)
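_scale linearly maps x from [min_x_value, max_x_value] onto [output_min, output_max]; a plain-Python check of the same formula (function and argument names here are illustrative):

def scale(x, min_x, max_x, out_min, out_max):
    return ((x - min_x) * (out_max - out_min)) / (max_x - min_x) + out_min

print(scale(5.0, 0.0, 10.0, -1.0, 1.0))  # 0.0: the midpoint maps to the midpoint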
Example #24
def total_variation_loss(stylized_inputs, total_variation_weight):
  """Total variation regularization loss.

  This loss improves the smoothness of the image by expressing high frequency
  variations as a loss.
  http://link.springer.com/article/10.1023/B:JMIV.0000011325.36760.1e

  Args:
    stylized_inputs: The batched set of images.
    total_variation_weight: Weight of total variation loss.

  Returns:
    Tensor for the total variation loss, dict mapping loss names to losses.
  """
  shape = tf.shape(stylized_inputs)
  batch_size = shape[0]
  height = shape[1]
  width = shape[2]
  channels = shape[3]
  y_size = tf.to_float((height - 1) * width * channels)
  x_size = tf.to_float(height * (width - 1) * channels)
  y_loss = tf.nn.l2_loss(
      stylized_inputs[:, 1:, :, :] - stylized_inputs[:, :-1, :, :]) / y_size
  x_loss = tf.nn.l2_loss(
      stylized_inputs[:, :, 1:, :] - stylized_inputs[:, :, :-1, :]) / x_size
  loss = (y_loss + x_loss) / tf.to_float(batch_size)
  weighted_loss = loss * total_variation_weight
  return weighted_loss, {
      'total_variation_loss': loss,
      'weighted_total_variation_loss': weighted_loss
  }
Example #25
def compute_metrics(output_video, target_video):
  max_pixel_value = 255.0
  output_video = tf.to_float(output_video)
  target_video = tf.to_float(target_video)
  psnr = tf.image.psnr(output_video, target_video, max_pixel_value)
  ssim = tf.image.ssim(output_video, target_video, max_pixel_value)
  return {"PSNR": psnr, "SSIM": ssim}
Example #26
def f_conf_loss(s_out, match, timespan, use_cum_min=True):
    """Loss function for confidence score sequence.

    Args:
        s_out:
        match:
        use_cum_min:
    """
    s_out_shape = tf.shape(s_out)
    num_ex = tf.to_float(s_out_shape[0])
    max_num_obj = tf.to_float(s_out_shape[1])
    match_sum = tf.reduce_sum(match, reduction_indices=[2])

    # Loss for confidence scores.
    if use_cum_min:
        # [B, N]
        s_out_min = f_cum_min(s_out, timespan)
        s_out_max = f_cum_max(s_out, timespan)
        # [B, N]
        s_bce = f_bce_minmax(s_out_min, s_out_max, match_sum)
    else:
        s_bce = f_bce(s_out, match_sum)
    loss = tf.reduce_sum(s_bce) / num_ex / max_num_obj

    return loss
def total_variation_loss(layer):
    shape = tf.shape(layer)
    height = shape[1]
    width = shape[2]
    y = tf.slice(layer, [0,0,0,0], tf.pack([-1,height-1,-1,-1])) - tf.slice(layer, [0,1,0,0], [-1,-1,-1,-1])
    x = tf.slice(layer, [0,0,0,0], tf.pack([-1,-1,width-1,-1])) - tf.slice(layer, [0,0,1,0], [-1,-1,-1,-1])
    return tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) + tf.nn.l2_loss(y) / tf.to_float(tf.size(y))
Example #28
def _get_sampling_probability(hparams, is_training):
  """Returns `sampling_probabiliy` if `sampling schedule` given or 0."""
  if (not hasattr(hparams, 'sampling_schedule') or
      not hparams.sampling_schedule):
    return tf.convert_to_tensor(0.0, tf.float32)

  if not is_training:
    # This is likely an eval/test job associated with a training job using
    # scheduled sampling.
    tf.logging.warning(
        'Setting non-training sampling schedule from %s:%f to constant:1.0.',
        hparams.sampling_schedule, hparams.sampling_rate)
    hparams.sampling_schedule = 'constant'
    hparams.sampling_rate = 1.0
  if hparams.sampling_schedule == 'constant':
    sampling_probability = tf.constant(hparams.sampling_rate)
  elif hparams.sampling_schedule == 'inverse_sigmoid':
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = 1.0 - (
        k / (k + tf.exp(tf.to_float(tf.train.get_or_create_global_step()) / k)))
  elif hparams.sampling_schedule == 'exponential':
    if not 0 < hparams.sampling_rate < 1:
      raise ValueError(
          'Exponential sampling rate must be in the interval (0, 1). Got %f.'
          % hparams.sampling_rate)
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = (
        1.0 - tf.pow(k, tf.to_float(tf.train.get_or_create_global_step())))
  else:
    tf.logging.fatal('Invalid sampling_schedule: %s',
                     hparams.sampling_schedule)
  tf.summary.scalar('sampling_probability', sampling_probability)
  return tf.convert_to_tensor(sampling_probability, tf.float32)
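A plain-Python sketch of the two non-constant schedules above (k is hparams.sampling_rate, t is the global step); illustrative only:

import math

def inverse_sigmoid_schedule(t, k):
    return 1.0 - k / (k + math.exp(t / k))

def exponential_schedule(t, k):  # requires 0 < k < 1
    return 1.0 - k ** t

print(inverse_sigmoid_schedule(0, 500.0))   # ~0.002 at step 0, rising toward 1.0
print(exponential_schedule(100, 0.99))      # ~0.634 after 100 steps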
Example #29
def f_iou_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)

    return iou
Example #30
  def testBayesianLinearModel(self):
    """Tests that model makes reasonable predictions."""
    np.random.seed(42)
    train_batch_size = 5
    test_batch_size = 2
    num_features = 3
    noise_variance = 0.01
    coeffs = tf.range(num_features, dtype=tf.float32)
    features = tf.to_float(np.random.randn(train_batch_size, num_features))
    labels = (tf.tensordot(features, coeffs, [[-1], [0]])
              + noise_variance * tf.to_float(np.random.randn(train_batch_size)))

    model = bayes.BayesianLinearModel(noise_variance=noise_variance)
    model.fit(features, labels)

    test_features = tf.to_float(np.random.randn(test_batch_size, num_features))
    test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
    outputs = model(test_features)
    test_predictions = outputs.distribution.mean()
    test_predictions_variance = outputs.distribution.variance()

    [
        test_labels_val, test_predictions_val, test_predictions_variance_val,
    ] = self.evaluate(
        [test_labels, test_predictions, test_predictions_variance])
    self.assertEqual(test_predictions_val.shape, (test_batch_size,))
    self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))
    self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)
    self.assertAllLessEqual(test_predictions_variance_val, noise_variance)
from __future__ import print_function
import tensorflow as tf
import numpy as np
import time

from gaussian_log import NormalWithLogScale

EPSILON = 1e-9
step_size = 2
height, width = 6, 9
meshgrid = tf.to_float(tf.meshgrid(tf.range(width), tf.range(height - 1, -1, -1)))
print(meshgrid.get_shape())
# hidden_map = np.random.operator((map_height, map_width), maxval=10)
hidden_map = np.random.randint(0, 9, size=(step_size, height, width))
print('map')
print(hidden_map)

hidden_map = tf.constant(hidden_map, dtype=tf.float32)
lidar_size = 6
lidar = np.random.randint(0, 9, size=(step_size, 2, lidar_size)).astype(np.float32)
alpha = tf.ones((step_size, height, width))

with tf.Session() as sess:
    def print_val(x, f=None, name=None):
        if type(x) is str:
            print(x + ': ')
            x = eval(x)
        val = sess.run(x)
        if name is not None:
            print(name + ': ')
        if f is None:
 def get_dropout(self, dropout_rate, is_training):
   return 1 - (tf.to_float(is_training) * dropout_rate)
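The expression above yields a keep probability: 1.0 at eval time and 1 - dropout_rate during training. A plain-Python equivalent (name is illustrative):

def get_dropout_keep_prob(dropout_rate, is_training):
    return 1 - float(is_training) * dropout_rate

print(get_dropout_keep_prob(0.1, True))   # 0.9
print(get_dropout_keep_prob(0.1, False))  # 1.0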
def features_to_nonpadding(features, inputs_or_targets="inputs"):
  key = inputs_or_targets + "_segmentation"
  if features and key in features:
    return tf.minimum(tf.to_float(features[key]), 1.0)
  return None
Example #34
 def train_confusion_fn():
     train_confusion = tf.confusion_matrix(tf.argmax(y_train, 1), tf.argmax(logit_train, 1))
     train_confusion = tf.to_float(train_confusion) / tf.constant(
         (logit_train.shape.as_list()[0] / float(logit_train.shape.as_list()[1])), dtype=tf.float32)
     train_confusion = tf.expand_dims(tf.expand_dims(train_confusion, 0), 3)
     return train_confusion
Example #35
    def build_model(self):
        # build index table
        index_table = tf.contrib.lookup.index_table_from_file(
            vocabulary_file=self.config.vocab_list,
            num_oov_buckets=0,
            default_value=0)

        # get data iterator
        self.data_iterator = self.data.get_data_iterator(index_table,
                                                         mode=self.mode)

        # get inputs
        with tf.variable_scope("inputs"):
            # get next batch if there is no feeded data
            next_batch = self.data_iterator.get_next()
            self.input_queries = tf.placeholder_with_default(
                next_batch["input_queries"], [None, self.config.max_length],
                name="input_queries")
            self.input_replies = tf.placeholder_with_default(
                next_batch["input_replies"], [None, self.config.max_length],
                name="input_replies")
            self.query_lengths = tf.placeholder_with_default(
                tf.squeeze(next_batch["query_lengths"]), [None],
                name="query_lengths")
            self.reply_lengths = tf.placeholder_with_default(
                tf.squeeze(next_batch["reply_lengths"]), [None],
                name="reply_lengths")

            # get hyperparams
            self.embed_dropout_keep_prob = tf.placeholder(
                tf.float32, name="embed_dropout_keep_prob")
            self.lstm_dropout_keep_prob = tf.placeholder(
                tf.float32, name="lstm_dropout_keep_prob")
            self.num_negative_samples = tf.placeholder(
                tf.int32, name="num_negative_samples")
            self.add_echo = tf.placeholder(tf.bool, name="add_echo")

        with tf.variable_scope("properties"):
            # length properties
            cur_batch_length = tf.shape(self.input_queries)[0]
            query_max_length = tf.shape(self.input_queries)[1]
            reply_max_length = tf.shape(self.input_replies)[1]

            # learning rate and optimizer
            learning_rate = tf.train.exponential_decay(
                self.config.learning_rate,
                self.global_step_tensor,
                decay_steps=100000,
                decay_rate=0.9)
            self.optimizer = tf.train.AdamOptimizer(learning_rate)

        # embedding layer
        with tf.variable_scope("embedding"):
            embeddings = tf.Variable(get_embeddings(
                self.config.vocab_list, self.config.pretrained_embed_dir,
                self.config.vocab_size, self.config.embed_dim),
                                     trainable=True,
                                     name="embeddings")
            queries_embedded = tf.to_float(
                tf.nn.embedding_lookup(embeddings,
                                       self.input_queries,
                                       name="queries_embedded"))
            replies_embedded = tf.to_float(
                tf.nn.embedding_lookup(embeddings,
                                       self.input_replies,
                                       name="replies_embedded"))

        # build LSTM layer
        with tf.variable_scope("lstm_layer") as vs:
            query_lstm_cell = tf.nn.rnn_cell.LSTMCell(
                self.config.lstm_dim,
                forget_bias=2.0,
                use_peepholes=True,
                state_is_tuple=True,
                # initializer=tf.orthogonal_initializer(),
            )
            query_lstm_cell = tf.contrib.rnn.DropoutWrapper(
                query_lstm_cell, input_keep_prob=self.lstm_dropout_keep_prob)
            reply_lstm_cell = tf.nn.rnn_cell.LSTMCell(
                self.config.lstm_dim,
                forget_bias=2.0,
                use_peepholes=True,
                state_is_tuple=True,
                # initializer=tf.orthogonal_initializer(),
                reuse=True)
            reply_lstm_cell = tf.contrib.rnn.DropoutWrapper(
                reply_lstm_cell, input_keep_prob=self.lstm_dropout_keep_prob)
            _, queries_encoded = tf.nn.dynamic_rnn(
                cell=query_lstm_cell,
                inputs=queries_embedded,
                sequence_length=tf.cast(self.query_lengths, tf.float32),
                dtype=tf.float32,
            )
            _, replies_encoded = tf.nn.dynamic_rnn(
                cell=reply_lstm_cell,
                inputs=replies_embedded,
                sequence_length=tf.cast(self.reply_lengths, tf.float32),
                dtype=tf.float32,
            )

            _, echo_encoded = tf.nn.dynamic_rnn(
                cell=reply_lstm_cell,
                inputs=queries_embedded,
                sequence_length=tf.cast(tf.squeeze(self.query_lengths),
                                        tf.float32),
                dtype=tf.float32)

            self.queries_encoded = tf.cast(queries_encoded.h, tf.float64)
            self.replies_encoded = tf.cast(replies_encoded.h, tf.float64)
            self.echo_encoded = tf.cast(echo_encoded.h, tf.float64)

        # build dense layer
        with tf.variable_scope("dense_layer"):
            M = tf.get_variable(
                "M",
                shape=[self.config.lstm_dim, self.config.lstm_dim],
                initializer=tf.initializers.truncated_normal())
            self.queries_encoded = tf.matmul(self.queries_encoded,
                                             tf.cast(M, tf.float64))

        with tf.variable_scope("sampling"):
            self.distances = tf.matmul(self.queries_encoded,
                                       self.replies_encoded,
                                       transpose_b=True)
            self.echo_distances = tf.matmul(self.queries_encoded,
                                            self.echo_encoded,
                                            transpose_b=True)
            positive_mask = tf.reshape(tf.eye(cur_batch_length), [-1])
            negative_mask = tf.reshape(
                make_negative_mask(
                    self.distances,
                    method=self.config.negative_sampling,
                    num_negative_samples=self.num_negative_samples), [-1])

        with tf.variable_scope("prediction"):
            distances_flattened = tf.reshape(self.distances, [-1])
            echo_distances_flattened = tf.reshape(self.echo_distances, [-1])
            self.positive_logits = tf.gather(distances_flattened,
                                             tf.where(positive_mask), 1)
            self.negative_logits = tf.gather(distances_flattened,
                                             tf.where(negative_mask), 1)
            self.echo_logits = tf.gather(echo_distances_flattened,
                                         tf.where(positive_mask), 1)

            self.logits = tf.cond(
                self.add_echo, lambda: tf.concat([
                    self.positive_logits, self.negative_logits, self.
                    echo_logits
                ],
                                                 axis=0),
                lambda: tf.concat([self.positive_logits, self.negative_logits],
                                  axis=0))
            self.labels = tf.cond(
                self.add_echo, lambda: tf.concat([
                    tf.ones_like(self.positive_logits),
                    tf.zeros_like(self.negative_logits),
                    tf.zeros_like(self.echo_logits)
                ],
                                                 axis=0),
                lambda: tf.concat([
                    tf.ones_like(self.positive_logits),
                    tf.zeros_like(self.negative_logits)
                ],
                                  axis=0))

            self.positive_probs = tf.sigmoid(self.positive_logits)
            self.echo_probs = tf.sigmoid(self.echo_logits)

            self.probs = tf.sigmoid(self.logits)
            self.predictions = tf.cast(self.probs > 0.5, dtype=tf.int32)

        with tf.variable_scope("loss"):
            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels,
                                                        logits=self.logits))
            # gvs = self.optimizer.compute_gradients(self.loss)
            # capped_gvs = [(tf.clip_by_norm(grad, 5), var) for grad, var in gvs]
            # self.train_step = self.optimizer.apply_gradients(capped_gvs)
            self.train_step = self.optimizer.minimize(self.loss)

        with tf.variable_scope("score"):
            correct_predictions = tf.equal(self.predictions,
                                           tf.to_int32(self.labels))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   "float"),
                                           name="accuracy")
Example #36
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
  """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.

  Performs box voting as described in 'Object detection via a multi-region &
  semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
  each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
  with iou overlap >= iou_thresh. The location of B is set to the weighted
  average location of boxes in S (scores are used for weighting). And the score
  of B is set to the average score of boxes in S.

  Args:
    selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
      boxes are usually selected from pool_boxes using non max suppression.
    pool_boxes: BoxList containing a set of (possibly redundant) boxes.
    iou_thresh: (float scalar) iou threshold for matching boxes in
      selected_boxes and pool_boxes.

  Returns:
    BoxList containing averaged locations and scores for each box in
    selected_boxes.

  Raises:
    ValueError: if
      a) selected_boxes or pool_boxes is not a BoxList.
      b) if iou_thresh is not in [0, 1].
      c) pool_boxes does not have a scores field.
  """
  if not 0.0 <= iou_thresh <= 1.0:
    raise ValueError('iou_thresh must be between 0 and 1')
  if not isinstance(selected_boxes, box_list.BoxList):
    raise ValueError('selected_boxes must be a BoxList')
  if not isinstance(pool_boxes, box_list.BoxList):
    raise ValueError('pool_boxes must be a BoxList')
  if not pool_boxes.has_field('scores'):
    raise ValueError('pool_boxes must have a \'scores\' field')

  iou_ = iou(selected_boxes, pool_boxes)
  match_indicator = tf.to_float(tf.greater(iou_, iou_thresh))
  num_matches = tf.reduce_sum(match_indicator, 1)
  # TODO: Handle the case where some boxes in selected_boxes do not
  # match to any boxes in pool_boxes. For such boxes without any matches, we
  # should return the original boxes without voting.
  match_assert = tf.Assert(
      tf.reduce_all(tf.greater(num_matches, 0)),
      ['Each box in selected_boxes must match with at least one box '
       'in pool_boxes.'])

  scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
  scores_assert = tf.Assert(
      tf.reduce_all(tf.greater_equal(scores, 0)),
      ['Scores must be non negative.'])

  with tf.control_dependencies([scores_assert, match_assert]):
    sum_scores = tf.matmul(match_indicator, scores)
  averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches

  box_locations = tf.matmul(match_indicator,
                            pool_boxes.get() * scores) / sum_scores
  averaged_boxes = box_list.BoxList(box_locations)
  _copy_extra_fields(averaged_boxes, selected_boxes)
  averaged_boxes.add_field('scores', averaged_scores)
  return averaged_boxes
Example #37
def main():

    # my_netwokr = siamese_network()
    #
    # print("djkfljdkd")

    # d = Dataset(mnist_data_train_images, mnist_data_train_labels)
    #
    # img_l, img_r, l = d._get_siamese_batch(10)
    left = tf.placeholder(dtype=tf.float32, shape=[None, 784], name="left")
    right = tf.placeholder(dtype=tf.float32, shape=[None, 784], name="right")

    l_img = tf.reshape(left, shape=[-1, 28, 28, 1], name="l_img")
    r_img = tf.reshape(right, shape=[-1, 28, 28, 1], name="r_img")

    with tf.name_scope("similarity"):
        # label = tf.placeholder(tf.int64, [None, 1], name='label')  # 1 if same, 0 if different
        label = tf.placeholder(tf.int64, [None],
                               name='label')  # 1 if same, 0 if different

    label_float = tf.to_float(label)
    margin = 0.5
    with tf.variable_scope("siamese") as scope:
        left_output = mnist_model(l_img)

        scope.reuse_variables()

        right_output = mnist_model(r_img)

    # loss = contrastive_loss(left_output, right_output, label_float, margin)
    loss = loss_with_spring(left_output, right_output, label_float)
    tf.summary.scalar("loss", loss)

    # Setup Optimizer
    global_step = tf.Variable(0, trainable=False)

    train_step = tf.train.AdamOptimizer(1.e-5).minimize(loss)
    saver = tf.train.Saver()
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # setup tensorboard
        tf.summary.scalar('step', global_step)
        tf.summary.scalar('loss', loss)

        writer = tf.summary.FileWriter('train.log', sess.graph)

        train_data_set = Dataset(mnist_data_train_images,
                                 mnist_data_train_labels)
        test_data_set = Dataset(mnist_data_test_images, mnist_data_test_labels)

        for i in range(50000):
            l_imgs, r_imgs, lbs = train_data_set._get_siamese_batch(10)

            batch_x1, batch_y1 = mnist.train.next_batch(128)
            batch_x2, batch_y2 = mnist.train.next_batch(128)
            batch_y = (batch_y1 == batch_y2).astype('float')
            # print("jdkfjlsd")

            # id = 0
            # for _ in range(10):
            #     plt.subplot(2, 10, id + 1)
            #     plt.imshow(l_imgs[id, :].reshape((28, 28)))
            #
            #     plt.subplot(2, 10, 10 + id + 1)
            #     plt.imshow(r_imgs[id, :].reshape((28, 28)))
            #     plt.title(str(lbs[id]))
            #     id += 1
            #
            # plt.show()
            # print("djkfljd")

            _, l, merged_sum = sess.run([train_step, loss, merged],
                                        feed_dict={
                                            left: batch_x1,
                                            right: batch_x2,
                                            label: batch_y
                                        })

            writer.add_summary(merged_sum, i)

            if i % 1000 == 0:
                # l_imgs, r_imgs, lbs = test_data_set._get_siamese_batch(100)
                # l = sess.run([loss], feed_dict={left: l_imgs,
                #                                 right: r_imgs,
                #                                 label: lbs})
                print("epoch: {}, error: {}".format(i, l))
Example #38
def bicond_reader_embedded(placeholders,
                           inputs_embedded,
                           emb_dim,
                           drop_keep_prob=1.0):
    """
    Performs the same function as the bicond_reader function but works on inputs that have already been embedded by the
    embedding lookup op
    :param placeholders: dict containing tensorflow placeholders
    :param inputs_embedded: Inputs to the reader after already having passed through the embedding lookup ops
    :param emb_dim: word embedding dimension
    :param drop_keep_prob: keep probability for dropout
    :return: score (unscaled logits), loss and prediction (normalised logits (softmax)) tensorflow ops
    """
    # [batch_size, candidate_size]
    targets = tf.to_float(placeholders['targets'])

    question_embedded = inputs_embedded['question_embedded']
    support_embedded = inputs_embedded['support_embedded']
    candidates_embedded = inputs_embedded['candidates_embedded']

    dim1s, dim2s, dim3s, dim4s = tf.unstack(
        tf.shape(support_embedded
                 ))  # [batch_size, num_supports, max_seq2_length, emb_dim]

    # iterate through all supports
    num_steps = dim2s

    initial_outputs = tf.TensorArray(size=num_steps, dtype='float32')
    initial_i = tf.constant(0, dtype='int32')

    # question_encoding = tf.reduce_sum(question_embedded, 1)

    with tf.variable_scope("conditional_reader_seq1") as varscope1:
        # seq1_states: (c_fw, h_fw), (c_bw, h_bw)
        _, seq1_states = reader(question_embedded,
                                placeholders['question_lengths'],
                                emb_dim,
                                scope=varscope1,
                                drop_keep_prob=drop_keep_prob)

    def should_continue(i, *args):
        # execute the loop for all i supports
        return i < num_steps

    def iteration(i, outputs_):
        # get all instances, take only i-th support, flatten so this becomes a 3-dim tensor
        sup_batchi = tf.reshape(
            tf.slice(support_embedded, [0, i, 0, 0], [dim1s, 1, dim3s, dim4s]),
            [dim1s, dim3s, emb_dim
             ])  # [batch_size, num_supports, max_seq2_length, emb_dim]
        sup_lens_batchi = tf.reshape(
            tf.slice(placeholders['support_lengths'], [0, i], [dim1s, 1]),
            [-1])  # [batch_size]

        with tf.variable_scope("conditional_reader_seq2") as varscope2:
            varscope1.reuse_variables()
            # each [batch_size x max_seq_length x output_size]
            outputs, states = reader(sup_batchi,
                                     sup_lens_batchi,
                                     emb_dim,
                                     seq1_states,
                                     scope=varscope2,
                                     drop_keep_prob=drop_keep_prob)

        output = tf.concat(axis=1, values=[states[0][1], states[1][1]])

        # squish back into emb_dim num dimensions
        output = tf.contrib.layers.linear(output, emb_dim)

        # batch matrix multiplication to get per-candidate scores
        scores = tf.einsum('bid,bcd->bc', tf.expand_dims(output, 1),
                           candidates_embedded)

        # append scores for the i-th support to previous supports so we can combine scores for all supports later
        outputs_ = outputs_.write(i, scores)

        return i + 1, outputs_

    i, outputs = tf.while_loop(should_continue, iteration,
                               [initial_i, initial_outputs])

    # packs along axis 0, there doesn't seem to be a way to change that (?)
    outputs_logits = outputs.stack()  # [num_support, batch_size, num_cands]
    scores = tf.reduce_sum(outputs_logits, 0)

    loss = tf.nn.softmax_cross_entropy_with_logits(logits=scores,
                                                   labels=targets)
    predict = tf.nn.softmax(scores)

    return scores, loss, predict
    variance_dis = tf.abs(t1_var - t2_var)
    return variance_dis
def distance(t1, t2):
    k = 1.0
    delta = 3.0
    return  tf.reduce_mean(tf.exp(k * tf.abs(t1 - t2) + delta) - tf.exp(delta) , [1,2,3])
    


sess = tf.Session()

# DALI pipelines that stream the dark (input) and ground-truth images.
dark_pipe = SimplePipeline(batch_size, 1, 0, dark_img_dir)
gt_pipe = SimplePipeline(batch_size, 1, 0, gt_img_dir)
daliop = dali_tf.DALIIterator()

in_image = daliop(pipeline=dark_pipe, shapes=[[batch_size, 400, 600, 3]], dtypes=[tf.uint8])
in_image = tf.to_float(in_image[0]) / 255.0

gt_image = daliop(pipeline=gt_pipe, shapes=[[batch_size, 400, 600, 3]], dtypes=[tf.uint8])
gt_image = tf.to_float(gt_image[0]) / 255.0

print(in_image, gt_image)

# Random training patches, network outputs and a few debug scalars.
input_patches, gt_patches = generate_batch(patches_num, in_image, gt_image)
output_patches = network(input_patches)
out_max = tf.reduce_max(output_patches)
out_min = tf.reduce_min(output_patches)
out_image = network(in_image[0:1, :, :, :])[0, :, :, :]
debug_in = tf.reduce_mean(in_image[0:1, :, :, :])

o_debug = tf.reduce_mean(output_patches)
o_img_debug = tf.reduce_mean(out_image)
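The script above assumes a generate_batch helper that is not defined in this example. A minimal sketch of what such a helper could look like, assuming it draws patches_num aligned random crops from the paired low-light and ground-truth images (the name, patch size, and cropping strategy are assumptions, not the original implementation):

def generate_batch(patches_num, in_image, gt_image, patch_size=128):
    # Concatenate along channels so a single crop keeps the pair aligned.
    pair = tf.concat([in_image, gt_image], axis=3)  # [B, H, W, 6]
    input_list, gt_list = [], []
    for _ in range(patches_num):
        crop = tf.random_crop(pair, [1, patch_size, patch_size, 6])
        input_list.append(crop[:, :, :, :3])
        gt_list.append(crop[:, :, :, 3:])
    return tf.concat(input_list, axis=0), tf.concat(gt_list, axis=0)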
Example #40
def get_model_learning_rate(learning_policy,
                            base_learning_rate,
                            learning_rate_decay_step,
                            learning_rate_decay_factor,
                            training_number_of_steps,
                            learning_power,
                            slow_start_step,
                            slow_start_learning_rate,
                            slow_start_burnin_type='none',
                            decay_steps=0.0,
                            end_learning_rate=1e-6,
                            boundaries=None,
                            boundary_learning_rates=None):
    """Gets model's learning rate.
  Computes the model's learning rate for different learning policies.
  Currently "step", "poly", "cosine" and "multi_steps" are supported.
  (1) The learning policy for "step" is computed as follows:
    current_learning_rate = base_learning_rate *
      learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)
  See tf.train.exponential_decay for details.
  (2) The learning policy for "poly" is computed as follows:
    current_learning_rate = base_learning_rate *
      (1 - global_step / training_number_of_steps) ^ learning_power
  Args:
    learning_policy: Learning rate policy for training.
    base_learning_rate: The base learning rate for model training.
    learning_rate_decay_step: Decay the base learning rate at a fixed step.
    learning_rate_decay_factor: The rate to decay the base learning rate.
    training_number_of_steps: Number of steps for training.
    learning_power: Power used for 'poly' learning policy.
    slow_start_step: Training model with small learning rate for the first
      few steps.
    slow_start_learning_rate: The learning rate employed during slow start.
    slow_start_burnin_type: The burnin type for the slow start stage. Can be
      `none`, which means no burnin, or `linear`, which means the learning rate
      increases linearly from slow_start_learning_rate and reaches
      base_learning_rate after slow_start_step steps.
    decay_steps: Float, `decay_steps` for polynomial learning rate.
    end_learning_rate: Float, `end_learning_rate` for polynomial learning rate.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries.
    boundary_learning_rates: A list of `Tensor`s or `float`s or `int`s that
      specifies the values for the intervals defined by `boundaries`. It should
      have one more element than `boundaries`, and all elements should have the
      same type.
  Returns:
    Learning rate for the specified learning policy.
  Raises:
    ValueError: If learning policy or slow start burnin type is not recognized.
    ValueError: If `boundaries` and `boundary_learning_rates` are not set for
      multi_steps learning rate decay.
  """
    global_step = tf.train.get_or_create_global_step()
    adjusted_global_step = tf.maximum(global_step - slow_start_step, 0)
    if decay_steps == 0.0:
        tf.logging.info('Setting decay_steps to total training steps.')
        decay_steps = training_number_of_steps - slow_start_step
    if learning_policy == 'step':
        learning_rate = tf.train.exponential_decay(base_learning_rate,
                                                   adjusted_global_step,
                                                   learning_rate_decay_step,
                                                   learning_rate_decay_factor,
                                                   staircase=True)
    elif learning_policy == 'poly':
        learning_rate = tf.train.polynomial_decay(
            base_learning_rate,
            adjusted_global_step,
            decay_steps=decay_steps,
            end_learning_rate=end_learning_rate,
            power=learning_power)
    elif learning_policy == 'cosine':
        learning_rate = tf.train.cosine_decay(
            base_learning_rate, adjusted_global_step,
            training_number_of_steps - slow_start_step)
    elif learning_policy == 'multi_steps':
        if boundaries is None or boundary_learning_rates is None:
            raise ValueError(
                'Must set `boundaries` and `boundary_learning_rates` '
                'for multi_steps learning rate decay.')
        learning_rate = tf.train.piecewise_constant_decay(
            adjusted_global_step, boundaries, boundary_learning_rates)
    else:
        raise ValueError('Unknown learning policy.')

    adjusted_slow_start_learning_rate = slow_start_learning_rate
    if slow_start_burnin_type == 'linear':
        # Do linear burnin. Increase linearly from slow_start_learning_rate and
        # reach base_learning_rate after (global_step >= slow_start_steps).
        adjusted_slow_start_learning_rate = (
            slow_start_learning_rate +
            (base_learning_rate - slow_start_learning_rate) *
            tf.to_float(global_step) / slow_start_step)
    elif slow_start_burnin_type != 'none':
        raise ValueError('Unknown burnin type.')

    # Employ small learning rate at the first few steps for warm start.
    return tf.where(global_step < slow_start_step,
                    adjusted_slow_start_learning_rate, learning_rate)
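For reference, a hedged usage sketch of get_model_learning_rate with the 'poly' policy; the hyperparameter values below are illustrative only and not taken from any particular training configuration:

# Illustrative call only; the values are made up.
learning_rate = get_model_learning_rate(
    learning_policy='poly',
    base_learning_rate=0.007,
    learning_rate_decay_step=2000,
    learning_rate_decay_factor=0.1,
    training_number_of_steps=30000,
    learning_power=0.9,
    slow_start_step=0,
    slow_start_learning_rate=1e-4)
tf.summary.scalar('learning_rate', learning_rate)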
def get_loss(mask_label, center_label, \
             heading_class_label, heading_residual_label, \
             size_class_label, size_residual_label, \
             end_points, \
             corner_loss_weight=10.0, \
             box_loss_weight=1.0):
    ''' Loss functions for 3D object detection.
    Input:
        mask_label: TF int32 tensor in shape (B,N)
        center_label: TF tensor in shape (B,3)
        heading_class_label: TF int32 tensor in shape (B,)
        heading_residual_label: TF tensor in shape (B,)
        size_class_label: TF int32 tensor in shape (B,)
        size_residual_label: TF tensor in shape (B,3)
        end_points: dict, outputs from our model
        corner_loss_weight: float scalar
        box_loss_weight: float scalar
    Output:
        total_loss: TF scalar tensor
            the total_loss is also added to the losses collection
    '''
    # 3D Segmentation loss
    mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\
        logits=end_points['mask_logits'], labels=mask_label))
    tf.summary.scalar('3d mask loss', mask_loss)

    # Center regression losses
    center_dist = tf.norm(center_label - end_points['center'], axis=-1)
    center_loss = huber_loss(center_dist, delta=2.0)
    tf.summary.scalar('center loss', center_loss)
    stage1_center_dist = tf.norm(center_label - \
        end_points['stage1_center'], axis=-1)
    stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
    tf.summary.scalar('stage1 center loss', stage1_center_loss)

    # Heading loss
    heading_class_loss = tf.reduce_mean( \
        tf.nn.sparse_softmax_cross_entropy_with_logits( \
        logits=end_points['heading_scores'], labels=heading_class_label))
    tf.summary.scalar('heading class loss', heading_class_loss)

    hcls_onehot = tf.one_hot(heading_class_label,
                             depth=NUM_HEADING_BIN,
                             on_value=1,
                             off_value=0,
                             axis=-1)  # BxNUM_HEADING_BIN
    heading_residual_normalized_label = \
        heading_residual_label / (np.pi/NUM_HEADING_BIN)
    heading_residual_normalized_loss = huber_loss(tf.reduce_sum( \
        end_points['heading_residuals_normalized']*tf.to_float(hcls_onehot), axis=1) - \
        heading_residual_normalized_label, delta=1.0)
    tf.summary.scalar('heading residual normalized loss',
                      heading_residual_normalized_loss)

    # Size loss
    size_class_loss = tf.reduce_mean( \
        tf.nn.sparse_softmax_cross_entropy_with_logits( \
        logits=end_points['size_scores'], labels=size_class_label))
    tf.summary.scalar('size class loss', size_class_loss)

    scls_onehot = tf.one_hot(size_class_label,
                             depth=NUM_SIZE_CLUSTER,
                             on_value=1,
                             off_value=0,
                             axis=-1)  # BxNUM_SIZE_CLUSTER
    scls_onehot_tiled = tf.tile(tf.expand_dims( \
        tf.to_float(scls_onehot), -1), [1,1,3]) # BxNUM_SIZE_CLUSTERx3
    predicted_size_residual_normalized = tf.reduce_sum( \
        end_points['size_residuals_normalized']*scls_onehot_tiled, axis=[1]) # Bx3

    mean_size_arr_expand = tf.expand_dims( \
        tf.constant(g_mean_size_arr, dtype=tf.float32),0) # 1xNUM_SIZE_CLUSTERx3
    mean_size_label = tf.reduce_sum( \
        scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
    size_residual_label_normalized = size_residual_label / mean_size_label
    size_normalized_dist = tf.norm( \
        size_residual_label_normalized - predicted_size_residual_normalized,
        axis=-1)
    size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
    tf.summary.scalar('size residual normalized loss',
                      size_residual_normalized_loss)

    # Corner loss
    # We select the predicted corners corresponding to the
    # GT heading bin and size cluster.
    corners_3d = get_box3d_corners(
        end_points['center'], end_points['heading_residuals'],
        end_points['size_residuals'])  # (B,NH,NS,8,3)
    gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1,1,NUM_SIZE_CLUSTER]) * \
        tf.tile(tf.expand_dims(scls_onehot,1), [1,NUM_HEADING_BIN,1]) # (B,NH,NS)
    corners_3d_pred = tf.reduce_sum( \
        tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask,-1),-1)) * corners_3d,
        axis=[1,2]) # (B,8,3)

    heading_bin_centers = tf.constant( \
        np.arange(0,2*np.pi,2*np.pi/NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
    heading_label = tf.expand_dims(heading_residual_label,1) + \
        tf.expand_dims(heading_bin_centers, 0) # (B,NH)
    heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
    mean_sizes = tf.expand_dims( \
        tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
    size_label = mean_sizes + \
        tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
    size_label = tf.reduce_sum( \
        tf.expand_dims(tf.to_float(scls_onehot),-1)*size_label, axis=[1]) # (B,3)
    corners_3d_gt = get_box3d_corners_helper( \
        center_label, heading_label, size_label) # (B,8,3)
    corners_3d_gt_flip = get_box3d_corners_helper( \
        center_label, heading_label+np.pi, size_label) # (B,8,3)

    corners_dist = tf.minimum(
        tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
        tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
    corners_loss = huber_loss(corners_dist, delta=1.0)
    tf.summary.scalar('corners loss', corners_loss)

    # Weighted sum of all losses
    total_loss = mask_loss + box_loss_weight * (center_loss + \
        heading_class_loss + size_class_loss + \
        heading_residual_normalized_loss*20 + \
        size_residual_normalized_loss*20 + \
        stage1_center_loss + \
        corner_loss_weight*corners_loss)
    tf.add_to_collection('losses', total_loss)

    return total_loss
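get_loss relies on a huber_loss helper that is not shown in this snippet. A minimal sketch, assuming the standard elementwise Huber formulation averaged over the batch (the actual implementation in the model code may differ):

def huber_loss(error, delta):
    # Quadratic for small errors, linear beyond |error| > delta.
    abs_error = tf.abs(error)
    quadratic = tf.minimum(abs_error, delta)
    linear = abs_error - quadratic
    losses = 0.5 * quadratic**2 + delta * linear
    return tf.reduce_mean(losses)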
Example #42
def bicond_reader(placeholders, vocab_size, emb_dim, drop_keep_prob=1.0):
    """
    Builds the tensorflow graph for a bidirectional LSTM conditional reader for question answering
    :param placeholders: dict containing tensorflow placeholders
    :param vocab_size: size of the vocab of the data to be used with this model
    :param emb_dim: word embedding dimension
    :param drop_keep_prob: keep probability for dropout
    :return: score (unscaled logits), loss and prediction (normalised logits (softmax)) tensorflow ops
    """
    # [batch_size, max_seq1_length]
    question = placeholders['question']

    # [batch_size, num_sup, max_seq2_length]
    support = placeholders['support']

    # [batch_size, candidate_size]
    targets = tf.to_float(placeholders['targets'])

    # [batch_size, max_num_cands]
    candidates = placeholders['candidates']

    with tf.variable_scope("embeddings"):
        embeddings = tf.get_variable("word_embeddings", [vocab_size, emb_dim],
                                     dtype=tf.float32)

    with tf.variable_scope("embedders") as varscope:
        question_embedded = tf.nn.embedding_lookup(embeddings, question)
        varscope.reuse_variables()
        support_embedded = tf.nn.embedding_lookup(embeddings, support)
        varscope.reuse_variables()
        candidates_embedded = tf.nn.embedding_lookup(embeddings, candidates)

    dim1s, dim2s, dim3s, dim4s = tf.unstack(
        tf.shape(support_embedded
                 ))  # [batch_size, num_supports, max_seq2_length, emb_dim]

    # iterate through all supports
    num_steps = dim2s

    initial_outputs = tf.TensorArray(size=num_steps, dtype='float32')
    initial_i = tf.constant(0, dtype='int32')

    # question_encoding = tf.reduce_sum(question_embedded, 1)

    with tf.variable_scope("conditional_reader_seq1") as varscope1:
        # seq1_states: (c_fw, h_fw), (c_bw, h_bw)
        _, seq1_states = reader(question_embedded,
                                placeholders['question_lengths'],
                                emb_dim,
                                scope=varscope1,
                                drop_keep_prob=drop_keep_prob)

    def should_continue(i, *args):
        # execute the loop for all i supports
        return i < num_steps

    def iteration(i, outputs_):
        # get all instances, take only i-th support, flatten so this becomes a 3-dim tensor
        sup_batchi = tf.reshape(
            tf.slice(support_embedded, [0, i, 0, 0], [dim1s, 1, dim3s, dim4s]),
            [dim1s, dim3s, emb_dim
             ])  # [batch_size, max_seq2_length, emb_dim]
        sup_lens_batchi = tf.reshape(
            tf.slice(placeholders['support_lengths'], [0, i], [dim1s, 1]),
            [-1])  # [batch_size]

        with tf.variable_scope("conditional_reader_seq2") as varscope2:
            varscope1.reuse_variables()
            # each [batch_size x max_seq_length x output_size]
            outputs, states = reader(sup_batchi,
                                     sup_lens_batchi,
                                     emb_dim,
                                     seq1_states,
                                     scope=varscope2,
                                     drop_keep_prob=drop_keep_prob)

        output = tf.concat(axis=1,
                           values=[states[0][1],
                                   states[1][1]])  # [batch_size, 2*emb_dim]

        # squish back into emb_dim num dimensions
        output = tf.contrib.layers.linear(output,
                                          emb_dim)  # [batch_size, emb_dim]

        # batch matrix multiplication to get per-candidate scores
        scores = tf.einsum(
            'bid,bcd->bc', tf.expand_dims(output, 1), candidates_embedded
        )  # [batch_size, 1, emb_dim], [batch_size, max_num_cands, emb_dim] -> [batch_size, max_num_cands]

        # append scores for the i-th support to previous supports so we can combine scores for all supports later
        outputs_ = outputs_.write(i, scores)

        return i + 1, outputs_

    i, outputs = tf.while_loop(should_continue, iteration,
                               [initial_i, initial_outputs])

    # packs along axis 0, there doesn't seem to be a way to change that (?)
    outputs_logits = outputs.stack()  # [num_support, batch_size, num_cands]
    scores = tf.reduce_sum(outputs_logits, 0)

    loss = tf.nn.softmax_cross_entropy_with_logits(logits=scores,
                                                   labels=targets)
    predict = tf.nn.softmax(scores)

    return scores, loss, predict
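Both conditional reader examples above assume a reader helper that encodes a sequence with a bidirectional LSTM and can condition its initial states on a previous encoding. A minimal sketch under those assumptions (the signature is inferred from the call sites; the original codebase's implementation may differ):

def reader(inputs, lengths, output_size, contexts=(None, None), scope=None,
           drop_keep_prob=1.0):
    # Bidirectional LSTM encoder; contexts optionally seeds the fw/bw states.
    with tf.variable_scope(scope or "reader"):
        cell_fw = tf.contrib.rnn.LSTMCell(output_size, state_is_tuple=True)
        cell_bw = tf.contrib.rnn.LSTMCell(output_size, state_is_tuple=True)
        # keep_prob=1.0 makes the wrapper a no-op, so wrapping is always safe.
        cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=drop_keep_prob)
        cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=drop_keep_prob)
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs,
            sequence_length=lengths,
            initial_state_fw=contexts[0],
            initial_state_bw=contexts[1],
            dtype=tf.float32)
    return outputs, states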
Example #43
    def _creat_model(self):
        self.y_ = tf.one_hot(indices=self.y, depth=self.target_vocab_size)

        with tf.variable_scope('generator'):
            self.g = self.generator()
        with tf.variable_scope('discriminator') as scope:
            self.D_real = self.discriminator(self.y_)
            scope.reuse_variables()
            self.D_fake = self.discriminator(self.g)

        self.G_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='generator')
        self.D_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='discriminator')

        self.preds = tf.to_int32(tf.arg_max(self.g, dimension=-1))
        self.istarget = tf.to_float(tf.not_equal(self.y, 0))
        self.acc = tf.reduce_sum(
            tf.to_float(tf.equal(self.preds, self.y)) *
            self.istarget) / (tf.reduce_sum(self.istarget))
        tf.summary.scalar('acc', self.acc)
        if self.is_training:
            # Loss
            self.y_smoothed = label_smoothing(
                tf.one_hot(self.y, depth=self.target_vocab_size))
            self.loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.g, labels=self.y_smoothed)
            self.content_loss = tf.reduce_sum(
                self.loss * self.istarget) / (tf.reduce_sum(self.istarget))
            tf.summary.scalar('mean_loss', self.content_loss)

            disc_loss = -tf.reduce_mean(self.D_real) + tf.reduce_mean(
                self.D_fake)
            gen_loss = -tf.reduce_mean(self.D_fake)

            alpha = tf.random_uniform(shape=[hp.batch_size, 1, 1],
                                      minval=0.,
                                      maxval=1.)

            differences = self.y_ - self.g
            interpolates = self.y_ + alpha * differences
            gradients = tf.gradients(self.discriminator(interpolates),
                                     [interpolates])[0]
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
            gradient_penalty = tf.reduce_mean((slopes - 1.)**2)

            self.global_step = tf.Variable(0, name='global_step')
            #====
            # tf.assign(ref, value, validate_shape=None, use_locking=None, name=None)
            # Assigns `value` to `ref`. Note that `ref` must be a tensor created
            # with tf.Variable; passing ref=tf.constant() raises an error.
            # Also, shape(value) must equal shape(ref).
            #=====
            self.gs_op = tf.assign(self.global_step,
                                   tf.add(self.global_step, 1))

            self.D_loss = self.SIGMA * (disc_loss +
                                        self.LAMBDA * gradient_penalty)
            self.G_loss = self.content_loss + self.SIGMA * gen_loss
            self.D_opt = tf.train.AdamOptimizer(
                learning_rate=hp.D_learning_rate, beta1=0.5,
                beta2=0.9).minimize(self.D_loss, var_list=self.D_params)
            self.G_opt = tf.train.AdamOptimizer(
                learning_rate=hp.G_learning_rate,
                beta1=0.8,
                beta2=0.98,
                epsilon=1e-8).minimize(self.G_loss, var_list=self.G_params)
            self.merged = tf.summary.merge_all()
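The graph above calls a label_smoothing helper that is not shown. A minimal sketch, assuming the usual formulation that mixes the one-hot targets with a uniform distribution over the vocabulary (epsilon is an assumed default):

def label_smoothing(inputs, epsilon=0.1):
    # inputs: one-hot targets, last dimension = number of classes.
    num_channels = inputs.get_shape().as_list()[-1]
    return ((1 - epsilon) * inputs) + (epsilon / num_channels)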
Example #44
def adam_train(x_train, y_train, x_valid, y_valid, x_test):
    # TODO: Make sure you set all the random seeds properly at the beginning
    # of this function so your code won't produce different output every time
    # you run it.

    x_train = x_train.reshape((len(x_train), -1))
    x_valid = x_valid.reshape((len(x_valid), -1))
    x_test = x_test.reshape((len(x_test), -1))
    y_train = one_hot(y_train, 10)
    y_valid = one_hot(y_valid, 10)

    n_inputs = 32 * 32 * 3
    n_hidden1 = 200
    n_hidden2 = 100
    n_outputs = 10

    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
    y = tf.placeholder(tf.float32, shape=(None, n_outputs), name="y")

    def neuron_layer(X, n_neurons, name, activation=None):
        with tf.name_scope(name):
            n_inputs = int(X.get_shape()[1])
            stddev = 2 / np.sqrt(n_inputs)
            init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
            W = tf.Variable(init, name="kernel")
            b = tf.Variable(tf.zeros([1, n_neurons]), name="bias")
            Z = tf.matmul(X, W) + b
            if activation is not None:
                return activation(Z), W, b
            else:
                return Z, W, b

    with tf.name_scope("dnn"):
        hidden1, W1, b1 = neuron_layer(X,
                                       n_hidden1,
                                       name="hidden1",
                                       activation=tf.nn.relu)
        hidden2, W2, b2 = neuron_layer(hidden1,
                                       n_hidden2,
                                       name="hidden2",
                                       activation=tf.nn.relu)
        logits, W3, b3 = neuron_layer(hidden2, n_outputs, name="outputs")

    with tf.name_scope("loss"):
        yp = tf.nn.softmax(logits)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

    with tf.name_scope("backprop"):
        d_logits = yp - y

        d_hidden2 = tf.matmul(d_logits, tf.transpose(W3))
        d_W3 = tf.matmul(tf.transpose(hidden2), d_logits)
        d_b3 = tf.reduce_sum(d_logits, axis=0, keep_dims=True)

        d_2 = tf.to_float(tf.greater(tf.matmul(hidden1, W2) + b2,
                                     0)) * d_hidden2
        d_hidden1 = tf.matmul(d_2, tf.transpose(W2))
        d_W2 = tf.matmul(tf.transpose(hidden1), d_2)
        d_b2 = tf.reduce_sum(d_2, axis=0, keep_dims=True)

        d_1 = tf.to_float(tf.greater(tf.matmul(X, W1) + b1, 0)) * d_hidden1
        d_W1 = tf.matmul(tf.transpose(X), d_1)
        d_b1 = tf.reduce_sum(d_1, axis=0, keep_dims=True)

    learning_rate = 0.0001  # this is a good learning rate; you can change it

    update_ops = []  # contains the ops to update auxiliary variables like
    # beta_power, m and v
    training_ops = []  # contains the ops to update variables
    eps = 1e-8

    # list of params and gradient

    Vs = [W1, b1, W2, b2, W3, b3]

    dVs = [d_W1, d_b1, d_W2, d_b2, d_W3, d_b3]
    # set betas
    beta1 = 0.9
    beta2 = 0.999

    # TODO: write all the code to update betas, m, v, compute m_hat, v_hat
    # and update all the variables here.
    # Add all tensorflow ops to update betas, m,v to updates_ops
    # Add all ops to update V to training_ops

    #minimize loss function
    # t = tf.Variable(0.)
    #
    # with tf.name_scope("adam"):
    #
    #     m_array = [0] * len(Vs)
    #     v_array = [0] * len(dVs)
    #     training_ops = [None] * len(Vs)
    #     for i in range(len(Vs)):
    #         m_array[i] = beta1 * m_array[i] + ((1-beta1) * dVs[i])
    #         v_array[i] = beta2 * v_array[i] + ((1-beta2) * tf.square(dVs[i]))
    #
    #         mt_hat = m_array[i]/(1 - beta1**t)
    #         vt_hat = v_array[i]/(1 - beta2**t)
    #
    #         params = tf.assign(Vs[i], Vs[i] - learning_rate * (mt_hat/(tf.sqrt(vt_hat) + eps )))
    #         training_ops.append(params)
    #         #a = learning_rate * (mt_hat_w/(tf.sqrt(vt_hat_w + eps)))
    #
    #     update_ops.append(m_array)
    #     update_ops.append(v_array)
    t = tf.Variable(0.)

    with tf.name_scope("adam"):
        m_array = []
        v_array = []
        for i in range(len(Vs)):
            m_array.append(tf.Variable(tf.zeros(Vs[i].shape)))
            v_array.append(tf.Variable(tf.zeros(Vs[i].shape)))

        # Bias-corrected step size, computed once per step rather than
        # compounded inside the parameter loop (the original in-place
        # `learning_rate *= ...` applied the correction repeatedly).
        lr_t = learning_rate * tf.sqrt(1 - (beta2 ** t)) / (1 - (beta1 ** t))

        for i in range(len(Vs)):
            m = tf.assign(m_array[i],
                          beta1 * m_array[i] + ((1 - beta1) * dVs[i]))
            v = tf.assign(
                v_array[i],
                beta2 * v_array[i] + ((1 - beta2) * tf.square(dVs[i])))

            update_ops.append((m, v))

            params = tf.assign(
                Vs[i], Vs[i] - lr_t * (m / (tf.sqrt(v) + eps)))

            training_ops.append(params)

    with tf.name_scope("eval"):
        accuracy = 100.0 * tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1)),
                    dtype=tf.float32))

    init = tf.global_variables_initializer()

    n_epochs = 100
    batch_size = 200
    n_batches = len(x_train) * 3 // batch_size

    dataset_iterator = DatasetIterator(x_train, y_train, batch_size)

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(n_epochs):
            # compute model
            sess.run(tf.assign(t, t + 1))
            for iteration in range(n_batches):
                x_batch, y_batch = dataset_iterator.next_batch()
                sess.run(update_ops, feed_dict={X: x_batch, y: y_batch})
                sess.run(training_ops, feed_dict={X: x_batch, y: y_batch})

            acc_train = accuracy.eval(feed_dict={X: x_batch, y: y_batch})
            acc_validation = accuracy.eval(feed_dict={X: x_valid, y: y_valid})
            print(epoch, "Training batch accuracy:", acc_train,
                  "Validation set accuracy:", acc_validation)

        # Now that the model is trained, it is the test time!
        yp_test = sess.run(tf.argmax(logits, axis=1), feed_dict={X: x_test})

    return yp_test
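adam_train assumes one_hot and DatasetIterator helpers that are not part of this example. Hypothetical sketches of both, using only NumPy, are below; the real helpers may be implemented differently:

def one_hot(labels, num_classes):
    # Integer labels -> one-hot float rows.
    return np.eye(num_classes, dtype=np.float32)[labels]

class DatasetIterator(object):
    # Cycles through (x, y) in shuffled mini-batches.
    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size
        self.order = np.random.permutation(len(x))
        self.pos = 0

    def next_batch(self):
        if self.pos + self.batch_size > len(self.x):
            self.order = np.random.permutation(len(self.x))
            self.pos = 0
        idx = self.order[self.pos:self.pos + self.batch_size]
        self.pos += self.batch_size
        return self.x[idx], self.y[idx]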
Example #45
def reinforce_episodic_gradients(logits, sampled_outputs, rewards, lengths=None,
                                 params=None):
    """
    Calculate REINFORCE gradients given a batch of single episodic rewards.

    Args:
        logits: list of `num_timesteps` batches, each of size
            `batch_size * num_classes`: logits for distribution over actions
            at each timestep
        sampled_outputs: list of `num_timesteps` batches, each of size
            `batch_size`: ints describing sampled action at each timestep
            for each example
        rewards: float batch `batch_size` describing episodic reward per
            example
        lengths: `None` or a `batch_size` int batch describing length of each
            sequence. If `None`, then all sequences are assumed to have the
            same length `num_timesteps`
        params: list of parameters for which gradients are computed; defaults
            to `tf.trainable_variables()`

    Returns:
        updates: list of (gradient, param) update tuples
    """

    if params is None:
        params = tf.trainable_variables()

    batch_size = tf.shape(logits[0])[0]
    num_classes = tf.shape(logits[0])[1]
    num_timesteps = len(logits)

    # Feed logits through log softmax.
    log_softmaxes = [tf.nn.log_softmax(logits_t) for logits_t in logits]

    # Fetch p(sampled_output) for each timestep.
    # This is a bit awkward -- need to pick a single element out of each
    # example softmax vector.
    # Output is (batch_size, 1) per timestep
    flat_softmaxes = [tf.reshape(log_softmax_t, (-1,))
                      for log_softmax_t in log_softmaxes]
    lookup_offset = tf.range(batch_size) * num_classes
    log_p_sampled = [tf.gather(log_softmax_t, tf.to_int32(sampled_outputs_t) + lookup_offset)
                     for log_softmax_t, sampled_outputs_t
                     in zip(flat_softmaxes, sampled_outputs)]

    # Merge into single (batch_size, num_timesteps) batch
    log_p_sampled = [tf.expand_dims(log_p_sampled_t, 1)
                     for log_p_sampled_t in log_p_sampled]
    log_p_sampled = tf.concat(1, log_p_sampled)

    if lengths is not None:
        # For each example, zero out probabilities after i > example_length
        mask = tf.tile(tf.expand_dims(tf.range(num_timesteps), 0), (batch_size, 1))
        mask = tf.to_float(tf.greater(tf.expand_dims(lengths, 1), mask))
        log_p_sampled *= mask

    # Calculate p(sampled_output) by chain rule. We can merge these ahead of
    # time, since we only have a single episode-level reward.
    # (batch_size,) batch
    log_p_sampled = tf.reduce_sum(log_p_sampled, 1)

    # Main REINFORCE gradient equation.
    # Apply rewards on batch + mean beforehand for efficiency.
    log_p_sampled *= -1 * rewards
    log_p_sampled = tf.reduce_mean(log_p_sampled)
    gradients = tf.gradients(log_p_sampled, params)

    return zip(gradients, params)
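A short, hedged usage sketch: the returned (gradient, param) pairs can be fed straight to an optimizer. The tensor names below (logits_per_step, samples_per_step, episode_rewards) are placeholders for whatever the surrounding training loop provides:

# Illustrative usage only.
grads_and_vars = list(reinforce_episodic_gradients(
    logits_per_step, samples_per_step, episode_rewards))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = optimizer.apply_gradients(grads_and_vars)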
Example #46
def frame_loss(Y, batch_size):
    frame_loss = 0
    b,h,w,d = tf.unstack(tf.shape(Y))
    for i in xrange(batch_size - 1):
        frame_loss += tf.nn.l2_loss(Y[i+1,:,:,:] - Y[i,:,:,:]) * 2 / tf.to_float(h*w*d)
    return frame_loss / (batch_size-1)
Example #47
def L2_loss(x,y):
    size = tf.size(x)
    return tf.nn.l2_loss(x-y)* 2 / tf.to_float(size)
Example #48
def build_model_graph(features, labels, is_training, params):
    """Builds the forward model graph."""
    use_batched_nms = (not params['use_tpu'] and params['use_batched_nms'])
    is_gpu_inference = (not is_training and use_batched_nms)
    model_outputs = {}

    if is_training and params['transpose_input']:
        if (params['backbone'].startswith('resnet')
                and params['conv0_space_to_depth_block_size'] > 0):
            features['images'] = tf.transpose(features['images'], [2, 0, 1, 3])
        else:
            features['images'] = tf.transpose(features['images'], [3, 0, 1, 2])

    batch_size, image_height, image_width, _ = (
        features['images'].get_shape().as_list())

    conv0_space_to_depth_block_size = 0
    if (is_training and (params['backbone'].startswith('resnet')
                         and params['conv0_space_to_depth_block_size'] > 0)):
        conv0_space_to_depth_block_size = params[
            'conv0_space_to_depth_block_size']
        image_height *= conv0_space_to_depth_block_size
        image_width *= conv0_space_to_depth_block_size

    if 'source_ids' not in features:
        features['source_ids'] = -1 * tf.ones([batch_size], dtype=tf.float32)

    all_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                  params['num_scales'],
                                  params['aspect_ratios'],
                                  params['anchor_scale'],
                                  (image_height, image_width))

    if 'resnet' in params['backbone']:
        with tf.variable_scope(params['backbone']):
            resnet_fn = resnet.resnet_v1(
                params['backbone'],
                conv0_kernel_size=params['conv0_kernel_size'],
                conv0_space_to_depth_block_size=conv0_space_to_depth_block_size,
                num_batch_norm_group=params['num_batch_norm_group'])
            backbone_feats = resnet_fn(
                features['images'], (params['is_training_bn'] and is_training))
    elif 'mnasnet' in params['backbone']:
        with tf.variable_scope(params['backbone']):
            _, endpoints = mnasnet_models.build_mnasnet_base(
                features['images'],
                params['backbone'],
                training=(params['is_training_bn'] and is_training),
                override_params={'use_keras': False})

            backbone_feats = {
                2: endpoints['reduction_2'],
                3: endpoints['reduction_3'],
                4: endpoints['reduction_4'],
                5: endpoints['reduction_5'],
            }
    else:
        raise ValueError('Not a valid backbone option: %s' %
                         params['backbone'])

    fpn_feats = fpn.fpn(backbone_feats, params['min_level'],
                        params['max_level'])
    model_outputs.update({
        'fpn_features': fpn_feats,
    })

    rpn_score_outputs, rpn_box_outputs = heads.rpn_head(
        fpn_feats, params['min_level'], params['max_level'],
        len(params['aspect_ratios'] * params['num_scales']))

    if is_training:
        rpn_pre_nms_topn = params['rpn_pre_nms_topn']
        rpn_post_nms_topn = params['rpn_post_nms_topn']
    else:
        rpn_pre_nms_topn = params['test_rpn_pre_nms_topn']
        rpn_post_nms_topn = params['test_rpn_post_nms_topn']

    rpn_box_scores, rpn_box_rois = roi_ops.multilevel_propose_rois(
        rpn_score_outputs,
        rpn_box_outputs,
        all_anchors,
        features['image_info'],
        rpn_pre_nms_topn,
        rpn_post_nms_topn,
        params['rpn_nms_threshold'],
        params['rpn_min_size'],
        bbox_reg_weights=None,
        use_batched_nms=use_batched_nms)
    rpn_box_rois = tf.to_float(rpn_box_rois)
    if is_training:
        rpn_box_rois = tf.stop_gradient(rpn_box_rois)
        rpn_box_scores = tf.stop_gradient(rpn_box_scores)

    if is_training:
        # Sampling
        box_targets, class_targets, rpn_box_rois, proposal_to_label_map = (
            training_ops.proposal_label_op(
                rpn_box_rois,
                labels['gt_boxes'],
                labels['gt_classes'],
                features['image_info'],
                batch_size_per_im=params['batch_size_per_im'],
                fg_fraction=params['fg_fraction'],
                fg_thresh=params['fg_thresh'],
                bg_thresh_hi=params['bg_thresh_hi'],
                bg_thresh_lo=params['bg_thresh_lo']))

    # Performs multi-level RoIAlign.
    box_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
        fpn_feats,
        rpn_box_rois,
        output_size=7,
        is_gpu_inference=is_gpu_inference)

    class_outputs, box_outputs, _ = heads.box_head(
        box_roi_features,
        num_classes=params['num_classes'],
        mlp_head_dim=params['fast_rcnn_mlp_head_dim'])

    if not is_training:
        if is_gpu_inference:
            generate_detections_fn = postprocess_ops.generate_detections_gpu
        else:
            generate_detections_fn = postprocess_ops.generate_detections_tpu
        detections = generate_detections_fn(
            class_outputs, box_outputs, rpn_box_rois, features['image_info'],
            params['test_rpn_post_nms_topn'],
            params['test_detections_per_image'], params['test_nms'],
            params['bbox_reg_weights'])

        model_outputs.update({
            'num_detections': detections[0],
            'detection_boxes': detections[1],
            'detection_classes': detections[2],
            'detection_scores': detections[3],
        })
    else:
        encoded_box_targets = training_ops.encode_box_targets(
            rpn_box_rois, box_targets, class_targets,
            params['bbox_reg_weights'])
        model_outputs.update({
            'rpn_score_outputs': rpn_score_outputs,
            'rpn_box_outputs': rpn_box_outputs,
            'class_outputs': class_outputs,
            'box_outputs': box_outputs,
            'class_targets': class_targets,
            'box_targets': encoded_box_targets,
            'box_rois': rpn_box_rois,
        })

    # Faster-RCNN mode.
    if not params['include_mask']:
        return model_outputs

    # Mask sampling
    if not is_training:
        selected_box_rois = model_outputs['detection_boxes']
        class_indices = model_outputs['detection_classes']
        # If using GPU for inference, delay the cast until the Gather ops show
        # up, since GPU inference handles floating point better.
        # TODO(laigd): revisit this when newer versions of the GPU libraries
        # are released.
        if not is_gpu_inference:
            class_indices = tf.to_int32(class_indices)
    else:
        (selected_class_targets, selected_box_targets, selected_box_rois,
         proposal_to_label_map) = (training_ops.select_fg_for_masks(
             class_targets,
             box_targets,
             rpn_box_rois,
             proposal_to_label_map,
             max_num_fg=int(params['batch_size_per_im'] *
                            params['fg_fraction'])))
        class_indices = tf.to_int32(selected_class_targets)

    mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
        fpn_feats,
        selected_box_rois,
        output_size=14,
        is_gpu_inference=is_gpu_inference)
    mask_outputs = heads.mask_head(mask_roi_features,
                                   class_indices,
                                   num_classes=params['num_classes'],
                                   mrcnn_resolution=params['mrcnn_resolution'],
                                   is_gpu_inference=is_gpu_inference)

    if is_training:
        mask_targets = training_ops.get_mask_targets(
            selected_box_rois, proposal_to_label_map, selected_box_targets,
            labels['cropped_gt_masks'], params['mrcnn_resolution'])
        model_outputs.update({
            'mask_outputs': mask_outputs,
            'mask_targets': mask_targets,
            'selected_class_targets': selected_class_targets,
        })
    else:
        model_outputs.update({
            'detection_masks': tf.nn.sigmoid(mask_outputs),
        })

    return model_outputs
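build_model_graph reads a large params dictionary. The keys below are the ones the function actually accesses; the values are illustrative placeholders only, not the defaults of any released configuration:

params = {
    'backbone': 'resnet50',
    'use_tpu': False,
    'use_batched_nms': True,
    'transpose_input': False,
    'conv0_kernel_size': 7,
    'conv0_space_to_depth_block_size': 0,
    'num_batch_norm_group': None,
    'is_training_bn': True,
    'min_level': 2,
    'max_level': 6,
    'num_scales': 1,
    'aspect_ratios': [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
    'anchor_scale': 8.0,
    'rpn_pre_nms_topn': 2000,
    'rpn_post_nms_topn': 1000,
    'test_rpn_pre_nms_topn': 1000,
    'test_rpn_post_nms_topn': 1000,
    'rpn_nms_threshold': 0.7,
    'rpn_min_size': 0.0,
    'batch_size_per_im': 512,
    'fg_fraction': 0.25,
    'fg_thresh': 0.5,
    'bg_thresh_hi': 0.5,
    'bg_thresh_lo': 0.0,
    'num_classes': 91,
    'fast_rcnn_mlp_head_dim': 1024,
    'bbox_reg_weights': (10.0, 10.0, 5.0, 5.0),
    'test_detections_per_image': 100,
    'test_nms': 0.5,
    'include_mask': True,
    'mrcnn_resolution': 28,
}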
Example #49
    def build_lstm_graph(self):
        config = self.config
        vocab_size = self.vocab.vocab_size

        self.global_step = tf.get_variable("global_step", [],
                                           tf.int32,
                                           initializer=tf.zeros_initializer,
                                           trainable=False)

        with tf.name_scope('input'):
            self.x, self.y, self.w = self.iterator.get_next()
            # self.x = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])
            # self.y = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])
            # self.w = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])

            self.keep_prob = tf.get_variable('keep_prob', [],
                                             dtype=tf.float32,
                                             trainable=False)
            self.new_keep_prob = tf.placeholder(tf.float32,
                                                shape=[],
                                                name="new_keep_prob")
            self.keep_prob_update = tf.assign(self.keep_prob,
                                              self.new_keep_prob)

            self.lr = tf.get_variable('lr', [],
                                      dtype=tf.float32,
                                      trainable=False)
            self.new_lr = tf.placeholder(tf.float32, shape=[], name="new_lr")
            self.lr_update = tf.assign(self.lr, self.new_lr)

        with tf.name_scope('embedding'):
            self.embed = tf.get_variable('embedding',
                                         shape=[vocab_size, config.embed_size],
                                         dtype=tf.float32)
            self.embed_x = tf.nn.embedding_lookup(self.embed, self.x)

            if config.keep_prob < 1.0:
                self.embed_x = tf.nn.dropout(self.embed_x, config.keep_prob)

        with tf.name_scope('lstm'):
            cells = []
            for _ in range(config.num_layers):
                cell = tf.contrib.rnn.LSTMBlockCell(config.hidden_size,
                                                    forget_bias=0.0)
                if config.keep_prob < 1.0:
                    cell = tf.contrib.rnn.DropoutWrapper(
                        cell, output_keep_prob=config.keep_prob)
                cells.append(cell)
            cell = tf.contrib.rnn.MultiRNNCell(cells)
            self.outputs, _ = tf.nn.dynamic_rnn(cell,
                                                self.embed_x,
                                                dtype=tf.float32)
            self.outputs = tf.reshape(
                self.outputs,
                [config.batch_size * config.num_steps, config.hidden_size])

        with tf.name_scope('softmax'):
            self.softmax_w = tf.get_variable('softmax_w',
                                             [vocab_size, config.hidden_size],
                                             dtype=tf.float32)
            self.softmax_b = tf.get_variable('softmax_b', [vocab_size],
                                             dtype=tf.float32)

        with tf.name_scope('loss'):
            if config.num_sampled > 0:
                labels = tf.reshape(self.y,
                                    [config.batch_size * config.num_steps, 1])
                self.loss = tf.nn.sampled_softmax_loss(
                    weights=self.softmax_w,
                    biases=self.softmax_b,
                    labels=labels,
                    inputs=self.outputs,
                    num_sampled=config.num_sampled,
                    num_classes=vocab_size,
                    partition_strategy="div")
            else:
                labels = tf.reshape(self.y,
                                    [config.batch_size * config.num_steps])
                logits = tf.matmul(self.outputs, tf.transpose(self.softmax_w))
                logits = tf.nn.bias_add(logits, self.softmax_b)
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=labels, logits=logits)
            self.loss = tf.reduce_mean(
                tf.reshape(self.loss, [config.num_steps, config.batch_size]) *
                tf.reshape(tf.to_float(self.w),
                           [config.num_steps, config.batch_size]),
                axis=1)
            self.loss = tf.reshape(self.loss, [config.num_steps])
            self.loss = tf.reduce_sum(self.loss)

        with tf.name_scope('optimize'):
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars),
                                              config.max_grad_norm)
            optimizer = tf.train.GradientDescentOptimizer(self.lr)
            self.train_op = optimizer.apply_gradients(
                zip(grads, tvars), global_step=self.global_step)

        with tf.name_scope('ema'):
            lstm_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          ".*lstm.*")
            ema = tf.train.ExponentialMovingAverage(decay=0.999)
            self.train_op = tf.group(*[self.train_op, ema.apply(lstm_vars)])
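The keep_prob and learning-rate variables above are updated through the assign-through-placeholder pattern. A brief usage sketch, where model and sess stand in for whatever the surrounding training script uses:

# Illustrative usage only.
sess.run(model.lr_update, feed_dict={model.new_lr: 1.0})
sess.run(model.keep_prob_update, feed_dict={model.new_keep_prob: 0.5})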
Example #50
 def loss_1st(y_true, y_pred):
     L = y_true
     Y = y_pred
     batch_size = tf.to_float(K.shape(L)[0])
     return alpha * 2 * tf.linalg.trace(tf.matmul(tf.matmul(Y, L, transpose_a=True), Y)) / batch_size
    def __init__(self, dataset_type, input_dimensions, regularizer, number_mini_batches,
                 number_output_units, activation_unit, learning_rate, hidden_units,
                 number_samples_variance_reduction, precision_alpha, weights_prior_mean_1,
                 weights_prior_mean_2, weights_prior_deviation_1, weights_prior_deviation_2,
                 mixture_pie, rho_mean, extra_likelihood_emphasis, delta=0.1,
                 num_classes=1, num_dimensions=1, ss=0):
        number_output_units = num_classes * num_dimensions
        self.dataset_type = dataset_type
        self.num_classes = num_classes
        self.regularizer = regularizer
        self.number_mini_batches = tf.constant(number_mini_batches, dtype=tf.int64)
        self.activation_unit = activation_unit
        self.learning_rate = learning_rate
        self.hidden_units = hidden_units
        self.number_samples_variance_reduction = number_samples_variance_reduction
        self.precision_alpha = precision_alpha
        self.weights_prior_mean_1 = weights_prior_mean_1
        self.weights_prior_mean_2 = weights_prior_mean_2
        self.weights_prior_deviation_1 = weights_prior_deviation_1
        self.weights_prior_deviation_2 = weights_prior_deviation_2
        self.mixture_pie = mixture_pie
        self.rho_mean = rho_mean
        self.extra_likelihood_emphasis = extra_likelihood_emphasis

        self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')
        self.mini_batch_index = tf.Variable(1, dtype=tf.int64, trainable=False, name='mini_batch_index')

        #self.all_prior_cost = 0
        #self.all_variational_MAP_cost = 0
        self.all_likelihood_cost = 0

        output_forward_pass_1 = None
        output_forward_pass_2 = None
        output_forward_pass_3 = None
        output_forward_pass_4 = None
        output_forward_pass_5 = None
        output_forward_pass = None

        # Mixture Prior
        #ds = tf.contrib.distributions
        #self.WEIGHTS_PRIOR = ds.Mixture(cat=ds.Categorical(probs=[self.mixture_pie, 1.- self.mixture_pie]),
        #                                components=[ds.Normal(loc=self.weights_prior_mean_1, scale=self.weights_prior_deviation_1), ds.Normal(loc=self.weights_prior_mean_2, scale=self.weights_prior_deviation_2)],
        #                                name='WEIGHTS_MIXTURE_PRIOR')

        self.WEIGHTS_PRIOR = tf.distributions.Normal(loc=0., scale=1., name='WEIGHTS_PRIOR')

        with tf.name_scope('inputs'):
            self.input_x = tf.placeholder(tf.float32, shape=(None, input_dimensions), name='input_x')
            self.input_y = tf.placeholder(tf.float32, shape=(None, number_output_units), name='input_y')
            self.N_BOUND = tf.placeholder(tf.float32, shape=(), name='N_BOUND')            

        with tf.name_scope('input_output_forward_pass_mapping'):
            with tf.name_scope('between_input_and_first_hidden_layer'):
                self.sampled_weights_1, self.sampled_biases_1, self.mu_weights_1, self.rho_weights_1, self.mu_biases_1, self.rho_biases_1 = self.get_weights_and_phi(shape=(input_dimensions, self.hidden_units[0]))
                #self.all_variational_MAP_cost = self.all_variational_MAP_cost + variational_MAP_cost
                #self.all_prior_cost = self.all_prior_cost + prior_cost
                for variance_reductor_iterator in range(self.number_samples_variance_reduction):
                    if output_forward_pass_1 is None:
                        output_forward_pass_1 = self.fetch_ACTIVATION_UNIT(tf.matmul(self.input_x, self.sampled_weights_1[variance_reductor_iterator]) + self.sampled_biases_1[variance_reductor_iterator])[None]
                    else:
                        output_forward_pass_1 = tf.concat([output_forward_pass_1, self.fetch_ACTIVATION_UNIT(tf.matmul(self.input_x, self.sampled_weights_1[variance_reductor_iterator]) + self.sampled_biases_1[variance_reductor_iterator])[None]], 0)            
            
            with tf.name_scope('between_hidden_layers'):
                self.sampled_weights_2, self.sampled_biases_2, self.mu_weights_2, self.rho_weights_2, self.mu_biases_2, self.rho_biases_2 = self.get_weights_and_phi(shape=(self.hidden_units[0], self.hidden_units[1]))
                #self.all_variational_MAP_cost = self.all_variational_MAP_cost + variational_MAP_cost
                #self.all_prior_cost = self.all_prior_cost + prior_cost
                for variance_reductor_iterator in range(self.number_samples_variance_reduction):
                    if output_forward_pass_2 is None:
                        output_forward_pass_2 = self.fetch_ACTIVATION_UNIT(tf.matmul(output_forward_pass_1[variance_reductor_iterator], self.sampled_weights_2[variance_reductor_iterator]) + self.sampled_biases_2[variance_reductor_iterator])[None]
                    else:
                        output_forward_pass_2 = tf.concat([output_forward_pass_2, self.fetch_ACTIVATION_UNIT(tf.matmul(output_forward_pass_1[variance_reductor_iterator], self.sampled_weights_2[variance_reductor_iterator]) + self.sampled_biases_2[variance_reductor_iterator])[None]], 0)
                self.sampled_weights_3, self.sampled_biases_3, self.mu_weights_3, self.rho_weights_3, self.mu_biases_3, self.rho_biases_3 = self.get_weights_and_phi(shape=(self.hidden_units[1], self.hidden_units[2]))
                #self.all_variational_MAP_cost = self.all_variational_MAP_cost + variational_MAP_cost
                #self.all_prior_cost = self.all_prior_cost + prior_cost
                for variance_reductor_iterator in range(self.number_samples_variance_reduction):
                    if output_forward_pass_3 is None:
                        output_forward_pass_3 = self.fetch_ACTIVATION_UNIT(tf.matmul(output_forward_pass_2[variance_reductor_iterator], self.sampled_weights_3[variance_reductor_iterator]) + self.sampled_biases_3[variance_reductor_iterator])[None]
                    else:
                        output_forward_pass_3 = tf.concat([output_forward_pass_3, self.fetch_ACTIVATION_UNIT(tf.matmul(output_forward_pass_2[variance_reductor_iterator], self.sampled_weights_3[variance_reductor_iterator]) + self.sampled_biases_3[variance_reductor_iterator])[None]], 0)

            with tf.name_scope('between_last_hidden_and_output_layer'):
                self.sampled_weights_4, self.sampled_biases_4, self.mu_weights_4, self.rho_weights_4, self.mu_biases_4, self.rho_biases_4 = self.get_weights_and_phi(shape=(self.hidden_units[-1], number_output_units))
                #self.all_variational_MAP_cost = self.all_variational_MAP_cost + variational_MAP_cost
                #self.all_prior_cost = self.all_prior_cost + prior_cost
                for variance_reductor_iterator in range(self.number_samples_variance_reduction):
                    if dataset_type == 'categorical':
                        if output_forward_pass is None:
                            output_forward_pass = tf.nn.softmax(tf.matmul(output_forward_pass_3[variance_reductor_iterator], self.sampled_weights_4[variance_reductor_iterator]) + self.sampled_biases_4[variance_reductor_iterator])[None]
                        else:
                            output_forward_pass = tf.concat([output_forward_pass, (tf.matmul(output_forward_pass_3[variance_reductor_iterator], self.sampled_weights_4[variance_reductor_iterator]) + self.sampled_biases_4[variance_reductor_iterator])[None]], 0)
                        model_distribution = tf.distributions.Categorical(probs=output_forward_pass)
                        self.all_likelihood_cost = self.all_likelihood_cost + tf.reduce_sum(model_distribution.log_prob(self.input_y))
                    elif dataset_type == 'continuous':
                        if output_forward_pass is None:
                            output_forward_pass = (tf.matmul(output_forward_pass_3[variance_reductor_iterator], self.sampled_weights_4[variance_reductor_iterator]) + self.sampled_biases_4[variance_reductor_iterator])[None]
                        else:
                            output_forward_pass = tf.concat([output_forward_pass, (tf.matmul(output_forward_pass_3[variance_reductor_iterator], self.sampled_weights_4[variance_reductor_iterator]) + self.sampled_biases_4[variance_reductor_iterator])[None]], 0)
                        model_distribution = tf.distributions.Normal(loc=output_forward_pass[variance_reductor_iterator], scale=(1.0/tf.sqrt(self.precision_alpha)))
                        self.all_likelihood_cost = self.all_likelihood_cost + tf.reduce_sum(model_distribution.log_prob(self.input_y))

        with tf.name_scope('final_outputs'):
            if dataset_type == 'categorical':
                '''
                predicted_classes = tf.argmax(output_forward_pass, axis=1)
                self.mean_of_output_forward_pass = tf.reduce_max(tf.bincount())
                self.deviation_of_output_forward_pass = 
                self.maximum_of_output_forward_pass = tf.reduce_max(predicted_classes, axis=0, name='prediction_maximum')
                self.minimum_of_output_forward_pass = tf.reduce_min(predicted_classes, axis=0, name='prediction_minimum')
                '''
                pass
                ## This is to be fixed ##
            elif dataset_type == 'continuous':
                mean_of_output_forward_pass_temporary, variance_of_output_forward_pass = tf.nn.moments(output_forward_pass, 0, name='pred_mean_n_var')
                self.mean_of_output_forward_pass = tf.identity(mean_of_output_forward_pass_temporary, name='pred_mean')
                self.deviation_of_output_forward_pass = tf.sqrt(variance_of_output_forward_pass, name='pred_sigma')
                self.maximum_of_output_forward_pass = tf.reduce_max(output_forward_pass, axis=0, name='pred_max')
                self.minimum_of_output_forward_pass = tf.reduce_min(output_forward_pass, axis=0, name='pred_min')

        with tf.name_scope('cost'):
            self.intercost_minibatch_weight_pie = (tf.pow(2., tf.to_float(self.number_mini_batches - self.mini_batch_index)))/(tf.pow(2., tf.to_float(self.number_mini_batches)) - 1)
            #self.complexity_cost = self.all_variational_MAP_cost - self.all_prior_cost
            #self.var_MAP_cost = tf.identity(self.all_variational_MAP_cost, name='var_MAP_cost')
            #self.pr_cost = tf.identity(self.all_prior_cost, name='prior_cost')
            #self.ll_cost = tf.multiply(self.extra_likelihood_emphasis, self.all_likelihood_cost, name='likelihood_cost')
            self.all_likelihood_cost = tf.divide(self.all_likelihood_cost, tf.cast(self.number_samples_variance_reduction, tf.float32), name='likelihood_cost')
            self.all_prior_cost = tf.divide(tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_weights_1)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_biases_1)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_weights_2)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_biases_2)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_weights_3)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_biases_3)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_weights_4)) + tf.reduce_sum(self.WEIGHTS_PRIOR.log_prob(self.sampled_biases_4)), tf.cast(self.number_samples_variance_reduction, tf.float32))
            self.weight_vector = tf.concat([tf.reshape(self.sampled_weights_1, [-1]), tf.reshape(self.sampled_biases_1, [-1]), tf.reshape(self.sampled_weights_2, [-1]), tf.reshape(self.sampled_biases_2, [-1]), tf.reshape(self.sampled_weights_3, [-1]), tf.reshape(self.sampled_biases_3, [-1]), tf.reshape(self.sampled_weights_4, [-1]), tf.reshape(self.sampled_biases_4, [-1])], axis=0)
            self.mu_vector = tf.concat([tf.reshape(self.mu_weights_1, [-1]), tf.reshape(self.mu_biases_1, [-1]), tf.reshape(self.mu_weights_2, [-1]), tf.reshape(self.mu_biases_2, [-1]), tf.reshape(self.mu_weights_3, [-1]), tf.reshape(self.mu_biases_3, [-1]), tf.reshape(self.mu_weights_4, [-1]), tf.reshape(self.mu_biases_4, [-1])], axis=0)
            self.rho_vector = tf.concat([tf.reshape(self.rho_weights_1, [-1]), tf.reshape(self.rho_biases_1, [-1]), tf.reshape(self.rho_weights_2, [-1]), tf.reshape(self.rho_biases_2, [-1]), tf.reshape(self.rho_weights_3, [-1]), tf.reshape(self.rho_biases_3, [-1]), tf.reshape(self.rho_weights_4, [-1]), tf.reshape(self.rho_biases_4, [-1])], axis=0)
            self.variational_distribution = tf.distributions.Normal(loc=self.mu_vector, scale=tf.log(1 + tf.exp(self.rho_vector)))
            self.all_variational_MAP_cost = tf.divide(tf.reduce_sum(self.variational_distribution.log_prob(self.weight_vector)), tf.cast(self.number_samples_variance_reduction, tf.float32))

            self.ELBO = tf.subtract((self.intercost_minibatch_weight_pie * (self.all_variational_MAP_cost - self.all_prior_cost)), (self.extra_likelihood_emphasis * self.all_likelihood_cost), name='ELBO')
            if self.regularizer == 'PAC_Bayes':
                if ss == 0:
                    s_square = self.extra_likelihood_emphasis
                elif ss == 1:
                    s_square = (1/self.precision_alpha)
                elif ss == 2:
                    s_square = (self.extra_likelihood_emphasis/self.precision_alpha)
                else:
                    s_square = (1/self.precision_alpha) + self.extra_likelihood_emphasis
                self.pac_bayes_bound = tf.subtract((((self.all_variational_MAP_cost - self.all_prior_cost + tf.log(1/delta))/self.N_BOUND) + (s_square/2)), ((self.extra_likelihood_emphasis * self.all_likelihood_cost)/self.N_BOUND), name='pac_bayes_bound')
                #self.pac_bayes_bound = tf.add((-self.extra_likelihood_emphasis * self.all_likelihood_cost), tf.add(((tf.reduce_sum(tf.distributions.kl_divergence(self.variational_distribution, self.WEIGHTS_PRIOR)) + tf.log(1/delta))/N_BOUND), ((self.extra_likelihood_emphasis*self.precision_alpha)/2)), name='pac_bayes_bound')
                self.cost = tf.identity(self.ELBO, name='cost')
            else:
                self.pac_bayes_bound = tf.constant(0., name='pac_bayes_bound')
                self.cost = tf.multiply(-self.extra_likelihood_emphasis, self.all_likelihood_cost, name='cost')

        with tf.name_scope('error'):
            if dataset_type == 'continuous':
                self.pred_err = tf.reduce_mean(tf.squared_difference(self.mean_of_output_forward_pass, self.input_y), name='pred_err')
            elif dataset_type == 'categorical':
                pass
                # TODO: classification error is not implemented yet. It should compare
                # tf.argmax of the network outputs against the labels (e.g. to define
                # self.pred_train_err and self.pred_val_err).

        with tf.name_scope('optimization'):
            optimizer = tf.train.AdamOptimizer(self.learning_rate, name='adam_optimizer')
            self.training = optimizer.minimize(self.cost, global_step=self.global_step, name='training')

        with tf.name_scope('summaries'):
            #tf.summary.scalar(name='pred_err_log', tensor=self.pred_err)
            #tf.summary.scalar(name='deviation_of_output_forward_pass_log', tensor=tf.reduce_mean(self.deviation_of_output_forward_pass))
            #tf.summary.scalar(name='prior_cost_log', tensor=self.all_prior_cost)
            #tf.summary.scalar(name='variational_MAP_cost_log', tensor=self.all_variational_MAP_cost)
            #tf.summary.scalar(name='likelihood_cost_log', tensor=self.all_likelihood_cost)
            #tf.summary.scalar(name='complexity_cost_log', tensor=self.complexity_cost)
            tf.summary.scalar(name='cost_log', tensor=self.cost)
            self.summary_op = tf.summary.merge_all()

        with tf.name_scope('mini_batch_index_update'):
            self.mini_batch_index_update = tf.assign(
                ref=self.mini_batch_index,
                value=((self.mini_batch_index % self.number_mini_batches) + 1),
                name='mini_batch_index_update')
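# A minimal sketch (toy shapes, hypothetical names) of the complexity term used above:
# the sampled estimate of E_q[log q(w) - log p(w)] should agree with the closed-form
# tf.distributions.kl_divergence that the commented-out PAC-Bayes line refers to.
import tensorflow as tf

mu = tf.constant([0.3, -0.1, 0.7])
rho = tf.constant([-1.0, -2.0, 0.5])
q = tf.distributions.Normal(loc=mu, scale=tf.log(1. + tf.exp(rho)))  # softplus keeps the scale positive
p = tf.distributions.Normal(loc=0., scale=1.)

w = q.sample(1000)                                                   # Monte Carlo samples of the weights
kl_mc = tf.reduce_mean(tf.reduce_sum(q.log_prob(w) - p.log_prob(w), axis=-1))
kl_exact = tf.reduce_sum(tf.distributions.kl_divergence(q, p))

with tf.Session() as sess:
    print(sess.run([kl_mc, kl_exact]))                               # the two estimates should be close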
def safe_div(numerator, denominator):
    """Safe division, return 0 if denominator is 0"""
    numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)
    zeros = tf.zeros_like(numerator, dtype=numerator.dtype)
    denominator_is_zero = tf.equal(denominator, zeros)
    return tf.where(denominator_is_zero, zeros, numerator / denominator)
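# Hedged usage check of safe_div on toy values: it returns 0 where the denominator
# is 0 instead of producing inf/nan.
num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])
with tf.Session() as sess:
    print(sess.run(safe_div(num, den)))  # [0.5, 0.0, 0.75]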
Example #53
    def get_locs(self):
        locs = [[x, y]
                for y in range(self.height)
                for x in range(self.width)]
        return tf.to_float(locs)
Example #54
    def build_gcnn_graph(self):
        config = self.config
        vocab_size = self.vocab.vocab_size

        self.global_step = tf.get_variable("global_step", [],
                                           tf.int32,
                                           initializer=tf.zeros_initializer,
                                           trainable=False)

        with tf.name_scope('input'):
            self.x, self.y, self.w = self.iterator.get_next()
            # self.x = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])
            # self.y = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])
            # self.w = tf.placeholder(tf.int32, [config.batch_size, config.num_steps])

            paddings = tf.constant([[0, 0], [config.filter_w // 2, 0]])
            self.padded_x = tf.pad(self.x, paddings, "CONSTANT")
            paddings = tf.constant([[0, 0], [0, config.filter_w // 2]])
            self.padded_y = tf.pad(self.y, paddings, "CONSTANT")
            self.padded_w = tf.pad(self.w, paddings, "CONSTANT")

            self.keep_prob = tf.get_variable('keep_prob', [],
                                             dtype=tf.float32,
                                             trainable=False)
            self.new_keep_prob = tf.placeholder(tf.float32,
                                                shape=[],
                                                name="new_keep_prob")
            self.keep_prob_update = tf.assign(self.keep_prob,
                                              self.new_keep_prob)

            self.lr = tf.get_variable('lr', [],
                                      dtype=tf.float32,
                                      trainable=False)
            self.new_lr = tf.placeholder(tf.float32, shape=[], name="new_lr")
            self.lr_update = tf.assign(self.lr, self.new_lr)

        with tf.name_scope('embedding'):
            self.embed = tf.get_variable('embedding',
                                         shape=[vocab_size, config.embed_size],
                                         dtype=tf.float32)
            self.embed_x = tf.nn.embedding_lookup(self.embed, self.padded_x)

            if config.keep_prob < 1.0:
                self.embed_x = tf.nn.dropout(self.embed_x, config.keep_prob)

        with tf.name_scope('gcnn'):
            width = config.num_steps + config.filter_w // 2
            self.embed_x = tf.reshape(
                self.embed_x, [config.batch_size, width, config.embed_size])
            h = self.embed_x

            for i in range(config.num_layers + 1):
                fanin_depth = h.get_shape()[-1]
                filter_size = config.filter_size
                shape = (config.filter_w, fanin_depth, filter_size)

                with tf.variable_scope('layer_%d' % i):
                    with tf.variable_scope('linear'):
                        W = tf.get_variable(
                            'W', shape, tf.float32,
                            tf.random_normal_initializer(0.0, 0.1))
                        b = tf.get_variable('b', filter_size, tf.float32,
                                            tf.constant_initializer(1.0))
                        conv_w = tf.add(
                            tf.nn.conv1d(h, W, stride=1, padding='SAME'), b)
                    with tf.variable_scope('gated'):
                        W = tf.get_variable(
                            'W', shape, tf.float32,
                            tf.random_normal_initializer(0.0, 0.1))
                        b = tf.get_variable('b', filter_size, tf.float32,
                                            tf.constant_initializer(1.0))
                        conv_v = tf.add(
                            tf.nn.conv1d(h, W, stride=1, padding='SAME'), b)
                    h = conv_w * tf.sigmoid(conv_v)
                    if i == 0:
                        res_input = h
                    elif i % config.block_size == 0:
                        h += res_input
                        res_input = h
            self.outputs = tf.reshape(
                h, [config.batch_size * width, config.filter_size])

        with tf.name_scope('softmax'):
            self.softmax_w = tf.get_variable('softmax_w',
                                             [vocab_size, config.filter_size],
                                             dtype=tf.float32)
            self.softmax_b = tf.get_variable('softmax_b', [vocab_size],
                                             dtype=tf.float32)

        with tf.name_scope('loss'):
            if config.num_sampled > 0:
                labels = tf.reshape(self.padded_y,
                                    [config.batch_size * width, 1])
                self.loss = tf.nn.sampled_softmax_loss(
                    weights=self.softmax_w,
                    biases=self.softmax_b,
                    labels=labels,
                    inputs=self.outputs,
                    num_sampled=config.num_sampled,
                    num_classes=vocab_size,
                    partition_strategy="div")
            else:
                labels = tf.reshape(self.padded_y, [config.batch_size * width])
                logits = tf.matmul(self.outputs, tf.transpose(self.softmax_w))
                logits = tf.nn.bias_add(logits, self.softmax_b)
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=labels, logits=logits)
            self.loss = tf.reduce_mean(
                tf.reshape(self.loss, [width, config.batch_size]) * tf.reshape(
                    tf.to_float(self.padded_w), [width, config.batch_size]),
                axis=1)
            self.loss = tf.reshape(self.loss, [width])
            self.loss = tf.reduce_sum(self.loss)

        with tf.name_scope('optimize'):
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars),
                                              config.max_grad_norm)
            optimizer = tf.train.GradientDescentOptimizer(self.lr)
            self.train_op = optimizer.apply_gradients(
                zip(grads, tvars), global_step=self.global_step)

        with tf.name_scope('ema'):
            gcnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          ".*gcnn.*")
            ema = tf.train.ExponentialMovingAverage(decay=0.999)
            self.train_op = tf.group(*[self.train_op, ema.apply(gcnn_vars)])
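# A minimal standalone sketch of the gated block above, i.e. a gated linear unit
# h = (x conv W + b) * sigmoid(x conv V + c). Shapes and variable names here are
# illustrative only, not the model's actual configuration.
import tensorflow as tf

x = tf.random_normal([2, 10, 8])                      # toy batch: [batch, width, channels]
W = tf.get_variable('glu_W', [3, 8, 16], tf.float32,
                    tf.random_normal_initializer(0.0, 0.1))
b = tf.get_variable('glu_b', [16], tf.float32, tf.constant_initializer(1.0))
V = tf.get_variable('glu_V', [3, 8, 16], tf.float32,
                    tf.random_normal_initializer(0.0, 0.1))
c = tf.get_variable('glu_c', [16], tf.float32, tf.constant_initializer(1.0))

conv_w = tf.nn.conv1d(x, W, stride=1, padding='SAME') + b
conv_v = tf.nn.conv1d(x, V, stride=1, padding='SAME') + c
h = conv_w * tf.sigmoid(conv_v)                       # gated output: [2, 10, 16]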
Example #55
def main(args):
    debug = (args.debug == 'True')
    print(args)
    np.random.seed(args.seed)
    with tf.Graph().as_default():
        train_dataset, num_train_file = DateSet(args.file_list, args, debug)
        test_dataset, num_test_file = DateSet(args.test_list, args, debug)
        list_ops = {}

        batch_train_dataset = train_dataset.batch(args.batch_size).repeat()
        train_iterator = batch_train_dataset.make_one_shot_iterator()
        train_next_element = train_iterator.get_next()

        batch_test_dataset = test_dataset.batch(args.batch_size).repeat()
        test_iterator = batch_test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()

        list_ops['num_train_file'] = num_train_file
        list_ops['num_test_file'] = num_test_file

        model_dir = args.model_dir
        # if 'test' in model_dir and debug and os.path.exists(model_dir):
        #     import shutil
        #     shutil.rmtree(model_dir)
        # assert not os.path.exists(model_dir)
        # os.mkdir(model_dir)

        print('Total number of examples: {}'.format(num_train_file))
        print('Test number of examples: {}'.format(num_test_file))
        print('Model dir: {}'.format(model_dir))

        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        list_ops['global_step'] = global_step
        list_ops['train_dataset'] = train_dataset
        list_ops['test_dataset'] = test_dataset
        list_ops['train_next_element'] = train_next_element
        list_ops['test_next_element'] = test_next_element

        epoch_size = num_train_file // args.batch_size
        print('Number of batches per epoch: {}'.format(epoch_size))

        image_batch = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3),\
                                     name='image_batch')
        landmark_batch = tf.placeholder(tf.float32,
                                        shape=(None, 196),
                                        name='landmark_batch')
        attribute_batch = tf.placeholder(tf.int32,
                                         shape=(None, 6),
                                         name='attribute_batch')
        euler_angles_gt_batch = tf.placeholder(tf.float32,
                                               shape=(None, 3),
                                               name='euler_angles_gt_batch')

        list_ops['image_batch'] = image_batch
        list_ops['landmark_batch'] = landmark_batch
        list_ops['attribute_batch'] = attribute_batch
        list_ops['euler_angles_gt_batch'] = euler_angles_gt_batch

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        list_ops['phase_train_placeholder'] = phase_train_placeholder

        print('Building training graph.')
        # total_loss, landmarks, heatmaps_loss, heatmaps= create_model(image_batch, landmark_batch,\
        #                                                                                phase_train_placeholder, args)
        landmarks_pre, landmarks_loss, euler_angles_pre = create_model(image_batch, landmark_batch,\
                                                                              phase_train_placeholder, args)

        attributes_w_n = tf.to_float(attribute_batch[:, 1:6])
        # _num = attributes_w_n.shape[0]
        mat_ratio = tf.reduce_mean(attributes_w_n, axis=0)
        mat_ratio = tf.map_fn(
            lambda x:
            (tf.cond(x > 0, lambda: 1 / x, lambda: float(args.batch_size))),
            mat_ratio)
        attributes_w_n = tf.convert_to_tensor(attributes_w_n * mat_ratio)
        attributes_w_n = tf.reduce_sum(attributes_w_n, axis=1)
        list_ops['attributes_w_n_batch'] = attributes_w_n

        L2_loss = tf.add_n(tf.losses.get_regularization_losses())
        _sum_k = tf.reduce_sum(tf.map_fn(
            lambda x: 1 - tf.cos(abs(x)),
            euler_angles_gt_batch - euler_angles_pre),
                               axis=1)
        loss_sum = tf.reduce_sum(tf.square(landmark_batch - landmarks_pre),
                                 axis=1)
        loss_sum = tf.reduce_mean(loss_sum * _sum_k * attributes_w_n)
        loss_sum += L2_loss

        train_op, lr_op = train_model(loss_sum, global_step, num_train_file,
                                      args)

        list_ops['landmarks'] = landmarks_pre
        list_ops['L2_loss'] = L2_loss
        list_ops['loss'] = loss_sum
        list_ops['train_op'] = train_op
        list_ops['lr_op'] = lr_op

        test_mean_error = tf.Variable(tf.constant(0.0),
                                      dtype=tf.float32,
                                      name='ME')
        test_failure_rate = tf.Variable(tf.constant(0.0),
                                        dtype=tf.float32,
                                        name='FR')
        test_10_loss = tf.Variable(tf.constant(0.0),
                                   dtype=tf.float32,
                                   name='TestLoss')
        train_loss = tf.Variable(tf.constant(0.0),
                                 dtype=tf.float32,
                                 name='TrainLoss')
        train_loss_l2 = tf.Variable(tf.constant(0.0),
                                    dtype=tf.float32,
                                    name='TrainLoss2')
        tf.summary.scalar('test_mean_error', test_mean_error)
        tf.summary.scalar('test_failure_rate', test_failure_rate)
        tf.summary.scalar('test_10_loss', test_10_loss)
        tf.summary.scalar('train_loss', train_loss)
        tf.summary.scalar('train_loss_l2', train_loss_l2)

        save_params = tf.trainable_variables()
        saver = tf.train.Saver(save_params, max_to_keep=None)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)

        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=False,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        with sess.as_default():
            epoch_start = 0
            if args.pretrained_model:
                pretrained_model = args.pretrained_model
                if (not os.path.isdir(pretrained_model)):
                    print('Restoring pretrained model: {}'.format(
                        pretrained_model))
                    saver.restore(sess, args.pretrained_model)
                else:
                    print('Model directory: {}'.format(pretrained_model))
                    ckpt = tf.train.get_checkpoint_state(pretrained_model)
                    model_path = ckpt.model_checkpoint_path
                    assert (ckpt and model_path)
                    epoch_start = int(
                        model_path[model_path.find('model.ckpt-') + 11:]) + 1
                    print('Checkpoint file: {}'.format(model_path))
                    saver.restore(sess, model_path)

            # if args.save_image_example:
            #     save_image_example(sess, list_ops, args)

            print('Running train.')

            merged = tf.summary.merge_all()
            train_write = tf.summary.FileWriter(log_dir, sess.graph)
            for epoch in range(epoch_start, args.max_epoch):
                start = time.time()
                train_L, train_L2 = train(sess, epoch_size, epoch, list_ops)
                print("train time: {}".format(time.time() - start))

                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                metagraph_path = os.path.join(model_dir, 'model.meta')
                saver.save(sess,
                           checkpoint_path,
                           global_step=epoch,
                           write_meta_graph=False)
                if not os.path.exists(metagraph_path):
                    saver.export_meta_graph(metagraph_path)

                start = time.time()
                test_ME, test_FR, test_loss = test(sess, list_ops, args)
                print("test time: {}".format(time.time() - start))

                summary, _, _, _, _, _ = sess.run([
                    merged,
                    test_mean_error.assign(test_ME),
                    test_failure_rate.assign(test_FR),
                    test_10_loss.assign(test_loss),
                    train_loss.assign(train_L),
                    train_loss_l2.assign(train_L2)
                ])
                train_write.add_summary(summary, epoch)
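# A hedged toy sketch of the attribute re-weighting above: each attribute's weight is
# the inverse of its batch frequency (capped at batch_size when it never occurs), and
# each example sums the weights of the attributes it carries. All values are illustrative.
import tensorflow as tf

attrs = tf.constant([[1., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.],
                     [1., 0., 0.]])                    # 4 examples x 3 binary attributes
batch_size = 4.0
freq = tf.reduce_mean(attrs, axis=0)                   # [0.75, 0.25, 0.0]
inv_freq = tf.map_fn(
    lambda f: tf.cond(f > 0, lambda: 1. / f, lambda: batch_size), freq)
example_weights = tf.reduce_sum(attrs * inv_freq, axis=1)
with tf.Session() as sess:
    print(sess.run(example_weights))                   # approx. [1.33, 1.33, 4.0, 1.33]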
Example #56
    def _build(self):

        inpts = [self.tiled_obs]
        if self.coords is not None:
            inpts.append(self.tiled_coords)

        self.outputs = self.sequence(*inpts)
        self.__dict__.update(self.outputs)

        log_weights = tf.reduce_sum(self.outputs.log_weights_per_timestep, 0)
        self.log_weights = tf.reshape(log_weights, (self.batch_size, self.k_particles))

        self.elbo_vae = tf.reduce_mean(self.log_weights)
        self.elbo_iwae_per_example = targets.iwae(self.log_weights)
        self.elbo_iwae = tf.reduce_mean(self.elbo_iwae_per_example)

        self.normalised_elbo_vae = self.elbo_vae / tf.to_float(self.n_timesteps)
        self.normalised_elbo_iwae = self.elbo_iwae / tf.to_float(self.n_timesteps)
        tf.summary.scalar('normalised_vae', self.normalised_elbo_vae)
        tf.summary.scalar('normalised_iwae', self.normalised_elbo_iwae)

        self.importance_weights = tf.stop_gradient(tf.nn.softmax(self.log_weights, -1))
        self.ess = ops.ess(self.importance_weights, average=True)
        self.iw_distrib = tf.distributions.Categorical(probs=self.importance_weights)
        self.iw_resampling_idx = self.iw_distrib.sample()


        # Logging
        self._log_resampled(self.data_ll_per_sample, 'data_ll')
        self._log_resampled(self.log_p_z_per_sample, 'log_p_z')
        self._log_resampled(self.log_q_z_given_x_per_sample, 'log_q_z_given_x')
        self._log_resampled(self.kl_per_sample, 'kl')

        # Mean squared error between inpt and mean of output distribution
        inpt_obs = self.tiled_obs
        if inpt_obs.shape[-1] == 1:
            inpt_obs = tf.squeeze(inpt_obs, -1)

        axes = [0] + list(range(inpt_obs.shape.ndims)[2:])
        self.mse_per_sample = tf.reduce_mean((inpt_obs - self.canvas) ** 2, axes)
        self._log_resampled(self.mse_per_sample, 'mse')
        self.raw_mse = tf.reduce_mean(self.mse_per_sample)
        tf.summary.scalar('raw_mse', self.raw_mse)

        if hasattr(self, 'num_steps_per_sample'):
            self._log_resampled(self.num_steps_per_sample, 'num_steps')

        if self.gt_presence is not None:
            self.gt_num_steps = tf.reduce_sum(self.gt_presence, -1)

            num_steps_per_sample = tf.reshape(self.num_steps_per_sample, (-1, self.batch_size, self.k_particles))
            gt_num_steps = tf.expand_dims(self.gt_num_steps, -1)

            self.num_step_accuracy_per_example = tf.to_float(tf.equal(gt_num_steps, num_steps_per_sample))
            self.raw_num_step_accuracy = tf.reduce_mean(self.num_step_accuracy_per_example)
            self.num_step_accuracy = self._imp_weighted_mean(self.num_step_accuracy_per_example)
            tf.summary.scalar('num_step_acc', self.num_step_accuracy)

        # For rendering
        resampled_names = 'obj_id canvas glimpse presence_prob presence presence_logit where'.split()
        for name in resampled_names:
            try:
                setattr(self, 'resampled_' + name, self.resample(getattr(self, name), axis=1))
            except AttributeError:
                pass
        try:
            self._log_resampled(self.num_disc_steps_per_sample, 'num_disc_steps')
            self._log_resampled(self.num_prop_steps_per_sample, 'num_prop_steps')
        except AttributeError:
            pass
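# `targets.iwae` above converts per-particle log-weights into an importance-weighted
# bound; assuming it follows the standard IWAE estimator, a minimal stand-in would be
# a log-mean-exp over the particle axis (names and shapes here are hypothetical).
import tensorflow as tf

def iwae_bound(log_weights):
    # log_weights: [batch_size, k_particles]
    k = tf.to_float(tf.shape(log_weights)[-1])
    return tf.reduce_logsumexp(log_weights, axis=-1) - tf.log(k)

log_w = tf.random_normal([16, 5])
elbo_iwae_per_example = iwae_bound(log_w)              # [16]
elbo_iwae = tf.reduce_mean(elbo_iwae_per_example)
Example #57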
	def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
				nsteps, ent_coef, vf_coef, max_grad_norm):
		sess = tf.get_default_session()
		filmObj = filmInit(sess, nenvs)
		act_model = policy(sess, ob_space, ac_space, nbatch_act, 1, filmObj, reuse=False,st = "act")
		# print('Shape of obs in the model is ',ob_space.shape)
		train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps, filmObj, reuse=True,st = "train")
		self.filmObj = filmObj
		A = train_model.pdtype.sample_placeholder([None])
		ADV = tf.placeholder(tf.float32, [None])
		R = tf.placeholder(tf.float32, [None])
		OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
		OLDVPRED = tf.placeholder(tf.float32, [None])
		LR = tf.placeholder(tf.float32, [])
		CLIPRANGE = tf.placeholder(tf.float32, [])

		neglogpac = train_model.pd.neglogp(A)
		entropy = tf.reduce_mean(train_model.pd.entropy())

		vpred = train_model.vf
		vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
		vf_losses1 = tf.square(vpred - R)
		vf_losses2 = tf.square(vpredclipped - R)
		vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
		ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
		pg_losses = -ADV * ratio
		pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
		pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
		approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
		clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
		loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
		with tf.variable_scope('model'):
			params = tf.trainable_variables()
		grads = tf.gradients(loss, params)
		if max_grad_norm is not None:
			grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
		grads = list(zip(grads, params))
		print('print this before using the optimizer')
		trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
		_train = trainer.apply_gradients(grads)
		
		# def reinit():
		# 	filmObj.reinit()

		def train(idx,lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
			advs = returns - values
			advs = (advs - advs.mean()) / (advs.std() + 1e-8)
			td_map = {train_model.X:obs,train_model.index :[idx], A:actions, ADV:advs, R:returns, LR:lr,
					CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
			if states is not None:
				td_map[train_model.S] = states
				td_map[train_model.M] = masks
			return sess.run(
				[pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
				td_map
			)[:-1]
		self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']

		def save(save_path):
			saver = tf.train.Saver()
			saver.save(sess, save_path + '_tf')

		def load(load_path):
			saver = tf.train.Saver()
			print('Loading ' + load_path + '_tf')
			saver.restore(sess, load_path + '_tf')

		self.train = train
		self.train_model = train_model
		self.act_model = act_model
		self.step = act_model.step
		self.value = act_model.value
		self.initial_state = act_model.initial_state
		self.save = save
		self.load = load
		tf.global_variables_initializer().run(session=sess) #pylint: disable=E1101
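# A quick hedged check of the clipped surrogate above on hand-picked numbers:
# the clip keeps the ratio's contribution inside [1 - eps, 1 + eps].
import tensorflow as tf

ratio = tf.constant([0.5, 1.0, 1.5])
adv = tf.constant([1.0, 1.0, 1.0])
cliprange = 0.2
pg_losses = -adv * ratio
pg_losses2 = -adv * tf.clip_by_value(ratio, 1.0 - cliprange, 1.0 + cliprange)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
with tf.Session() as sess:
    print(sess.run(pg_loss))                            # mean of [-0.5, -1.0, -1.2] = -0.9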
Example #58
     2. Differentiable: the function must be differentiable at every point of its domain, so that gradient descent can make proper use of the outputs of this class of activation functions.
"""
# 1. tf.nn.relu: the rectified linear unit, also called the ramp function; its graph looks much like a skateboard ramp.
# ReLU is piecewise linear.
# Non-negative input: the output equals the input.
# Negative input:     the output is 0.
# Pros: not affected by vanishing gradients, and its range is [0, +inf).
# Cons: with a large learning rate it is easily hurt by saturated (dead) neurons.
features = tf.range(-2, 3)
sess = tf.Session()
print(sess.run([features, tf.nn.relu(features)]))
# [array([-2, -1,  0,  1,  2]), array([0, 0, 0, 1, 2])]

# 2. tf.sigmoid: returns values in [0.0, 1.0]; large inputs tend towards 1, small inputs towards 0.
# tf.sigmoid (tf.nn.sigmoid) currently only accepts floating-point inputs.
features0 = tf.to_float(tf.range(-1, 3))
print(sess.run([features0, tf.sigmoid(features0)]))
# [array([-1.,  0.,  1.,  2.], dtype=float32), array([0.26894143, 0.5       , 0.7310586 , 0.880797  ], dtype=float32)]

# 3. tf.tanh: the hyperbolic tangent is very similar to tf.sigmoid, with comparable pros and cons.
# Its range is [-1, 1]; the ability to output negative values can be very useful in certain network architectures.
# tf.tanh (tf.nn.tanh) currently only supports floating-point inputs.
features1 = tf.to_float(tf.range(-1, 3))
print(sess.run([features1, tf.tanh(features1)]))
# [array([-1.,  0.,  1.,  2.], dtype=float32), array([-0.7615942,  0.       ,  0.7615942,  0.9640276], dtype=float32)]

# 4. tf.nn.dropout: sets each output to 0.0 with a configurable probability.
# Introducing a small amount of randomness helps training.
# Use case: when the patterns being learned are too strongly coupled to their neighboring features, add a little noise to the output.

# This layer should only be used during training. If it is used at test time, the injected random noise will distort the results.
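# A small hedged example in the same style as the sections above; dropout is random,
# so the second array will differ from run to run (the values shown are illustrative).
features2 = tf.constant([-0.1, 0.0, 0.1, 0.2])
print(sess.run([features2, tf.nn.dropout(features2, keep_prob=0.5)]))
# e.g. [array([-0.1,  0. ,  0.1,  0.2], dtype=float32), array([-0.2,  0. ,  0. ,  0.4], dtype=float32)]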
Example #59
def noisy_top_k_gating(x,
                       num_experts,
                       train,
                       k=2,
                       initializer=tf.zeros_initializer(),
                       noisy_gating=True,
                       noise_epsilon=1e-2,
                       name=None):
    """Noisy top-k gating.
  See paper: https://arxiv.org/abs/1701.06538.
  Args:
    x: input Tensor with shape [batch_size, input_size]
    num_experts: an integer
    train: a boolean - we only add noise at training time.
    k: an integer - number of experts per example
    initializer: an initializer
    noisy_gating: a boolean
    noise_epsilon: a float
    name: an optional string
  Returns:
    gates: a Tensor with shape [batch_size, num_experts]
    load: a Tensor with shape [num_experts]
  """
    with tf.variable_scope(name, default_name="noisy_top_k_gating"):
        input_size = x.get_shape().as_list()[-1]
        w_gate = tf.get_variable("w_gate", [input_size, num_experts],
                                 tf.float32, initializer)
        if noisy_gating:
            w_noise = tf.get_variable("w_noise", [input_size, num_experts],
                                      tf.float32, initializer)
        clean_logits = tf.matmul(x, w_gate)
        if noisy_gating:
            raw_noise_stddev = tf.matmul(x, w_noise)
            noise_stddev = (
                (tf.nn.softplus(raw_noise_stddev) + noise_epsilon) *
                (tf.to_float(train)))
            noisy_logits = clean_logits + (
                tf.random_normal(tf.shape(clean_logits)) * noise_stddev)
            logits = noisy_logits
            if common_layers.should_generate_summaries():
                tf.summary.histogram("noisy_logits", noisy_logits)
                tf.summary.histogram("noise_stddev", noise_stddev)
        else:
            logits = clean_logits
        top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts))
        # top k logits has shape [batch, k]
        top_k_logits = tf.slice(top_logits, [0, 0], [-1, k])
        top_k_indices = tf.slice(top_indices, [0, 0], [-1, k])
        top_k_gates = tf.nn.softmax(top_k_logits)
        # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
        # positions corresponding to all but the top k experts per example.
        gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
                                              num_experts)
        if noisy_gating and k < num_experts:
            load = tf.reduce_sum(
                _prob_in_top_k(clean_logits, noisy_logits, noise_stddev,
                               top_logits, k), 0)
        else:
            load = _gates_to_load(gates)
        if common_layers.should_generate_summaries():
            tf.summary.histogram("importance", tf.reduce_sum(gates, 0))
            tf.summary.histogram("load", load)
        return gates, load
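# Hedged usage sketch for noisy_top_k_gating. It assumes the surrounding module's
# helpers (_my_top_k, _prob_in_top_k, _rowwise_unsorted_segment_sum, _gates_to_load,
# common_layers) are available; the shapes below are toy values.
import tensorflow as tf

x_toy = tf.random_normal([32, 64])                    # 32 examples, 64 features
gates, load = noisy_top_k_gating(x_toy, num_experts=8, train=True, k=2)
# gates: [32, 8] with at most 2 non-zero entries per row; load: [8]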
Example #60
    def call(self, x):
        input_image, y_pred, y_true, true_boxes = x

        # reshape y_pred to [batch, grid_h, grid_w, 3, 4+1+nb_class]
        y_pred = tf.reshape(y_pred, tf.concat([tf.shape(y_pred)[:3], tf.constant([3, -1])], axis=0))
        
        # initialize the masks
        object_mask     = tf.expand_dims(y_true[..., 4], 4)

        # the variable to keep track of number of batches processed
        batch_seen = tf.Variable(0.)        

        # compute grid factor and net factor
        grid_h      = tf.shape(y_true)[1]
        grid_w      = tf.shape(y_true)[2]
        grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32), [1,1,1,1,2])

        net_h       = tf.shape(input_image)[1]
        net_w       = tf.shape(input_image)[2]            
        net_factor  = tf.reshape(tf.cast([net_w, net_h], tf.float32), [1,1,1,1,2])
        
        """
        Adjust prediction
        """
        pred_box_xy    = (self.cell_grid[:,:grid_h,:grid_w,:,:] + tf.sigmoid(y_pred[..., :2]))  # sigma(t_xy) + c_xy
        pred_box_wh    = y_pred[..., 2:4]                                                       # t_wh
        pred_box_conf  = tf.expand_dims(tf.sigmoid(y_pred[..., 4]), 4)                          # adjust confidence
        pred_box_class = y_pred[..., 5:]                                                        # adjust class probabilities      

        """
        Adjust ground truth
        """
        true_box_xy    = y_true[..., 0:2] # (sigma(t_xy) + c_xy)
        true_box_wh    = y_true[..., 2:4] # t_wh
        true_box_conf  = tf.expand_dims(y_true[..., 4], 4)
        true_box_class = tf.argmax(y_true[..., 5:], -1)         

        """
        Compare each predicted box to all true boxes
        """        
        # initially, drag all objectness of all boxes to 0
        conf_delta  = pred_box_conf - 0 

        # then, ignore the boxes which have good overlap with some true box
        true_xy = true_boxes[..., 0:2] / grid_factor
        true_wh = true_boxes[..., 2:4] / net_factor
        
        true_wh_half = true_wh / 2.
        true_mins    = true_xy - true_wh_half
        true_maxes   = true_xy + true_wh_half
        
        pred_xy = tf.expand_dims(pred_box_xy / grid_factor, 4)
        pred_wh = tf.expand_dims(tf.exp(pred_box_wh) * self.anchors / net_factor, 4)
        
        pred_wh_half = pred_wh / 2.
        pred_mins    = pred_xy - pred_wh_half
        pred_maxes   = pred_xy + pred_wh_half    

        intersect_mins  = tf.maximum(pred_mins,  true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)

        intersect_wh    = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
        
        true_areas = true_wh[..., 0] * true_wh[..., 1]
        pred_areas = pred_wh[..., 0] * pred_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores  = tf.truediv(intersect_areas, union_areas)

        best_ious   = tf.reduce_max(iou_scores, axis=4)        
        conf_delta *= tf.expand_dims(tf.to_float(best_ious < self.ignore_thresh), 4)

        """
        Compute some online statistics
        """            
        true_xy = true_box_xy / grid_factor
        true_wh = tf.exp(true_box_wh) * self.anchors / net_factor

        true_wh_half = true_wh / 2.
        true_mins    = true_xy - true_wh_half
        true_maxes   = true_xy + true_wh_half

        pred_xy = pred_box_xy / grid_factor
        pred_wh = tf.exp(pred_box_wh) * self.anchors / net_factor 
        
        pred_wh_half = pred_wh / 2.
        pred_mins    = pred_xy - pred_wh_half
        pred_maxes   = pred_xy + pred_wh_half      

        intersect_mins  = tf.maximum(pred_mins,  true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)
        intersect_wh    = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
        
        true_areas = true_wh[..., 0] * true_wh[..., 1]
        pred_areas = pred_wh[..., 0] * pred_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores  = tf.truediv(intersect_areas, union_areas)
        iou_scores  = object_mask * tf.expand_dims(iou_scores, 4)
        
        count       = tf.reduce_sum(object_mask)
        count_noobj = tf.reduce_sum(1 - object_mask)
        detect_mask = tf.to_float((pred_box_conf*object_mask) >= 0.5)
        class_mask  = tf.expand_dims(tf.to_float(tf.equal(tf.argmax(pred_box_class, -1), true_box_class)), 4)
        recall50    = tf.reduce_sum(tf.to_float(iou_scores >= 0.5 ) * detect_mask  * class_mask) / (count + 1e-3)
        recall75    = tf.reduce_sum(tf.to_float(iou_scores >= 0.75) * detect_mask  * class_mask) / (count + 1e-3)    
        avg_iou     = tf.reduce_sum(iou_scores) / (count + 1e-3)
        avg_obj     = tf.reduce_sum(pred_box_conf  * object_mask)  / (count + 1e-3)
        avg_noobj   = tf.reduce_sum(pred_box_conf  * (1-object_mask))  / (count_noobj + 1e-3)
        avg_cat     = tf.reduce_sum(object_mask * class_mask) / (count + 1e-3) 

        """
        Warm-up training
        """
        batch_seen = tf.assign_add(batch_seen, 1.)
        
        true_box_xy, true_box_wh, xywh_mask = tf.cond(tf.less(batch_seen, self.warmup_batches+1), 
                              lambda: [true_box_xy + (0.5 + self.cell_grid[:,:grid_h,:grid_w,:,:]) * (1-object_mask), 
                                       true_box_wh + tf.zeros_like(true_box_wh) * (1-object_mask), 
                                       tf.ones_like(object_mask)],
                              lambda: [true_box_xy, 
                                       true_box_wh,
                                       object_mask])

        """
        Compare each true box to all anchor boxes
        """      
        wh_scale = tf.exp(true_box_wh) * self.anchors / net_factor
        wh_scale = tf.expand_dims(2 - wh_scale[..., 0] * wh_scale[..., 1], axis=4) # the smaller the box, the bigger the scale

        xy_delta    = xywh_mask   * (pred_box_xy-true_box_xy) * wh_scale * self.xywh_scale
        wh_delta    = xywh_mask   * (pred_box_wh-true_box_wh) * wh_scale * self.xywh_scale
        conf_delta  = object_mask * (pred_box_conf-true_box_conf) * self.obj_scale + (1-object_mask)*10 * conf_delta * self.noobj_scale
        class_delta = object_mask * \
                      tf.expand_dims(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class), 4) * \
                      self.class_scale

        loss_xy    = tf.reduce_sum(tf.square(xy_delta),       list(range(1,5)))
        loss_wh    = tf.reduce_sum(tf.square(wh_delta),       list(range(1,5)))
        loss_conf  = tf.reduce_sum(tf.square(conf_delta),     list(range(1,5)))
        loss_class = tf.reduce_sum(class_delta,               list(range(1,5)))

        loss = loss_xy + loss_wh + loss_conf + loss_class

        loss = tf.Print(loss, [grid_h, avg_obj], message='avg_obj \t\t', summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_noobj], message='avg_noobj \t\t', summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_iou], message='avg_iou \t\t', summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_cat], message='avg_cat \t\t', summarize=1000)
        loss = tf.Print(loss, [grid_h, recall50], message='recall50 \t', summarize=1000)
        loss = tf.Print(loss, [grid_h, recall75], message='recall75 \t', summarize=1000)   
        loss = tf.Print(loss, [grid_h, count], message='count \t', summarize=1000)     
        loss = tf.Print(loss, [grid_h, tf.reduce_sum(loss_xy), 
                                       tf.reduce_sum(loss_wh), 
                                       tf.reduce_sum(loss_conf), 
                                       tf.reduce_sum(loss_class)],  message='loss xy, wh, conf, class: \t',   summarize=1000)   


        return loss*self.grid_scale
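# A hedged standalone check of the IoU formula used twice above, on two toy boxes
# given as (center_xy, wh) like the decoded predictions; the numbers are illustrative.
import tensorflow as tf

box_a_xy, box_a_wh = tf.constant([2.0, 2.0]), tf.constant([2.0, 2.0])
box_b_xy, box_b_wh = tf.constant([3.0, 2.0]), tf.constant([2.0, 2.0])
a_min, a_max = box_a_xy - box_a_wh / 2., box_a_xy + box_a_wh / 2.
b_min, b_max = box_b_xy - box_b_wh / 2., box_b_xy + box_b_wh / 2.
inter_wh = tf.maximum(tf.minimum(a_max, b_max) - tf.maximum(a_min, b_min), 0.)
inter_area = inter_wh[0] * inter_wh[1]
union_area = box_a_wh[0] * box_a_wh[1] + box_b_wh[0] * box_b_wh[1] - inter_area
iou = tf.truediv(inter_area, union_area)
with tf.Session() as sess:
    print(sess.run(iou))                               # 2 / 6, i.e. about 0.333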