# Example 1
def _dualloss(logits, mid_logits, labels, class_hist, num_labels, is_training=True):
  """Build the total loss from final and mid-level logits.

  Computes a class-frequency-weighted cross-entropy on both heads and blends
  them with a fixed mixing weight, then adds regularization via
  losses.total_loss_sum. During training, loss summaries are attached and the
  returned tensor is gated on the summary op.

  Args:
    logits: final classifier logits.
    mid_logits: auxiliary mid-network logits.
    labels: ground-truth labels.
    class_hist: per-class histogram used to weight the cross-entropy.
    num_labels: unused here; kept for interface compatibility.
    is_training: if True, attach loss-average summaries.

  Returns:
    Scalar total loss tensor (cross-entropy mix + regularization).
  """
  max_weight = 1
  final_loss = losses.weighted_cross_entropy_loss(
      logits, labels, class_hist, max_weight=max_weight)
  mid_loss = losses.weighted_cross_entropy_loss(
      mid_logits, labels, class_hist, max_weight=max_weight)

  # Mixing weight for the auxiliary (mid) loss; 0.3 was found best empirically.
  wgt = 0.3
  xent_loss = (1 - wgt) * final_loss + wgt * mid_loss

  # Cross-entropy plus regularization terms.
  total_loss = losses.total_loss_sum([xent_loss])

  if is_training:
    loss_averages_op = losses.add_loss_summaries(total_loss)
    # Ensure summaries are computed before the loss value is used.
    with tf.control_dependencies([loss_averages_op]):
      total_loss = tf.identity(total_loss)

  return total_loss
# Example 2
def _multiloss(logits, aux_logits, labels, num_labels, class_hist, is_training):
  """Build the total loss from main logits plus any number of auxiliary heads.

  The main head gets weight 0.7 when auxiliary heads exist (1.0 otherwise);
  the remaining 0.3 is split evenly across the auxiliary heads. Each head
  uses a class-frequency-weighted cross-entropy capped at FLAGS.max_weight.

  Args:
    logits: final classifier logits.
    aux_logits: possibly-empty list of auxiliary logits tensors.
    labels: ground-truth labels.
    num_labels: unused here; kept for interface compatibility.
    class_hist: per-class histogram used to weight the cross-entropy.
    is_training: if True, attach loss-average summaries.

  Returns:
    Scalar total loss tensor (weighted cross-entropy mix + regularization).
  """
  max_weight = FLAGS.max_weight
  if len(aux_logits) > 0:
    main_wgt = 0.7
    aux_wgt = (1 - main_wgt) / len(aux_logits)
  else:
    # No auxiliary heads: the main loss carries the full weight.
    main_wgt = 1.0
    aux_wgt = 0
  # Dead initializer `xent_loss = 0` removed: this assignment always runs.
  xent_loss = main_wgt * losses.weighted_cross_entropy_loss(
      logits, labels, class_hist, max_weight=max_weight)
  for i, l in enumerate(aux_logits):
    print('loss' + str(i), ' --> ' , l)
    xent_loss += aux_wgt * losses.weighted_cross_entropy_loss(
      l, labels, class_hist, max_weight=max_weight)

  all_losses = [xent_loss]
  # get losses + regularization
  total_loss = losses.total_loss_sum(all_losses)
  if is_training:
    loss_averages_op = losses.add_loss_summaries(total_loss)
    # Ensure summaries are computed before the loss value is used.
    with tf.control_dependencies([loss_averages_op]):
      total_loss = tf.identity(total_loss)
  return total_loss
def get_loss(logits, labels, weights, is_training):
    """Build the total loss: weighted cross-entropy plus regularization.

    Args:
      logits: classifier logits.
      labels: ground-truth labels.
      weights: per-class weights for the cross-entropy.
      is_training: if True, attach loss-average summaries.

    Returns:
      Scalar total loss tensor.
    """
    xent_loss = losses.weighted_cross_entropy_loss(logits, labels, weights)
    # Bug fix: qualify with `losses.` — every sibling loss builder in this
    # file calls losses.total_loss_sum; the bare name is not defined here.
    total_loss = losses.total_loss_sum([xent_loss])
    if is_training:
        loss_averages_op = losses.add_loss_summaries(total_loss)
        # Ensure summaries are computed before the loss value is used.
        with tf.control_dependencies([loss_averages_op]):
            total_loss = tf.identity(total_loss)

    return total_loss