Example #1
import tensorflow as tf

import model_base  # project module providing the base loss; the import path may differ


def loss_l2reg(pred, target, **kwargs):
    """Build the total loss: L2-regularized classification loss plus deconvolution loss.

    Args:
      pred: dict of prediction tensors with keys 'clsf', 'deconv' and 'l2reg'
            (the L2 regularization term for the classification branch).
      target: dict of target tensors with keys 'clsf' and 'deconv'.
      **kwargs: loss options; 'lossFunction' names the classification loss.

    Returns:
      Loss tensor of type float.
    """
    # The configured classification loss name; kwargs['lossFunction'] is
    # overwritten below for the deconvolution branch.
    clsf_loss_name = kwargs['lossFunction']
    loss_clsf = model_base.loss_l2reg(pred['clsf'], target['clsf'],
                                      pred['l2reg'], **kwargs)
    # Switch to the deconvolution loss; it carries no extra L2 term and could
    # be down-weighted here (e.g. multiplied by 0.1) if it dominates the total.
    kwargs['lossFunction'] = 'deconv'
    loss_deconv = model_base.loss_l2reg(pred['deconv'], target['deconv'], 0,
                                        **kwargs)
    total_loss = tf.add(loss_clsf, loss_deconv, name='loss_total')
    return total_loss
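
A minimal usage sketch for this combined loss, assuming TensorFlow 1.x graph mode: the prediction/target dictionaries, tensor shapes, the 'softmax' loss name and the optimizer below are illustrative placeholders rather than values from the original project, and the sketch presupposes that the project's model_base module is importable.

import tensorflow as tf

# Hypothetical inputs; shapes and the 'lossFunction' value are placeholders.
pred = {
    'clsf': tf.placeholder(tf.float32, [None, 64 * 64]),      # classification logits
    'deconv': tf.placeholder(tf.float32, [None, 64, 64, 1]),  # deconvolution output
    'l2reg': tf.constant(0.0),                                 # L2 regularization term
}
target = {
    'clsf': tf.placeholder(tf.float32, [None, 64 * 64]),
    'deconv': tf.placeholder(tf.float32, [None, 64, 64, 1]),
}

total_loss = loss_l2reg(pred, target, lossFunction='softmax')
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)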
Example #2
import model_base  # project module providing the base loss; the import path may differ


def loss_l2reg(pred, target, l2reg, **kwargs):
    """Thin wrapper that forwards directly to model_base.loss_l2reg.

    Args:
      pred: prediction tensor (e.g. logits from inference()).
      target: target tensor matching pred.
      l2reg: L2 regularization term added to the loss.
      **kwargs: loss options passed through unchanged.

    Returns:
      Loss tensor of type float.
    """
    return model_base.loss_l2reg(pred, target, l2reg, **kwargs)
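
A call to this wrapper might look as follows; the placeholder shapes, the weight-decay constant and the 'l2' loss name are assumptions for illustration, and the call again presupposes the project's model_base module.

import tensorflow as tf

# Hypothetical inputs (TF 1.x); all values below are placeholders.
logits = tf.placeholder(tf.float32, [None, 64 * 64])   # predictions from inference()
labels = tf.placeholder(tf.float32, [None, 64 * 64])   # matching targets
weight_decay = tf.constant(1e-4)                        # assumed L2 regularization term

loss = loss_l2reg(logits, labels, weight_decay, lossFunction='l2')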