def _get_loss(self, targets, logits, probabilities):
    with tf.variable_scope('loss_{}'.format(self.name)):
        train_loss = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.to_float(targets),
            logits=logits)

        if self.loss['robust_lambda'] > 0:
            # smooth the loss towards the uniform distribution
            # over the 2 binary outcomes
            train_loss = ((1 - self.loss['robust_lambda']) * train_loss +
                          self.loss['robust_lambda'] / 2)

        train_mean_loss = tf.reduce_mean(
            train_loss,
            name='train_mean_loss_{}'.format(self.name))

        if self.loss['confidence_penalty'] > 0:
            # binary output, so the penalty is computed over 2 classes
            mean_penalty = mean_confidence_penalty(probabilities, 2)
            train_mean_loss += (self.loss['confidence_penalty'] *
                                mean_penalty)

    return train_mean_loss, train_loss
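
# `mean_confidence_penalty` is not defined in this excerpt. Below is a
# minimal sketch of what it plausibly computes, assuming it implements the
# confidence penalty of Pereyra et al. (2017): low-entropy (over-confident)
# output distributions are penalized, normalized here so the penalty is 0
# for a uniform distribution and approaches 1 for a one-hot one. Only the
# name matches the call sites above; the body is an assumption, and it
# further assumes `probabilities` holds the full per-class distribution
# along its last axis and that `tf`/`np` are imported at module level.
def mean_confidence_penalty(probabilities, num_classes):
    max_entropy = tf.constant(np.log(num_classes), dtype=tf.float32)
    # clip to avoid log(0) for saturated probabilities
    clipped = tf.clip_by_value(probabilities, 1e-10, 1.0)
    entropies = -tf.reduce_sum(clipped * tf.log(clipped), axis=-1)
    penalties = (max_entropy - entropies) / max_entropy
    return tf.reduce_mean(penalties)
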
def _get_loss(self, targets, hidden, logits, probabilities,
              class_weights, class_biases):
    with tf.variable_scope('loss_{}'.format(self.name)):
        if ('class_similarities' in self.loss and
                self.loss['class_similarities'] is not None):
            class_similarities = self.loss['class_similarities']
            if (class_similarities.shape[0] != self.num_classes or
                    class_similarities.shape[1] != self.num_classes):
                logging.info(
                    'Class similarities is {} while num classes is {}'.format(
                        class_similarities.shape, self.num_classes))
                if (class_similarities.shape[0] > self.num_classes and
                        class_similarities.shape[1] > self.num_classes):
                    # keep only the first num_classes rows and columns
                    class_similarities = class_similarities[
                        :self.num_classes, :self.num_classes]
                elif (class_similarities.shape[0] < self.num_classes and
                      class_similarities.shape[1] < self.num_classes):
                    # fill the missing parts of the matrix with 0s
                    # and 1s on the diagonal
                    padded = np.eye(self.num_classes,
                                    dtype=class_similarities.dtype)
                    padded[:class_similarities.shape[0],
                           :class_similarities.shape[1]] = class_similarities
                    class_similarities = padded

            class_similarities = tf.constant(
                class_similarities,
                dtype=tf.float32,
                name='class_similarities_{}'.format(self.name))
            # use the row of the similarity matrix corresponding to each
            # target as its soft label vector
            vector_labels = tf.gather(
                class_similarities,
                targets,
                name='vector_labels_{}'.format(self.name))
        else:
            vector_labels = tf.one_hot(
                targets,
                self.num_classes,
                name='vector_labels_{}'.format(self.name))

        if self.loss['type'] == 'sampled_softmax_cross_entropy':
            train_loss, eval_loss = sampled_softmax_cross_entropy(
                targets,
                hidden,
                logits,
                vector_labels,
                class_weights,
                class_biases,
                self.loss,
                self.num_classes)
        elif self.loss['type'] == 'softmax_cross_entropy':
            train_loss = weighted_softmax_cross_entropy(
                logits,
                vector_labels,
                self.loss)
            eval_loss = train_loss
        else:
            raise ValueError(
                'Unsupported loss type {}'.format(self.loss['type']))

        if self.loss['robust_lambda'] > 0:
            # smooth the loss towards the uniform distribution
            # over num_classes classes
            train_loss = ((1 - self.loss['robust_lambda']) * train_loss +
                          self.loss['robust_lambda'] / self.num_classes)

        train_mean_loss = tf.reduce_mean(
            train_loss,
            name='train_mean_loss_{}'.format(self.name))

        if self.loss['confidence_penalty'] > 0:
            mean_penalty = mean_confidence_penalty(
                probabilities, self.num_classes)
            train_mean_loss += (self.loss['confidence_penalty'] *
                                mean_penalty)

    return train_mean_loss, eval_loss
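
# The truncate-or-pad logic above is easiest to see in isolation. Below is a
# standalone NumPy restatement (a hypothetical helper, not part of this
# module) with a small usage example. It relies on the module-level
# `numpy as np` import and assumes, as the code above does, that a padded
# matrix should be 0 everywhere except 1s on the diagonal, so classes
# without similarity data are only similar to themselves.
def _resize_class_similarities(sims, num_classes):
    if sims.shape[0] > num_classes and sims.shape[1] > num_classes:
        # keep only the first num_classes rows and columns
        return sims[:num_classes, :num_classes]
    if sims.shape[0] < num_classes and sims.shape[1] < num_classes:
        # pad with an identity block for the missing classes
        padded = np.eye(num_classes, dtype=sims.dtype)
        padded[:sims.shape[0], :sims.shape[1]] = sims
        return padded
    return sims

# Example: a 2x2 similarity matrix padded up to 3 classes.
# _resize_class_similarities(np.array([[1.0, 0.5],
#                                      [0.5, 1.0]]), 3)
# -> [[1.0, 0.5, 0.0],
#     [0.5, 1.0, 0.0],
#     [0.0, 0.0, 1.0]]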