Example #1
    def vector_loss(self, targets, predictions, logits):
        with tf.variable_scope('loss_{}'.format(self.name)):
            if self.loss['type'] == MEAN_SQUARED_ERROR:
                # sum per-dimension squared errors into one loss per example
                train_loss = tf.reduce_sum(
                    get_squared_error(targets, predictions, self.name),
                    axis=1)

            elif self.loss['type'] == MEAN_ABSOLUTE_ERROR:
                # sum per-dimension absolute errors into one loss per example
                train_loss = tf.reduce_sum(
                    get_absolute_error(targets, predictions, self.name),
                    axis=1)

            elif self.loss['type'] == SOFTMAX_CROSS_ENTROPY:
                train_loss = weighted_softmax_cross_entropy(
                    logits, targets, self.loss)

            else:
                raise ValueError(
                    'Unsupported loss type {}'.format(self.loss['type']))

            # average the per-example losses into a single scalar
            train_mean_loss = tf.reduce_mean(
                train_loss,
                name='train_mean_loss_{}'.format(self.name))

        return train_mean_loss, train_loss
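Both error branches follow the same pattern: reduce each example's per-dimension errors to a single value with reduce_sum(axis=1), then average over the batch with reduce_mean. A minimal NumPy sketch of that reduction, where squared_error is a hypothetical stand-in for the get_squared_error helper above:

import numpy as np

def squared_error(targets, predictions):
    # elementwise squared difference, shape (batch, dims);
    # hypothetical stand-in for get_squared_error
    return (targets - predictions) ** 2

targets = np.array([[1.0, 2.0], [3.0, 4.0]])
predictions = np.array([[1.5, 2.0], [2.0, 5.0]])

train_loss = squared_error(targets, predictions).sum(axis=1)  # one loss per example
train_mean_loss = train_loss.mean()                           # scalar batch loss

print(train_loss)       # [0.25 2.  ]
print(train_mean_loss)  # 1.125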
Example #2
    def _get_loss(self, targets, hidden, logits, probabilities, class_weights,
                  class_biases):
        with tf.variable_scope('loss_{}'.format(self.name)):
            if ('class_similarities' in self.loss
                    and self.loss['class_similarities'] is not None):
                class_similarities = self.loss['class_similarities']

                # reconcile the similarity matrix shape with num_classes
                if (class_similarities.shape[0] != self.num_classes
                        or class_similarities.shape[1] != self.num_classes):
                    logging.info(
                        'Class similarities is {} while num classes is {}'.format(
                            class_similarities.shape, self.num_classes))
                    if (class_similarities.shape[0] > self.num_classes
                            and class_similarities.shape[1] > self.num_classes):
                        # keep only the first num_classes rows and columns
                        class_similarities = class_similarities[
                            :self.num_classes, :self.num_classes]
                    elif (class_similarities.shape[0] < self.num_classes
                          and class_similarities.shape[1] < self.num_classes):
                        # fill the missing parts of the matrix with 0s and 1
                        # on the diagonal
                        diag = np.eye(self.num_classes)
                        diag[:class_similarities.shape[0],
                             :class_similarities.shape[1]] = class_similarities
                        class_similarities = diag

                class_similarities = tf.constant(
                    class_similarities,
                    dtype=tf.float32,
                    name='class_similarities_{}'.format(self.name))
                # each target id selects its row of similarities as a soft label
                vector_labels = tf.gather(
                    class_similarities,
                    targets,
                    name='vector_labels_{}'.format(self.name))
            else:
                # default to hard one-hot labels
                vector_labels = tf.one_hot(
                    targets,
                    self.num_classes,
                    name='vector_labels_{}'.format(self.name))

            if self.loss['type'] == 'sampled_softmax_cross_entropy':
                train_loss, eval_loss = sampled_softmax_cross_entropy(
                    targets, hidden, logits, vector_labels, class_weights,
                    class_biases, self.loss, self.num_classes)
            elif self.loss['type'] == 'softmax_cross_entropy':
                train_loss = weighted_softmax_cross_entropy(
                    logits, vector_labels, self.loss)
                eval_loss = train_loss
            else:
                raise ValueError(
                    'Unsupported loss type {}'.format(self.loss['type']))

            # interpolate each per-example loss toward a constant
            if self.loss['robust_lambda'] > 0:
                train_loss = ((1 - self.loss['robust_lambda']) * train_loss
                              + self.loss['robust_lambda'] / self.num_classes)

            train_mean_loss = tf.reduce_mean(
                train_loss,
                name='train_mean_loss_{}'.format(self.name))

            # penalize over-confident output distributions
            if self.loss['confidence_penalty'] > 0:
                mean_penalty = mean_confidence_penalty(probabilities,
                                                       self.num_classes)
                train_mean_loss += (self.loss['confidence_penalty'] *
                                    mean_penalty)

        return train_mean_loss, eval_loss
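The shape reconciliation at the top of _get_loss can be exercised in isolation. A minimal sketch, with fit_similarities as a hypothetical standalone version of the truncate-or-pad logic (including the np.eye padding):

import numpy as np

def fit_similarities(class_similarities, num_classes):
    # truncate or pad a class-similarity matrix to (num_classes, num_classes)
    rows, cols = class_similarities.shape
    if rows > num_classes and cols > num_classes:
        # keep only the first num_classes rows and columns
        return class_similarities[:num_classes, :num_classes]
    if rows < num_classes and cols < num_classes:
        # fill the missing parts with 0s and 1 on the diagonal
        diag = np.eye(num_classes)
        diag[:rows, :cols] = class_similarities
        return diag
    return class_similarities

sims = np.array([[1.0, 0.5], [0.5, 1.0]])
print(fit_similarities(sims, 3))
# [[1.  0.5 0. ]
#  [0.5 1.  0. ]
#  [0.  0.  1. ]]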
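The vector_labels lookup then replaces hard one-hot targets with each class's row of the similarity matrix, so similar classes receive partial credit. In NumPy terms, the tf.gather call amounts to plain row indexing:

import numpy as np

class_similarities = np.array([[1.0, 0.3, 0.0],
                               [0.3, 1.0, 0.3],
                               [0.0, 0.3, 1.0]])
targets = np.array([0, 2, 1])

# each target id selects its row as a soft label (the tf.gather equivalent)
vector_labels = class_similarities[targets]
print(vector_labels)
# [[1.  0.3 0. ]
#  [0.  0.3 1. ]
#  [0.3 1.  0.3]]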
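As a worked example of the robust_lambda branch: with robust_lambda = 0.1 and num_classes = 4, a per-example loss of 2.0 becomes 0.9 * 2.0 + 0.1 / 4 = 1.825. Blending the loss with a constant this way caps how strongly any single (possibly mislabeled) example can pull the gradient.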