def vector_loss(self, targets, predictions, logits):
    """Build the training loss graph for this vector output feature.

    Args:
        targets: ground-truth tensor for this feature.
        predictions: model output tensor (same shape as ``targets``).
        logits: pre-activation outputs; only used for the
            softmax-cross-entropy loss.

    Returns:
        A ``(train_mean_loss, train_loss)`` pair: the scalar mean loss and
        the per-example loss tensor.

    Raises:
        ValueError: if ``self.loss['type']`` is not a supported loss type.
    """
    with tf.variable_scope('loss_{}'.format(self.name)):
        # Hoist the repeated dict lookup out of the if/elif chain.
        loss_type = self.loss['type']
        if loss_type == MEAN_SQUARED_ERROR:
            # Sum the squared error over the vector dimension (axis=1)
            # so each example contributes a single loss value.
            train_loss = tf.reduce_sum(
                get_squared_error(targets, predictions, self.name),
                axis=1)
        elif loss_type == MEAN_ABSOLUTE_ERROR:
            train_loss = tf.reduce_sum(
                get_absolute_error(targets, predictions, self.name),
                axis=1)
        elif loss_type == SOFTMAX_CROSS_ENTROPY:
            train_loss = weighted_softmax_cross_entropy(
                logits, targets, self.loss)
        else:
            # Dead `train_mean_loss = None` / `train_loss = None`
            # assignments removed: the raise made them unreachable
            # from any caller's point of view.
            raise ValueError('Unsupported loss type {}'.format(
                loss_type))
        train_mean_loss = tf.reduce_mean(
            train_loss,
            name='train_mean_loss_{}'.format(self.name))
    return train_mean_loss, train_loss
def _get_measures(self, targets, predictions):
    """Build the evaluation-measure ops for this output feature.

    All measure ops are created inside a ``measures_<name>`` variable
    scope. The raw tensors from the measure helpers are returned
    without any reduction.

    Args:
        targets: ground-truth tensor for this feature.
        predictions: model output tensor (same shape as ``targets``).

    Returns:
        A tuple ``(error, squared_error, absolute_error, r2)`` of
        measure tensors.
    """
    with tf.variable_scope('measures_{}'.format(self.name)):
        err = get_error(targets, predictions, self.name)
        abs_err = get_absolute_error(targets, predictions, self.name)
        sq_err = get_squared_error(targets, predictions, self.name)
        r2 = get_r2(targets, predictions, self.name)
    # Note the return order: squared error precedes absolute error.
    return err, sq_err, abs_err, r2
def _get_measures(self, targets, predictions):
    """Build per-example evaluation-measure ops for a vector output.

    Absolute and squared errors are summed across the vector dimension
    (``axis=1``) so each example yields a single value; the raw error
    tensor is returned unreduced.

    Args:
        targets: ground-truth tensor for this feature.
        predictions: model output tensor (same shape as ``targets``).

    Returns:
        A tuple ``(error, squared_error, absolute_error, r2)``.
    """
    with tf.variable_scope('measures_{}'.format(self.name)):
        err = get_error(targets, predictions, self.name)
        abs_err = tf.reduce_sum(
            get_absolute_error(targets, predictions, self.name),
            axis=1)
        sq_err = tf.reduce_sum(
            get_squared_error(targets, predictions, self.name),
            axis=1)
        # TODO - not sure if this is correct: summing R^2 across
        # outputs may not be a meaningful aggregate — verify.
        r2 = tf.reduce_sum(get_r2(targets, predictions, self.name))
    return err, sq_err, abs_err, r2