from keras import backend as K
from keras.metrics import (binary_accuracy, categorical_accuracy,
                           sparse_categorical_accuracy)


def _generic_accuracy(y_true, y_pred):
    # Dispatch to the accuracy metric matching the tensors' shapes.
    if K.int_shape(y_pred)[1] == 1:
        # A single output unit means binary classification.
        return binary_accuracy(y_true, y_pred)
    if K.int_shape(y_true)[-1] == 1:
        # Integer labels scored against a class-probability distribution.
        return sparse_categorical_accuracy(y_true, y_pred)
    # One-hot labels scored against a class-probability distribution.
    return categorical_accuracy(y_true, y_pred)
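
# A quick check of the dispatch above on toy tensors. This is a minimal
# sketch under assumed shapes, not part of the original code: integer labels
# of shape (3, 1) against predictions of shape (3, 4) take the second branch,
# so sparse_categorical_accuracy is used.
import numpy as np

y_true = K.constant(np.array([[1], [3], [0]]))    # shape (3, 1): integer labels
y_pred = K.constant(np.random.rand(3, 4))         # shape (3, 4): 4-class scores
print(K.eval(_generic_accuracy(y_true, y_pred)))  # per-sample 0./1. hits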

import tensorflow as tf
from tensorflow.keras.metrics import sparse_categorical_accuracy


def train_on_batch(inputs, target):
    # One optimization step: forward pass, loss, gradients, weight update.
    # `model`, `loss_fn`, and `optimizer` are read from the enclosing scope.
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add any regularization losses collected on the model.
        loss = loss_fn(target, predictions) + sum(model.losses)
        acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss, acc
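
# A hypothetical wiring of the closure variables train_on_batch expects;
# the layer sizes, optimizer settings, and toy batch below are illustrative
# assumptions, not taken from the original project.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

x = tf.random.normal((8, 16))                           # toy batch of 8 samples
y = tf.random.uniform((8,), maxval=10, dtype=tf.int64)  # integer class labels
loss, acc = train_on_batch(x, y)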

import numpy as np


def evaluate(loader):
    # Run one full pass over the loader and return batch-size-weighted
    # averages of loss and accuracy.
    step = 0
    results = []
    for batch in loader:
        step += 1
        inputs, target = batch
        predictions = model(inputs, training=False)
        loss = loss_fn(target, predictions)
        acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))
        results.append((loss, acc, len(target)))  # Keep track of batch size
        if step == loader.steps_per_epoch:
            results = np.array(results)
            return np.average(results[:, :-1], 0, weights=results[:, -1])
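
# The final line averages the (loss, acc) columns weighted by batch size.
# A hypothetical two-batch example of that reduction:
demo = np.array([[0.70, 0.50, 32],    # (loss, acc, batch_size)
                 [0.50, 0.75, 16]])
print(np.average(demo[:, :-1], 0, weights=demo[:, -1]))
# -> [0.6333 0.5833], i.e. (0.70*32 + 0.50*16)/48 and (0.50*32 + 0.75*16)/48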

def compile(self, num_sampled=5):
    with self.graph.as_default():
        # Construct loss
        with tf.name_scope('loss'):
            # Use NCE loss for the batch.
            # tf.nn.nce_loss automatically draws a new sample of the negative
            # labels each time we evaluate the loss.
            # Explanation of the meaning of NCE loss:
            # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=self.weights,
                    biases=self.biases,
                    labels=self.y,
                    inputs=self.embed,
                    num_sampled=num_sampled,
                    num_classes=self.label_size,
                    num_true=self.num_true_class,
                    remove_accidental_hits=True,
                ))

        # Construct metric
        with tf.name_scope('metric'):
            # Accuracy of the full softmax against the first true label.
            self.accuracy = tf.reduce_mean(
                sparse_categorical_accuracy(self.y[:, :1], self.logit))

        # Construct optimizer
        with tf.name_scope('optimizer'):
            self.learning_rate = tf.Variable(1e-3, trainable=False,
                                             name="learning_rate")
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(self.loss)

        # Summaries
        self.loss_summary = tf.summary.scalar("loss/loss_train", self.loss)
        self.loss_val_summary = tf.summary.scalar("loss/loss_val", self.loss)
        self.accuracy_summary = tf.summary.scalar("metric/acc_train",
                                                  self.accuracy)
        self.accuracy_val_summary = tf.summary.scalar("metric/acc_val",
                                                      self.accuracy)

        # Saver
        self.saver = tf.train.Saver(max_to_keep=10)

        # Initialization
        self.init = tf.group(tf.global_variables_initializer(),
                             tf.local_variables_initializer())
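
# For reference, the shapes tf.nn.nce_loss expects for the attributes used
# above (TF 1.x graph-mode API), with `dim` the embedding size:
#   self.weights: (label_size, dim)    class embedding matrix
#   self.biases:  (label_size,)        per-class biases
#   self.y:       (batch, num_true)    int64 true-class ids
#   self.embed:   (batch, dim)         input embeddings
# The metric additionally assumes self.logit has shape (batch, label_size)
# and scores it against only the first true class per example.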

import six
from keras import backend as K
from keras.metrics import (binary_accuracy, categorical_accuracy,
                           sparse_categorical_accuracy)


def score(y_true, y_pred):
    # `metric_function` and `delta` are read from the enclosing scope.
    y_t_rank = len(y_true.shape.as_list())
    y_p_rank = len(y_pred.shape.as_list())
    y_t_last_dim = y_true.shape.as_list()[-1]
    y_p_last_dim = y_pred.shape.as_list()[-1]

    is_binary = y_p_last_dim == 1
    is_sparse_categorical = (y_t_rank < y_p_rank or
                             (y_t_last_dim == 1 and y_p_last_dim > 1))

    if isinstance(metric_function, six.string_types):
        if metric_function in ["accuracy", "acc"]:
            if is_binary:
                metric = binary_accuracy(y_true, y_pred)
            elif is_sparse_categorical:
                metric = sparse_categorical_accuracy(y_true, y_pred)
            else:
                metric = categorical_accuracy(y_true, y_pred)
        else:
            # Any other metric name falls back to categorical accuracy.
            metric = categorical_accuracy(y_true, y_pred)
    else:
        # A callable is applied directly.
        metric = metric_function(y_true, y_pred)

    return K.cast(metric * (1.0 + delta), K.floatx())
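
# A hypothetical call exercising the sparse-categorical route: y_true has a
# trailing dim of 1 while y_pred has 2 classes. metric_function and delta are
# assumed here to be set at module level, since score() reads them implicitly.
metric_function = "accuracy"
delta = 0.0

y_true = K.constant([[1.0], [0.0]])            # shape (2, 1): integer labels
y_pred = K.constant([[0.1, 0.9], [0.8, 0.2]])  # shape (2, 2): class scores
print(K.eval(score(y_true, y_pred)))           # -> [1. 1.]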

import tensorflow as tf
from tensorflow.keras.metrics import sparse_categorical_accuracy


def sparse_categorical_accuracy_with_mask(y_true, y_pred):
    # Drop masked positions before scoring, so padding does not skew
    # the accuracy.
    y_true_masked, y_pred_masked = boolean_masking(y_true, y_pred)
    return tf.keras.backend.mean(
        sparse_categorical_accuracy(y_true_masked, y_pred_masked))
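
# boolean_masking is not defined in the original snippet. A minimal sketch,
# assuming y_true is (batch, steps) integer labels, y_pred is
# (batch, steps, classes), and the label value 0 marks padded positions:
def boolean_masking(y_true, y_pred, mask_value=0):
    # Hypothetical helper: keep only positions whose label is a real class.
    mask = tf.not_equal(y_true, mask_value)
    return tf.boolean_mask(y_true, mask), tf.boolean_mask(y_pred, mask)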

def acc(y, y_h):
    # Short alias so the metric is reported as `acc` in training logs.
    return sparse_categorical_accuracy(y, y_h)