# Now that the model is saved, set init to False so we reload it next time
# around instead of re-initializing the graph variables.
init = False

# Accumulators for per-batch validation metrics.
batch_cv_acc = []

# Re-initialize the local variables so the streaming metrics (accuracy,
# recall, precision, f1) reflect ONLY this evaluation pass, not training.
sess.run(tf.local_variables_initializer())

print("Evaluating model...")

# Load the validation/test data.
# NOTE(review): `which=dataset` selects the dataset split — confirm against
# load_validation_data's signature elsewhere in this file.
X_cv, y_cv = load_validation_data(percentage=1, how="normal", which=dataset)

# Evaluate the validation data batch by batch (no augmentation/distortion).
for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size, distort=False):
    _, _, valid_acc, valid_recall, valid_precision, valid_fscore, valid_cost = sess.run(
        [update_op, extra_update_ops, accuracy, rec_op, prec_op, f1_score, mean_ce],
        feed_dict={
            X: X_batch,
            y: y_batch,
            training: False,
        })

    batch_cv_acc.append(valid_acc)

# Write the validation summaries to TensorBoard, if enabled.
if log_to_tensorboard:
    # Evaluate once more to fetch the merged summary op, which is then
    # written to the TensorBoard log.
    # NOTE(review): the original source was truncated mid-call here; the
    # feed_dict below is reconstructed from the evaluation loop above
    # (it re-feeds the last batch of the loop) — confirm against the
    # original file.
    summary, cv_accuracy = sess.run(
        [merged, accuracy],
        feed_dict={
            X: X_batch,
            y: y_batch,
            training: False,
        })
# Accumulators for per-batch validation metrics.
batch_cv_acc = []
batch_cv_cost = []
batch_cv_loss = []
batch_cv_recall = []
batch_cv_precision = []

## Evaluate on validation data if it exists, otherwise skip this step.
if evaluate:
    print("Evaluating model...")

    # Load the validation/test data.
    X_cv, y_cv = load_validation_data(percentage=1, how="normal")

    # Evaluate batch by batch. Half the training batch size is used,
    # presumably to fit evaluation (plus summaries) in memory — confirm.
    for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size // 2, distort=False):
        summary, valid_acc, valid_recall, valid_precision, valid_cost, valid_loss = sess.run(
            [merged, accuracy, rec_op, prec_op, mean_ce, loss],
            feed_dict={
                X: X_batch,
                y: y_batch,
                is_testing: True,
                training: False,
            })

        batch_cv_acc.append(valid_acc)
        batch_cv_cost.append(valid_cost)
        batch_cv_loss.append(valid_loss)
        # recall/precision ops may return per-class arrays; reduce to a
        # scalar per batch with the mean.
        batch_cv_recall.append(np.mean(valid_recall))
        batch_cv_precision.append(np.mean(valid_precision))