def evaluate_epoch(
    axes,
    tr_loader,
    val_loader,
    te_loader,
    model,
    criterion,
    epoch,
    stats,
    include_test=False,
    update_plot=True,
    multiclass=False,
):
    """Evaluate the `model` on the train and validation set.

    Appends [val_acc, val_loss, val_auc, train_acc, train_loss, train_auc]
    (plus the test metrics when `include_test` is true) to `stats`, logs the
    epoch via `utils.log_training`, and optionally refreshes the live plot.
    """

    def _get_metrics(loader):
        true_labels, pred_labels, scores = [], [], []
        n_correct, n_seen = 0, 0
        batch_losses = []
        for X, y in loader:
            with torch.no_grad():
                output = model(X)
                predicted = predictions(output.data)
                true_labels.append(y)
                pred_labels.append(predicted)
                probs = softmax(output.data, dim=1)
                # Binary case keeps only the positive-class probability
                # column; multiclass keeps the full distribution for OVO AUC.
                scores.append(probs if multiclass else probs[:, 1])
                n_seen += y.size(0)
                n_correct += (predicted == y).sum().item()
                batch_losses.append(criterion(output, y).item())
        true_labels = torch.cat(true_labels)
        pred_labels = torch.cat(pred_labels)
        scores = torch.cat(scores)
        loss = np.mean(batch_losses)
        acc = n_correct / n_seen
        if multiclass:
            auroc = metrics.roc_auc_score(true_labels, scores, multi_class="ovo")
        else:
            auroc = metrics.roc_auc_score(true_labels, scores)
        return acc, loss, auroc

    train_acc, train_loss, train_auc = _get_metrics(tr_loader)
    val_acc, val_loss, val_auc = _get_metrics(val_loader)
    stats_at_epoch = [
        val_acc,
        val_loss,
        val_auc,
        train_acc,
        train_loss,
        train_auc,
    ]
    if include_test:
        stats_at_epoch += list(_get_metrics(te_loader))
    stats.append(stats_at_epoch)
    utils.log_training(epoch, stats)
    if update_plot:
        utils.update_training_plot(axes, epoch, stats)
def report_training_progress(sess, batch_index, images, labels, loss, acc, clothes):
    """
    Performs inference on the validation set and reports the loss to the
    terminal and the training plot.

    Args:
        sess: active TF session.
        batch_index: current training step; evaluation runs every 50 steps.
        images, labels: placeholder tensors to feed.
        loss, acc: graph ops evaluated on the validation batch.
        clothes: dataset object providing `get_batch`.
    """
    # Only evaluate every 50th batch to keep training fast.
    if batch_index % 50 == 0:
        batch_images, batch_labels = clothes.get_batch(
            partition='validate', batch_size=512)
        # NOTE(review): removed leftover debug prints (shapes) and a
        # commented-out `cnn(...)` block — dead code from development.
        valid_acc, valid_loss = sess.run(
            [acc, loss],
            feed_dict={images: batch_images, labels: batch_labels})
        utils.log_training(batch_index, valid_loss, valid_acc)
        utils.update_training_plot(batch_index, valid_acc, valid_loss)
def report_training_progress(sess, batch_index, images, labels, keep_prob, loss, acc, dataset):
    """
    Performs inference on the validation set and reports the loss to the
    terminal and the training plot.

    Args:
        sess: active TF session.
        batch_index: current training step; evaluation runs every 50 steps.
        images, labels, keep_prob: placeholder tensors to feed.
        loss, acc: graph ops evaluated on the validation batch.
        dataset: dataset object providing `get_valid_batch`.
    """
    if batch_index % 50 == 0:
        batch_images, batch_labels = dataset.get_valid_batch(batch_size=512)
        # These are VALIDATION metrics (the old `test_*` names were
        # misleading; siblings in this file use `valid_*`).
        # keep_prob is fed as 1 so dropout is disabled during evaluation.
        valid_acc, valid_loss = sess.run(
            [acc, loss],
            feed_dict={images: batch_images, labels: batch_labels, keep_prob: 1})
        utils.log_training(batch_index, valid_loss, valid_acc)
        utils.update_training_plot(batch_index, valid_acc, valid_loss)
def report_training_progress(sess, batch_index, images, labels, loss, acc, food):
    """
    Performs inference on the validation set and reports the loss to the
    terminal and the training plot.
    """
    # Evaluate only every 50th batch; bail out early otherwise.
    if batch_index % 50 != 0:
        return
    val_images, val_labels = food.get_batch(
        partition='valid', batch_size=get('rnn.batch_size'))
    feed = {images: val_images, labels: val_labels}
    valid_acc, valid_loss = sess.run([acc, loss], feed_dict=feed)
    utils.log_training(batch_index, valid_loss, valid_acc)
    utils.update_training_plot(batch_index, valid_acc, valid_loss)
def evaluate_epoch(axes, tr_loader, val_loader, te_loader, model, criterion,
                   epoch, stats, prolist, include_test=False, update_plot=True,
                   multiclass=False, probabimode=False):
    """Evaluate the `model` on the train, validation, and test sets.

    Metrics for each split are logged to wandb; per-epoch stats are appended
    to `stats`, and raw (output, label) pairs for each split are appended to
    `prolist`. Labels `y` appear to be (soft) one-hot vectors: accuracy is
    computed against their argmax, and the reported `loss` is the MSE between
    softmax outputs and the label distribution — TODO confirm with callers.
    """

    def _get_metrics(loader):
        y_true, y_pred, y_score = [], [], []
        correct, total = 0, 0
        running_loss = []
        oneroundlis = []
        mseloss = []
        mselossfunc = torch.nn.MSELoss()
        # MSE on examples whose label distribution is not one-hot
        # (annotator disagreement).
        disagreeloss = []
        for X, y in loader:
            with torch.no_grad():
                output = model(X)
                predicted = predictions(output.data)
                tmp = softmax(output.data, dim=1)
                oneroundlis.append((output, y))
                mseloss.append(mselossfunc(tmp, y).item())
                for idx, y0 in enumerate(y):
                    if np.count_nonzero(y0) > 1:
                        print(y0)
                        disagreeloss.append(mselossfunc(y0, tmp[idx]))
                # Collapse the soft label to its majority class for
                # accuracy/AUROC.
                y = np.argmax(y, axis=1)
                y_true.append(y)
                y_pred.append(predicted)
                if not multiclass:
                    y_score.append(softmax(output.data, dim=1)[:, 1])
                else:
                    y_score.append(softmax(output.data, dim=1))
                total += y.size(0)
                correct += (predicted == y).sum().item()
                running_loss.append(criterion(output, y).item())
        prolist.append(oneroundlis)
        y_true = torch.cat(y_true)
        y_pred = torch.cat(y_pred)
        y_score = torch.cat(y_score)
        # NOTE: the reported loss is the MSE against the soft labels, not
        # `criterion` (which is still accumulated in running_loss but unused).
        loss = np.mean(mseloss)
        acc = correct / total
        if not multiclass:
            auroc = metrics.roc_auc_score(y_true, y_score)
        else:
            auroc = metrics.roc_auc_score(y_true, y_score,
                                          multi_class="ovo",
                                          labels=[0, 1, 2, 3])
        disagreeval = np.mean(disagreeloss)
        return acc, loss, auroc, disagreeval

    train_acc, train_loss, train_auc, train_dis = _get_metrics(tr_loader)
    val_acc, val_loss, val_auc, val_dis = _get_metrics(val_loader)
    test_acc, test_loss, test_auc, test_dis = _get_metrics(te_loader)
    wandb.log({"train_acc": train_acc, "train_loss": train_loss,
               "train_auc": train_auc, "train_dis": train_dis,
               "val_acc": val_acc, "val_loss": val_loss,
               "val_auc": val_auc, "val_dis": val_dis,
               "test_acc": test_acc, "test_loss": test_loss,
               "test_auc": test_auc, "test_dis": test_dis})
    stats_at_epoch = [
        val_acc,
        val_loss,
        val_auc,
        train_acc,
        train_loss,
        train_auc,
        test_acc,
        test_loss,
        # BUG FIX: this slot previously duplicated `test_acc`; the third test
        # entry should be the test AUROC, matching the val/train triplets.
        test_auc,
    ]
    if include_test:
        # Reuse the already-computed test metrics instead of running a fourth
        # full pass over `te_loader` (the old code re-called _get_metrics,
        # which also appended a duplicate entry to `prolist`).
        stats_at_epoch += [test_acc, test_loss, test_auc, test_dis]
    stats.append(stats_at_epoch)
    utils.log_training(epoch, stats)
    if update_plot:
        utils.update_training_plot(axes, epoch, stats)