# Evaluation: freeze the model, run it over the test set, and report
# weighted precision/recall/F1, accuracy, and a per-class report.
for parameter in model.parameters():
    parameter.requires_grad = False  # inference only — no gradients needed
if use_cuda:
    model.cuda()
model.eval()  # disable dropout/batch-norm training behavior
summary(model, (1, 64, 64))  # assumes 1x64x64 single-channel input — TODO confirm
print("Loaded model...")

preds = []
labels = []
# Disable autograd during inference: freezing parameters alone does not stop
# graph construction if any input requires grad, and no_grad saves memory.
with torch.no_grad():
    for local_batch, local_labels in tqdm(test_generator):
        # Record ground truth before moving the labels to the device.
        labels.extend(local_labels.numpy().tolist())
        local_batch, local_labels = local_batch.to(device), local_labels.to(device)
        # Call the module (not .forward()) so registered hooks fire.
        output = model(local_batch)
        output = model.softmax(output)
        output = torch.max(output, 1)[1]  # argmax over the class dimension
        preds.extend(output.cpu().numpy().tolist())

recall = recall_score(y_true=labels, y_pred=preds, average='weighted')
prec = precision_score(y_true=labels, y_pred=preds, average='weighted')
f1 = f1_score(y_true=labels, y_pred=preds, average='weighted')
acc = accuracy_score(labels, preds)
print("Accuracy: {}".format(acc))
print("Recall: {}\tPrecision: {}\tF1 Score: {}".format(recall, prec, f1))
print(
    classification_report(y_true=labels, y_pred=preds,
                          target_names=["No Findings", "Pneumonia"]))
# Training loop: per-epoch pass over training_generator logging accuracy/loss
# to TensorBoard, followed by a validation pass with gradients disabled.
# NOTE(review): this line was collapsed onto a single physical line by a
# formatting mishap and must be reflowed before it can run.
# NOTE(review): the validation `for` loop at the end is truncated — its body
# continues beyond this chunk, so the code is left byte-identical here.
# Step counters advance by batch_size, so the x-axis is "samples seen" — TODO confirm intent.
global_train_step = 0 global_val_step = 0 # Loop over epochs for epoch in range(max_epochs): tqdm.write("Epoch: {}".format(epoch)) progress_bar = tqdm(total=len(train_dataset), leave=True, position=0) # Training for local_batch, local_labels in training_generator: # Transfer to GPU local_batch, local_labels = local_batch.to(device), local_labels.to( device) loss, yhat = model.step(local_batch, local_labels) yhat = model.softmax(yhat) yhat = torch.max(yhat, 1)[1].cpu() acc = pred_acc(local_labels.cpu(), yhat) writer.add_scalar('Train/Accuracy', acc, global_train_step) writer.add_scalar('Train/Loss', loss, global_train_step) global_train_step += params['batch_size'] progress_bar.update(params['batch_size']) progress_bar.set_postfix(loss=loss) tqdm.write("Running validation...") # Validation y_pred = [] y_true = [] with torch.set_grad_enabled(False): for local_batch, local_labels in validation_generator: