def compute(self):
    _prediction_tensor = torch.cat(self._predictions, dim=0)
    _target_tensor = torch.cat(self._targets, dim=0)
    if dist.is_available() and dist.is_initialized() and not self._is_reduced:
        # create placeholder to collect the data from all processes:
        output = [torch.zeros_like(_prediction_tensor) for _ in range(dist.get_world_size())]
        dist.all_gather(output, _prediction_tensor)
        _prediction_tensor = torch.cat(output, dim=0)
        output = [torch.zeros_like(_target_tensor) for _ in range(dist.get_world_size())]
        dist.all_gather(output, _target_tensor)
        _target_tensor = torch.cat(output, dim=0)
        self._is_reduced = True
    return compute_roc_auc(
        y_pred=_prediction_tensor,
        y=_target_tensor,
        to_onehot_y=self.to_onehot_y,
        softmax=self.softmax,
        other_act=self.other_act,
        average=self.average,
    )
def _compute_fn(pred, label):
    return compute_roc_auc(
        y_pred=pred,
        y=label,
        to_onehot_y=to_onehot_y,
        softmax=softmax,
        other_act=other_act,
        average=Average(average),
    )
def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value):
    y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)])
    y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot)])
    y_pred = torch.stack([y_pred_trans(i) for i in decollate_batch(y_pred)], dim=0)
    y = torch.stack([y_trans(i) for i in decollate_batch(y)], dim=0)
    result = compute_roc_auc(y_pred=y_pred, y=y, average=average)
    np.testing.assert_allclose(expected_value, result, rtol=1e-5)
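# For context: MONAI's decollate_batch splits a batched tensor into a list of
# per-sample tensors so each item can be post-processed independently, as in
# the test above. A minimal sketch (toy shapes, not from the original test file):
import torch
from monai.data import decollate_batch

batch = torch.rand(2, 3)        # 2 samples, 3 class scores each
items = decollate_batch(batch)  # -> list of 2 tensors, each of shape (3,)
assert len(items) == 2 and items[0].shape == (3,)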
def compute(self):
    _prediction_tensor = torch.cat(self._predictions, dim=0)
    _target_tensor = torch.cat(self._targets, dim=0)
    return compute_roc_auc(
        y_pred=_prediction_tensor,
        y=_target_tensor,
        to_onehot_y=self.to_onehot_y,
        softmax=self.softmax,
        other_act=self.other_act,
        average=self.average,
    )
def compute(self):
    _prediction_tensor = torch.cat(self._predictions, dim=0)
    _target_tensor = torch.cat(self._targets, dim=0)
    if dist.is_available() and dist.is_initialized() and not self._is_reduced:
        _prediction_tensor = all_gather(_prediction_tensor)
        _target_tensor = all_gather(_target_tensor)
        self._is_reduced = True
    return compute_roc_auc(
        y_pred=_prediction_tensor,
        y=_target_tensor,
        to_onehot_y=self.to_onehot_y,
        softmax=self.softmax,
        other_act=self.other_act,
        average=self.average,
    )
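# The all_gather helper used above is not shown in this excerpt. A minimal
# sketch consistent with the explicit gather in the first snippet follows
# (requires an initialized process group); MONAI ships a comparable helper,
# monai.utils.evenly_divisible_all_gather, that also handles uneven batch
# sizes across ranks.
import torch
import torch.distributed as dist

def all_gather(tensor: torch.Tensor) -> torch.Tensor:
    """Gather `tensor` from every process and concatenate along dim 0."""
    output = [torch.zeros_like(tensor) for _ in range(dist.get_world_size())]
    dist.all_gather(output, tensor)
    return torch.cat(output, dim=0)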
def run_eval(model, val_dataloader, cfg, writer, epoch):
    model.eval()
    torch.set_grad_enabled(False)

    # store information for evaluation
    val_losses = []
    if cfg.compute_auc is True:
        val_preds = []
        val_targets = []

    for batch in val_dataloader:
        batch = cfg.to_device_transform(batch)
        if cfg.mixed_precision:
            with autocast():
                output = model(batch)
        else:
            output = model(batch)
        val_losses += [output["loss"]]
        if cfg.compute_auc is True:
            val_preds += [output["logits"].sigmoid()]
            val_targets += [batch["target"]]

    val_losses = torch.stack(val_losses)
    val_losses = val_losses.cpu().numpy()
    val_loss = np.mean(val_losses)

    if cfg.compute_auc is True:
        val_preds = torch.cat(val_preds)
        val_targets = torch.cat(val_targets)
        val_preds = val_preds.cpu().numpy().astype(np.float32)
        val_targets = val_targets.cpu().numpy().astype(np.float32)
        avg_auc = compute_roc_auc(val_preds, val_targets, average="macro")
        writer.add_scalar("val_avg_auc", avg_auc, epoch)

    writer.add_scalar("val_loss", val_loss, epoch)
    return val_loss
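# Because val_preds/val_targets are plain NumPy arrays by the time the AUC is
# computed above, the macro average can be cross-checked against scikit-learn.
# A sketch with hypothetical toy data (values are illustrative only):
import numpy as np
from sklearn.metrics import roc_auc_score

val_preds = np.array([[0.9, 0.2], [0.1, 0.8], [0.7, 0.6], [0.3, 0.4]], dtype=np.float32)
val_targets = np.array([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=np.float32)

# "macro": compute the AUC per label, then take the unweighted mean
avg_auc = roc_auc_score(val_targets, val_preds, average="macro")
print(avg_auc)  # 1.0 for this perfectly separable toy case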
def compute(self):
    _prediction_tensor = torch.cat(self._predictions, dim=0)
    _target_tensor = torch.cat(self._targets, dim=0)
    return compute_roc_auc(_prediction_tensor, _target_tensor, self.to_onehot_y,
                           self.softmax, self.average)
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    train_files = [{"img": img, "label": label} for img, label in zip(images[:10], labels[:10])]
    val_files = [{"img": img, "label": label} for img, label in zip(images[-10:], labels[-10:])]

    # define transforms for image
    train_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
        ToTensord(keys=["img"]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        ToTensord(keys=["img"]),
    ])

    # define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4,
                              pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4,
                              pin_memory=torch.cuda.is_available())

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4,
                            pin_memory=torch.cuda.is_available())

    # create DenseNet121, CrossEntropyLoss and Adam optimizer
    device = torch.device("cuda:0")
    model = monai.networks.nets.densenet.densenet121(
        spatial_dims=3,
        in_channels=1,
        out_channels=2,
    ).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)

    # start a typical PyTorch training
    val_interval = 2
    best_metric = -1
    best_metric_epoch = -1
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["label"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, add_softmax=True)
                if acc_metric > best_metric:
                    best_metric = acc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch))
                writer.add_scalar("val_accuracy", acc_metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()
def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", num_workers=10):
    monai.config.print_config()
    # define transforms for image and classification
    train_transforms = Compose([
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
    ])
    train_transforms.set_random_state(1234)
    val_transforms = Compose([LoadPNG(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()])

    # create train, val data loaders
    train_ds = MedNISTDataset(train_x, train_y, train_transforms)
    train_loader = DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=num_workers)
    val_ds = MedNISTDataset(val_x, val_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)

    model = densenet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)
    epoch_num = 4
    val_interval = 1

    # start training validation
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    for epoch in range(epoch_num):
        print("-" * 10)
        print(f"Epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:0.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)
                metric_values.append(auc_metric)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if auc_metric > best_metric:
                    best_metric = auc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), model_filename)
                    print("saved new best metric model")
                print(
                    f"current epoch {epoch + 1} current AUC: {auc_metric:0.4f} "
                    f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}"
                )
    print(f"train completed, best_metric: {best_metric:0.4f} at epoch: {best_metric_epoch}")
    return epoch_loss_values, best_metric, best_metric_epoch
def compute(self):
    return compute_roc_auc(self._predictions, self._targets, self.to_onehot_y,
                           self.add_softmax, self.average)
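# Note: this snippet uses the older add_softmax keyword, while other snippets
# in this collection pass softmax= (and other_act=) instead, suggesting the
# argument was renamed between library versions. A hedged equivalence sketch
# (y_pred and y are illustrative placeholders):
auc = compute_roc_auc(y_pred, y, to_onehot_y=True, add_softmax=True)  # older keyword
auc = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)      # later keyword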
def engine(loader: Any, checkpoint: Dict[str, Any], batchsize: int, classes: int,
           reg_args: Any, is_train: bool):
    overall_loss = []
    all_preds = torch.zeros((0, classes)).cuda()
    all_labels = torch.zeros((0, classes)).cuda()
    start = time.time()
    sigmoid = torch.nn.Sigmoid()
    with torch.set_grad_enabled(is_train):
        for iter_num, data in enumerate(loader):
            imgs = data[0].cuda().float()
            labels = data[1].cuda().float()
            predicted = checkpoint['model'](imgs)
            loss = checkpoint['criterion'](predicted, labels)
            if is_train:
                loss.backward()
                checkpoint['optimizer'].step()
                checkpoint['optimizer'].zero_grad()
            overall_loss.append(float(loss.item()))
            all_preds = torch.cat((predicted.detach(), all_preds))
            all_labels = torch.cat((labels.detach(), all_labels))
            speed = batchsize * iter_num // (time.time() - start)
            print('Epoch:', checkpoint['epoch'], 'Iter:', iter_num,
                  'Running loss:', round(np.mean(overall_loss), 3),
                  'Speed:', int(speed), 'img/s', end='\r', flush=True)
    loss = np.mean(overall_loss)
    if reg_args is None:
        rmetric = compute_roc_auc(all_preds, all_labels, other_act=sigmoid)
        sens = compute_confusion_metric(all_preds, all_labels, activation=sigmoid,
                                        metric_name='sensitivity')
        spec = compute_confusion_metric(all_preds, all_labels, activation=sigmoid,
                                        metric_name='specificity')
        summary = (f'Epoch Summary- Loss:{round(loss, 3)} ROC:{round(rmetric * 100, 1)} '
                   + f'Sensitivity:{round(100 * sens, 1)} Specificity: {round(100 * spec, 1)}')
    else:
        error_range = reg_args['error_range']
        all_labels = [((x * reg_args['max']) + reg_args['min']).item() for x in all_labels]
        all_preds = [((x * reg_args['max']) + reg_args['min']).item() for x in all_preds]
        rmetric = r2_score(all_labels, all_preds)
        a1 = regression_accuracy(all_labels, all_preds, error_range)
        # doubled tolerance, matching the "Accuracy at {error_range * 2}" summary line
        a2 = regression_accuracy(all_labels, all_preds, error_range * 2)
        summary = (f'Epoch Summary- Loss:{round(loss, 3)} R2:{round(rmetric, 1)} '
                   + f'Accuracy at {error_range}:{round(100 * a1, 1)} '
                   + f'Accuracy at {(error_range * 2)}:{round(100 * a2, 1)}')
    print(summary)
    if reg_args is None:
        # sens/spec only exist in the classification branch
        with open('/mnt/out/summary.txt', 'w') as f:
            f.write('Loss:{}\nROC:{}\nSensitivity:{}\nSpecificity:{}'.format(
                round(loss, 3), round(rmetric * 100, 1), round(100 * sens, 1),
                round(100 * spec, 1)))
    return loss, rmetric, summary
if (epoch + 1) % val_interval == 0:
    model.eval()
    with torch.no_grad():
        y_pred = torch.tensor([], dtype=torch.float32, device=device)
        y = torch.tensor([], dtype=torch.long, device=device)
        for val_data in val_loader:
            val_images, val_labels = (
                val_data[0].to(device),
                val_data[1].to(device),
            )
            y_pred = torch.cat([y_pred, model(val_images)], dim=0)
            y = torch.cat([y, val_labels], dim=0)
        y_onehot = to_onehot(y)
        y_pred_act = act(y_pred)
        auc_metric = compute_roc_auc(y_pred_act, y_onehot)
        del y_pred_act, y_onehot
        metric_values.append(auc_metric)
        acc_value = torch.eq(y_pred.argmax(dim=1), y)
        acc_metric = acc_value.sum().item() / len(acc_value)
        if auc_metric > best_metric:
            best_metric = auc_metric
            best_metric_epoch = epoch + 1
            torch.save(model.state_dict(), os.path.join(root_dir, "best_metric_model.pth"))
            print("saved new best metric model")
        print(
            f"current epoch: {epoch + 1} current AUC: {auc_metric:.4f}"
            f" current accuracy: {acc_metric:.4f}"
            f" best AUC: {best_metric:.4f}"
            f" at epoch: {best_metric_epoch}")
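# `act` and `to_onehot` are defined outside this excerpt. Judging by the other
# snippets in this collection, they are MONAI post-transforms; a sketch
# (num_class is a hypothetical placeholder for the number of classes):
from monai.transforms import Activations, AsDiscrete

act = Activations(softmax=True)                              # logits -> class probabilities
to_onehot = AsDiscrete(to_onehot=True, n_classes=num_class)  # integer labels -> one-hot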
def engine(loader: Any, checkpoint: Dict[str, Any], batchsize: int, classes: int,
           variable_type: str, error_range: int, is_train: bool):
    overall_loss = []
    all_preds = torch.zeros((0, classes))
    all_labels = torch.zeros((0, classes))
    labels_onehot = torch.FloatTensor(batchsize, classes).cuda()
    start = time.time()
    sigmoid = torch.nn.Sigmoid()
    with torch.set_grad_enabled(is_train):
        for iter_num, data in enumerate(loader):
            # name = data[0]
            imgs = data[1].cuda().float()
            labels = data[2].cuda()
            predicted = checkpoint['model'](imgs)
            loss = checkpoint['criterion'](predicted, labels)
            predicted, labels = predicted.detach(), labels.detach()
            if is_train:
                loss.backward()
                checkpoint['optimizer'].step()
                checkpoint['optimizer'].zero_grad()
            overall_loss.append(float(loss.item()))
            all_preds = torch.cat((predicted, all_preds))
            if variable_type == 'categorical':
                if labels_onehot.shape[0] != labels.shape[0]:
                    labels_onehot = torch.FloatTensor(labels.shape[0], classes).cuda()
                labels_onehot.zero_()
                labels_onehot.scatter_(1, labels.unsqueeze(dim=1), 1)
                all_labels = torch.cat((labels_onehot.float(), all_labels))
                predicted = predicted.max(dim=1)[1]  # for correct printing
            else:
                all_labels = torch.cat((labels, all_labels))
            speed = batchsize * iter_num // (time.time() - start)
            print('Epoch:', checkpoint['epoch'], 'Iter:', iter_num,
                  'Pred:', round(predicted.float().mean().item(), 3),
                  'Label:', round(labels.float().mean().item(), 3),
                  'Loss:', round(np.mean(overall_loss), 3),
                  'Speed:', int(speed), 'img/s', end='\r', flush=True)
    loss = np.mean(overall_loss)
    if variable_type == 'continous':
        all_labels, all_preds = all_labels.cpu(), all_preds.cpu()
        rmetric = r2_score(all_labels, all_preds)
        acc = regression_accuracy(all_labels, all_preds, error_range)
        spear, pvalue = spearmanr(all_preds, all_labels)
        summary = (f'Epoch Summary - Loss:{round(loss, 3)} Spearman:{round(spear, 2)} '
                   + f'PValue:{round(pvalue, 3)} '
                   + f'R2:{round(rmetric, 1)} Accuracy(at {error_range}):{round(100 * acc, 1)}')
    else:
        rmetric = compute_roc_auc(all_preds, all_labels, other_act=sigmoid)
        sens = compute_confusion_metric(all_preds, all_labels, activation=sigmoid,
                                        metric_name='sensitivity')
        spec = compute_confusion_metric(all_preds, all_labels, activation=sigmoid,
                                        metric_name='specificity')
        summary = (f'Epoch Summary- Loss:{round(loss, 3)} ROC:{round(rmetric * 100, 1)} '
                   + f'Sensitivity:{round(100 * sens, 1)} Specificity: {round(100 * spec, 1)}')
    print(summary)
    return loss, rmetric, summary
def train(train_loader, optimizer, loss_function, epoch_num, model_name, output_dir="."):
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    for epoch in range(epoch_num):
        print('-' * 10)
        print(f"epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print(f"{step}/{len(train_ds) // train_loader.batch_size}, train_loss: {loss.item():.4f}")
            epoch_len = len(train_ds) // train_loader.batch_size
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)
                metric_values.append(auc_metric)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if auc_metric > best_metric:
                    best_metric = auc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), os.path.join(output_dir, model_name))
                    print('saved new best metric model')
                print(
                    f"current epoch: {epoch + 1} current AUC: {auc_metric:.4f}"
                    f" current accuracy: {acc_metric:.4f} best AUC: {best_metric:.4f}"
                    f" at epoch: {best_metric_epoch}")
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    return epoch_loss_values, metric_values
def test_value(self, input_data, expected_value):
    result = compute_roc_auc(**input_data)
    np.testing.assert_allclose(expected_value, result, rtol=1e-5)
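# A hypothetical parameter set for this parameterized test (names and values
# are illustrative, not taken from the original test file):
import torch

input_data = {
    "y_pred": torch.tensor([[0.1, 0.9], [0.8, 0.2]]),  # per-class scores
    "y": torch.tensor([[0, 1], [1, 0]]),               # one-hot targets
}
expected_value = 1.0  # both classes are perfectly ranked, so the macro AUC is 1.0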
if (epoch + 1) % val_interval == 0:
    model.eval()
    with torch.no_grad():
        y_pred = torch.tensor([], dtype=torch.float32, device=device)
        y = torch.tensor([], dtype=torch.long, device=device)
        for val_data in val_loader:
            val_images, val_labels = val_data['img'].to(device), val_data['label'].to(device)
            y_pred = torch.cat([y_pred, model(val_images)], dim=0)
            y = torch.cat([y, val_labels], dim=0)
        acc_value = torch.eq(y_pred.argmax(dim=1), y)
        acc_metric = acc_value.sum().item() / len(acc_value)
        auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, add_softmax=True)
        if acc_metric > best_metric:
            best_metric = acc_metric
            best_metric_epoch = epoch + 1
            torch.save(model.state_dict(), 'best_metric_model.pth')
            print('saved new best metric model')
        print("current epoch %d current accuracy: %0.4f current AUC: %0.4f best accuracy: %0.4f at epoch %d"
              % (epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch))
        writer.add_scalar('val_accuracy', acc_metric, epoch + 1)
print('train completed, best_metric: %0.4f at epoch: %d' % (best_metric, best_metric_epoch))
writer.close()
def pytorch_train(self):
    acc_scores = dict()
    auc_scores = dict()
    train_scores = dict()
    # start a typical PyTorch training
    val_interval = 5
    best_metric = -1 if self.task == "classification" else 1e8
    best_metric_epoch = -1
    writer = SummaryWriter()
    torch.save(self.model.state_dict(), self.saved_model_dict)  # ADDED FOR SMALL EPOCH STUFF
    for epoch in range(self.epochs):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{self.epochs}")
        self.model.train()
        epoch_loss = 0
        step = 0
        for batch_data in self.train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(self.device), batch_data["label"].to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            if self.pytorch_version == 1:
                # note: dim=0 normalizes across the batch; dim=1 would normalize across classes
                outputs = torch.nn.functional.softmax(outputs, dim=0)
            if self.task == "classification":
                loss = self.loss_function(outputs, labels)
            else:
                loss = self.loss_function(outputs, labels.view(-1, 1).float())
            loss.backward()
            self.optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(self.train_ds) // self.train_loader.batch_size
            if step % 3 == 0:
                print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        train_scores[epoch] = epoch_loss
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            self.model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=self.device)
                y = torch.tensor([], dtype=torch.long, device=self.device)
                real = []
                predicted = []
                for val_data in self.val_loader:
                    val_images, val_labels = val_data["img"].to(self.device), val_data["label"].to(self.device)
                    y_pred = torch.cat([y_pred, self.model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                    if self.task == "regression":
                        real.append(val_labels.cpu().numpy())
                        predicted.append(self.model(val_images).argmax(dim=1).cpu().numpy())
                if self.task == "classification":
                    acc_value = torch.eq(y_pred.argmax(dim=1), y)
                    acc_metric = acc_value.sum().item() / len(acc_value)
                    if self.model_type == 1:
                        auc_metric = 0
                    else:
                        auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)
                else:
                    acc_metric = mean_squared_error(self.flatten_list(real), self.flatten_list(predicted))
                    auc_metric = 0
                acc_scores[epoch] = acc_metric
                auc_scores[epoch] = auc_metric
                if (acc_metric >= best_metric and self.task == "classification") or (
                        acc_metric <= best_metric and self.task == "regression"):
                    best_metric = acc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(self.model.state_dict(), self.saved_model_dict)
                print(
                    "current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch))
                print("ACC SCORES: ", acc_scores)
                print("AUC SCORES: ", auc_scores)
                print("EPOCH LOSSES: ", train_scores)
                writer.add_scalar("val_accuracy", acc_metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()
    print(acc_scores, "\n", auc_scores)
def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value):
    y_pred = Activations(softmax=softmax)(y_pred)
    y = AsDiscrete(to_onehot=to_onehot, n_classes=2)(y)
    result = compute_roc_auc(y_pred=y_pred, y=y, average=average)
    np.testing.assert_allclose(expected_value, result, rtol=1e-5)
def _compute_fn(pred, label):
    return compute_roc_auc(
        y_pred=pred,
        y=label,
        average=Average(average),
    )
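# Closures like _compute_fn (here and in the earlier snippet) are typically
# handed to an Ignite EpochMetric, which buffers predictions and targets over
# an epoch and applies the function once at the end. A sketch, assuming an
# Ignite evaluator engine named `evaluator` exists:
from ignite.metrics import EpochMetric

auc_metric = EpochMetric(_compute_fn, check_compute_fn=False)
auc_metric.attach(evaluator, "rocauc")  # hypothetical engine and metric name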