def test_collation(self, _, transform, collate_fn, ndim):
    """Collate a batch and check image/label applied-transform flags agree."""
    data = self.data_3d if ndim == 3 else self.data_2d
    # Without a custom collate_fn, pad/crop to one size so default collation can stack items.
    modified_transform = (
        transform
        if collate_fn
        else Compose([transform, ResizeWithPadOrCropd(KEYS, 100), ToTensord(KEYS)])
    )
    # num workers = 0 for mac or gpu transforms
    num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available() else 2
    dataset = CacheDataset(data, transform=modified_transform, progress=False)
    loader = DataLoader(dataset, num_workers, batch_size=self.batch_size, collate_fn=collate_fn)
    for item in loader:
        np.testing.assert_array_equal(
            item["image_transforms"][0]["do_transforms"],
            item["label_transforms"][0]["do_transforms"],
        )
def test_fail(self):
    """Inverting with a transform that did not produce the data must fail."""
    pad = SpatialPadd("image", [10, 5])
    data = pad(self.all_data["2D"])
    # Check that error is thrown when inverse are used out of order.
    resize = ResizeWithPadOrCropd("image", [10, 5])
    with self.assertRaises(RuntimeError):
        resize.inverse(data)
def test_pad_shape(self, input_param, input_data, expected_val):
    """Check the output "img" shape for every supported array/tensor backend."""
    for backend in TEST_NDARRAYS:
        produces_tensor = isinstance(backend(0), torch.Tensor)
        special_padding = "constant_values" in input_param or input_param["mode"] == "reflect"
        # These padding options are skipped for the tensor backend.
        if produces_tensor and special_padding:
            continue
        cropper = ResizeWithPadOrCropd(**input_param)
        input_data["img"] = backend(input_data["img"])
        output = cropper(input_data)
        np.testing.assert_allclose(output["img"].shape, expected_val)
def test_collation(self, _, transform, collate_fn):
    """Smoke-test that a full pass over the loader collates without error."""
    if collate_fn:
        modified_transform = transform
    else:
        # Pad/crop to a fixed size so the default collate can stack items.
        modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, [100, 100, 100])])
    # num workers = 0 for mac
    num_workers = 0 if sys.platform == "darwin" else 2
    dataset = CacheDataset(self.data, transform=modified_transform, progress=False)
    loader = DataLoader(dataset, num_workers, batch_size=self.batch_size, collate_fn=collate_fn)
    for _ in loader:
        pass
def test_invert(self):
    """Round-trip test for ``Invertd``: apply a chain of (random) spatial
    transforms, invert the copies, and check shapes, interpolation modes and
    that the inverted label matches the original within a known diff budget.
    """
    set_determinism(seed=0)
    im_fname, seg_fname = (
        make_nifti_image(i) for i in create_test_image_3d(101, 100, 107, noise_max=100))
    transform = Compose([
        LoadImaged(KEYS),
        AddChanneld(KEYS),
        Orientationd(KEYS, "RPS"),
        Spacingd(KEYS, pixdim=(1.2, 1.01, 0.9), mode=["bilinear", "nearest"], dtype=np.float32),
        ScaleIntensityd("image", minv=1, maxv=10),
        RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]),
        RandAxisFlipd(KEYS, prob=0.5),
        RandRotate90d(KEYS, spatial_axes=(1, 2)),
        RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
        RandRotated(KEYS, prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True, dtype=np.float64),
        RandAffined(KEYS, prob=0.5, rotate_range=np.pi, mode="nearest"),
        ResizeWithPadOrCropd(KEYS, 100),
        # test EnsureTensor for complicated dict data and invert it
        CopyItemsd(PostFix.meta("image"), times=1, names="test_dict"),
        # test to support Tensor, Numpy array and dictionary when inverting
        EnsureTyped(keys=["image", "test_dict"]),
        ToTensord("image"),
        CastToTyped(KEYS, dtype=[torch.uint8, np.uint8]),
        # keep copies so the originals survive while the copies get inverted
        CopyItemsd("label", times=2, names=["label_inverted", "label_inverted1"]),
        CopyItemsd("image", times=2, names=["image_inverted", "image_inverted1"]),
    ])
    data = [{"image": im_fname, "label": seg_fname} for _ in range(12)]
    # num workers = 0 for mac or gpu transforms
    num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available(
    ) else 2
    dataset = CacheDataset(data, transform=transform, progress=False)
    loader = DataLoader(dataset, num_workers=num_workers, batch_size=5)
    inverter = Invertd(
        # `image` was not copied, invert the original value directly
        keys=["image_inverted", "label_inverted", "test_dict"],
        transform=transform,
        orig_keys=["label", "label", "test_dict"],
        meta_keys=[
            PostFix.meta("image_inverted"),
            PostFix.meta("label_inverted"), None
        ],
        orig_meta_keys=[
            PostFix.meta("label"),
            PostFix.meta("label"), None
        ],
        nearest_interp=True,
        to_tensor=[True, False, False],
        device="cpu",
    )
    inverter_1 = Invertd(
        # `image` was not copied, invert the original value directly
        keys=["image_inverted1", "label_inverted1"],
        transform=transform,
        orig_keys=["image", "image"],
        meta_keys=[
            PostFix.meta("image_inverted1"),
            PostFix.meta("label_inverted1")
        ],
        orig_meta_keys=[PostFix.meta("image"), PostFix.meta("image")],
        nearest_interp=[True, False],
        to_tensor=[True, True],
        device="cpu",
    )
    expected_keys = [
        "image",
        "image_inverted",
        "image_inverted1",
        PostFix.meta("image_inverted1"),
        PostFix.meta("image_inverted"),
        PostFix.meta("image"),
        "image_transforms",
        "label",
        "label_inverted",
        "label_inverted1",
        PostFix.meta("label_inverted1"),
        PostFix.meta("label_inverted"),
        PostFix.meta("label"),
        "label_transforms",
        "test_dict",
        "test_dict_transforms",
    ]
    # execute 1 epoch
    for d in loader:
        d = decollate_batch(d)
        for item in d:
            item = inverter(item)
            item = inverter_1(item)
            self.assertListEqual(sorted(item), expected_keys)
            self.assertTupleEqual(item["image"].shape[1:], (100, 100, 100))
            self.assertTupleEqual(item["label"].shape[1:], (100, 100, 100))
            # check the nearest interpolation mode
            i = item["image_inverted"]
            torch.testing.assert_allclose(
                i.to(torch.uint8).to(torch.float), i.to(torch.float))
            self.assertTupleEqual(i.shape[1:], (100, 101, 107))
            i = item["label_inverted"]
            torch.testing.assert_allclose(
                i.to(torch.uint8).to(torch.float), i.to(torch.float))
            self.assertTupleEqual(i.shape[1:], (100, 101, 107))
            # test inverted test_dict
            self.assertTrue(
                isinstance(item["test_dict"]["affine"], np.ndarray))
            self.assertTrue(
                isinstance(item["test_dict"]["filename_or_obj"], str))
            # check the case that different items use different interpolation mode to invert transforms
            d = item["image_inverted1"]
            # if the interpolation mode is nearest, accumulated diff should be smaller than 1
            self.assertLess(
                torch.sum(
                    d.to(torch.float) -
                    d.to(torch.uint8).to(torch.float)).item(), 1.0)
            self.assertTupleEqual(d.shape, (1, 100, 101, 107))
            d = item["label_inverted1"]
            # if the interpolation mode is not nearest, accumulated diff should be greater than 10000
            self.assertGreater(
                torch.sum(
                    d.to(torch.float) -
                    d.to(torch.uint8).to(torch.float)).item(), 10000.0)
            self.assertTupleEqual(d.shape, (1, 100, 101, 107))
    # check labels match (uses `item` from the last loop iteration)
    reverted = item["label_inverted"].detach().cpu().numpy().astype(
        np.int32)
    original = LoadImaged(KEYS)(data[-1])["label"]
    n_good = np.sum(np.isclose(reverted, original, atol=1e-3))
    reverted_name = item[PostFix.meta("label_inverted")]["filename_or_obj"]
    original_name = data[-1]["label"]
    self.assertEqual(reverted_name, original_name)
    print("invert diff", reverted.size - n_good)
    # 25300: 2 workers (cpu, non-macos)
    # 1812: 0 workers (gpu or macos)
    # 1821: windows torch 1.10.0
    # NOTE(review): the accepted set below contains 34007 rather than the
    # 25300 mentioned above — confirm which environment produces 34007.
    self.assertTrue((reverted.size - n_good) in (34007, 1812, 1821),
                    f"diff. {reverted.size - n_good}")
    set_determinism(seed=None)
# Register inversion test cases. Each case is a 4-tuple:
# (case name, "2D"/"3D" data tag, an integer flag, transform instance).
# NOTE(review): "Flipd 3d" is registered twice — confirm the duplication is intentional.
for _case in (
    ("CenterSpatialCropd 3d", "3D", 0, CenterSpatialCropd(KEYS, roi_size=[95, 97, 98])),
    ("CropForegroundd 2d", "2D", 0, CropForegroundd(KEYS, source_key="label", margin=2)),
    ("CropForegroundd 3d", "3D", 0, CropForegroundd(KEYS, source_key="label")),
    ("ResizeWithPadOrCropd 3d", "3D", 0, ResizeWithPadOrCropd(KEYS, [201, 150, 105])),
    ("Flipd 3d", "3D", 0, Flipd(KEYS, [1, 2])),
    ("Flipd 3d", "3D", 0, Flipd(KEYS, [1, 2])),
):
    TESTS.append(_case)
def test_invert(self):
    """End-to-end test of the ``TransformInverter`` handler attached to an
    ignite ``Engine``: runs one epoch, then checks shapes, interpolation
    behavior and that the inverted label matches the original file.
    """
    set_determinism(seed=0)
    im_fname, seg_fname = [
        make_nifti_image(i)
        for i in create_test_image_3d(101, 100, 107, noise_max=100)
    ]
    transform = Compose([
        LoadImaged(KEYS),
        AddChanneld(KEYS),
        Orientationd(KEYS, "RPS"),
        Spacingd(KEYS, pixdim=(1.2, 1.01, 0.9), mode=["bilinear", "nearest"], dtype=np.float32),
        ScaleIntensityd("image", minv=1, maxv=10),
        RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]),
        RandAxisFlipd(KEYS, prob=0.5),
        RandRotate90d(KEYS, spatial_axes=(1, 2)),
        RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
        RandRotated(KEYS, prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True),
        RandAffined(KEYS, prob=0.5, rotate_range=np.pi, mode="nearest"),
        ResizeWithPadOrCropd(KEYS, 100),
        # test to support both Tensor and Numpy array when inverting
        ToTensord("image"),
        CastToTyped(KEYS, dtype=[torch.uint8, np.uint8]),
    ])
    data = [{"image": im_fname, "label": seg_fname} for _ in range(12)]
    # num workers = 0 for mac or gpu transforms
    num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available(
    ) else 2
    dataset = CacheDataset(data, transform=transform, progress=False)
    loader = DataLoader(dataset, num_workers=num_workers, batch_size=5)

    # set up engine
    def _train_func(engine, batch):
        # a dummy training step: just republish the batch as engine output
        self.assertTupleEqual(batch["image"].shape[1:], (1, 100, 100, 100))
        engine.state.output = batch
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output

    engine = Engine(_train_func)
    engine.register_events(*IterationEvents)

    # set up testing handler
    TransformInverter(
        transform=transform,
        loader=loader,
        output_keys=["image", "label"],
        batch_keys="label",
        nearest_interp=True,
        postfix="inverted1",
        to_tensor=[True, False],
        device="cpu",
        num_workers=0
        if sys.platform == "darwin" or torch.cuda.is_available() else 2,
    ).attach(engine)
    # test different nearest interpolation values
    TransformInverter(
        transform=transform,
        loader=loader,
        output_keys=["image", "label"],
        batch_keys="image",
        nearest_interp=[True, False],
        post_func=[lambda x: x + 10, lambda x: x],
        postfix="inverted2",
        num_workers=0
        if sys.platform == "darwin" or torch.cuda.is_available() else 2,
    ).attach(engine)

    engine.run(loader, max_epochs=1)
    set_determinism(seed=None)
    self.assertTupleEqual(engine.state.output["image"].shape,
                          (2, 1, 100, 100, 100))
    self.assertTupleEqual(engine.state.output["label"].shape,
                          (2, 1, 100, 100, 100))
    # check the nearest inerpolation mode
    for i in engine.state.output["image_inverted1"]:
        torch.testing.assert_allclose(
            i.to(torch.uint8).to(torch.float), i.to(torch.float))
        self.assertTupleEqual(i.shape, (1, 100, 101, 107))
    for i in engine.state.output["label_inverted1"]:
        np.testing.assert_allclose(
            i.astype(np.uint8).astype(np.float32), i.astype(np.float32))
        self.assertTupleEqual(i.shape, (1, 100, 101, 107))
    # check labels match
    reverted = engine.state.output["label_inverted1"][-1].astype(np.int32)
    original = LoadImaged(KEYS)(data[-1])["label"]
    n_good = np.sum(np.isclose(reverted, original, atol=1e-3))
    reverted_name = engine.state.output["label_meta_dict"][
        "filename_or_obj"][-1]
    original_name = data[-1]["label"]
    self.assertEqual(reverted_name, original_name)
    print("invert diff", reverted.size - n_good)
    # 25300: 2 workers (cpu, non-macos)
    # 1812: 0 workers (gpu or macos)
    # 1824: torch 1.5.1
    self.assertTrue((reverted.size - n_good) in (25300, 1812, 1824),
                    "diff. in 3 possible values")
    # check the case that different items use different interpolation mode to invert transforms
    for i in engine.state.output["image_inverted2"]:
        # if the interpolation mode is nearest, accumulated diff should be smaller than 1
        self.assertLess(
            torch.sum(
                i.to(torch.float) -
                i.to(torch.uint8).to(torch.float)).item(), 1.0)
        self.assertTupleEqual(i.shape, (1, 100, 101, 107))
    for i in engine.state.output["label_inverted2"]:
        # if the interpolation mode is not nearest, accumulated diff should be greater than 10000
        self.assertGreater(
            torch.sum(
                i.to(torch.float) -
                i.to(torch.uint8).to(torch.float)).item(), 10000.0)
        self.assertTupleEqual(i.shape, (1, 100, 101, 107))
TESTS.append( ( "CenterSpatialCropd 3d", "3D", 0, CenterSpatialCropd(KEYS, roi_size=[95, 97, 98]), ) ) TESTS.append(("CropForegroundd 2d", "2D", 0, CropForegroundd(KEYS, source_key="label", margin=2))) TESTS.append(("CropForegroundd 3d", "3D", 0, CropForegroundd(KEYS, source_key="label", k_divisible=[5, 101, 2]))) TESTS.append(("ResizeWithPadOrCropd 3d", "3D", 0, ResizeWithPadOrCropd(KEYS, [201, 150, 105]))) TESTS.append( ( "Flipd 3d", "3D", 0, Flipd(KEYS, [1, 2]), ) ) TESTS.append( ( "Flipd 3d", "3D", 0,
def test_pad_shape(self, input_param, input_data, expected_val):
    """The transformed "img" must have exactly the expected shape."""
    cropper = ResizeWithPadOrCropd(**input_param)
    output = cropper(input_data)
    np.testing.assert_allclose(output["img"].shape, expected_val)
def test_invert(self):
    """Basic ``Invertd`` test on whole (non-decollated) batches: checks the
    forward shapes and that inversion restores the original spatial size.
    """
    set_determinism(seed=0)
    im_fname, seg_fname = [
        make_nifti_image(i)
        for i in create_test_image_3d(101, 100, 107, noise_max=100)
    ]
    transform = Compose([
        LoadImaged(KEYS),
        AddChanneld(KEYS),
        Orientationd(KEYS, "RPS"),
        Spacingd(KEYS, pixdim=(1.2, 1.01, 0.9), mode=["bilinear", "nearest"], dtype=np.float32),
        ScaleIntensityd("image", minv=1, maxv=10),
        RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]),
        RandAxisFlipd(KEYS, prob=0.5),
        RandRotate90d(KEYS, spatial_axes=(1, 2)),
        RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
        RandRotated(KEYS, prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True),
        RandAffined(KEYS, prob=0.5, rotate_range=np.pi, mode="nearest"),
        ResizeWithPadOrCropd(KEYS, 100),
        # test to support both Tensor and Numpy array when inverting
        ToTensord("image"),
        CastToTyped(KEYS, dtype=[torch.uint8, np.uint8]),
    ])
    data = [{"image": im_fname, "label": seg_fname} for _ in range(12)]
    # num workers = 0 for mac or gpu transforms
    num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available(
    ) else 2
    dataset = CacheDataset(data, transform=transform, progress=False)
    loader = DataLoader(dataset, num_workers=num_workers, batch_size=5)
    inverter = Invertd(
        keys=["image", "label"],
        transform=transform,
        loader=loader,
        orig_keys="label",
        nearest_interp=True,
        postfix="inverted",
        to_tensor=[True, False],
        device="cpu",
        num_workers=0
        if sys.platform == "darwin" or torch.cuda.is_available() else 2,
    )
    # execute 1 epoch
    for d in loader:
        d = inverter(d)
        # this unit test only covers basic function, test_handler_transform_inverter covers more
        self.assertTupleEqual(d["image"].shape[1:], (1, 100, 100, 100))
        self.assertTupleEqual(d["label"].shape[1:], (1, 100, 100, 100))
        # check the nearest inerpolation mode
        for i in d["image_inverted"]:
            torch.testing.assert_allclose(
                i.to(torch.uint8).to(torch.float), i.to(torch.float))
            self.assertTupleEqual(i.shape, (1, 100, 101, 107))
        for i in d["label_inverted"]:
            np.testing.assert_allclose(
                i.astype(np.uint8).astype(np.float32),
                i.astype(np.float32))
            self.assertTupleEqual(i.shape, (1, 100, 101, 107))
    set_determinism(seed=None)
def evaluta_model(test_files, model_name):
    """Evaluate a saved DenseNetASPP checkpoint on ``test_files``.

    Loads the checkpoint at ``model_name``, runs inference on the whole test
    set in a single batch, saves per-sample predicted classes to a CSV, and
    prints the confusion matrix, quadratic-weighted kappa and a classification
    report. Results are also appended to the module-level ``kappa_list`` and
    ``accuracy_list``.

    Note: the function name keeps its historical spelling because callers
    (e.g. ``training``) reference it by this name.
    """
    test_transforms = Compose(
        [
            LoadNiftid(keys=modalDataKey),
            AddChanneld(keys=modalDataKey),
            NormalizeIntensityd(keys=modalDataKey),
            ResizeWithPadOrCropd(keys=modalDataKey, spatial_size=(64, 64)),
            ConcatItemsd(keys=modalDataKey, name="inputs"),
            ToTensord(keys=["inputs"]),
        ]
    )
    device = torch.device("cpu")
    print(len(test_files))
    test_ds = monai.data.Dataset(data=test_files, transform=test_transforms)
    # batch_size=len(test_files): the whole test set is evaluated as one batch.
    # BUGFIX: pin_memory expects a bool; the original passed the `torch.device`
    # class object (always truthy) by mistake.
    test_loader = DataLoader(test_ds, batch_size=len(test_files), num_workers=2, pin_memory=True)
    model = DenseNetASPP(spatial_dims=2, in_channels=2, out_channels=5).to(device)
    checkpoint = torch.load(model_name)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    with torch.no_grad():
        saver = CSVSaver(
            output_dir="../result/GLeason/2d_output/",
            filename=os.path.basename(model_name).split('.')[0] + '.csv',
        )
        for test_data in test_loader:
            test_images, test_labels = test_data["inputs"].to(device), test_data["label"].to(device)
            pred = model(test_images)
            # sigmoid is monotonic, so argmax over probabilities equals argmax over logits
            probabilities = torch.sigmoid(pred)
            predicted_classes = probabilities.argmax(dim=1)  # hoisted: reused by every metric below
            saver.save_batch(predicted_classes, test_data["dwiImg_meta_dict"])
        saver.finalize()
        # The metrics below use variables from the last loop iteration; this is
        # valid because the loader yields exactly one batch (see batch_size above).
        cm = confusion_matrix(test_labels, predicted_classes)
        kappa_value = cohen_kappa_score(test_labels, predicted_classes, weights='quadratic')
        print('quadratic weighted kappa=' + str(kappa_value))
        kappa_list.append(kappa_value)
        plot_confusion_matrix(cm, 'confusion_matrix.png', title='confusion matrix')
        from sklearn.metrics import classification_report
        print(classification_report(test_labels, predicted_classes, digits=4))
        accuracy_list.append(
            classification_report(test_labels, predicted_classes, digits=4, output_dict=True)["accuracy"]
        )
def training(train_files, val_files, log_dir):
    """Train DenseNetASPP on ``train_files``, validating on ``val_files``.

    Resumes from the checkpoint at ``log_dir`` when it exists, saves the best
    model (by quadratic-weighted kappa) back to ``log_dir``, logs to
    TensorBoard, plots loss/metric curves, and finally evaluates the best
    checkpoint via ``evaluta_model``. Uses the module-level ``device``.
    """
    print(log_dir)
    # Training pipeline: normalize, pad/crop to 64x64, concat modalities, then augment.
    train_transforms = Compose(
        [
            LoadNiftid(keys=modalDataKey),
            AddChanneld(keys=modalDataKey),
            NormalizeIntensityd(keys=modalDataKey),
            ResizeWithPadOrCropd(keys=modalDataKey, spatial_size=(64, 64)),
            ConcatItemsd(keys=modalDataKey, name="inputs"),
            RandRotate90d(keys=["inputs"], prob=0.8, spatial_axes=[0, 1]),
            RandAffined(keys=["inputs"], prob=0.8, scale_range=[0.1, 0.5]),
            RandZoomd(keys=["inputs"], prob=0.8, max_zoom=1.5, min_zoom=0.5),
            ToTensord(keys=["inputs"]),
        ]
    )
    # Validation pipeline: same preprocessing, no augmentation.
    val_transforms = Compose(
        [
            LoadNiftid(keys=modalDataKey),
            AddChanneld(keys=modalDataKey),
            NormalizeIntensityd(keys=modalDataKey),
            ResizeWithPadOrCropd(keys=modalDataKey, spatial_size=(64, 64)),
            ConcatItemsd(keys=modalDataKey, name="inputs"),
            ToTensord(keys=["inputs"]),
        ]
    )
    lr = 0.01
    batch_size = 256

    # Peek at one batch to estimate class weights from its labels.
    # BUGFIX (x3): pin_memory expects a bool; the original passed the
    # `torch.device` class object (always truthy) by mistake.
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=batch_size, num_workers=2, pin_memory=True)
    check_data = monai.utils.misc.first(check_loader)
    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=batch_size, num_workers=2, pin_memory=True)

    model = DenseNetASPP(spatial_dims=2, in_channels=2, out_channels=5).to(device)
    classes = np.array([0, 1, 2, 3, 4])
    # NOTE: weights are estimated from the first batch only, not the full set.
    # BUGFIX: use keyword arguments — positional form was removed in modern scikit-learn.
    class_weights = class_weight.compute_class_weight(
        class_weight='balanced', classes=classes, y=check_data["label"].numpy()
    )
    class_weights_tensor = torch.Tensor(class_weights).to(device)
    loss_function = torch.nn.CrossEntropyLoss(weight=class_weights_tensor)
    optimizer = torch.optim.Adam(model.parameters(), lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 50, gamma=0.5, last_epoch=-1)

    # If a saved checkpoint exists, load it and resume training from there.
    if os.path.exists(log_dir):
        checkpoint = torch.load(log_dir)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        print('加载 epoch {} 成功!'.format(start_epoch))
    else:
        start_epoch = 0
        print('无保存模型,将从头开始训练!')

    # start a typical PyTorch training
    epoch_num = 300
    val_interval = 2
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    # BUGFIX: global-step scale for TensorBoard. The original divided the
    # number of batches by the batch size (len(train_loader) // batch_size),
    # which is wrong (typically 0); the epoch length in steps is
    # dataset_size // batch_size. Also hoisted out of the loop (invariant).
    epoch_len = len(train_ds) // train_loader.batch_size
    for epoch in range(start_epoch + 1, epoch_num):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["inputs"].to(device), batch_data["label"].to(device)
            outputs = model(inputs)
            loss = loss_function(outputs, labels.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print(f"{step}/{len(train_loader)}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        scheduler.step()
        print(epoch, 'lr={:.6f}'.format(scheduler.get_last_lr()[0]))
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                # accumulate predictions/labels over the whole validation set
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data["inputs"].to(device), val_data["label"].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                kappa_value = cohen_kappa_score(
                    y.to("cpu"), y_pred.argmax(dim=1).to("cpu"), weights='quadratic'
                )
                metric_values.append(kappa_value)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if kappa_value > best_metric:
                    best_metric = kappa_value
                    best_metric_epoch = epoch + 1
                    checkpoint = {'model': model.state_dict(),
                                  'optimizer': optimizer.state_dict(),
                                  'epoch': epoch
                                  }
                    torch.save(checkpoint, log_dir)
                    print("saved new best metric model")
                print(
                    "current epoch: {} current Kappa: {:.4f} current accuracy: {:.4f} best Kappa: {:.4f} at epoch {}".format(
                        epoch + 1, kappa_value, acc_metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_accuracy", acc_metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()

    # Plot training loss and the validation metric curves side by side.
    plt.figure('train', (12, 6))
    plt.subplot(1, 2, 1)
    plt.title("Epoch Average Loss")
    x = [i + 1 for i in range(len(epoch_loss_values))]
    y = epoch_loss_values
    plt.xlabel('epoch')
    plt.plot(x, y)
    plt.subplot(1, 2, 2)
    plt.title("Validation: Area under the ROC curve")
    x = [val_interval * (i + 1) for i in range(len(metric_values))]
    y = metric_values
    plt.xlabel('epoch')
    plt.plot(x, y)
    plt.show()
    # Evaluate the best checkpoint that was just saved to log_dir.
    evaluta_model(val_files, log_dir)
def test_invert(self):
    """Basic ``TransformInverter`` handler test: run one epoch through an
    ignite ``Engine``, then check batch shapes, inverted shapes and that the
    inverted label matches the original file within a known diff budget.
    """
    set_determinism(seed=0)
    im_fname, seg_fname = [
        make_nifti_image(i)
        for i in create_test_image_3d(101, 100, 107, noise_max=100)
    ]
    transform = Compose([
        LoadImaged(KEYS),
        AddChanneld(KEYS),
        Orientationd(KEYS, "RPS"),
        Spacingd(KEYS, pixdim=(1.2, 1.01, 0.9), mode=["bilinear", "nearest"], dtype=np.float32),
        ScaleIntensityd("image", minv=1, maxv=10),
        RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]),
        RandAxisFlipd(KEYS, prob=0.5),
        RandRotate90d(KEYS, spatial_axes=(1, 2)),
        RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
        RandRotated(KEYS, prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True),
        RandAffined(KEYS, prob=0.5, rotate_range=np.pi, mode="nearest"),
        ResizeWithPadOrCropd(KEYS, 100),
        ToTensord(KEYS),
        CastToTyped(KEYS, dtype=torch.uint8),
    ])
    data = [{"image": im_fname, "label": seg_fname} for _ in range(12)]
    # num workers = 0 for mac or gpu transforms
    num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available(
    ) else 2
    dataset = CacheDataset(data, transform=transform, progress=False)
    loader = DataLoader(dataset, num_workers=num_workers, batch_size=5)

    # set up engine
    def _train_func(engine, batch):
        # a dummy training step: just republish the batch as engine output
        self.assertTupleEqual(batch["image"].shape[1:], (1, 100, 100, 100))
        engine.state.output = batch
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output

    engine = Engine(_train_func)
    engine.register_events(*IterationEvents)

    # set up testing handler
    TransformInverter(
        transform=transform,
        loader=loader,
        output_keys=["image", "label"],
        batch_keys="label",
        nearest_interp=True,
        num_workers=0
        if sys.platform == "darwin" or torch.cuda.is_available() else 2,
    ).attach(engine)

    engine.run(loader, max_epochs=1)
    set_determinism(seed=None)
    self.assertTupleEqual(engine.state.output["image"].shape,
                          (2, 1, 100, 100, 100))
    self.assertTupleEqual(engine.state.output["label"].shape,
                          (2, 1, 100, 100, 100))
    # inverted data were cast to uint8, so uint8 round-trip must be lossless
    for i in engine.state.output["image_inverted"] + engine.state.output[
            "label_inverted"]:
        torch.testing.assert_allclose(
            i.to(torch.uint8).to(torch.float), i.to(torch.float))
        self.assertTupleEqual(i.shape, (1, 100, 101, 107))
    # check labels match
    reverted = engine.state.output["label_inverted"][-1].detach().cpu(
    ).numpy()[0].astype(np.int32)
    original = LoadImaged(KEYS)(data[-1])["label"]
    n_good = np.sum(np.isclose(reverted, original, atol=1e-3))
    reverted_name = engine.state.output["label_meta_dict"][
        "filename_or_obj"][-1]
    original_name = data[-1]["label"]
    self.assertEqual(reverted_name, original_name)
    print("invert diff", reverted.size - n_good)
    # 25300: 2 workers (cpu, non-macos); 1812: 0 workers (gpu or macos)
    self.assertTrue((reverted.size - n_good) in (25300, 1812),
                    "diff. in two possible values")