def __init__(
    self,
    output_dir: str = "./",
    filename: str = "predictions.csv",
    overwrite: bool = True,
    batch_transform: Callable = lambda x: x,
    output_transform: Callable = lambda x: x,
    name: Optional[str] = None,
):
    """
    Args:
        output_dir: output CSV file directory.
        filename: name of the saved CSV file name.
        overwrite: whether to overwriting existing CSV file content. If we are not overwriting,
            then we check if the results have been previously saved, and load them to the prediction_dict.
        batch_transform: a callable that is used to transform the ignite.engine.batch into expected format
            to extract the meta_data dictionary.
        output_transform: a callable that is used to transform the ignite.engine.output into the form
            expected model prediction data. The first dimension of this transform's output will be
            treated as the batch dimension. Each item in the batch will be saved individually.
        name: identifier of logging.logger to use, defaulting to `engine.logger`.
    """
    self.saver = CSVSaver(output_dir, filename, overwrite)
    self.batch_transform = batch_transform
    self.output_transform = output_transform
    # Always create a real logger object so logging before attach() never hits
    # a None logger; when `name` is None, attach() swaps in `engine.logger`.
    # (Consistent with the other ClassificationSaver implementations in this file,
    # which also call logging.getLogger(name) unconditionally.)
    self.logger = logging.getLogger(name)
    self._name = name
class ClassificationSaver:
    """
    Event handler triggered on completing every iteration to save the classification
    predictions as CSV file.
    """

    def __init__(
        self,
        output_dir: str = "./",
        filename: str = "predictions.csv",
        overwrite: bool = True,
        batch_transform: Callable = lambda x: x,
        output_transform: Callable = lambda x: x,
        name: Optional[str] = None,
    ) -> None:
        """
        Args:
            output_dir: output CSV file directory.
            filename: name of the saved CSV file name.
            overwrite: whether to overwriting existing CSV file content. If we are not overwriting,
                then we check if the results have been previously saved, and load them to the prediction_dict.
            batch_transform: a callable that is used to transform the ignite.engine.batch into expected format
                to extract the meta_data dictionary.
            output_transform: a callable that is used to transform the ignite.engine.output into the form
                expected model prediction data. The first dimension of this transform's output will be
                treated as the batch dimension. Each item in the batch will be saved individually.
            name: identifier of logging.logger to use, defaulting to `engine.logger`.
        """
        self._name = name
        self.logger = logging.getLogger(name)
        self.batch_transform = batch_transform
        self.output_transform = output_transform
        self.saver = CSVSaver(output_dir, filename, overwrite)

    def attach(self, engine: Engine) -> None:
        """
        Register this handler on every iteration and the CSV finalizer on completion.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self._name is None:
            # no explicit logger name: adopt the engine's own logger
            self.logger = engine.logger
        if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
        if not engine.has_event_handler(self.saver.finalize, Events.COMPLETED):
            engine.add_event_handler(Events.COMPLETED, lambda engine: self.saver.finalize())

    def __call__(self, engine: Engine) -> None:
        """
        This method assumes self.batch_transform will extract metadata from the input batch.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        # cache this iteration's predictions together with the extracted meta data
        self.saver.save_batch(
            self.output_transform(engine.state.output),
            self.batch_transform(engine.state.batch),
        )
def _finalize(self, _engine: Engine) -> None:
    """
    All gather classification results from ranks and save to CSV file.

    Args:
        _engine: Ignite Engine, unused argument.
    """
    ws = idist.get_world_size()
    # the target save rank must exist within the current process group
    if self.save_rank >= ws:
        raise ValueError("target save rank is greater than the distributed group size.")
    # stack the per-iteration cached outputs into a single tensor along dim 0
    outputs = torch.stack(self._outputs, dim=0)
    filenames = self._filenames
    if ws > 1:
        # multi-process run: collect outputs and filenames from all ranks
        # (these are collective calls, so every rank must reach them)
        outputs = evenly_divisible_all_gather(outputs, concat=True)
        filenames = string_list_all_gather(filenames)
    if len(filenames) == 0:
        # no filenames collected: save predictions without meta data
        meta_dict = None
    else:
        # a length mismatch is tolerated with a warning rather than an error
        if len(filenames) != len(outputs):
            warnings.warn(
                f"filenames length: {len(filenames)} doesn't match outputs length: {len(outputs)}."
            )
        meta_dict = {Key.FILENAME_OR_OBJ: filenames}
    # save to CSV file only in the expected rank
    if idist.get_rank() == self.save_rank:
        # lazily construct a CSVSaver when none was supplied at construction time
        saver = self.saver or CSVSaver(
            output_dir=self.output_dir,
            filename=self.filename,
            overwrite=self.overwrite,
            delimiter=self.delimiter,
        )
        saver.save_batch(outputs, meta_dict)
        saver.finalize()
def pytorch_eval(self):
    """
    Load the saved model weights, run inference over ``self.val_loader``, save
    per-sample predictions to ``./output`` via CSVSaver, and return the task score.

    Returns:
        ``self.binary_classification(...)`` result when ``self.task`` is
        ``"classification"``, otherwise the mean squared error of the predictions.
    """
    print("Evaluating...")
    # restore the checkpoint recorded at self.saved_model_dict
    self.model.load_state_dict(torch.load(self.saved_model_dict))
    self.model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        real = []  # ground-truth labels, one numpy array per batch
        predicted = []  # argmax predictions, one numpy array per batch
        for val_data in self.val_loader:
            # batches are dicts with "img"/"label" keys (dict-transform pipeline)
            val_images, val_labels = val_data["img"].to(
                self.device), val_data["label"].to(self.device)
            val_outputs = self.model(val_images).argmax(dim=1)
            real.append(val_labels.cpu().numpy())
            predicted.append(val_outputs.cpu().numpy())
            # running accuracy bookkeeping
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            # cache this batch's predictions for the final CSV
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        flat_real = self.flatten_list(real)
        flat_predicted = self.flatten_list(predicted)
        if self.task == "classification":
            score = self.binary_classification(flat_real, flat_predicted)
            # NOTE(review): accuracy (`metric`) is computed here but never
            # returned or printed -- confirm whether it is intentionally unused.
            metric = num_correct / metric_count
        else:
            # regression-style task: dump raw values and score with MSE
            print("REAL: ", flat_real)
            print("PRED: ", flat_predicted)
            score = mean_squared_error(flat_real, flat_predicted)
        print(score)
        # flush the cached predictions to the CSV file
        saver.finalize()
        return score
def main():
    """Run gender-classification inference on the IXI demo volumes and save predictions to CSV."""
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz',
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])

    # array-style preprocessing: rescale, add channel dim, resize, convert to tensor
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])
    val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # DenseNet121 classifier with 2 output classes
    device = torch.device('cuda:0')
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    model.load_state_dict(torch.load('best_metric_model.pth'))
    model.eval()

    with torch.no_grad():
        num_correct = 0.
        metric_count = 0
        saver = CSVSaver(output_dir='./output')
        for batch in val_loader:
            inputs, targets = batch[0].to(device), batch[1].to(device)
            predictions = model(inputs).argmax(dim=1)
            matches = torch.eq(predictions, targets)
            metric_count += len(matches)
            num_correct += matches.sum().item()
            # batch[2] holds the meta data dict produced by image_only=False
            saver.save_batch(predictions, batch[2])
        metric = num_correct / metric_count
        print('evaluation metric:', metric)
        saver.finalize()
def main():
    """Dictionary-transform version of the IXI gender-classification inference demo."""
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    ixi_names = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", n]) for n in ixi_names]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # dict-style preprocessing pipeline keyed on "img"
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # DenseNet121 classifier with 2 output classes
    device = torch.device("cuda:0")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    model.load_state_dict(torch.load("best_metric_model.pth"))
    model.eval()

    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for batch in val_loader:
            inputs, targets = batch["img"].to(device), batch["label"].to(device)
            predictions = model(inputs).argmax(dim=1)
            matches = torch.eq(predictions, targets)
            metric_count += len(matches)
            num_correct += matches.sum().item()
            # meta dict produced by LoadNiftid carries the source filenames
            saver.save_batch(predictions, batch["img_meta_dict"])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
def test_saved_content(self):
    """Save an 8-item zero batch via CSVSaver and verify every CSV row round-trips."""
    with tempfile.TemporaryDirectory() as tempdir:
        saver = CSVSaver(output_dir=tempdir, filename="predictions.csv")
        meta_data = {"filename_or_obj": ["testfile" + str(i) for i in range(8)]}
        saver.save_batch(torch.zeros(8), meta_data)
        saver.finalize()

        filepath = os.path.join(tempdir, "predictions.csv")
        self.assertTrue(os.path.exists(filepath))
        with open(filepath, "r") as f:
            rows = list(csv.reader(f))
        # one CSV row per batch item, written in order
        self.assertEqual(len(rows), 8)
        for index, row in enumerate(rows):
            self.assertEqual(row[0], "testfile" + str(index))
            self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0)
def test_saved_content(self):
    """Save an 8-item zero batch via CSVSaver and verify every CSV row round-trips."""
    # local import: tempfile may not be in this file's import block
    import tempfile

    # Use a context-managed temporary directory instead of a fixed "./tempdir":
    # the old shutil.rmtree at the end was skipped whenever an assertion failed
    # (leaking the directory), and the fixed path raced with concurrent runs.
    # This also matches the sibling test above that already uses tempfile.
    with tempfile.TemporaryDirectory() as default_dir:
        saver = CSVSaver(output_dir=default_dir, filename="predictions.csv")
        meta_data = {"filename_or_obj": ["testfile" + str(i) for i in range(8)]}
        saver.save_batch(torch.zeros(8), meta_data)
        saver.finalize()

        filepath = os.path.join(default_dir, "predictions.csv")
        self.assertTrue(os.path.exists(filepath))
        with open(filepath, "r") as f:
            reader = csv.reader(f)
            i = 0
            for row in reader:
                # one row per batch item: filename first, then the prediction value(s)
                self.assertEqual(row[0], "testfile" + str(i))
                self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0)
                i += 1
            self.assertEqual(i, 8)
def main():
    """ImageDataset-based IXI gender-classification inference demo (CPU/GPU aware)."""
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    # the path of ixi IXI-T1 dataset
    data_path = os.sep.join([".", "workspace", "data", "medical", "ixi", "IXI-T1"])
    filenames = (
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    )
    images = [os.sep.join([data_path, name]) for name in filenames]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # array-style preprocessing: rescale, add channel dim, resize, ensure tensor type
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), EnsureType()])
    val_ds = ImageDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
    # create a validation data loader
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # DenseNet121 classifier; fall back to CPU when CUDA is unavailable
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.DenseNet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    model.load_state_dict(torch.load("best_metric_model_classification3d_array.pth"))
    model.eval()

    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for batch in val_loader:
            inputs, targets = batch[0].to(device), batch[1].to(device)
            predictions = model(inputs).argmax(dim=1)
            matches = torch.eq(predictions, targets)
            metric_count += len(matches)
            num_correct += matches.sum().item()
            # batch[2] is the meta data dict produced by image_only=False
            saver.save_batch(predictions, batch[2])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
class ClassificationSaver:
    """
    Event handler triggered on completing every iteration to save the classification
    predictions as CSV file.
    If running in distributed data parallel, only saves CSV file in the specified rank.
    """

    def __init__(
        self,
        output_dir: str = "./",
        filename: str = "predictions.csv",
        overwrite: bool = True,
        batch_transform: Callable = lambda x: x,
        output_transform: Callable = lambda x: x,
        name: Optional[str] = None,
        save_rank: int = 0,
    ) -> None:
        """
        Args:
            output_dir: output CSV file directory.
            filename: name of the saved CSV file name.
            overwrite: whether to overwriting existing CSV file content. If we are not overwriting,
                then we check if the results have been previously saved, and load them to the prediction_dict.
            batch_transform: a callable that is used to transform the ignite.engine.batch into expected format
                to extract the meta_data dictionary.
            output_transform: a callable that is used to transform the ignite.engine.output into the form
                expected model prediction data. The first dimension of this transform's output will be
                treated as the batch dimension. Each item in the batch will be saved individually.
            name: identifier of logging.logger to use, defaulting to `engine.logger`.
            save_rank: only the handler on specified rank will save to CSV file in multi-gpus
                validation, default to 0.
        """
        # the rank test is evaluated once at construction time
        self._expected_rank: bool = idist.get_rank() == save_rank
        self.saver = CSVSaver(output_dir, filename, overwrite)
        self.batch_transform = batch_transform
        self.output_transform = output_transform
        self.logger = logging.getLogger(name)
        self._name = name

    def attach(self, engine: Engine) -> None:
        """
        Register this handler (all ranks) and the CSV finalizer (saving rank only).

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self._name is None:
            # no explicit logger name: adopt the engine's own logger
            self.logger = engine.logger
        if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
        # only the saving rank finalizes the CSV file on completion
        if self._expected_rank and not engine.has_event_handler(
                self.saver.finalize, Events.COMPLETED):
            engine.add_event_handler(Events.COMPLETED, lambda engine: self.saver.finalize())

    def __call__(self, engine: Engine) -> None:
        """
        This method assumes self.batch_transform will extract metadata from the input batch.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        _meta_data = self.batch_transform(engine.state.batch)
        if Key.FILENAME_OR_OBJ in _meta_data:
            # all gather filenames across ranks
            # NOTE: collective call -- every rank must execute it in the same order
            _meta_data[Key.FILENAME_OR_OBJ] = string_list_all_gather(
                _meta_data[Key.FILENAME_OR_OBJ])
        # all gather predictions across ranks
        _engine_output = evenly_divisible_all_gather(
            self.output_transform(engine.state.output))
        # the gathered results are cached only on the saving rank
        if self._expected_rank:
            self.saver.save_batch(_engine_output, _meta_data)
def test_saved_content(self):
    """
    Exercise three SaveClassificationd configurations against shared CSV files:
    an eager saver (overwrite=True), a cached saver finalized manually, and an
    appending saver (overwrite=False) that must preserve earlier rows.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        # three simulated batches of 8 zero predictions with distinct filenames
        data = [
            {
                "pred": torch.zeros(8),
                PostFix.meta("image"): {
                    "filename_or_obj": ["testfile" + str(i) for i in range(8)]
                },
            },
            {
                "pred": torch.zeros(8),
                PostFix.meta("image"): {
                    "filename_or_obj": ["testfile" + str(i) for i in range(8, 16)]
                },
            },
            {
                "pred": torch.zeros(8),
                PostFix.meta("image"): {
                    "filename_or_obj": ["testfile" + str(i) for i in range(16, 24)]
                },
            },
        ]

        # shared saver instance with flush disabled; rows accumulate until finalize()
        saver = CSVSaver(output_dir=Path(tempdir),
                         filename="predictions2.csv",
                         overwrite=False,
                         flush=False,
                         delimiter="\t")
        # set up test transforms
        post_trans = Compose([
            # copy image meta onto the "pred" meta key for the 2nd saver's lookup
            CopyItemsd(keys=PostFix.meta("image"),
                       times=1,
                       names=PostFix.meta("pred")),
            # 1st saver saves data into CSV file
            SaveClassificationd(
                keys="pred",
                saver=None,
                meta_keys=None,
                output_dir=Path(tempdir),
                filename="predictions1.csv",
                delimiter="\t",
                overwrite=True,
            ),
            # 2rd saver only saves data into the cache, manually finalize later
            SaveClassificationd(keys="pred",
                                saver=saver,
                                meta_key_postfix=PostFix.meta()),
        ])
        # simulate inference 2 iterations
        d = decollate_batch(data[0])
        for i in d:
            post_trans(i)
        d = decollate_batch(data[1])
        for i in d:
            post_trans(i)
        # write into CSV file
        saver.finalize()

        # 3rd saver will not delete previous data due to `overwrite=False`
        trans2 = SaveClassificationd(
            keys="pred",
            saver=None,
            meta_keys=PostFix.meta(
                "image"),  # specify meta key, so no need to copy anymore
            output_dir=tempdir,
            filename="predictions1.csv",
            delimiter="\t",
            overwrite=False,
        )
        d = decollate_batch(data[2])
        for i in d:
            trans2(i)

        def _test_file(filename, count):
            # assert the CSV has `count` ordered rows of "testfileN" + zeros
            filepath = os.path.join(tempdir, filename)
            self.assertTrue(os.path.exists(filepath))
            with open(filepath) as f:
                reader = csv.reader(f, delimiter="\t")
                i = 0
                for row in reader:
                    self.assertEqual(row[0], "testfile" + str(i))
                    self.assertEqual(
                        np.array(row[1:]).astype(np.float32), 0.0)
                    i += 1
                self.assertEqual(i, count)

        # predictions1.csv saw all three batches (24 rows); predictions2.csv only two (16)
        _test_file("predictions1.csv", 24)
        _test_file("predictions2.csv", 16)
# NOTE(review): this chunk begins mid-expression -- the opening
# `DataLoader(val_ds, ...` of this call lies outside the visible source.
batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

# Create DenseNet121
device = torch.device('cuda:0')
model = monai.networks.nets.densenet.densenet121(
    spatial_dims=3,
    in_channels=1,
    out_channels=2,
).to(device)

model.load_state_dict(torch.load('best_metric_model.pth'))
model.eval()
with torch.no_grad():
    num_correct = 0.
    metric_count = 0
    saver = CSVSaver(output_dir='./output')
    for val_data in val_loader:
        # dict-style batches: tensors live under the 'img'/'label' keys
        val_images, val_labels = val_data['img'].to(
            device), val_data['label'].to(device)
        val_outputs = model(val_images).argmax(dim=1)
        value = torch.eq(val_outputs, val_labels)
        metric_count += len(value)
        num_correct += value.sum().item()
        # re-wrap the flattened meta key under the name CSVSaver expects
        saver.save_batch(val_outputs,
                         {'filename_or_obj': val_data['img.filename_or_obj']})
    metric = num_correct / metric_count
    print('evaluation metric:', metric)
    saver.finalize()
# create a validation data loader val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) # Create DenseNet121 device = torch.device('cuda:0') model = monai.networks.nets.densenet.densenet121( spatial_dims=3, in_channels=1, out_channels=2, ).to(device) model.load_state_dict(torch.load('best_metric_model.pth')) model.eval() with torch.no_grad(): num_correct = 0. metric_count = 0 saver = CSVSaver(output_dir='./output') for val_data in val_loader: val_images, val_labels = val_data[0].to(device), val_data[1].to(device) val_outputs = model(val_images).argmax(dim=1) value = torch.eq(val_outputs, val_labels) metric_count += len(value) num_correct += value.sum().item() saver.save_batch(val_outputs, val_data[2]) metric = num_correct / metric_count print('evaluation metric:', metric) saver.finalize()
def evaluta_model(test_files, model_name):
    """
    Evaluate a DenseNetASPP checkpoint on `test_files`, save argmax predictions
    to a per-checkpoint CSV, and report confusion matrix, quadratic-weighted
    kappa and a classification report.

    Side effects: appends to the module-level `kappa_list` and `accuracy_list`,
    writes 'confusion_matrix.png' via `plot_confusion_matrix`, and writes a CSV
    under ../result/GLeason/2d_output/.

    Args:
        test_files: list of data dicts consumed by monai.data.Dataset.
        model_name: path to the checkpoint file; its basename names the CSV.
    """
    # NOTE(review): function name is misspelled ("evaluta") -- kept for callers.
    test_transforms = Compose(
        [
            LoadNiftid(keys=modalDataKey),
            AddChanneld(keys=modalDataKey),
            NormalizeIntensityd(keys=modalDataKey),
            # ScaleIntensityd(keys=modalDataKey),
            # Resized(keys=modalDataKey, spatial_size=(48, 48), mode='bilinear'),
            ResizeWithPadOrCropd(keys=modalDataKey, spatial_size=(64, 64)),
            # stack the modality images into a single "inputs" tensor
            ConcatItemsd(keys=modalDataKey, name="inputs"),
            ToTensord(keys=["inputs"]),
        ]
    )
    # create a validation data loader
    device = torch.device("cpu")
    print(len(test_files))
    test_ds = monai.data.Dataset(data=test_files, transform=test_transforms)
    # batch_size=len(test_files): the whole set is processed as a single batch,
    # which is what makes the post-loop metrics below see every sample.
    # NOTE(review): pin_memory=torch.device passes the *type object* (truthy),
    # not a boolean -- presumably pin_memory=False was intended on CPU; confirm.
    test_loader = DataLoader(test_ds, batch_size=len(test_files), num_workers=2, pin_memory=torch.device)
    # model = monai.networks.nets.se_resnet101(spatial_dims=2, in_ch=3, num_classes=6).to(device)
    model = DenseNetASPP(spatial_dims=2, in_channels=2, out_channels=5).to(device)
    # Evaluate the model on test dataset #
    # print(os.path.basename(model_name).split('.')[0])
    # checkpoint dict stores the weights under the 'model' key
    checkpoint = torch.load(model_name)
    model.load_state_dict(checkpoint['model'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    # epochs = checkpoint['epoch']
    # model.load_state_dict(torch.load(log_dir))
    model.eval()
    with torch.no_grad():
        # CSV named after the checkpoint basename (extension stripped)
        saver = CSVSaver(output_dir="../result/GLeason/2d_output/",
                         filename=os.path.basename(model_name).split('.')[0] + '.csv')
        for test_data in test_loader:
            test_images, test_labels = test_data["inputs"].to(device), test_data["label"].to(device)
            pred = model(test_images)
            # Gleason Classification
            # y_soft_label = (test_labels / 0.25).long()
            # y_soft_pred = (pred / 0.25).round().squeeze_().long()
            # print(test_data)
            probabilities = torch.sigmoid(pred)
            # pred2 = model(test_images).argmax(dim=1)
            # print(test_data)
            # saver.save_batch(probabilities.argmax(dim=1), test_data["t2Img_meta_dict"])
            # zero = torch.zeros_like(probabilities)
            # one = torch.ones_like(probabilities)
            # y_pred_ordinal = torch.where(probabilities > 0.5, one, zero)
            # y_pred_acc = (y_pred_ordinal.sum(1)).to(torch.long)
            # save argmax class predictions keyed by the DWI image meta data
            saver.save_batch(probabilities.argmax(dim=1), test_data["dwiImg_meta_dict"])
            # print(test_labels)
            # print(probabilities[:, 1])
            # for x in np.nditer(probabilities[:, 1]):
            #     print(x)
            #     prob_list.append(x)
            # falseList = []
            # trueList = []
            # for pre, label in zip(pred2.tolist(), test_labels.tolist() ):
            #     if pre == 0 and label == 0:
            #         falseList.append(0)
            #     elif pre == 1 and label == 1:
            #         trueList.append(1)
            # specificity = (falseList.count(0) / test_labels.tolist().count(0))
            # sensitivity = (trueList.count(1) / test_labels.tolist().count(1))
            # print('specificity:' + '%.4f' % specificity + ',',
            #       'sensitivity:' + '%.4f' % sensitivity + ',',
            #       'accuracy:' + '%.4f' % ((specificity + sensitivity) / 2))
            # print(type(test_labels), type(pred))
            # fpr, tpr, thresholds = roc_curve(test_labels, probabilities[:, 1])
            # roc_auc = auc(fpr, tpr)
            # print('AUC = ' + str(roc_auc))
            # AUC_list.append(roc_auc)
            # # print(accuracy_score(test_labels, pred2))
            # accuracy_list.append(accuracy_score(test_labels, pred2))
            # plt.plot(fpr, tpr, linewidth=2, label="ROC")
            # plt.xlabel("false presitive rate")
            # plt.ylabel("true presitive rate")
            # # plt.ylim(0, 1.05)
            # # plt.xlim(0, 1.05)
            # plt.legend(loc=4)  # legend position
            # plt.show()
        saver.finalize()
        # NOTE(review): the metrics below use the loop variables from the final
        # iteration only; this covers all samples solely because the loader uses
        # a single full-size batch -- confirm if batch_size ever changes.
        # cm = confusion_matrix(test_labels, y_pred_acc)
        cm = confusion_matrix(test_labels, probabilities.argmax(dim=1))
        # cm = confusion_matrix(y_soft_label, y_soft_pred)
        # kappa_value = cohen_kappa_score(test_labels, y_pred_acc, weights='quadratic')
        kappa_value = cohen_kappa_score(test_labels, probabilities.argmax(dim=1), weights='quadratic')
        print('quadratic weighted kappa=' + str(kappa_value))
        # appends to module-level lists (side effect visible to the caller)
        kappa_list.append(kappa_value)
        plot_confusion_matrix(cm, 'confusion_matrix.png', title='confusion matrix')
        from sklearn.metrics import classification_report
        print(classification_report(test_labels, probabilities.argmax(dim=1), digits=4))
        accuracy_list.append(
            classification_report(test_labels, probabilities.argmax(dim=1), digits=4, output_dict=True)["accuracy"])