    def __init__(
        self,
        summary_writer=None,
        interval=1,
        log_dir="./runs",
        tag_name="val_dice",
        compute_metric=True,
        images=True,
        image_interval=1,
        max_channels=1,
        max_frames=64,
        add_scalar=True,
        merge_scalar=False,
        fold_size=0,
    ):
        # Create a SummaryWriter if one was not supplied by the caller
        self.writer = SummaryWriter(
            log_dir=log_dir) if summary_writer is None else summary_writer
        self.interval = interval
        self.tag_name = tag_name
        self.compute_metric = compute_metric
        self.images = images
        self.image_interval = image_interval
        self.max_channels = max_channels
        self.max_frames = max_frames
        self.add_scalar = add_scalar
        self.merge_scalar = merge_scalar
        self.fold_size = fold_size
        self.logger = logging.getLogger(__name__)

        # In distributed runs, suffix the tag with the process rank so each
        # rank logs under a distinct name
        if torch.distributed.is_initialized():
            self.tag_name = "{}-r{}".format(self.tag_name,
                                            torch.distributed.get_rank())

        self.plot_data = {}
        self.metric_data = {}
"""
"""## Download dataset

Downloads and extracts the dataset.  
The dataset comes from http://medicaldecathlon.com/.
"""

md5 = "410d4a301da4e5b2f6f86ec3ddba524e"

root_dir = "//home//imoreira//Data"
#root_dir = "C:\\Users\\isasi\\Downloads"
data_dir = os.path.join(root_dir, "Kidneys_Data")
out_dir = os.path.join(root_dir, "Kidneys_Best_Model")
tensorboard_dir = "/home/imoreira/Data/Tensorboard_Kidneys"
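
# The download/extract step is not shown in this script; below is a minimal
# sketch using MONAI's download_and_extract. The archive URL is an assumption
# (the md5 above matches the MSD Task09_Spleen archive used in the MONAI
# tutorials), so substitute the archive that actually contains the kidney data.
#
# from monai.apps import download_and_extract
#
# resource = "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar"
# compressed_file = os.path.join(root_dir, "Task09_Spleen.tar")
# if not os.path.exists(data_dir):
#     download_and_extract(resource, compressed_file, root_dir, md5)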

writer = SummaryWriter(log_dir=tensorboard_dir)
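
# Later in the script this writer receives training/validation statistics; a
# typical call (sketch only, tag and variable names are placeholders) looks like:
# writer.add_scalar("val_mean_dice", best_metric, epoch)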
"""## Set MSD Spleen dataset path"""

train_images = sorted(glob.glob(os.path.join(data_dir, "imagesTr",
                                             "*.nii.gz")))
train_labels = sorted(glob.glob(os.path.join(data_dir, "labelsTr",
                                             "*.nii.gz")))
data_dicts = [
    {"image": image_name, "label": label_name}
    for image_name, label_name in zip(train_images, train_labels)
]
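# For reference, each element of data_dicts pairs one image with its label,
# e.g. (hypothetical filename, for illustration only):
# {"image": ".../imagesTr/case_000.nii.gz", "label": ".../labelsTr/case_000.nii.gz"}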
#n = len(data_dicts)
#train_files, val_files = data_dicts[:-3], data_dicts[-3:]
#train_files, val_files = data_dicts[:int(n*0.8)], data_dicts[int(n*0.8):]

val_files, train_files, test_files = data_dicts[0:8], data_dicts[