    def test_nvtx_transforms_array(self, input):
        # with prob == 0.0
        transforms = Compose([
            RandMark("Mark: Transforms Start!"),
            RandRangePush("Range: RandFlip"),
            RandFlip(prob=0.0),
            RandRangePop(),
            RangePush("Range: ToTensor"),
            ToTensor(),
            RangePop(),
            Mark("Mark: Transforms End!"),
        ])
        output = transforms(input)
        self.assertIsInstance(output, torch.Tensor)
        np.testing.assert_array_equal(input, output)
        # with prob == 1.0
        transforms = Compose([
            RandMark("Mark: Transforms Start!"),
            RandRangePush("Range: RandFlip"),
            RandFlip(prob=1.0),
            RandRangePop(),
            RangePush("Range: ToTensor"),
            ToTensor(),
            RangePop(),
            Mark("Mark: Transforms End!"),
        ])
        output = transforms(input)
        self.assertIsInstance(output, torch.Tensor)
        np.testing.assert_array_equal(input, Flip()(output.numpy()))
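
A note on the two assertions above: prob=0.0 must leave the data untouched, while prob=1.0 must match the deterministic Flip. The same contract as a standalone sketch, assuming a small channel-first NumPy input (np.asarray covers newer MONAI versions, where transforms return MetaTensor):

import numpy as np
from monai.transforms import Flip, RandFlip

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4)  # (channel, H, W)

# prob=0.0: the transform is the identity
assert np.array_equal(np.asarray(RandFlip(prob=0.0)(x)), x)

# prob=1.0: the transform equals the deterministic Flip over all spatial axes
assert np.array_equal(np.asarray(RandFlip(prob=1.0)(x)), np.asarray(Flip()(x)))
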
Example #2
    def test_correct_results(self, _, spatial_axis):
        flip = RandFlip(prob=1.0, spatial_axis=spatial_axis)
        expected = list()
        for channel in self.imt[0]:
            expected.append(np.flip(channel, spatial_axis))
        expected = np.stack(expected)
        self.assertTrue(np.allclose(expected, flip(self.imt[0])))
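
Building expected channel by channel, as above, is equivalent to flipping the whole channel-first array with the spatial axis shifted by one; a quick sketch of that identity:

import numpy as np

img = np.random.rand(2, 5, 5)  # (channels, H, W)
axis = 0                       # spatial axis 0, i.e. H

per_channel = np.stack([np.flip(c, axis) for c in img])
whole = np.flip(img, axis + 1)  # +1 skips the channel dimension
assert np.array_equal(per_channel, whole)
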
Example #3
    def test_correct_results(self, _, spatial_axis):
        for p in TEST_NDARRAYS:
            im = p(self.imt[0])
            flip = RandFlip(prob=1.0, spatial_axis=spatial_axis)
            expected = [
                np.flip(channel, spatial_axis) for channel in self.imt[0]
            ]
            expected = np.stack(expected)
            result = flip(im)
            assert_allclose(result, p(expected))
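
TEST_NDARRAYS is a fixture from MONAI's test utilities: a tuple of constructors used to run each case against both the NumPy and PyTorch backends. If you lift this snippet out of the suite, a minimal stand-in could be:

import numpy as np
import torch

# hypothetical stand-in for tests.utils.TEST_NDARRAYS
TEST_NDARRAYS = (np.asarray, torch.as_tensor)
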
Example #4
    def test_correct_results(self, _, spatial_axis):
        for p in TEST_NDARRAYS_ALL:
            im = p(self.imt[0])
            flip = RandFlip(prob=1.0, spatial_axis=spatial_axis)
            set_track_meta(False)
            result = flip(im)
            self.assertNotIsInstance(result, MetaTensor)
            self.assertIsInstance(result, torch.Tensor)
            set_track_meta(True)
            expected = [
                np.flip(channel, spatial_axis) for channel in self.imt[0]
            ]
            expected = np.stack(expected)
            result = flip(im)
            assert_allclose(result, p(expected), type_test="tensor")
            test_local_inversion(flip, result, im)
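
set_track_meta toggles whether transforms return a MetaTensor (carrying the applied-operations record needed for inversion) or a plain torch.Tensor. The toggle in isolation, as a minimal sketch:

import torch
from monai.data import MetaTensor, set_track_meta
from monai.transforms import RandFlip

img = torch.rand(1, 8, 8)
flip = RandFlip(prob=1.0)

set_track_meta(False)
assert not isinstance(flip(img), MetaTensor)  # plain tensor, nothing to invert

set_track_meta(True)
assert isinstance(flip(img), MetaTensor)  # metadata tracked, so the flip is invertible
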
Example #5
    def test_invert(self):
        set_determinism(seed=0)
        im_fname = make_nifti_image(create_test_image_3d(101, 100, 107, noise_max=100)[1])  # label image, discrete
        data = [im_fname for _ in range(12)]
        transform = Compose(
            [
                LoadImage(image_only=True),
                EnsureChannelFirst(),
                Orientation("RPS"),
                Spacing(pixdim=(1.2, 1.01, 0.9), mode="bilinear", dtype=np.float32),
                RandFlip(prob=0.5, spatial_axis=[1, 2]),
                RandAxisFlip(prob=0.5),
                RandRotate90(prob=0, spatial_axes=(1, 2)),
                RandZoom(prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
                RandRotate(prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True, dtype=np.float64),
                RandAffine(prob=0.5, rotate_range=np.pi, mode="nearest"),
                ResizeWithPadOrCrop(100),
                CastToType(dtype=torch.uint8),
            ]
        )

        # num workers = 0 for mac or gpu transforms
        num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available() else 2
        dataset = Dataset(data, transform=transform)
        self.assertIsInstance(transform.inverse(dataset[0]), MetaTensor)
        loader = DataLoader(dataset, num_workers=num_workers, batch_size=1)
        inverter = Invert(transform=transform, nearest_interp=True, device="cpu")

        for d in loader:
            d = decollate_batch(d)
            for item in d:
                orig = deepcopy(item)
                i = inverter(item)
                self.assertTupleEqual(orig.shape[1:], (100, 100, 100))
                # check the nearest interpolation mode
                torch.testing.assert_allclose(i.to(torch.uint8).to(torch.float), i.to(torch.float))
                self.assertTupleEqual(i.shape[1:], (100, 101, 107))
        # check labels match
        reverted = i.detach().cpu().numpy().astype(np.int32)
        original = LoadImage(image_only=True)(data[-1])
        n_good = np.sum(np.isclose(reverted, original.numpy(), atol=1e-3))
        reverted_name = i.meta["filename_or_obj"]
        original_name = original.meta["filename_or_obj"]
        self.assertEqual(reverted_name, original_name)
        print("invert diff", reverted.size - n_good)
        self.assertTrue((reverted.size - n_good) < 300000, f"diff. {reverted.size - n_good}")
        set_determinism(seed=None)
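
Invert above undoes the entire Compose chain for each decollated item. The same round trip in miniature, without a DataLoader, assuming a MetaTensor input so the applied operations get recorded:

import torch
from monai.data import MetaTensor
from monai.transforms import Compose, RandFlip, ResizeWithPadOrCrop

transform = Compose([RandFlip(prob=1.0, spatial_axis=0), ResizeWithPadOrCrop(16)])
img = MetaTensor(torch.rand(1, 12, 12))

out = transform(img)               # flip + pad, both recorded in applied_operations
restored = transform.inverse(out)  # undo the pad, then undo the flip
assert tuple(restored.shape) == (1, 12, 12)
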
Example #6
    def test_transform_randomized(self, input):
        # Compose deterministic and randomized transforms
        transforms = Compose([
            Range("flip")(Flip()),
            Rotate90(),
            Range()(RandAdjustContrast(prob=0.0)),
            Range("random flip")(RandFlip(prob=1.0)),
            ToTensor(),
        ])
        # Apply transforms
        output = transforms(input)

        # Decorate with NVTX Range
        transforms1 = Range()(transforms)
        transforms2 = Range("Transforms2")(transforms)
        transforms3 = Range(name="Transforms3", methods="__call__")(transforms)

        # Apply transforms with Range
        output1 = transforms1(input)
        output2 = transforms2(input)
        output3 = transforms3(input)

        # Check if the outputs are equal
        self.assertIsInstance(output, torch.Tensor)
        self.assertIsInstance(output1, torch.Tensor)
        self.assertIsInstance(output2, torch.Tensor)
        self.assertIsInstance(output3, torch.Tensor)
        np.testing.assert_equal(output.numpy(), output1.numpy())
        np.testing.assert_equal(output.numpy(), output2.numpy())
        np.testing.assert_equal(output.numpy(), output3.numpy())

        # Check if the first randomized is RandAdjustContrast
        for tran in transforms.transforms:
            if isinstance(tran, Randomizable):
                self.assertIsInstance(tran, RandAdjustContrast)
                break
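
Range from monai.utils.nvtx wraps the decorated object's call method (configurable via methods=...) in an NVTX push/pop pair, so each application shows up as a named range in Nsight; it works on a single transform exactly as on a whole Compose. A one-liner sketch, assuming a CUDA-enabled PyTorch build:

from monai.transforms import Flip
from monai.utils.nvtx import Range

flip = Range("flip")(Flip())  # every flip(...) call is now bracketed by an NVTX range
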
Example #7
def run_training_test(root_dir,
                      train_x,
                      train_y,
                      val_x,
                      val_y,
                      device="cuda:0",
                      num_workers=10):

    monai.config.print_config()
    # define transforms for image and classification
    train_transforms = Compose([
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
    ])
    train_transforms.set_random_state(1234)
    val_transforms = Compose(
        [LoadPNG(image_only=True),
         AddChannel(),
         ScaleIntensity(),
         ToTensor()])

    # create train, val data loaders
    train_ds = MedNISTDataset(train_x, train_y, train_transforms)
    train_loader = DataLoader(train_ds,
                              batch_size=300,
                              shuffle=True,
                              num_workers=num_workers)

    val_ds = MedNISTDataset(val_x, val_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)

    model = densenet121(spatial_dims=2,
                        in_channels=1,
                        out_channels=len(np.unique(train_y))).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)
    epoch_num = 4
    val_interval = 1

    # start training and validation
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    for epoch in range(epoch_num):
        print("-" * 10)
        print(f"Epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss:{epoch_loss:0.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(
                        device), val_data[1].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                auc_metric = compute_roc_auc(y_pred,
                                             y,
                                             to_onehot_y=True,
                                             softmax=True)
                metric_values.append(auc_metric)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if auc_metric > best_metric:
                    best_metric = auc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), model_filename)
                    print("saved new best metric model")
                print(
                    f"current epoch {epoch +1} current AUC: {auc_metric:0.4f} "
                    f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}"
                )
    print(
        f"train completed, best_metric: {best_metric:0.4f}  at epoch: {best_metric_epoch}"
    )
    return epoch_loss_values, best_metric, best_metric_epoch
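
compute_roc_auc with to_onehot_y/softmax arguments is MONAI's older functional metric (recent releases express this with ROCAUCMetric plus explicit post-processing). As an independent sanity check, the same macro one-vs-rest AUC can be computed with scikit-learn; a sketch assuming more than two classes (MedNIST has six) and a hypothetical helper name:

import torch
from sklearn.metrics import roc_auc_score

def auc_from_logits(y_pred: torch.Tensor, y: torch.Tensor) -> float:
    """Macro one-vs-rest AUC from raw logits, mirroring to_onehot_y=True, softmax=True."""
    probs = torch.softmax(y_pred, dim=1).cpu().numpy()
    return roc_auc_score(y.cpu().numpy(), probs, multi_class="ovr", average="macro")
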
Example #8
from typing import List, Tuple

import torch

from monai.transforms import RandAffine, RandFlip, RandRotate90, SpatialPad
from monai.transforms.croppad.dictionary import SpatialPadd
from monai.transforms.spatial.dictionary import RandAffined, RandFlipd, RandRotate90d
from monai.utils import optional_import, set_determinism
from monai.utils.enums import InverseKeys
from tests.utils import make_nifti_image

_, has_nib = optional_import("nibabel")

KEYS = ["image"]

TESTS_DICT: List[Tuple] = []
TESTS_DICT.append((SpatialPadd(KEYS, 150), RandFlipd(KEYS, prob=1.0, spatial_axis=1)))
TESTS_DICT.append((RandRotate90d(KEYS, prob=0.0, max_k=1),))
TESTS_DICT.append((RandAffined(KEYS, prob=0.0, translate_range=10),))

TESTS_LIST: List[Tuple] = []
TESTS_LIST.append((SpatialPad(150), RandFlip(prob=1.0, spatial_axis=1)))
TESTS_LIST.append((RandRotate90(prob=0.0, max_k=1),))
TESTS_LIST.append((RandAffine(prob=0.0, translate_range=10),))
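
The dictionary variants (RandFlipd and friends) mirror the array transforms but operate on the selected keys of a data dict, which is how the KEYS constant above is used; a minimal sketch:

import torch
from monai.transforms import RandFlipd

data = {"image": torch.rand(1, 8, 8)}
flipped = RandFlipd(keys=["image"], prob=1.0, spatial_axis=1)(data)
assert flipped["image"].shape == data["image"].shape  # flipping preserves shape
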


TEST_BASIC = [
    [("channel", "channel"), ["channel", "channel"]],
    [torch.Tensor([1, 2, 3]), [torch.tensor(1.0), torch.tensor(2.0), torch.tensor(3.0)]],
    [
        [[torch.Tensor((1.0, 2.0, 3.0)), torch.Tensor((2.0, 3.0, 1.0))]],
        [
            [[torch.tensor(1.0), torch.tensor(2.0)]],
            [[torch.tensor(2.0), torch.tensor(3.0)]],
            [[torch.tensor(3.0), torch.tensor(1.0)]],
        ],
    ],
Example #9
    def test_invalid_inputs(self, _, spatial_axis, raises):
        with self.assertRaises(raises):
            flip = RandFlip(prob=1.0, spatial_axis=spatial_axis)
            flip(self.imt[0])
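
The raises parameter carries the expected exception for each case; for instance, a spatial_axis beyond the image's spatial rank surfaces as an axis/index error from the underlying flip (the exact type is what the parameterized cases pin down). A sketch, assuming an image with two spatial dimensions:

import numpy as np
from monai.transforms import RandFlip

img = np.random.rand(1, 8, 8)  # one channel, two spatial dims (axes 0 and 1)

try:
    RandFlip(prob=1.0, spatial_axis=2)(img)  # there is no spatial axis 2
except IndexError:
    print("out-of-range spatial_axis rejected")
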
Example #10
    train_indices = indices[val_split:]

    train_x = [image_files_list[i] for i in train_indices]
    train_y = [image_class[i] for i in train_indices]
    val_x = [image_files_list[i] for i in val_indices]
    val_y = [image_class[i] for i in val_indices]
    test_x = [image_files_list[i] for i in test_indices]
    test_y = [image_class[i] for i in test_indices]

    # MONAI transforms, Dataset and Dataloader for preprocessing
    train_transforms = Compose([
        LoadImage(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
    ])

    val_transforms = Compose([
        LoadImage(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        ToTensor()
    ])

    act = Activations(softmax=True)
    to_onehot = AsDiscrete(to_onehot=True, n_classes=num_class)

    class MedNISTDataset(torch.utils.data.Dataset):
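
The listing cuts off at the class declaration. In the MedNIST tutorial this dataset is a thin (image file, label) wrapper that applies the transform chain on access; a sketch along those lines:

import torch

class MedNISTDataset(torch.utils.data.Dataset):
    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files
        self.labels = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        # the transforms start with LoadImage, so items stay file paths until accessed
        return self.transforms(self.image_files[index]), self.labels[index]
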
Example #11
TEST_CASE_DICT_0 = [{"image": np.random.randn(3, 3)}]
TEST_CASE_DICT_1 = [{"image": np.random.randn(3, 10, 10)}]

TEST_CASE_TORCH_0 = [torch.randn(3, 3)]
TEST_CASE_TORCH_1 = [torch.randn(3, 10, 10)]

TEST_CASE_WRAPPER = [np.random.randn(3, 10, 10)]

TEST_CASE_RECURSIVE_0 = [
    torch.randn(3, 3),
    Compose([
        ToNumpy(),
        Flip(),
        RandAdjustContrast(prob=0.0),
        RandFlip(prob=1.0),
        ToTensor()
    ]),
]
TEST_CASE_RECURSIVE_1 = [
    torch.randn(3, 3),
    Compose([
        ToNumpy(),
        Flip(),
        Compose([RandAdjustContrast(prob=0.0),
                 RandFlip(prob=1.0)]),
        ToTensor()
    ]),
]
TEST_CASE_RECURSIVE_2 = [
    torch.randn(3, 3),
Example #12
class Loader:
    """Loader for different image datasets, with a built-in split function and optional download.
    
    Functions:
        load_IXIT1: Loads the IXIT1 3D brain MRI dataset.
        load_MedNIST: Loads the MedNIST 2D image dataset.
    """
    
    ixi_train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), RandRotate90()])
    ixi_test_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96))])
    
    mednist_train_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(),
                                        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True), 
                                        RandFlip(spatial_axis=0, prob=0.5), 
                                        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5)])
    mednist_test_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity()])
    
    
    @staticmethod
    def load_IXIT1(download: bool = False, train_transforms: object = ixi_train_transforms, 
                   test_transforms: object = ixi_test_transforms, test_size: float = 0.2, 
                   val_size: float = 0.0, sample_size: float = 0.01, shuffle: bool = True):
        """Loads the IXIT1 3D Brain MRI dataset.
        
        Consists of ~566 3D brain MRI scans with sex labels: (0) male, (1) female.
        
        Args:
            download (bool): If true, then data is downloaded before loading it as dataset.
            train_transforms (Compose): Specify the transformations to be applied to the training dataset.
            test_transforms (Compose): Specify the transformations to be applied to the test dataset.
            sample_size (float): Percentage of available images to be used.
            test_size (float): Percentage of sample to be used as test data.
            val_size (float): Percentage of sample to be used as validation data.
            shuffle (bool): Whether or not the data should be shuffled after loading.
        """
        # Download data if needed
        if download:
            data_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
            compressed_file = os.sep.join(['Data', 'IXI-T1.tar'])
            data_dir = os.sep.join(['Data', 'IXI-T1'])

            # Data download
            monai.apps.download_and_extract(data_url, compressed_file, './Data/IXI-T1')

            # Labels document download
            labels_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
            monai.apps.download_url(labels_url, './Data/IXI.xls')
            
        # Get all the images and corresponding Labels
        images = os.listdir('./Data/IXI-T1')

        df = pd.read_excel('./Data/IXI.xls')

        data = []
        labels = []
        for i in images:
            ixi_id = int(i[3:6])
            row = df.loc[df['IXI_ID'] == ixi_id]
            if not row.empty:
                data.append(os.sep.join(['Data', 'IXI-T1', i]))
                labels.append(int(row.iat[0, 1] - 1)) # Sex labels are 1/2 but need to be 0/1

        data, labels = data[:int(len(data) * sample_size)], labels[:int(len(data) * sample_size)]
        
        # Make train test validation split
        train_data, train_labels, test_data, test_labels, val_data, val_labels = _split(data, labels, 
                                                                                        test_size, val_size)
        
        # Construct and return Datasets
        train_ds = IXIT1Dataset(train_data, train_labels, train_transforms, shuffle)
        test_ds = IXIT1Dataset(test_data, test_labels, test_transforms, shuffle)
        
        if val_size == 0:
            return train_ds, test_ds
        else:
            val_ds = IXIT1Dataset(val_data, val_labels, test_transforms, shuffle)
            return train_ds, test_ds, val_ds
        
    
    @staticmethod
    def load_MedNIST(download: bool = False, train_transforms: object = mednist_train_transforms, 
                   test_transforms: object = mednist_test_transforms, test_size: float = 0.2, 
                   val_size: float = 0.0, sample_size: float = 0.01, shuffle: bool = True):
        """Loads the MedNIST 2D image dataset.
        
        Consists of ~60,000 2D images across 6 classes: AbdomenCT, BreastMRI, ChestCT, CXR, Hand, HeadCT.
        
        Args:
            download (bool): If true, then data is downloaded before loading it as dataset.
            train_transforms (Compose): Specify the transformations to be applied to the training dataset.
            test_transforms (Compose): Specify the transformations to be applied to the test dataset.
            sample_size (float): Percentage of available images to be used.
            test_size (float): Percentage of sample to be used as test data.
            val_size (float): Percentage of sample to be used as validation data.
            shuffle (bool): Whether or not the data should be shuffled after loading.
        """
        
        root_dir = './Data'
        resource = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
        md5 = "0bc7306e7427e00ad1c5526a6677552d"

        compressed_file = os.path.join(root_dir, "MedNIST.tar.gz")
        data_dir = os.path.join(root_dir, "MedNIST")
            
        if download:
            monai.apps.download_and_extract(resource, compressed_file, root_dir, md5)

        # Reading image filenames from dataset folders and assigning labels
        class_names = sorted(x for x in os.listdir(data_dir)
                             if os.path.isdir(os.path.join(data_dir, x)))
        num_class = len(class_names)

        image_files = [
            [
                os.path.join(data_dir, class_names[i], x)
                for x in os.listdir(os.path.join(data_dir, class_names[i]))
            ]
            for i in range(num_class)
        ]
        
        image_files = [images[:int(len(images) * sample_size)] for images in image_files]
        
        # Constructing data and labels
        num_each = [len(image_files[i]) for i in range(num_class)]
        data = []
        labels = []

        for i in range(num_class):
            data.extend(image_files[i])
            labels.extend([int(i)] * num_each[i])
            
        if shuffle:
            np.random.seed(42)
            indices = np.arange(len(data))
            np.random.shuffle(indices)

            data = [data[i] for i in indices]
            labels = [labels[i] for i in indices]
        
        # Make train test validation split
        train_data, train_labels, test_data, test_labels, val_data, val_labels = _split(data, labels, 
                                                                                        test_size, val_size)
        
        # Construct and return datasets
        train_ds = MedNISTDataset(train_data, train_labels, train_transforms, shuffle)
        test_ds = MedNISTDataset(test_data, test_labels, test_transforms, shuffle)
        
        if val_size == 0:
            return train_ds, test_ds
        else:
            val_ds = MedNISTDataset(val_data, val_labels, test_transforms, shuffle)
            return train_ds, test_ds, val_ds
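
A usage sketch for the class above, assuming the MedNIST archive has already been downloaded once (sample_size defaults to 0.01, which keeps the run small):

import torch

train_ds, test_ds = Loader.load_MedNIST(download=False, test_size=0.2)
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32, shuffle=True)
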
Example #13
                                             is_val_split=is_val_split)

    # data preprocessing/augmentation
    trans_train = MozartTheComposer([

        #ScaleIntensity(),
        #             AddChannel(),
        #             RandSpatialCrop(roi_size=256, random_size=False),
        #CenterSpatialCrop(roi_size=2154),  # 2154
        #             RandScaleIntensity(factors=0.25, prob=aug_prob),
        RandRotate(range_x=15,
                   prob=aug_prob,
                   keep_size=True,
                   padding_mode="reflection"),
        RandRotate90(prob=aug_prob, spatial_axes=(1, 2)),
        RandFlip(spatial_axis=(1, 2), prob=aug_prob),
        ToTensor()
    ])

    trans_val = MozartTheComposer([
        #         LoadImage(PILReader(), image_only=True),
        #ScaleIntensity(),
        #         AddChannel(),
        #         RandSpatialCrop(roi_size=256, random_size=False),
        #CenterSpatialCrop(roi_size=2154),
        ToTensor()
    ])

    # create dataset class
    train_dataset = OurDataset(data=train_split,
                               data_reader=PILReader(),
Example #14
File: utils.py Project: ckbr0/RIS
def transform_and_copy(data, cache_dir):
    copy_dir = os.path.join(cache_dir, 'copied_images')
    if not os.path.exists(copy_dir):
        os.mkdir(copy_dir)
    copy_list_path = os.path.join(copy_dir, 'copied_images.npy')
    if not os.path.exists(copy_list_path):
        print("transforming and copying images...")
        imageLoader = LoadImage()
        to_copy_list = [x for x in data if int(x['_label']) == 1]
        mul = 1  #int(len(data)/len(to_copy_list) - 1)

        rand_x_flip = RandFlip(spatial_axis=0, prob=0.50)
        rand_y_flip = RandFlip(spatial_axis=1, prob=0.50)
        rand_z_flip = RandFlip(spatial_axis=2, prob=0.50)
        rand_affine = RandAffine(prob=1.0,
                                 rotate_range=(0, 0, np.pi / 10),
                                 shear_range=(0.12, 0.12, 0.0),
                                 translate_range=(0, 0, 0),
                                 scale_range=(0.12, 0.12, 0.0),
                                 padding_mode="zeros")
        rand_gaussian_noise = RandGaussianNoise(prob=0.5, mean=0.0, std=0.05)
        transform = Compose([
            AddChannel(),
            rand_x_flip,
            rand_y_flip,
            rand_z_flip,
            rand_affine,
            SqueezeDim(),
        ])
        copy_list = []
        n = len(to_copy_list)
        for i in range(len(to_copy_list)):
            print(f'Copying image {i+1}/{n}', end="\r")
            to_copy = to_copy_list[i]
            image_file = to_copy['image']
            _image_file = replace_suffix(image_file, '.nii.gz', '')
            label = to_copy['label']
            _label = to_copy['_label']
            image_data, _ = imageLoader(image_file)
            seg_file = to_copy['seg']
            seg_data, _ = nrrd.read(seg_file)

            for j in range(mul):
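                # re-seed the transform before each of the two calls below so the
                # image and its segmentation receive identical random flips/affine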
                rand_seed = np.random.randint(1e8)
                transform.set_random_state(seed=rand_seed)
                new_image_data = rand_gaussian_noise(
                    np.array(transform(image_data)))
                transform.set_random_state(seed=rand_seed)
                new_seg_data = np.array(transform(seg_data))
                #multi_slice_viewer(image_data, image_file)
                #multi_slice_viewer(seg_data, seg_file)
                #seg_image = MaskIntensity(seg_data)(image_data)
                #multi_slice_viewer(seg_image, seg_file)
                image_basename = os.path.basename(_image_file)
                seg_basename = image_basename + f'_seg_{j}.nrrd'
                image_basename = image_basename + f'_{j}.nii.gz'

                new_image_file = os.path.join(copy_dir, image_basename)
                write_nifti(new_image_data, new_image_file, resample=False)
                new_seg_file = os.path.join(copy_dir, seg_basename)
                nrrd.write(new_seg_file, new_seg_data)
                copy_list.append({
                    'image': new_image_file,
                    'seg': new_seg_file,
                    'label': label,
                    '_label': _label
                })

        np.save(copy_list_path, copy_list)
        print("done transforming and copying!")

    copy_list = np.load(copy_list_path, allow_pickle=True)
    return copy_list
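
The pattern above, set_random_state with the same seed before transforming the image and again before its segmentation, is what keeps the paired volumes aligned. Distilled to a standalone sketch:

import numpy as np
from monai.transforms import Compose, RandFlip

image = np.random.rand(1, 8, 8)
seg = (image > 0.5).astype(np.float32)

transform = Compose([RandFlip(prob=0.5, spatial_axis=0)])
seed = np.random.randint(100_000_000)

transform.set_random_state(seed=seed)
aug_image = transform(image)
transform.set_random_state(seed=seed)  # same seed -> same flip decision
aug_seg = transform(seg)
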
Example #15
for i, elem in enumerate(image_files_list):
    if elem in list_all_images:
        image_files_list_updated.append(elem)
        image_class_list.append(image_class[i])


"""
transforms 
"""
train_transforms = Compose(
    [
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(), 
        RandRotate(range_x=15, prob=0.1, keep_size=True),  # low rotation probability; note range_x is in radians
        RandFlip(spatial_axis=0, prob=0.5),  # vertical (up-down) flip
        RandFlip(spatial_axis=1, prob=0.5),  # horizontal (left-right) flip
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
        Lambda(lambda x: torch.cat([x, x, x], 0)),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ]
)

val_transforms = Compose(
    [
        LoadPNG(image_only=True),
        # Resize((480,640)),
        AddChannel(), 
        ScaleIntensity(),
        ToTensor(),