Example #1
def train_net(flag):
    if flag == 0:
        agent1 = ONet(n_input=config.N_INPUT, n_output=config.N_OUTPUT)
        agent1.start_train()
    elif flag == 1:
        agent2 = LNet(n_input=config.N_INPUT, n_output=config.N_OUTPUT)
        agent2.start_train()
    else:
        agent3 = UNet(n_input=config.N_INPUT, n_output=config.N_OUTPUT)
        agent3.start_train()
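A hypothetical call site (config.N_INPUT and config.N_OUTPUT are assumed to be defined in the project's config module): the flag selects which of the three architectures is trained.

# Hypothetical usage: 0 trains ONet, 1 trains LNet, any other value trains UNet.
train_net(2)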
Example #2
    def __create_graph(self):
        self.discriminator = Discriminator(cfg.train.discriminator_size)
        self.discriminator.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.siamese = Discriminator(cfg.train.siamese_size)
        self.siamese.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.unet = UNet()
        self.unet.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

        self.BC_loss = tf.keras.losses.BinaryCrossentropy()
        self.max_margin_loss = MaxMarginLoss(cfg.train.delta)
        self.distance_loss = DistanceLoss()

        return
Example #3
def main():
    # create pytorch dataset
    dataset_tr = CRC_Dataset(
        root_dir=os.path.join(os.getcwd(), "data\\train"),
        transforms=[
            transforms.RandomCrop(crop_shape=(256, 256)),
            transforms.RandomFlip(),
            #transforms.MirrorPad(padding=((3,), (3,), (0,))),
            transforms.ToTensor(),
            transforms.Normalize(means=(0.7942, 0.6693, 0.7722),
                                 stds=(0.1998, 0.3008, 0.2037))
        ])

    # set model, optimizer and loss criterion
    model = UNet((256, 256), (256, 256),
                 32,
                 64,
                 128,
                 256,
                 512,
                 droprate=0.5,
                 Norm=nn.BatchNorm2d)
    optimizer = optim.Adam(model.parameters(), lr=5e-4, weight_decay=3e-5)

    # class-weighted cross-entropy kept as a commented-out alternative;
    # the combined Dice + cross-entropy criterion is used instead
    #weights = torch.tensor([0.181, 0.345, 0.474]).to(device)
    #criterion = nn.CrossEntropyLoss(weights)
    criterion = Dice_and_CE()
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                        mode="min",
                                                        factor=0.2,
                                                        patience=5,
                                                        min_lr=1e-6,
                                                        verbose=True)

    # initialize trainer class
    trainer = Trainer(model, optimizer, criterion, lr_scheduler, dataset_tr,
                      **train_dict)

    # start training
    trainer.run_training()
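Dice_and_CE is defined elsewhere in the project and not shown here. Purely as an illustration, a combined soft-Dice plus cross-entropy criterion for multi-class segmentation could look like the following sketch (hypothetical, not the project's implementation; it assumes logits of shape (N, C, H, W) and integer class targets of shape (N, H, W)):

import torch.nn as nn
import torch.nn.functional as F

class DiceCESketch(nn.Module):
    # Hypothetical stand-in for Dice_and_CE: cross-entropy plus soft Dice.
    def __init__(self, eps=1e-6):
        super().__init__()
        self.ce = nn.CrossEntropyLoss()
        self.eps = eps

    def forward(self, logits, target):
        ce = self.ce(logits, target)
        probs = F.softmax(logits, dim=1)
        one_hot = F.one_hot(target, num_classes=logits.shape[1]).permute(0, 3, 1, 2).float()
        intersection = (probs * one_hot).sum(dim=(2, 3))
        union = probs.sum(dim=(2, 3)) + one_hot.sum(dim=(2, 3))
        dice_loss = 1.0 - ((2.0 * intersection + self.eps) / (union + self.eps)).mean()
        return ce + dice_loss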
Example #4
def main():

    # get test set and test set loader
    test_set = CRC_Dataset(root_dir=os.path.join(os.getcwd(), "data\\test"),
                           transforms=[
                               MirrorPad(((6, ), (6, ), (0, ))),
                               ToTensor(),
                               Normalize(means=(0.7942, 0.6693, 0.7722),
                                         stds=(0.1998, 0.3008, 0.2037))
                           ])

    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=args.batch_size if args.batch_size else 8,
        num_workers=args.workers if args.workers else 1,
        pin_memory=use_cuda,
    )

    # 512x512 mirror-padded inputs, 500x500 output masks (cf. the MirrorPad above)
    model = UNet((512, 512), (500, 500),
                 32,
                 64,
                 128,
                 256,
                 512,
                 droprate=0.5,
                 Norm=torch.nn.BatchNorm2d)
    #model = UNet((512, 512), (500, 500), 32, 64, 128, 256, Norm=torch.nn.BatchNorm2d)
    model.load_state_dict(torch.load(chkpt))
    model.to(device)

    dice, acc = compute_metrics_on_test_set(model, test_loader)
    print(dice, acc)
Example #5
def main():
    # frames to infer
    files = [
        "C:\\AML_seg_proj\\CRC-Segmentation\\data\\test\\frames\\frame#1058.npz",
        "C:\\AML_seg_proj\\CRC-Segmentation\\data\\test\\frames\\frame#139.npz",
        "C:\\AML_seg_proj\\CRC-Segmentation\\data\\test\\frames\\frame#26.npz"
    ]
    chkpt = "C:\\AML_seg_proj\\CRC-Segmentation\\model_large_drop_batch_wce\\model_chkpt_30.pt"
    # transforms to apply
    composed = Compose([
        transforms.MirrorPad(((6, ), (6, ), (0, ))),
        transforms.ToTensor(),
        transforms.Normalize(means=(0.7942, 0.6693, 0.7722),
                             stds=(0.1998, 0.3008, 0.2037))
    ])

    # model
    #model = UNet((512, 512), (500, 500), 32, 64, 128, 256, 512, droprate=0.5, Norm=torch.nn.BatchNorm2d)
    model = UNet((512, 512), (500, 500),
                 32,
                 64,
                 128,
                 256,
                 512,
                 droprate=0.5,
                 Norm=torch.nn.BatchNorm2d)
    model.load_state_dict(torch.load(chkpt, map_location='cpu'))
    model.eval()

    # evaluate metrics on the fly
    dice_sc, px_acc = Dice_Score(), Pixel_Accuracy((500, 500))

    # make predictions and write images and masks to disk as png files
    with torch.no_grad():
        for file in files:
            # load img, mask
            img, ground_truth = np.load(file)["arr_0"], np.load(
                file.replace("frame", "mask"))["arr_0"]
            img_copy = img.copy()
            # transform img
            img, ground_truth = composed([img, ground_truth])
            # prediction shape (1, 3, 500, 500)
            pred = model(img.unsqueeze(0))
            dice, pp_acc = dice_sc(pred, ground_truth.unsqueeze(0)), px_acc(
                pred, ground_truth.unsqueeze(0))
            print(f"Dice Score: {dice}, PP-Accuracy: {pp_acc}")
            # mask shape (1, 500, 500)
            mask = (torch.argmax(F.softmax(pred, dim=1),
                                 dim=1).squeeze(0).numpy() / 2 * 255).astype(
                                     np.uint8)
            # transformed input scaled to uint8, shape (3, 512, 512)
            # (not written to disk below; the untransformed img_copy is used instead)
            img = (img.squeeze(0).numpy() * 255).astype(np.uint8)
            identifier = file.split("\\")[-1].replace(".npz", ".png").replace(
                "#", "_")

            w_PNG(identifier=identifier, np_img=img_copy)
            w_PNG(identifier=identifier.replace("frame", "mask"), np_img=mask)

            plot_triple(img_copy, ground_truth, mask,
                        identifier.replace(".png", "_triple.png"))
Example #6
class TraVeLGAN:
    def __init__(self):
        self.data_augmentation = DataAugmentation(cfg.train)
        self.__load_data()
        self.__create_graph()

        return

    def __load_data(self):
        # (self.x_train, self.y_train), (self.x_test, self.y_test) = tf.keras.datasets.cifar10.load_data()
        # self.dataset = tfds.load(name="celeb_a", split=tfds.Split.TRAIN)
        # with h5py.File('/home/firiuza/PycharmProjects/TraVeLGAN/shoes_128.hdf5', 'r') as f:
        #     dset = f

        x = hdf5storage.loadmat('/home/firiuza/MachineLearning/zap_dataset/ut-zap50k-data/image-path.mat')

        self.data_paths = [os.path.join(cfg.dataset.data_dir, el) for el in os.listdir(cfg.dataset.data_dir)[:10]]

        return

    def __create_graph(self):
        self.discriminator = Discriminator(cfg.train.discriminator_size)
        self.discriminator.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.siamese = Discriminator(cfg.train.siamese_size)
        self.siamese.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.unet = UNet()
        self.unet.build((None, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))

        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

        self.BC_loss = tf.keras.losses.BinaryCrossentropy()
        self.max_margin_loss = MaxMarginLoss(cfg.train.delta)
        self.distance_loss = DistanceLoss()

        return

    def run_train_epoch(self):
        dataset = tf.data.Dataset.from_tensor_slices((self.data_paths))
        dataset = dataset.shuffle(buffer_size=len(self.data_paths))
        dataset = dataset.map(map_func=self.data_augmentation.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(batch_size=cfg.train.batch_size).prefetch(buffer_size=cfg.train.prefetch_buffer_size)

        for image in dataset:
            with tf.GradientTape() as tape:
                # NOTE: this constant tensor overrides the real batch loaded above
                # (appears to be a debugging placeholder)
                image = tf.ones(shape=(cfg.train.batch_size, cfg.train.image_size,  cfg.train.image_size, cfg.train.image_channels))
                generated_image = self.unet(image, True)

                real_predictions = tf.sigmoid(self.discriminator(image, True))
                fake_predictions = tf.sigmoid(self.discriminator(generated_image, True))

                # tf.keras.losses.BinaryCrossentropy expects (y_true, y_pred)
                D_real = self.BC_loss(tf.ones_like(real_predictions), real_predictions)
                D_fake = self.BC_loss(tf.zeros_like(fake_predictions), fake_predictions)
                D_loss = D_real + D_fake

                G_adv = self.BC_loss(tf.ones_like(fake_predictions), fake_predictions)

                real_embeddings = self.siamese(image, True)
                fake_embeddings = self.siamese(generated_image, True)

                TraVeL_loss = self.distance_loss(real_embeddings, fake_embeddings)
                siamese_loss = self.max_margin_loss(real_embeddings, None)

                G_loss = G_adv + TraVeL_loss
                S_loss = siamese_loss + TraVeL_loss

            # grads = tape.gradient(loss_value, self.unet.trainable_variables)
            # self.optimizer.apply_gradients(zip(grads, self.unet.trainable_variables))


        return
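The gradient step in run_train_epoch is commented out above. One way to complete it is a persistent GradientTape with one gradient call per loss/network; the following self-contained sketch shows that pattern with small Dense layers standing in for the discriminator, UNet and siamese networks (an illustration only, not the project's code):

import tensorflow as tf

disc = tf.keras.layers.Dense(1)   # stand-in for the discriminator
gen = tf.keras.layers.Dense(4)    # stand-in for the UNet generator
siam = tf.keras.layers.Dense(2)   # stand-in for the siamese network
opt_d, opt_g, opt_s = (tf.keras.optimizers.Adam(0.01) for _ in range(3))

x = tf.ones((8, 4))
_ = disc(x), gen(x), siam(x)      # build the layers (mirrors the .build(...) calls above)

with tf.GradientTape(persistent=True) as tape:
    fake = gen(x)
    d_loss = tf.reduce_mean(tf.square(disc(x) - 1.0)) + tf.reduce_mean(tf.square(disc(fake)))
    g_loss = tf.reduce_mean(tf.square(disc(fake) - 1.0))
    s_loss = tf.reduce_mean(tf.square(siam(x) - siam(fake)))

# one gradient call per loss/network; persistent=True allows repeated calls
for loss, net, opt in ((d_loss, disc, opt_d), (g_loss, gen, opt_g), (s_loss, siam, opt_s)):
    grads = tape.gradient(loss, net.trainable_variables)
    opt.apply_gradients(zip(grads, net.trainable_variables))
del tape  # release the persistent tape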
Example #7
import torch
import numpy as np
from torch.autograd import grad
import matplotlib.pyplot as plt

from U_Net import UNet

model_chkpt = "C:\\AML_seg_proj\\CRC-Segmentation\\model_large_drop_batch_dice_ce\\model_chkpt_60.pt"
# define input
inp = torch.zeros(size=(1, 3, 32, 32), requires_grad=True)

model = UNet((32, 32), (32, 32),
             32,
             64,
             128,
             256,
             512,
             droprate=0.5,
             Norm=torch.nn.BatchNorm2d)
model.load_state_dict(torch.load(model_chkpt, map_location='cpu'))
model.eval()

# gradient of one output logit (class 0, pixel (15, 15)) w.r.t. the input
grad_tensor = grad(model(inp)[0, 0, 15, 15], inp)

# absolute gradient of input channel 0, normalized to [0, 1]
np_img = np.abs(grad_tensor[0][0][0].numpy())
np_img = np_img / np.max(np_img)
coords = np.column_stack(np.where(np_img != 0))
rf_size = np.max(coords, axis=0) - np.min(coords, axis=0)
print(f"Receptive field: {rf_size[0]}x{rf_size[1]}")