Code example #1
def load_data(task: str):

    # Define the augmentation pipeline here
    augments = Compose([
        # Convert the image to Tensor
        transforms.Lambda(lambda x: torch.Tensor(x)),
        # Randomly rotate the image by an angle
        # between -25 and 25 degrees
        RandomRotate(25),
        # Randomly translate the image by up to 11%
        # of the image height and width
        RandomTranslate([0.11, 0.11]),
        # Randomly flip the image
        RandomFlip(),
        # Repeat to 3 channels and reorder dims to (slices, channels, H, W)
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    print('Loading Train Dataset of {} task...'.format(task))
    # Load training dataset
    train_data = MRData(task, train=True, transform=augments)
    train_loader = data.DataLoader(
        train_data, batch_size=1, num_workers=11, shuffle=True
    )

    print('Loading Validation Dataset of {} task...'.format(task))
    # Load validation dataset
    val_data = MRData(task, train=False)
    val_loader = data.DataLoader(
        val_data, batch_size=1, num_workers=11, shuffle=False
    )

    return train_loader, val_loader, train_data.weights, val_data.weights
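
For reference, a minimal call site for this loader. The task name 'acl' below is an assumption (MRNet-style tasks are typically 'abnormal', 'acl', or 'meniscus'; the exact strings accepted by MRData depend on the project):

# Hypothetical smoke test for load_data; the 'acl' task name is assumed
train_loader, val_loader, train_wts, val_wts = load_data('acl')
print(len(train_loader.dataset), len(val_loader.dataset))
print(train_wts, val_wts)  # class weights exposed by MRData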
Code example #2
File: polyp_loader.py Project: zhawhjw/Thesis
def transform(x, y, crop_range, rot_range, shear_range, zoom_range, t):

    if not t:                                        # If t is False, the input is only cropped
        crop_type = np.random.randint(0, 5)          # Randomly crop the image from the center or a corner
        x_new = SpecialCrop(crop_range, crop_type=crop_type)(x)
        y_new = SpecialCrop(crop_range, crop_type=crop_type)(y)
    else:
        rot = RandomRotate(rot_range, lazy=True)(x)
        shear = RandomShear(shear_range, lazy=True)(x)
        zoom = RandomZoom(zoom_range, lazy=True)(x)
        flip = RandomFlip(v=True, p=np.random.randint(0, 2))  # Image and label are flipped with 0.5 probability

        crop_type = np.random.randint(0, 5)
        x_new = SpecialCrop(crop_range, crop_type=crop_type)(x)
        y_new = SpecialCrop(crop_range, crop_type=crop_type)(y)

        x_new = th_affine2d(x_new, rot)
        y_new = th_affine2d(y_new, rot)

        x_new = th_affine2d(x_new, shear)
        y_new = th_affine2d(y_new, shear)

        x_new = th_affine2d(x_new, zoom)
        y_new = th_affine2d(y_new, zoom)

        x_new = flip(x_new)
        y_new = flip(y_new)

    return AddChannel()(x_new), AddChannel()(y_new)  # Add a channel dim so batches can be concatenated
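
Worth noting here: constructing a torchsample transform with lazy=True samples the random parameters once and returns the affine matrix instead of a transformed image, which is what lets the identical rotation, shear, and zoom be applied to both the image and its label through th_affine2d. A minimal sketch of the idiom, assuming torchsample's RandomRotate and th_affine2d:

import torch
from torchsample.transforms import RandomRotate
from torchsample.utils import th_affine2d

x = torch.rand(1, 128, 128)           # image
y = torch.rand(1, 128, 128)           # segmentation mask
rot = RandomRotate(25, lazy=True)(x)  # returns the sampled affine matrix only
x_rot = th_affine2d(x, rot)           # same rotation applied to the image...
y_rot = th_affine2d(y, rot)           # ...and to its mask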
Code example #3
def load_data(task: str):

    # Define the augmentation pipeline here
    augments = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    print('Loading Train Dataset of {} task...'.format(task))
    train_data = MRData(task, train=True, transform=augments)
    train_loader = data.DataLoader(train_data,
                                   batch_size=1,
                                   num_workers=4,
                                   shuffle=True)

    print('Loading Validation Dataset of {} task...'.format(task))
    val_data = MRData(task, train=False)
    val_loader = data.DataLoader(val_data,
                                 batch_size=1,
                                 num_workers=4,
                                 shuffle=False)

    return train_loader, val_loader, train_data.weights, val_data.weights
Code example #4
File: attention.py Project: ajravikumar/MRNet
def run(args):

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = MRDataset('./data/', args.task, args.plane, transform=augmentor, train=True)
    array, label, weight = train_dataset[0]
    print(array.shape, label)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=11, drop_last=False)

    validation_dataset = MRDataset('./data/', args.task, args.plane, train=False)
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=1, shuffle=False, num_workers=11, drop_last=False)
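
The repeat(3, 1, 1, 1).permute(1, 0, 2, 3) Lambda that recurs in these augmentors turns a stack of s single-channel slices into s three-channel images, the input format a pretrained 2D backbone expects. A quick standalone shape check (slice count and resolution are arbitrary):

import torch

x = torch.rand(24, 256, 256)  # s=24 single-channel slices of one MRI plane
x = x.repeat(3, 1, 1, 1)      # -> (3, 24, 256, 256)
x = x.permute(1, 0, 2, 3)     # -> (24, 3, 256, 256): 3 channels per slice
print(x.shape)                # torch.Size([24, 3, 256, 256])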
Code example #5
def load_data(diagnosis, use_gpu=False):
    train_dirs = [
        '/home/niamh/Documents/MRNET/External_data/data/vol08',
        '/home/niamh/Documents/MRNET/External_data/data/vol04',
        '/home/niamh/Documents/MRNET/External_data/data/vol03',
        '/home/niamh/Documents/MRNET/External_data/data/vol09',
        '/home/niamh/Documents/MRNET/External_data/data/vol06',
        '/home/niamh/Documents/MRNET/External_data/data/vol07'
    ]
    valid_dirs = [
        '/home/niamh/Documents/MRNET/External_data/data/vol10',
        '/home/niamh/Documents/MRNET/External_data/data/vol05'
    ]
    test_dirs = [
        '/home/niamh/Documents/MRNET/External_data/data/vol01',
        '/home/niamh/Documents/MRNET/External_data/data/vol02'
    ]

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        # transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = Dataset(train_dirs, diagnosis, augmentor, use_gpu)
    valid_dataset = Dataset(valid_dirs, diagnosis, None, use_gpu)
    test_dataset = Dataset(test_dirs, diagnosis, None, use_gpu)

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=1,
                                   num_workers=8,
                                   shuffle=True)
    valid_loader = data.DataLoader(valid_dataset,
                                   batch_size=1,
                                   num_workers=8,
                                   shuffle=False)
    test_loader = data.DataLoader(test_dataset,
                                  batch_size=1,
                                  num_workers=8,
                                  shuffle=False)

    return train_loader, valid_loader, test_loader
Code example #6
File: train.py Project: MRNet-UCD/niamh
def run(args):

    indexes = list(range(0, 1130))
    random.seed(26)
    random.shuffle(indexes)

    for fold in range(0, 8):

        if fold == 0:
            train_ind = indexes[0:141] + indexes[282:]
            valid_ind = indexes[141:282]
        elif fold == 1:
            train_ind = indexes[0:282] + indexes[423:]
            valid_ind = indexes[282:423]
        elif fold == 2:
            train_ind = indexes[0:564] + indexes[705:]
            valid_ind = indexes[564:705]
        elif fold == 3:
            train_ind = indexes[:705] + indexes[846:]
            valid_ind = indexes[705:846]
        elif fold == 4:
            train_ind = indexes[:846] + indexes[987:]
            valid_ind = indexes[846:987]
        elif fold == 5:
            train_ind = indexes[:987]
            valid_ind = indexes[987:]
        elif fold == 6:
            train_ind = indexes[141:]
            valid_ind = indexes[0:141]
        elif fold == 7:
            # folds are 141-index blocks (141 * 8 covers the 1130 samples)
            train_ind = indexes[0:423] + indexes[564:]
            valid_ind = indexes[423:564]

        log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
        if args.flush_history == 1:
            objects = os.listdir(log_root_folder)
            for f in objects:
                if os.path.isdir(log_root_folder + f):
                    shutil.rmtree(log_root_folder + f)

        now = datetime.now()
        logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
        os.makedirs(logdir)

        writer = SummaryWriter(logdir)

        augmentor = Compose([
            transforms.Lambda(lambda x: torch.Tensor(x)),
            RandomRotate(25),
            RandomTranslate([0.11, 0.11]),
            RandomFlip(),
            #  transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
        ])
        mrnet = model.MRNet()

        if torch.cuda.is_available():
            mrnet = mrnet.cuda()

        optimizer = optim.Adam(mrnet.parameters(),
                               lr=args.lr,
                               weight_decay=0.1)

        if args.lr_scheduler == "plateau":
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                optimizer, patience=2, factor=.3, threshold=1e-4, verbose=True)
        elif args.lr_scheduler == "step":
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                        step_size=3,
                                                        gamma=args.gamma)

        best_val_loss = float('inf')
        best_val_auc = float(0)

        num_epochs = args.epochs
        iteration_change_loss = 0
        patience = args.patience
        log_every = args.log_every

        t_start_training = time.time()
        train_dataset = MRDataset(train_ind,
                                  '/content/data/',
                                  args.task,
                                  args.plane,
                                  valid=False,
                                  transform=augmentor)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=1,
                                                   shuffle=True,
                                                   num_workers=11,
                                                   drop_last=False)

        validation_dataset = MRDataset(valid_ind,
                                       '/content/data/',
                                       args.task,
                                       args.plane,
                                       valid=False,
                                       transform=None)
        validation_loader = torch.utils.data.DataLoader(validation_dataset,
                                                        batch_size=1,
                                                        shuffle=False,
                                                        num_workers=11,
                                                        drop_last=False)

        valid_dataset = MRDataset([0],
                                  '/content/data/',
                                  args.task,
                                  args.plane,
                                  valid=True,
                                  transform=None)
        valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=11,
                                                   drop_last=False)

        for epoch in range(num_epochs):
            current_lr = get_lr(optimizer)

            t_start = time.time()

            train_loss, train_auc = train_model(mrnet, train_loader, epoch,
                                                num_epochs, optimizer, writer,
                                                current_lr, log_every)
            val_loss, val_auc, test_auc = evaluate_model(
                mrnet, validation_loader, valid_loader, epoch, num_epochs,
                writer, current_lr)

            if args.lr_scheduler == 'plateau':
                scheduler.step(val_loss)
            elif args.lr_scheduler == 'step':
                scheduler.step()

            t_end = time.time()
            delta = t_end - t_start

            print(
                "fold : {0} | train loss : {1} | train auc {2} | val loss {3} | val auc {4} | elapsed time {5} s"
                .format(fold, train_loss, train_auc, val_loss, val_auc, delta))

            iteration_change_loss += 1
            print('-' * 30)

            if val_auc > best_val_auc:
                best_val_auc = val_auc
                if bool(args.save_model):
                    file_name = f'model_fold{fold}_{args.prefix_name}_{args.task}_{args.plane}_test_auc_{test_auc:0.4f}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                    for f in os.listdir('./models/'):
                        if (args.task in f) and (args.prefix_name
                                                 in f) and ('fold' + str(fold)
                                                            in f):
                            os.remove(f'./models/{f}')
                    torch.save(mrnet, f'./models/{file_name}')

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                iteration_change_loss = 0

            if iteration_change_loss == patience:
                print(
                    'Early stopping after {0} epochs without a decrease in the val loss'
                    .format(iteration_change_loss))
                break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training} s')
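
The training loop above calls a get_lr helper that is not shown in the snippet; a common minimal implementation (an assumption, not necessarily this project's exact code) reads the learning rate from the optimizer's first parameter group:

def get_lr(optimizer):
    # Current learning rate of the first parameter group
    for param_group in optimizer.param_groups:
        return param_group['lr']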
Code example #7
from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine
from torchvision import transforms
import torch
import torch.nn.functional as F
from data_preprocessor import MRDataset
import model

from sklearn import metrics

augmentor = Compose([
    transforms.Lambda(lambda x: torch.Tensor(x)),
    RandomRotate(25),
    RandomTranslate([0.11, 0.11]),
    RandomFlip(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
])

# Load datasets
train_dataset = MRDataset('./data/', True,
                          'axial', transform=augmentor, train=True)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=1, shuffle=True, num_workers=11, drop_last=False)

validation_dataset = MRDataset(
    './data/', False, 'axial', train=False)
validation_loader = torch.utils.data.DataLoader(
    validation_dataset, batch_size=1, shuffle=False, num_workers=11, drop_last=False)

mrnet = model.MRNet()
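
This snippet stops after constructing the model. A hypothetical single training step, assuming each dataset item is an (image, label, weight) triple as in code example #4 and that MRNet emits one logit per exam:

mrnet.train()
optimizer = torch.optim.Adam(mrnet.parameters(), lr=1e-5, weight_decay=0.1)
for image, label, weight in train_loader:
    optimizer.zero_grad()
    logit = mrnet(image)
    loss = F.binary_cross_entropy_with_logits(logit, label, weight=weight)
    loss.backward()
    optimizer.step()
    break  # one step only, to check shapes end to end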
Code example #8
def RandomFlip_setup():
    tforms = {}

    tforms['randomflip_h_01'] = RandomFlip(h=True, v=False)
    tforms['randomflip_h_02'] = RandomFlip(h=True, v=False, p=0)
    tforms['randomflip_h_03'] = RandomFlip(h=True, v=False, p=1)
    tforms['randomflip_h_04'] = RandomFlip(h=True, v=False, p=0.3)
    tforms['randomflip_v_01'] = RandomFlip(h=False, v=True)
    tforms['randomflip_v_02'] = RandomFlip(h=False, v=True, p=0)
    tforms['randomflip_v_03'] = RandomFlip(h=False, v=True, p=1)
    tforms['randomflip_v_04'] = RandomFlip(h=False, v=True, p=0.3)
    tforms['randomflip_hv_01'] = RandomFlip(h=True, v=True)
    tforms['randomflip_hv_02'] = RandomFlip(h=True, v=True, p=0)
    tforms['randomflip_hv_03'] = RandomFlip(h=True, v=True, p=1)
    tforms['randomflip_hv_04'] = RandomFlip(h=True, v=True, p=0.3)
    return tforms
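
The setup above exercises RandomFlip's three knobs: h and v select the flip axes, and p is the flip probability (p=0 never flips, p=1 always flips). A minimal standalone check, assuming torchsample is installed:

import torch
from torchsample.transforms import RandomFlip

x = torch.rand(1, 64, 64)                        # dummy single-channel image
x_flipped = RandomFlip(h=True, v=False, p=1)(x)  # always flips
x_same = RandomFlip(h=True, v=False, p=0)(x)     # never flips
print(torch.equal(x_same, x), torch.equal(x_flipped, x))  # True False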
Code example #9
def main():
    ## For loading complete tensor data into memory
    # image_data_file = 'C:/dev/data/Endoviz2018/GIANA/polyp_detection_segmentation/image_data_all_640x640.npy'
    # label_data_file = 'C:/dev/data/Endoviz2018/GIANA/polyp_detection_segmentation/gt_data_all_640x640.npy'

    ## For using csv file to read image locations and use them to load images at run time
    image_gt_file_list_all = 'C:/dev/data/Endoviz2018/GIANA/polyp_detection_segmentation/image_gt_data_file_list_all_640x640.csv'

    ## Transforms to apply
    image_transform = transforms.Compose([
        ResizePad(640, 'RGB'),
        TypeCast('float'),
        # RangeNormalize(0, 1),
        # StdNormalize(),
        RandomGamma(0.2, 1.0),
        Brightness(0.1),
        RandomSaturation(0.1, 0.2)
    ])
    label_transform = transforms.Compose(
        [ResizePad(640, 'L'), TypeCast('float')])
    joint_transform = Compose([
        RandomFlip(h=True, v=True)
        #AffineCompose([Rotate(10), Translate((0.2, 0.2))]),
        #SpecialCrop((400, 400))
    ])

    image_gt_file_list_all_df = pd.read_csv(image_gt_file_list_all,
                                            header=None)

    ## Create Dataset object to read from CSV file
    giana_dataset = CSVDataset(image_gt_file_list_all_df,
                               input_transform=image_transform,
                               target_transform=label_transform,
                               co_transform=joint_transform)

    ## When loading all the data into memory
    # image_data = np.load(image_data_file)
    # label_data = np.load(label_data_file)
    # label_data = np.expand_dims(label_data, axis=3)

    # giana_dataset = TensorDataset(image_data, label_data, input_transform=image_transform,
    #                                 target_transform=label_transform, co_transform=joint_transform)

    ## Create pytorch dataloader
    giana_dataloader = DataLoader(giana_dataset, batch_size=4, shuffle=True)

    idx = 0
    for images, labels in giana_dataloader:
        print(images.size(), labels.size())
        fig = plt.figure(figsize=(10, 5))
        timer = fig.canvas.new_timer(interval=2000)
        timer.add_callback(close_event)
        # idx = 0
        # for image, label in zip(images, labels):
        #     # plt.imshow(np.rollaxis(image.numpy(), 0, 3))
        plot_image_label(np.rollaxis(images.numpy(), 1, 4),
                         np.rollaxis(labels.numpy(), 1, 4))

        timer.start()
        plt.show()

        idx += 1
        if idx == 4:
            break
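
The close_event callback wired to the matplotlib timer is not shown; a typical minimal version (assumed here) simply closes the current figure so the loop advances to the next batch:

import matplotlib.pyplot as plt

def close_event():
    # Close the current figure when the 2-second timer fires
    plt.close()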
Code example #10
File: train-2.py Project: MRNet-UCD/niamh
def run(args):
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    if args.flush_history == 1:
        objects = os.listdir(log_root_folder)
        for f in objects:
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = MRDataset('/home/niamh/Documents/MRNET/data/', args.task,
                              args.plane, transform=augmentor, train=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=11, drop_last=False)

    validation_dataset = MRDataset(
        '/home/niamh/Documents/MRNET/data/', args.task, args.plane, train=False)
    validation_loader = torch.utils.data.DataLoader(
    validation_dataset, batch_size=1, shuffle=False, num_workers=11, drop_last=False)

    mrnet = model.UNet(n_channels=3, n_classes=1, bilinear=True)

    if torch.cuda.is_available():
        mrnet = mrnet.cuda()

    optimizer = optim.Adam(mrnet.parameters(), lr=args.lr, weight_decay=0.1)

    if args.lr_scheduler == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=3, gamma=args.gamma)

    best_val_loss = float('inf')
    best_val_auc = float(0)

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience
    log_every = args.log_every

    t_start_training = time.time()

    for epoch in range(num_epochs):
        current_lr = get_lr(optimizer)

        t_start = time.time()
        
        train_loss, train_auc = train_model(
            mrnet, train_loader, epoch, num_epochs, optimizer, writer, current_lr, log_every)
        val_loss, val_auc = evaluate_model(
            mrnet, validation_loader, epoch, num_epochs, writer, current_lr)

        if args.lr_scheduler == 'plateau':
            scheduler.step(val_loss)
        elif args.lr_scheduler == 'step':
            scheduler.step()

        t_end = time.time()
        delta = t_end - t_start

        print("train loss : {0} | train auc {1} | val loss {2} | val auc {3} | elapsed time {4} s".format(
            train_loss, train_auc, val_loss, val_auc, delta))

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.prefix_name}_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f) and (args.prefix_name in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print('Early stopping after {0} epochs without a decrease in the val loss'.
                  format(iteration_change_loss))
            break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training} s')
Code example #11
File: train.py Project: linhduongtuan/mrnet
def run(args):
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    if args.flush_history == 1:
        objects = os.listdir(log_root_folder)
        for f in objects:
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = MRDataset('./data/',
                              args.task,
                              args.plane,
                              transform=augmentor,
                              train=True)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=11,
                                               drop_last=False)

    validation_dataset = MRDataset('./data/',
                                   args.task,
                                   args.plane,
                                   train=False)
    validation_loader = torch.utils.data.DataLoader(validation_dataset,
                                                    batch_size=1,
                                                    shuffle=False,
                                                    num_workers=11,
                                                    drop_last=False)

    mrnet = model.MRNet()
    mrnet = mrnet.cuda()

    optimizer = optim.Adam(mrnet.parameters(), lr=1e-5, weight_decay=0.1)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           factor=.3,
                                                           threshold=1e-4,
                                                           verbose=True)

    best_val_loss = float('inf')
    best_val_auc = float(0)

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience

    for epoch in range(num_epochs):

        train_loss, train_auc = train_model(mrnet, train_loader, epoch,
                                            num_epochs, optimizer, writer)
        val_loss, val_auc = evaluate_model(mrnet, validation_loader, epoch,
                                           num_epochs, writer)

        print("train loss : {0} | train auc {1} | val loss {2} | val auc {3}".
              format(train_loss, train_auc, val_loss, val_auc))

        if args.lr_scheduler == 1:
            scheduler.step(val_loss)

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print(
                'Early stopping after {0} epochs without a decrease in the val loss'
                .format(iteration_change_loss))
            break