Example #1
    def __init__(self, params, use_cuda=False):
        super(MModel, self).__init__()
        self.params = params
        self.src_mask_delta_UNet = U_Net(params, img_ch=3, output_ch=11)
        self.FG_UNet = U_Net(params, img_ch=30, output_ch=3)
        # self.BG_UNet = U_Net(params, img_ch=4, output_ch=3)
        self.use_cuda = use_cuda
Example #2
def train():
    model = U_Net(n_channels=num_channels, n_classes=num_classes).to(device)
    model_graph = SummaryWriter(comment="UNet")
    input_c = torch.rand(1, 3, 256, 256)
    # model_graph.add_graph(model, (input_c.to(device),))
    model.train()
    # criterion = nn.BCELoss()
    criterion = nn.NLLLoss()  # nn.NLLLoss2d is deprecated; nn.NLLLoss handles (N, C, H, W) inputs directly
    # criterion=MulticlassDiceLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    dataset = My_Dataset(root,
                         num_classes,
                         size,
                         transform=img_transforms,
                         mask_transform=mask_transforms)
    data_loaders = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)
    train_model(model,
                criterion,
                optimizer,
                data_loaders,
                model_graph=model_graph,
                num_epochs=num_epochs)
    model_graph.close()
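A note on the loss used in this example: nn.NLLLoss expects log-probabilities, so it only works if the U_Net forward already ends in a log_softmax; if the network returns raw logits, either apply log_softmax before the loss or use CrossEntropyLoss instead. A minimal self-contained sketch (the shapes and class count are illustrative, not taken from the example):

import torch
import torch.nn.functional as F

num_classes = 2
logits = torch.randn(1, num_classes, 256, 256)              # stand-in for model(input_c)
target = torch.randint(0, num_classes, (1, 256, 256))       # per-pixel class-index mask

# Two equivalent formulations of the per-pixel classification loss:
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)   # what NLLLoss expects
loss_b = F.cross_entropy(logits, target)                    # fuses log_softmax + NLLLoss
assert torch.allclose(loss_a, loss_b)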
Example #3
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=self.img_ch, output_ch=1, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=self.img_ch, output_ch=1, t=self.t)
        elif self.model_type == 'MixU_Net':
            self.unet = MixU_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'MixAttU_Net':
            self.unet = MixAttU_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'MixR2U_Net':
            self.unet = MixR2U_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'MixR2AttU_Net':
            self.unet = MixR2AttU_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'GhostU_Net':
            self.unet = GhostU_Net(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'GhostU_Net1':
            self.unet = GhostU_Net1(img_ch=self.img_ch, output_ch=1)
        elif self.model_type == 'GhostU_Net2':
            self.unet = GhostU_Net2(img_ch=self.img_ch, output_ch=1)

        #pytorch_total_params = sum(p.numel() for p in self.unet.parameters() if p.requires_grad)
        #print (pytorch_total_params)
        #raise
        self.optimizer = optim.Adam(list(self.unet.parameters()), self.lr,
                                    [self.beta1, self.beta2])
        self.unet.to(self.device)
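The long if/elif chain above is a plain string-to-constructor dispatch, so it can be collapsed into a lookup table. A sketch of that variant, assuming the same network classes are importable (only a few entries are shown; the lambdas keep construction lazy, so only the selected model is built):

    def build_model(self):
        constructors = {
            'U_Net': lambda: U_Net(img_ch=self.img_ch, output_ch=1),
            'R2U_Net': lambda: R2U_Net(img_ch=self.img_ch, output_ch=1, t=self.t),
            'AttU_Net': lambda: AttU_Net(img_ch=self.img_ch, output_ch=1),
            # ... remaining variants follow the same pattern
        }
        try:
            self.unet = constructors[self.model_type]()
        except KeyError:
            raise ValueError('Unknown model_type: %s' % self.model_type)
        self.optimizer = optim.Adam(self.unet.parameters(), self.lr,
                                    (self.beta1, self.beta2))
        self.unet.to(self.device)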
Example #4
    def build_model(self):
        """Build our deep learning model."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=1,
                              output_ch=1,
                              first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(
                img_ch=1,
                output_ch=1,
                t=self.t,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(
                img_ch=1,
                output_ch=1,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(
                img_ch=1,
                output_ch=1,
                t=self.t,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'ResAttU_Net':
            self.unet = ResAttU_Net(
                UnetLayer=self.UnetLayer,
                img_ch=1,
                output_ch=1,
                first_layer_numKernel=self.first_layer_numKernel)

        if self.initialization != 'NA':
            init_weights(self.unet, init_type=self.initialization)
        self.unet.to(self.device)
Example #5
    def __init__(self, params, use_cuda=False):
        super(MModel, self).__init__()
        self.params = params
        self.src_mask_delta_UNet = U_Net(params, img_ch=3, output_ch=11)
        self.FG_UNet = U_Net(params, img_ch=30, output_ch=3, last_conv=False)
        self.FG_tgt_Conv = nn.Conv2d(64,
                                     3,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     padding_mode='replicate')
        self.FG_mask_Conv = nn.Conv2d(64,
                                      1,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1,
                                      padding_mode='replicate')
        self.BG_UNet = U_Net(params, img_ch=4, output_ch=3)
        self.use_cuda = use_cuda
Example #6
def get_model(model_name=None):
    if model_name is None:
        print('Undefined model - Model Name = None')
        return None
    elif model_name == 'unet':
        model = U_Net(UnetLayer=5, img_ch=INPUT_CHANNELS, output_ch=OUTPUT_CHANNELS)
    elif model_name == 'res_att_unet':
        model = ResAttU_Net(UnetLayer=5, img_ch=INPUT_CHANNELS, output_ch=OUTPUT_CHANNELS)

    return model
Example #7
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=1, output_ch=1)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=1, output_ch=1, t=self.t)
            #init_weights(self.unet, 'normal')

        self.optimizer = optim.Adam(list(self.unet.parameters()), self.lr,
                                    (self.beta1, self.beta2))
        self.unet.to(self.device)
Example #8
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=3, output_ch=2)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=3, output_ch=1, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=3, output_ch=1)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=3, output_ch=1, t=self.t)

        self.optimizer = optim.Adam(list(self.unet.parameters()), self.lr,
                                    [self.beta1, self.beta2])
        self.unet.to(self.device)
Example #9
    def build_model(self):
        # Load required model
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=3, output_ch=1)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=3, output_ch=1, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=3, output_ch=1)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=3, output_ch=1, t=self.t)

        # Load optimizer
        self.optimizer = optim.Adam(list(self.unet.parameters()), self.lr,
                                    [self.beta1, self.beta2])
        # Move model to device
        self.unet.to(self.device)
Example #10
    def build_model(self, config):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=3, output_ch=1)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=3, output_ch=1, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=3, output_ch=1)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=3, output_ch=1, t=self.t)

        # # Load the pretrained Encoder
        # if os.path.isfile(config.pretrained):
        # 	self.unet.load_state_dict(torch.load(config.pretrained))
        # 	print('%s is Successfully Loaded from %s'%(self.model_type,config.pretrained))

        self.optimizer = optim.Adam(list(self.unet.parameters()), self.lr,
                                    [self.beta1, self.beta2])
        self.unet.to(self.device)
Example #11
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(UnetLayer=self.UnetLayer,
                              img_ch=self.img_ch,
                              output_ch=self.output_ch,
                              first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(
                img_ch=self.img_ch,
                output_ch=self.output_ch,
                t=self.t,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(
                img_ch=self.img_ch,
                output_ch=self.output_ch,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(
                img_ch=self.img_ch,
                output_ch=self.output_ch,
                t=self.t,
                first_layer_numKernel=self.first_layer_numKernel)
        elif self.model_type == 'ResAttU_Net':
            self.unet = ResAttU_Net(
                UnetLayer=self.UnetLayer,
                img_ch=self.img_ch,
                output_ch=self.output_ch,
                first_layer_numKernel=self.first_layer_numKernel)

        if self.optimizer_choice == 'Adam':
            self.optimizer = optim.Adam(list(self.unet.parameters()),
                                        self.initial_lr,
                                        [self.beta1, self.beta2])
        elif self.optimizer_choice == 'SGD':
            self.optimizer = optim.SGD(list(self.unet.parameters()),
                                       self.initial_lr, self.momentum)
        else:
            pass

        self.unet.to(self.device)
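One caveat with the optimizer selection above: the final `else: pass` silently leaves self.optimizer undefined when optimizer_choice is not recognized, which only surfaces later as an AttributeError. A defensive variant of the same block (a sketch, not the author's code) fails fast instead:

        if self.optimizer_choice == 'Adam':
            self.optimizer = optim.Adam(self.unet.parameters(), self.initial_lr,
                                        (self.beta1, self.beta2))
        elif self.optimizer_choice == 'SGD':
            self.optimizer = optim.SGD(self.unet.parameters(), self.initial_lr,
                                       momentum=self.momentum)
        else:
            raise ValueError('Unsupported optimizer_choice: %s' % self.optimizer_choice)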
Example #12
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=3, output_ch=3)
        elif self.model_type == 'R2U_Net':
            print("------> using R2U <--------")
            self.unet = R2U_Net(img_ch=3, output_ch=3, t=self.t)
        elif self.model_type == 'AttU_Net':
            print("------> using AttU <--------")
            self.unet = AttU_Net(img_ch=3, output_ch=3)
        elif self.model_type == 'R2AttU_Net':
            print("------> using R2-AttU <--------")
            self.unet = R2AttU_Net(img_ch=3, output_ch=3, t=self.t)
        elif self.model_type == 'ABU_Net':
            print("------> using ABU_Net <--------")
            self.unet = U_Net_AB(img_ch=3, output_ch=1)
        elif self.model_type == 'Multi_Task':
            print("------> using Multi_Task Learning <--------")
            model = torch.hub.load('pytorch/vision',
                                   'mobilenet_v2',
                                   pretrained=True)
            model_infeatures_final_layer = model.classifier[1].in_features
            model.classifier = torch.nn.Sequential(
                *list(model.classifier.children())[:-1])
            for param in model.parameters():
                param.requires_grad = True
            for param in model.features[18].parameters():
                param.requires_grad = True
            for param in model.classifier.parameters():
                param.requires_grad = True
            model_trained_mobilenet = model
            print("All trainable parameters of model are")
            for name, param in model_trained_mobilenet.named_parameters():
                if param.requires_grad:
                    print(name, param.shape)
            self.unet = multi_task_model_classification(
                model_trained_mobilenet)

        self.optimizer = optim.AdamW(list(self.unet.parameters()), self.lr,
                                     [self.beta1, self.beta2])
        self.unet.to(self.device)
Example #13
    def build_model(self):
        """Build generator and discriminator."""
        if self.model_type == 'U_Net':
            self.unet = U_Net(img_ch=1, output_ch=1)
        elif self.model_type == 'R2U_Net':
            self.unet = R2U_Net(img_ch=1, output_ch=1, t=self.t)
        elif self.model_type == 'AttU_Net':
            self.unet = AttU_Net(img_ch=1, output_ch=1)
        elif self.model_type == 'R2AttU_Net':
            self.unet = R2AttU_Net(img_ch=1, output_ch=1, t=self.t)

        if self.optimizer_choice == 'Adam':
            self.optimizer = optim.Adam(list(self.unet.parameters()),
                                        self.initial_lr,
                                        [self.beta1, self.beta2])
        elif self.optimizer_choice == 'SGD':
            self.optimizer = optim.SGD(list(self.unet.parameters()),
                                       self.initial_lr, self.momentum)
        else:
            pass

        self.unet.to(self.device)
Example #14
def train(cycle_num,
          dirs,
          path_to_net,
          plotter,
          batch_size=12,
          test_split=0.3,
          random_state=666,
          epochs=100,
          learning_rate=0.0001,
          momentum=0.9,
          num_folds=5,
          num_slices=155,
          n_classes=4):
    """
    Trains the network for one cross-validation cycle.
        Args:
            cycle_num (int): index of the current cycle in the n-fold (num_folds) cross-validation
            dirs (string): path to dataset subject directories 
            path_to_net (string): path to directory where to save network
            plotter (callable): visdom plotter
            batch_size - default (int): batch size
            test_split - default (float): percentage of test split 
            random_state - default (int): seed for k-fold cross validation
            epochs - default (int): number of epochs
            learning_rate - default (float): learning rate 
            momentum - default (float): momentum
            num_folds - default (int): number of folds in cross validation
            num_slices - default (int): number of slices per volume
            n_classes - default (int): number of classes (regions)
    """
    print('Setting started', flush=True)

    # Creating data indices
    # arange len of list of subject dirs
    indices = np.arange(len(glob.glob(dirs + '*')))
    test_indices, trainset_indices = get_test_indices(indices, test_split)
    # kfold index generator
    for cv_num, (train_indices, val_indices) in enumerate(
            get_train_cv_indices(trainset_indices, num_folds, random_state)):
        # split the 5-fold CV into 5 separate jobs
        if cv_num != int(cycle_num):
            continue

        net = U_Net()
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        num_GPU = torch.cuda.device_count()
        if num_GPU > 1:
            print('Let us use {} GPUs!'.format(num_GPU), flush=True)
            net = nn.DataParallel(net)
        net.to(device)
        criterion = nn.CrossEntropyLoss()
        if cycle_num % 2 == 0:
            optimizer = optim.SGD(net.parameters(),
                                  lr=learning_rate,
                                  momentum=momentum)
        else:
            optimizer = optim.Adam(net.parameters(), lr=learning_rate)

        scheduler = ReduceLROnPlateau(optimizer, threshold=1e-6, patience=0)

        print('cv cycle number: ', cycle_num, flush=True)
        start = time.time()
        print('Start Train and Val loading', flush=True)

        MRIDataset_train = dataset.MRIDataset(dirs, train_indices)

        MRIDataset_val = dataset.MRIDataset(dirs, val_indices)

        datalengths = {
            'train': len(MRIDataset_train),
            'val': len(MRIDataset_val)
        }
        dataloaders = {
            'train': get_dataloader(MRIDataset_train, batch_size, num_GPU),
            'val': get_dataloader(MRIDataset_val, batch_size, num_GPU)
        }
        print('Train and Val loading took: ', time.time() - start, flush=True)
        # make loss and acc history for train and val separately
        # Setup Metrics
        running_metrics_val = runningScore(n_classes)
        running_metrics_train = runningScore(n_classes)
        val_loss_meter = averageMeter()
        train_loss_meter = averageMeter()
        itr = 0
        iou_best = 0.
        for epoch in tqdm(range(epochs), desc='Epochs'):
            print('Epoch: ', epoch + 1, flush=True)
            phase = 'train'
            print('Phase: ', phase, flush=True)
            start = time.time()
            # Set model to training mode
            net.train()
            # Iterate over data.
            for i, data in tqdm(enumerate(dataloaders[phase]),
                                desc='Data Iteration ' + phase):
                if (i + 1) % 100 == 0:
                    print('Number of Iteration [{}/{}]'.format(
                        i + 1, int(datalengths[phase] / batch_size)),
                          flush=True)
                # get the inputs
                inputs = data['mri_data'].to(device)
                GT = data['seg'].to(device)
                subject_slice_path = data['subject_slice_path']
                # Clear all accumulated gradients
                optimizer.zero_grad()
                # Predict classes using inputs from the train set
                SR = net(inputs)
                # Compute the loss based on the predictions and
                # actual segmentation
                loss = criterion(SR, GT)
                # Backpropagate the loss
                loss.backward()
                # Adjust parameters according to the computed
                # gradients
                # -- weight update
                optimizer.step()
                # Track and plot metrics and loss, and save network
                predictions = SR.data.max(1)[1].cpu().numpy()
                GT_cpu = GT.data.cpu().numpy()
                running_metrics_train.update(GT_cpu, predictions)
                train_loss_meter.update(loss.item(), n=1)
                if (i + 1) % 100 == 0:
                    itr += 1
                    score, class_iou = running_metrics_train.get_scores()
                    for k, v in score.items():
                        plotter.plot(k, 'itr', phase, k, itr, v)
                    for k, v in class_iou.items():
                        print('Class {} IoU: {}'.format(k, v), flush=True)
                        plotter.plot(
                            str(k) + ' Class IoU', 'itr', phase,
                            str(k) + ' Class IoU', itr, v)
                    print('Loss Train', train_loss_meter.avg, flush=True)
                    plotter.plot('Loss', 'itr', phase, 'Loss Train', itr,
                                 train_loss_meter.avg)
            print('Phase {} took {} s for whole {}set!'.format(
                phase,
                time.time() - start, phase),
                  flush=True)

            # Validation Phase
            phase = 'val'
            print('Phase: ', phase, flush=True)
            start = time.time()
            # Set model to evaluation mode
            net.eval()
            start = time.time()
            with torch.no_grad():
                # Iterate over data.
                for i, data in tqdm(enumerate(dataloaders[phase]),
                                    desc='Data Iteration ' + phase):
                    if (i + 1) % 100 == 0:
                        print('Number of Iteration [{}/{}]'.format(
                            i + 1, int(datalengths[phase] / batch_size)),
                              flush=True)
                    # get the inputs
                    inputs = data['mri_data'].to(device)
                    GT = data['seg'].to(device)
                    subject_slice_path = data['subject_slice_path']
                    # Clear all accumulated gradients
                    optimizer.zero_grad()
                    # Predict classes using inputs from the train set
                    SR = net(inputs)
                    # Compute the loss based on the predictions and
                    # actual segmentation
                    loss = criterion(SR, GT)
                    # Track and plot metrics and loss
                    predictions = SR.data.max(1)[1].cpu().numpy()
                    GT_cpu = GT.data.cpu().numpy()
                    running_metrics_val.update(GT_cpu, predictions)
                    val_loss_meter.update(loss.item(), n=1)
                    if (i + 1) % 100 == 0:
                        itr += 1
                        score, class_iou = running_metrics_val.get_scores()
                        for k, v in score.items():
                            plotter.plot(k, 'itr', phase, k, itr, v)
                        for k, v in class_iou.items():
                            print('Class {} IoU: {}'.format(k, v), flush=True)
                            plotter.plot(
                                str(k) + ' Class IoU', 'itr', phase,
                                str(k) + ' Class IoU', itr, v)
                        print('Loss Val', val_loss_meter.avg, flush=True)
                        plotter.plot('Loss ', 'itr', phase, 'Loss Val', itr,
                                     val_loss_meter.avg)
                if (epoch + 1) % 10 == 0:
                    if score['Mean IoU'] > iou_best:
                        save_net(path_to_net, batch_size, epoch, cycle_num,
                                 train_indices, val_indices, test_indices, net,
                                 optimizer)
                        iou_best = score['Mean IoU']
                    save_output(epoch, path_to_net, subject_slice_path,
                                SR.data.cpu().numpy(), GT_cpu)
                print('Phase {} took {} s for whole {}set!'.format(
                    phase,
                    time.time() - start, phase),
                      flush=True)
            # Call the learning rate adjustment function after every epoch
            scheduler.step(val_loss_meter.avg)
    # save network after training
    save_net(path_to_net,
             batch_size,
             epochs,
             cycle_num,
             train_indices,
             val_indices,
             test_indices,
             net,
             optimizer,
             iter_num=None)
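For the `loss = criterion(SR, GT)` call in this example, nn.CrossEntropyLoss expects raw logits of shape (N, C, H, W) and an integer label map of shape (N, H, W) with dtype long; `SR.data.max(1)[1]` then takes the per-pixel argmax over the class dimension as the prediction. A minimal shape check (values are illustrative only):

import torch
import torch.nn as nn

n_classes = 4
SR = torch.randn(2, n_classes, 64, 64)             # network output (logits)
GT = torch.randint(0, n_classes, (2, 64, 64))      # ground-truth label map (dtype long)
loss = nn.CrossEntropyLoss()(SR, GT)
predictions = SR.max(1)[1]                         # (2, 64, 64) per-pixel predicted classes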
Example #15
from utils import COLOR_DICT
from utils import dense_crf
from utils import intersectionAndUnion
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# use CUDA if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = 2
num_channels = 3
batch_size = 4
size = (256, 256)
root = "data/membrane/test"
img_file = search_file(root, [".png"])
# print(img_file)
if __name__ == "__main__":
    model = U_Net(num_channels, num_classes).to(device)
    model.load_state_dict(torch.load('UNet_weights_bilinear_weight.pth'))
    model.eval()
    with torch.no_grad():
        for i in range(1):
            print(img_file[i])
            input = cv2.imread(img_file[i], cv2.IMREAD_COLOR)
            input = cv2.resize(input, size)
            original_img = input
            print(
                os.path.join(
                    "data/membrane/result1",
                    os.path.splitext(os.path.basename(img_file[i]))[0] +
                    "_predict.png"), )
            label = cv2.imread(
                os.path.join(
Example #16
# noinspection PyArgumentList
train, val, test = random_split(dataset, [228, 32, 66])
train_loader = DataLoader(dataset=train, batch_size=batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(dataset=val, batch_size=batch_size // 2, shuffle=True, num_workers=4)
test_loader = DataLoader(dataset=test, batch_size=batch_size // 4, shuffle=True, num_workers=4)


from nissl_dataset import Nissl_mask_dataset
from network import U_Net
from network import ResAttU_Net


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

modelunet = U_Net(UnetLayer=5, img_ch=3, output_ch=4).to(device)
modelresunet = ResAttU_Net(UnetLayer=5, img_ch=3, output_ch=4).to(device)
modelunet.load_state_dict(torch.load('/gdrive/MyDrive/models/unet'), strict=False)

# output = modelunet(image.to(device))

modelresunet.load_state_dict(torch.load('/gdrive/MyDrive/models/resunet'), strict=False)
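Because strict=False silently skips any checkpoint keys that do not match the model, it is worth inspecting the result: load_state_dict returns the lists of missing and unexpected keys. A small check, reusing the checkpoint path assumed above:

result = modelresunet.load_state_dict(torch.load('/gdrive/MyDrive/models/resunet'),
                                      strict=False)
if result.missing_keys or result.unexpected_keys:
    print('missing keys:', result.missing_keys)
    print('unexpected keys:', result.unexpected_keys)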

def gt_to_colorimg(masks):
    # colors = np.asarray([(201, 58, 64), (242, 207, 1), (0, 152, 75), (101, 172, 228)])  # ,(56, 34, 132), (160, 194, 56)])
    colors = np.asarray([(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255)])
    colorimg = np.ones((masks.shape[1], masks.shape[2], 3), dtype=np.float32) * 255
    channels, height, width = masks.shape