Example No. 1
 def init_model(self, device_id, last_ckpt=None):
     os.environ['CUDA_VISIBLE_DEVICES'] = device_id  # device_id is expected to be a string
     self.model = UNet3D(input_nc=1, output_nc=2)
     if last_ckpt is not None:
         state_dict = torch.load(last_ckpt)
         self.model.load_state_dict(state_dict)
     self.model = self.model.cuda()
     self.criterion = dice_loss().cuda()
     self.opt = torch.optim.Adam(self.model.parameters(), lr=1e-4)
     
     self.model_is_initialized = True
Example No. 2
 def init_model(self, device_id, last_ckpt=None):
     os.environ['CUDA_VISIBLE_DEVICES'] = device_id  # device_id is expected to be a string
     self.model = UNet3D(input_nc=1, output_nc=2)
     if last_ckpt is not None:
         state_dict = torch.load(last_ckpt)
         self.model.load_state_dict(state_dict)
     self.model = self.model.cuda()
     self.criterion = nn.CrossEntropyLoss(weight=torch.tensor([1.,8.])).cuda()
     self.mse = nn.MSELoss()
     self.opt = torch.optim.Adam(self.model.parameters(), lr=1e-4)
     self.brainmask = None  # placeholder: the assigned value is truncated in the original snippet
     
     self.model_is_initialized = True
Example No. 3
if not os.path.isdir(save_results_path):
    os.mkdir(save_results_path)

# Get folder paths and names (IDs) of the folders that store the preprocessed data
valist = glob.glob(preprocessed_data_path + '/Brats*')
valid_set = Dataset(valist, seg_provided=False, nozero=False)

# Use GPU
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True
num_folds = 2
models = []
for epoch_nr in epoch_nrs:
    for fold in range(1, num_folds + 1):
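        # Positional args: in_channels=4, out_channels=4, final_sigmoid=False, f_maps=16, layer_order='crg', num_groups=8 (same parameter order as the keyword call in Example No. 4)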
        model = UNet3D(4, 4, False, 16, 'crg', 8)
        print("Loading {}/Fold_fold{}_Epoch_{}.tar".format(
            model_saves_path, fold, epoch_nr))
        checkpoint = torch.load("{}/Fold_fold{}_Epoch_{}.tar".format(
            model_saves_path, fold, epoch_nr))
        #         if list(checkpoint['model_state_dict'].keys())[0].find("module") > -1:
        #             new_state_dict = OrderedDict()
        #             for k, v in checkpoint['model_state_dict'].items():
        #                 name = k[7:]  # remove module.
        #                 new_state_dict[name] = v
        #             model.load_state_dict(new_state_dict)
        #         else:
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        model.eval()
        models.append(model)
Example No. 4
    epoch_center_loss = running_center_loss / len(data_set)
    epoch_rotation_loss = running_rotation_loss / len(data_set)
    epoch_miou = running_miou / len(data_set)

    print('Model Loss: {:.4f}'.format(float(epoch_model_loss)))
    print('MSE Loss: {:.4f}'.format(float(epoch_loss)))
    print('L1 Loss: {:.4f}'.format(float(epoch_l1_loss)))
    print('Scale L1 Loss: {:.4f}'.format(float(epoch_scale_loss)))
    print('Center L1 Loss: {:.4f}'.format(float(epoch_center_loss)))
    print('Mean IoU: {:.4f}'.format(float(epoch_miou)))


if __name__ == '__main__':

    model_cfg = cfg['UNet']
    model = UNet3D(model_cfg['in_channels'],
                   model_cfg['out_channels'],
                   final_sigmoid=model_cfg['final_sigmoid'],
                   f_maps=model_cfg['f_maps'],
                   layer_order=model_cfg['layer_order'],
                   num_groups=model_cfg['num_groups'],
                   is_segmentation=model_cfg['is_segmentation'])
    if use_gpu:
        model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(
        torch.load(os.path.join(root_path, cfg['load_model'])))
    model.eval()

    test_model(model)
Example No. 5
    from params import params5 as params
    print_config()

    # Create the model output directory if it doesn't exist
    if not os.path.exists('saved_models/'):
        os.makedirs('saved_models/')

    # Create datasets
    train_ds, train_loader, val_ds, val_loader = create_datasets(
        root_dir=params['data_folder'],
        end_image_shape=params['image_shape'],
        batch_size=params['batch_size'],
        validation_proportion=params['validation_proportion'])

    # Create model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet3D().to(device)

    # Create loss function and optimizer
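    # to_onehot_y=True one-hot encodes the integer label map; softmax=True applies softmax to the network logits before the Dice term is computed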
    loss_function = DiceLoss(to_onehot_y=True, softmax=True)
    optimizer = torch.optim.Adam(model.parameters(), params['learning_rate'])

    # Save model metadata to json file
    save_model_metadata(params, train_ds, val_ds)

    train()
    """
    Go to the Terminal tab in PyCharm and run:
    >> tensorboard --logdir=runs
    Click on the output URL http://localhost:6006/
    """
Example No. 6
                                   num_workers=args.n_workers)

    # input_size = (args.patch_size, args.patch_size, args.patch_size)

    if args.arch == "teacher":
        # model = FC_teacher_max_p.FC_teacher_max_p(n_filters=args.initial_filters, k_conv=args.kernel_size).to(args.device)
        student = FC_deeper_teacher.FC_deeper_teacher(
            n_filters=args.initial_filters,
            k_conv=args.kernel_size).to(args.device)
    elif args.arch == "student":
        student = FC_student.FC_student(n_filters=args.initial_filters,
                                        k_conv=args.kernel_size).to(
                                            args.device)
    elif args.arch == "UNet3D":
        student = UNet3D.UNet3D(in_channels=1,
                                out_channels=1,
                                final_sigmoid=True)
        student = nn.DataParallel(student)
        student = student.to(args.device)
    else:
        raise ValueError("Unrecognized architecture")
    if args.arch != "UNet3D":
        student.apply(utils.weights_init)

    if args.teacher_arch == 'deep':
        teacher = FC_deeper_teacher.FC_deeper_teacher(
            n_filters=args.n_filter_teacher,
            k_conv=args.k_teacher).to(args.device)
    elif args.teacher_arch == 'shallow':
        teacher = FC_student.FC_student(n_filters=args.n_filter_teacher,
                                        k_conv=args.k_teacher).to(args.device)
Example No. 7
def predict_valid():
    inputdir = "../Lung_GTV/"
    outdir = "../Lung_GTV_val_pred/190917/Unet3D-bs4-0/"

    transform = valid_aug(image_size=512)

    # nii_files = glob.glob(inputdir + "/*/data.nii.gz")
    threshold = 0.5

    folds = [0]

    for fold in folds:
        log_dir = f"../logs/190918/Unet3D-bs4-fold-{fold}"
        model = UNet3D(in_channels=1, out_channels=1, f_maps=64)

        ckp = os.path.join(log_dir, "checkpoints/best.pth")
        checkpoint = torch.load(ckp)
        model.load_state_dict(checkpoint['model_state_dict'])
        model = nn.DataParallel(model)
        model = model.to(device)

        df = pd.read_csv(f'./csv/5folds/valid_{fold}.csv')
        patient_ids = df.patient_id.values
        for patient_id in patient_ids:
            print(patient_id)
            nii_file = f"{inputdir}/{patient_id}/data.nii.gz"

            image_slices, n_slices, ct_image = extract_slice(nii_file)

            # import pdb
            # pdb.set_trace()

            dataset = TestDataset(image_slices, None)
            dataloader = DataLoader(dataset=dataset,
                                    num_workers=4,
                                    batch_size=2,
                                    drop_last=False)

            pred_mask = predict(model, dataloader)

            # pred_mask = torch.FloatTensor(pred_mask)
            # pred_mask = F.upsample(pred_mask, (size, 512, 512), mode='trilinear').detach().cpu().numpy()
            pred_mask = (pred_mask > threshold).astype(np.int16)
            # pred_mask = pred_mask.reshpae(-1, 512, 512)
            pred_mask = np.transpose(pred_mask, (1, 0, 2, 3, 4))
            pred_mask = pred_mask[0]
            pred_mask = pred_mask.reshape(-1, 256, 256)
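            # Pad the prediction back to the original slice count by repeating its last slices if needed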
            count = n_slices - pred_mask.shape[0]
            if count > 0:
                pred_mask = np.concatenate(
                    [pred_mask, pred_mask[-count:, :, :]], axis=0)

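            # Resample the mask back to the original slice spacing and in-plane resolution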
            pred_mask = ndimage.zoom(
                pred_mask, (slice_thickness / ct_image.GetSpacing()[-1],
                            1 / down_scale, 1 / down_scale),
                order=3)

            pred_mask = SimpleITK.GetImageFromArray(pred_mask)
            pred_mask.SetDirection(ct_image.GetDirection())
            pred_mask.SetOrigin(ct_image.GetOrigin())
            pred_mask.SetSpacing(ct_image.GetSpacing())

            # patient_id = nii_file.split("/")[-2]
            patient_dir = f"{outdir}/{patient_id}"
            os.makedirs(patient_dir, exist_ok=True)
            patient_pred = f"{patient_dir}/predict.nii.gz"
            SimpleITK.WriteImage(pred_mask, patient_pred)
Example No. 8
from train import train_model, test_model
from loss import LossFunction
from utils import log

# Hyperparameters and configurations
RUN_NAME = ''
BATCH_SIZE = 8
VAL_FILE_PATH = ''
MODEL_STATE_PATH = ''
EXPOSURE = 'under'

# Set dataloaders
val_loader = BddDaloaderFactory(EXPOSURE, VAL_FILE_PATH, BATCH_SIZE)

# Set model and load weights
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = UNet3D(3, 3).to(device)
model.load_state_dict(torch.load(MODEL_STATE_PATH))

# Set optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#optimizer = torch.optim.Adam(model.parameters())

# Set criterion
criterion = LossFunction().to(device)

val_loss = []
# Iterate over videos.
for video_step, video_loader in enumerate(val_loader):
    # Iterate over frames.
    for sample_step, sample in enumerate(video_loader):
Example No. 9
# Setup KFold Cross Validation
kf = KFold(n_splits=n_folds, shuffle=False)  # shuffle=False so the fold assignment is identical on every run
fold_nr = 1

# Training Loop
for fold in kf.split(folder_paths):
    iter_nr = 1
    train_idx = fold[0]
    valid_idx = fold[1]
    train_set = Dataset([folder_paths[i] for i in train_idx], [folder_ids[i] for i in train_idx])
    valid_set = Dataset([folder_paths[i] for i in valid_idx], [folder_ids[i] for i in valid_idx])
    train_loader = data.DataLoader(train_set, **params)
    valid_loader = data.DataLoader(valid_set, **params)

    # Model
    model = UNet3D(in_channels, n_classes, False, base_n_filter, 'crg', 8)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)

    #If training was interrupted (need to change epoch loop range as well):
    #checkpoint = torch.load("/home/ajurgens/Brats2019/Model_Saves_V4/Fold_1_Epoch_140.tar")
    #model.load_state_dict(checkpoint['model_state_dict'])

    # Loss and optimizer
    criterion = GeneralizedDiceLoss(1e-5, None, None, False).to(device)
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=10**-7)
    model.to(device)

    #If training was interrupted (need to change epoch loop range as well):
    #model.train()
Example No. 10
                                   shuffle=False,
                                   num_workers=args.n_workers)

    # input_size = (args.patch_size, args.patch_size, args.patch_size)

    if args.arch == "teacher":
        #model = FC_teacher_max_p.FC_teacher_max_p(n_filters=args.initial_filters, k_conv=args.kernel_size).to(args.device)
        model = FC_deeper_teacher.FC_deeper_teacher(
            n_filters=args.initial_filters,
            k_conv=args.kernel_size).to(args.device)
    elif args.arch == "student":
        model = FC_student.FC_student(n_filters=args.initial_filters,
                                      k_conv=args.kernel_size).to(args.device)
    elif args.arch == "UNet3D":
        model = UNet3D.UNet3D(in_channels=1,
                              out_channels=1,
                              final_sigmoid=True)
        model = nn.DataParallel(model)
        model = model.to(args.device)
    else:
        raise ValueError("Unrecognized architecture")
    if args.arch != "UNet3D":
        model.apply(utils.weights_init)
    # loss = nn.CrossEntropyLoss()
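    # BCELoss expects probabilities; the UNet3D branch above is built with final_sigmoid=True, so its outputs are already in [0, 1]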
    loss = nn.BCELoss()
    # pos_w = torch.Tensor([1, args.soma_weight])
    #loss = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    ############
    # Main loop