Example #1
def main():
    # patches_file = "50_patches_dataset.h5"
    # hf = h5py.File(patches_file, 'r')
    # We obtain a list with all the IDs of the patches
    # all_groups = list(hf)
    # Dividing the dataset into train and validation. Shuffle has to be False; otherwise the model might be trained
    # on what was previously the validation set and validated on what was previously the training set.
    # X_train, X_validation = train_test_split(all_groups, test_size=0.2, shuffle=False)
    # print(X_train, X_validation)

    # for testing
    datapath = "Data/"
    train_file = datapath + "patches_dataset_test.h5"
    val_file = datapath + "val250.h5"

    # Loader Parameters
    params = {'batch_size': 2, 'shuffle': False, 'num_workers': 0}

    train_dataset = PatchDataset(train_file, n_classes=3)
    print(len(train_dataset))
    val_dataset = PatchDataset(val_file, n_classes=3)
    print(len(val_dataset))

    train_loader = DataLoader(train_dataset, **params)
    val_loader = DataLoader(val_dataset, **params)

    loaders = {'train': train_loader, 'val': val_loader}

    # Model and parameters
    model = Modified3DUNet(in_channels=1, n_classes=3)
    optimizer = optim.Adam(model.parameters())
    max_epochs = 10

    # Median foreground percentage = 0.2% (= classes 1,2)
    # Median cancer percentage = 0.01% (= class 2)
    # Median pancreas percentage = 0.2 - 0.01 = 0.19% (= class 1)
    # Median background percentage = 100 - 0.2 = 99.8% (= class 0)
    # [99.8, 0.19, 0.01] => corresponding inverse-frequency class weights = [1, 525, 9980]
    # class_weights = torch.tensor([1., 525., 9980.])
    # loss_criterion = GeneralizedDiceLoss(weight=class_weights)
    # loss_criterion = WeightedCrossEntropyLoss(weight=class_weights)
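    # A sketch (not part of the original example) of deriving such
    # inverse-frequency weights from the percentages above:
    # class_pct = torch.tensor([99.8, 0.19, 0.01])
    # class_weights = class_pct.max() / class_pct  # -> tensor([1.0000, 525.2632, 9980.0000])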

    weights = [1, 100, 500]
    class_weights = torch.FloatTensor(weights)

    loss_criterion = nn.CrossEntropyLoss(weight=class_weights)

    # trainer = UNetTrainer(model, optimizer, loaders, max_epochs, loss_criterion=loss_criterion)
    # trainer.train()

    # Load from last epoch
    checkpoint_trainer = UNetTrainer.load_checkpoint(
        "WCEL_1_10_50_last_model",
        model,
        optimizer,
        loaders,
        max_epochs,
        loss_criterion=loss_criterion)
    pred = checkpoint_trainer.single_image_forward(val_dataset[0][0])
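
For 3D segmentation, nn.CrossEntropyLoss expects raw logits of shape (N, C, D, H, W) and integer class labels of shape (N, D, H, W). A minimal self-contained sketch (shapes are illustrative, not taken from the example above):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss(weight=torch.tensor([1., 100., 500.]))
logits = torch.randn(2, 3, 16, 64, 64)         # (N, C, D, H, W) raw network output
labels = torch.randint(0, 3, (2, 16, 64, 64))  # (N, D, H, W) class index per voxel
loss = criterion(logits, labels)               # weighted mean over all voxels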
Example #2
def main():
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
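    # Optional additions (not in the original example): for fully
    # deterministic cuDNN behavior one could also set
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False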

    # setup networks
    #Network = getattr(models, args.net)
    #model = Network(**args.net_params)

    model = Modified3DUNet(in_channels=1, n_classes=2, base_n_filter=16)
    model = model.cuda()
    '''optimizer = getattr(torch.optim, args.opt)(
            model.parameters(), **args.opt_params)'''
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 weight_decay=0.0001)
    #optimizer = torch.optim.SGD(model.parameters(),lr = 0.1,momentum=0.9)

    criterion = getattr(criterions, args.criterion)
    msg = '-------------- New training session -----------------'
    msg += '\n' + str(args)
    logging.info(msg)
    num_gpus = len(args.gpu.split(','))
    args.batch_size *= num_gpus
    args.workers *= num_gpus
    args.opt_params['lr'] *= num_gpus  # note: the hard-coded Adam lr above is not affected by this scaling
    # create dataloaders
    #Dataset = getattr(datasets, args.dataset)
    dset = cell_training('/home/tom/Modified-3D-UNet-Pytorch/PNAS/')
    train_loader = DataLoader(dset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)
    file_name_best = os.path.join(ckpts, 'cell/model_best.tar')
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train_loss = train(train_loader, model, criterion, optimizer, epoch)
        # remember best loss and save checkpoint
        ckpt = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optim_dict': optimizer.state_dict(),
            'train_loss': train_loss,
        }
        file_name = os.path.join(ckpts, 'model_last.tar')
        torch.save(ckpt, file_name)
        msg = 'Epoch: {:02d} Train loss {:.4f}'.format(epoch + 1, train_loss)
        logging.info(msg)
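
The checkpoint dict written each epoch can later be used to resume training. A minimal sketch, reusing the example's names (ckpts, model, optimizer):

ckpt = torch.load(os.path.join(ckpts, 'model_last.tar'), map_location='cpu')
model.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optim_dict'])
start_epoch = ckpt['epoch']  # training would continue from this epoch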
Example #3
File: main.py  Project: Kraas/My_Works
logging.getLogger('').addHandler(console)


global args, best_loss
best_loss = float('inf')
args = parser.parse_args()
# print(os.environ['CUDA_VISIBLE_DEVICES'])
dtype = torch.float
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# input = data.to(device)

# Loading the model
in_channels = 4
n_classes = 4
base_n_filter = 16
model = Modified3DUNet(in_channels, n_classes, base_n_filter).to(device)
# print(args.data)


# Split the training and testing dataset

test_size = 0.1
train_idx, test_idx = train_test_split(range(285), test_size=test_size)
train_data = load_dataset(train_idx)
test_data = load_dataset(test_idx)


#print all_data.keys()
# create your optimizer
# optimizer = optim.Adam(net.parameters(), lr=)
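
Note that train_test_split shuffles by default, so the split above changes on every run; a fixed random_state makes it reproducible (a sketch):

train_idx, test_idx = train_test_split(range(285), test_size=test_size, random_state=42)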
Example #4
dataset = PartDataset(root='..//Thingi10K//sdf_polar', train=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(root='..//Thingi10K//sdf_polar', train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = Modified3DUNet(in_channels=1, n_classes=1)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=0.9)
classifier.cuda()

loss_fn = torch.nn.MSELoss(reduction='mean')  # reduce/size_average are deprecated

num_batch = len(dataset) // opt.batchSize

for epoch in range(opt.nowepoch, opt.nowepoch + opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = points.cuda(), target.cuda()  # Variable() is deprecated; tensors are used directly
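
The listing is cut off here; a minimal continuation of this training step might be (a sketch, assuming classifier returns a single prediction tensor matched against target by the MSE loss above):

        optimizer.zero_grad()
        pred = classifier(points)
        loss = loss_fn(pred, target)
        loss.backward()
        optimizer.step()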
Example #5
def main():
    start_time = time.time()
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # setup networks
    #Network = getattr(models, args.net)
    #model = Network(**args.net_params)

    model = Modified3DUNet(in_channels=1, n_classes=2, base_n_filter=16)
    # load model
    model_file = os.path.join(ckpts, 'model_last.tar')
    print(model_file)
    checkpoint = torch.load(model_file,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()
    '''optimizer = getattr(torch.optim, args.opt)(
            model.parameters(), **args.opt_params)'''
    #optimizer = torch.optim.SGD(model.parameters(),lr = 0.1,momentum=0.9)

    criterion = getattr(criterions, args.criterion)
    num_gpus = len(args.gpu.split(','))
    args.batch_size *= num_gpus
    args.workers *= num_gpus

    # create dataloaders
    #Dataset = getattr(datasets, args.dataset)
    dset = cell_testing_inter('/home/tom/data1_match/dataset4/')
    print(len(dset))
    test_loader = DataLoader(dset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=args.workers,
                             pin_memory=True)
    model.eval()
    torch.set_grad_enabled(False)
    inputs = []
    outputs = []
    ground_truth = []
    for i, sample in enumerate(test_loader):
        input = sample['data']
        img_size = sample['image_size']
        print(img_size[0])
        file_name = sample['name']
        file_name = str(file_name[0])
        _, _, z, x, y = input.shape
        seg = np.zeros((z, x, y))
        target = sample['seg']
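        # Inference over non-overlapping 16-slice chunks along z; any
        # remainder slices (z % 16) are not processed by this loop.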
        for j in range(z // 16):
            #ground_truth.append(target)
            #print file_name[0]
            input_temp = input[0, 0, j * 16:(j + 1) * 16].float()
            input_temp = input_temp[None, None, ...]
            output_temp = nn.parallel.data_parallel(model, input_temp)
            output_temp = output_temp.detach().cpu().numpy()
            output_temp = output_temp[0]
            seg_temp = output_temp.argmax(0)
            seg[j * 16:(j + 1) * 16] = seg_temp
        data = input.detach().numpy()
        #print data.shape
        data = data[0, 0, :, :, :]
        data = (255 * data[0:5 * img_size[0]]).astype('uint8')
        data_img = sitk.GetImageFromArray(data)
        sitk.WriteImage(data_img, '/home/tom/result/' + file_name)
        #outputs.append(output)
        #output = output[0]
        #print output.shape
        #seg = output.argmax(0)
        seg = (seg[0:5 * img_size[0]] * 255).astype('uint8')
        seg = seg.astype('float32')
        seg = seg / 255.0
        seg = np.multiply(data, seg)
        result = seg[0:img_size[0], 0:512, 0:512]
        '''
        result = result/255
        threshold = 0.05
        result[result>0.06] = 1
        result[result<=0.02] = 0
        result = binary_closing(result)
        gt = target[0]
        gt = gt[0]
        gt = gt[0:img_size[0],0:512,0:512]
        gt = gt.numpy()
        print ("precision:%f",Precision_img(result,gt))
        print ("Recall:%f",Recall_img(result,gt))
        print ("f1_score:%f",F1_score_img(result,gt))'''
        result = result.astype('uint8')  # result already holds 0-255 intensities; scaling by 255 again would overflow uint8
        seg = sitk.GetImageFromArray(result)
        sitk.WriteImage(seg, '/home/tom/membrane/' + file_name + 'mem.tif')
        print("running time %s" % (time.time() - start_time))
Example #6
def main(model_path, cell_hist_datadir, prob_map_datadir):
    props = readprops(os.path.join(model_path, 'cfg.txt'))
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = props['gpu']

    # setup networks
    #Network = getattr(models, args.net)
    #model = Network(**args.net_params)

    model = Modified3DUNet(in_channels=1, n_classes=2, base_n_filter=16)
    # load model
    model_file = os.path.join(model_path, 'model_last.tar')
    checkpoint = torch.load(model_file,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()

    criterion = getattr(criterions, props['criterion'])
    num_gpus = len(props['gpu'].split(','))
    batch_size = int(props['batch_size']) * num_gpus
    workers = int(props['workers']) * num_gpus

    # create dataloaders
    #Dataset = getattr(datasets, args.dataset)
    dset = cell_testing_inter(cell_hist_datadir)
    print(len(dset))
    test_loader = DataLoader(dset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=True)
    model.eval()
    torch.set_grad_enabled(False)
    inputs = []
    outputs = []
    ground_truth = []
    for i, sample in enumerate(test_loader):
        input = sample['data']
        img_size = sample['image_size']
        print(img_size[0])
        file_name = sample['name']
        print(file_name)
        file_name = os.path.splitext(file_name[0])[0]  # str(file_name[0])
        _, _, z, x, y = input.shape
        seg = np.zeros((z, x, y))
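        # As in Example #5: chunked inference along z in blocks of 16 slices;
        # remainder slices (z % 16) are skipped.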
        for j in range(z // 16):
            target = sample['seg']
            #ground_truth.append(target)
            #print file_name[0]
            input_temp = input[0, 0, j * 16:(j + 1) * 16].float()
            input_temp = input_temp[None, None, ...]
            output_temp = nn.parallel.data_parallel(model, input_temp)
            output_temp = output_temp.detach().cpu().numpy()
            output_temp = output_temp[0]
            seg_temp = output_temp.argmax(0)
            seg[j * 16:(j + 1) * 16] = seg_temp
        data = input.detach().numpy()
        #print data.shape
        data = data[0, 0, :, :, :]
        data = (255 * data[0:5 * img_size[0]]).astype('uint8')
        #outputs.append(output)
        #output = output[0]
        #print output.shape
        #seg = output.argmax(0)
        prob_map = (seg[0:5 * img_size[0]] * 255).astype('uint8')
        prob_map = prob_map.astype('float32')
        prob_map = prob_map / 255.0
        prob_map = np.multiply(data, prob_map)
        prob_map = resize(
            prob_map,
            (prob_map.shape[0] // 5, prob_map.shape[1], prob_map.shape[2]))
        prob_map_img = sitk.GetImageFromArray(prob_map.astype('uint8'))
        sitk.WriteImage(prob_map_img,
                        prob_map_datadir + '/' + file_name + '-prob.tif')
Example #7
train_dataset = CustomDataset(train_image_paths, train_mask_paths, train=True)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=1,
                                           shuffle=True,
                                           num_workers=12)

test_dataset = CustomDataset(test_image_paths, test_mask_paths, train=False)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=12)

in_channels = 1
n_classes = 2
base_n_filter = 16
model = Modified3DUNet(in_channels, n_classes, base_n_filter).cuda()
# weights = [0.01, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0,10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0,10.0,10.0,10.0,10.0,10.0]
weights = [0.1, 50.0]
class_weights = torch.FloatTensor(weights).cuda()
# loss_function = GeneralizedDiceLoss(weight = class_weights)
loss_function = nn.CrossEntropyLoss(weight=class_weights)
# loss_function = FocalLoss()
optimizer = optim.Adam(model.parameters())
# criterion = FocalLoss(num_class = 2)

epochs = 300

# model = nn.DataParallel(model, NumerofGPU)

for epoch in range(epochs):
    model.train()
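
The listing ends here; a typical body for this epoch loop might be (a sketch; volume/mask are assumed names, and the model is assumed to return logits of shape (N, 2, D, H, W)):

    for volume, mask in train_loader:
        volume, mask = volume.cuda(), mask.cuda().long()
        optimizer.zero_grad()
        loss = loss_function(model(volume), mask)
        loss.backward()
        optimizer.step()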
Example #8
train_path = os.path.join(root_path, 'training_data')
test_path = os.path.join(
    root_path, 'testing_data')  # /media/NAS/NAS/tms_data/testing_data
model_save_path = os.path.join('xxx')
print('model_saved_path:', model_save_path)

## load data
batch_size = 4

## build network
# Loading the model
in_channels = 6
out_channels = 3
base_n_filter = 16
initial_lr = 0.002
net = Modified3DUNet(in_channels, out_channels, base_n_filter)
net = net.float()
# select gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    net = nn.DataParallel(net)
# put the model on GPUs by model.to(device)
net = net.to(device)
optimizer = RD.RAdam(net.parameters(),
                     lr=initial_lr,
                     betas=(0.9, 0.999),
                     eps=1e-08)
# set learning-rate strategy: decay LR by a factor of 0.5 every 5 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
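
With step_size=5 and gamma=0.5, this scheduler halves the learning rate every 5 epochs. It is typically stepped once per epoch (a sketch; num_epochs and train_one_epoch are hypothetical, not defined in the listing):

for epoch in range(num_epochs):
    train_one_epoch(net, optimizer)  # hypothetical per-epoch training function
    exp_lr_scheduler.step()          # lr <- lr * 0.5 at epochs 5, 10, 15, ...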