Example No. 1
# coding=utf-8

from unet import Unet
from probabilistic_unet import ProbabilisticUnet
import torch
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from load_LIDC_data import LIDC_IDRI
from utils import l2_regularisation, show_curve
from save_load_net import save_model, load_model




device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = LIDC_IDRI(dataset_location = 'data/')

dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(0.1 * dataset_size))
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)

train_loader = DataLoader(dataset, batch_size=5, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=1, sampler=test_sampler)
print("Number of training/test patches:", (len(train_indices),len(test_indices)))

net = ProbabilisticUnet(
    input_channels=1,
    num_classes=1,
    num_filters=[32, 64, 128, 192],
    latent_dim=2,
    no_convs_fcomb=4,
    beta=10.0,
)
net.to(device)
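# A hedged training-loop sketch (not part of the original example): it assumes
# the ProbabilisticUnet API of the reference PyTorch implementation, i.e.
# forward(patch, mask, training=True), elbo(mask), and posterior/prior/fcomb
# sub-modules that can be passed to l2_regularisation, and that the loader
# yields (patch, mask, series_uid) triples. The learning rate, epoch count,
# and regularisation weight are illustrative.
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4, weight_decay=0)
epochs = 10
for epoch in range(epochs):
    for step, (patch, mask, _) in enumerate(train_loader):
        patch = patch.to(device)
        mask = torch.unsqueeze(mask.to(device), 1)
        # Encode the patch (and mask) to build the prior/posterior latent spaces.
        net.forward(patch, mask, training=True)
        # Minimise the negative ELBO plus a small L2 penalty on the latent nets.
        elbo = net.elbo(mask)
        reg_loss = (l2_regularisation(net.posterior) +
                    l2_regularisation(net.prior) +
                    l2_regularisation(net.fcomb.layers))
        loss = -elbo + 1e-5 * reg_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()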
Example No. 2
import torch
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

from load_LIDC_data import LIDC_IDRI
from model import ProbabilisticUNet, SegModel, geco_ce
from utils import l2_regularisation

# settings
# segmentation_model = SegModel.U_SQUARED_BIG.value     # Make sure these two
segmentation_model = SegModel.UNET_SIMPLE.value
LOAD_MODEL_FROM = '6_unet_Indep_elbo_nonf_noposw_lat2_b5__epoch0_step532_loss633.pth'  # correspond
dataset_path = 'data/'
num_test_samples = 6
shuffle = True

# data
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = LIDC_IDRI(dataset_location=dataset_path)
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(0.1 * dataset_size))
if shuffle:
    np.random.shuffle(indices)
# train_indices, test_indices = indices[split:], indices[:split]

test_indices = indices[:num_test_samples]
test_sampler = SubsetRandomSampler(test_indices)
test_loader = DataLoader(dataset, batch_size=1, sampler=test_sampler)
print("Number of test patches:", (len(test_indices)))

# model
net = ProbabilisticUNet(segmentation_model=segmentation_model,
                        input_channels=1,
Example No. 3
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from load_LIDC_data import LIDC_IDRI

# NOTE: `settings` is assumed to be defined earlier in the original script
# (e.g. a config/argparse namespace); only the fields assigned below are shown.
# TODO Add all geco or loss_fn params to settings + distrib params.
settings.latent_distribution = None
settings.loss = None
settings.pos_weighting = None
settings.loss_params = None

settings.use_lr_scheduler = False
settings.batch_size = 5
settings.latent_dim = 2
settings.beta = 2.0

settings.visdom_port = 7789

# Data
dataset = LIDC_IDRI(dataset_location=settings.dataset_location)
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(0.1 * dataset_size))
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(dataset,
                          batch_size=settings.batch_size,
                          sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=1, sampler=test_sampler)
print("Number of training/test patches:",
      (len(train_indices), len(test_indices)))

if settings.visualize:
Example No. 4
import torch
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from load_LIDC_data import LIDC_IDRI
from probabilistic_unet import ProbabilisticUnet
from utils import l2_regularisation

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = LIDC_IDRI(dataset_location="data/")
# Random 90/10 split of the LIDC-IDRI patches into train and test indices.
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(0.1 * dataset_size))
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(dataset, batch_size=5, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=1, sampler=test_sampler)
print("Number of training/test patches:",
      (len(train_indices), len(test_indices)))

# Probabilistic U-Net: U-Net backbone with a 2-D latent space and one output class.
net = ProbabilisticUnet(
    input_channels=1,
    num_classes=1,
    num_filters=[32, 64, 128, 192],
    latent_dim=2,
    no_convs_fcomb=4,
    beta=10.0,
)
net.to(device)
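# A hedged test-time sampling sketch (not part of the original example): it
# assumes the same reference ProbabilisticUnet API as above, where
# forward(..., training=False) builds only the prior latent space and
# sample(testing=True) decodes one segmentation hypothesis from it.
net.eval()
with torch.no_grad():
    for patch, mask, _ in test_loader:
        patch = patch.to(device)
        net.forward(patch, None, training=False)
        # Draw a few plausible segmentations for the same input patch.
        samples = [torch.sigmoid(net.sample(testing=True)) for _ in range(4)]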