Example #1
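Evaluation setup: select_dist maps the training and unseen distance lists onto dataset sub-folders, define_dataset builds DataLoaders for both selections (batch size 8), and the truncated get_fnames helper starts extracting the coefficient and epoch fields from saved model file names with the digit regex.
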
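These snippets are excerpts from larger leaf_reco scripts, so they omit their imports and rely on names (data_dir, saved_models, coef, noise, ...) defined earlier in the original files. A minimal preamble they appear to assume is sketched below; the exact leaf_reco module paths are assumptions, not taken from the source.

import os
import re
from functools import partial

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# Project helpers used throughout the examples; the module paths inside
# leaf_reco below are assumptions.
from leaf_reco.dataset import select_dist, define_dataset
from leaf_reco.model import UNet, rUNet, dice_loss
from leaf_reco.transformers import (RandomCrop, Swap, FlipLR, FlipUD,
                                    GaussianNoise, Rescale, ChannelsFirst,
                                    ToTensor)
from leaf_reco.training import (training_UNet, training_phase_rUNet,
                                training_phase_rUNet_multi_loss,
                                retrain_rUNet_multi_loss)
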
training_dist = [2, 4, 10, 20, 25, 35]
unseen_dist = [1, 3, 15, 30]

dataset_folder = os.path.join(data_dir, 'dataset')
root_path = partial(os.path.join, saved_models)

regex = re.compile(r'\d+')

selected_testdata = select_dist(dist_list=training_dist,
                                root_folder=dataset_folder)
selected_unseen = select_dist(dist_list=unseen_dist,
                              root_folder=dataset_folder)

data_loaders_test, data_lengths_test = define_dataset(
    dataset_folder,
    batch_size=8,
    include_list=selected_testdata,
    alldata=False)

print(data_lengths_test)

data_loaders_unseen, data_lengths_unseen = define_dataset(
    dataset_folder, batch_size=8, include_list=selected_unseen, alldata=True)
print(data_lengths_unseen)

model_names = os.listdir(saved_models)


def get_fnames(coeff, prefix='Trained'):
    idx_coef = 6 if prefix == 'Trained' else 4
    idx_epoch = 5 if prefix == 'Trained' else 3
Example #2
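Retraining from a checkpoint: the augmentation pipeline (random crop, swap, flips, Gaussian noise, rescale, channels-first, to-tensor) is rebuilt, define_dataset excludes the EXCLUDED distances, and retrain_rUNet_multi_loss resumes a rUNet from the coefficient-specific .pkl checkpoint. The noise value fed to GaussianNoise and the coef variable presumably come from enclosing loops that are not shown in this excerpt.
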
        train_transformers = [
            RandomCrop(p=1),
            Swap(p=0.7),
            FlipLR(p=0.7),
            FlipUD(p=0.7),
            GaussianNoise(p=0.75, mean=noise, sigma=1),
            Rescale(0.25),
            ChannelsFirst(),
            ToTensor()
        ]
        print("Load dataset")

        data_loaders, data_length = define_dataset(
            root_folder=ROOT_DIR,
            base_transformers=base_transformers,
            train_transformers=train_transformers,
            batch_size=16,
            excluded_list=EXCLUDED,
            alldata=False,
            multi_processing=4)
        print("combined loss: {}*dice_loss + {} mse".format(coef, 1.0 - coef))
        torch.cuda.empty_cache()
        print("Train model")
        model = rUNet(out_size=1)
        optimizer = optim.Adam(model.parameters(), lr=1e-4)
        checkpoint_file = os.path.join(
            SRC_DIR, 'saved_models', 'trained_6positions_multi_loss',
            'Trained_rUNet_pytorch_6positions_dataset_100epochs_{}coeff_mask.pkl'
            .format(coef))
        print(torch.load(checkpoint_file).keys())
        history = retrain_rUNet_multi_loss(
            model=model,
Example #3
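Training a plain UNet on four distances (2, 4, 10, 25) from the 'first' data batch only: select_dist builds the include list with keys_list=['first'] and an exclude list for the 'second' and 'third' batches, then training_UNet runs for 100 epochs with dice_loss on the masks, checkpointing every 5 epochs; the call is cut off in this excerpt.
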

SEED = 8
torch.manual_seed(SEED)
np.random.seed(SEED)

DATA_DIR_DEEPTHOUGHT = "/storage/yw18581/data"
data_dir = DATA_DIR_DEEPTHOUGHT
src_dir = os.path.join("/", "storage", "yw18581", "src", "leaf_reco")
root_dir = os.path.join(data_dir, "dataset")
selected_distances = select_dist(dist_list=[2, 4, 10, 25], root_folder=root_dir, keys_list=['first'])
excluded_distances = select_dist(root_folder=root_dir, keys_list=['second', 'third'])
print(selected_distances)

print("Load dataset")
data_loaders, data_lengths = define_dataset(root_folder=root_dir, batch_size=16, include_list=selected_distances,
                                            excluded_list=excluded_distances, multi_processing=4)

print(data_lengths)
print("Define model")
n_epochs = 100

torch.cuda.empty_cache()
print("Train model")
model = UNet()
optimizer = optim.Adam(model.parameters(), lr=1e-4)
print("here")
history = training_UNet(model=model, optimizer=optimizer, criterion_mask=dice_loss,
                               src_dir='/storage/yw18581/src/leaf_reco',
                               task_folder_name="trained_UNet_4positions_firstbatch",
                               data_loaders=data_loaders, data_lengths=data_lengths,
                               epochs=n_epochs, batch_size=16, model_checkpoint=5,
Example #4
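Multi-loss training with synthetic noise: define_dataset is called with add_noise=10000 and distances 1, 3, 15 and 30 excluded, then the loop sweeps two dice/MSE mixing coefficients (0.40 and 0.70), rebuilding the rUNet and Adam optimizer for each; the 25-epoch training call itself is cut off.
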
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)

DATA_DIR_DEEPTHOUGHT = "/storage/yw18581/data"

DATA_DIR = DATA_DIR_DEEPTHOUGHT
SRC_DIR = os.path.join("/", "storage", "yw18581", "src", "leaf_reco")
ROOT_DIR = os.path.join(DATA_DIR, "dataset")
EXCLUDED = select_dist(dist_list=[1, 3, 15, 30], root_folder=ROOT_DIR)
print(EXCLUDED)

print("Load dataset")
data_loaders, data_lengths = define_dataset(root_folder=ROOT_DIR,
                                            batch_size=16,
                                            add_noise=10000,
                                            excluded_list=EXCLUDED,
                                            multi_processing=4)

print(data_lengths)
print("Define model")
coeffs = [0.40, 0.70]

n_epochs = 25

for coef in coeffs:
    print("combined loss: {}*dice_loss + {} mse".format(coef, 1.0 - coef))
    torch.cuda.empty_cache()
    print("Train model")
    model = rUNet(out_size=1)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
Example #5
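Same augmentation pipeline as Example #2, but with a fixed Gaussian-noise mean of 150 and an explicit include_list of selected distances; training_phase_rUNet_multi_loss weights dice_loss on the mask against nn.MSELoss on the predicted distance using the coefficient coef from the enclosing loop.
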
    train_transformers = [
        RandomCrop(p=1),
        Swap(p=0.7),
        FlipLR(p=0.7),
        FlipUD(p=0.7),
        GaussianNoise(p=0.75, mean=150, sigma=1),
        Rescale(0.25),
        ChannelsFirst(),
        ToTensor()
    ]
    print("Load dataset")

    data_loaders, data_length = define_dataset(
        root_folder=root_dir,
        base_transformers=base_transformers,
        train_transformers=train_transformers,
        batch_size=16,
        include_list=selected_distances,
        alldata=False,
        multi_processing=4)
    print("combined loss: {}*dice_loss + {} mse".format(coef, 1.0 - coef))
    torch.cuda.empty_cache()
    print("Train model")
    model = rUNet(out_size=1)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    history = training_phase_rUNet_multi_loss(
        model=model,
        optimizer=optimizer,
        loss_coeff=coef,
        criterion_dist=nn.MSELoss(),
        criterion_mask=dice_loss,
Example #6
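Baseline combined-loss training without extra augmentation arguments: distances 1, 3, 15 and 30 are excluded, and only the coefficients in coeffs_2 (0.25, 0.30, 0.40) are swept, each iteration creating a fresh rUNet and Adam optimizer before the truncated training_phase_rUNet call; the coeffs list is defined but not used in this excerpt.
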
import torch.optim as optim
import torch.nn as nn

SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)

DATA_DIR_DEEPTHOUGHT = "/storage/yw18581/data"
data_dir = DATA_DIR_DEEPTHOUGHT
src_dir = os.path.join("/", "storage", "yw18581", "src", "leaf_reco")
root_dir = os.path.join(data_dir, "dataset")
excluded = select_dist(dist_list=[1, 3, 15, 30], root_folder=root_dir)
print(excluded)

print("Load dataset")
data_loaders, data_lengths = define_dataset(root_folder=root_dir, batch_size=16, excluded_list=excluded,
                                            multi_processing=4)

print(data_lengths)
print("Define model")
coeffs = [0.75, 0.70, 0.60, 0.50]
coeffs_2 = [0.25, 0.30, 0.40]
n_epochs = 100

for coef in coeffs_2:
    print("combined loss: {}*dice_loss + {} mse".format(coef, 1.0 - coef))
    torch.cuda.empty_cache()
    print("Train model")
    model = rUNet(out_size=1)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    history = training_phase_rUNet(model=model, optimizer=optimizer, loss_coeff=coef,