Example 1
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import pdb
import sys
sys.path.append('../')
import experiment_init.init_acdc as cfg
import experiment_init.data_cfg_acdc as data_list

import utils

from dataloaders import dataloaderObj
# dataloader object configured for the ACDC experiment
dt = dataloaderObj(cfg)

# volume id list for the unlabelled training subset, then its images, labels and pixel sizes
unl_list = data_list.train_data("tr2", "c4")
imgs, label, pixel_size = dt.load_acdc_imgs(unl_list)


# transform = transforms.Compose(
#     [transforms.ToTensor(),
#      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# trainset = torchvision.datasets.data_list.train_data(root='./data', train=True,
#                                                      download=True, transform=transform)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
#                                           shuffle=True, num_workers=2)
#
# testset = torchvision.datasets.data_list.test_data(root='./data', train=False,
#                                                    download=True, transform=transform)
# testloader = torch.utils.data.DataLoader(testset, batch_size=4,
#                                          shuffle=False, num_workers=2)
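The commented-out block above is adapted from the torchvision CIFAR-10 tutorial and does not match the data_list API used here. A minimal sketch of instead wrapping the arrays returned by dt.load_acdc_imgs in a PyTorch DataLoader (assuming imgs and label are NumPy arrays with the sample axis first, which this excerpt does not confirm) could look like this:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

# hypothetical wrapper: turn the loaded ACDC arrays into torch tensors
# (assumes imgs and label are NumPy arrays indexed by sample along axis 0)
imgs_t = torch.from_numpy(np.asarray(imgs)).float()
labels_t = torch.from_numpy(np.asarray(label)).long()

train_ds = TensorDataset(imgs_t, labels_t)
trainloader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=2)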
Example 2
from f1_utils import f1_utilsObj
f1_util = f1_utilsObj(cfg, dt)

# enable one-hot encoded labels only when rd_en is set
parse_config.en_1hot = 1 if parse_config.rd_en == 1 else 0
struct_name = cfg.struct_name
val_step_update = cfg.val_step_update
######################################

######################################
# Load training and validation images & labels
######################################
# load training volume id numbers used to train the unet
train_list = data_list.train_data(parse_config.no_of_tr_imgs,
                                  parse_config.comb_tr_imgs)
#load saved training data in cropped dimensions directly
print('loading train volumes')
train_imgs, train_labels = dt.load_cropped_img_labels(train_list)
#print('train shape',train_imgs.shape,train_labels.shape)

# load validation volume id numbers used to save the best model during training
val_list = data_list.val_data(parse_config.no_of_tr_imgs,
                              parse_config.comb_tr_imgs)
# load val data in both original and cropped dimensions
print('loading val volumes')
val_label_orig, val_img_crop, val_label_crop, pixel_val_list = load_val_imgs(
    val_list, dt, orig_img_dt)

# get test volumes id list
print('get test volumes list')
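parse_config is built outside this excerpt. A minimal sketch of how the flags referenced above could be declared with argparse (flag names taken from the snippet; defaults here are assumptions, not the project's values) might be:

import argparse

parser = argparse.ArgumentParser()
# flags used in the snippet above; the default values are placeholders only
parser.add_argument('--rd_en', type=int, default=0)
parser.add_argument('--no_of_tr_imgs', type=str, default='tr2')
parser.add_argument('--comb_tr_imgs', type=str, default='c4')
parse_config = parser.parse_args()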
Example 3
# collect assignment ops that copy matching pre-trained weights into the new graph
assign_op = []
for new_var in tf.trainable_variables():
    for var, var_val in zip(variables_names, var_values):
        if str(var) == str(new_var.name) and 'reg_' not in str(new_var.name):
            #print('match name', new_var.name, var)
            tmp_op = new_var.assign(var_val)
            assign_op.append(tmp_op)

sess.run(assign_op)
print('init done for all the encoder network weights and biases from pre-trained model')
######################################

######################################
# Load training and validation images & labels
######################################
# load training volume id numbers used to train the unet
train_list = data_list.train_data()
#load saved training data in cropped dimensions directly
print('load train volumes')
train_imgs, train_labels = dt.load_cropped_img_labels(train_list)
#print('train shape',train_imgs.shape,train_labels.shape)

# load validation volume id numbers used to save the best model during training
val_list = data_list.val_data()
# load val data in both original and cropped dimensions
print('load val volumes')
val_label_orig, val_img_crop, val_label_crop, pixel_val_list = load_val_imgs(
    val_list, dt, orig_img_dt)

# get test volumes id list
print('get test volumes list')
test_list = data_list.test_data()
######################################
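variables_names, var_values, and sess are defined outside this excerpt. A sketch of the same encoder-weight restore using a name-to-value dictionary instead of the nested loop (assuming the two lists are aligned, as the zip above implies) could be:

# hypothetical variant: build a lookup table once instead of scanning the
# pre-trained value list for every trainable variable
pretrained = dict(zip((str(n) for n in variables_names), var_values))

assign_op = []
for new_var in tf.trainable_variables():
    if 'reg_' in new_var.name:
        continue  # skip variables whose name contains 'reg_', as in the original loop
    if new_var.name in pretrained:
        assign_op.append(new_var.assign(pretrained[new_var.name]))

sess.run(assign_op)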