def build_model():
    l_in = nn.layers.InputLayer((None, nslices, 30) + patch_size)
    l_in_slice_mask = nn.layers.InputLayer((None, nslices))
    l_in_slice_location = nn.layers.InputLayer((None, nslices, 1))
    l_in_sex_age = nn.layers.InputLayer((None, 2))
    l_ins = [l_in, l_in_slice_mask, l_in_slice_location, l_in_sex_age]

    # collapse the slice axis into the batch axis before feeding the per-slice submodel
    l_in_rshp = nn.layers.ReshapeLayer(l_in, (-1, 30) + patch_size)  # (batch_size * nslices, 30, h, w)

    submodel = subconfig().build_model(l_in_rshp)

    # ------------------ systole
    l_mu0 = submodel.mu_layers[0]
    l_sigma0 = submodel.sigma_layers[0]
    l_mu0 = nn.layers.ReshapeLayer(l_mu0, (-1, nslices, [1]))
    l_sigma0 = nn.layers.ReshapeLayer(l_sigma0, (-1, nslices, [1]))
    # combine per-slice (mu, sigma) with the slice mask and slice locations into a per-patient volume distribution
    l_volume_mu_sigma0 = nn_heart.JeroenLayer([nn.layers.flatten(l_mu0),
                                               nn.layers.flatten(l_sigma0),
                                               l_in_slice_mask,
                                               nn.layers.flatten(l_in_slice_location)],
                                              trainable_scale=False)
    l_volume_mu0 = nn.layers.SliceLayer(l_volume_mu_sigma0, indices=0, axis=-1)
    l_volume_sigma0 = nn.layers.SliceLayer(l_volume_mu_sigma0, indices=1, axis=-1)
    l_cdf0 = nn_heart.NormalCDFLayer(nn.layers.reshape(l_volume_mu0, ([0], 1)),
                                     nn.layers.reshape(l_volume_sigma0, ([0], 1)))

    # ------------------ diastole
    l_mu1 = submodel.mu_layers[1]
    l_sigma1 = submodel.sigma_layers[1]
    l_mu1 = nn.layers.ReshapeLayer(l_mu1, (-1, nslices, [1]))
    l_sigma1 = nn.layers.ReshapeLayer(l_sigma1, (-1, nslices, [1]))
    l_volume_mu_sigma1 = nn_heart.JeroenLayer([nn.layers.flatten(l_mu1),
                                               nn.layers.flatten(l_sigma1),
                                               l_in_slice_mask,
                                               nn.layers.flatten(l_in_slice_location)],
                                              trainable_scale=False)
    l_volume_mu1 = nn.layers.SliceLayer(l_volume_mu_sigma1, indices=0, axis=-1)
    l_volume_sigma1 = nn.layers.SliceLayer(l_volume_mu_sigma1, indices=1, axis=-1)
    l_cdf1 = nn_heart.NormalCDFLayer(nn.layers.reshape(l_volume_mu1, ([0], 1)),
                                     nn.layers.reshape(l_volume_sigma1, ([0], 1)))

    l_outs = [l_cdf0, l_cdf1]
    l_top = nn.layers.MergeLayer(l_outs)

    l_target_mu0 = nn.layers.InputLayer((None, 1))
    l_target_mu1 = nn.layers.InputLayer((None, 1))
    l_targets = [l_target_mu0, l_target_mu1]

    train_params = nn.layers.get_all_params(l_top)
    test_layers = [l_volume_mu_sigma0, l_volume_mu_sigma1]

    return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'train_params', 'submodel',
                                'test_layers'])(l_ins, l_outs, l_targets, l_top, train_params, submodel, test_layers)
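# Minimal usage sketch (illustrative addition, not part of the original training pipeline): the
# namedtuple returned by build_model() can be compiled into a deterministic prediction function.
# The variable names below (model, input_vars, predict_cdfs) are assumptions for illustration only.
#
#   import theano
#   model = build_model()
#   cdf_exprs = nn.layers.get_output(model.l_outs, deterministic=True)
#   input_vars = [l.input_var for l in model.l_ins]
#   predict_cdfs = theano.function(input_vars, cdf_exprs, on_unused_input='ignore')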
from collections import namedtuple

import lasagne as nn
import data_iterators
import numpy as np
import theano.tensor as T
import nn_heart
from configuration import subconfig
import utils_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils

caching = None
restart_from_save = None

rng = subconfig().rng
patch_size = subconfig().patch_size
train_transformation_params = subconfig().train_transformation_params
valid_transformation_params = subconfig().valid_transformation_params
test_transformation_params = subconfig().test_transformation_params

batch_size = 8
nbatches_chunk = 2
chunk_size = batch_size * nbatches_chunk

train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)

train_data_iterator = data_iterators.PatientsDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                           batch_size=chunk_size,
                                                           transform_params=train_transformation_params,
                                                           patient_ids=train_valid_ids['train'],
                                                           labels_path=TRAIN_LABELS_PATH,
from collections import namedtuple

import lasagne as nn
import data_iterators
import numpy as np
import theano.tensor as T
import nn_heart
from configuration import subconfig
import utils_heart
import utils
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH

caching = None
restart_from_save = None

rng = subconfig().rng
patch_size = subconfig().patch_size
train_transformation_params = subconfig().train_transformation_params
valid_transformation_params = subconfig().valid_transformation_params
test_transformation_params = subconfig().test_transformation_params
data_prep_fun = subconfig().data_prep_fun

batch_size = 8
nbatches_chunk = 2
chunk_size = batch_size * nbatches_chunk

train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)

train_data_iterator = data_iterators.PatientsDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                           batch_size=chunk_size,
                                                           transform_params=train_transformation_params,
                                                           patient_ids=train_valid_ids['train'],