) # expected: [img_size_z*num_images, img_size_x, vol_size_y, img_size_t, n_channels] logging.info( '============================================================\n') # ================================================================================= # ==== If slicing preprocessing is enabled, do preprocessing of masked the data now # ================================================================================= if preprocess_enabled == "slice": logging.info( '============================================================') logging.info('Loading training data from: ' + project_data_root) data_tr = data_freiburg_numpy_to_preprocessed_hdf5.load_cropped_data_sliced( basepath=project_data_root, idx_start=0, idx_end=10, train_test='train') images_tr = data_tr['sliced_images_train'] logging.info(type(images_tr)) logging.info( 'Shape of training images: %s' % str(images_tr.shape) ) # expected: [img_size_z*num_images, img_size_x, vol_size_y, img_size_t, n_channels] logging.info( '============================================================') logging.info('Loading validation data from: ' + project_data_root) data_vl = data_freiburg_numpy_to_preprocessed_hdf5.load_cropped_data_sliced( basepath=project_data_root, idx_start=10, idx_end=13,
# NOTE(review): the physical line above is a whitespace-mangled paste — the
# original line breaks were lost, so everything after its first '#' is one
# giant comment. Only the leading ')' is live code (it closes a logging.info(
# call that starts before this excerpt). The text buried in the comment looks
# like an older duplicate of the slice-preprocessing loader below, with
# hard-coded indices (train 0-10, validation 10-13) instead of config values,
# and its trailing load_cropped_data_sliced(...) call is cut off mid-argument.
# TODO: recover the original file from version control and restore the real
# line structure rather than editing this fragment in place.
# ------------------------------------------------------------------------------
# Build the VAE model wrapper and restore the baseline weights from disk.
# NOTE(review): this section was a whitespace-mangled paste (the original line
# breaks were lost, producing a syntax error and hiding the validation-loading
# statements inside an end-of-line comment). The statement structure below is
# reconstructed from the inline comments and the parallel train/validation
# sections; confirm against version control.
# ------------------------------------------------------------------------------
# The network CLASS (not an instance) is handed to VAEModel, which presumably
# instantiates it internally — verify against VAEModel's constructor.
vae_network = VariationalAutoencoder
model = VAEModel(vae_network, config, model_name, log_dir)

# Load the vae model as our baseline
path = os.path.join(project_code_root, config["model_directory"])
model.load_from_path(path, config["model_name"], config["latest_model_epoch"])

# ==================================================================================================================================================================
# ============== LOAD THE DATA =====================================================================================================================================
# ==================================================================================================================================================================
if preprocess_enabled == "slice":
    logging.info('============================================================')
    logging.info('Loading training data from: ' + project_data_root)
    data_tr = data_freiburg_numpy_to_preprocessed_hdf5.load_cropped_data_sliced(
        basepath=project_data_root,
        idx_start=config['train_data_start_idx'],
        idx_end=config['train_data_end_idx'],
        train_test='train')
    images_tr_sl = data_tr['sliced_images_train']
    logging.info(type(images_tr_sl))
    # expected: [img_size_z*num_images, img_size_x, vol_size_y, img_size_t, n_channels]
    logging.info('Shape of training images: %s' % str(images_tr_sl.shape))

    logging.info('============================================================')
    logging.info('Loading validation data from: ' + project_data_root)
    data_vl = data_freiburg_numpy_to_preprocessed_hdf5.load_cropped_data_sliced(
        basepath=project_data_root,
        idx_start=config['validation_data_start_idx'],
        idx_end=config['validation_data_end_idx'],
        train_test='validation')
    images_vl_sl = data_vl['sliced_images_validation']
    # expected: [img_size_z*num_images, img_size_x, vol_size_y, img_size_t, n_channels]
    logging.info('Shape of validation images: %s' % str(images_vl_sl.shape))
    logging.info('============================================================\n')