def get_initialised_driver(starting_iter=0):
    """Build an ``ApplicationDriver`` initialised for the toy application.

    :param starting_iter: value forwarded to ``TRAINING.starting_iter`` so
        tests can simulate resuming from a checkpointed iteration.
    :return: an ``ApplicationDriver`` after ``initialise_application``.
    """
    # Assemble the four compulsory config sections consumed by the driver.
    training_section = ParserNamespace(
        starting_iter=starting_iter,
        max_iter=500,
        save_every_n=20,
        tensorboard_every_n=1,
        max_checkpoints=20,
        optimiser='niftynet.engine.application_optimiser.Adagrad',
        lr=0.01)
    system_param = {
        'SYSTEM': ParserNamespace(
            action='train',
            num_threads=2,
            num_gpus=4,
            cuda_devices='6',
            model_dir='./testing_data'),
        'NETWORK': ParserNamespace(
            batch_size=20,
            name='tests.toy_application.TinyNet'),
        'TRAINING': training_section,
        'CUSTOM': ParserNamespace(
            vector_size=100,
            mean=10.0,
            stddev=2.0,
            name='tests.toy_application.ToyApplication'),
    }
    driver = ApplicationDriver()
    driver.initialise_application(system_param, {})
    return driver
def _generate_base_params():
    """Return the compulsory config sections that are irrelevant
    to this unit test, pre-filled with minimal valid values."""
    return {
        'SYSTEM': ParserNamespace(
            model_dir='./testing_data',
            num_threads=2,
            num_gpus=1,
            cuda_devices=''),
        'NETWORK': ParserNamespace(
            batch_size=20,
            name='tests.toy_application.TinyNet'),
        'TRAINING': ParserNamespace(
            starting_iter=0,
            max_iter=2,
            save_every_n=2,
            tensorboard_every_n=0,
            max_checkpoints=100),
        'INFERENCE': ParserNamespace(inference_iter=-1),
        'CUSTOM': ParserNamespace(
            name='tests.toy_application.ToyApplication',
            vector_size=100,
            mean=10.0,
            stddev=2.0),
    }
def _generate_data_param():
    """Return per-modality data sections pointing at the test CSV files."""
    sections = {}
    # Both modality sections differ only in the CSV file name.
    for section_name, csv_name in (('modality', 'mod1test.csv'),
                                   ('modality2', 'mod2test.csv')):
        sections[section_name] = ParserNamespace(
            csv_file=os.path.join('testing_data', csv_name),
            path_to_search='testing_data')
    return sections
def get_ill_image_window_2():
    """Return deliberately inconsistent ImageWindow constructor arguments.

    The fixture is intentionally ill-formed: ``image_dtypes`` omits the
    'label' entry and 'modality3' has an empty ``spatial_window_size`` —
    do not "fix" these values, tests rely on them being invalid.
    """
    names = {
        'image': (u'modality1', u'modality2'),
        'label': (u'modality3', ),
    }
    shapes = {
        'image': (192, 160, 192, 1, 2),
        'label': (192, 160, 192, 1, 1),
    }
    dtypes = {'image': tf.float32}
    modality_params = {
        'modality1': ParserNamespace(spatial_window_size=(10, 10)),
        'modality2': ParserNamespace(spatial_window_size=(10, 10)),
        'modality3': ParserNamespace(spatial_window_size=()),
    }
    return {
        'source_names': names,
        'image_shapes': shapes,
        'image_dtypes': dtypes,
        'data_param': modality_params,
    }
def get_dynamic_image_window():
    """Build an ImageWindow whose spatial size is only partially specified,
    exercising the dynamic-window code path of ``from_data_reader_properties``."""
    reader_properties = dict(
        source_names={
            'image': (u'modality1', u'modality2'),
            'label': (u'modality3', ),
        },
        image_shapes={
            'image': (192, 160, 192, 1, 2),
            'label': (192, 160, 192, 1, 1),
        },
        image_dtypes={
            'image': tf.float32,
            'label': tf.float32,
        },
        data_param={
            'modality1': ParserNamespace(spatial_window_size=(10, 10)),
            'modality2': ParserNamespace(spatial_window_size=(10, 10)),
            'modality3': ParserNamespace(spatial_window_size=(5, 5, 1)),
        })
    return ImageWindow.from_data_reader_properties(**reader_properties)
def get_initialised_driver(starting_iter=0):
    """Build an ``ApplicationDriver`` for the toy application, including a
    dataset split file and validation/inference exclusion fractions.

    :param starting_iter: forwarded to ``TRAINING.starting_iter`` so tests
        can simulate resuming from a checkpoint.
    :return: an initialised ``ApplicationDriver`` with ``action_param``,
        ``net_param`` and ``is_training`` assigned on its application.
    """
    testing_dir = os.path.join('.', 'testing_data')
    training_section = ParserNamespace(
        starting_iter=starting_iter,
        max_iter=500,
        save_every_n=20,
        tensorboard_every_n=1,
        max_checkpoints=20,
        optimiser='niftynet.engine.application_optimiser.Adagrad',
        validation_every_n=-1,
        exclude_fraction_for_validation=0.1,
        exclude_fraction_for_inference=0.1,
        lr=0.01)
    network_section = ParserNamespace(
        batch_size=20,
        name='tests.toy_application.TinyNet')
    system_param = {
        'SYSTEM': ParserNamespace(
            action='train',
            num_threads=2,
            num_gpus=4,
            cuda_devices='6',
            model_dir=testing_dir,
            dataset_split_file=os.path.join(
                '.', 'testing_data', 'testtoyapp.csv')),
        'NETWORK': network_section,
        'TRAINING': training_section,
        'CUSTOM': ParserNamespace(
            vector_size=100,
            mean=10.0,
            stddev=2.0,
            name='tests.toy_application.ToyApplication'),
    }
    driver = ApplicationDriver()
    driver.initialise_application(system_param, {})
    # set parameters without __init__
    driver.app.action_param = training_section
    driver.app.net_param = network_section
    driver.app.is_training = True
    return driver
import os

import numpy as np
import tensorflow as tf

from niftynet.engine.sampler_grid import GridSampler
from niftynet.engine.sampler_grid import _enumerate_step_points
from niftynet.engine.sampler_grid import grid_spatial_coordinates
from niftynet.io.image_reader import ImageReader
from tests.test_util import ParserNamespace

# NOTE(review): `os.path.join` was used below without a visible `import os`
# in this chunk; added it here (harmless if the full file imports it earlier).

# Two-modality fixture for the grid sampler: both sections share the same
# (8, 10, 2) spatial window so sampled windows are directly comparable.
MULTI_MOD_DATA = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1sampler.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        spatial_window_size=(8, 10, 2)
    ),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        spatial_window_size=(8, 10, 2)
    )
}
import os

import tensorflow as tf

from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.layer.discrete_label_normalisation import \
    DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
from tests.test_util import ParserNamespace

# NOTE(review): `os.path.join` was used below without a visible `import os`
# in this chunk; added it here (harmless if the full file imports it earlier).

# test multiple modalities
MULTI_MOD_DATA = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1reader.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIRreader.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None)
}
MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'))

# test single modalities
import os

import tensorflow as tf

from niftynet.engine.sampler_resize import ResizeSampler
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.discrete_label_normalisation import \
    DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
from tests.test_util import ParserNamespace

# NOTE(review): `os.path.join` was used below without a visible `import os`
# in this chunk; added it here (harmless if the full file imports it earlier).

# Two-modality fixture for the resize sampler: both sections resample to
# pixdim (2.4, 5.0, 2.0) in 'LAS' orientation with a (23, 32, 15) window.
MULTI_MOD_DATA = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1sampler.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time', '23'),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=(2.4, 5.0, 2.0),
        axcodes='LAS',
        spatial_window_size=(23, 32, 15)),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_', '23'),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=(2.4, 5.0, 2.0),
        axcodes='LAS',
        spatial_window_size=(23, 32, 15))
}
MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'))
import os

import tensorflow as tf

from niftynet.io.image_sets_partitioner import COLUMN_UNIQ_ID
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.io.image_sets_partitioner import TRAIN, VALID, INFER
from tests.test_util import ParserNamespace

# Fixture sections for the image-sets partitioner tests.
# FIX(review): the 'T1' entry originally used `filename_not_contain=`
# (missing the trailing 's'), inconsistent with the 'Flair' entry; as a
# misspelled keyword it would be a dead attribute and the Parcellation
# exclusion would never apply to T1. Corrected to `filename_not_contains`.
test_sections = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'test_reader.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None),
    'Flair': ParserNamespace(
        csv_file=os.path.join('testing_data', 'test_Flairreader.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None)
}

# Where the partitioner writes its train/valid/infer split CSV.
partition_output = os.path.join('testing_data', 'partition.csv')
import os

from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.layer.binary_masking import BinaryMaskingLayer
from niftynet.layer.histogram_normalisation import \
    HistogramNormalisationLayer
from niftynet.layer.mean_variance_normalisation import \
    MeanVarNormalisationLayer
from tests.test_util import ParserNamespace

# NOTE(review): `os.path.join` was used below without a visible `import os`
# in this chunk; added it here (harmless if the full file imports it earlier).

# Two-modality fixture for the normalisation-layer tests.
DATA_PARAM = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIR.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_', ),
        filename_not_contains=('Parcellation', ),
        interp_order=3,
        pixdim=None,
        axcodes=None)
}
TASK_PARAM = ParserNamespace(image=('T1', 'FLAIR'))

# Histogram-normalisation model file shared by the tests below.
MODEL_FILE = os.path.join('testing_data', 'std_models.txt')
data_partitioner = ImageSetsPartitioner()
else: if c > num_min: to_add -= 1 if to_add > 0: print('to add initial is ', num_labels_add) print('Not enough in additional labels') return False return True MULTI_MOD_DATA = { 'T1': ParserNamespace(csv_file=os.path.join('testing_data', 'T1sampler.csv'), path_to_search='testing_data', filename_contains=('_o_T1_time', ), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=None, axcodes=None, spatial_window_size=(7, 10, 2)), 'FLAIR': ParserNamespace(csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'), path_to_search='testing_data', filename_contains=('FLAIR_', ), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=None, axcodes=None, spatial_window_size=(7, 10, 2)), 'Label': ParserNamespace(csv_file=os.path.join('testing_data', 'lesion.csv'), path_to_search='testing_data',