def test_load_data(h5file):
    """load_data on the dataset JSON config must build an HDF5Reader
    equivalent to one constructed by hand, including the custom
    preprocessor registered below."""
    @custom_preprocessor
    class PlusOnePreprocessor(BasePreprocessor):
        def transform(self, x, y):
            return x + 1, y + 1

    raw_config = read_file('tests/json/dataset_config.json')
    actual_dr = load_data(load_json_config(raw_config))

    # Reference reader built directly, mirroring the JSON config.
    expected_dr = HDF5Reader(
        filename=h5file,
        batch_size=8,
        preprocessors=PlusOnePreprocessor(),
        x_name='input',
        y_name='target',
        train_folds=[0, 1, 2],
        val_folds=[3],
        test_folds=[4, 5])

    assert isinstance(actual_dr, HDF5Reader)
    assert isinstance(actual_dr.preprocessors[0], PlusOnePreprocessor)

    # Materialize generators in the same order as before comparing.
    actual_train = actual_dr.train_generator
    actual_val = actual_dr.val_generator
    actual_test = actual_dr.test_generator

    expected_train = expected_dr.train_generator
    expected_val = expected_dr.val_generator
    expected_test = expected_dr.test_generator

    check_equal_data_generator(actual_train, expected_train)
    check_equal_data_generator(actual_val, expected_val)
    check_equal_data_generator(actual_test, expected_test)
def test_load_params():
    """Model-param JSON config must deserialize into the expected
    optimizer, loss and metric instances."""
    raw_config = read_file('tests/json/model_param_config.json')
    params = load_params(load_json_config(raw_config))

    assert isinstance(params['optimizer'], Adam)
    assert isinstance(params['loss'], BinaryFbetaLoss)
    assert isinstance(params['metrics'][0], BinaryFbeta)
def test_load_train_params():
    """Train-param JSON config must yield epoch count and callbacks.

    BUG FIX: this function was named ``test_load_params``, duplicating the
    test above that exercises ``load_params``. In Python the second
    definition silently replaces the first, so pytest collected and ran
    only one of the two tests. Renamed to match ``load_train_params``.
    """
    params = load_train_params(load_json_config(
        read_file('tests/json/train_param_config.json')))

    assert params['epochs'] == 5

    # Exactly three callbacks, in the order declared in the config.
    assert len(params['callbacks']) == 3
    assert isinstance(params['callbacks'][0], CSVLogger)
    assert isinstance(params['callbacks'][1], ModelCheckpoint)
    assert isinstance(params['callbacks'][2], TerminateOnNaN)
def test_load_vnet_model_resize():
    """A V-Net built with an odd spatial size (129³) must preserve that
    size at both its input and its single-channel output."""
    architecture = load_json_config(
        read_file('tests/json/vnet_architecture.json'))
    model = load_architecture(architecture, {'shape': [129, 129, 129, 3]})

    expected_input = (None, 129, 129, 129, 3)
    expected_output = (None, 129, 129, 129, 1)
    assert np.all(model.input_shape == expected_input)
    assert np.all(model.output_shape == expected_output)
def test_load_sequential_model():
    """The sequential architecture JSON must produce the same network as
    this hand-built Flatten→Dense→Dropout→Dense classifier."""
    architecture = load_json_config(
        read_file('tests/json/sequential_architecture.json'))
    model = load_architecture(architecture, {'shape': [32, 32]})

    # Build the reference model with the Keras functional API.
    inputs = Input(shape=(32, 32))
    hidden = Dense(units=128, activation='relu')(Flatten()(inputs))
    regularized = Dropout(rate=0.2)(hidden)
    outputs = Dense(units=10, activation='softmax')(regularized)
    expected_model = Model(inputs=inputs, outputs=outputs)

    assert check_same_models(model, expected_model)
def add_experiment(me):
    """Interactively register a new experiment on *me*.

    Prompts for a name, a description and a config-file path, retrying
    the path up to three times on failure. Always returns True so the
    calling menu loop keeps running.
    """
    name = input('Name: ')
    description = input('Description: ')

    for _attempt in range(3):
        try:
            filepath = input('Path to config file: ')
            config = read_file(filepath)
            me.new_experiment_from_full_config(
                name, config, description=description)
        except Exception as e:  # best-effort CLI: report and re-prompt
            print(e)
            print('Error. Try again.')
        else:
            print('New experiment added successfully.')
            return True

    print('Failed to add experiment. Back to previous page')
    return True
# NOTE(review): this bare `raise` is almost certainly the body of a GPU
# availability guard whose `if` line falls outside this chunk — confirm
# against the full file before relying on this formatting.
raise RuntimeError("GPU Unavailable")

# CLI: required config file and log folder, plus checkpoint cadence knobs.
parser = argparse.ArgumentParser()
parser.add_argument("config_file")
parser.add_argument("log_folder")
parser.add_argument("--epochs", default=500, type=int)
parser.add_argument("--model_checkpoint_period", default=25, type=int)
parser.add_argument("--prediction_checkpoint_period", default=25, type=int)

args = parser.parse_args()

# Multi-GPU support was tried and left disabled.
# strategy = tf.distribute.MirroredStrategy()
# print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# with strategy.scope():

config = read_file(args.config_file)

# Build the experiment from the full JSON config and run it, checkpointing
# models and predictions at the periods supplied on the command line.
exp = Experiment(
    log_base_path=args.log_folder).from_full_config(config).run_experiment(
    train_history_log=True,
    model_checkpoint_period=args.model_checkpoint_period,
    prediction_checkpoint_period=args.prediction_checkpoint_period,
    epochs=args.epochs,
)

# 42 images for 2d, 15 images for 3d
img_num = 42
if '3d' in args.log_folder:
    img_num = 15
"""Run the sequential MNIST example from its JSON config.

To use comet.ml, uncomment the marked lines below.
"""
###############################################
# from comet_ml import Experiment as CometEx
###############################################
from deoxys.experiment import Experiment
from deoxys.utils import read_file


if __name__ == '__main__':
    ######################################################################
    # # Create an experiment with your api key
    # experiment = CometEx(api_key="YOUR_API_KEY",
    #                      project_name="YOUR_PROJECT_NAME",
    #                      workspace="YOUR_WORKSPACE")
    ######################################################################
    config = read_file('examples/json/sequential-config.json')

    # Build the experiment from the full config and run with defaults.
    experiment = Experiment(log_base_path='../../mnist/logs')
    experiment.from_full_config(config).run_experiment()
"""Example of loading a model from config and weights.

The full config of the model is in 'examples/json/unet-sample-config.json',
and 'model2.h5' contains the weights of the trained model.
"""
from deoxys.model import model_from_full_config
from deoxys.utils import read_file


if __name__ == '__main__':
    config = read_file('examples/json/unet-sample-config.json')

    # Resume training from epoch 2 using the previously saved weights,
    # continuing for one more epoch (up to epoch 3).
    model = model_from_full_config(config, weights_file='model2.h5')
    model.fit_train(verbose=1, epochs=3, initial_epoch=2)
    print("After 3 epochs")

    model.save('model3.h5')

    score = model.evaluate_test(verbose=1)
    print(score)
def test_from_config():
    """A full sequential config must build a model without raising."""
    raw_config = read_file('tests/json/sequential-config.json')
    model_from_full_config(raw_config)
The model is loaded from 'examples/json/oxford-pet-config.json'. The outputs are in '../../oxford_perf/logs'. In this experiment, it first runs 10 epochs, saves model and predicts validation data at epoch 5 and 10. After that, it runs 10 more epochs. The logs are appended to previous log file. Saving model and prediction of validation happen at epoch 12, 14, 16, 18, 20. Finally, performance plots are created and prediction images in both contour and separated form are created. """ from deoxys.experiment import Experiment from deoxys.utils import read_file if __name__ == '__main__': config = read_file('examples/json/oxford-pet-config.json') exp = Experiment( log_base_path='../../oxford_perf/logs', best_model_monitors=[ 'val_loss', 'val_accuracy', 'val_binary_fbeta', 'val_fbeta' ]).from_full_config(config).run_experiment( train_history_log=True, model_checkpoint_period=5, prediction_checkpoint_period=5, epochs=30).run_experiment( model_checkpoint_period=2, prediction_checkpoint_period=2, epochs=10, initial_epoch=30).plot_performance().plot_prediction( masked_images=[i for i in range(10)], ).plot_prediction(
def test_load_vnet_model():
    # Purpose: the V-Net loaded from the JSON architecture file must be
    # identical to the same network built by hand with the Keras
    # functional API (checked by check_same_models).
    architecture = load_json_config(
        read_file('tests/json/vnet_architecture.json')
    )
    input_params = {'shape': [128, 128, 128, 3]}
    model = load_architecture(architecture, input_params)

    def conv_layers(filters, pre_layer):
        # Two (Conv3D -> BatchNormalization) stages, 3x3x3 kernels,
        # 'same' padding so spatial size is preserved.
        conv = BatchNormalization()(
            Conv3D(filters, kernel_size=3,
                   activation='relu', padding='same')(pre_layer))
        return BatchNormalization()(
            Conv3D(filters, kernel_size=3,
                   activation='relu', padding='same')(conv))

    input_layer = Input(shape=[128, 128, 128, 3])

    # Encoder: five conv blocks with doubling filter counts (4..64),
    # each followed by 2x2x2 max pooling.
    conv_1 = conv_layers(4, input_layer)
    max_pool_1 = MaxPooling3D()(conv_1)
    conv_2 = conv_layers(8, max_pool_1)
    max_pool_2 = MaxPooling3D()(conv_2)
    conv_3 = conv_layers(16, max_pool_2)
    max_pool_3 = MaxPooling3D()(conv_3)
    conv_4 = conv_layers(32, max_pool_3)
    max_pool_4 = MaxPooling3D()(conv_4)
    conv_5 = conv_layers(64, max_pool_4)
    max_pool_5 = MaxPooling3D()(conv_5)

    # Shared kwargs for every transposed (upsampling) convolution:
    # stride 2 in all three spatial dimensions undoes one pooling step.
    conv_t_kwargs = {"kernel_size": 3, "strides": [
        2, 2, 2
    ], "padding": "same"}

    # Bottleneck block.
    conv_6 = conv_layers(128, max_pool_5)

    # Decoder: transpose-conv upsampling, each concatenated with the
    # matching encoder block (skip connection) before two more convs.
    conv_trans_1 = Conv3DTranspose(32, **conv_t_kwargs)(conv_6)
    upconv_1 = conv_layers(64, concatenate([conv_5, conv_trans_1]))
    conv_trans_2 = Conv3DTranspose(16, **conv_t_kwargs)(upconv_1)
    upconv_2 = conv_layers(32, concatenate([conv_4, conv_trans_2]))
    conv_trans_3 = Conv3DTranspose(8, **conv_t_kwargs)(upconv_2)
    upconv_3 = conv_layers(16, concatenate([conv_3, conv_trans_3]))
    conv_trans_4 = Conv3DTranspose(4, **conv_t_kwargs)(upconv_3)
    upconv_4 = conv_layers(8, concatenate([conv_2, conv_trans_4]))
    conv_trans_5 = Conv3DTranspose(2, **conv_t_kwargs)(upconv_4)
    upconv_5 = conv_layers(4, concatenate([conv_1, conv_trans_5]))

    # Single-channel sigmoid head (binary segmentation output).
    output = Conv3D(1, kernel_size=1, activation='sigmoid')(upconv_5)

    expected_model = Model(inputs=input_layer, outputs=output)

    assert check_same_models(model, expected_model)
"""Run a full experiment from a JSON config and plot its performance."""
from deoxys.model import model_from_full_config
import matplotlib.pyplot as plt
from deoxys.loaders.architecture import BaseModelLoader
from tensorflow.keras.models import Model as KerasModel
from tensorflow.keras.layers import Input, concatenate
from tensorflow.keras.models import Sequential
from tensorflow import image
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
import tensorboard
import numpy as np
import scipy.ndimage
import math
from deoxys.model.layers import layer_from_config
from deoxys.customize import custom_architecture
from deoxys.experiment import Experiment  # FIX: used below, was never imported
from deoxys.utils import read_file  # FIX: needed to load the JSON config
import os
import h5py


if __name__ == '__main__':
    # To log to comet.ml, construct a comet Experiment here with your own
    # API key. (A hard-coded key previously sat in a comment here and has
    # been removed — never commit credentials, even commented out.)

    # BUG FIX: `config` was used below but its assignment was commented
    # out, so the script died with NameError on every run. Restore it.
    config = read_file('json/layer_32_filter_new.json')

    experiment = Experiment()
    experiment.from_full_config(config).run_experiment(
        train_history_log=True,
        model_checkpoint_period=10,
        epochs=100).plot_performance()