def run(self):
        ###### Create the datasets

        data_path = "./datasets/report_ultimate_random/"

        training_data_path = data_path + "train_2.h5"
        testing_data_path = data_path + "test_2.h5"
        ds_training = DatasetBrainParcellation()
        ds_training.read(training_data_path)
        # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:100],
        #                                                img_shape=(29, 29), tile_shape=(10, 10),
        #                                                tile_spacing=(1, 1)))
        # image.save(self.path + "filters_corruption_30.png")

        prop_validation = 0.5  # Percentage of the testing dataset that is used for validation (early stopping)
        ds_testing = DatasetBrainParcellation()
        ds_testing.read(testing_data_path)
        ds_validation, ds_testing = ds_testing.split_datapoints_proportion(prop_validation)
        # Few stats about the targets
        analyse_classes(np.argmax(ds_training.outputs, axis=1))

        # Scale some part of the data
        s = pickle.load(open(self.path + "s.scaler", "rb"))
        s.scale(ds_testing.inputs)

        ###### Create the network

        # Load the networks
        net1 = NetworkUltimateMLPWithoutCentroids()
        net1.init(29, 13, 135)
        net1.load_parameters(open_h5file(self.path + "net_no_centroids.net"))

        net2 = NetworkUltimateMLP()
        net2.init(29, 13, 134, 135)
        net2.load_parameters(open_h5file(self.path + "net.net"))

        ###### Evaluate on testing

        compute_centroids_estimate(ds_testing, net1, net2, s)
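
# The ds_testing.split_datapoints_proportion(prop_validation) call above carves the
# loaded testing set into a validation half (for early stopping) and a final test
# half. A minimal sketch of such a proportional split, assuming the dataset exposes
# plain `inputs`/`outputs` arrays; the actual spynet method may shuffle or copy
# differently.
def split_by_proportion(inputs, outputs, proportion):
    n_first = int(round(inputs.shape[0] * proportion))
    first = (inputs[:n_first], outputs[:n_first])
    second = (inputs[n_first:], outputs[n_first:])
    return first, second

# Usage mirroring prop_validation = 0.5 above (illustrative only):
# (val_in, val_out), (test_in, test_out) = split_by_proportion(ds.inputs, ds.outputs, 0.5)
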
Example #3
    def run(self):
        ###### Create the datasets

        training_data_path = self.data_path + "train.h5"
        testing_data_path = self.data_path + "test.h5"

        # If files don't already exist, create them
        if not os.path.isfile(training_data_path):
            transform_mnist_to_h5()

        prop_validation = 0.3  # Percentage of the training dataset that is used for validation (early stopping)
        ds_training = Dataset.create_and_read(training_data_path)
        ds_validation, ds_training = ds_training.split_dataset_proportions(
            [prop_validation, 1 - prop_validation])
        ds_testing = Dataset.create_and_read(testing_data_path)

        # Few stats about the targets
        analyse_classes(np.argmax(ds_training.outputs, axis=1),
                        "Training data")

        # Scale the data
        s = Scaler([slice(None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)

        ###### Create the network

        net = NetworkMNIST()
        net.init(28, 28, 10)
        print net

        ###### Configure the trainer

        # Cost function
        cost_function = CostNegLL()

        # Learning update
        learning_rate = 0.13
        momentum = 0.5
        lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

        # Create monitors and add them to the trainer
        err_training = MonitorErrorRate(1, "Training", ds_training)
        err_testing = MonitorErrorRate(1, "Testing", ds_testing)
        err_validation = MonitorErrorRate(1, "Validation", ds_validation)

        # Create stopping criteria and add them to the trainer
        max_epoch = MaxEpoch(300)
        early_stopping = EarlyStopping(err_validation)

        # Create the network selector
        params_selector = ParamSelectorBestMonitoredValue(err_validation)

        # Create the trainer object
        batch_size = 200
        t = Trainer(net, cost_function, params_selector,
                    [max_epoch, early_stopping], lr_update, ds_training,
                    batch_size, [err_training, err_testing, err_validation])

        ###### Train the network

        t.train()

        ###### Plot the records

        save_records_plot(self.path,
                          [err_training, err_testing, err_validation],
                          "errors", t.n_train_batches)

        ###### Save the network

        net.save_parameters(self.path + "net.net")
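
# CostNegLL above is the cost the trainer minimises. A framework-free sketch of the
# negative log-likelihood it is assumed to compute, on softmax outputs `probs`
# (n_samples x n_classes) and one-hot `targets`; the Theano implementation in spynet
# is not shown here.
import numpy as np

def negative_log_likelihood(probs, targets):
    true_class = np.argmax(targets, axis=1)                  # one-hot -> class indices
    picked = probs[np.arange(probs.shape[0]), true_class]    # probability of the true class
    return -np.mean(np.log(picked + 1e-12))                  # epsilon guards log(0)
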
    def run(self):

        data_path = "./datasets/final_exp_n_layers_2000/"
        range_n_layers = np.arange(0, 6, 1)
        error_rates = np.zeros(range_n_layers.shape)
        dice_coeffs = np.zeros(range_n_layers.shape)

        for idx, n_layers in enumerate(range_n_layers):

            print "patch width {}".format(n_layers)

            ### Load the config file
            data_cf_train = load_config("cfg_training_data_creation.py")
            data_cf_test = load_config("cfg_testing_data_creation.py")

            # Create the folder if it does not exist
            if not os.path.exists(data_path):
                os.makedirs(data_path)
            data_cf_train.data_path = data_path
            data_cf_test.data_path = data_path
            data_cf_train.general["file_path"] = data_path + "train.h5"
            data_cf_test.general["file_path"] = data_path + "test.h5"

            ### Generate the datasets and write them to file
            generate_and_save(data_cf_train)
            generate_and_save(data_cf_test)

            ###### Create the datasets

            training_data_path = data_path + "train.h5"
            testing_data_path = data_path + "test.h5"
            ds_training = DatasetBrainParcellation()
            ds_training.read(training_data_path)
            # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:100],
            #                                                img_shape=(29, 29), tile_shape=(10, 10),
            #                                                tile_spacing=(1, 1)))
            # image.save(self.path + "filters_corruption_30.png")

            ds_validation = DatasetBrainParcellation()
            ds_validation.read(testing_data_path)
            # Few stats about the targets
            analyse_classes(np.argmax(ds_training.outputs, axis=1))

            # Scale some part of the data
            # s = Scaler([slice(-134, None, None)])
            # s.compute_parameters(ds_training.inputs)
            # s.scale(ds_training.inputs)
            # s.scale(ds_validation.inputs)
            # s.scale(ds_testing.inputs)
            # pickle.dump(s, open(self.path + "s.scaler", "wb"))

            ###### Create the network

            net = MLP()
            net.init([29**2] + [2000] * n_layers + [135])

            print net

            # image = PIL.Image.fromarray(tile_raster_images(X=net.get_layer(0).get_layer_block(0).w.get_value().reshape((10,-1)),
            #                                                img_shape=(5, 5), tile_shape=(3, 3),
            #                                                tile_spacing=(1, 1)))
            # image.save(self.path + "filters_corruption_30.png")

            ###### Configure the trainer

            # Cost function
            cost_function = CostNegLL()

            # Learning update
            learning_rate = 0.05
            momentum = 0.5
            lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

            # Create monitors and add them to the trainer
            err_validation = MonitorErrorRate(1, "Validation", ds_validation)
            dice_validation = MonitorDiceCoefficient(1, "Validation", ds_validation, 135)

            # Create stopping criteria and add them to the trainer
            max_epoch = MaxEpoch(300)
            early_stopping = EarlyStopping(err_validation, 5, 0.99, 5)

            # Create the network selector
            params_selector = ParamSelectorBestMonitoredValue(err_validation)

            # Create the trainer object
            batch_size = 200
            t = Trainer(net, cost_function, params_selector, [max_epoch, early_stopping],
                        lr_update, ds_training, batch_size,
                        [err_validation, dice_validation])


            ###### Train the network

            t.train()

            ###### Plot the records

            error_rates[idx] = err_validation.get_minimum()
            dice_coeffs[idx] = dice_validation.get_maximum()

        print error_rates
        print dice_coeffs

        plt.figure()
        plt.plot(range_n_layers, error_rates, label="Validation error rates")
        plt.plot(range_n_layers, dice_coeffs, label="Validation dice coefficient")

        plt.xlabel('Number of layers')
        plt.legend(loc='best')
        plt.savefig(self.path + "res.png")
        tikz_save(self.path + "res.tikz", figureheight = '\\figureheighttik', figurewidth = '\\figurewidthtik')
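
# MonitorDiceCoefficient tracks a per-class Dice score on the validation set. The
# standard Dice computation on integer label vectors is sketched below; how spynet
# aggregates the 135 classes into the monitored value is an assumption.
import numpy as np

def dice_per_class(pred, truth, n_classes):
    # Dice_c = 2 * |pred==c AND truth==c| / (|pred==c| + |truth==c|)
    dices = np.zeros(n_classes)
    for c in range(n_classes):
        p = (pred == c)
        t = (truth == c)
        denom = p.sum() + t.sum()
        dices[c] = 2.0 * np.logical_and(p, t).sum() / denom if denom > 0 else 0.0
    return dices
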
    def run(self):
        ###### Create the datasets

        # aa = CostNegLLWeighted(np.array([0.9, 0.1]))
        # e = theano.function(inputs=[], outputs=aa.test())
        # print e()

        ## Load the data
        training_data_path = self.data_path + "train.h5"
        ds_training = DatasetBrainParcellation()
        ds_training.read(training_data_path)

        [ds_training, ds_validation] = ds_training.split_dataset_proportions([0.95, 0.05])

        testing_data_path = self.data_path + "test.h5"
        ds_testing = DatasetBrainParcellation()
        ds_testing.read(testing_data_path)

        ## Display data sample
        # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:50],
        #                                                img_shape=(29, 29), tile_shape=(5, 10),
        #                                                tile_spacing=(1, 1)))
        # image.save(self.path + "filters_corruption_30.png")

        ## Few stats about the targets
        classes, proportion_class = analyse_classes(np.argmax(ds_training.outputs, axis=1), "Training data:")
        print classes
        ## Scale some part of the data
        print "Scaling"
        s = Scaler([slice(-134, None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)
        pickle.dump(s, open(self.path + "s.scaler", "wb"))

        ###### Create the network
        net = NetworkUltimateConv()
        net.init(33, 29, 5, 134, 135)

        print net

        ###### Configure the trainer

        # Cost function
        cost_function = CostNegLL(net.ls_params)

        # Learning update
        learning_rate = 0.05
        momentum = 0.5
        lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

        # Create monitors and add them to the trainer
        freq = 1
        freq2 = 0.00001
        # err_training = MonitorErrorRate(freq, "Train", ds_training)
        # err_testing = MonitorErrorRate(freq, "Test", ds_testing)
        err_validation = MonitorErrorRate(freq, "Val", ds_validation)
        # dice_training = MonitorDiceCoefficient(freq, "Train", ds_training, 135)
        dice_testing = MonitorDiceCoefficient(freq, "Test", ds_testing, 135)
        # dice_validation = MonitorDiceCoefficient(freq, "Val", ds_validation, 135)

        # Create stopping criteria and add them to the trainer
        max_epoch = MaxEpoch(300)
        early_stopping = EarlyStopping(err_validation, 10, 0.99, 5)

        # Create the network selector
        params_selector = ParamSelectorBestMonitoredValue(err_validation)

        # Create the trainer object
        batch_size = 200
        t = Trainer(net, cost_function, params_selector, [max_epoch, early_stopping],
                    lr_update, ds_training, batch_size,
                    [err_validation, dice_testing])

        ###### Train the network

        t.train()

        ###### Plot the records

        # pred = np.argmax(t.net.predict(ds_testing.inputs, 10000), axis=1)
        # d = compute_dice(pred, np.argmax(ds_testing.outputs, axis=1), 134)
        # print "Dice test: {}".format(np.mean(d))
        # print "Error rate test: {}".format(error_rate(np.argmax(ds_testing.outputs, axis=1), pred))

        save_records_plot(self.path, [err_validation], "err", t.n_train_batches, "upper right")
        # save_records_plot(self.path, [dice_testing], "dice", t.n_train_batches, "lower right")

        ###### Save the network

        net.save_parameters(self.path + "net.net")
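
# Scaler([slice(-134, None, None)]) above is handed a slice selecting the last 134
# input columns (the centroid features the networks expect), which suggests only
# that block of the inputs is rescaled. A minimal sketch of that idea, assuming a
# mean/std standardisation; the real Scaler parameters are not visible in this file.
class SliceStandardizer(object):
    def __init__(self, col_slice):
        self.col_slice = col_slice
        self.mean = None
        self.std = None

    def compute_parameters(self, inputs):
        block = inputs[:, self.col_slice]
        self.mean = block.mean(axis=0)
        self.std = block.std(axis=0) + 1e-8      # avoid division by zero

    def scale(self, inputs):
        # standardise the selected columns in place, as the snippet above does
        inputs[:, self.col_slice] = (inputs[:, self.col_slice] - self.mean) / self.std
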
    def run(self):

        data_path = "./datasets/final_exp_size_patch/"
        range_patch_size = np.arange(3, 37, 2)
        error_rates = np.zeros(range_patch_size.shape)
        dice_coeffs = np.zeros(range_patch_size.shape)

        for idx, patch_size in enumerate(range_patch_size):

            print "patch width {}".format(patch_size)

            ### Load the config files
            data_cf_train = load_config("cfg_training_data_creation.py")
            data_cf_test = load_config("cfg_testing_data_creation.py")
            data_cf_train.data_path = data_path
            data_cf_test.data_path = data_path
            data_cf_train.pick_features[0]["patch_width"] = patch_size
            data_cf_test.pick_features[0]["patch_width"] = patch_size

            ### Generate the datasets and write them to file
            generate_and_save(data_cf_train)
            generate_and_save(data_cf_test)

            ###### Create the datasets

            training_data_path = data_path + "train.h5"
            testing_data_path = data_path + "test.h5"
            ds_training = DatasetBrainParcellation()
            ds_training.read(training_data_path)
            # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:100],
            #                                                img_shape=(29, 29), tile_shape=(10, 10),
            #                                                tile_spacing=(1, 1)))
            # image.save(self.path + "filters_corruption_30.png")

            ds_validation = DatasetBrainParcellation()
            ds_validation.read(testing_data_path)
            # Few stats about the targets
            analyse_classes(np.argmax(ds_training.outputs, axis=1))

            # Scale some part of the data
            # s = Scaler([slice(-134, None, None)])
            # s.compute_parameters(ds_training.inputs)
            # s.scale(ds_training.inputs)
            # s.scale(ds_validation.inputs)
            # s.scale(ds_testing.inputs)
            # pickle.dump(s, open(self.path + "s.scaler", "wb"))

            ###### Create the network

            net = MLP()
            net.init([
                patch_size**2, patch_size**2, patch_size**2, patch_size**2, 135
            ])

            print net

            # image = PIL.Image.fromarray(tile_raster_images(X=net.get_layer(0).get_layer_block(0).w.get_value().reshape((10,-1)),
            #                                                img_shape=(5, 5), tile_shape=(3, 3),
            #                                                tile_spacing=(1, 1)))
            # image.save(self.path + "filters_corruption_30.png")

            ###### Configure the trainer

            # Cost function
            cost_function = CostNegLL()

            # Learning update
            learning_rate = 0.05
            momentum = 0.5
            lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

            # Create monitors and add them to the trainer
            err_validation = MonitorErrorRate(1, "Validation", ds_validation)
            dice_validation = MonitorDiceCoefficient(1, "Validation",
                                                     ds_validation, 135)

            # Create stopping criteria and add them to the trainer
            max_epoch = MaxEpoch(300)
            early_stopping = EarlyStopping(err_validation, 5, 0.99, 5)

            # Create the network selector
            params_selector = ParamSelectorBestMonitoredValue(err_validation)

            # Create the trainer object
            batch_size = 200
            t = Trainer(net, cost_function, params_selector,
                        [max_epoch, early_stopping], lr_update, ds_training,
                        batch_size, [err_validation, dice_validation])

            ###### Train the network

            t.train()

            ###### Plot the records

            error_rates[idx] = err_validation.get_minimum()
            dice_coeffs[idx] = dice_validation.get_maximum()

        print error_rates
        print dice_coeffs

        plt.figure()
        plt.plot(range_patch_size, error_rates, label="Validation error rates")
        plt.plot(range_patch_size,
                 dice_coeffs,
                 label="Validation dice coefficient")

        plt.xlabel('Patch size')
        plt.legend(loc='best')
        plt.savefig(self.path + "res.png")
        tikz_save(self.path + "res.tikz",
                  figureheight='\\figureheighttik',
                  figurewidth='\\figurewidthtik')
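
# LearningUpdateGDMomentum(learning_rate, momentum) names a gradient-descent-with-
# momentum update. The classical rule is sketched below; whether spynet uses exactly
# this formulation (rather than, say, Nesterov momentum) is an assumption.
def momentum_step(w, grad, velocity, learning_rate=0.05, momentum=0.5):
    # v <- momentum * v - lr * grad ;  w <- w + v
    velocity = momentum * velocity - learning_rate * grad
    return w + velocity, velocity
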
    def run(self):
        ###### Create the datasets

        # aa = CostNegLLWeighted(np.array([0.9, 0.1]))
        # e = theano.function(inputs=[], outputs=aa.test())
        # print e()

        ## Load the data
        training_data_path = self.data_path + "train.h5"
        ds_training = DatasetBrainParcellation()
        ds_training.read(training_data_path)

        [ds_training,
         ds_validation] = ds_training.split_dataset_proportions([0.95, 0.05])

        testing_data_path = self.data_path + "test.h5"
        ds_testing = DatasetBrainParcellation()
        ds_testing.read(testing_data_path)

        ## Display data sample
        # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:50],
        #                                                img_shape=(29, 29), tile_shape=(5, 10),
        #                                                tile_spacing=(1, 1)))
        # image.save(self.path + "filters_corruption_30.png")

        ## Few stats about the targets
        classes, proportion_class = analyse_classes(
            np.argmax(ds_training.outputs, axis=1), "Training data:")
        print(classes)
        ## Scale some part of the data
        print("Scaling")
        s = Scaler([slice(-134, None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)
        pickle.dump(s, open(self.path + "s.scaler", "wb"))

        ###### Create the network
        # net = NetworkUltimateConv()
        # net = NetworkWithoutCentroidConv()
        # net.init(29,13,135)
        net = NetworkConvDropout()
        # net = NetworkConvVGGDropout()
        # net = ResNet()
        net.init(29, 29, 13, 134, 135)
        # net = NetworkDeeperConv()
        # net.init(29, 29, 13, 134, 135,3)

        print(net)

        ###### Configure the trainer

        # Cost function
        # cost_function = CostNegLL(net.ls_params)
        cost_function = CostNegLL()

        # Learning update
        learning_rate = 0.05
        momentum = 0.5
        lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

        # Create monitors and add them to the trainer
        freq = 1
        freq2 = 0.00001
        # err_training = MonitorErrorRate(freq, "Train", ds_training)
        err_testing = MonitorErrorRate(freq, "Test", ds_testing)
        err_validation = MonitorErrorRate(freq, "Val", ds_validation)
        # dice_training = MonitorDiceCoefficient(freq, "Train", ds_training, 135)
        dice_testing = MonitorDiceCoefficient(freq, "Test", ds_testing, 135)
        dice_validation = MonitorDiceCoefficient(freq, "Val", ds_validation,
                                                 135)

        # Create stopping criteria and add them to the trainer
        max_epoch = MaxEpoch(300)
        early_stopping = EarlyStopping(err_validation, 10, 0.99, 5)

        # Create the network selector
        params_selector = ParamSelectorBestMonitoredValue(err_validation)

        # Create the trainer object
        # batch_size = 200
        batch_size = 10
        t = Trainer(
            net, cost_function, params_selector, [max_epoch, early_stopping],
            lr_update, ds_training, batch_size,
            [err_testing, err_validation, dice_testing, dice_validation])

        ###### Train the network

        t.train()

        ###### Plot the records

        # pred = np.argmax(t.net.predict(ds_testing.inputs, 10000), axis=1)
        # d = compute_dice(pred, np.argmax(ds_testing.outputs, axis=1), 134)
        # print "Dice test: {}".format(np.mean(d))
        # print "Error rate test: {}".format(error_rate(np.argmax(ds_testing.outputs, axis=1), pred))

        save_records_plot(self.path, [err_validation, err_testing], "err",
                          t.n_train_batches, "upper right")
        save_records_plot(self.path, [dice_validation, dice_testing], "dice",
                          t.n_train_batches, "lower right")

        ###### Save the network

        net.save_parameters(self.path + "net.net")
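
# EarlyStopping(err_validation, 10, 0.99, 5) is built from the validation error
# monitor plus three numbers whose meaning is not visible in this file. The sketch
# below is only a plausible reading: a patience-based criterion that stops once the
# monitored error has not improved by a relative threshold for `patience` epochs.
class PatienceEarlyStopping(object):
    def __init__(self, patience=10, improvement_threshold=0.99):
        self.patience = patience
        self.improvement_threshold = improvement_threshold
        self.best = float("inf")
        self.epochs_without_improvement = 0

    def should_stop(self, monitored_error):
        if monitored_error < self.best * self.improvement_threshold:
            self.best = monitored_error
            self.epochs_without_improvement = 0
        else:
            self.epochs_without_improvement += 1
        return self.epochs_without_improvement >= self.patience
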
Example #8
__author__ = 'adeb'

import matplotlib
matplotlib.use('Agg')
import nibabel as nib
import numpy as np

from spynet.utils.utilities import analyse_classes

if __name__ == '__main__':
    nib.nifti1.FLOAT32_EPS_3 = -1e-6
    lab_file = "./datasets/miccai/1000_3.nii"
    lab = nib.load(lab_file).get_data().squeeze()
    lab = np.asarray(lab, dtype=np.int16)

    analyse_classes(lab[lab.nonzero()])
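
# analyse_classes is called here on the nonzero voxel labels of a MICCAI
# parcellation volume. An illustrative stand-in for the kind of summary it could
# produce (label counts and proportions); this is not the spynet implementation.
# numpy is already imported above as np.
def summarize_labels(labels):
    values, counts = np.unique(labels, return_counts=True)
    proportions = counts.astype(float) / counts.sum()
    for v, c, p in zip(values, counts, proportions):
        print("label {}: {} voxels ({:.2%})".format(v, c, p))
    return values, proportions
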

Example #9
    def run(self):
        ###### Create the datasets

        training_data_path = self.data_path + "train.h5"
        testing_data_path = self.data_path + "test.h5"

        # If files don't already exist, create them
        if not os.path.isfile(training_data_path):
            transform_mnist_to_h5()

        prop_validation = 0.3  # Percentage of the training dataset that is used for validation (early stopping)
        ds_training = Dataset.create_and_read(training_data_path)
        ds_validation, ds_training = ds_training.split_dataset_proportions([prop_validation, 1-prop_validation])
        ds_testing = Dataset.create_and_read(testing_data_path)

        # Few stats about the targets
        analyse_classes(np.argmax(ds_training.outputs, axis=1), "Training data")

        # Scale the data
        s = Scaler([slice(None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)

        ###### Create the network

        net = NetworkMNIST()
        net.init(28, 28, 10)
        print net

        ###### Configure the trainer

        # Cost function
        cost_function = CostNegLL()

        # Learning update
        learning_rate = 0.13
        momentum = 0.5
        lr_update = LearningUpdateGDMomentum(learning_rate, momentum)

        # Create monitors and add them to the trainer
        err_training = MonitorErrorRate(1, "Training", ds_training)
        err_testing = MonitorErrorRate(1, "Testing", ds_testing)
        err_validation = MonitorErrorRate(1, "Validation", ds_validation)

        # Create stopping criteria and add them to the trainer
        max_epoch = MaxEpoch(300)
        early_stopping = EarlyStopping(err_validation)

        # Create the network selector
        params_selector = ParamSelectorBestMonitoredValue(err_validation)

        # Create the trainer object
        batch_size = 200
        t = Trainer(net, cost_function, params_selector, [max_epoch, early_stopping],
                    lr_update, ds_training, batch_size,
                    [err_training, err_testing, err_validation])

        ###### Train the network

        t.train()

        ###### Plot the records

        save_records_plot(self.path, [err_training, err_testing, err_validation], "errors", t.n_train_batches)

        ###### Save the network

        net.save_parameters(self.path + "net.net")
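
# MonitorErrorRate(1, "Training", ds_training) tracks a classification error rate
# once per epoch. The quantity itself is sketched below; the monitor's batching and
# reporting details are not shown in this file.
import numpy as np

def error_rate(class_probs, one_hot_targets):
    predictions = np.argmax(class_probs, axis=1)
    truth = np.argmax(one_hot_targets, axis=1)
    return np.mean(predictions != truth)
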
    def run(self):
        ###### Create the datasets

        # aa = CostNegLLWeighted(np.array([0.9, 0.1]))
        # e = theano.function(inputs=[], outputs=aa.test())
        # print e()

        ## Load the data
        training_data_path = self.data_path + "train.h5"
        ds_training = DatasetBrainParcellation()
        ds_training.read(training_data_path)

        ds_training, ds_validation = ds_training.split_dataset_proportions(
            [0.95, 0.05])

        testing_data_path = self.data_path + "test.h5"
        ds_testing = DatasetBrainParcellation()
        ds_testing.read(testing_data_path)

        ## Display data sample
        # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:50],
        #                                                img_shape=(29, 29), tile_shape=(5, 10),
        #                                                tile_spacing=(1, 1)))
        # image.save(self.path + "filters_corruption_30.png")

        ## Few stats about the targets
        classes, proportion_class = analyse_classes(
            np.argmax(ds_training.outputs, axis=1), "Training data:")
        print(classes)
        ## Scale some part of the data
        print("Scaling")
        s = Scaler([slice(-134, None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)
        pickle.dump(s, open(self.path + "s.scaler", "wb"))

        ###### Create the network
        input_var = T.matrix('inputs')
        target_var = T.matrix('targets')
        # net = ConvNet(135, input_var, target_var, 29, 29, 13, 134)
        # net = ResNet(135, input_var, target_var, 29, 29, 13, 134)
        # net = VGGNet(135, input_var, target_var, 29, 29, 13, 134)
        # net = Conv3DNet_Multidropout(135, input_var, target_var, 29, 29, 13, 134)
        # net = Conv3DNetComp_Lg(135, input_var, target_var, 29, 29, 134)
        # net = GoogLeNet(135, input_var, target_var, 29, 29, 13, 134)
        # net = Conv3DNet_SmCompFilter(135, input_var, target_var, 29, 29, 13, 134)
        # net = Conv3DNet_HeNorm(135, input_var, target_var, 29, 29, 13, 134)
        # net = SmallInception(135, input_var, target_var, 29, 29, 13, 134)
        # net = Conv3DNet_NoCentroid(135, input_var, target_var, 29, 29, 13, 134)
        # net = ConvNet_VerySmall(135, input_var, target_var, 29, 29, 13, 134)
        # try:
        net = Inceptionv4Simple(135, input_var, target_var, 29, 29, 13, 134)
        # except Exception as e:
        #     print("Program terminated at: " + str(datetime.datetime.now()))
        #     print(e)

        # print net.net
        learning_rate = 0.045
        # learning_rate = 0.0001

        # Create stopping criteria and add them to the trainer
        max_epoch = 15
        # early_stopping = EarlyStopping(err_validation, 10, 0.99, 5)

        # Create the trainer object
        batch_size = 50
        t = Trainer(net, ds_testing, ds_validation, ds_training, batch_size,
                    learning_rate)

        ###### Train the network

        t.train()

        ###### Plot the records

        # pred = np.argmax(t.net.predict(ds_testing.inputs, 10000), axis=1)
        # d = compute_dice(pred, np.argmax(ds_testing.outputs, axis=1), 134)
        # print "Dice test: {}".format(np.mean(d))
        # print "Error rate test: {}".format(error_rate(np.argmax(ds_testing.outputs, axis=1), pred))

        save_records_plot(self.path, [t.val_errs, t.test_errs],
                          ["validation", "test", "Error"], "upper right")
        save_records_plot(self.path, [t.val_dices, t.test_dices],
                          ["validation", "test", "Dice coefficient"],
                          "lower right")

        ###### Save the network
        np.savez(self.path + 'model.npz', *t.best_net_param)
        print("network model saved")