Example #1
def train_Segment_GBM(data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities': ['*FLAIR_pp.*', '*T2_pp.*', '*T1_pp.*', '*T1post_pp.*', '*full_edemamask_pp.*'],
        'ground_truth': ['*full_edemamask_pp.*']
    }

    # Create a Data Collection
    training_data_collection = DataCollection(data_directory, training_modality_dict, verbose=True)
    training_data_collection.fill_data_groups()

    # Add left-right flips
    flip_augmentation = Flip_Rotate_2D(flip=True, rotate=False, data_groups=['input_modalities', 'ground_truth'])
    training_data_collection.append_augmentation(flip_augmentation, multiplier=2)

    # Define patch sampling regions
    def brain_region(data):
        return (data['ground_truth'] != 1) & (data['input_modalities'] != 0)
    def roi_region(data):
        return data['ground_truth'] == 1

    # Add patch augmentation
    patch_augmentation = ExtractPatches(patch_shape=(32,32,32), patch_region_conditions=[[brain_region, .3], [roi_region, .7]], data_groups=['input_modalities', 'ground_truth'])
    training_data_collection.append_augmentation(patch_augmentation, multiplier=70)

    # Write the data to hdf5
    training_data_collection.write_data_to_file('./test.h5')

    # Define model parameters
    model_parameters = {
        'input_shape': (32, 32, 32, 4),
        'downsize_filters_factor': 1,
        'pool_size': (2, 2, 2),
        'filter_shape': (3, 3, 3),
        'dropout': .1,
        'batch_norm': False,
        'initial_learning_rate': 0.00001,
        'output_type': 'binary_label',
        'num_outputs': 1,
        'activation': 'relu',
        'padding': 'same',
        'implementation': 'keras',
        'depth': 4,
        'max_filter': 512
    }

    # Create U-Net
    if True:
        unet_model = UNet(**model_parameters)

    # Or load an old one
    else:
        unet_model = load_old_model('model.h5')

    # Define training parameters
    training_parameters = {}

    # Define training generators
    training_generator = None
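
None of these excerpts show their imports. Assuming the usual DeepNeuro package layout, the names used above and in the later examples would be imported roughly as follows; the module paths here are an assumption, not part of the original listing:

# Assumed imports for these DeepNeuro examples; the exact module paths are a
# guess based on the package layout and may need adjusting.
import os

from deepneuro.data.data_collection import DataCollection
from deepneuro.augmentation.augment import Flip_Rotate_2D, ExtractPatches, ChooseData, Downsample, Copy
from deepneuro.models.unet import UNet
from deepneuro.models.model import load_old_model
from deepneuro.outputs.inference import ModelPatchesInference
from keras.utils import plot_model
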
Example #2
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities':
        ['FLAIR_pp.*', 'T2_pp.*', 'T1_pp.*', 'T1post_pp.*'],
        'ground_truth': ['enhancingmask_pp.nii.gz']
    }

    load_data = False
    train_model = False
    load_test_data = False
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_data.h5'
    model_file = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_model.h5'
    testing_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_data.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        def empty_region(data):
            return data['input_modalities'] == 0

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[empty_region, .05], [brain_region, .25],
                                     [roi_region, .7]],
            data_groups=['input_modalities', 'ground_truth'],
            patch_dimensions={
                'ground_truth': [1, 2, 3],
                'input_modalities': [1, 2, 3]
            })
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=2000)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data,
                                                  verbose=True)
        training_data_collection.fill_data_groups()

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        # flip_augmentation = Flip_Rotate_3D(data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Define model parameters
        model_parameters = {
            'input_shape': (32, 32, 32, 4),
            'downsize_filters_factor': 1,
            'pool_size': (2, 2, 2),
            'filter_shape': (5, 5, 5),
            'dropout': 0,
            'batch_norm': True,
            'initial_learning_rate': 0.000001,
            'output_type': 'regression',
            'num_outputs': 1,
            'activation': 'relu',
            'padding': 'same',
            'implementation': 'keras',
            'depth': 4,
            'max_filter': 512
        }

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': model_file,
            'training_batch_size': 64,
            'num_epochs': 1000,
            'training_steps_per_epoch': 20
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Define input modalities to load.
    testing_modality_dict = {
        'input_modalities':
        ['FLAIR_pp.*', 'T2_pp.*', 'T1_pp.*', 'T1post_pp.*']
    }

    if predict:
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=testing_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()

        if load_test_data:
            # Write data to hdf5
            testing_data_collection.write_data_to_file(testing_data)

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'brats_enhancing_only_prediction.nii.gz',
            'batch_size': 250,
            'patch_overlaps': 1,
            'output_patch_shape': (26, 26, 26, 4),
            'save_all_steps': True
        }

        prediction = ModelPatchesInference(**testing_parameters)

        label_binarization = BinarizeLabel(postprocessor_string='_label')

        # 'largest_island' is not defined in this excerpt; it is presumably a postprocessor instance created elsewhere.
        prediction.append_postprocessor([label_binarization, largest_island])

        unet_model.append_output([prediction])
        unet_model.generate_outputs(testing_data_collection)
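
The patch_region_conditions argument used above pairs each boolean condition function with a sampling proportion (here .05 / .25 / .7). A minimal NumPy sketch of that weighted-region idea, purely illustrative and not DeepNeuro's actual sampler, might look like this:

import numpy as np

def sample_patch_center(data, conditions, rng=np.random):
    # `conditions` mirrors patch_region_conditions: a list of
    # (condition_fn, proportion) pairs, each condition_fn returning a boolean
    # mask over the volume. This is only a sketch of the idea.
    functions, weights = zip(*conditions)
    weights = np.asarray(weights, dtype=float)
    weights /= weights.sum()

    # Pick which region to sample from, proportionally to its weight.
    region_fn = functions[rng.choice(len(functions), p=weights)]

    # Candidate patch centers are the voxels where that condition holds.
    candidates = np.argwhere(region_fn(data))
    return candidates[rng.randint(len(candidates))]
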
Example #3
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities': [
            '*FLAIR*', ['*T2SPACE*', '*T2_pp*'], ['*T1_pp.*', '*MPRAGE_Pre*'],
            ['*T1post_pp.*', '*MPRAGE_POST*'], ['enhancing*'],
            ['wholetumor*', 'full_edemamask*']
        ],
        'ground_truth': [['enhancing*'], ['wholetumor*', 'full_edemamask*']]
    }

    load_data = False
    train_model = False
    load_test_data = True
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/enhancing_label_upsampling_323232.h5'
    model_file = 'label_upsampling_323232_model_correct.h5'
    testing_data = './FLAIR_upsampling_323232_test.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[roi_region, 1]],
            data_groups=['input_modalities', 'ground_truth'],
            patch_dimensions={
                'ground_truth': [0, 1, 2],
                'input_modalities': [0, 1, 2]
            })
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=70)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data,
                                                  verbose=True)
        training_data_collection.fill_data_groups()

        # Choose a modality
        choice_augmentation = ChooseData(
            axis={
                'input_modalities': -1,
                'ground_truth': -1
            },
            choices=[-1, -2],
            data_groups=['input_modalities', 'ground_truth'],
            random_sample=False)
        training_data_collection.append_augmentation(choice_augmentation,
                                                     multiplier=2)

        # Add down-sampling
        mask_augmentation = Downsample(channel=4,
                                       axes={'input_modalities': [-4, -3, -2]},
                                       factor=3,
                                       data_groups=['input_modalities'])
        training_data_collection.append_augmentation(mask_augmentation,
                                                     multiplier=4)

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Define model parameters
        model_parameters = {
            'input_shape': (32, 32, 32, 5),
            'downsize_filters_factor': 1,
            'pool_size': (2, 2, 2),
            'filter_shape': (5, 5, 5),
            'dropout': 0,
            'batch_norm': True,
            'initial_learning_rate': 0.000001,
            'output_type': 'binary_label',
            'num_outputs': 1,
            'activation': 'relu',
            'padding': 'same',
            'implementation': 'keras',
            'depth': 4,
            'max_filter': 512
        }

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': model_file,
            'training_batch_size': 64,
            'num_epochs': 1000,
            'training_steps_per_epoch': 20
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data.
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=training_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data,
                                                 verbose=True)
        testing_data_collection.fill_data_groups()

        # Choose a modality
        choice_augmentation = ChooseData(
            axis={
                'input_modalities': -1,
                'ground_truth': -1
            },
            choices=[-1, -2],
            data_groups=['input_modalities', 'ground_truth'],
            random_sample=False)
        testing_data_collection.append_augmentation(choice_augmentation,
                                                    multiplier=2)

        # Add down-sampling
        mask_augmentation = Downsample(channel=4,
                                       axes={'input_modalities': [-4, -3, -2]},
                                       factor=3,
                                       data_groups=['input_modalities'],
                                       random_sample=False)
        testing_data_collection.append_augmentation(mask_augmentation,
                                                    multiplier=3)

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'deepneuro-label.nii.gz',
            'batch_size': 250,
            'patch_overlaps': 6,
            'output_patch_shape': (26, 26, 26, 4)
        }

        prediction = ModelPatchesInference(testing_data_collection,
                                           **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
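
The Downsample augmentation in Example #3 presumably simulates a low-resolution acquisition of one input channel (channel 4, by a factor of 3). A rough, self-contained sketch of that assumed behaviour, not the library's code:

import numpy as np

def downsample_channel(volume, channel, factor):
    # Block-average one channel of a (x, y, z, c) volume over factor-sized
    # cubes, then repeat the coarse values back to full size. This is an
    # assumption about what Downsample does, kept only as an illustration.
    x, y, z, _ = volume.shape
    cx, cy, cz = (x // factor) * factor, (y // factor) * factor, (z // factor) * factor
    channel_data = volume[:cx, :cy, :cz, channel]

    # Non-overlapping factor^3 blocks, averaged...
    blocks = channel_data.reshape(cx // factor, factor, cy // factor, factor, cz // factor, factor)
    coarse = blocks.mean(axis=(1, 3, 5))

    # ...then stretched back to the cropped grid so the array shape is unchanged.
    restored = np.repeat(np.repeat(np.repeat(coarse, factor, axis=0), factor, axis=1), factor, axis=2)

    result = volume.copy()
    result[:cx, :cy, :cz, channel] = restored
    return result
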
Example #4
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    if True:
        training_modality_dict = {
            'input_modalities':
            ['*FLAIR_pp.*', '*T2_pp.*', '*T1_pp.*', '*T1post_pp.*'],
            'ground_truth': ['*full_edemamask_pp.*']
        }
    else:
        training_modality_dict = {
            'input_modalities': [['*FLAIR_pp.*', 'FLAIR_norm2*'],
                                 ['*T1post_pp.*', 'T1post_norm2*']],
            'ground_truth': ['*full_edemamask_pp.*', 'FLAIRmask-label.nii.gz']
        }

    load_data = True
    train_model = True
    load_test_data = True
    predict = True

    training_data = './wholetumor_predict_patches_test3.h5'
    model_file = 'wholetumor_segnet-58-0.38.h5'
    testing_data = './brats_test_case.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[brain_region, 1]],
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=200)

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    # Or load pre-loaded data.
    training_data_collection = DataCollection(data_storage=training_data,
                                              verbose=True)
    training_data_collection.fill_data_groups()

    # Define model parameters
    model_parameters = {
        'input_shape': (32, 32, 32, 4),
        'downsize_filters_factor': 1,
        'pool_size': (2, 2, 2),
        'filter_shape': (3, 3, 3),
        'dropout': 0,
        'batch_norm': True,
        'initial_learning_rate': 0.000001,
        'output_type': 'binary_label',
        'num_outputs': 1,
        'activation': 'relu',
        'padding': 'same',
        'implementation': 'keras',
        'depth': 4,
        'max_filter': 512
    }

    # Create U-Net
    if train_model:
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath':
            'wholetumor_segnet-{epoch:02d}-{loss:.2f}.h5',
            'training_batch_size': 2,
            'num_epochs': 100,
            'training_steps_per_epoch': 200,
            'save_best_only': False
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data.
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=training_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data,
                                                 verbose=True)
        testing_data_collection.fill_data_groups()

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'deepneuro.nii.gz',
            'batch_size': 200,
            'patch_overlaps': 1
        }

        prediction = ModelPatchesInference(testing_data_collection,
                                           **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
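
The output_model_filepath template in Example #4 ('wholetumor_segnet-{epoch:02d}-{loss:.2f}.h5'), together with the save_best_only flag, looks like it is handed through to a Keras ModelCheckpoint callback; if so, the equivalent stand-alone Keras setup would be roughly the following (an assumption about DeepNeuro's internals, not confirmed by this listing):

from keras.callbacks import ModelCheckpoint

# The epoch number and current loss are substituted into the filename each
# epoch; save_best_only=False keeps a checkpoint per epoch rather than only
# the best-scoring one.
checkpoint = ModelCheckpoint(
    filepath='wholetumor_segnet-{epoch:02d}-{loss:.2f}.h5',
    save_best_only=False)

# keras_model.fit(..., callbacks=[checkpoint])
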
Example #5
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities': ['*FLAIR*nii.gz', ['*T2SPACE*nii.gz'], ['*MPRAGE_POST*nii.gz'], ['*MPRAGE_Pre*nii.gz']],
        'ground_truth': ['*SUV_r_T2_raw.nii.gz*']
    }

    load_data = False
    train_model = False
    load_test_data = True
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/TMZ_4_323232.h5'
    model_file = 'TMZ_4_323232_model.h5'
    testing_data = './TMZ_4_323232_test.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] != 0)
        def roi_region(data):
            return data['ground_truth'] >= 1.5

        # Add patch augmentation
        patch_augmentation = ExtractPatches(patch_shape=(32, 32, 32), patch_region_conditions=[[brain_region, .5], [roi_region, .5]], data_groups=['input_modalities', 'ground_truth'], patch_dimensions={'ground_truth': [0,1,2], 'input_modalities': [0,1,2]})
        training_data_collection.append_augmentation(patch_augmentation, multiplier=2000)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data, verbose=True)
        training_data_collection.fill_data_groups()

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(flip=True, rotate=False, data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation, multiplier=2)

        # Define model parameters
        model_parameters = {'input_shape': (32, 32, 32, 4),
                        'downsize_filters_factor': 1,
                        'pool_size': (2, 2, 2), 
                        'filter_shape': (5, 5, 5), 
                        'dropout': 0, 
                        'batch_norm': True, 
                        'initial_learning_rate': 0.000001, 
                        'output_type': 'regression',
                        'num_outputs': 1, 
                        'activation': 'relu',
                        'padding': 'same', 
                        'implementation': 'keras',
                        'depth': 4,
                        'max_filter': 512}

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model, to_file='model_image_dn.png', show_shapes=True)
        training_parameters = {'input_groups': ['input_modalities', 'ground_truth'],
                        'output_model_filepath': model_file,
                        'training_batch_size': 64,
                        'num_epochs': 1000,
                        'training_steps_per_epoch': 20}
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data.
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(val_data_directory, modality_dict=training_modality_dict, verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data, verbose=True)
        testing_data_collection.fill_data_groups()

        flip_augmentation = Copy(data_groups=['input_modalities', 'ground_truth'])
        testing_data_collection.append_augmentation(flip_augmentation, multiplier=1)

        testing_parameters = {'inputs': ['input_modalities'], 
                        'output_filename': 'deepneuro_suv_4.nii.gz',
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (26,26,26,4)}

        prediction = ModelPatchesInference(testing_data_collection, **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
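
Each of these examples is only a driver function; running Example #5 would amount to something like the following (the directory paths are placeholders, not taken from the source):

if __name__ == '__main__':
    # Hypothetical paths pointing at preprocessed training and validation cohorts.
    train_Segment_GBM('/path/to/training_data', '/path/to/validation_data')
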
Example #6
    def build_tensorflow_model(self, batch_size):

        """ Break it out into functions?
        """

        # Set input/output shapes for reference during inference.
        self.model_input_shape = tuple([batch_size] + list(self.input_shape))
        self.model_output_shape = tuple([batch_size] + list(self.input_shape))

        self.latent = tf.placeholder(tf.float32, [None, self.latent_size])
        self.reference_images = tf.placeholder(tf.float32, [None] + list(self.model_input_shape)[1:])

        self.synthetic_images = generator(self, self.latent, depth=self.depth, name='generator')

        _, _, _, self.discriminator_real_logits = discriminator(self, self.reference_images, depth=self.depth + 1, name='discriminator')
        _, _, _, self.discriminator_fake_logits = discriminator(self, self.synthetic_images, depth=self.depth + 1, name='discriminator', reuse=True)

        self.basic_loss = tf.reduce_mean(tf.square(self.reference_images - self.synthetic_images))

        # Loss functions
        self.D_loss = tf.reduce_mean(self.discriminator_fake_logits) - tf.reduce_mean(self.discriminator_real_logits)
        self.G_loss = -tf.reduce_mean(self.discriminator_fake_logits)

        # Gradient Penalty from Wasserstein GAN GP, I believe? Check on it --andrew
        # Also investigate more what's happening here --andrew
        self.differences = self.synthetic_images - self.reference_images
        self.alpha = tf.random_uniform(shape=[tf.shape(self.differences)[0], 1, 1, 1], minval=0., maxval=1.)
        interpolates = self.reference_images + (self.alpha * self.differences)
        _, _, _, discri_logits = discriminator(self, interpolates, reuse=True, depth=self.depth + 1, name='discriminator')
        gradients = tf.gradients(discri_logits, [interpolates])[0]

        # Some sort of norm from papers, check up on it. --andrew
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
        self.gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
        tf.summary.scalar("gp_loss", self.gradient_penalty)

        # Update Loss functions..
        self.D_origin_loss = self.D_loss
        self.D_loss += 10 * self.gradient_penalty
        self.D_loss += 0.001 * tf.reduce_mean(tf.square(self.discriminator_real_logits - 0.0))

        # vgg_model = tf.keras.applications.VGG19(include_top=False,
        #                                     weights='imagenet',
        #                                     input_tensor=self.synthetic_images,
        #                                     input_shape=(64, 64, 3),
        #                                     pooling=None,
        #                                     classes=1000)
        # print(vgg_model)

        # self.load_reference_model()

        input_tensor = keras.layers.Input(tensor=self.synthetic_images, shape=self.input_shape)

        model_parameters = {'input_shape': self.input_shape,
                    'downsize_filters_factor': 1,
                    'pool_size': (2, 2), 
                    'kernel_size': (3, 3), 
                    'dropout': 0, 
                    'batch_norm': True, 
                    'initial_learning_rate': 0.00001, 
                    'output_type': 'binary_label',
                    'num_outputs': 1, 
                    'activation': 'relu',
                    'padding': 'same', 
                    'implementation': 'keras',
                    'depth': 3,
                    'max_filter': 128,
                    'stride_size': (1, 1),
                    'input_tensor': input_tensor}

        unet_output = UNet(**model_parameters)
        unet_model = keras.models.Model(input_tensor, unet_output.output_layer)
        unet_model.load_weights('retinal_seg_weights.h5')

        if self.hyperverbose:
            self.model_summary()

        # self.find_layers(['sampling'])

        self.activated_tensor = self.grab_tensor(self.activated_tensor_name)
        print(self.activated_tensor)
        self.activated_tensor = tf.stack([self.activated_tensor[..., self.filter_num]], axis=-1)
        print(self.activated_tensor)
        # self.input_tensor = self.grab_tensor(self.input_tensor_name)

        self.activation_loss = -1 * tf.reduce_mean(self.activated_tensor)
        self.activation_gradients = tf.gradients(self.activation_loss, self.synthetic_images)
        print(self.activation_gradients)

        # Hmmm.. better way to do this? Or at least move to function.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
        self.g_vars = [var for var in t_vars if 'generator' in var.name]

        # Create save/load operation
        self.saver = tf.train.Saver(self.g_vars + self.d_vars)

        self.G_activation_loss = self.G_loss + .000 * self.activation_loss

        # Create Optimizers
        self.opti_D = tf.train.AdamOptimizer(learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(
            self.D_loss, var_list=self.d_vars)
        self.opti_G = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.G_activation_loss, var_list=self.g_vars)

        self.combined_loss = 1 * self.activation_loss + 1 * self.basic_loss

        self.combined_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.combined_loss, var_list=self.g_vars)

        self.basic_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.basic_loss, var_list=self.g_vars)

        self.activation_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.activation_loss, var_list=self.g_vars)
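
The "--andrew" comments in Example #6 ask whether the interpolation block is the WGAN-GP gradient penalty; it is (Gulrajani et al., "Improved Training of Wasserstein GANs", 2017). Pulled out into a helper, the same computation would read roughly as follows (TF 1.x style to match the surrounding code; the helper itself is illustrative, not part of the source):

import tensorflow as tf  # TF 1.x API, matching the placeholders above

def wgan_gradient_penalty(discriminator_fn, real_images, fake_images):
    # Sample random points on the line segments between real and synthetic images.
    batch_size = tf.shape(real_images)[0]
    alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1], minval=0., maxval=1.)
    interpolates = real_images + alpha * (fake_images - real_images)

    # Penalize the discriminator's gradient norm at those points for deviating from 1.
    logits = discriminator_fn(interpolates)
    gradients = tf.gradients(logits, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    return tf.reduce_mean((slopes - 1.) ** 2)

In build_tensorflow_model above, this penalty is then weighted by 10 and added to D_loss, alongside a small drift term on the real logits.
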