Example #1
    def wrapper(*args, **kwargs):
        # Load configuration if an experiment is specified
        update_params = {}
        if 'experiment' in kwargs:
            exp_id = kwargs['experiment']
            with open('utils/experiments.yml', 'r') as conf_file:
                loaded_yaml = yaml.safe_load(conf_file)
            loaded_params = loaded_yaml.get(exp_id, {})
            if 'model_exp_ids' in loaded_params:
                if isinstance(loaded_params['model_exp_ids'], list):
                    pass
            # Keep only config entries that match the wrapped function's parameters;
            # explicit **kwargs override them below
            varnames = func.__code__.co_varnames
            for key, value in loaded_params.items():
                if key in varnames:
                    update_params[key] = value
        update_params.update(kwargs)
        print(update_params)
        kwargs = update_params
        # Load the model if 'net' is given as a path
        if 'net' in kwargs and isinstance(kwargs['net'], str):
            kwargs['net'] = NDN.load_model(kwargs['net'])

        # Load the dataset if 'dataset' is given as a string
        if 'dataset' in kwargs and isinstance(kwargs['dataset'], str):
            kwargs['dataset'] = get_data_loader(kwargs['dataset'])

        return func(**kwargs)
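
This snippet is the inner `wrapper` of a decorator. A minimal sketch of how the enclosing decorator could look and be applied (the decorator name `with_experiment_config` and the `run` signature below are hypothetical, not taken from the original code):

import functools

def with_experiment_config(func):
    # Fill keyword arguments of `func` from utils/experiments.yml before calling it
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        ...  # body as in the snippet above
        return func(**kwargs)

    return wrapper

@with_experiment_config
def run(experiment=None, net=None, dataset=None):
    ...
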
Example #2
def plot_invariance_summary(net,
                            chosen_neurons,
                            perc,
                            experiment='000',
                            mask=False,
                            max_error=0.05,
                            num_samples=8,
                            scale_first_separately=False):
    """
    Generate invariances from generator for all `chosen_neurons` and plot them in common summary with MEIs
        Parameters:
            net (NDN): trained Neural network
            chosen_neurons (List[int]): neuron IDs
            perc (float): Target percentage of MEI activation
            experiment (str): experiment ID
            mask (bool): If true stimuli will be masked on a plot
            max_error (float): Max deviation from target activation
            num_samples (int): number of samples for each neuron
    """
    row_names = [str(n) for n in chosen_neurons]
    mei = np.load(f'output/04_mei/{experiment}_mei_n.npy')
    mei_act = np.load(f'output/04_mei/{experiment}_mei_activations_n.npy')
    size_x, size_y = net.input_sizes[0][1:]
    titles = []
    all_images = []

    for neuron in chosen_neurons:
        generator = NDN.load_model(
            f'output/08_generators/{experiment}_neuron{neuron}_generator.pkl')
        noise_shape = generator.input_sizes[0][1]
        noise_input = np.random.uniform(-2, 2, size=(1000, noise_shape))
        invariances = generator.generate_prediction(noise_input)
        images, activations = choose_representant(
            num_samples,
            net,
            neuron,
            invariances,
            activation_lowerbound=((perc - max_error) * mei_act[neuron]
                                   if perc is not None else -np.inf),
            activation_upperbound=((perc + max_error) * mei_act[neuron]
                                   if perc is not None else np.inf))
        #print(mask_stimuli)
        images[0] = np.reshape(mask_stimuli(mei[[neuron]], experiment, neuron),
                               (1, -1))
        all_images.append(images)
        activations[0] = mei_act[neuron]
        titles += [f'{act/activations[0]:.2f}' for act in activations]
    all_images = np.reshape(np.vstack(all_images), (-1, size_x, size_y))
    plot_grid(
        all_images,
        titles,
        num_cols=num_samples,
        save_path=f'output/08_generators/{experiment}_invariance_overview.png',
        show=False,
        row_names=row_names,
        scale_first_separately=scale_first_separately,
        ignore_assertion=True)
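
A minimal usage sketch, assuming a trained model plus the MEI arrays under output/04_mei/ and per-neuron generators under output/08_generators/ already exist for experiment '000' (the model path below is a placeholder):

model = NDN.load_model('output/models/000_model.pkl')  # placeholder path
plot_invariance_summary(model,
                        chosen_neurons=[3, 17, 42],
                        perc=0.9,
                        experiment='000',
                        num_samples=8)
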
Example #3
def compute_mask(net: NDN, images: np.array, try_pixels=range(0, 2, 1)):
    """
    Computes ROI/masks 
        Parameters:
            net (NDN): A trained neural network
            images (np.array): Images to be presented to the `net` with changing values of pixel luminance
            try_pixels (iterable): Iterable object of values of pixel luminance to add on `images`

        Returns:
            mask (np.array): Array of masks for each neuron
    """
    num_images, num_pixels = np.shape(images)
    activations = net.generate_prediction(images)
    mask = np.zeros(num_pixels)
    net.batch_size = 512
    size_x, size_y = net.input_sizes[0][1:]
    size_out = net.output_sizes[0]

    # Generate images by changing single pixel value
    inputs = []
    for pixel_position in tqdm(range(num_pixels)):
        for i, pixel in enumerate(try_pixels):
            modified_images = np.copy(images)
            modified_images[:, pixel_position] += np.repeat(pixel, num_images)
            inputs.append(modified_images)
    modified_images = np.vstack(inputs)

    # Predict and subtract base activation
    predicted_activations = net.generate_prediction(modified_images)
    differences = predicted_activations - \
        np.tile(activations, (len(try_pixels)*num_pixels, 1))

    # Compute std of each pixel and arrange result into grid shaped like stimuli
    differences = np.reshape(differences, (size_x, size_y, -1, size_out))
    mask = np.std(differences, axis=2).transpose((2, 0, 1))
    return mask
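
A minimal usage sketch, assuming a trained `net` and a stimulus matrix `images` of shape (num_images, num_pixels); the luminance offsets are illustrative:

offsets = np.linspace(-0.5, 0.5, 5)    # luminance offsets to try on each pixel
masks = compute_mask(net, images, try_pixels=offsets)
print(masks.shape)                     # one mask per neuron: (num_neurons, size_x, size_y)
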
Example #4
def plot_from_generator(net,
                        neuron,
                        num_representants,
                        experiment='000',
                        include_mei=False,
                        mask=False,
                        max_error=0.05,
                        perc=0.95):
    """
    Generate and plot invariances from generator
        Parameters:
            net (NDN): trained Neural network
            neuron (int): neuron ID
            num_representants (int): number of represntats to plot
            experiment (str): experiment ID
            include_mei (bool): If true MEI will be placed as last image in the figure
            mask (bool): If true stimuli will be masked on a plot
            max_error (float): Max deviation from target activation
            perc (float): Target percentage of MEI activation
    """
    inputs = []
    generator = NDN.load_model(
        f'output/08_generators/{experiment}_neuron{neuron}_generator.pkl')
    _, input_size_x, input_size_y = net.input_sizes[0]
    noise_shape = input_size_x
    mei_act = np.load(
        f'output/04_mei/{experiment}_mei_activations_n.npy')[neuron]
    noise_input = np.random.uniform(-2, 2, size=(10000, noise_shape))
    invariances = generator.generate_prediction(noise_input)

    images, activations = choose_representant(
        num_representants,
        net,
        neuron,
        invariances,
        activation_lowerbound=(perc - max_error) * mei_act,
        activation_upperbound=(perc + max_error) * mei_act)

    np.save(
        f'output/06_invariances/{experiment}_neuron{neuron}_equivariance.npy',
        np.reshape(images, (-1, input_size_x, input_size_y)))
    np.save(
        f'output/06_invariances/{experiment}_neuron{neuron}_activations.npy',
        activations)
    plot_invariances(experiment=experiment,
                     neuron=neuron,
                     include_mei=include_mei,
                     mask=mask)
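
A minimal call sketch, assuming a trained `net` and the generator/MEI files for experiment '000' and neuron 17 already exist (results are saved under output/06_invariances/):

plot_from_generator(net, neuron=17, num_representants=10,
                    experiment='000', include_mei=True,
                    perc=0.95, max_error=0.05)
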
Example #5
def plot_interpolations(net,
                        neuron,
                        experiment='000',
                        mask=False,
                        num_interpolations=3,
                        num_samples=6):
    """
    Generate invariances from diameters of unit sphere in latent space
        Parameters:
            net (NDN): trained Neural network
            neuron (int): neuron ID
            experiment (str): experiment ID
            mask (bool): If true stimuli will be masked on a plot
            num_interpolations (int): number of diameters
            num_samples (int): number of sampled points from diameter

    """
    inputs = []
    generator = NDN.load_model(
        f'output/08_generators/{experiment}_neuron{neuron}_generator.pkl')
    size_x, size_y = net.input_sizes[0][1:]
    noise_shape = generator.input_sizes[0][1]
    mei_act = np.load(
        f'output/04_mei/{experiment}_mei_activations_n.npy')[neuron]

    # Generate points on sphere
    for i in range(num_interpolations):
        first_point = np.random.normal(0.0, 1.0, noise_shape)
        first_point /= np.linalg.norm(first_point, axis=0)
        inputs.append(np.linspace(first_point, -first_point, num_samples))
    noise_samples = np.vstack(inputs)

    # Predict
    invariances = generator.generate_prediction(noise_samples)
    mask_text = ''
    if mask:
        invariances = mask_stimuli(invariances, experiment, neuron)
        mask_text = '_masked'
    activations = net.generate_prediction(invariances)[:, neuron]
    titles = [f'{act/mei_act:.2f}' for act in activations]
    invariances = np.reshape(invariances, (-1, size_x, size_y))
    plot_grid(
        invariances,
        titles,
        num_cols=6,
        save_path=
        f'output/06_invariances/{experiment}_{neuron}_interpolations_plot{mask_text}.png',
        show=False)
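
Each diameter is built by linearly interpolating from a random unit vector to its antipode, so every interpolation passes through the origin of the latent space. A minimal call sketch, assuming a trained `net` and the same file prerequisites as the previous examples:

plot_interpolations(net, neuron=17, experiment='000',
                    num_interpolations=3, num_samples=6)
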
Example #6
    def extract_generator(self):
        # Extract the generator as a standalone single-subnet NDN (with biases)
        generator_subnet = NDN([
            copy.deepcopy(
                self.net_with_generator.network_list[self.generator_subnet_id])
        ],
                               noise_dist='max',
                               input_dim_list=[
                                   self.net_with_generator.network_list[
                                       self.generator_subnet_id]['input_dims']
                               ])

        # Copy weights
        GeneratorNet._copy_net_params(self.net_with_generator,
                                      generator_subnet,
                                      self.generator_subnet_id, 0)
        if self.current_norm == 'post':
            generator_subnet.networks[-1].layers[-1].normalize_output = True

        return generator_subnet
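
A minimal usage sketch, assuming `gen_net` is an already trained GeneratorNet instance (the noise range matches the one used elsewhere in this code):

generator = gen_net.extract_generator()
noise = np.random.uniform(-2, 2, size=(100, gen_net.noise_size))
stimuli = generator.generate_prediction(noise)
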
Example #7
def plot_sphere_samples(net,
                        neuron,
                        experiment='000',
                        mask=False,
                        num_samples=18):
    """
    Generate invariances from points in latent space uniformly sampled from unit sphere
        Parameters:
            dataset (DataLoader): dataset 
            net (NDN): trained Neural network
            experiment (str): experiment ID
            mask (bool): If true stimuli will be masked on a plot
            num_samples (int): number of samples from unit sphere in latent space
    """
    inputs = []
    generator = NDN.load_model(
        f'output/08_generators/{experiment}_neuron{neuron}_generator.pkl')
    size_x, size_y = net.input_sizes[0][1:]
    noise_shape = generator.input_sizes[0][1]

    mei_act = np.load(
        f'output/04_mei/{experiment}_mei_activations_n.npy')[neuron]
    noise_samples = sample_sphere(noise_shape, num_samples)
    invariances = generator.generate_prediction(noise_samples)
    mask_text = ''
    if mask:
        invariances = mask_stimuli(invariances, experiment, neuron)
        mask_text = '_masked'
    activations = net.generate_prediction(invariances)[:, neuron]
    titles = [f'{act/mei_act:.2f}' for act in activations]
    invariances = np.reshape(invariances, (-1, size_x, size_y))
    plot_grid(
        invariances,
        titles,
        num_cols=6,
        save_path=
        f'output/06_invariances/{experiment}_{neuron}_sphere_plot{mask_text}.png',
        show=False)
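
A minimal call sketch, assuming a trained `net` and the same file prerequisites as the other generator-based plots:

plot_sphere_samples(net, neuron=17, experiment='000',
                    mask=True, num_samples=18)
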
Example #8
def compare_generators(neuron,
                       net,
                       experiment='000',
                       generator_experiment=[],
                       generator_names=[],
                       percentage=[],
                       num_per_net=6,
                       mask=False,
                       max_error=0.05):
    """
    Compare generators from different experiments on same neuron and plot results. Experiments must share net and dataset parameters
        Parameters:
            neuron (int): neuron ID
            net (NDN): trained Neural network
            experiment (str): experiment ID
            generator_experiment (List[str]): List of experiment IDs to compare
            generator_names (List[str]): List of generator names. Will be displayed on a plot
            percentage (List[float]): list of target percentage of MEI for each experiment
            num_per_net (int): Number of representants per experiment
            mask (bool): If true stimuli will be masked on a plot
            max_error (float): Max deviation from target activation
    """
    images = []
    titles = []
    base_exp = generator_experiment[0]
    mei_act = np.load(
        f'output/04_mei/{base_exp}_mei_activations_n.npy')[neuron]
    noise_input = np.random.uniform(-2, 2, size=(10000, 128))
    mei = np.load(f'output/04_mei/{base_exp}_mei_n.npy')[neuron]
    size_x, size_y = net.input_sizes[0][1:]

    for generator_exp, generator_name, perc in zip(generator_experiment,
                                                   generator_names,
                                                   percentage):
        generator = NDN.load_model(
            f'output/08_generators/{generator_exp}_neuron{neuron}_generator.pkl'
        )
        invariances = generator.generate_prediction(noise_input)
        invariances, activations = choose_representant(
            num_per_net,
            net,
            neuron,
            invariances,
            activation_lowerbound=((perc - max_error) * mei_act
                                   if perc is not None else -np.inf),
            activation_upperbound=((perc + max_error) * mei_act
                                   if perc is not None else np.inf))
        if len(invariances) < num_per_net:
            raise ValueError('Cannot generate samples with given conditions')
        images.append(invariances)
        titles += list(activations)
    images.append(np.reshape(mei, (1, -1)))
    images = np.vstack(images)

    titles.append(mei_act)
    titles = [f'{tit/mei_act:.2f}' for tit in titles]

    images = np.reshape(images, (-1, size_x, size_y))
    if mask:
        images = mask_stimuli(images, base_exp, neuron)

    plot_grid(
        images,
        titles,
        num_cols=num_per_net,
        save_path=f'output/08_generators/neuron{neuron}_compare_generator.png',
        row_names=generator_names + ['MEI'],
        ignore_assertion=True)
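
A minimal call sketch, assuming generators for experiments '000' and '001' were trained for neuron 17 on the same net and dataset (the display names are placeholders):

compare_generators(17, net,
                   generator_experiment=['000', '001'],
                   generator_names=['conv generator', 'linear generator'],
                   percentage=[0.9, 0.9],
                   num_per_net=6)
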
Example #9
def train_MEI(net: NDN, input_data, output_data, data_filters, opt_args,
              fit_vars, var_layer):
    """
    Optimization of weights in variable layer to maximize neurons output
        Parameters:
            net (NDN): A trained neural network with variable layer
            input_data (np.array): input data
            output_data (np.array): output data
            data_filters (np.array): data_filters will be passed to NDN.train
            opt_args (dict): optimizer arguments
            fit_vars (dict): fit variables will be passed to NDN.train
            var_layer (NDN.Layer): Variable layer

    """
    opt_params = net.optimizer_defaults(opt_args['opt_params'],
                                        opt_args['learning_alg'])
    epochs = opt_args['opt_params']['epochs_training']
    lr = opt_args['opt_params']['learning_rate']
    _, sizex, sizey = net.input_sizes[0]
    input_data, output_data, data_filters = net._data_format(
        input_data, output_data, data_filters)
    net._build_graph(opt_args['learning_alg'],
                     opt_params,
                     fit_vars,
                     batch_size=1)

    with tf.Session(graph=net.graph, config=net.sess_config) as sess:
        # Define train step
        train_step, learning_rate = define_mei_train_step(
            net, var_layer, sizex, sizey, 0.1)

        # Restore parameters and setup metrics
        net._restore_params(sess, input_data, output_data, data_filters)
        i = 0
        cost = float('inf')
        best_cost = float('inf')
        without_increase = 0

        # Training
        while without_increase < 100 and i < epochs:
            sess.run(train_step,
                     feed_dict={
                         net.indices: [0],
                         learning_rate: lr
                     })
            cost = sess.run(net.cost, feed_dict={net.indices: [0]})
            cost_reg = sess.run(net.cost_reg, feed_dict={net.indices: [0]})
            if i % 20 == 0:
                print(
                    f'Cost: {cost:.5f}+{cost_reg:.5f}, best: {best_cost:.3f}')
            if i % 10 == 0:
                # Apply Gaussian blur
                sess.run(
                    var_layer.gaussian_blur_std_var.assign(
                        var_layer.gaussian_blur_std_var * 0.99))
            if cost < best_cost:
                best_cost = cost + cost_reg
                without_increase = 0

            sess.run(var_layer.gaussian_blur)
            i += 1
            without_increase += 1

        # Save trained weights
        net._write_model_params(sess)

    print(f'Trained with {i} epochs')
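
A minimal call sketch. `mei_net` is assumed to be an NDN whose first subnet starts with the trainable variable layer; the dummy data and optimizer settings below are placeholders, and the fit variables follow the same layers_to_skip pattern used elsewhere in this code:

opt_args = {'learning_alg': 'adam',
            'opt_params': {'epochs_training': 1000, 'learning_rate': 0.01}}
# Dummy single-sample data; the stimulus being optimized lives in the variable layer
input_data = np.zeros((1, np.prod(mei_net.input_sizes[0])))
output_data = np.zeros((1, mei_net.output_sizes[0]))
data_filters = np.ones_like(output_data)
var_layer = mei_net.networks[0].layers[0]   # assumed location of the variable layer
layers_to_skip = []
for i, subnet in enumerate(mei_net.networks):
    if i == 0:
        layers_to_skip.append(list(range(1, len(subnet.layers))))  # fit only the variable layer
    else:
        layers_to_skip.append(list(range(len(subnet.layers))))     # freeze all other subnets
fit_vars = mei_net.fit_variables(layers_to_skip=layers_to_skip, fit_biases=False)
train_MEI(mei_net, input_data, output_data, data_filters,
          opt_args, fit_vars, var_layer)
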
Example #10
    def __init__(self,
                 original_nets,
                 input_noise_size,
                 loss='oneside-gaussian',
                 norm='online',
                 is_aegan=False,
                 mask=None,
                 gen_type='conv'):
        # Save parameters
        if not isinstance(original_nets, list):
            original_nets = [original_nets]

        self.original_nets = [copy.deepcopy(net) for net in original_nets]
        self.noise_size = input_noise_size
        self.is_aegan = is_aegan
        if norm not in ['online', 'post', 'none']:
            raise ValueError(
                f'Incorrect norm \'{norm}\'. Norm should be one of online/post/none'
            )
        self.current_norm = norm

        # Assert all networks have exactly one input of the same shape
        input_stimuli_size = self.original_nets[-1].input_sizes[0]
        for i, network in enumerate(self.original_nets):
            assert len(
                network.input_sizes
            ) == 1, f'Network {i} has more than one input. Input sizes: {network.input_sizes}.'
            assert input_stimuli_size == network.input_sizes[
                0], f'Network {i} has different input shape. Expected {input_stimuli_size}, given {network.input_sizes}'

        # Create generator
        generator = self.get_gan_subnet(input_noise_size, input_stimuli_size,
                                        gen_type)
        self.generator_subnet_id = 0
        merged_networks = [generator]
        net_prefix_num = 1

        # Merge networks and rewire inputs
        ffnet_out = []
        losses = []
        network_mapping = []
        for i_net, network in enumerate(self.original_nets):
            for i_subnet, subnetwork in enumerate(network.network_list):
                network_mapping.append(
                    ((i_net, i_subnet), len(merged_networks)))
                if subnetwork['ffnet_n'] is not None:
                    subnetwork['ffnet_n'] = [
                        f + net_prefix_num for f in subnetwork['ffnet_n']
                    ]
                if subnetwork['xstim_n'] is not None:
                    subnetwork['xstim_n'] = None
                    subnetwork['ffnet_n'] = [0]
                merged_networks.append(subnetwork)
            out_nets = [
                len(network.network_list) - 1 if x == -1 else x
                for x in network.ffnet_out
            ]
            ffnet_out += [f + net_prefix_num for f in out_nets]
            losses.append(loss)
            net_prefix_num += len(network.network_list)

        # Optionally create decoder
        self.encoder_subnet_id = None
        if is_aegan:
            ffnet_out += [len(merged_networks)]
            merged_networks.append(
                self.get_encoder(input_noise_size, input_stimuli_size, 0))
            self.encoder_subnet_id = len(merged_networks) - 1
            losses.append('gaussian')

        # Define new NDN
        self.net_with_generator = NDN(
            merged_networks,
            input_dim_list=[[1, input_noise_size]],
            batch_size=self.original_nets[0].batch_size
            if self.original_nets[0].batch_size is not None else 265,
            ffnet_out=ffnet_out,
            noise_dist=losses,
            tf_seed=250)

        # Copy weights from the original nets
        for (i_net, i_subnet), target_i_net in network_mapping:
            GeneratorNet._copy_net_params(self.original_nets[i_net],
                                          self.net_with_generator, i_subnet,
                                          target_i_net)

        # Set mask
        if mask is None:
            # All-ones mask = the input passes through unchanged
            mask = np.ones(input_stimuli_size, dtype=np.float32)
        self.net_with_generator.networks[0].layers[-1].weights = mask.astype(
            np.float32)

        # Construct fit vars
        layers_to_skip = []
        for i, net in enumerate(self.net_with_generator.networks):
            if i == self.generator_subnet_id:
                # Fit all except mask
                layers_to_skip.append([len(net.layers) - 1])
            elif i == self.encoder_subnet_id:
                # Fit all
                layers_to_skip.append([])
            else:
                # Freeze weights
                layers_to_skip.append([x for x in range(len(net.layers))])

        self.generator_fit_vars = self.net_with_generator.fit_variables(
            layers_to_skip=layers_to_skip, fit_biases=False)
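
A minimal construction sketch, assuming `model` is a trained NDN whose responses the generator should drive (128 is an illustrative latent size):

gen_net = GeneratorNet(model, input_noise_size=128,
                       norm='online', gen_type='conv')
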
Example #11
class GeneratorNet:
    def __init__(self,
                 original_nets,
                 input_noise_size,
                 loss='oneside-gaussian',
                 norm='online',
                 is_aegan=False,
                 mask=None,
                 gen_type='conv'):
        # Save parameters
        if not isinstance(original_nets, list):
            original_nets = [original_nets]

        self.original_nets = [copy.deepcopy(net) for net in original_nets]
        self.noise_size = input_noise_size
        self.is_aegan = is_aegan
        if norm not in ['online', 'post', 'none']:
            raise ValueError(
                f'Incorrect norm \'{norm}\'. Norm should be one of online/post/none'
            )
        self.current_norm = norm

        # Assert all networks have exactly one input of the same shape
        input_stimuli_size = self.original_nets[-1].input_sizes[0]
        for i, network in enumerate(self.original_nets):
            assert len(
                network.input_sizes
            ) == 1, f'Network {i} has more than one input. Input sizes: {network.input_sizes}.'
            assert input_stimuli_size == network.input_sizes[
                0], f'Network {i} has different input shape. Expected {input_stimuli_size}, given {network.input_sizes}'

        # Create generator
        generator = self.get_gan_subnet(input_noise_size, input_stimuli_size,
                                        gen_type)
        self.generator_subnet_id = 0
        merged_networks = [generator]
        net_prefix_num = 1

        # Merge networks and rewire inputs
        ffnet_out = []
        losses = []
        network_mapping = []
        for i_net, network in enumerate(self.original_nets):
            for i_subnet, subnetwork in enumerate(network.network_list):
                network_mapping.append(
                    ((i_net, i_subnet), len(merged_networks)))
                if subnetwork['ffnet_n'] is not None:
                    subnetwork['ffnet_n'] = [
                        f + net_prefix_num for f in subnetwork['ffnet_n']
                    ]
                if subnetwork['xstim_n'] is not None:
                    subnetwork['xstim_n'] = None
                    subnetwork['ffnet_n'] = [0]
                merged_networks.append(subnetwork)
            out_nets = [
                len(network.network_list) - 1 if x == -1 else x
                for x in network.ffnet_out
            ]
            ffnet_out += [f + net_prefix_num for f in out_nets]
            losses.append(loss)
            net_prefix_num += len(network.network_list)

        # Optionally create decoder
        self.encoder_subnet_id = None
        if is_aegan:
            ffnet_out += [len(merged_networks)]
            merged_networks.append(
                self.get_encoder(input_noise_size, input_stimuli_size, 0))
            self.encoder_subnet_id = len(merged_networks) - 1
            losses.append('gaussian')

        # Define new NDN
        self.net_with_generator = NDN(
            merged_networks,
            input_dim_list=[[1, input_noise_size]],
            batch_size=self.original_nets[0].batch_size
            if self.original_nets[0].batch_size is not None else 265,
            ffnet_out=ffnet_out,
            noise_dist=losses,
            tf_seed=250)

        # Copy weights from the original nets
        for (i_net, i_subnet), target_i_net in network_mapping:
            GeneratorNet._copy_net_params(self.original_nets[i_net],
                                          self.net_with_generator, i_subnet,
                                          target_i_net)

        # Set mask
        if mask is None:
            # All-ones mask = the input passes through unchanged
            mask = np.ones(input_stimuli_size, dtype=np.float32)
        self.net_with_generator.networks[0].layers[-1].weights = mask.astype(
            np.float32)

        # Construct fit vars
        layers_to_skip = []
        for i, net in enumerate(self.net_with_generator.networks):
            if i == self.generator_subnet_id:
                # Fit all except mask
                layers_to_skip.append([len(net.layers) - 1])
            elif i == self.encoder_subnet_id:
                # Fit all
                layers_to_skip.append([])
            else:
                # Freeze weights
                layers_to_skip.append([x for x in range(len(net.layers))])

        self.generator_fit_vars = self.net_with_generator.fit_variables(
            layers_to_skip=layers_to_skip, fit_biases=False)

    def train_generator_on_neuron(self,
                                  optimize_neuron,
                                  data_len,
                                  max_activation=None,
                                  perc=0.9,
                                  epochs=5,
                                  noise_input=None,
                                  output=None,
                                  train_log=None):
        if output is not None and noise_input is None:
            raise ValueError('Output specified, but no input provided')

        # Create input if not specified
        if noise_input is None:
            input_shape = (data_len, self.noise_size)
            # Create input
            noise_input = np.random.normal(size=input_shape, scale=1)
            # TODO: Is normalizing necessary?
            #input_norm = np.sqrt(np.sum(noise_input**2,axis=1))/np.sqrt(self.noise_size)
        else:
            noise_input = noise_input

        # Create output if not specified
        if output is None:
            output_shape = (data_len, self.net_with_generator.output_sizes[0])
            # Create_output
            output = np.zeros(output_shape)
            if max_activation is not None:
                output[:, optimize_neuron] = output[:, optimize_neuron] + \
                    (perc*np.ones(output_shape[0]) * max_activation)

        # Setup data filter to filter only desired neuron
        tmp_filters = np.zeros(
            (data_len, self.net_with_generator.output_sizes[0]))
        tmp_filters[:, optimize_neuron] = 1

        output = [output]
        tmp_filters = [tmp_filters]
        if self.encoder_subnet_id is not None:
            output.append(noise_input)
            tmp_filters.append(np.ones((data_len, self.noise_size)))
        print(len(output))
        # Set L2-norm on output
        if self.current_norm == 'online':
            # if not isinstance(l2_norm,list):
            #     l2_norm = [l2_norm]
            # if not self.is_aegan:
            self.net_with_generator.networks[
                self.generator_subnet_id].layers[-1].normalize_output = True
            # else:
            #    self.net_with_generator.networks[-1].layers[0].normalize_output = l2_norm

        # Generator training
        self.net_with_generator.train(noise_input,
                                      output,
                                      fit_variables=self.generator_fit_vars,
                                      data_filters=tmp_filters,
                                      learning_alg='adam',
                                      train_indxs=np.arange(data_len * 0.9),
                                      test_indxs=np.arange(
                                          data_len * 0.9, data_len),
                                      output_dir=train_log,
                                      opt_params={
                                          'display': 1,
                                          'batch_size': 256,
                                          'epochs_summary': 1,
                                          'use_gpu': False,
                                          'epochs_training': epochs,
                                          'learning_rate': 0.0001
                                      })
        # print(self.net_with_generator.eval_preds(noise_input,output_data=output))

    def extract_generator(self):
        # Extract the generator as a standalone single-subnet NDN (with biases)
        generator_subnet = NDN([
            copy.deepcopy(
                self.net_with_generator.network_list[self.generator_subnet_id])
        ],
                               noise_dist='max',
                               input_dim_list=[
                                   self.net_with_generator.network_list[
                                       self.generator_subnet_id]['input_dims']
                               ])

        # Copy weights
        GeneratorNet._copy_net_params(self.net_with_generator,
                                      generator_subnet,
                                      self.generator_subnet_id, 0)
        if self.current_norm == 'post':
            generator_subnet.networks[-1].layers[-1].normalize_output = True

        return generator_subnet

    def generate_stimulus(self, num_samples=1000, noise_input=None):
        # Generate noise_input if not specified
        if noise_input is None:
            noise_input = np.random.uniform(-2,
                                            2,
                                            size=(num_samples,
                                                  self.noise_size))
        generator = self.extract_generator()
        image_out = generator.generate_prediction(noise_input)
        return image_out

    @staticmethod
    def _copy_net_params(original_NDN_net, target_NDN_net, net_num_original,
                         net_num_target):
        for layer_source, layer_target in zip(
                original_NDN_net.networks[net_num_original].layers,
                target_NDN_net.networks[net_num_target].layers):
            layer_target.copy_layer_params(layer_source)

    def get_gan_subnet(self,
                       input_noise_size,
                       output_shape,
                       generator_type='conv'):
        output_shape = output_shape[1:]
        layers = 5
        if generator_type == 'conv':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[64, 8, 8], 32, 16, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]

        elif generator_type == 'deepconv':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[512, 4, 4], 256, 128, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 2, None],
                reg_list={'d2x': [None, None, None, 0.01, None]},
                verbose=False)
            params['output_shape'] = [None, None, None, output_shape, None]

        elif generator_type == 'lin' or generator_type == 'lin_tanh':
            act = 'lin' if generator_type == 'lin' else 'tanh'
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[512, 1024, [1, 31, 31], 1],
                layer_types=['normal', 'normal', 'normal', 'mask'],
                act_funcs=['tanh', 'tanh', act, 'lin'],
                reg_list={
                    'l2': [0.01, 0.01, 0.01, None],
                },
                verbose=False)
            layers = 4

        elif generator_type == 'hybrid':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[256, [16, 16, 16], 8, 1, 1],
                layer_types=['normal', 'normal', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]
        else:
            raise ValueError(
                f'Generator type \'{generator_type}\' not implemented.')

        params['xstim_n'] = [0]
        params['normalize_output'] = [None] * layers
        params['weights_initializers'] = ['normal'] * (layers - 1) + ['ones']
        params['biases_initializers'] = ['zeros'] * layers
        return params

    def get_encoder(self,
                    noise_size,
                    input_shape,
                    ffnet_in,
                    generator_type='conv'):
        if generator_type == 'conv':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                layer_types=['conv', 'conv', 'conv', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[1, 2, 2, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'lin':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                layer_types=['normal', 'normal', 'normal'],
                act_funcs=[
                    'relu',
                    'relu',
                    'relu',
                ],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'hybrid':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                layer_types=['conv', 'conv', 'normal', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[2, 2, None, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        else:
            raise ValueError(
                f'Generator type \'{generator_type}\' not implemented.')

        params['xstim_n'] = None
        params['ffnet_n'] = [ffnet_in]
        return params
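
A minimal end-to-end sketch for this class, assuming a trained `model` and a precomputed MEI activation for the target neuron (paths and sizes are illustrative):

mei_act = np.load('output/04_mei/000_mei_activations_n.npy')[17]
gen_net = GeneratorNet(model, input_noise_size=128)
gen_net.train_generator_on_neuron(optimize_neuron=17,
                                  data_len=50000,
                                  max_activation=mei_act,
                                  perc=0.9,
                                  epochs=5)
stimuli = gen_net.generate_stimulus(num_samples=1000)
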