Example #1
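All snippets on this page assume roughly the following preamble; the helper functions (create_pred_fn and friends) come from the original codebase and are not shown here:

# Assumed imports for the snippets below (the logging setup is illustrative):
import logging
import numpy as np
import lasagne

log = logging.getLogger(__name__)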
def get_most_active_units_in_layer(Inputs,
                                   layers,
                                   layer_ind,
                                   filter,
                                   n_units=None,
                                   unique=True,
                                   abs_act=False):
    """Get most active (sorted) units in specific layer for inputs.

    Inputs: Inputs to calculate unit activations with
    layers: Complete list of network layers
    layer_ind: Index in layers in which unit activation should be computed
    filter: Specific filter(s) in which the units are. If None: All filters in layer
    n_Units: Number of most active units. If None: all units sorted (default: None)
    unique: If only the most active Unit for each input should be kept. (default: True)
    abs_act: If absolute unit activity should be used. (default: False)

    Returns:
    Most active units (Nx4) Input|Filter|X|Y
    """

    # `layers` may also be a single final layer; expand it to the full layer list
    if not isinstance(layers, list):
        layers = lasagne.layers.get_all_layers(layers)
    if filter is None:
        filter = np.arange(layers[layer_ind].output_shape[1])

    pred_fn = create_pred_fn(layers)
    output = pred_fn(Inputs)

    return get_most_active_units_in_layer_from_output(output,
                                                      layer_ind,
                                                      filter,
                                                      n_units=n_units,
                                                      unique=unique,
                                                      abs_act=abs_act)
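A hypothetical call (inputs and final_layer are placeholders for a batch of network inputs and a trained final layer):

# The ten most active units, by absolute activation, across all filters
# of the fourth layer. Each row of `units` is Input|Filter|X|Y.
units = get_most_active_units_in_layer(inputs, final_layer, layer_ind=3,
                                       filter=None, n_units=10, abs_act=True)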
Example #2
    def __init__(self,
                 ExtractorData,
                 layer_ind,
                 finalReshapeAux,
                 filter_batch_size=None):
        self.layer_ind = layer_ind

        self.model_tmp, self.layer_tmp = check_if_finalreshape_is_needed(
            ExtractorData.model, layer_ind)
        self.model_RF = receptive_field_build_deconv_layers(
            self.model_tmp[self.layer_tmp],
            self.model_tmp[1],
            use_learned_W=False,
            X_reshape=finalReshapeAux)
        self.pred_fn = create_pred_fn(self.model_tmp[self.layer_tmp])

        all_outputs = list()

        if filter_batch_size is None:
            filter_splits = 1
        else:
            # integer division: number of batches needed to cover all inputs
            filter_splits = len(ExtractorData.inputs) // filter_batch_size + 1
        input_batches = np.array_split(np.arange(len(ExtractorData.inputs)),
                                       filter_splits)
        for batch in input_batches:
            if len(batch) == 0:
                break
            tmp = self.pred_fn(list(ExtractorData.inputs[batch])).astype(
                np.float16)
            tmp[tmp < 0] = 0
            all_outputs.extend(tmp)
        self.outputs = np.asarray(all_outputs, dtype=np.float16)
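Two details worth noting in this constructor: activations are cast to float16, which halves the memory needed to cache every unit's output, and tmp[tmp < 0] = 0 rectifies them (a ReLU), so only positive activations survive for the later receptive-field analysis.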
Example #3
def recompute_bnorm_layer_statistics(final_layer, dataset, iterator):
    all_layers = lasagne.layers.get_all_layers(final_layer)
    bnorm_layers = [
        l for l in all_layers if l.__class__.__name__ == 'BatchNormLayer'
    ]
    for bnorm_layer in bnorm_layers:
        log.info("Compiling bnorm layer...")
        this_layer_pred_fn = create_pred_fn(bnorm_layer)
        log.info("Predictions for bnorm layer...")
        outs = [
            this_layer_pred_fn(b[0])
            for b in iterator.get_batches(dataset, shuffle=False)
        ]
        outs = np.concatenate(outs)
        # invert the learned affine transform: (y - beta) / gamma
        outs_before_transform = (
            (outs - bnorm_layer.beta.get_value()[None, :, None, None]) /
            bnorm_layer.gamma.get_value()[None, :, None, None])
        # invert the normalization: divide by inv_std, add back the mean
        outs_before_transform = (
            (outs_before_transform /
             bnorm_layer.inv_std.get_value()[None, :, None, None]) +
            bnorm_layer.mean.get_value()[None, :, None, None])
        mean_this_layer = np.mean(outs_before_transform, axis=(0, 2, 3))
        stds_this_layer = np.std(outs_before_transform, axis=(0, 2, 3))
        bnorm_layer.mean.set_value(mean_this_layer)
        bnorm_layer.inv_std.set_value(1.0 / stds_this_layer)
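The two steps above invert the standard batch-norm forward pass. A minimal sketch, using the same parameter names, to make the order of operations explicit:

# forward pass:  y = gamma * (x - mean) * inv_std + beta
# inverse:       x = ((y - beta) / gamma) / inv_std + mean
def invert_bnorm(y, beta, gamma, inv_std, mean):
    return ((y - beta) / gamma) / inv_std + mean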
Example #4
def compute_trial_acts(model, i_layer, iterator, train_set):
    """Compute activations per trial per sample for given layer of the model.
    
    Parameters
    ----------
    model: Lasagne layer
        Final layer of the model.
    i_layer: int
        Index of layer to compute activations for.
    iterator: DatasetIterator
        Iterator to get batches from.
    train_set: Dataset (Cnt)
        Dataset to use.
    
    Returns
    -------
    trial_acts: 3darray of float
        Activations per trial per sample, shape #trials x #channels x #samples.
    """
    # compute number of inputs per trial
    i_trial_starts, i_trial_ends = compute_trial_start_end_samples(
        train_set.y,
        check_trial_lengths_equal=True,
        input_time_length=iterator.input_time_length)
    # +1 since trial ends is inclusive
    n_trial_len = i_trial_ends[0] - i_trial_starts[0] + 1
    n_inputs_per_trial = int(
        np.ceil(n_trial_len / float(iterator.n_sample_preds)))
    log.info("Create theano function...")
    all_layers = lasagne.layers.get_all_layers(model)
    all_out_fn = create_pred_fn(all_layers[i_layer])
    assert (iterator.input_time_length == get_input_time_length(model))
    assert (iterator.n_sample_preds == get_n_sample_preds(model))
    log.info("Compute activations...")
    all_outs_per_batch = [
        all_out_fn(batch[0])
        for batch in iterator.get_batches(train_set, False)
    ]
    batch_sizes = [
        len(batch[0]) for batch in iterator.get_batches(train_set, False)
    ]
    all_outs_per_batch = np.array(all_outs_per_batch)
    n_trials = len(i_trial_starts)
    log.info("Transform to trial activations...")
    trial_acts = get_trial_acts(all_outs_per_batch,
                                batch_sizes,
                                n_trials=n_trials,
                                n_inputs_per_trial=n_inputs_per_trial,
                                n_trial_len=n_trial_len,
                                n_sample_preds=iterator.n_sample_preds)
    log.info("Done.")
    return trial_acts
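A hypothetical call, with final_layer, iterator and train_set standing in for objects from a trained experiment:

# Activations of the sixth layer for every trial in the training set.
trial_acts = compute_trial_acts(final_layer, i_layer=5, iterator=iterator,
                                train_set=train_set)
# trial_acts.shape == (n_trials, n_channels, n_samples)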
Example #5
def load_exp_pred_fn(basename, after_softmax):
    exp, model = load_exp_and_model(basename)
    if not after_softmax:
        # replace softmax with identity to get pre-softmax outputs
        # (__name__ works on Python 2 and 3; func_name was Python 2 only)
        assert model.nonlinearity.__name__ in ('softmax', 'safe_softmax')
        model.nonlinearity = identity
    # load dataset
    log.info("Load dataset")
    exp.dataset.load()
    log.info("Create prediction function...")
    pred_fn = create_pred_fn(model)
    log.info("Done.")
    return exp, pred_fn
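Bypassing the softmax yields the raw pre-softmax scores; since the softmax squashes magnitudes into a probability simplex, the unnormalized scores preserve more information, which is why the comment above recommends them for computing correlations.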
Example #6
def compute_rand_preds_topo_corr(exp, rand_model, trial_env):
    pred_fn = create_pred_fn(rand_model)
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    batches = [
        b[0] for b in exp.iterator.get_batches(train_set, shuffle=False)
    ]
    all_batch_sizes = [len(b) for b in batches]

    all_preds = [pred_fn(b) for b in batches]
    preds_per_trial = compute_preds_per_trial(train_set.y, all_preds,
                                              all_batch_sizes,
                                              exp.iterator.input_time_length)
    preds_per_trial = np.array(preds_per_trial)
    # transpose from trials x samples x classes to trials x classes x samples,
    # as compute_topo_corrs expects the latter
    rand_topo_corrs = compute_topo_corrs(trial_env,
                                         preds_per_trial.transpose(0, 2, 1))
    return rand_topo_corrs
Example #7
def compute_pred_target_labels_for_cnt_exp(exp, model):
    exp.dataset.ensure_is_loaded()
    test_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['test']
    all_batches = list(exp.iterator.get_batches(test_set, shuffle=False))
    log.info("Compile prediction function...")
    pred_fn = create_pred_fn(model)
    log.info("Done.")
    log.info("Computing predictions")
    all_preds = []
    for batch in all_batches:
        all_preds.append(pred_fn(batch[0]))
    log.info("Done")
    # the monitor at index 2 should be the CntTrialMisclassMonitor;
    # otherwise you would have to loop over exp.monitors to find it
    monitor = exp.monitors[2]
    assert monitor.__class__.__name__ == 'CntTrialMisclassMonitor'
    all_batch_sizes = [len(b[0]) for b in all_batches]
    pred_labels, target_labels = monitor.compute_pred_and_target_labels(
        test_set, all_preds, all_batch_sizes)
    return pred_labels, target_labels
Example #8
def get_receptive_field_mask(Units, l_RF, combined_units=False):
    """Get the receptive field of the specified units.
    Only works for 4D layers.

    Units: Active units in the input layer of l_RF (Nx4) Input|Filter|X|Y
    l_RF: Receptive field layer (original input layer)
    combined_units: If True, combine all units into a single receptive field;
                    otherwise one receptive field per unit (default: False)

    Returns:
    RF_output: Activation of units in the receptive field layer.
               Activation > 0: unit lies in the receptive field
    """
    bot_layer = l_RF
    # the first entry of get_all_layers is the network's input layer
    top_layer = lasagne.layers.get_all_layers(l_RF)[0]

    assert len(top_layer.shape) == 4 and len(bot_layer.output_shape) == 4, \
        "Currently only works for 4D tensors"

    # drop the Input column if present, keeping Filter|X|Y
    if Units.shape[1] == 4:
        Units = Units[:, 1:]

    n_Units = len(Units)

    # one mask row per unit, or a single shared row if combined_units
    in_indcs = np.arange(n_Units)
    if combined_units:
        in_indcs = 0

    # one-hot input batch: 1 at each unit's Filter|X|Y position
    In_Units = np.zeros(
        (n_Units, top_layer.shape[1], top_layer.shape[2], top_layer.shape[3]),
        dtype=np.float32)
    In_Units[in_indcs, Units[:, 0], Units[:, 1], Units[:, 2]] = 1

    assert np.array_equal(top_layer.shape[1:], In_Units.shape[1:]), \
        "Unit mask shape must match the input layer shape"

    pred_fn = create_pred_fn(bot_layer)
    RF_output = pred_fn(In_Units)

    return RF_output
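A hypothetical follow-up, assuming units comes from a call like the one in Example #1 and input_layer is the receptive-field layer (both names illustrative):

rf_acts = get_receptive_field_mask(units, l_RF=input_layer)
rf_mask = rf_acts > 0  # per the docstring, activation > 0 means inside the RF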