Example #1
def check_shallow_env_corrs():
    i_layer = 5
    for i_exp in xrange(1, 21):
        file_base_name = 'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
            i_exp)
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
                                                      file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(
            result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename,
                                   model_shallow,
                                   i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
                                        exp_shallow.iterator,
                                        exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, ("Diff too big for {:s}: {:f}".format(
            file_base_name, diff))
Example #2
def compute_trial_topo_corrs(model, i_layer, train_set, iterator, trial_env):
    """Now split per class."""
    
    trial_acts = compute_trial_acts(model, i_layer, iterator, train_set)
    trial_labels = determine_trial_labels(train_set, iterator)
    topo_corrs_per_class = []
    for i_label in xrange(trial_labels.shape[1]):
        this_trial_mask = trial_labels[:,i_label] == 1
        this_trial_env = trial_env[:, this_trial_mask]
        this_trial_acts = trial_acts[this_trial_mask]
        topo_corrs = compute_topo_corrs(this_trial_env, this_trial_acts)
        topo_corrs_per_class.append(topo_corrs)
    topo_corrs_per_class = np.array(topo_corrs_per_class)
    # TODELETE (old version without the per-class split):
    # topo_corrs = compute_topo_corrs(trial_env, trial_acts)
    # TODO: within-trial corrs -> remove the mean of each trial;
    # between-trial corrs -> use the per-trial means of trial env and trial acts
    # (with keepdims=True); see the sketch after this function
    return topo_corrs_per_class
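A minimal sketch of the within-/between-trial split hinted at in the TODO above. It assumes the last axis of trial_env and trial_acts is the within-trial time/sample axis; the helper name and the axis choice are illustrative assumptions, not taken from the project:

import numpy as np

def split_within_between_trial(trial_env, trial_acts, time_axis=-1):
    # hypothetical helper: the within-trial part is each trial with its own mean
    # removed, the between-trial part keeps only the per-trial means
    # (keepdims=True preserves the shape expected by compute_topo_corrs)
    within_env = trial_env - np.mean(trial_env, axis=time_axis, keepdims=True)
    within_acts = trial_acts - np.mean(trial_acts, axis=time_axis, keepdims=True)
    between_env = np.mean(trial_env, axis=time_axis, keepdims=True)
    between_acts = np.mean(trial_acts, axis=time_axis, keepdims=True)
    return (within_env, within_acts), (between_env, between_acts)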
Example #3
def unit_output_class_corrs(model, iterator, train_set, i_layer):
    # only need targets, ignore trials
    # always get targets as from final layer
    this_final_layer = lasagne.layers.get_all_layers(model)[-1]
    _, targets = get_trials_targets(train_set,
        get_n_sample_preds(this_final_layer), iterator)
    trial_acts = compute_trial_acts(this_final_layer, i_layer, iterator,
        train_set)
    # only take those targets that we have predictions for
    # a bit hacky: we know the targets are the same within each trial, so we just
    # take the last ones, even though they come from overlapping batches
    # targets are #trials x #samples x #classes
    unmeaned_targets = targets - np.mean(targets, axis=1, keepdims=True)
    assert np.all(unmeaned_targets == 0), ("Each trial should only have one "
        "unique label")
    relevant_targets = targets[:,:trial_acts.shape[2]]
    unit_class_corrs = wrap_reshape_topo(corr, trial_acts, relevant_targets,
                      axis_a=(0,2), axis_b=(0,1))
    return unit_class_corrs
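For intuition, a standalone numpy sketch of the same computation: correlate each unit's activations, flattened over trials and samples, with each class indicator. The helper name, the (n_trials, n_units, n_samples) activation layout, and the plain np.corrcoef call are assumptions for illustration; the project code goes through wrap_reshape_topo and corr instead:

import numpy as np

def unit_class_corrs_sketch(trial_acts, targets):
    # trial_acts: (n_trials, n_units, n_samples)
    # targets:    (n_trials, n_target_samples, n_classes), constant within each trial
    n_trials, n_units, n_samples = trial_acts.shape
    n_classes = targets.shape[2]
    acts_flat = trial_acts.transpose(1, 0, 2).reshape(n_units, -1)
    targets_flat = targets[:, :n_samples].transpose(2, 0, 1).reshape(n_classes, -1)
    # correlation matrix over all units and class indicators;
    # keep only the units-vs-classes block
    full_corr = np.corrcoef(acts_flat, targets_flat)
    return full_corr[:n_units, n_units:]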
Example #4
def compute_trial_topo_corrs(model, i_layer, train_set, iterator, trial_env,
                             split_per_class):
    """Now with split per class arg."""
    
    trial_acts = compute_trial_acts(model, i_layer, iterator, train_set)
    trial_labels = determine_trial_labels(train_set, iterator)
    if split_per_class:
        topo_corrs_per_class = []
        for i_label in xrange(trial_labels.shape[1]):
            this_trial_mask = trial_labels[:,i_label] == 1
            this_trial_env = trial_env[:, this_trial_mask]
            this_trial_acts = trial_acts[this_trial_mask]
            topo_corrs = compute_topo_corrs(this_trial_env, this_trial_acts)
            topo_corrs_per_class.append(topo_corrs)
        topo_corrs_per_class = np.array(topo_corrs_per_class)
        # TODO: within-trial corrs -> remove the mean of each trial;
        # between-trial corrs -> use the per-trial means of trial env and trial acts
        # (with keepdims=True)
        return topo_corrs_per_class
    else:
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        return topo_corrs
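A hedged usage sketch for the split_per_class variant, assuming the experiment, model, and trial envelope are prepared exactly as in check_shallow_env_corrs above; the call itself is hypothetical, not project code:

# assuming exp_shallow, model_shallow and trial_env are set up as in
# check_shallow_env_corrs above (hypothetical usage)
overall_corrs = compute_trial_topo_corrs(
    model_shallow, i_layer=5,
    train_set=exp_shallow.dataset.train_set,
    iterator=exp_shallow.iterator,
    trial_env=trial_env,
    split_per_class=False)
per_class_corrs = compute_trial_topo_corrs(
    model_shallow, i_layer=5,
    train_set=exp_shallow.dataset.train_set,
    iterator=exp_shallow.iterator,
    trial_env=trial_env,
    split_per_class=True)
# per_class_corrs has an extra leading axis with one entry per class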