Example #1
def check_shallow_env_corrs():
    i_layer = 5
    for i_exp in xrange(1, 21):
        file_base_name = 'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
            i_exp)
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
                                                      file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(
            result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename,
                                   model_shallow,
                                   i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
                                        exp_shallow.iterator,
                                        exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, ("Diff too big for {:s}: {:f}".format(
            file_base_name, diff))
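compute_topo_corrs itself does not appear in these examples. Purely as orientation, here is a minimal sketch of what such a function might do, assuming the shapes implied by the other examples: trial_env as (n_bands, n_trials, n_chans, n_samples), trial_acts as (n_trials, n_units, n_samples), and matching sample counts. The name sketch_compute_topo_corrs and the (n_bands, n_units, n_chans) output layout are illustration-only assumptions, not the project's implementation.

# Hypothetical sketch only -- the project's compute_topo_corrs is not shown here.
# Assumed shapes: trial_env (n_bands, n_trials, n_chans, n_samples),
#                 trial_acts (n_trials, n_units, n_samples), same n_samples.
import numpy as np

def sketch_compute_topo_corrs(trial_env, trial_acts):
    n_bands, n_trials, n_chans, n_samples = trial_env.shape
    n_units = trial_acts.shape[1]
    # Concatenate all trials along time for each (band, chan) envelope and each unit.
    env_flat = trial_env.transpose(0, 2, 1, 3).reshape(n_bands, n_chans, -1)
    acts_flat = trial_acts.transpose(1, 0, 2).reshape(n_units, -1)
    corrs = np.empty((n_bands, n_units, n_chans))
    for i_band in range(n_bands):
        for i_unit in range(n_units):
            for i_chan in range(n_chans):
                corrs[i_band, i_unit, i_chan] = np.corrcoef(
                    env_flat[i_band, i_chan], acts_flat[i_unit])[0, 1]
    return corrs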
Example #2
def compute_trial_topo_corrs(model, i_layer, train_set, iterator, trial_env,
                             split_per_class):
    """Compute trial topographic correlations, optionally split per class."""
    
    trial_acts = compute_trial_acts(model, i_layer, iterator, train_set)
    trial_labels = determine_trial_labels(train_set, iterator)
    if split_per_class:
        topo_corrs_per_class = []
        for i_label in xrange(trial_labels.shape[1]):
            this_trial_mask = trial_labels[:,i_label] == 1
            this_trial_env = trial_env[:, this_trial_mask]
            this_trial_acts = trial_acts[this_trial_mask]
            topo_corrs = compute_topo_corrs(this_trial_env, this_trial_acts)
            topo_corrs_per_class.append(topo_corrs)
        topo_corrs_per_class = np.array(topo_corrs_per_class)
        # TODO: compute within-trial corrs -> remove the mean of each trial;
        # compute between-trial corrs -> use the per-trial means of trial env
        # and trial acts (keepdims=True)
        return topo_corrs_per_class
    else:
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        return topo_corrs
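# Hedged sketch of the within/between-trial split hinted at in the TODO above;
# split_within_between is a made-up helper, not part of the project.
import numpy as np

def split_within_between(x):
    # x: trialwise array with the sample axis last, e.g. trial_acts with shape
    # (n_trials, n_units, n_samples) or trial_env with shape
    # (n_bands, n_trials, n_chans, n_samples).
    trial_means = np.mean(x, axis=-1, keepdims=True)
    within = x - trial_means                         # per-trial mean removed
    between = np.broadcast_to(trial_means, x.shape)  # only the per-trial means
    return within, between
# Within-trial correlations would then use the `within` parts of trial_env and
# trial_acts; between-trial correlations, the per-trial means.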
def compute_rand_preds_topo_corr(exp, rand_model, trial_env):
    pred_fn = create_pred_fn(rand_model)
    train_set = exp.dataset_provider.get_train_merged_valid_test(exp.dataset)['train']
    batches = [b[0] for b in exp.iterator.get_batches(train_set, shuffle=False)]
    all_batch_sizes = [len(b) for b in batches]
    
    all_preds = [pred_fn(b) for b in batches]
    preds_per_trial = compute_preds_per_trial(train_set.y, all_preds, all_batch_sizes,
        exp.iterator.input_time_length)
    preds_per_trial = np.array(preds_per_trial)
    # transpose from trials x samples x classes to trials x classes x samples,
    # since compute_topo_corrs expects that layout
    rand_topo_corrs = compute_topo_corrs(trial_env, 
        preds_per_trial.transpose(0,2,1)) 
    return rand_topo_corrs
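# Quick illustration of the axis reordering above; the sizes (40 trials,
# 522 samples, 4 classes) are assumptions for illustration, not project values.
import numpy as np
_example_preds = np.zeros((40, 522, 4))              # trials x samples x classes
assert _example_preds.transpose(0, 2, 1).shape == (40, 4, 522)  # trials x classes x samples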
def compute_env_class_corr(exp, trial_env):
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']

    i_trial_starts, i_trial_ends = compute_trial_start_end_samples(
        train_set.y,
        check_trial_lengths_equal=True,
        input_time_length=exp.iterator.input_time_length)
    assert len(i_trial_ends) == trial_env.shape[1]
    # +1 as i_end is inclusive
    y_signal = [exp.dataset.train_set.y[i_start:i_end+1]
        for i_start, i_end in zip(i_trial_starts, i_trial_ends)]
    y_signal = np.array(y_signal).transpose(0,2,1)
    assert y_signal.shape[2] == trial_env.shape[3]
    topo_corrs = compute_topo_corrs(trial_env, y_signal)
    return topo_corrs
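The "+1 as i_end is inclusive" comment is what makes the shape assertion work out: with inclusive trial end samples, each slice y[i_start:i_end + 1] has exactly i_end - i_start + 1 samples. A small walk-through of the y_signal construction with assumed sizes (two trials of 500 samples, four classes; none of these numbers come from the project):

import numpy as np

n_classes, n_samples_per_trial = 4, 500
y = np.zeros((2000, n_classes))                      # continuous one-hot labels
i_trial_starts = np.array([100, 1100])
i_trial_ends = i_trial_starts + n_samples_per_trial - 1   # inclusive end samples

y_signal = np.array([y[i_start:i_end + 1]
                     for i_start, i_end in zip(i_trial_starts, i_trial_ends)])
y_signal = y_signal.transpose(0, 2, 1)               # trials x classes x samples
assert y_signal.shape == (2, n_classes, n_samples_per_trial)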
Example #5
def compute_env_class_corr(exp, trial_env):
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']

    i_trial_starts, i_trial_ends = compute_trial_start_end_samples(
        train_set.y,
        check_trial_lengths_equal=True,
        input_time_length=exp.iterator.input_time_length)
    assert len(i_trial_ends) == trial_env.shape[1]
    # +1 as i_end is inclusive
    y_signal = [
        exp.dataset.train_set.y[i_start:i_end + 1]
        for i_start, i_end in zip(i_trial_starts, i_trial_ends)
    ]
    y_signal = np.array(y_signal).transpose(0, 2, 1)
    assert y_signal.shape[2] == trial_env.shape[3]
    topo_corrs = compute_topo_corrs(trial_env, y_signal)
    return topo_corrs
Example #6
def compute_rand_preds_topo_corr(exp, rand_model, trial_env):
    pred_fn = create_pred_fn(rand_model)
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    batches = [
        b[0] for b in exp.iterator.get_batches(train_set, shuffle=False)
    ]
    all_batch_sizes = [len(b) for b in batches]

    all_preds = [pred_fn(b) for b in batches]
    preds_per_trial = compute_preds_per_trial(train_set.y, all_preds,
                                              all_batch_sizes,
                                              exp.iterator.input_time_length)
    preds_per_trial = np.array(preds_per_trial)
    # transpose from trials x samples x classes to trials x classes x samples,
    # since compute_topo_corrs expects that layout
    rand_topo_corrs = compute_topo_corrs(trial_env,
                                         preds_per_trial.transpose(0, 2, 1))
    return rand_topo_corrs
def compute_trial_topo_corrs(model, i_layer, train_set, iterator, trial_env):
    """Compute trial topographic correlations, split per class."""
    
    trial_acts = compute_trial_acts(model, i_layer, iterator, train_set)
    trial_labels = determine_trial_labels(train_set, iterator)
    topo_corrs_per_class = []
    for i_label in xrange(trial_labels.shape[1]):
        this_trial_mask = trial_labels[:,i_label] == 1
        this_trial_env = trial_env[:, this_trial_mask]
        this_trial_acts = trial_acts[this_trial_mask]
        topo_corrs = compute_topo_corrs(this_trial_env, this_trial_acts)
        topo_corrs_per_class.append(topo_corrs)
    topo_corrs_per_class = np.array(topo_corrs_per_class)
    # TODELETE (old version without splitting the input per label):
    # topo_corrs = compute_topo_corrs(trial_env, trial_acts)
    # TODO: compute within-trial corrs -> remove the mean of each trial;
    # compute between-trial corrs -> use the per-trial means of trial env
    # and trial acts (keepdims=True)
    return topo_corrs_per_class
def check_shallow_env_corrs():
    i_layer = 5
    for i_exp in xrange(1,21):
        file_base_name = 'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
            i_exp)
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
            file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename, model_shallow, i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
            exp_shallow.iterator, exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, (
            "Diff too big for {:s}: {:f}".format(file_base_name, diff))