Example #1
def check_shallow_env_corrs():
    i_layer = 5
    for i_exp in xrange(1, 21):
        file_base_name = 'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
            i_exp)
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
                                                      file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(
            result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename,
                                   model_shallow,
                                   i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
                                        exp_shallow.iterator,
                                        exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, ("Diff too big for {:s}: {:f}".format(
            file_base_name, diff))
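A minimal driver sketch for the check above, assuming the module is importable and logging is configured; the helpers (load_exp_and_model, load_trial_env, compute_trial_acts, compute_topo_corrs) come from the surrounding code base and are not shown here:

import logging
logging.basicConfig(level=logging.INFO)
# Re-runs the envelope-correlation computation for all 20 shallow experiments and
# asserts each recomputed result matches the stored .env_corrs.npy file to within
# a mean absolute difference of 1e-3.
check_shallow_env_corrs()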
Example #2
def load_exp_pred_fn(basename, after_softmax):
    exp, model = load_exp_and_model(basename)
    if not after_softmax:
        # replace softmax by identity to get better correlations
        assert (model.nonlinearity.func_name == 'softmax'
                or model.nonlinearity.func_name == 'safe_softmax')
        model.nonlinearity = identity
    # load dataset
    log.info("Load dataset")
    exp.dataset.load()
    log.info("Create prediction function...")
    pred_fn = create_pred_fn(model)
    log.info("Done.")
    return exp, pred_fn
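A hedged usage sketch; the basename is a placeholder, and with after_softmax=False the final softmax has been replaced by the identity, so the prediction function returns raw (pre-softmax) outputs:

exp, pred_fn = load_exp_pred_fn('data/models/example-model',  # hypothetical path
                                after_softmax=False)
# Fetch one batch the same way the other examples do and run the prediction function on it.
batches = list(exp.iterator.get_batches(exp.dataset.train_set, shuffle=False))
inputs, targets = batches[0]
outputs = pred_fn(inputs)  # raw class scores instead of softmax probabilities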
Example #3
def add_labels_to_cnt_exp_result(basename):
    result_file_name = basename + '.result.pkl'
    result = np.load(result_file_name)
    # If the result still contains only the dummy targets, compute and add
    # the real targets and predictions.
    if np.array_equal(result.targets, [3, 4, 1, 2, 3, 4]):
        exp, model = load_exp_and_model(basename)
        pred_labels, target_labels = compute_pred_target_labels_for_cnt_exp(
            exp, model)
        # add targets
        result.predictions = pred_labels
        result.targets = target_labels
        pickle.dump(result, open(result_file_name, 'w'))
    else:
        log.warn("Targets/predictions already there for {:s}".format(basename))
Example #4
def create_unit_output_class_corrs(basename, i_all_layers):
    exp, model = load_exp_and_model(basename)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(basename + '.yaml').final_layer
    for i_layer in i_all_layers:
        trained_corrs = unit_output_class_corrs(model, exp.iterator,
            train_set, i_layer)
        untrained_corrs = unit_output_class_corrs(rand_model, exp.iterator,
            train_set, i_layer)
        file_name_end = '{:d}.npy'.format(i_layer)
        trained_filename = '{:s}.unit_class_corrs.{:s}'.format(basename,
            file_name_end)
        untrained_filename = '{:s}.rand_unit_class_corrs.{:s}'.format(basename,
            file_name_end)
        log.info("Saving to {:s} and {:s}".format(trained_filename,
            untrained_filename))
        np.save(trained_filename, trained_corrs)
        np.save(untrained_filename, untrained_corrs)
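A hedged call sketch; the basename and layer indices are illustrative only. For each requested layer the function writes one <basename>.unit_class_corrs.<layer>.npy file for the trained model and one .rand_unit_class_corrs. file for a freshly initialized model:

create_unit_output_class_corrs('data/models/example-model',  # hypothetical basename
                               i_all_layers=[3, 5, 7])       # hypothetical layer indices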
Example #6
    def __init__(self,
                 modelpath,
                 savepath,
                 classes,
                 layer_indeces,
                 nUnits=50,
                 nFilters=None,
                 use_mean_filter_diff=False):
        print 'Init extractor'
        exp, model = load_exp_and_model(modelpath, set_invalid_to_NaN=False)
        self.exp = exp
        self.model = model
        self.update_output = True

        exp.dataset.load()

        datasets = exp.dataset_provider.get_train_merged_valid_test(
            exp.dataset)
        exp.iterator.batch_size = 999999  # dirty hack for simply getting all inputs
        test_batches = list(
            exp.iterator.get_batches(datasets['train'], shuffle=False))
        inputs, targets = test_batches[0]

        targets = targets.reshape((len(inputs), -1, 4))
        targets = targets.sum(axis=1).argmax(axis=1)

        self.inputs = inputs.astype(np.float32)
        self.targets = targets.astype(np.int8)

        self.n_chans = lasagne.layers.get_all_layers(model)[0].shape[1]
        self.sensor_names = exp.dataset.test_set.sensor_names
        self.sampling_rate = exp.dataset.test_set.signal_processor.cnt_preprocessors[
            1][1]['newfs']

        self.classes = classes
        self.layer_indeces = layer_indeces
        self.nUnits = nUnits
        self.nFilters = nFilters
        self.use_mean_filter_diff = use_mean_filter_diff

        self.savepath = savepath
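An instantiation sketch; the class name Extractor is assumed (only its __init__ is shown above), and all paths and indices are placeholders:

extractor = Extractor(  # hypothetical class name
    modelpath='data/models/example-model',
    savepath='data/results/example-extraction',
    classes=[0, 1, 2, 3],      # hypothetical class identifiers
    layer_indeces=[5, 7],      # hypothetical layer indices
    nUnits=50)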
Example #8
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, 
            i_layer, train_set, n_inputs_per_trial=2, square_before_mean=with_square)
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set, 
            exp.iterator, trial_env, split_per_class=True)
        
        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer, train_set, 
            exp.iterator, trial_env, split_per_class=True)
        file_name_end = '{:d}.npy'.format(i_layer)
        if with_square:
            file_name_end = 'square.' + file_name_end
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(
            base_name, file_name_end), topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(
            base_name, file_name_end), rand_topo_corrs)
    return
Example #9
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, 
            i_layer, train_set, n_inputs_per_trial=2, square_before_mean=with_square)
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set, 
            exp.iterator, trial_env)
        
        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer, train_set, 
            exp.iterator, trial_env)
        file_name_end = '{:d}.npy'.format(i_layer)
        if with_square:
            file_name_end = 'square.' + file_name_end
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(
            base_name, file_name_end), topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(
            base_name, file_name_end), rand_topo_corrs)
    return
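A call sketch covering either variant above (with or without the per-class split); the basename and layer indices are placeholders. Running with with_square=True writes 'square.'-prefixed .npy files, otherwise plain ones:

for with_square in (False, True):
    create_topo_env_corrs_files('data/models/example-model',  # hypothetical basename
                                i_all_layers=[3, 5, 7],       # hypothetical layer indices
                                with_square=with_square)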
Example #10
def create_env_class_corr_file(base_name, with_square):
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    create_env_class_corr_files_after_load(base_name, with_square, exp, model)
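A call sketch with placeholder arguments:

create_env_class_corr_file('data/models/example-model', with_square=False)  # hypothetical basename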
Example #11
def get_dataset(modelpath):
    exp, model = load_exp_and_model(modelpath, set_invalid_to_NaN=False)

    datasets = exp.dataset_provider.get_train_merged_valid_test(exp.dataset)
    
    return exp, model, datasets
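A usage sketch; the model path is a placeholder, and the returned datasets dict is assumed to contain at least a 'train' split, as in the other examples above:

exp, model, datasets = get_dataset('data/models/example-model')  # hypothetical path
train_set = datasets['train']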