def check_shallow_env_corrs():
    """Recompute envelope-activation correlations for the 20 shallow
    experiments and verify they match the previously stored results.

    For each experiment, the trial envelope and trial activations at layer 5
    are recomputed, correlated, and compared against the correlations stored
    in ``<base>5.env_corrs.npy``.

    Raises:
        AssertionError: if the mean absolute difference between the fresh
            and stored correlations for any experiment exceeds 1e-3.
    """
    i_layer = 5
    # range instead of xrange: identical behavior here on Python 2 and 3
    # (only 20 elements, so materializing a list is negligible).
    for i_exp in range(1, 21):
        file_base_name = (
            'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
                i_exp))
        # NOTE(review): np.load on a pickle file needs allow_pickle=True on
        # newer numpy — confirm against the numpy version in use.
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
                                                      file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(
            result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename, model_shallow, i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
                                        exp_shallow.iterator,
                                        exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, ("Diff too big for {:s}: {:f}".format(
            file_base_name, diff))
def create_env_class_corr_files_after_load(base_name, with_square, exp, model):
    """Compute envelope/class correlations for an already-loaded experiment
    and write them to ``.npy`` files next to ``base_name``.

    Saves two arrays: the correlations of the trained model's predictions
    (``<base>.env_corrs.[square.]class.npy``) and of a freshly created
    randomly initialized model (``<base>.env_rand_corrs.[square.]class.npy``).

    Parameters
    ----------
    base_name : str
        Path prefix of the experiment files (``.env.npy``, ``.yaml``).
    with_square : bool
        Square the envelope before averaging; also prefixes the output
        file names with ``square.``.
    exp : experiment object with ``dataset`` and ``dataset_provider``.
    model : trained network used to determine the envelope layout.
    """
    merged = exp.dataset_provider.get_train_merged_valid_test(exp.dataset)
    train_set = merged['train']
    trial_env = load_trial_env(
        base_name + '.env.npy',
        model,
        i_layer=26,  # 26 is last max-pool i think
        train_set=train_set,
        n_inputs_per_trial=2,
        square_before_mean=with_square)
    topo_corrs = compute_env_class_corr(exp, trial_env)
    rand_model = create_experiment(base_name + '.yaml').final_layer
    rand_topo_corrs = compute_rand_preds_topo_corr(exp, rand_model, trial_env)
    suffix = 'square.class.npy' if with_square else 'class.npy'
    np.save('{:s}.env_corrs.{:s}'.format(base_name, suffix), topo_corrs)
    np.save('{:s}.env_rand_corrs.{:s}'.format(base_name, suffix),
            rand_topo_corrs)
# NOTE(review): exact duplicate of the earlier create_env_class_corr_files_after_load
# definition — this later definition shadows the earlier one at import time.
# Consider deleting one of the two.
def create_env_class_corr_files_after_load(base_name, with_square, exp, model):
    """Compute envelope/class correlations for a loaded experiment and save
    them (trained-model and random-model variants) as ``.npy`` files.

    Parameters
    ----------
    base_name : str
        Path prefix of the experiment files (``.env.npy``, ``.yaml``).
    with_square : bool
        Square the envelope before averaging; prefixes output names
        with ``square.``.
    exp : experiment object providing the dataset and dataset provider.
    model : trained network used to load the trial envelope.
    """
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    trial_env = load_trial_env(base_name + '.env.npy',
                               model,
                               i_layer=26,  # 26 is last max-pool i think
                               train_set=train_set,
                               n_inputs_per_trial=2,
                               square_before_mean=with_square)
    topo_corrs = compute_env_class_corr(exp, trial_env)
    rand_model = create_experiment(base_name + '.yaml').final_layer
    rand_topo_corrs = compute_rand_preds_topo_corr(exp, rand_model, trial_env)
    if with_square:
        file_name_end = 'square.class.npy'
    else:
        file_name_end = 'class.npy'
    np.save('{:s}.env_corrs.{:s}'.format(base_name, file_name_end),
            topo_corrs)
    np.save('{:s}.env_rand_corrs.{:s}'.format(base_name, file_name_end),
            rand_topo_corrs)
# NOTE(review): exact duplicate of the earlier check_shallow_env_corrs
# definition — this later definition shadows the earlier one at import time.
# Consider deleting one of the two.
def check_shallow_env_corrs():
    """Recompute layer-5 envelope correlations for the 20 shallow
    experiments and assert they match the stored ``env_corrs`` files.

    Raises:
        AssertionError: if the mean absolute difference between the fresh
            and stored correlations for any experiment exceeds 1e-3.
    """
    i_layer = 5
    # range instead of xrange: identical behavior here on Python 2 and 3
    # (only 20 elements, so materializing a list is negligible).
    for i_exp in range(1, 21):
        file_base_name = (
            'data/models-backup/paper/ours/cnt/shallow//car/{:d}'.format(
                i_exp))
        result = np.load(file_base_name + '.result.pkl')
        log.info("Running {:d} of {:d}:\n{:s}".format(i_exp, 20,
                                                      file_base_name))
        exp_shallow, model_shallow = load_exp_and_model(file_base_name)
        exp_shallow.dataset.load()
        env_filename = dataset_to_env_file(
            result.parameters['dataset_filename'])
        trial_env = load_trial_env(env_filename, model_shallow, i_layer,
                                   exp_shallow.dataset.sets[0],
                                   n_inputs_per_trial=2)
        trial_acts = compute_trial_acts(model_shallow, i_layer,
                                        exp_shallow.iterator,
                                        exp_shallow.dataset.train_set)
        log.info("Compute topo corrs")
        topo_corrs = compute_topo_corrs(trial_env, trial_acts)
        topo_corrs_old = np.load(file_base_name + '5.env_corrs.npy')
        diff = np.mean(np.abs(topo_corrs - topo_corrs_old))
        log.info("Diff ok {:f}".format(diff))
        assert diff < 1e-3, (
            "Diff too big for {:s}: {:f}".format(file_base_name, diff))
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    """Compute per-layer, label-split envelope correlations and save them.

    For each layer in ``i_all_layers``, correlates the trial envelope with
    the activations of both the trained model and a freshly created randomly
    initialized model (``split_per_class=True``), saving the results as
    ``<base>.labelsplitted.env_corrs.[square.]<layer>.npy`` and
    ``<base>.labelsplitted.env_rand_corrs.[square.]<layer>.npy``.

    Parameters
    ----------
    base_name : str
        Path prefix of the experiment files.
    i_all_layers : iterable of int
        Layer indices to process.
    with_square : bool
        Square the envelope before averaging; prefixes output file
        names with ``square.``.
    """
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, i_layer, train_set,
                                   n_inputs_per_trial=2,
                                   square_before_mean=with_square)
        # Correlations for the trained and the randomly initialized model,
        # both split per class label.
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set,
                                              exp.iterator, trial_env,
                                              split_per_class=True)
        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer,
                                                   train_set, exp.iterator,
                                                   trial_env,
                                                   split_per_class=True)
        suffix = '{:d}.npy'.format(i_layer)
        if with_square:
            suffix = 'square.' + suffix
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(base_name, suffix),
                topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(base_name,
                                                                suffix),
                rand_topo_corrs)
    return
# NOTE(review): duplicate of the earlier create_topo_env_corrs_files — this
# later definition shadows the earlier one at import time, and unlike that
# one it does NOT pass split_per_class=True to compute_trial_topo_corrs
# (yet still writes files named "labelsplitted"). Confirm which variant is
# intended and delete the other.
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    """Compute per-layer envelope correlations and save them to ``.npy``
    files named ``<base>.labelsplitted.env_corrs.[square.]<layer>.npy``
    (and an ``env_rand_corrs`` counterpart for a randomly initialized
    model).

    Parameters
    ----------
    base_name : str
        Path prefix of the experiment files.
    i_all_layers : iterable of int
        Layer indices to process.
    with_square : bool
        Square the envelope before averaging; prefixes output file
        names with ``square.``.
    """
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, i_layer, train_set,
                                   n_inputs_per_trial=2,
                                   square_before_mean=with_square)
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set,
                                              exp.iterator, trial_env)
        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer,
                                                   train_set, exp.iterator,
                                                   trial_env)
        file_name_end = ('square.{:d}.npy'.format(i_layer) if with_square
                         else '{:d}.npy'.format(i_layer))
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(base_name,
                                                           file_name_end),
                topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(
            base_name, file_name_end), rand_topo_corrs)
    return