def fit_metric(model_name, subject_data_file, threshold, C, nfolds, normalize, pca, method, rank):
    """Cross-validate a learned metric on CNN features against human judgments.

    Parameters
    ----------
    model_name : tuple(str, str)
        (net, layer) pair naming the feature files under outputs/.
    subject_data_file : str
        Path to a space-separated table of human predictions.
    threshold : float
        Agreement threshold forwarded to split_data.
    C : float
        Regularization cost for the metric learner.
    nfolds : int
        Number of random train/test splits to average over.
    normalize : bool
        If True, standardize features (scaler fit on train fold only).
    pca : bool
        If True, load the PCA-reduced feature file instead of the raw one.
    method : str
        Metric parameterization: 'low_rank' or 'diag'.
    rank : int
        Rank used when method == 'low_rank'.

    Returns
    -------
    dict
        {'Accuracy': mean test accuracy, 'AccuracySD': its std over folds}.

    Raises
    ------
    ValueError
        If method is not 'low_rank' or 'diag'.
    """
    if method not in ('low_rank', 'diag'):
        # Fail fast: previously an unrecognized method skipped both branches
        # and crashed later with a NameError on A.
        raise ValueError("method must be 'low_rank' or 'diag', got {0!r}".format(method))
    subject_data = pd.read_table(subject_data_file, sep=" ")
    net, layer = model_name
    # SECURITY NOTE(review): eval() executes arbitrary code from the labels
    # file; it is assumed trusted here. If the file holds a plain Python
    # literal, ast.literal_eval would be the safe replacement — confirm format.
    with open('outputs/{0:s}_row_labels.txt'.format(net)) as label_file:
        row_labels = eval(label_file.read())
    suffix = '_pca' if pca else ''
    x = np.load('outputs/{0:s}_{1:s}{2:s}.npy'.format(net, layer, suffix))
    scaler = pp.StandardScaler()
    accuracies = []
    for n in range(nfolds):
        train_x, train_relations, test_x, test_relations = split_data(
            subject_data, x, row_labels, objects_in_train=7, threshold=threshold)
        if normalize:
            # Fit the scaler on the training fold only to avoid test leakage.
            train_x = scaler.fit_transform(train_x)
            test_x = scaler.transform(test_x)
        if method == 'low_rank':
            A, objective, accuracy, converged = metric.learn_low_rank_metric(
                train_x, train_relations, cost=C, rank=rank, method='SLSQP',
                tol=1e-6, verbose=False)
        else:  # method == 'diag', validated above
            A, objective, accuracy, converged = metric.learn_diagonal_metric(
                train_x, train_relations, cost=C, method='L-BFGS-B',
                tol=1e-6, verbose=False)
        test_accuracy = metric.calculate_accuracy(test_x, A, test_relations)
        accuracies.append(test_accuracy)
    print("{0:35s} ({1:4.2f}): {2:.4f}+-{3:.4f}".format(
        net + '_' + layer, threshold, np.mean(accuracies), np.std(accuracies) * 2))
    results = {
        'Accuracy': np.mean(accuracies),
        'AccuracySD': np.std(accuracies)
    }
    return results
def test():
    """Smoke test: learn a rank-5 low-rank metric on PCA'd AlexNet outputs
    over 20 random splits and print train/test accuracy as mean +- 2*std."""
    subject_data_file = 'HumanPredictions_20151118.txt'
    outputs = np.load('outputs/alexnet_prob_pca.npy')
    subject_data = pd.read_table(subject_data_file, sep=" ")
    # SECURITY NOTE(review): eval() runs arbitrary code from the labels file;
    # assumed trusted. The with-block also closes the previously leaked handle.
    with open('outputs/alexnet_row_labels.txt') as label_file:
        row_labels = eval(label_file.read())
    C = 1.0
    normalize = False
    scaler = pp.StandardScaler()
    train_accuracies = []
    test_accuracies = []
    for n in range(20):
        # One progress dot per fold on a single line. The old `print('.'),`
        # was a Python 2 leftover: in Python 3 the trailing comma merely
        # built a discarded tuple and each dot got its own newline.
        print('.', end='', flush=True)
        train_x, train_rels, test_x, test_rels = split_data(
            subject_data, outputs, row_labels,
            objects_in_train=7, threshold=0.8)
        if normalize:
            train_x = scaler.fit_transform(train_x)
            test_x = scaler.transform(test_x)
        A, objective, accuracy, converged = metric.learn_low_rank_metric(
            train_x, train_rels, cost=C, method='SLSQP', rank=5,
            tol=1e-6, verbose=False)
        test_accuracy = metric.calculate_accuracy(test_x, A, test_rels)
        train_accuracies.append(accuracy)
        test_accuracies.append(test_accuracy)
    print("\nTrain: {0:f}+-{1:f}".format(np.mean(train_accuracies),
                                         np.std(train_accuracies) * 2))
    print("Test: {0:f}+-{1:f}".format(np.mean(test_accuracies),
                                      np.std(test_accuracies) * 2))
def fit_metric(model_name, subject_data_file, threshold, C, nfolds, normalize, pca, method, rank):
    """Evaluate a learned similarity metric via repeated random splits.

    Loads features for the (net, layer) named by ``model_name`` from
    outputs/, then for each of ``nfolds`` splits trains a metric and
    records test accuracy.

    Parameters
    ----------
    model_name : tuple(str, str)
        (net, layer) identifying the saved feature arrays.
    subject_data_file : str
        Space-separated table of human predictions.
    threshold : float
        Agreement threshold passed to split_data.
    C : float
        Regularization cost for the learner.
    nfolds : int
        Number of train/test splits.
    normalize : bool
        Standardize features when True (fit on train, apply to test).
    pca : bool
        Load the "_pca" feature file when True.
    method : str
        "low_rank" or "diag".
    rank : int
        Rank for the low-rank parameterization.

    Returns
    -------
    dict
        {"Accuracy": mean test accuracy, "AccuracySD": std over folds}.

    Raises
    ------
    ValueError
        For an unrecognized ``method`` (previously this surfaced later
        as a NameError on ``A``).
    """
    if method not in ("low_rank", "diag"):
        raise ValueError("method must be 'low_rank' or 'diag', got {0!r}".format(method))
    subject_data = pd.read_table(subject_data_file, sep=" ")
    net, layer = model_name
    # SECURITY NOTE(review): eval() on file contents executes arbitrary code;
    # the labels file is assumed trusted. Prefer ast.literal_eval if it is a
    # plain literal — confirm its format before switching.
    with open("outputs/{0:s}_row_labels.txt".format(net)) as label_file:
        row_labels = eval(label_file.read())
    feature_suffix = "_pca" if pca else ""
    x = np.load("outputs/{0:s}_{1:s}{2:s}.npy".format(net, layer, feature_suffix))
    scaler = pp.StandardScaler()
    accuracies = []
    for n in range(nfolds):
        train_x, train_relations, test_x, test_relations = split_data(
            subject_data, x, row_labels, objects_in_train=7, threshold=threshold
        )
        if normalize:
            # Scaler statistics come from the training fold only (no leakage).
            train_x = scaler.fit_transform(train_x)
            test_x = scaler.transform(test_x)
        if method == "low_rank":
            A, objective, accuracy, converged = metric.learn_low_rank_metric(
                train_x, train_relations, cost=C, rank=rank, method="SLSQP", tol=1e-6, verbose=False
            )
        else:  # "diag", validated above
            A, objective, accuracy, converged = metric.learn_diagonal_metric(
                train_x, train_relations, cost=C, method="L-BFGS-B", tol=1e-6, verbose=False
            )
        test_accuracy = metric.calculate_accuracy(test_x, A, test_relations)
        accuracies.append(test_accuracy)
    print(
        "{0:35s} ({1:4.2f}): {2:.4f}+-{3:.4f}".format(
            net + "_" + layer, threshold, np.mean(accuracies), np.std(accuracies) * 2
        )
    )
    return {"Accuracy": np.mean(accuracies), "AccuracySD": np.std(accuracies)}
def test():
    """Quick end-to-end check of low-rank metric learning on AlexNet PCA
    features: 20 random splits, printing train/test accuracy (mean +- 2*std)."""
    subject_data_file = "HumanPredictions_20151118.txt"
    outputs = np.load("outputs/alexnet_prob_pca.npy")
    subject_data = pd.read_table(subject_data_file, sep=" ")
    # SECURITY NOTE(review): eval() executes arbitrary code from the labels
    # file; assumed trusted. with-block fixes the leaked file handle.
    with open("outputs/alexnet_row_labels.txt") as label_file:
        row_labels = eval(label_file.read())
    C = 1.0
    normalize = False
    scaler = pp.StandardScaler()
    train_accuracies = []
    test_accuracies = []
    for n in range(20):
        # Single-line progress dots. The original `print("."),` was a
        # Python 2 idiom; under Python 3 the trailing comma just produced a
        # throwaway tuple and every dot landed on its own line.
        print(".", end="", flush=True)
        train_x, train_rels, test_x, test_rels = split_data(
            subject_data, outputs, row_labels, objects_in_train=7, threshold=0.8
        )
        if normalize:
            train_x = scaler.fit_transform(train_x)
            test_x = scaler.transform(test_x)
        A, objective, accuracy, converged = metric.learn_low_rank_metric(
            train_x, train_rels, cost=C, method="SLSQP", rank=5, tol=1e-6, verbose=False
        )
        test_accuracy = metric.calculate_accuracy(test_x, A, test_rels)
        train_accuracies.append(accuracy)
        test_accuracies.append(test_accuracy)
    print("\nTrain: {0:f}+-{1:f}".format(np.mean(train_accuracies), np.std(train_accuracies) * 2))
    print("Test: {0:f}+-{1:f}".format(np.mean(test_accuracies), np.std(test_accuracies) * 2))