# Final fit on the merged train+validation split with the tuned C, then
# evaluate on the held-out test set; every figure feeds the CSV summary.
model = train(train_val_x, train_val_y, best_C)
t3 = time.time()
print("training time:", t3 - t2)

pred = predict(test_x, test_y, model)
t4 = time.time()
print("testing time:", t4 - t3)

# Score once and reuse the value (percentage scale).
acc_pct = accuracy_score(pred, test_y) * 100
accuracies.append(acc_pct)
print(acc_pct)

train_times.append(t3 - t2)
test_times.append(t4 - t3)

# Running aggregates across repetitions.
# NOTE(review): statistics.stdev raises on fewer than two entries — this
# presumably runs inside a multi-iteration loop; confirm against the caller.
mean_accuracies.append(mean(accuracies))
acc_list.append(mean(mean_accuracies))
std_list.append(stdev(mean_accuracies))
train_time_list.append(mean(train_times))
test_time_list.append(mean(test_times))

# Persist the accumulated results for this configuration.
dict_to_csv(
    {
        'accuracy': acc_list,
        'error': std_list,
        'train_time': train_time_list,
        'test_time': test_time_list,
        'landmarks': landmarks
    },
    ["nb_iter={},cv={}".format(ITER, 3)],
    PATH + ".csv")
# NOTE(review): this chunk begins mid-statement — the leading
# "7), labels[train_val_inds]" fragment is the tail of a get_view_blocks(...)
# call whose start lies outside this view, so the code is left byte-identical.
# Flow (per-view SVM variant): fit one SVM per view via train_svm_per_view
# (arguments 17 and 7 — presumably class count and view count, TODO confirm),
# time training, rebuild the test view blocks with get_view_blocks, predict
# with predict_svm_per_view, time testing, then write mean/stdev of accuracy
# and the mean timings to PATH + ".csv".
# The four commented-out appends mirror the aggregation used by sibling
# scripts and appear intentionally disabled here (single-iteration run:
# nb_iter is hard-coded to 1 in the CSV tag).
7), labels[train_val_inds] models = train_svm_per_view(train_val_x, train_val_y, 17, 7, best_C) print(best_C) t3 = time.time() print("training time:", t3 - t2) test_x, test_y = get_view_blocks(dist_matrices, test_inds, train_val_inds, 7), labels[test_inds] pred = predict_svm_per_view(test_x, test_y, 7, models) t4 = time.time() print("testing time:", t4 - t3) accuracies.append(accuracy_score(pred, test_y) * 100) train_times.append(t3 - t2) test_times.append(t4 - t3) # acc_list.append(mean(mean_accuracies)) # std_list.append(stdev(mean_accuracies)) # train_time_list.append(mean(train_times)) # test_time_list.append(mean(test_times)) dict_to_csv( { 'accuracy': mean(accuracies), 'error': stdev(accuracies), 'train_time': mean(train_times), 'test_time': mean(test_times) }, ["nb_iter={},cv={}".format(1, 3)], PATH + ".csv")
# Evaluation on a precomputed kernel matrix k_x: refit on train+val rows with
# the tuned C, then score the test rows and log aggregates to CSV.
t2 = time.time()
print("tuning time:", t2 - t1)

# training — sub-kernel restricted to the train+val indices.
train_val_inds = np.hstack((train_inds, val_inds))
k_train_val_x = get_view_dict(k_x[np.ix_(train_val_inds, train_val_inds)])
model = train(k_train_val_x, y[train_val_inds], best_C)
t3 = time.time()
print("training time:", t3 - t2)

# Test rows sit after the len(y) training rows in the kernel matrix.
test_inds = np.arange(len(test_y)) + len(y)
k_test_x = get_view_dict(k_x[np.ix_(test_inds, train_val_inds)])
pred = predict(k_test_x, test_y, model)
t4 = time.time()
print("testing time:", t4 - t3)

acc = accuracy_score(pred, test_y) * 100
print(acc)
accuracies.append(acc)
# NOTE(review): t10 and t0 are defined outside this view — presumably a
# whole-run timer pair; confirm they exist at this point.
times.append(t10 - t0)

acc_list.append(mean(accuracies))
std_list.append(stdev(accuracies))
time_list.append(mean(times))

dict_to_csv(
    {
        'accuracy': acc_list,
        'error': std_list,
        'times': time_list,
        'ratios': ratios_missing
    },
    ["nb_iter={},cv={}".format(ITER, 3)],
    PATH + ".csv")
# MVML one-vs-all evaluation: fit on the RBF Gram blocks of the training set,
# then score the cross-kernels between test and training points.
# training
train_val_x = get_view_dict(get_kernels(X, X, kernel=rbf_kernel))
mvml = one_vs_all_mvml_train(train_val_x, Y, 8, best_l, best_e, a)
t3 = time.time()
print("training time:", t3 - t2)

# Cross-kernel blocks: rows are test points, columns are training points.
test_x = get_view_dict(get_kernels(test_X, X, kernel=rbf_kernel))
pred = one_vs_all_mvml_predict(test_x, mvml)
p_acc = accuracy_score(test_Y, pred)
t4 = time.time()
print("testing time:", t4 - t3)

# Single run per approximation level: no spread to report, hence the 0. error.
acc_list.append(p_acc * 100)
std_list.append(0.)
train_time_list.append(t3 - t2)
test_time_list.append(t4 - t3)

dict_to_csv(
    {
        'accuracy': acc_list,
        'error': std_list,
        'train_time': train_time_list,
        'test_time': test_time_list,
        'rank': appr_levels
    },
    ["nb_iter={},cv={}".format(ITER, CV)],
    PATH + ".csv")