# --- Landmark-count sweep: accuracy/timing over several train/val/test splits ---
# NOTE(review): this fragment was collapsed onto a single line and is truncated
# mid-loop (it ends right after the "# tuning" marker); indentation below is
# reconstructed from the statement order — confirm against the original script.

# Per-landmark-count aggregates, filled by the loops below (truncated here).
acc_list = []
std_list = []
train_time_list = []
test_time_list = []

# Sweep over the number of landmarks L.
for L in landmarks:
    mean_accuracies = []
    # Repeat the experiment ITER times to average out split randomness.
    for i in range(ITER):
        accuracies = []
        train_times = []
        test_times = []
        # splits_generator yields 3-way index splits over the distance matrices.
        for train_inds, val_inds, test_inds in splits_generator(
                dist_matrices, 3, sets):
            # Landmarks are chosen from the training indices only, to avoid
            # leaking validation/test information into the representation.
            lands = select_landmarks(train_inds, L)
            # Kernelize each subset against the landmarks and flatten to 2-D.
            train_x, train_y = twod_array(
                get_kernels(dist_matrices[train_inds], lands)), labels[train_inds]
            val_x, val_y = twod_array(
                get_kernels(dist_matrices[val_inds], lands)), labels[val_inds]
            test_x, test_y = twod_array(
                get_kernels(dist_matrices[test_inds], lands)), labels[test_inds]
            t1 = time.time()
            # tuning
            # (fragment truncated here — hyper-parameter tuning follows in the
            # original file)
# --- Missing-views experiment: sweep over the ratio of erased views ---
# NOTE(review): this fragment was collapsed onto a single line and is truncated
# right after `t1 = time.time()`; indentation below is reconstructed from the
# statement order — confirm against the original script.

PATH = "results/{}/missing/lmvsvm/{}".format(DATASET, recons)
print("learning on {}, missing views completed by {}. results saved in {}".
      format(DATASET, recons, PATH))

# Per-ratio aggregates, filled by the loops below (truncated here).
acc_list = []
std_list = []
times = []

# Sweep over the fraction r of views to erase.
for r in ratios_missing:
    print(r, "\n")
    accuracies = []
    rec_times = []
    # ITER train/val splits (no test set here: third element is discarded).
    for train_inds, val_inds, _ in splits_generator(X, ITER, None):
        train_x, train_y = X[train_inds], Y[train_inds]
        val_x, val_y = X[val_inds], Y[val_inds]
        # Landmarks come from the training samples only.
        lands = select_landmarks(train_x, L)
        k_train_x = get_kernels(train_x, lands, rbf_kernel)
        k_val_x = get_kernels(val_x, lands, rbf_kernel)
        # erase some views from data: a ratio r of views is dropped and
        # completed using the `recons` strategy.
        k_train_x = set_random_views_to_value(k_train_x, r, recons)
        k_val_x = set_random_views_to_value(k_val_x, r, recons)
        t1 = time.time()
        # (fragment truncated here — training/tuning follows in the original
        # file)
# --- Erase views, kernelize, then cross-validate over C ---
# NOTE(review): this fragment was collapsed onto a single line and is truncated
# right after the `predict` call; indentation below is reconstructed from the
# statement order — confirm against the original script. It also reads `r`
# and `rbf_kernel`, presumably bound by an enclosing loop/import not shown.

# erase some views from training (and test): a ratio r of views set to "none".
x = set_random_views_to_value(X, r, r_type="none")
test_x = set_random_views_to_value(test_X, r, r_type="none")

# kernelize and reconstruct views
t0 = time.time()
# Laplacian reconstruction is disabled; all-True masks keep every sample.
# k_x, mask, mask2 = laplacian_reconstruction(x, rbf_kernel, test_x)
mask, mask2 = np.ones(len(Y), dtype=bool), np.ones(len(test_Y), dtype=bool)
# Full (train+test) x (train+test) multiview kernel matrix.
k_x = multiview_kernels(np.vstack((x, test_x)), np.vstack((x, test_x)),
                        rbf_kernel)
y, test_y = Y[mask], test_Y[mask2]
t10 = time.time()

# cross-validation
for train_inds, val_inds, _ in splits_generator(y, CV, None):
    train_y = y[train_inds]
    val_y = y[val_inds]
    # Slice the precomputed kernel: rows = evaluated samples, columns =
    # training samples (validation is kernelized against the train set).
    k_train_x = get_view_dict(k_x[np.ix_(train_inds, train_inds)])
    k_val_x = get_view_dict(k_x[np.ix_(val_inds, train_inds)])
    t1 = time.time()
    # tuning: pick C by validation accuracy (accumulator per C value).
    tuning_acc = {}.fromkeys(c_range, 0.)
    for c in c_range:
        model = train(k_train_x, train_y, c)
        pred = predict(k_val_x, val_y, model)
        # (fragment truncated here — accuracy accumulation follows in the
        # original file)
# --- Erase views, then build boolean masks for 3-way CV splits ---
# NOTE(review): this fragment was collapsed onto a single line and is truncated
# right after `k_val_x = ...`; indentation below is reconstructed from the
# statement order — confirm against the original script. It reads `r`,
# presumably bound by an enclosing loop not shown here.

accuracies = []
times = []

# erase some views from training: a ratio r of views set to "none"
# (symmetrically, per sym=True).
x = set_random_views_to_value(X, r, r_type="none", sym=True)

t0 = time.time()
# kernelize and reconstruct views — Laplacian reconstruction is disabled,
# so x is used as-is and the mask keeps every sample.
# k_x, mask = laplacian_reconstruction(x)
k_x, mask = x, np.ones(len(x), dtype=bool)
t10 = time.time()

inds = np.arange(len(X))

# cross-validation
for train_inds, val_inds, test_inds in splits_generator(Y, CV, sets):
    train_val_inds = np.hstack((train_inds, val_inds))
    # Convert index arrays to boolean masks over the (mask-filtered) samples,
    # so they stay aligned with k_x / Y[mask] below.
    train_inds = np.isin(inds, train_inds)[mask]
    val_inds = np.isin(inds, val_inds)[mask]
    test_inds = np.isin(inds, test_inds)[mask]
    train_val_inds = np.isin(inds, train_val_inds)[mask]
    train_y = Y[mask][train_inds]
    val_y = Y[mask][val_inds]
    test_y = Y[mask][test_inds]
    train_val_y = Y[mask][train_val_inds]
    # Kernel sub-blocks: rows = evaluated samples, columns = training samples.
    k_train_x = get_view_dict(k_x[np.ix_(train_inds, train_inds)])
    k_val_x = get_view_dict(k_x[np.ix_(val_inds, train_inds)])
    # (fragment truncated here — test-kernel construction and tuning follow
    # in the original file)