    R.append(all_data['R'])

n_voxel, n_tr = data[0].shape

# Run HTFA with downloaded data
from brainiak.factoranalysis.htfa import HTFA
# uncomment the line below to get the help message on HTFA
# help(HTFA)

K = 5
htfa = HTFA(K=K,
            n_subj=n_subj,
            max_global_iter=5,
            max_local_iter=2,
            voxel_ratio=0.5,
            tr_ratio=0.5,
            max_voxel=n_voxel,
            max_tr=n_tr,
            verbose=True)
htfa.fit(data, R)

if rank == 0:
    print("\n centers of global latent factors are:")
    print(htfa.get_centers(htfa.global_posterior_))
    print("\n widths of global latent factors are:")
    widths = htfa.get_widths(htfa.global_posterior_)
    print(widths)
    print("\n stds of global latent RBF factors are:")
    rbf_std = np.sqrt(widths / 2.0)
    print(rbf_std)
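# The snippet above assumes that `data` (a list of per-subject voxel-by-TR
# arrays), `R` (a list of matching voxel coordinate arrays), `n_subj`, and the
# MPI `rank` have already been set up earlier in the example. The sketch below
# shows one minimal way that setup could look; the file pattern 'subj_*.npz'
# and the archive keys 'data' and 'R' are illustrative assumptions, not the
# example's actual download format.
from glob import glob

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

data = []  # one (n_voxel, n_tr) BOLD array per subject
R = []     # one (n_voxel, 3) voxel-coordinate array per subject
for fname in sorted(glob('subj_*.npz')):  # hypothetical file layout
    all_data = np.load(fname)
    data.append(all_data['data'])
    R.append(all_data['R'])

n_subj = len(data)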
            upper_ratio=upper_ratio,
            lower_ratio=lower_ratio,
            max_tr=max_sample_tr,
            max_voxel=max_sample_voxel,
            comm=htfa_comm,
            verbose=True)
htfa.fit(train_data, R)

for s in range(n_local_subj):
    # get the posterior for each subject
    subj_idx = mapping[str(s)]
    start_idx = s * htfa.prior_size
    end_idx = (s + 1) * htfa.prior_size
    local_posterior = htfa.local_posterior_[start_idx:end_idx]
    local_centers = htfa.get_centers(local_posterior)
    local_widths = htfa.get_widths(local_posterior)
    htfa.n_dim = n_dim
    htfa.cov_vec_size = np.sum(np.arange(htfa.n_dim) + 1)
    htfa.map_offset = htfa.get_map_offset()
    # training happens on all voxels, but only on part of the TRs
    unique_R_all, inds_all = htfa.get_unique_R(R[s])
    train_F = htfa.get_factors(unique_R_all,
                               inds_all,
                               local_centers,
                               local_widths)
    # calculate train_recon_err on the training data
    tmp_train_recon_errs[subj_idx, p, idx] = get_train_err(htfa,
                                                           train_data[s],
                                                           train_F)
    # calculate weights on test_weight_data, test_recon_err on test_recon_data
    tmp_test_recon_errs[subj_idx, p, idx] = get_test_err(
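# `get_train_err` and `get_test_err` are helper functions defined elsewhere in
# the cross-validation example; they are not part of the HTFA API. As a rough
# sketch of what the training-error helper could compute, the version below
# fits factor weights by ordinary least squares and returns the RMS
# reconstruction error. It ignores the passed-in `htfa` model and may differ
# from the helpers actually used in the example.
import numpy as np


def get_train_err(htfa, data, F):
    """RMS error of reconstructing `data` from the factor matrix `F`.

    `data` is an (n_voxel, n_tr) array and `F` an (n_voxel, K) matrix of
    spatial factors; the (K, n_tr) weight time courses are obtained by
    least squares. `htfa` is accepted only to match the call site above.
    """
    W, *_ = np.linalg.lstsq(F, data, rcond=None)  # weights per factor
    recon = F.dot(W)                              # reconstructed data
    return np.sqrt(np.mean((data - recon) ** 2))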