# ------------------------------------ # Display PC resonstruction over time # ------------------------------------ empty_wavelet = np.zeros(39 * 39) if n_com == 0 or n_com == 9: num_to_see = 400 else: num_to_see = 80 for frame_num in range(num_to_see): empty_wavelet[ relevant_wavelet_features] = wavelet_array_relevant_features_recon[ frame_num, :] wavelet = np.reshape(empty_wavelet, (39, 39)) #reconstruct image from wavelet transform reconstruction_from_wavelet = reconstruct_from_wavelet( wavelet, coeff_slices, level, discard_scale) reconstruction_from_wavelet[ reconstruction_from_wavelet > 255] = 255 reconstruction_from_wavelet = cv2.resize( abs(reconstruction_from_wavelet).astype(np.uint8), (450, 450)) cv2.imshow('PC / wavelet reconstruction', reconstruction_from_wavelet) vid.write(reconstruction_from_wavelet) if cv2.waitKey(int(1000 / display_frame_rate)) & 0xFF == ord('q'): break if cv2.waitKey(500) & 0xFF == ord('q'): break vid.release() ''' -------------------------------------------------------------------------------------------------------------------------------------
# For each time window around the current pose, reconstruct the mean pose
# image from its PCA features and colour/rotate it for the trajectory plot.
# NOTE: this excerpt is truncated mid-statement (the cv2.getRotationMatrix2D
# call at the end is incomplete in the visible source).
for t in range(2 * windows_to_look_at + 1):
    # Get the mean features for that pose (one `features_used`-wide slice
    # of the model's mean-feature matrix per time window).
    mean_features = mean_features_model[n, t * features_used:(t + 1) * features_used]

    # Reconstruct wavelet-transformed data from the PCs: keep the first
    # num_PCs_used components, zero-pad the remainder before inverting.
    mean_wavelet_relevant_features = pca.inverse_transform(
        np.append(mean_features[0:num_PCs_used],
                  np.zeros(pca.n_components_ - num_PCs_used)))
    mean_wavelet = np.zeros(39 * 39)
    mean_wavelet[
        relevant_wavelet_features] = mean_wavelet_relevant_features
    mean_wavelet_array = np.reshape(mean_wavelet, (39, 39))

    # Reconstruct image in pixel space from wavelet-transformed reconstruction
    reconstruction_from_wavelet = reconstruct_from_wavelet(
        mean_wavelet_array, coeff_slices, level, discard_scale)

    # Clip out-of-range pixels; `twoD` acts as a 0/1 flag selecting whether
    # the >=250 condition and the 250 fill value participate.
    # NOTE(review): under numpy promotion, `bool_array + twoD * bool_array`
    # yields an *integer* array, so this is integer (row) fancy indexing,
    # not a boolean mask — confirm this matches the intended clipping.
    reconstruction_from_wavelet[
        (reconstruction_from_wavelet <= 0) + twoD *
        (reconstruction_from_wavelet >= 250)] = 0 + twoD * 250

    # Resize to the trajectory tile size and convert to BGR so it can be
    # tinted by the per-pose colour array.
    reconstruction_image = cv2.resize(
        reconstruction_from_wavelet.astype(np.uint8),
        (trajectory_pose_size, trajectory_pose_size))
    reconstruction_image = cv2.cvtColor(reconstruction_image,
                                        cv2.COLOR_GRAY2BGR)
    reconstruction_image = (reconstruction_image *
                            np.squeeze(color_array[:, :, :, n])).astype(
                                np.uint8)

    # rotate image (call truncated in the visible source)
    M = cv2.getRotationMatrix2D(
# Open up the selected data wavelet array for one session and play back,
# side by side, the frames the model keeps as-is versus the frames it
# would flip upside-down, each reconstructed from its wavelet transform.
# NOTE(review): the path is built with a literal '\\' separator, so this
# is Windows-only as written — consider os.path.join if portability matters.
file_location_session = save_folder_location + session_name_tags[
    session_to_examine] + '\\' + session_name_tags[session_to_examine]

# Load the (H, W, frames) wavelet stack, keeping every Nth frame.
wavelet_array_session = np.load(
    file_location_session + '_wavelet.npy')[:, :, ::downsample_every_other]

# show in one video the unflipped frames, and in another, those that the model would flip
level = 5
discard_scale = 4  # these must be parameters taken from original wavelet transform

# Restrict to flip indices that actually exist in this (downsampled) session.
flip_ind_single_video = flip_ind[flip_ind < wavelet_array_session.shape[2]]
for j in range(len(flip_ind_single_video)):
    # reconstruct images from wavelet transform
    # NOTE(review): keep_ind is not range-filtered like flip_ind — confirm
    # its entries cannot exceed wavelet_array_session.shape[2].
    wavelet_up = wavelet_array_session[:, :, keep_ind[j]]
    # Bug fix: index with the *filtered* flip indices; the unfiltered
    # flip_ind[j] is only equivalent when flip_ind is sorted, and can
    # otherwise select out-of-range or wrong frames.
    wavelet_down = wavelet_array_session[:, :, flip_ind_single_video[j]]

    reconstruction_from_wavelet_up = reconstruct_from_wavelet(
        wavelet_up, coeff_slices, level, discard_scale)
    reconstruction_from_wavelet_down = reconstruct_from_wavelet(
        wavelet_down, coeff_slices, level, discard_scale)

    # Show both reconstructions; 'q' aborts playback.
    cv2.imshow('right-side up',
               reconstruction_from_wavelet_up.astype(np.uint8))
    cv2.imshow('up-side down',
               reconstruction_from_wavelet_down.astype(np.uint8))
    if (cv2.waitKey(int(1000 / display_frame_rate)) & 0xFF == ord('q')):
        break

    # Progress report every 500 flip frames.
    if j % 500 == 0:
        print(
            str(j) + ' out of ' + str(len(flip_ind_single_video)) +
            ' flip frames complete')

# ---------------