def sort_skeletons(task_skeleton_data):
    # Count how many frames belong to each skeleton id (stored at line[0][2]).
    ids = map(lambda line: line[0][2], task_skeleton_data)
    print 'skeleton id: ', Counter(ids).most_common()

    # Group the frames by skeleton id, most frequent id first.
    skeletons = []
    for counter_ids in Counter(ids).most_common():
        main_id = counter_ids[0]

        new_joints_points = []
        for i_point, points in enumerate(task_skeleton_data):
            if points[0][2] == main_id:
                if len(new_joints_points) == 0:
                    print points[0]
                new_joints_points.append(points)

        skeletons.append(new_joints_points)

    # Keep the frames of the second most frequent skeleton.
    skeleton_data_sorted = skeletons[1]

    data_organizer.save_matrix_pickle(
        skeleton_data_sorted,
        'C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_confusion_behavior_08082017_test.txt')

    return skeleton_data_sorted
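# A minimal illustration of how Counter.most_common() orders the skeleton ids
# that drive the grouping in sort_skeletons; the id values are hypothetical.
def _demo_most_common():
    ids = [3, 3, 7, 3, 7, 5]  # hypothetical skeleton ids, one per frame
    print Counter(ids).most_common()  # -> [(3, 3), (7, 2), (5, 1)]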
def create_cluster_labels_participant_task(raw_features, AE_weights_level_2):
    # Cluster model trained on layer-2 autoencoder activations.
    cluster_model = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/10_cluster_model_layer2_new.txt')

    final_labels = []
    for participant in raw_features:
        participant_label = []
        for task in participant:
            task_label = []
            for f_vector in task:
                # Encode the feature vector with the level-2 AE weights and
                # biases, then assign a cluster label to the activation.
                hd = np.dot(f_vector, AE_weights_level_2[0][0])
                act = sigmoid_function(hd + AE_weights_level_2[1])
                label = cluster_model.predict(act.reshape((1, -1)))
                task_label.append(label[0])
            participant_label.append(task_label)
        final_labels.append(participant_label)

    data_organizer.save_matrix_pickle(
        final_labels,
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/10clusters_labels_l2_participants_tasks_new.txt')
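# `sigmoid_function` is called above but not defined in this section; a minimal
# sketch, assuming it is the standard element-wise logistic nonlinearity
# (remove if the module already defines the real helper elsewhere).
def sigmoid_function(x):
    # assumed implementation of the helper referenced in
    # create_cluster_labels_participant_task
    return 1.0 / (1.0 + np.exp(-x))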
def get_ambient_sensor_features():
    # Read and parse the file listing the recorded data files.
    with open('C:/Users/dario.dotti/Documents/file_to_analyze_AS_5_labels.txt', 'r') as f:
        files = f.read().split('\n')
    print 'number of recorded files: ' + str(len(files))

    matrix_allData_as = []
    for file in files:
        print file

        activation_matrix = ambient_sensor_analysis.feature_extraction_as(file)
        print np.array(activation_matrix).shape

        # Stack the per-file activation matrices vertically.
        if len(matrix_allData_as) > 0:
            matrix_allData_as = np.vstack((matrix_allData_as, activation_matrix))
        else:
            matrix_allData_as = activation_matrix

    my_data_org.save_matrix_pickle(
        matrix_allData_as,
        'C:/Users/dario.dotti/Documents/AS_activation_5_labels_transformed.txt')
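# The loop above re-allocates with np.vstack on every file; an equivalent
# pattern (a sketch, assuming every activation matrix shares the same column
# count) collects the per-file matrices in a list and stacks once.
def _stack_activations_once(files):
    per_file = [ambient_sensor_analysis.feature_extraction_as(f) for f in files]
    return np.vstack(per_file)  # one allocation instead of one per file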
def main_pecs_data():
    ## get raw data for displaying
    task_skeleton_data = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_repetitive_behavior_08082017.txt')

    ## if the data contains multiple skeletons, sort them chronologically
    task_skeleton_data = sort_skeletons(task_skeleton_data)

    my_room = np.zeros((424, 512, 3), dtype=np.uint8)
    my_room += 255
    list_poly = img_processing.divide_image(my_room)

    #draw_joints_and_tracks(task_skeleton_data, my_room)
    #return 0

    skeleton_data_in_time_slices = org_data_in_timeIntervals(task_skeleton_data, [0, 0, 2])

    HOT_data, patient_ID = histograms_of_oriented_trajectories(list_poly, skeleton_data_in_time_slices)

    data_organizer.save_matrix_pickle(
        HOT_data,
        'C:/Users/dario.dotti/Documents/pecs_data_review/HOT_repetitive_behavior_08082017.txt')
def video_clustering_fit(HOT_matrix, filename):
    # Fit the module-level clustering model on the HOT feature matrix;
    # a filename longer than two characters triggers saving the fitted model.
    global my_ms
    my_ms.fit(HOT_matrix)

    if len(filename) > 2:
        data_organizer.save_matrix_pickle(my_ms, filename)
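# Hedged usage sketch: `my_ms` is assumed to be a module-level scikit-learn
# estimator (MeanShift is a guess based on the `_ms` name); the feature matrix
# and output path below are hypothetical.
def _demo_video_clustering_fit():
    from sklearn.cluster import MeanShift
    global my_ms
    my_ms = MeanShift()  # assumed estimator; the real model is defined elsewhere
    HOT_matrix = np.random.rand(100, 32)  # hypothetical HOT feature matrix
    video_clustering_fit(HOT_matrix, 'path/to/fitted_meanshift_model.txt')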
        filename = os.path.basename(file)
        video_traj.set_subject(filename.split('_')[0])

        traj_features = video_traj.feature_extraction_video_traj(file)
        matrix_allData_HOT.append(traj_features[1])

        # if len(matrix_allData_HOT) > 0:
        #     matrix_allData_HOT = np.vstack((matrix_allData_HOT, traj_features[1]))
        # else:
        #     matrix_allData_HOT = np.array(traj_features[1])

    print len(matrix_allData_HOT)

    #scipy.io.savemat('C:/Users/dario.dotti/Documents/hot_spatial_grid_4x4.mat', mdict={'spatial_grid_4x4': matrix_allData_HOT})
    my_data_org.save_matrix_pickle(
        matrix_allData_HOT,
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/skeleton_data_in_tasks_time_slices_30fps_ordered_1sec.txt')  ##C:/Users/dario.dotti/Desktop/data_recordings_master/master_skeleton_data_in_tasks_time_slices_30fps_1sec.txt
    final_matrix = []
    final_orig_points = []
    final_matrix_realcoord = []

    for participant in skeleton_data_in_tasks_and_time_slices:
        #extract_traj_word_spatio_temporal_grid(participant, n_layer=1)

        feature_participant, orig_point_participant, matrix_real_coord = \
            extract_traj_word_temporal_window(participant, n_layer=2)
        final_matrix_realcoord.append(matrix_real_coord)

        final_matrix.append(feature_participant)
        final_orig_points.append(orig_point_participant)

    print len(final_matrix), len(final_orig_points)

    final_matrix = final_matrix + final_matrix_realcoord

    data_organizer.save_matrix_pickle(
        final_matrix,
        'C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_task_l2_new_realCoordinates.txt')
    #data_organizer.save_matrix_pickle(final_orig_points,
    #    'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/head_joint_id1/orig_points_participant_task_l2_new.txt')


if __name__ == '__main__':
    main_realtime_traj_dict()
def visualize_activations(matrix_activation):
    ### Training 625 matrix activation on shallow AE on only arms
    # participant_length = [0, 2197, 2082, 1873, 1595, 1779, 1991, 2148, 1702, 2484, 1744, 2902, 1947, 1860, 1743, 1645,
    #                       2398, 2287, 1998, 1573]
    # s = []
    # dim = 30
    # for l in xrange(1, len(participant_length)):
    #     slide = matrix_activation[participant_length[l - 1]:(participant_length[l - 1] + participant_length[l])]
    #
    #     for m in xrange(0, len(slide) - dim, dim):
    #         if len(s) > 0:
    #             s = np.vstack((s, matrix_activation[m:m + dim].reshape((1, -1))))
    #         else:
    #             s = matrix_activation[m:m + dim].reshape((1, -1))

    ### trained deep AE on upperBody
    participant_length = [0, 2876, 2394, 2256, 1998, 1887, 2597, 2703, 2105, 3137,
                          2190, 4072, 2226, 2282, 2480, 2120, 2536, 2507, 2511, 1675]

    # participant_length holds per-participant row counts, so the slice
    # boundaries are cumulative sums of those lengths.
    offsets = np.cumsum(participant_length)

    s = []
    dim = 50
    for l in xrange(1, len(offsets)):
        slide = matrix_activation[offsets[l - 1]:offsets[l]]

        # Flatten non-overlapping windows of `dim` frames into single rows.
        for m in xrange(0, len(slide) - dim, dim):
            if len(s) > 0:
                s = np.vstack((s, slide[m:m + dim].reshape((1, -1))))
            else:
                s = slide[m:m + dim].reshape((1, -1))

    print s.shape

    #s = np.array(random.sample(matrix_activation, 30000))

    # kernel_bandwith = 5.1
    # X = img_processing.my_mean_shift(s, iterations=5, kernel_bandwith=kernel_bandwith)
    # print datetime.now().time()

    # my_kmean = KMeans(n_clusters=3, n_jobs=-1, algorithm='full')
    # X = my_kmean.fit(s)
    # means = np.mean(X, axis=1)

    # 100-dimensional PCA (whiten=True, svd_solver='randomized' are other options);
    # the fitted PCA model itself is saved so it can be reused at test time.
    pca = decomposition.PCA(n_components=100)
    pca_model = pca.fit(s)
    data_organizer.save_matrix_pickle(
        pca_model,
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/upperBody/100pca_deep900225AE_5sec_data.txt')

    s_t = pca.transform(s)
    #print s_t.shape
    print np.sum(pca.explained_variance_ratio_)

    # plt.bar(range(100), pca.explained_variance_ratio_)
    # plt.show()

    ## testing clustering
    #m_s = KMeans(n_clusters=10, n_jobs=-1)
    #m_s = MeanShift(n_jobs=-1, bandwidth=0.9)
    #s_t = s

    m_s = AgglomerativeClustering(n_clusters=15, affinity='cosine', linkage='average')
    y_tr = m_s.fit_predict(s_t)
    print Counter(y_tr)

    ## since AgglomerativeClustering doesn't have predict, fit an SVM on the
    ## cluster labels for classification
    clf = svm.LinearSVC().fit(s_t, y_tr)
    data_organizer.save_matrix_pickle(
        clf,
        'C:/Users/dario.dotti/Documents/data_for_personality_exp/after_data_cleaning/posture_data/linearSVM_agglomerative15c_5sec_100pca.txt')
    #print 'file saved'

    colors = np.array(np.random.randint(0, 255, size=(20, 3))) / 255.0
    color_labels = [colors[p] for p in y_tr]

    ## 2D
    plt.scatter(s_t[:, 0], s_t[:, 1], c=color_labels)
    #plt.scatter(m_s[:, 0], m_s[:, 1], marker='^', c='r')
    plt.show()

    ## 3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(s_t[:, 0], s_t[:, 1], s_t[:, 2], c=color_labels)
    #ax.scatter(m_s[:, 0], m_s.means_[:, 1], m_s.means_[:, 2], marker='^', c='r')
    plt.show()

    return s, s_t, m_s, y_tr
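# Labeling unseen activation windows with the models saved above (a sketch;
# the paths and `new_windows` are hypothetical, shapes follow the training
# code: rows flattened to dim * n_features, projected to 100 PCA components).
def _demo_label_new_windows(new_windows):
    pca_model = data_organizer.load_matrix_pickle('path/to/100pca_model.txt')
    clf = data_organizer.load_matrix_pickle('path/to/linearSVM_model.txt')

    new_windows_pca = pca_model.transform(new_windows)  # project to the 100-dim PCA space
    return clf.predict(new_windows_pca)  # one of the 15 agglomerative cluster ids per row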