def feature_extraction_video_traj(file_traj):
    print('old dataset')

    ##visualization of apathy over week 19_4-29_4
    # motion_week = [12.038,9.022,7.974,9.9650,2.113,4.4285,5.7845]
    # slight_motion_week = [27.856,22.571,27.846,31.002,13.4013,10.6954,28.1096]
    # sedentary_week = [29.236,36.7410,35.1045,53.6780,35.505,43.7546,57.1622]
    #
    # vis.bar_plot_motion_in_region_over_long_time(motion_week)

    ##divide image into patches(polygons) and get the positions of each one
    my_room = np.zeros((480, 640), dtype=np.uint8)
    list_poly = my_img_proc.divide_image(my_room)
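    ##(divide_image is assumed to return one polygon object per grid cell,
    ## each exposing a .vertices list of corner coordinates)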

    ##--------------Pre-Processing----------------##
    content, skeleton_data_in_time_slices = org_OLDdata_timeIntervals(
        file_traj)

    #occupancy_histograms = occupancy_histograms_in_time_interval(my_room, list_poly, skeleton_data_in_time_slices)
    occupancy_histograms = 1  ##placeholder while the occupancy computation above is disabled

    ## create Histograms of Oriented Trajectories (HOT)
    HOT_data = histograms_of_oriented_trajectories(
        list_poly, skeleton_data_in_time_slices)
    #HOT_data = 1

    vis.bar_plot_motion_over_time(HOT_data)
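
As a rough illustration of the feature above, here is a minimal sketch of a histogram-of-oriented-trajectories computation. It assumes each track is a sequence of (x, y) points and each patch a plain (x0, y0, x1, y1) rectangle; the project's histograms_of_oriented_trajectories works on its own polygon objects and time slices, so this only shows the binning idea.

import numpy as np

def hot_sketch(tracks, patches, n_bins=8):
    ##one orientation histogram per rectangular patch
    hist = np.zeros((len(patches), n_bins))
    for track in tracks:
        for (x0, y0), (x1, y1) in zip(track[:-1], track[1:]):
            ##bin the direction of each displacement into n_bins sectors
            angle = np.arctan2(y1 - y0, x1 - x0) % (2 * np.pi)
            b = int(angle / (2 * np.pi / n_bins)) % n_bins
            for i, (px0, py0, px1, py1) in enumerate(patches):
                if px0 <= x0 < px1 and py0 <= y0 < py1:
                    hist[i, b] += 1
                    break
    ##L1-normalize per patch so patches with different traffic are comparable
    sums = hist.sum(axis=1, keepdims=True)
    return hist / np.where(sums == 0, 1, sums)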
Code example #2
def main_pecs_data():
    ##get raw data for displaying
    task_skeleton_data = data_organizer.load_matrix_pickle(
        'C:/Users/dario.dotti/Documents/pecs_data_review/skeletons_repetitive_behavior_08082017.txt'
    )
    ##if the data contains multiple skeletons, sort them chronologically
    sort_skeletons(task_skeleton_data)
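    ##(sort_skeletons is assumed to reorder the frames in place by timestamp)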

    my_room = np.zeros((424, 512, 3), dtype=np.uint8)
    my_room += 255
    list_poly = img_processing.divide_image(my_room)

    #draw_joints_and_tracks(task_skeleton_data,my_room)
    #return 0

    skeleton_data_in_time_slices = org_data_in_timeIntervals(
        task_skeleton_data, [0, 0, 2])

    HOT_data, patient_ID = histograms_of_oriented_trajectories(
        list_poly, skeleton_data_in_time_slices)
    data_organizer.save_matrix_pickle(
        HOT_data,
        'C:/Users/dario.dotti/Documents/pecs_data_review/HOT_repetitive_behavior_08082017.txt'
    )
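
Below is a minimal sketch of the kind of grid division divide_image presumably performs; a 4x4 grid would match the 16-row votes array used in a later example. The real helper in img_processing returns polygon objects rather than plain tuples, so this is only illustrative.

import numpy as np

def divide_image_sketch(img, n_rows=4, n_cols=4):
    ##split the image plane into an n_rows x n_cols grid and return each
    ##cell as an (x0, y0, x1, y1) rectangle
    h, w = img.shape[:2]
    cells = []
    for r in range(n_rows):
        for c in range(n_cols):
            cells.append((c * w // n_cols, r * h // n_rows,
                          (c + 1) * w // n_cols, (r + 1) * h // n_rows))
    return cells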
Code example #3
    return frames_where_joint_displacement_over_threshold
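
The return value above suggests a helper that keeps only the frames in which a joint moved more than a threshold. A minimal sketch under that assumption (names and shapes are hypothetical):

import numpy as np

def frames_over_displacement_threshold_sketch(joint_xy, threshold):
    ##joint_xy: (n_frames, 2) positions of one joint; return the indices of
    ##frames whose displacement from the previous frame exceeds threshold
    steps = np.linalg.norm(np.diff(joint_xy, axis=0), axis=1)
    return np.where(steps > threshold)[0] + 1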


def feature_extraction_video_traj(file_traj):
    ##divide image into patches(polygons) and get the positions of each one
    global scene  ##scene is expected to be set at module level elsewhere
    #scene = np.zeros((414,512),dtype=np.uint8)
    #scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/subject4_1834.jpg')
    #scene = cv2.imread('D:/experiment_data/subject_20/388.jpg')

    list_poly = my_img_proc.divide_image(scene)

    ##check patches are correct
    # for rect in list_poly:
    #     cv2.rectangle(scene, (int(rect.vertices[1][0]), int(rect.vertices[1][1])),
    #                   (int(rect.vertices[3][0]), int(rect.vertices[3][1])), (0, 0, 0))
    # cv2.imshow('ciao',scene)
    # cv2.waitKey(0)


    ##--------------Pre-Processing----------------##
    skeleton_data = xml_parser(file_traj)

    ##reliability method
    #measure_joints_accuracy(skeleton_data)
Code example #4

import numpy as np

import data_organizer
import img_processing
import AE_rec



feature_p_1 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_task_l2_new_realCoordinates.txt')
feature_p_2 = data_organizer.load_matrix_pickle('C:/Users/dario.dotti/Desktop/Hier_AE_deliverable/head_joint_id1/feature_matrix_participant_master_task_l2_new_realCoordinates.txt')


##the first 19 (resp. 27) entries hold feature matrices; the rest hold real coordinates
feature_p = feature_p_1[:19] + feature_p_2[:27]
real_coord = feature_p_1[19:] + feature_p_2[27:]
list_poly = img_processing.divide_image(np.zeros((414, 512), dtype=np.uint8))


space_features = []
for i_p in range(len(real_coord)):
    for i_task in range(len(real_coord[i_p])):
        for i_slice in range(len(real_coord[i_p][i_task])):
            votes = np.zeros((16, 3))

            for p in range(len(real_coord[i_p][i_task][i_slice])):
                ##each per-person entry is one flat array laid out as [x..., y..., z...]
                size_per_frame = len(real_coord[i_p][i_task][i_slice][p]) // 3
                x = real_coord[i_p][i_task][i_slice][p][:size_per_frame]
                y = real_coord[i_p][i_task][i_slice][p][size_per_frame:(size_per_frame * 2)]
                z = real_coord[i_p][i_task][i_slice][p][(size_per_frame * 2):]
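
A tiny self-contained check of that flat [x | y | z] layout, with made-up numbers:

entry = np.array([1, 2, 3, 10, 20, 30, 100, 200, 300])  ##hypothetical 3-frame entry
n = len(entry) // 3
x, y, z = entry[:n], entry[n:2 * n], entry[2 * n:]
assert list(x) == [1, 2, 3] and list(z) == [100, 200, 300]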

Code example #5
    # data_organizer.save_matrix_pickle(training_bayes_vector,
    #                                   'C:/Users/dario.dotti/Documents/data_for_personality_exp/computed_matrix/bayes_vector.txt')


def extract_traj_word_temporal_window(participant_data, n_layer):
    #scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
    scene = np.zeros((414, 512, 3), dtype=np.uint8)
    scene += 255

    list_poly = img_processing.divide_image(scene)


    size_mask = 20

    #max_step = np.sqrt(np.power(((size_mask - 3) - 0), 2) + np.power(((size_mask - 3) - 0), 2)) * 1.3
    max_step = 23

    matrix_features_participant = []
    matrix_original_points_participants = []
    matrix_real_coord_participants = []

    for i_task, task in enumerate(participant_data):
        print('task:', i_task)
        if len(task) == 0:
            continue