Example #1
def get_distances_between_points(participant_data):

    ## get max distances in every time slice
    list_max_dist = []
    list_dist_two_points = []
    for task in participant_data:
        for time_slice in task[:4]:
            if len(time_slice) <= 1: continue
            flat_list = list(time_slice)
            x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)

            ## distance of every point from the first point of the trajectory
            distances = [np.sqrt(((x_f[0] - x_f[i_p]) ** 2) + ((y_f[0] - y_f[i_p]) ** 2)) for i_p in range(1, len(y_f))]
            index_max = int(np.argmax(distances))


            ## distance between every pair of consecutive points
            dist_two_points = [np.sqrt(((x_f[i_p] - x_f[i_p+1]) ** 2) + ((y_f[i_p] - y_f[i_p+1]) ** 2)) for i_p in range(0, len(y_f)-1)]
            index_max_two_p = int(np.argmax(dist_two_points))

            list_dist_two_points.append(dist_two_points[index_max_two_p])
            list_max_dist.append(distances[index_max])

    #return np.array(list_max_dist).reshape(-1,1)

    ## Eliminate outliers (bigger than 1.5x the median of the ten largest values) and set the window size
    best_ten = np.sort(list_max_dist)[::-1][:10]
    print best_ten
    med = np.median(best_ten)
    new_data = [x for x in list_max_dist if x < (med * 1.5)]
    w_s = int(np.max(new_data))
    print 'window size: ', w_s

    ## Same filtering on consecutive-point distances (threshold 3x the median) to set the max step
    best_ten = np.sort(list_dist_two_points)[::-1][:10]
    med = np.median(best_ten)
    new_data = [x for x in list_dist_two_points if x < (med * 3)]
    max_step = int(np.max(new_data))
    print 'max_step: ', max_step

    return w_s, max_step
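
The same trim-by-median idiom appears twice above, so it can be factored out. Below is a minimal sketch of that helper; the name trim_outliers_by_median and its defaults are mine, not part of the original module.

def trim_outliers_by_median(values, factor=1.5, n_best=10):
    ## median of the n_best largest values
    med = np.median(np.sort(values)[::-1][:n_best])
    ## keep only values below factor * median
    return [v for v in values if v < (med * factor)]

## usage mirroring the two blocks above:
# w_s = int(np.max(trim_outliers_by_median(list_max_dist, factor=1.5)))
# max_step = int(np.max(trim_outliers_by_median(list_dist_two_points, factor=3)))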
Example #2
    for i_task, task in enumerate(participant_data):
        print 'task: ', i_task
        if len(task) == 0: continue

        n_sec_data = []
        n_sec_path_features = []


        for n_slice in range(0, len(task)):
            if len(task[n_slice]) <= 1: continue
            #print 'n_slice ', n_slice

            flat_list = list(task[n_slice])

            ##### arms  ########
            shoulder_left_x, shoulder_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=10)
            shoulder_right_x, shoulder_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=6)

            elbow_left_x, elbow_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=11)
            elbow_right_x, elbow_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=7)

            wrist_left_x, wrist_left_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=12)
            wrist_right_x, wrist_right_y, zs, ids = img_processing.get_coordinate_points(flat_list, joint_id=8)

            #### spinal ###
            head_x, head_y, head_z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)
            spineBase_x, spineBase_y, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=4)
            #### feet ###
            foot_x, foot_y, foot_z, ids = img_processing.get_coordinate_points(flat_list, joint_id=17)

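Example #2 extracts each joint with a separate get_coordinate_points call. A table-driven sketch of the same step, assuming only the joint ids visible above (the dict and helper name are mine):

JOINT_IDS = {'head': 1, 'spine_base': 4, 'shoulder_right': 6, 'elbow_right': 7,
             'wrist_right': 8, 'shoulder_left': 10, 'elbow_left': 11,
             'wrist_left': 12, 'foot': 17}

def get_all_joints(flat_list):
    ## {joint_name: (xs, ys, zs, ids)} for every joint of interest
    return dict((name, img_processing.get_coordinate_points(flat_list, joint_id=jid))
                for name, jid in JOINT_IDS.items())
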
Example #3
def histograms_of_oriented_trajectories(list_poly, time_slices):
    #print kinect_max_distance, kinect_min_distance

    hot_all_data_matrix = []
    hot_all_data_matrix_append = hot_all_data_matrix.append

    for i in xrange(0, len(time_slices)):
        ##Checking the start time of every time slice
        if len(time_slices[i]) > 1:
            print 'start time: %s' % time_slices[i][0][0][1]
        else:
            print 'no data in this time slice'
            continue

        # get x,y,z of every traj point after smoothing process
        x_filtered, y_filtered, zs, ids = img_processing.get_coordinate_points(
            time_slices[i], joint_id=3)

        # initialize histogram of oriented tracklets
        hot_matrix = []

        for p in xrange(0, len(list_poly)):
            tracklet_in_cube_f = []
            tracklet_in_cube_c = []
            tracklet_in_cube_middle = []
            tracklet_in_cube_append_f = tracklet_in_cube_f.append
            tracklet_in_cube_append_c = tracklet_in_cube_c.append
            tracklet_in_cube_append_middle = tracklet_in_cube_middle.append

            for ci in xrange(0, len(x_filtered)):
                if np.isinf(x_filtered[ci]) or np.isinf(y_filtered[ci]):
                    continue

                # 2d polygon
                if list_poly[p].contains_point(
                    (int(x_filtered[ci]), int(y_filtered[ci]))):
                    ## 3d cube close to the camera
                    if zs[ci] <= (kinect_min_distance + cube_size):

                        tracklet_in_cube_append_c(
                            [x_filtered[ci], y_filtered[ci], ids[ci]])

                    elif zs[ci] < (kinect_min_distance +
                                   (cube_size * 2)):  ## middle 3d cube
                        tracklet_in_cube_append_middle(
                            [x_filtered[ci], y_filtered[ci], ids[ci]])

                    else:  ## 3d cube far from the camera
                        tracklet_in_cube_append_f(
                            [x_filtered[ci], y_filtered[ci], ids[ci]])

            print len(tracklet_in_cube_c), len(tracklet_in_cube_middle), len(
                tracklet_in_cube_f)

            for three_d_poly in [
                    tracklet_in_cube_c, tracklet_in_cube_middle,
                    tracklet_in_cube_f
            ]:
                if len(three_d_poly) > 0:

                    ## for tracklet in cuboids compute HOT following paper
                    hot_single_poly = img_processing.histogram_oriented_tracklets(
                        three_d_poly)

                    ## compute hot+curvature
                    # hot_single_poly = my_img_proc.histogram_oriented_tracklets_plus_curvature(three_d_poly)

                else:
                    hot_single_poly = np.zeros(24)

                ##add to general matrix
                if len(hot_matrix) > 0:
                    hot_matrix = np.hstack((hot_matrix, hot_single_poly))
                else:
                    hot_matrix = hot_single_poly

        hot_all_data_matrix_append(hot_matrix)

    ## normalize the final matrix
    normalized_finalMatrix = np.array(
        normalize(np.array(hot_all_data_matrix), norm='l2'))

    ## TODO: add extra bin containing time

    ## return patient id (ids comes from the last non-empty time slice)
    patient_id = ids[0]

    print 'HOT final matrix size: ', normalized_finalMatrix.shape

    return normalized_finalMatrix, patient_id
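
A sketch of how this function might be driven. The contains_point used above is matplotlib's Path API, so the grid below is built with Path; the grid geometry and the placeholder globals are my own assumptions, since kinect_min_distance and cube_size are module-level in the original.

from matplotlib.path import Path

## hypothetical 2x2 grid of 2d polygons over a 512x424 depth frame
list_poly = [Path([(gx, gy), (gx + 256, gy), (gx + 256, gy + 212), (gx, gy + 212)])
             for gx in (0, 256) for gy in (0, 212)]

# kinect_min_distance, cube_size = 1.0, 1.433  # placeholder values
# hot_features, patient_id = histograms_of_oriented_trajectories(list_poly, time_slices)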
Example #4
def histograms_of_oriented_trajectories(list_poly,time_slices):

    hot_all_data_task_time_slices = []

    for i_task,task in enumerate(time_slices):
        #if i_task != 2: continue

        hot_all_data_matrix = []
        hot_all_data_matrix_append = hot_all_data_matrix.append

        print '###########task########### ',i_task
        for i in xrange(0,len(task)):
            ##Checking the start time of every time slice
            if(len(task[i])>1):
                print 'start time: %s' %task[i][0][0][1].split(' ')[3]
            else:
                print 'no data in this time slice'

                continue
            #get x,y,z of every traj point after smoothing process
            x_filtered,y_filtered,zs,ids = my_img_proc.get_coordinate_points(task[i],joint_id=1)#get all position of the head joint id =1

            #initialize histogram of oriented tracklets
            hot_matrix = []

            temp_img = scene.copy()

            for p in xrange(0,len(list_poly)):
                tracklet_in_cube_f = []
                tracklet_in_cube_c = []
                tracklet_in_cube_middle = []
                tracklet_in_cube_append_f = tracklet_in_cube_f.append
                tracklet_in_cube_append_c = tracklet_in_cube_c.append
                tracklet_in_cube_append_middle = tracklet_in_cube_middle.append

                for ci in xrange(0,len(x_filtered)):
                    #2d polygon
                    if list_poly[p].contains_point((int(x_filtered[ci]),int(y_filtered[ci]))):
                        ## 3d cube close to the camera
                        if zs[ci] < (kinect_max_distance-(1.433*2)):
                            #print 'close to kinect'
                            tracklet_in_cube_append_c([x_filtered[ci],y_filtered[ci],ids[ci]])

                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(255,0,0),-1)

                        ## middle cube; <= closes the boundary gap left by the original strict inequalities
                        elif zs[ci] <= (kinect_max_distance-1.433):
                            #print 'middle'
                            tracklet_in_cube_append_middle([x_filtered[ci],y_filtered[ci],ids[ci]])

                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(0,255,0),-1)

                        else: ##3d cube far from the camera
                            #print 'faraway to kinect'
                            tracklet_in_cube_append_f([x_filtered[ci],y_filtered[ci],ids[ci]])

                            cv2.circle(temp_img,(int(x_filtered[ci]),int(y_filtered[ci])),2,(0,0,255),-1)


                for three_d_poly in [tracklet_in_cube_c,tracklet_in_cube_middle,tracklet_in_cube_f]:

                    if len(three_d_poly)>0:

                        ## for tracklet in cuboids compute HOT following paper
                        hot_single_poly = my_img_proc.histogram_oriented_tracklets(three_d_poly)

                        ## compute hot+curvature
                        #hot_single_poly = my_img_proc.histogram_oriented_tracklets_plus_curvature(three_d_poly)

                    else:
                        hot_single_poly = np.zeros((24))

                    ##add to general matrix
                    if len(hot_matrix)>0:
                        hot_matrix = np.hstack((hot_matrix,hot_single_poly))
                    else:
                        hot_matrix = hot_single_poly

            #time = time_slices[i][0][0][1].split(' ')[3].split(':')
            #filename = 'C:/Users/dario.dotti/Documents/time_windows_HOT/'+subjectID+'_'+time[0]+'_'+time[1]+'_'+time[2]+'.jpg'
            #cv2.imwrite(filename,temp_img)

            ##Test cluster
            # load cluster data
            # cluster_model = data_org.load_matrix_pickle(
            #     'C:/Users/dario.dotti/Documents/bow_experiment_data/cl_30_kmeans_model_2secWindow_newVersion.txt')
            # keys_labels = data_org.load_matrix_pickle(
            #     'C:/Users/dario.dotti/Documents/bow_experiment_data/cluster_30_kmeans_word_newVersion.txt')
            #
            # similar_word = cluster_model.predict(np.array(hot_matrix).reshape(1, -1))
            # print 's_w ',similar_word
            # if similar_word[0] == 3:
            #     cv2.imshow('ciao',temp_img)
            #     cv2.waitKey(0)
            #     continue

            hot_all_data_matrix_append(hot_matrix)


        ## normalize the final matrix
        normalized_finalMatrix = np.array(normalize(np.array(hot_all_data_matrix),norm='l1'))

        hot_all_data_task_time_slices.append(normalized_finalMatrix)

    #print 'final matrix size:'
    #print np.array(normalized_finalMatrix).shape


    ##add extra bin with hours
    # hs = np.zeros((len(time_slices),1))
    #
    # for i,t in enumerate(time_slices):
    #
    #     if len(t) > 1:
    #         hs[i] = int(t[0][0][1].split(' ')[3].split(':')[0])
    #     else:
    #         hs[i] = hs[i-1]
    #
    #
    # normalized_finalMatrix = np.hstack((normalized_finalMatrix,hs))

    # print 'matrix with extra bin'
    # print np.array(hot_all_data_matrix).shape

    return hot_all_data_task_time_slices
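
Both HOT variants split depth into three bands with hard-coded inequalities. A small helper keeps the banding and its boundary handling in one place; the function name and signature are mine, with behavior matching the corrected conditions in Example #4.

def depth_band(z, kinect_max_distance, band=1.433):
    ## classify a depth value into one of three cuboids along the z axis
    if z < kinect_max_distance - (band * 2):
        return 'close'
    elif z <= kinect_max_distance - band:
        return 'middle'
    return 'far'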
Example #5
def extract_traj_word_spatio_temporal_grid(participant_data, n_layer):

    create_activation_layer2 = True
    create_bayes_vector = False


    #scene = cv2.imread('C:/Users/dario.dotti/Documents/Datasets/my_dataset/wandering_dataset_um/exp_scene_depth.jpg')
    scene = np.full((414, 512, 3), 255, dtype=np.uint8)  # white canvas


    training_bayes_vector = []


    for i_task, task in enumerate(participant_data):
        print 'task: ', i_task
        if len(task) == 0: continue

        if n_layer == 1:
            matrix_features = []
        elif n_layer == 2:
            matrix_activations = []
            matrix_orig_points = []
            training_bayes_vector_task = []


        for n_slice in range(0, len(task)):
            print 'n_slice ', n_slice
            flat_list = list(task[n_slice])

            video_traj.draw_joints_and_tracks(flat_list, [])

            # get x,y,z of every traj point after smoothing process
            x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)

            ########### start hierarchical autoencoder learning #######################

            size_mask = 18
            ## diagonal of a (size_mask - 3) square, with 30% slack
            max_step = np.sqrt(2) * (size_mask - 3) * 1.3

            start_t = 0
            first_point_traj = [x_f[start_t], y_f[start_t]]

            temp_scene = scene.copy()

            labels_history = []
            directions_history = []
            activation_history = []
            orig_points_history = []


            for i_p in xrange(1, len(x_f)):

                ##accumulate traj points until the distance between the first point and current point is enough for the grid
                d = np.sqrt(((x_f[i_p] - first_point_traj[0]) ** 2) + ((y_f[i_p] - first_point_traj[1]) ** 2))

                ##if the distance is enough compute the grid starting from the first point until the current point

                if abs(d - max_step) < 8:

                    xs_untilNow = x_f[start_t:i_p]
                    ys_untilNow = y_f[start_t:i_p]

                    # print len(xs_untilNow), len(ys_unilNow)

                    if len(xs_untilNow) > 30:
                        # ##update the beginning of the trajectory
                        start_t = i_p - 1
                        first_point_traj = [x_f[start_t], y_f[start_t]]
                        continue

                    ##get directions of the traj chunck using first and last point
                    # direction = get_direction_traj([x_f[start_t],y_f[start_t]],[x_f[i_p],y_f[i_p]])
                    directions = hs.get_directions_traj(xs_untilNow, ys_untilNow)
                    if directions[0] == -180: directions[0] = 180
                    directions_history.append(directions[0])

                    ##create grid according to the direction of the trajectory
                    rects_in_grid = hs.create_grid(xs_untilNow, ys_untilNow, size_mask, directions, temp_scene)

                    ##compute the features from traj chuncks in rect
                    traj_features, orig_points = hs.transform_traj_in_pixel_activation(rects_in_grid, xs_untilNow,
                                                                                       ys_untilNow, size_mask, max_step)

                    if n_layer == 1:
                        ######### store final matrix #################
                        if len(matrix_features) > 0:
                            matrix_features = np.vstack((matrix_features, traj_features.reshape((1, -1))))
                        else:
                            matrix_features = traj_features.reshape((1, -1))

                    elif n_layer == 2:

                        orig_points_history.append(orig_points)

                        activation = encode_features_using_AE_layer1_cluster_activation(traj_features, 'layer2')

                        activation_history.append(activation)

                            if len(activation_history) == 3:

                            cv2.imshow('scene', temp_scene)
                            cv2.waitKey(0)

                            if create_activation_layer2:
                                ##extract features for AE layer2
                                matrix_activation_l2, original_points_l2 = hs.create_vector_activations_layer_2(
                                    directions_history, activation_history, orig_points_history)

                                ##save activations for layer2
                                if len(matrix_activations) > 0: matrix_activations = np.vstack((matrix_activations, matrix_activation_l2))
                                else: matrix_activations = matrix_activation_l2

                                ##save original for layer2
                                if len(matrix_orig_points) > 0: matrix_orig_points = np.vstack((matrix_orig_points, original_points_l2))
                                else: matrix_orig_points = original_points_l2

                            elif create_bayes_vector:
                                for a in activation_history:
                                    label = cluster_model_l1.predict(a.reshape((1, -1)))[0]
                                    labels_history.append(label)

                                ###create vector with two info label
                                vector_bayes = hs.create_vector_for_bayesian_probability(
                                    labels_history, directions_history, 3)

                                ## saving data for cluster prediction
                                if len(training_bayes_vector_task) > 0: training_bayes_vector_task = np.vstack((training_bayes_vector_task, vector_bayes))
                                else: training_bayes_vector_task = vector_bayes




                            ## refresh history
                            directions_history = []
                            activation_history = []
                            orig_points_history = []
                            labels_history = []


                    ##GENERAL FOR ALL THE LAYERS## Update the beginning of the trajectory
                    start_t = i_p - 1
                    first_point_traj = [x_f[start_t], y_f[start_t]]

            #training_bayes_vector.append(training_bayes_vector_task)
    #print matrix_activations.shape

    ##save matrix activation
    # data_organizer.save_matrix_pickle(matrix_activations,
    #                                   'C:/Users/dario.dotti/Documents/data_for_personality_exp/computed_matrix/matrix_activations_l2.txt')
    # ## save original points
    # data_organizer.save_matrix_pickle(matrix_orig_points,
    #                                   'C:/Users/dario.dotti/Documents/data_for_personality_exp/computed_matrix/matrix_orig_points_l2.txt')

    print np.array(training_bayes_vector).shape
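
Example #5 repeats an initialize-or-vstack pattern in four places (matrix_features, matrix_activations, matrix_orig_points, training_bayes_vector_task). Collecting rows in a list and stacking once avoids the copy np.vstack makes on every call; a sketch of that alternative (the helper name is mine):

def stack_rows(rows):
    ## rows: list of equal-length 1d feature vectors collected in a loop
    return np.vstack([np.asarray(r).reshape(1, -1) for r in rows])

## usage sketch:
# collected = []
# ...inside the loop: collected.append(traj_features)
# matrix_features = stack_rows(collected) if collected else np.empty((0, 0))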
Example #6
        matrix_activations_task = []
        matrix_orig_points_task = []
        matrix_real_coord = []

        temp_scene = scene.copy()

        for n_slice in range(0, len(task)):
            if len(task[n_slice]) <= 1: continue
            #print 'n_slice ', n_slice

            flat_list = list(task[n_slice])

            #video_traj.draw_joints_and_tracks(flat_list, [])

            # get x,y,z of every traj point after smoothing process
            x_f, y_f, z, ids = img_processing.get_coordinate_points(flat_list, joint_id=1)

            # for point in range(len(x_f)):
            #     cv2.circle(temp_scene,(x_f[point],y_f[point]),1,(0,0,255),-1)
            # cv2.imshow('ciao', temp_scene)
            # cv2.waitKey(0)

            directions = hs.get_directions_traj(x_f, y_f)
            if directions[0] == -180: directions[0] = 180
            directions_history.append(directions[0])

            distances = [np.sqrt(((x_f[0] - x_f[i_p]) ** 2) + ((y_f[0] - y_f[i_p]) ** 2)) for i_p in range(1, len(y_f))]
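
The point-to-first-point distances here (and in Example #1) vectorize directly in numpy instead of a Python list comprehension. A sketch, assuming x_f and y_f are numeric sequences as returned by get_coordinate_points:

x_arr, y_arr = np.asarray(x_f), np.asarray(y_f)
## distance of every point from the first point (same values as the comprehension above)
distances = np.hypot(x_arr[1:] - x_arr[0], y_arr[1:] - y_arr[0])
## distance between consecutive points, as used in Example #1
steps = np.hypot(np.diff(x_arr), np.diff(y_arr))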