def computeGroundTrajectory(features, homography, timeInterval = None):
    '''Computes a trajectory for the set of features as the closest point to the ground
    using the homography in image space.

    At each instant of timeInterval, the feature whose projected y coordinate
    is largest (i.e. lowest in the image, hence presumably nearest the ground)
    supplies the trajectory position.

    Parameters
    ----------
    features : sequence of feature objects exposing getPositions(),
        getFirstInstant(), getLastInstant(), getPositionAtInstant()
    homography : 3x3 array-like passed to cvutils.projectArray
    timeInterval : interval with .first and .length(); must be provided
        (deriving it from the features is not implemented)

    Returns
    -------
    moving.Trajectory
    '''
    # Explicit None test: `not timeInterval` would also reject a valid
    # zero-length interval if the interval type is falsy when empty.
    if timeInterval is None:
        raise NotImplementedError('computing the time interval from the features is not implemented')

    # One row per feature, one column per instant; -1 marks instants where
    # the feature is absent (argmax will never pick them over a real y >= 0).
    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
    for i,f in enumerate(features):
        traj = f.getPositions().asArray()
        imgTraj = cvutils.projectArray(homography, traj)
        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]

    # Per instant, index of the feature lowest in the image.
    indices = np.argmax(yCoordinates,0)
    newTraj = moving.Trajectory()
    for j,idx in enumerate(indices):
        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))

    return newTraj
def position_list(data_dict, first_obj_center, first_matched_to_box_id,
                  frame_num, box_id, obj_center_list_x, obj_center_list_y,
                  homography, iteration):
    '''Walks the chain of matched boxes backwards in time, accumulating
    object center coordinates (projected through `homography` when given).

    Parameters
    ----------
    data_dict : {frame_num: {box_id: {'center': [x, y],
                 'matched_to_box_id': int, ...}}}
    first_obj_center : [x, y] to use for the first step instead of reading
        data_dict, or [] to read the center from data_dict
    first_matched_to_box_id : matched box id paired with first_obj_center
        (ignored when first_obj_center is [])
    frame_num, box_id : starting frame and box
    obj_center_list_x, obj_center_list_y : accumulators (mutated, returned)
    homography : 3x3 array for cvutils.projectArray, or None to keep
        image coordinates
    iteration : maximum number of additional backward steps

    Returns
    -------
    (obj_center_list_x, obj_center_list_y, remaining_iterations)
    '''
    if first_obj_center != []:
        obj_center = first_obj_center
        matched_to_box_id = first_matched_to_box_id
    else:
        obj_center = data_dict[frame_num][box_id]['center']
        matched_to_box_id = data_dict[frame_num][box_id]['matched_to_box_id']
    if homography is not None:
        projected = cvutils.projectArray(homography,
                                         np.array(obj_center).reshape(2, 1))
        obj_center_list_x.append(projected[0][0])
        obj_center_list_y.append(projected[1][0])
    else:
        # BUG FIX: previously the center was silently dropped when no
        # homography was provided, leaving the accumulators empty.
        obj_center_list_x.append(obj_center[0])
        obj_center_list_y.append(obj_center[1])
    # Stop at the head of the match chain (-1) or when the window is used up.
    if matched_to_box_id == -1 or iteration == 0:
        return obj_center_list_x, obj_center_list_y, iteration
    iteration -= 1
    obj_center_list_x, obj_center_list_y, iteration = position_list(
        data_dict, [], [], frame_num - 1, matched_to_box_id, obj_center_list_x,
        obj_center_list_y, homography, iteration)
    return obj_center_list_x, obj_center_list_y, iteration
def computeGroundTrajectory(features, homography, timeInterval=None):
    '''Computes a trajectory for the set of features as the closest point to
    the ground, using the homography in image space.

    The feature with the largest projected y coordinate at each instant
    (lowest in the image, hence presumably nearest the ground) supplies the
    trajectory position for that instant.

    Parameters
    ----------
    features : sequence of feature objects (getPositions, getFirstInstant,
        getLastInstant, getPositionAtInstant)
    homography : 3x3 array-like for cvutils.projectArray
    timeInterval : interval with .first and .length(); required, since
        computing it from the features is not implemented

    Returns
    -------
    moving.Trajectory
    '''
    # Test identity against None: a falsy (e.g. zero-length) interval
    # argument must not be confused with "no interval supplied".
    if timeInterval is None:
        raise NotImplementedError(
            'computing the time interval from the features is not implemented')

    # Rows: features; columns: instants; -1 where a feature is absent, so
    # argmax never selects a missing feature over a real y >= 0.
    yCoordinates = -np.ones((len(features), int(timeInterval.length())))
    for i, f in enumerate(features):
        traj = f.getPositions().asArray()
        imgTraj = cvutils.projectArray(homography, traj)
        yCoordinates[i,
                     f.getFirstInstant() -
                     timeInterval.first:f.getLastInstant() + 1 -
                     timeInterval.first] = imgTraj[1, :]

    # Per instant, pick the feature lowest in the image.
    indices = np.argmax(yCoordinates, 0)
    newTraj = moving.Trajectory()
    for j, idx in enumerate(indices):
        newTraj.addPosition(
            features[idx].getPositionAtInstant(j + timeInterval.first))

    return newTraj
#! /usr/bin/env python

import sys

import matplotlib.pyplot as plt
import numpy as np
import cv2

import cvutils
import utils

# Rescale a homography computed for one image size so that it applies to a
# resized image: take four reference image points, project them to world
# space with the old homography, scale the image points to the new size,
# then fit a fresh homography between the scaled points and the same
# world points.
if len(sys.argv) < 4:
    print(
        'Usage: {} homography_filename original_size new_size (size can be width or height)'
        .format(sys.argv[0]))
    sys.exit()

homography = np.loadtxt(sys.argv[1])

# Four arbitrary image-space reference points forming a square.
imgPoints = np.array([[10, 10], [10, 20], [20, 20], [20, 10]])

# World-space positions of the reference points under the old homography.
wldPoints = cvutils.projectArray(homography, imgPoints.T).T

newSize = float(sys.argv[3])
originalSize = float(sys.argv[2])
scale = newSize / originalSize
imgPoints = imgPoints * scale

# Fit the new image->world homography for the resized image.
newHomography, mask = cv2.findHomography(imgPoints, wldPoints)

np.savetxt(sys.argv[1] + '.new', newHomography)
def put_in_table(data_dict, frame_data_dict, frame_num, box_id, obj_class_dict,
                 homography, box_id_new):
    '''Smooths one detected box over a sliding window and writes it into the
    objects / objects_features / positions / velocities database tables.

    Each box contributes four point features (corners tl, tr, bl, br) stored
    under feature ids box_id_new*4 + {0, 1, 2, 3}.

    NOTE(review): relies on module-level names not visible in this chunk —
    `window_size`, `fps`, `c` (DB cursor), `gen2_tools`, `filter_list_mean`,
    and a `position_list` variant returning FIVE values (position and
    velocity histories). Confirm against the full module; the 9-argument,
    3-value `position_list` defined elsewhere in this file is a different
    version and is NOT compatible with these calls.

    Parameters
    ----------
    data_dict : per-frame dict of per-box dicts (full detection history)
    frame_data_dict : data_dict[frame_num]; mutated in place with corner,
        filtered-corner and velocity entries
    frame_num, box_id : current frame and box
    obj_class_dict : {object_id: road user type num}; mutated in place
    homography : 3x3 image-to-world projection, or None to keep image coords
    box_id_new : next free feature-group id

    Returns
    -------
    box_id_new, incremented by one when the box was written to the database
    '''
    obj_id = frame_data_dict[box_id]['id']
    obj_class = frame_data_dict[box_id]['class']
    obj_left = frame_data_dict[box_id]['left']
    obj_right = frame_data_dict[box_id]['right']
    obj_top = frame_data_dict[box_id]['top']
    obj_bottom = frame_data_dict[box_id]['bottom']
    matched_to_box_id = frame_data_dict[box_id]['matched_to_box_id']
    number_of_matched_boxes = frame_data_dict[box_id][
        'number_of_matched_boxes']
    # Raw corner coordinates of the bounding box in image space.
    proj_x_tl = obj_left
    proj_y_tl = obj_top
    proj_x_tr = obj_right
    proj_y_tr = obj_top
    proj_x_bl = obj_left
    proj_y_bl = obj_bottom
    proj_x_br = obj_right
    proj_y_br = obj_bottom
    frame_data_dict[box_id]['tl'] = [proj_x_tl, proj_y_tl]
    frame_data_dict[box_id]['tr'] = [proj_x_tr, proj_y_tr]
    frame_data_dict[box_id]['bl'] = [proj_x_bl, proj_y_bl]
    frame_data_dict[box_id]['br'] = [proj_x_br, proj_y_br]

    # First appearance of the object: no history to smooth against, so
    # project the raw corners and zero the velocities.
    if matched_to_box_id == -1:
        if homography is not None:
            filtered_proj_tl = cvutils.projectArray(
                homography,
                np.array([proj_x_tl, proj_y_tl]).reshape(2, 1))
            filtered_proj_x_tl = filtered_proj_tl[0][0]
            filtered_proj_y_tl = filtered_proj_tl[1][0]
            filtered_proj_tr = cvutils.projectArray(
                homography,
                np.array([proj_x_tr, proj_y_tr]).reshape(2, 1))
            filtered_proj_x_tr = filtered_proj_tr[0][0]
            filtered_proj_y_tr = filtered_proj_tr[1][0]
            filtered_proj_bl = cvutils.projectArray(
                homography,
                np.array([proj_x_bl, proj_y_bl]).reshape(2, 1))
            filtered_proj_x_bl = filtered_proj_bl[0][0]
            filtered_proj_y_bl = filtered_proj_bl[1][0]
            filtered_proj_br = cvutils.projectArray(
                homography,
                np.array([proj_x_br, proj_y_br]).reshape(2, 1))
            filtered_proj_x_br = filtered_proj_br[0][0]
            filtered_proj_y_br = filtered_proj_br[1][0]
        else:
            filtered_proj_x_tl = proj_x_tl
            filtered_proj_y_tl = proj_y_tl
            filtered_proj_x_tr = proj_x_tr
            filtered_proj_y_tr = proj_y_tr
            filtered_proj_x_bl = proj_x_bl
            filtered_proj_y_bl = proj_y_bl
            filtered_proj_x_br = proj_x_br
            filtered_proj_y_br = proj_y_br
        vel_x_tl = 0
        vel_y_tl = 0
        vel_x_tr = 0
        vel_y_tr = 0
        vel_x_bl = 0
        vel_y_bl = 0
        vel_x_br = 0
        vel_y_br = 0
        vtl_list_x = [0]
        vtl_list_y = [0]
        vtr_list_x = [0]
        vtr_list_y = [0]
        vbl_list_x = [0]
        vbl_list_y = [0]
        vbr_list_x = [0]
        vbr_list_y = [0]
    else:
        # Box has a predecessor: gather per-corner position/velocity
        # histories over the match chain, then smooth with filter_list_mean.
        tl_list_x, tl_list_y, vtl_list_x, vtl_list_y, _ = position_list(
            data_dict, [proj_x_tl, proj_y_tl], matched_to_box_id, frame_num,
            box_id, [], [], [], [], window_size, 'tl')
        tr_list_x, tr_list_y, vtr_list_x, vtr_list_y, _ = position_list(
            data_dict, [proj_x_tr, proj_y_tr], matched_to_box_id, frame_num,
            box_id, [], [], [], [], window_size, 'tr')
        bl_list_x, bl_list_y, vbl_list_x, vbl_list_y, _ = position_list(
            data_dict, [proj_x_bl, proj_y_bl], matched_to_box_id, frame_num,
            box_id, [], [], [], [], window_size, 'bl')
        br_list_x, br_list_y, vbr_list_x, vbr_list_y, _ = position_list(
            data_dict, [proj_x_br, proj_y_br], matched_to_box_id, frame_num,
            box_id, [], [], [], [], window_size, 'br')
        # Smooth in image space first, then project the mean point.
        if homography is not None:
            filtered_proj_tl = cvutils.projectArray(
                homography,
                np.array(
                    [filter_list_mean(tl_list_x),
                     filter_list_mean(tl_list_y)]).reshape(2, 1))
            filtered_proj_x_tl = filtered_proj_tl[0][0]
            filtered_proj_y_tl = filtered_proj_tl[1][0]
            filtered_proj_tr = cvutils.projectArray(
                homography,
                np.array(
                    [filter_list_mean(tr_list_x),
                     filter_list_mean(tr_list_y)]).reshape(2, 1))
            filtered_proj_x_tr = filtered_proj_tr[0][0]
            filtered_proj_y_tr = filtered_proj_tr[1][0]
            filtered_proj_bl = cvutils.projectArray(
                homography,
                np.array(
                    [filter_list_mean(bl_list_x),
                     filter_list_mean(bl_list_y)]).reshape(2, 1))
            filtered_proj_x_bl = filtered_proj_bl[0][0]
            filtered_proj_y_bl = filtered_proj_bl[1][0]
            filtered_proj_br = cvutils.projectArray(
                homography,
                np.array(
                    [filter_list_mean(br_list_x),
                     filter_list_mean(br_list_y)]).reshape(2, 1))
            filtered_proj_x_br = filtered_proj_br[0][0]
            filtered_proj_y_br = filtered_proj_br[1][0]
        else:
            filtered_proj_x_tl = filter_list_mean(tl_list_x)
            filtered_proj_y_tl = filter_list_mean(tl_list_y)
            filtered_proj_x_tr = filter_list_mean(tr_list_x)
            filtered_proj_y_tr = filter_list_mean(tr_list_y)
            filtered_proj_x_bl = filter_list_mean(bl_list_x)
            filtered_proj_y_bl = filter_list_mean(bl_list_y)
            filtered_proj_x_br = filter_list_mean(br_list_x)
            filtered_proj_y_br = filter_list_mean(br_list_y)
        # Velocity = displacement of each filtered corner since the matched
        # box in the previous frame (per-frame units).
        vel_x_tl = filtered_proj_x_tl - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_tl'][0]
        vel_y_tl = filtered_proj_y_tl - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_tl'][1]
        vel_x_tr = filtered_proj_x_tr - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_tr'][0]
        vel_y_tr = filtered_proj_y_tr - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_tr'][1]
        vel_x_bl = filtered_proj_x_bl - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_bl'][0]
        vel_y_bl = filtered_proj_y_bl - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_bl'][1]
        vel_x_br = filtered_proj_x_br - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_br'][0]
        vel_y_br = filtered_proj_y_br - data_dict[
            frame_num - 1][matched_to_box_id]['filtered_br'][1]

    frame_data_dict[box_id]['vtl'] = [vel_x_tl, vel_y_tl]
    frame_data_dict[box_id]['vtr'] = [vel_x_tr, vel_y_tr]
    frame_data_dict[box_id]['vbl'] = [vel_x_bl, vel_y_bl]
    frame_data_dict[box_id]['vbr'] = [vel_x_br, vel_y_br]
    # One velocity, averaged over all four corners' histories, is written
    # for every feature below.
    filter_v_x = filter_list_mean(vtl_list_x + vtr_list_x + vbl_list_x +
                                  vbr_list_x)
    filter_v_y = filter_list_mean(vtl_list_y + vtr_list_y + vbl_list_y +
                                  vbr_list_y)

    frame_data_dict[box_id]['filtered_tl'] = [
        filtered_proj_x_tl, filtered_proj_y_tl
    ]
    frame_data_dict[box_id]['filtered_tr'] = [
        filtered_proj_x_tr, filtered_proj_y_tr
    ]
    frame_data_dict[box_id]['filtered_bl'] = [
        filtered_proj_x_bl, filtered_proj_y_bl
    ]
    frame_data_dict[box_id]['filtered_br'] = [
        filtered_proj_x_br, filtered_proj_y_br
    ]

    # Register the object with an 'unknown' type on first sight.
    if obj_id not in obj_class_dict:
        obj_class_dict[obj_id] = gen2_tools.userType2Num('unknown')
        c.execute('INSERT INTO objects VALUES (?, ?, ?)',
                  [obj_id, gen2_tools.userType2Num('unknown'), 1])
    if number_of_matched_boxes >= fps / 2 and obj_class_dict[
            obj_id] != obj_class:  # each object has to be in the video for at least 0.5s (fps/2), otherwise it will not be classified and used
        obj_class_dict[obj_id] = obj_class
        c.execute('UPDATE objects SET road_user_type = ? WHERE object_id = ?',
                  [gen2_tools.userType2Num(obj_class), obj_id])
    # Only write once a full smoothing window is available; the smoothed
    # position is attributed to the window's center frame (frame_num - offset).
    if number_of_matched_boxes >= window_size:
        offset = int(np.ceil(window_size / 2.))
        c.execute('INSERT INTO objects_features VALUES (?, ?)',
                  [obj_id, box_id_new * 4 + 0])
        c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)', [
            box_id_new * 4 + 0, frame_num - offset, filtered_proj_x_tl,
            filtered_proj_y_tl
        ])
        c.execute(
            'INSERT INTO velocities VALUES (?, ?, ?, ?)',
            [box_id_new * 4 + 0, frame_num - offset, filter_v_x, filter_v_y])
        c.execute('INSERT INTO objects_features VALUES (?, ?)',
                  [obj_id, box_id_new * 4 + 1])
        c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)', [
            box_id_new * 4 + 1, frame_num - offset, filtered_proj_x_tr,
            filtered_proj_y_tr
        ])
        c.execute(
            'INSERT INTO velocities VALUES (?, ?, ?, ?)',
            [box_id_new * 4 + 1, frame_num - offset, filter_v_x, filter_v_y])
        c.execute('INSERT INTO objects_features VALUES (?, ?)',
                  [obj_id, box_id_new * 4 + 2])
        c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)', [
            box_id_new * 4 + 2, frame_num - offset, filtered_proj_x_bl,
            filtered_proj_y_bl
        ])
        c.execute(
            'INSERT INTO velocities VALUES (?, ?, ?, ?)',
            [box_id_new * 4 + 2, frame_num - offset, filter_v_x, filter_v_y])
        c.execute('INSERT INTO objects_features VALUES (?, ?)',
                  [obj_id, box_id_new * 4 + 3])
        c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)', [
            box_id_new * 4 + 3, frame_num - offset, filtered_proj_x_br,
            filtered_proj_y_br
        ])
        c.execute(
            'INSERT INTO velocities VALUES (?, ?, ?, ?)',
            [box_id_new * 4 + 3, frame_num - offset, filter_v_x, filter_v_y])
        box_id_new += 1
    return box_id_new
def put_in_table(data_dict, frame_data_dict, frame_num, box_id, obj_class_dict,
                 homography):
    '''Kalman-filter variant: projects the four box corners, runs
    kalman_estimate on them and writes the filtered corner positions and
    velocities into the objects / objects_features / positions / velocities
    database tables under feature ids box_id*4 + {0, 1, 2, 3}.

    NOTE(review): relies on module-level names not visible in this chunk —
    `c` (DB cursor), `fps`, `gen2_tools` and `kalman_estimate`; confirm
    against the full module. Also note `obj_center` is read but never used.

    Parameters
    ----------
    data_dict : per-frame dict of per-box dicts (full detection history)
    frame_data_dict : data_dict[frame_num]; entry for box_id is replaced by
        kalman_estimate's result (carrying 'state_tl/tr/bl/br')
    frame_num, box_id : current frame and box
    obj_class_dict : {object_id: road user type num}; mutated in place
    homography : 3x3 image-to-world projection, or None to keep image coords

    Returns
    -------
    None; all effects are on the database and the passed-in dicts.
    '''
    obj_id = frame_data_dict[box_id]['id']
    obj_class = frame_data_dict[box_id]['class']
    obj_left = frame_data_dict[box_id]['left']
    obj_right = frame_data_dict[box_id]['right']
    obj_top = frame_data_dict[box_id]['top']
    obj_bottom = frame_data_dict[box_id]['bottom']
    obj_center = frame_data_dict[box_id]['center']
    matched_to_box_id = frame_data_dict[box_id]['matched_to_box_id']
    number_of_matched_boxes = frame_data_dict[box_id][
        'number_of_matched_boxes']
    # Register the object with an 'unknown' type on first sight.
    if obj_id not in obj_class_dict:
        obj_class_dict[obj_id] = gen2_tools.userType2Num('unknown')
        c.execute('INSERT INTO objects VALUES (?, ?, ?)',
                  [obj_id, gen2_tools.userType2Num('unknown'), 1])

# TODO: a better way for (fps / 2)
    if number_of_matched_boxes >= fps / 2 and obj_class_dict[
            obj_id] != obj_class:  # each object has to be in the video for at least 0.5s (fps/2), otherwise it will not be classified and used
        obj_class_dict[obj_id] = obj_class
        c.execute('UPDATE objects SET road_user_type = ? WHERE object_id = ?',
                  [gen2_tools.userType2Num(obj_class), obj_id])
    # Four corner features per box: tl=+0, tr=+1, bl=+2, br=+3.
    c.execute('INSERT INTO objects_features VALUES (?, ?)',
              [obj_id, box_id * 4 + 0])
    c.execute('INSERT INTO objects_features VALUES (?, ?)',
              [obj_id, box_id * 4 + 1])
    c.execute('INSERT INTO objects_features VALUES (?, ?)',
              [obj_id, box_id * 4 + 2])
    c.execute('INSERT INTO objects_features VALUES (?, ?)',
              [obj_id, box_id * 4 + 3])
    # Project the corners to world space, or keep image coordinates when
    # no homography is available.
    if homography is not None:
        proj_tl = cvutils.projectArray(
            homography,
            np.array([obj_left, obj_top]).reshape(2, 1))
        proj_x_tl = proj_tl[0][0]
        proj_y_tl = proj_tl[1][0]
        proj_tr = cvutils.projectArray(
            homography,
            np.array([obj_right, obj_top]).reshape(2, 1))
        proj_x_tr = proj_tr[0][0]
        proj_y_tr = proj_tr[1][0]
        proj_bl = cvutils.projectArray(
            homography,
            np.array([obj_left, obj_bottom]).reshape(2, 1))
        proj_x_bl = proj_bl[0][0]
        proj_y_bl = proj_bl[1][0]
        proj_br = cvutils.projectArray(
            homography,
            np.array([obj_right, obj_bottom]).reshape(2, 1))
        proj_x_br = proj_br[0][0]
        proj_y_br = proj_br[1][0]
    else:
        proj_x_tl = obj_left
        proj_y_tl = obj_top
        proj_x_tr = obj_right
        proj_y_tr = obj_top
        proj_x_bl = obj_left
        proj_y_bl = obj_bottom
        proj_x_br = obj_right
        proj_y_br = obj_bottom
    # Update the Kalman state of each corner with this frame's measurement.
    frame_data_dict[box_id] = kalman_estimate(frame_data_dict[box_id],
                                              data_dict, frame_num,
                                              matched_to_box_id, proj_x_tl,
                                              proj_y_tl, proj_x_tr, proj_y_tr,
                                              proj_x_bl, proj_y_bl, proj_x_br,
                                              proj_y_br)
    # Each state is (x, y, vx, vy); components appear to be 1-element
    # sequences, hence the [0] indexing below — TODO confirm shape.
    kalman_x_tl, kalman_y_tl, kalman_vx_tl, kalman_vy_tl = frame_data_dict[
        box_id]['state_tl'].tolist()
    kalman_x_tr, kalman_y_tr, kalman_vx_tr, kalman_vy_tr = frame_data_dict[
        box_id]['state_tr'].tolist()
    kalman_x_bl, kalman_y_bl, kalman_vx_bl, kalman_vy_bl = frame_data_dict[
        box_id]['state_bl'].tolist()
    kalman_x_br, kalman_y_br, kalman_vx_br, kalman_vy_br = frame_data_dict[
        box_id]['state_br'].tolist()
    c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)',
              [box_id * 4 + 0, frame_num, kalman_x_tl[0], kalman_y_tl[0]])
    c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)',
              [box_id * 4 + 1, frame_num, kalman_x_tr[0], kalman_y_tr[0]])
    c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)',
              [box_id * 4 + 2, frame_num, kalman_x_bl[0], kalman_y_bl[0]])
    c.execute('INSERT INTO positions VALUES (?, ?, ?, ?)',
              [box_id * 4 + 3, frame_num, kalman_x_br[0], kalman_y_br[0]])
    c.execute('INSERT INTO velocities VALUES (?, ?, ?, ?)',
              [box_id * 4 + 0, frame_num, kalman_vx_tl[0], kalman_vy_tl[0]])
    c.execute('INSERT INTO velocities VALUES (?, ?, ?, ?)',
              [box_id * 4 + 1, frame_num, kalman_vx_tr[0], kalman_vy_tr[0]])
    c.execute('INSERT INTO velocities VALUES (?, ?, ?, ?)',
              [box_id * 4 + 2, frame_num, kalman_vx_bl[0], kalman_vy_bl[0]])
    c.execute('INSERT INTO velocities VALUES (?, ?, ?, ?)',
              [box_id * 4 + 3, frame_num, kalman_vx_br[0], kalman_vy_br[0]])
    # On the second observation, backfill the previous frame's (initially
    # unknown) velocities with the current estimate.
    if number_of_matched_boxes == 2:  # update the velocity for the first frame
        c.execute(
            'UPDATE velocities SET x_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vx_tl[0], matched_to_box_id * 4 + 0])
        c.execute(
            'UPDATE velocities SET y_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vy_tl[0], matched_to_box_id * 4 + 0])
        c.execute(
            'UPDATE velocities SET x_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vx_tr[0], matched_to_box_id * 4 + 1])
        c.execute(
            'UPDATE velocities SET y_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vy_tr[0], matched_to_box_id * 4 + 1])
        c.execute(
            'UPDATE velocities SET x_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vx_bl[0], matched_to_box_id * 4 + 2])
        c.execute(
            'UPDATE velocities SET y_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vy_bl[0], matched_to_box_id * 4 + 2])
        c.execute(
            'UPDATE velocities SET x_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vx_br[0], matched_to_box_id * 4 + 3])
        c.execute(
            'UPDATE velocities SET y_coordinate = ? WHERE trajectory_id = ?',
            [kalman_vy_br[0], matched_to_box_id * 4 + 3])
    return
# Example #7
# 0
 if args.undistort:
     [map1, map2] = cvutils.computeUndistortMaps(
         videoImg.shape[1], videoImg.shape[0],
         args.undistortedImageMultiplication,
         np.loadtxt(args.intrinsicCameraMatrixFilename),
         args.distortionCoefficients)
     videoImg = cv2.remap(videoImg,
                          map1,
                          map2,
                          interpolation=cv2.INTER_LINEAR)
     if args.saveImages:
         cv2.imwrite(
             utils.removeExtension(args.videoFrameFilename) +
             '-undistorted.png', videoImg)
 invHomography = np.linalg.inv(homography)
 projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
 projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
 for i in range(worldPts.shape[0]):
     # world image
     cv2.circle(worldImg,
                tuple(np.int32(np.round(worldPts[i] / args.unitsPerPixel))),
                2, cvutils.cvBlue)
     cv2.circle(
         worldImg,
         tuple(np.int32(np.round(projectedVideoPts[i] /
                                 args.unitsPerPixel))), 2, cvutils.cvRed)
     cv2.putText(
         worldImg, str(i + 1),
         tuple(np.int32(np.round(worldPts[i] / args.unitsPerPixel)) + 5),
         cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
     # video image