示例#1
0
def main(filename=None):
    if filename is None:
        if (len(sys.argv) < 2):
            print "python radar_tracker.py <.pal file>"
            sys.exit()
        filename = sys.argv[1]

    use_turked = filename.find('-track') != -1 \
        and os.path.exists(filename.replace('-track',''))
    print "Loading annotation file . . ."
    annotations_tracked = parse(filename)
    absolute_path_to_relative_path(annotations_tracked)

    if use_turked:
        annotations_turked = parse(filename.replace('-track', ''))
        copy_from_turked(annotations_turked, annotations_tracked)

    print len(annotations_tracked)

    # args = parse_args(sys.argv[2], sys.argv[3])
    data_folder = os.path.dirname(
        image_name_to_map_file(annotations_tracked[0].imageName))
    args = {
        'cam_num':
        2,
        'params':
        LoadParameters(
            open(data_folder + '/params.ini', 'r').readline().rstrip())
    }
    args['video'] = image_name_to_map_file(
        annotations_tracked[0].imageName).replace('.map', '2.avi')
    params = args['params']

    video_reader = VideoReader(args['video'])
    #print "Loading file %s" % args['map']
    #rdr_map = loadRDRCamMap(args['map'])
    rdr_map = load_all_radar_info(annotations_tracked)

    set_annotations_ids_using_radar(annotations_tracked, rdr_map, args)
    fill_gaps(annotations_tracked, rdr_map)
    for annotation in annotations_tracked:
        for rect in annotation.rects:
            rect.classID = int(rect.classID)

    if use_turked:
        copy_from_tracked(annotations_turked, annotations_tracked)

    save_filename = filename.split('.')[0] + "-with-depth.pal"
    save(save_filename, annotations_tracked)

    if use_turked:
        save_filename = filename.split('.')[0].replace('-track',
                                                       '') + "-with-depth.pal"
        save(save_filename, annotations_turked)

    # compute_statistics(annotations_turked, rdr_map);
    show_3D(annotations_tracked, rdr_map, args, True, False, True)
示例#2
0
    #possible to do other filters such as removing objects beyond certain distance, height filters etc
    #mask = disp > disp.min()

    return points


if __name__ == '__main__':

    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    params = args['params']
    assert (cam_num == 1)
    video_file_left = args['video']
    video_file_right = args['opposite_video']
    video_reader_left = VideoReader(video_file_left)
    video_reader_right = VideoReader(video_file_right)

    fnum = 0
    while True:
        for t in range(10):
            (successL, imgL) = video_reader_left.getNextFrame()
            (successR, imgR) = video_reader_right.getNextFrame()
        """
        WRITEDIR = 'data/'
        imgL = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY)
        imgR = cv2.cvtColor(imgR, cv2.COLOR_RGB2GRAY)
        cv2.imwrite(WRITEDIR+ 'left_%06d.png' % fnum, imgL)
        cv2.imwrite(WRITEDIR+ 'right_%06d.png' % fnum, imgR)
        fnum+=1;
示例#3
0
    # Euler-angle history accumulated per processed frame.
    rx = []
    ry = []
    rz = []
    # frames at which manual adjustments are made
    mframes = []
    showLidar = False
    # current status of rotation (Euler angles, radians)
    crx = 0.042000
    cry = 0.022000
    crz = 0.015000
    # 3x3 rotation matrix built from the current Euler angles.
    cR = euler_matrix(crx, cry, crz)[0:3, 0:3]
    # standard arg parsing: <data_dir> <video_basename>
    args = parse_args(sys.argv[1], sys.argv[2])
    # assumes sys.argv[2] ends with "<cam_num>.avi" -- digit at index -5;
    # TODO(review): confirm this naming convention for all runs
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']
    video_reader = VideoReader(video_file)
    params = args['params']
    cam = params['cam'][cam_num - 1]
    # Prefer the mark2 GPS log when present.  postfix_len is the length of
    # the filename suffix stripped below to recover the run prefix.
    if os.path.isfile(args['gps_mark2']):
        gps_key1 = 'gps_mark1'
        gps_key2 = 'gps_mark2'
        postfix_len = 13
    else:
        gps_key1 = 'gps'
        gps_key2 = 'gps'
        postfix_len = 8

    #gps_filename= args['gps']
    gps_filename = args[gps_key2]
    gps_reader = GPSReader(gps_filename)
    prefix = gps_filename[0:-postfix_len]
示例#4
0
    # run an edge filter
    edges = cv2.filter2D(E, cv2.CV_8U, np.zeros((1,1)))
    
    for k in kernels: 
        edges = np.maximum(edges, np.abs(cv2.filter2D(E, cv2.CV_8U, k)))

    edges = computeDistanceTransform(edges+1, 0.98, 1.0/1.8)
    return edges
if __name__ == '__main__': 
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']
    params = args['params'] 
    cam = params['cam'][cam_num-1]
    video_reader = VideoReader(video_file)
    ldr_map = loadLDRCamMap(args['map'])

    #(tx,ty,tz) = (-0.50000000000000004, -0.2875, 0.34)
    (tx,ty,tz) = (-0.50000000000000004, 0.03, 0.34)
    (rx,ry,rz) = (0.0,0.0,0.0)
    C_current = np.array([tx,ty,tz,rx,ry,rz])
    BATCH_SIZE = 30 

    from multiprocessing import Pool
    pool = Pool(10)

    while True:
        
        batch_data = [ ]
        while len(batch_data) < BATCH_SIZE:
示例#5
0
    print(ratio_d)


if __name__ == "__main__":
    if (len(sys.argv) < 4):
        print "python radar_tracker.py <.al file> <directory of .avi> <video file>"
        sys.exit()

    filename = (sys.argv[1])
    print "Loading annotation file . . ."
    annotations = parseXML(filename)
    print len(annotations)

    args = parse_args(sys.argv[2], sys.argv[3])
    params = args['params']

    video_reader = VideoReader(args['video'])
    rdr_map = loadRDRCamMap(args['map'])

    set_annotations_ids_using_radar(annotations, rdr_map, args)
    fill_gaps(annotations, rdr_map)
    for annotation in annotations:
        for rect in annotation.rects:
            rect.classID = int(rect.classID)
    save_filename = filename.split('.')[0] + "_new.al"
    saveXML(save_filename, annotations)
#	compute_statistics(annotations, rdr_map);
#	show_3D(annotations, rdr_map,args, True, False, True);
#	print "Writing new annotations into a file..."
#	saveXML(filename.split('.')[1] + "_with_distance.al", annotations);
示例#6
0
    pts_wrt_camera_t = np.vstack((pts_wrt_camera_t, np.ones((1, pts_wrt_camera_t.shape[1]))))
    pts_wrt_camera_t = dot(cam["E"], pts_wrt_camera_t)
    pts_wrt_camera_t = pts_wrt_camera_t[0:3, :]

    # reproject camera_t points in camera frame
    (pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)

    return (pix, mask)


if __name__ == "__main__":
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = args["cam_num"]
    video_file = args["video"]
    video_reader = VideoReader(video_file)
    params = args["params"]
    cam = params["cam"][cam_num]

    all_data = np.load(sys.argv[3])
    map_data = all_data["data"]
    map_t = all_data["t"]

    gps_reader_mark1 = GPSReader(args["gps_mark1"])
    gps_data_mark1 = gps_reader_mark1.getNumericData()
    gps_reader_mark2 = GPSReader(args["gps_mark2"])
    gps_data_mark2 = gps_reader_mark2.getNumericData()

    imu_transforms_mark1 = IMUTransforms(gps_data_mark1)
    imu_transforms_mark2 = IMUTransforms(gps_data_mark2)
    gps_times_mark1 = utc_from_gps_log_all(gps_data_mark1)
示例#7
0
    T_to_w_from_i[0:3, 0:3] = R_to_i_from_w(roll_0, pitch_0,
                                            yaw_0).transpose()
    enu_wrt_imu_0 = np.dot(T_to_w_from_i, pos_wrt_imu_0).transpose()

    # ENU w.r.t imu at time 0 -> WGS84
    wgs84_pts = ENUtoWGS84(gps_dat[0, 1:4], enu_wrt_imu_0[:, 0:3]).transpose()
    return wgs84_pts


if __name__ == '__main__':
    video_filename = sys.argv[1]
    path, vfname = os.path.split(video_filename)
    vidname = vfname.split('.')[0]
    cam_num = int(vidname[-1])
    gps_filename = path + '/' + vidname[0:-1] + '_gps.out'
    video_reader = VideoReader(video_filename, num_splits=1)
    gps_reader = GPSReader(gps_filename)
    gps_dat = gps_reader.getNumericData()
    cam = getCameraParams()[cam_num - 1]

    labels = loadmat(sys.argv[2])
    lp = labels['left']
    rp = labels['right']

    l_gps_coord = pixelToGPS(lp, gps_dat, cam)
    r_gps_coord = pixelToGPS(rp, gps_dat, cam)

    print "gps_dat: "
    print gps_dat[:, 1:4]
    print "l_gps_dat: "
    print l_gps_coord
示例#8
0
if __name__ == '__main__':
    video_filename = sys.argv[1]
    existing_lanes = []
    path, vfname = os.path.split(video_filename)
    vidname = vfname.split('.')[0]
    cam_num = int(vidname[-1])
    gps_filename = path + '/' + vidname[0:-1] + '_gps.out'
    out_name = sys.argv[2]
    display = True
    if '--quiet' in sys.argv:
        display = False

    cv2.namedWindow('video')
    num_imgs_fwd = 200
    video_reader = VideoReader(video_filename, num_splits=1)
    gps_reader = GPSReader(gps_filename)
    gps_dat = gps_reader.getNumericData()

    cam = pickle.load(open('cam_params.pickle', 'rb'))[cam_num - 1]

    framenum = 0
    lastTime = time.time()
    video_reader.setFrame(framenum)

    skip_frame = 5
    seconds_back = 4
    default_offset = 60

    left_present = -1
    right_present = -1
示例#9
0
    return (pix, mask)


def trackbarOnchange(t, prev_t):
    """Seek the shared video_reader when the trackbar jumps more than one frame."""
    jump = abs(t - prev_t)
    if jump <= 1:
        # Normal playback advances one frame at a time; no explicit seek needed.
        return
    video_reader.setFrame(t)


if __name__ == '__main__':
    # standard arg parsing: <data_dir> <video_basename>
    args = parse_args(sys.argv[1], sys.argv[2])
    # assumes sys.argv[2] ends with "<cam_num>.avi" -- digit at index -5
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']

    params = args['params']
    cam = params['cam'][cam_num - 1]
    video_reader = VideoReader(video_file)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)

    # Transform from IMU frame to lidar frame (inverse of the calibration).
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])

    # Left and right data (npz archives given as argv[3] / argv[4])
    left_file = np.load(sys.argv[3])
    left_data = left_file['data']
    right_file = np.load(sys.argv[4])
    right_data = right_file['data']

    # Label left and right data: append a side-flag column (0=left, 1=right)
    left_data = np.hstack((left_data, np.zeros((left_data.shape[0], 1))))
    right_data = np.hstack((right_data, np.ones((right_data.shape[0], 1))))

def trackbarOnchange(t, prev_t):
    # Re-seek only on jumps of more than one frame; single-frame deltas come
    # from normal playback and need no explicit seek.
    if abs(prev_t - t) > 1:
        video_reader.setFrame(t)


if __name__ == '__main__':

    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']

    params = args['params'] 
    cam = params['cam'][cam_num-1]
    video_reader = VideoReader(video_file)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)
    
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])

    all_data = np.load(sys.argv[3])
    map_data = all_data['data']
    #map_data = map_data[map_data[:,3] > 60, :]
    # map points are defined w.r.t the IMU position at time 0
    # each entry in map_data is (x,y,z,intensity,framenum). 

    print "Hit 'q' to quit"
    trackbarInit = False
from VideoReader import *

patternShape = (7,10)

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

if __name__ == '__main__':
  video_filename = sys.argv[1]
  path, vfname = os.path.split(video_filename)
  vfname_2 = vfname.replace('1','2')
  outdir = sys.argv[2]
  outname = sys.argv[3]

  cv2.namedWindow('video')
  video_reader_cam_1 = VideoReader(video_filename)
  video_reader_cam_2 = VideoReader(path + '/' + vfname_2)
  
  framenum = 0
  right_corners = None
  left_corners = None
  right_imgpoints = [ ] 
  left_imgpoints = [ ] 
  right_frames = [ ] 
  left_frames = [ ]

  firstTime = True
  ct = 1;
  fileList = open(outdir+'/filelist.txt', 'w');

  while True:
示例#12
0
from VideoReader import * 
from ArgParser import *
import sys, cv2

"""
usage: 
python PlayStereoVideo.py <path_to_data> <basename><camera_num>.avi
for example:
python PlayStereoVideo.py /scail/group/deeplearning/driving_data/q50_data/4-2-14-monterey/ 17N_b1.avi

under this command, reader1 will load 17N_b1.avi 
reader2 will load 17N_b2.avi

If you switch the command to python PlayStereoVideo.py /scail/group..../4-2-14-monterey/ 17N_b2.avi,
then you will have reader1 loading 17N_b2.avi and reader2 loading 17N_b1.avi. The point is that reader1 does not necessairly load the camera 1. 
"""

if __name__ == '__main__':
    # Open the requested camera stream and its stereo counterpart.
    args = parse_args(sys.argv[1], sys.argv[2])
    reader1 = VideoReader(args['video'])
    reader2 = VideoReader(args['opposite_video'])
    # Play both streams in lockstep until either side runs out of frames.
    while True:
        ok1, frame1 = reader1.getNextFrame()
        ok2, frame2 = reader2.getNextFrame()
        if not (ok1 and ok2):
            break
        # Display each frame at half resolution in its own window.
        cv2.imshow('video1', cv2.pyrDown(frame1))
        cv2.imshow('video2', cv2.pyrDown(frame2))
        cv2.waitKey(5)
    (pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)

    return (pix, mask)

if __name__ == '__main__':
    showLidar=False
    # current status of rotation
    crx=0.042000
    cry=0.022000
    crz=0.015000
    cR = euler_matrix(crx, cry, crz)[0:3,0:3]
    # stardard arg parsing
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']
    video_reader = VideoReader(video_file)
    params = args['params'] 
    cam = params['cam'][cam_num-1]
    if os.path.isfile(args['gps_mark2']):
      gps_key1='gps_mark1'
      gps_key2='gps_mark2'
      postfix_len = 13
    else:
      gps_key1='gps'
      gps_key2='gps'
      postfix_len=8
      
    #gps_filename= args['gps']
    gps_filename= args[gps_key2]
    gps_reader = GPSReader(gps_filename)
    prefix = gps_filename[0:-postfix_len]
示例#14
0
from VideoReader import *
from ArgParser import *
import sys, cv2
"""
usage: 
python PlayStereoVideo.py <path_to_data> <basename><camera_num>.avi
for example:
python PlayStereoVideo.py /scail/group/deeplearning/driving_data/q50_data/4-2-14-monterey/ 17N_b1.avi

under this command, reader1 will load 17N_b1.avi 
reader2 will load 17N_b2.avi

If you switch the command to python PlayStereoVideo.py /scail/group..../4-2-14-monterey/ 17N_b2.avi,
then you will have reader1 loading 17N_b2.avi and reader2 loading 17N_b1.avi. The point is that reader1 does not necessairly load the camera 1. 
"""

if __name__ == '__main__':
    # Open the requested camera stream and its stereo counterpart.
    args = parse_args(sys.argv[1], sys.argv[2])
    reader1 = VideoReader(args['video'])
    reader2 = VideoReader(args['opposite_video'])
    # Show both streams at half resolution until either runs out of frames.
    while True:
        (success1, I1) = reader1.getNextFrame()
        (success2, I2) = reader2.getNextFrame()
        if not success1 or not success2:
            break
        cv2.imshow('video1', cv2.pyrDown(I1))
        cv2.imshow('video2', cv2.pyrDown(I2))
        cv2.waitKey(5)
    pts_wrt_camera_t = np.vstack(
        (pts_wrt_camera_t, np.ones((1, pts_wrt_camera_t.shape[1]))))
    pts_wrt_camera_t = dot(cam['E'], pts_wrt_camera_t)
    pts_wrt_camera_t = pts_wrt_camera_t[0:3, :]

    # reproject camera_t points in camera frame
    (pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)

    return (pix, mask)


if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = args['cam_num']
    video_file = args['video']
    video_reader = VideoReader(video_file)
    params = args['params']
    cam = params['cam'][cam_num]

    gps_reader_mark1 = GPSReader(args['gps_mark1'])
    gps_data_mark1 = gps_reader_mark1.getNumericData()
    gps_reader_mark2 = GPSReader(args['gps_mark2'])
    gps_data_mark2 = gps_reader_mark2.getNumericData()

    imu_transforms_mark1 = IMUTransforms(gps_data_mark1)
    imu_transforms_mark2 = IMUTransforms(gps_data_mark2)
    gps_times_mark1 = utc_from_gps_log_all(gps_data_mark1)
    gps_times_mark2 = utc_from_gps_log_all(gps_data_mark2)
    builder = MapBuilder(args, 0, 999999, 0.1, 0.05)

    # parameter server
示例#16
0
    points = cv2.reprojectImageTo3D(disp, Q)

    #possible to do other filters such as removing objects beyond certain distance, height filters etc
    #mask = disp > disp.min()
    
    return points

if __name__ == '__main__':
    
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])
    params = args['params']
    assert(cam_num == 1)
    video_file_left = args['video']
    video_file_right = args['opposite_video']
    video_reader_left = VideoReader(video_file_left)
    video_reader_right = VideoReader(video_file_right)


    fnum = 0 
    while True:
        for t in range(10):
            (successL, imgL) = video_reader_left.getNextFrame()
            (successR, imgR) = video_reader_right.getNextFrame()
        

        """
        WRITEDIR = 'data/'
        imgL = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY)
        imgR = cv2.cvtColor(imgR, cv2.COLOR_RGB2GRAY)
        cv2.imwrite(WRITEDIR+ 'left_%06d.png' % fnum, imgL)
示例#17
0
from VideoReader import * 
from ArgParser import *
import sys, cv2, pickle

FRAME_STEP = 5
HISTORY_WINDOW = 30 
FUTURE_WINDOW = 20

def drawBorder(I, color, thickness):
    """Draw a border of the given color and thickness around image I, in place."""
    height, width = I.shape[0], I.shape[1]
    cv2.rectangle(I, (0, 0), (width, height), color, thickness)

if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    reader = VideoReader(args['video'])
    fdir = sys.argv[1] + '/car_frames'
    fname = fdir + "/" + args['basename'] + str(args['cam_num']) + '.pickle'
    f = open(fname, 'r')
    framenums = pickle.load(f)
    last_record = 0 
    while True:
        for p in range(FRAME_STEP):
            (success, I) = reader.getNextFrame()
        if not success:
            break

        if reader.framenum in framenums:
            drawBorder(I, (0,255,0), 10)
        cv2.imshow('video', I)
        key = chr(cv2.waitKey(50) & 255)

示例#18
0
if __name__ == '__main__':
  video_filename = sys.argv[1]
  path, vfname = os.path.split(video_filename)
  vidname = vfname.split('.')[0]
  cam_num = int(vidname[-1])
  gps_filename = path + '/' + vidname[0:-1] + '_gps.out'
  out_name = sys.argv[2]
  display = True
  if '--quiet' in sys.argv:
      display = False


  cv2.namedWindow('video')
  cv.SetMouseCallback('video', on_mouse, 0)
  num_imgs_fwd = 200; 
  video_reader = VideoReader(video_filename, num_splits=1)
  gps_reader = GPSReader(gps_filename)
  gps_dat = gps_reader.getNumericData()

  cam = pickle.load(open('cam_params.pickle', 'rb'))[cam_num - 1]

  framenum = 0
  lastTime = time.time()
  lastClickProcessed = time.time()
  lastCols = [None, None]
  lastLine = [None, None, None, None]
  video_reader.setFrame(framenum)

  skip_frame = 10
  default_offset = 60
示例#19
0
from VideoReader import *
from ArgParser import *
import sys, cv2

if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])

    write = True

    fourcc = cv2.cv.CV_FOURCC(*'MPG1')
    vid_name = '/deep/u/jkiske/test.mpg'
    writer = None
    fnum = 0

    with VideoReader(args['video']) as reader:
        while True:
            (success, I) = reader.getNextFrame()
            if not success:
                break
            if write == True:
                if writer == None:
                    writer = cv2.VideoWriter(vid_name, fourcc, 50,
                                             (I.shape[1], I.shape[0]))
                writer.write(I)
                fnum += 1
                print fnum
                if fnum >= 50:
                    break
            else:
                cv2.imshow('video', I)
                cv2.waitKey(1)
    #print (rx,ry,rz)


if __name__ == '__main__':
    vfname = sys.argv[2]
    vidname = vfname.split('.')[0]
    vidname2 = vidname[:-1] + '2'
    video_filename2 = sys.argv[1] + '/' + vidname2 + '.avi'

    args = parse_args(sys.argv[1], sys.argv[2])

    gps_reader = GPSReader(args['gps'])
    params = args['params']
    cam1 = params['cam'][0]
    cam2 = params['cam'][1]
    video_reader1 = VideoReader(args['video'])
    video_reader2 = VideoReader(video_filename2)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)
    ldr_map = loadLDRCamMap(args['map'])

    if '--full' in sys.argv:
        total_num_frames = GPSData.shape[0]
        start_fn = 0
        step = 10
        num_fn = int(total_num_frames / step)

    # this has been flipped for the q50

    cloud_r.SetBackground(0., 0., 0.)
示例#21
0
    color_data[:,0] = I[pix[1,mask], pix[0,mask], 2]
    color_data[:,1] = I[pix[1,mask], pix[0,mask], 1]
    color_data[:,2] = I[pix[1,mask], pix[0,mask], 0]

    return (color_data,mask)



if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = int(sys.argv[2][-5])

    params = args['params']
    cam = params['cam'][cam_num - 1]
    video_reader = VideoReader(args['video'])
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)
    
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])

    all_data = np.load(sys.argv[3])
    map_data = all_data['data']
    color_data = np.zeros((map_data.shape[0], 3))
    #color_data = np.zeros((map_data.shape[0], 7, 10))
    #map_data = map_data[map_data[:,3] > 60, :]
   
    start_t = 0
    t = start_t
    while True:
    pts_wrt_camera_t = np.vstack((pts_wrt_camera_t,
                                  np.ones((1, pts_wrt_camera_t.shape[1]))))
    pts_wrt_camera_t = dot(cam['E'], pts_wrt_camera_t)
    pts_wrt_camera_t = pts_wrt_camera_t[0:3, :]

    # reproject camera_t points in camera frame
    (pix, mask) = cloudToPixels(cam, pts_wrt_camera_t)

    return (pix, mask)

if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    cam_num = args['cam_num']
    video_file = args['video']
    video_reader = VideoReader(video_file)
    params = args['params']
    cam = params['cam'][cam_num]

    gps_reader_mark1 = GPSReader(args['gps_mark1'])
    gps_data_mark1 = gps_reader_mark1.getNumericData()
    gps_reader_mark2 = GPSReader(args['gps_mark2'])
    gps_data_mark2 = gps_reader_mark2.getNumericData()

    imu_transforms_mark1 = IMUTransforms(gps_data_mark1)
    imu_transforms_mark2 = IMUTransforms(gps_data_mark2)
    gps_times_mark1 = utc_from_gps_log_all(gps_data_mark1)
    gps_times_mark2 = utc_from_gps_log_all(gps_data_mark2)
    builder = MapBuilder(args, 0, 999999, 0.1, 0.05)

    # parameter server
        t = time.time()

        count += 1
        if count > 1:
            iren.GetRenderWindow().GetRenderers().GetFirstRenderer().RemoveActor(self.actor)
        self.actor = actor
        iren.GetRenderWindow().GetRenderers().GetFirstRenderer().AddActor(self.actor)
        if count == 1:
            iren.GetRenderWindow().GetRenderers().GetFirstRenderer().ResetCamera()
            iren.GetRenderWindow().GetRenderers().GetFirstRenderer().GetActiveCamera().Zoom(1.6)

        iren.GetRenderWindow().Render()
        print 'render:', time.time() - t

if __name__ == '__main__':
    # Play the video given on the command line inside a VTK render window.
    reader = VideoReader(sys.argv[1])


    # Half-resolution window (presumably the source frames are 1280x960).
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(1280/2, 960/2)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    renderer = vtk.vtkRenderer()
    renderWindow.AddRenderer(renderer)

    renderWindow.Render()
    # Grab and display a new frame on every timer tick (repeating 1 ms timer).
    cb = ImageGrabberCallback(reader)
    interactor.AddObserver('TimerEvent', cb.execute)
    timerId = interactor.CreateRepeatingTimer(1)
    interactor.Start()
HISTORY_WINDOW = 40
FUTURE_WINDOW = 30


def export_frames(fnums, fname):
    """Pickle the collection of frame numbers *fnums* to the file *fname*.

    Opens the file in binary mode (pickle streams are bytes) and closes it
    deterministically via a context manager; the original leaked the handle
    and used text mode.
    """
    with open(fname, 'wb') as f:
        pickle.dump(fnums, f)


def drawBorder(I, color, thickness):
    # Outline the full image in place with the requested color and thickness.
    bottom_right = (I.shape[1], I.shape[0])
    cv2.rectangle(I, (0, 0), bottom_right, color, thickness)


if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    reader = VideoReader(args['video'])

    outdir = sys.argv[1] + '/car_frames'
    try:
        os.mkdir(outdir)
    except:
        pass
    os.chmod(
        outdir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
        | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH
        | stat.S_IXOTH)

    outfname = outdir + "/" + args['basename'] + str(
        args['cam_num']) + '.pickle'

    framenums = set()
示例#25
0

def trackbarOnchange(t, prev_t):
    """Jump the shared video_reader to frame t on large trackbar moves."""
    moved_far = abs(t - prev_t) > 1
    if moved_far:
        video_reader.setFrame(t)


if __name__ == '__main__':

    # standard arg parsing: <data_dir> <video_basename>
    args = parse_args(sys.argv[1], sys.argv[2])
    # assumes sys.argv[2] ends with "<cam_num>.avi" -- digit at index -5
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']

    params = args['params']
    cam = params['cam'][cam_num - 1]
    video_reader = VideoReader(video_file)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)

    # Transform from IMU frame to lidar frame (inverse of the calibration).
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])

    # Point-cloud map exported as an npz archive (argv[3]).
    all_data = np.load(sys.argv[3])
    map_data = all_data['data']
    #map_data = map_data[map_data[:,3] > 60, :]
    # map points are defined w.r.t the IMU position at time 0
    # each entry in map_data is (x,y,z,intensity,framenum).

    print "Hit 'q' to quit"
    trackbarInit = False
 def __init__(self, video_file, cam_params):
     vfilePath, vFile = os.path.split(video_file)
     self.cam_num = int(vFile[-5])
     self.video_reader = VideoReader(video_file)
     self.cam = cam_params