Example #1
def record(filename=None):
    opennpy.align_depth_to_rgb()
    if filename is None:
        filename = str(np.random.rand())

    foldername = 'data/sets/%s/' % filename
    dataset.folder = foldername

    os.mkdir(foldername)
    shutil.copytree('data/newest_calibration/config', '%s/config' % foldername)
    print "Created new dataset: %s" % foldername

    frame = 0
    try:
        while 1:
            opennpy.sync_update()
            (depth,_) = opennpy.sync_get_depth()
            (rgb,_) = opennpy.sync_get_video()

            np.save('%s/depth_%05d.npy' % (foldername,frame), depth)

            cv.CvtColor(rgb, rgb, cv.CV_RGB2BGR)
            cv.SaveImage('%s/rgb_%05d.png' % (foldername,frame), rgb)

            if frame % 30 == 0:
                print 'frame: %d' % frame
            frame = frame + 1
    except KeyboardInterrupt:
        print "Captured %d frames" % frame
        compress()
Example #2
def record(filename=None):
    opennpy.align_depth_to_rgb()
    if filename is None:
        filename = str(np.random.rand())

    foldername = 'data/sets/%s/' % filename
    dataset.folder = foldername

    os.mkdir(foldername)
    shutil.copytree('data/newest_calibration/config', '%s/config' % foldername)
    print "Created new dataset: %s" % foldername

    frame = 0
    try:
        while 1:
            opennpy.sync_update()
            (depth, _) = opennpy.sync_get_depth()
            (rgb, _) = opennpy.sync_get_video()

            np.save('%s/depth_%05d.npy' % (foldername, frame), depth)

            cv.CvtColor(rgb, rgb, cv.CV_RGB2BGR)
            cv.SaveImage('%s/rgb_%05d.png' % (foldername, frame), rgb)

            if frame % 30 == 0:
                print 'frame: %d' % frame
            frame = frame + 1
    except KeyboardInterrupt:
        print "Captured %d frames" % frame
        compress()
Example #3
def run_calib():
    """Run the table plane calibration."""
    close('all')
    global points, depth
    print("Getting an image from the camera")
    opennpy.align_depth_to_rgb()
    for i in range(10):
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    fig = figure(1)
    clf()
    points = []

    def pick(event):
        global points
        points.append((event.xdata, event.ydata))
        print('Picked point %d of 4' % (len(points)))

    #imshow(depth)
    imshow(1./depth)
    draw()
    fig.canvas.mpl_disconnect('button_press_event')
    fig.canvas.mpl_connect('button_press_event', pick)

    print("Click four points")
    while len(points) < 4:
        waitforbuttonpress(0.001)

    print 'OK'
    np.save('%s/config/boundpts' % (newest_folder), points)
    np.save('%s/config/depth' % (newest_folder), depth)

    finish_table_calib()
Example #4
def once():
    global depth, rgb
    preview.canvas.SetCurrent()

    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    preview.clearcolor=[0,0,0,0]
    preview.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        preview.modelmat = main.R_display
    else:
        preview.modelmat = main.R_aligned

    preview.Refresh()
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #5
def preview():
    opennpy.sync_update()
    (depth,_) = opennpy.sync_get_depth()
    global depth_cache
    depth_cache.append(np.array(depth))
    depth_cache = depth_cache[-5:]
    show_depth('depth', depth)
Example #6
def preview():
    opennpy.sync_update()
    (depth, _) = opennpy.sync_get_depth()
    global depth_cache
    depth_cache.append(np.array(depth))
    depth_cache = depth_cache[-5:]
    show_depth('depth', depth)
Example #7
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1, 0.6, 0.6, 1]))

    window.clearcolor = [0, 0, 0, 0]
    window.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    g = blockcraft.translated_rotated(main.R_correct, grid.occ)
    talk_to_minecraft(g)

    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
Example #8
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Example #9
def run_calib():
    """Run the table plane calibration."""
    close('all')
    global points, depth
    print("Getting an image from the camera")
    opennpy.align_depth_to_rgb()
    for i in range(10):
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    fig = figure(1)
    clf()
    points = []

    def pick(event):
        global points
        points.append((event.xdata, event.ydata))
        print('Picked point %d of 4' % (len(points)))

    #imshow(depth)
    imshow(1. / depth)
    draw()
    fig.canvas.mpl_disconnect('button_press_event')
    fig.canvas.mpl_connect('button_press_event', pick)

    print("Click four points")
    while len(points) < 4:
        waitforbuttonpress(0.001)

    print 'OK'
    np.save('%s/config/boundpts' % (newest_folder), points)
    np.save('%s/config/depth' % (newest_folder), depth)

    finish_table_calib()
Example #10
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display
    
    g = blockcraft.translated_rotated(main.R_correct, grid.occ)
    talk_to_minecraft(g)
    
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
Example #11
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Example #12
def preview(cams):
    opennpy.align_depth_to_rgb()
    opennpy.sync_update()
    global depth_cache
    for cam in cams:
        (depth,_) = opennpy.sync_get_depth(cam)
        print(depth.shape)
        print "Depth aligned:", is_aligned(depth)
        depth_cache.append(np.array(depth))
        depth_cache = depth_cache[-6:]
        show_depth('depth_%d'%cam, depth)
Example #13
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)
    blockdraw.clear()
    try:
        c, _ = hashalign.find_best_alignment(grid.occ, 0 * grid.occ,
                                             target_model, ~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            blockdraw.show_grid('0',
                                grid.occ,
                                color=np.array([0.2, 1, 0.2, 1]))
        else:
            blockdraw.show_grid('1', incorrect, color=np.array([1, 1, 0.1, 1]))
            nf = not_filled * 0
            nf[:, next_layer, :] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf, color=np.array([1, 0.2, 1.0, 1]))
            blockdraw.show_grid('3',
                                correct,
                                color=np.array([0.1, 0.3, 0.1, 1]))

    window.clearcolor = [0, 0, 0, 0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #14
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)
    blockdraw.clear()
    try:
        c,_ = hashalign.find_best_alignment(grid.occ,0*grid.occ,
                                        target_model,~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            blockdraw.show_grid('0', grid.occ, color=np.array([0.2,1,0.2,1]))
        else:
            blockdraw.show_grid('1', incorrect,
                                color=np.array([1,1,0.1,1]))
            nf = not_filled*0
            nf[:,next_layer,:] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf,
                                color=np.array([1,0.2,1.0,1]))
            blockdraw.show_grid('3', correct, color=np.array([0.1,0.3,0.1,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #15
def block_once(self):
    """Process the next blockplayer frame"""
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    # Update the blocks in the playing area
    if self.state < Game.STATE_CHECKWALL and self.update_blocks:
        self.blocks = main.grid.occ
        if hasattr(main, 'R_correct'):
            self.blocks = blockcraft.translated_rotated(main.R_correct, self.blocks)
        self.blocks = self.block_slice(self.blocks)
        self.board.update_blocks(self.blocks,
            remove=self.board.wall_abs-Board.BOARD_BORDER > Board.PLAY_WIDTH)
Example #16
def run_calib():
    config.load('data/newest_calibration')
    opennpy.align_depth_to_rgb()

    samples = []

    for i in arange(0,2*np.pi,np.pi/8): 
        line = line_through_point(center, i)
        draw_line_XZ(line)
        pylab.waitforbuttonpress(0.1)
        for _ in range(60):
            opennpy.sync_update()
            rgb, _ = opennpy.sync_get_video()
            depth, _ = opennpy.sync_get_depth()
        samples.append((line, rgb, depth))

    return samples
Example #17
def once():
    global depth
    global rgb
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    global x,y,z,xyz

    if PREPROCESS:
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l,t),(r,b) = rect
        depth_ = np.ascontiguousarray(depth[t:b,l:r])
        v,u = np.mgrid[t:b,l:r].astype('f')
        x,y,z = calibkinect.convertOpenNI2Real(depth_,u,v)
        x=x[mask[t:b,l:r]]
        y=y[mask[t:b,l:r]]
        z=z[mask[t:b,l:r]]

        rgb_ = rgb[t:b,l:r,:][mask[t:b,l:r],:]
    else:
        x,y,z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x,y,z = map(lambda _:_.flatten(), (x,y,z))
        rgb_ = rgb.reshape(-1,3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x,y,z)).reshape(-1,3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0],4),'f')
    rgba[:,3] = 1
    rgba[:,:3] = rgb_.astype('f')/256.0

    window.lookat = np.array([0,0,0])
    window.update_points(xyz_,rgba)
    window.clearcolor = [0,0,0,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #18
def once():
    global depth
    global rgb
    opennpy.sync_update()
    depth, _ = opennpy.sync_get_depth()
    rgb, _ = opennpy.sync_get_video()

    global x, y, z, xyz

    if PREPROCESS:
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l, t), (r, b) = rect
        depth_ = np.ascontiguousarray(depth[t:b, l:r])
        v, u = np.mgrid[t:b, l:r].astype('f')
        x, y, z = calibkinect.convertOpenNI2Real(depth_, u, v)
        x = x[mask[t:b, l:r]]
        y = y[mask[t:b, l:r]]
        z = z[mask[t:b, l:r]]

        rgb_ = rgb[t:b, l:r, :][mask[t:b, l:r], :]
    else:
        x, y, z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x, y, z = map(lambda _: _.flatten(), (x, y, z))
        rgb_ = rgb.reshape(-1, 3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x, y, z)).reshape(-1, 3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0], 4), 'f')
    rgba[:, 3] = 1
    rgba[:, :3] = rgb_.astype('f') / 256.0

    window.lookat = np.array([0, 0, 0])
    window.update_points(xyz_, rgba)
    window.clearcolor = [0, 0, 0, 0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #19
def once():
    opennpy.sync_update()
    (depth,_), (rgb,_) = opennpy.sync_get_depth(), opennpy.sync_get_video()

    # Look for chessboard corners in the image
    was_found, corners = cv.FindChessboardCorners(cv.fromarray(255-rgb),
                                                  checkerboard.pattern_size)
    preview = cv.fromarray(rgb.copy())
    if was_found:
        cv.DrawChessboardCorners(preview, checkerboard.pattern_size,
                                 corners, was_found)
    cv.ShowImage("RGB", preview)
    if not was_found: return

    # Sample the kinect depth image X,Y,Z points at the corners
    XYZ = calibkinect.convertOpenNI2Real(depth)
    XYZ = [scipy.ndimage.map_coordinates(im, np.array(corners).transpose()[::-1,:],
                                         order=0, prefilter=False) for im in XYZ]
    XYZ = np.vstack(XYZ).transpose()
    return XYZ
Example #20
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    #blockdraw.show_grid('vac', grid.vac,
    #                    color=np.array([0.6,1,0.6,0]))
    if 0 and lattice.is_valid_estimate():
        window.clearcolor=[0.9,1,0.9,0]
    else:
        window.clearcolor=[0,0,0,0]
        #window.clearcolor=[1,1,1,0]
        window.flag_drawgrid = True

    if 1:
        update_display()

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #21
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1, 0.6, 0.6, 1]))

    #blockdraw.show_grid('vac', grid.vac,
    #                    color=np.array([0.6,1,0.6,0]))
    if 0 and lattice.is_valid_estimate():
        window.clearcolor = [0.9, 1, 0.9, 0]
    else:
        window.clearcolor = [0, 0, 0, 0]
        #window.clearcolor=[1,1,1,0]
        window.flag_drawgrid = True

    if 1:
        update_display()

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #22
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat
    (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)

    global n,w
    if 0:
        n,w = normals.normals_numpy(depth)
        show_normals(n, w, 'normals_numpy')

    if 0:
        n,w = normals.normals_c(depth)
        show_normals(n, w, 'normals_c')

    if 1:
        normals.opencl.set_rect(rect)
        dt = timeit.timeit(lambda:
                           normals.normals_opencl(depth, mask, rect).wait(),
                           number=1)

        #print dt
        nw = normals.opencl.get_normals()
        n,w = nw[:,:,:3], nw[:,:,3]
        #show_normals(n, w, 'normals_opencl')
        show_normals_sphere(n, w)

    pylab.waitforbuttonpress(0.01)
Example #23
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect, modelmat
    (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)

    global n, w
    if 0:
        n, w = normals.normals_numpy(depth)
        show_normals(n, w, 'normals_numpy')

    if 0:
        n, w = normals.normals_c(depth)
        show_normals(n, w, 'normals_c')

    if 1:
        normals.opencl.set_rect(rect)
        dt = timeit.timeit(
            lambda: normals.normals_opencl(depth, mask, rect).wait(), number=1)

        #print dt
        nw = normals.opencl.get_normals()
        n, w = nw[:, :, :3], nw[:, :, 3]
        #show_normals(n, w, 'normals_opencl')
        show_normals_sphere(n, w)

    pylab.waitforbuttonpress(0.01)
Example #24
def record(filename=None, cams=(0,), do_rgb=False):
    if len(cams) > 1 and do_rgb:
        print """You're trying to record from 2+ kinects with RGB and depth.
        This probably will not work out for you, but it depends on if you have
        enough USB bandwidth to support all four streams. Call record with 
        do_rgb=False to turn off rgb."""
    q = multiprocessing.Queue(10000)
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
        
    opennpy.align_depth_to_rgb()
    if filename is None:
        filename = str(time.time())

    foldername = KINECT_PATH + '%s' % filename
    dataset.folder = foldername
    try:
        os.makedirs(foldername)
    except OSError:
        pass
    #shutil.copytree('data/newest_calibration/config', '%s/config' % foldername)
    print "Created new dataset: %s" % foldername
    min_delay = 1
    frame = 0
    frame_md5s = {}
    frame_last_update = {}
    def check_frame(name, cam, frame_data):
        frame_md5 = hashlib.md5(frame_data).digest()
        key = '%s:%d' % (name, cam)
        if frame_md5s.get(key) == frame_md5:
            #print('Kinect [%s] is repeating data, likely crashed...' % key)
            last_update = frame_last_update.get(key)
            if last_update is None or (time.time() - last_update) > 10.:
                return 'die'
        else:
            frame_last_update[key] = time.time()
            frame_md5s[key] = frame_md5
            return 'new'

    def die():
        q.put(None)

    while 1:
        print('')
        st = time.time()
        opennpy.sync_update()
        for cam in cams:
            (depth,_) = opennpy.sync_get_depth(cam)
            ret = check_frame('depth', cam, depth.tostring())
            if ret == 'die':
                return die()
            elif ret == 'new':
                q.put(('%s/depth_%f_%05d_%d.snappy' % (foldername,st,frame,cam), depth.tostring()))
            if do_rgb:
                (rgb,_) = opennpy.sync_get_video(cam)
                ret = check_frame('rgb', cam, rgb.tostring())
                if ret == 'die':
                    return die()
                elif ret == 'new':
                    rgb = cv2.cvtColor(rgb, cv.CV_RGB2BGR)
                    q.put(('%s/rgb_%f_%05d_%d.ppm' % (foldername,st,frame,cam), rgb))

        if frame % 30 == 0:
            print 'frame: %d' % frame
        cur_time = time.time()
        print({k: cur_time - v for k, v in frame_last_update.items()})
        frame = frame + 1
        sleep_time = max(0, min_delay - (time.time() - st))
        #time.sleep(sleep_time)
        print('Time: %f' % (time.time() - st,))
from OpenGL.GL import *
from OpenGL.GLUT import *
import numpy as np
from rtmodel import mesh
from rtmodel import camera
from wxpy3d import Window
from wxpy3d.opengl_state import opengl_state
import opennpy
import calibkinect
import cv
import scipy.ndimage
import os
import cPickle as pickle

opennpy.align_depth_to_rgb(); opennpy.sync_update()
np.set_printoptions(2)


"""
My best guess for the intrinsic calibration values for the dell projector are:
KK = [[2460, 0, 1152/2],
      [0, 2460, 863],
      [0, 0, 1]]

This is based on measuring the focal length with a tape measure, averaging
between my measurements for X and Y. Assume the principal point is at the bottom
and pixels are square.
      
"""

if not 'window' in globals():