def once():
    global depth, rgb
    preview.canvas.SetCurrent()

    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    preview.clearcolor=[0,0,0,0]
    preview.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        preview.modelmat = main.R_display
    else:
        preview.modelmat = main.R_aligned

    preview.Refresh()
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #2
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Example #3
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1, 0.6, 0.6, 1]))

    window.clearcolor = [0, 0, 0, 0]
    window.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    g = blockcraft.translated_rotated(main.R_correct, grid.occ)
    talk_to_minecraft(g)

    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
Example #4
def record(filename=None):
    opennpy.align_depth_to_rgb()
    if filename is None:
        filename = str(np.random.rand())

    foldername = 'data/sets/%s/' % filename
    dataset.folder = foldername

    os.mkdir(foldername)
    shutil.copytree('data/newest_calibration/config', '%s/config' % foldername)
    print "Created new dataset: %s" % foldername

    frame = 0
    try:
        while 1:
            opennpy.sync_update()
            (depth, _) = opennpy.sync_get_depth()
            (rgb, _) = opennpy.sync_get_video()

            np.save('%s/depth_%05d.npy' % (foldername, frame), depth)

            cv.CvtColor(rgb, rgb, cv.CV_RGB2BGR)
            cv.SaveImage('%s/rgb_%05d.png' % (foldername, frame), rgb)

            if frame % 30 == 0:
                print 'frame: %d' % frame
            frame = frame + 1
    except KeyboardInterrupt:
        print "Captured %d frames" % frame
        compress()
Example #8
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)
    blockdraw.clear()
    try:
        c, _ = hashalign.find_best_alignment(grid.occ, 0 * grid.occ,
                                             target_model, ~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            blockdraw.show_grid('0',
                                grid.occ,
                                color=np.array([0.2, 1, 0.2, 1]))
        else:
            blockdraw.show_grid('1', incorrect, color=np.array([1, 1, 0.1, 1]))
            nf = not_filled * 0
            nf[:, next_layer, :] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf, color=np.array([1, 0.2, 1.0, 1]))
            blockdraw.show_grid('3',
                                correct,
                                color=np.array([0.1, 0.3, 0.1, 1]))

    window.clearcolor = [0, 0, 0, 0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #10
def block_once(self):
    """Process the next blockplayer frame"""
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    # Update the blocks in the playing area
    if self.state < Game.STATE_CHECKWALL and self.update_blocks:
        self.blocks = main.grid.occ
        if hasattr(main, 'R_correct'):
            self.blocks = blockcraft.translated_rotated(main.R_correct, self.blocks)
        self.blocks = self.block_slice(self.blocks)
        self.board.update_blocks(self.blocks,
            remove=self.board.wall_abs-Board.BOARD_BORDER > Board.PLAY_WIDTH)
Example #11
def once():
    global depth
    global rgb
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    global x,y,z,xyz

    if PREPROCESS:
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l,t),(r,b) = rect
        depth_ = np.ascontiguousarray(depth[t:b,l:r])
        v,u = np.mgrid[t:b,l:r].astype('f')
        x,y,z = calibkinect.convertOpenNI2Real(depth_,u,v)
        x=x[mask[t:b,l:r]]
        y=y[mask[t:b,l:r]]
        z=z[mask[t:b,l:r]]

        rgb_ = rgb[t:b,l:r,:][mask[t:b,l:r],:]
    else:
        x,y,z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x,y,z = map(lambda _:_.flatten(), (x,y,z))
        rgb_ = rgb.reshape(-1,3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x,y,z)).reshape(-1,3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0],4),'f')
    rgba[:,3] = 1
    rgba[:,:3] = rgb_.astype('f')/256.0

    window.lookat = np.array([0,0,0])
    window.update_points(xyz_,rgba)
    window.clearcolor = [0,0,0,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #13
def run_calib():
    config.load('data/newest_calibration')
    opennpy.align_depth_to_rgb()

    samples = []

    for i in arange(0,2*np.pi,np.pi/8): 
        line = line_through_point(center, i)
        draw_line_XZ(line)
        pylab.waitforbuttonpress(0.1)
        for _ in range(60):
            opennpy.sync_update()
            rgb, _ = opennpy.sync_get_video()
            depth, _ = opennpy.sync_get_depth()
        samples.append((line, rgb, depth))

    return samples
Example #14
def once():
    opennpy.sync_update()
    (depth,_), (rgb,_) = opennpy.sync_get_depth(), opennpy.sync_get_video()

    # Look for chessboard corners in the image
    was_found, corners = cv.FindChessboardCorners(cv.fromarray(255-rgb),
                                                  checkerboard.pattern_size)
    preview = cv.fromarray(rgb.copy())
    if was_found:
        cv.DrawChessboardCorners(preview, checkerboard.pattern_size,
                                 corners, was_found)
    cv.ShowImage("RGB", preview)
    if not was_found: return

    # Sample the kinect depth image X,Y,Z points at the corners
    XYZ = calibkinect.convertOpenNI2Real(depth)
    # corners are (x, y) pixel coordinates; map_coordinates expects (row, col),
    # hence the transpose and the axis flip
    XYZ = [scipy.ndimage.map_coordinates(im, np.array(corners).transpose()[::-1,:],
                                         order=0, prefilter=False) for im in XYZ]
    XYZ = np.vstack(XYZ).transpose()
    return XYZ
Example #15
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    #blockdraw.show_grid('vac', grid.vac,
    #                    color=np.array([0.6,1,0.6,0]))
    if 0 and lattice.is_valid_estimate():
        window.clearcolor=[0.9,1,0.9,0]
    else:
        window.clearcolor=[0,0,0,0]
        #window.clearcolor=[1,1,1,0]
        window.flag_drawgrid = True

    if 1:
        update_display()

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #16
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1, 0.6, 0.6, 1]))

    #blockdraw.show_grid('vac', grid.vac,
    #                    color=np.array([0.6,1,0.6,0]))
    if 0 and lattice.is_valid_estimate():
        window.clearcolor = [0.9, 1, 0.9, 0]
    else:
        window.clearcolor = [0, 0, 0, 0]
        #window.clearcolor=[1,1,1,0]
        window.flag_drawgrid = True

    if 1:
        update_display()

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #17
def run_calib():
    """Run the table plane calibration
    """
    close('all')
    global points, depth
    print("Getting an image from the camera")
    opennpy.align_depth_to_rgb()
    for i in range(10):
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    fig = figure(1)
    clf()
    points = []

    def pick(event):
        global points
        points.append((event.xdata, event.ydata))
        print('Picked point %d of 4' % (len(points)))

    #imshow(depth)
    #imshow(1./depth)
    imshow(rgb*np.dstack(3*[1./depth]))
    draw()
    fig.canvas.mpl_disconnect('button_press_event')
    fig.canvas.mpl_connect('button_press_event', pick)

    print("Click four points")
    while len(points) < 4:
        waitforbuttonpress(0.001)

    print 'OK'
    np.save('%s/config/boundpts' % (newest_folder), points)
    np.save('%s/config/depth' % (newest_folder), depth)

    finish_table_calib()
Example #19
def record(filename=None, cams=(0,), do_rgb=False):
    if len(cams) > 1 and do_rgb:
        print """You're trying to record from 2+ kinects with RGB and depth.
        This probably will not work out for you, but it depends on if you have
        enough USB bandwidth to support all four streams. Call record with 
        do_rgb=False to turn off rgb."""
    q = multiprocessing.Queue(10000)
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
        
    opennpy.align_depth_to_rgb()
    if filename is None:
        filename = str(time.time())

    foldername = KINECT_PATH + '%s' % filename
    dataset.folder = foldername
    try:
        os.makedirs(foldername)
    except OSError:
        pass
    #shutil.copytree('data/newest_calibration/config', '%s/config' % foldername)
    print "Created new dataset: %s" % foldername
    min_delay = 1
    frame = 0
    frame_md5s = {}
    frame_last_update = {}
    def check_frame(name, cam, frame_data):
        frame_md5 = hashlib.md5(frame_data).digest()
        key = '%s:%d' % (name, cam)
        if frame_md5s.get(key) == frame_md5:
            #print('Kinect [%s] is repeating data, likely crashed...' % key)
            last_update = frame_last_update.get(key)
            if last_update is None or (time.time() - last_update) > 10.:
                return 'die'
        else:
            frame_last_update[key] = time.time()
            frame_md5s[key] = frame_md5
            return 'new'

    def die():
        q.put(None)

    while 1:
        print('')
        st = time.time()
        opennpy.sync_update()
        for cam in cams:
            (depth,_) = opennpy.sync_get_depth(cam)
            ret = check_frame('depth', cam, depth.tostring())
            if ret == 'die':
                return die()
            elif ret == 'new':
                q.put(('%s/depth_%f_%05d_%d.snappy' % (foldername,st,frame,cam), depth.tostring()))
            if do_rgb:
                (rgb,_) = opennpy.sync_get_video(cam)
                ret = check_frame('rgb', cam, rgb.tostring())
                if ret == 'die':
                    return die()
                elif ret == 'new':
                    rgb = cv2.cvtColor(rgb, cv.CV_RGB2BGR)
                    q.put(('%s/rgb_%f_%05d_%d.ppm' % (foldername,st,frame,cam), rgb))

        if frame % 30 == 0:
            print 'frame: %d' % frame
        cur_time = time.time()
        print({k: cur_time - v for k, v in frame_last_update.items()})
        frame = frame + 1
        sleep_time = max(0, min_delay - (time.time() - st))
        #time.sleep(sleep_time)
        print('Time: %f' % (time.time() - st,))
Example #20
import opennpy

a = opennpy.sync_get_depth()
b = opennpy.sync_get_video()
opennpy.align_depth_to_rgb()
a = opennpy.sync_get_depth()
b = opennpy.sync_get_video()
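Example #20 grabs one depth/RGB pair before and after enabling depth-to-RGB registration. A minimal capture-loop sketch built from the same opennpy calls used throughout these examples follows; the fixed frame count and the processing stub are illustrative, not part of opennpy:

import opennpy

opennpy.align_depth_to_rgb()              # register depth to the RGB camera
for frame in range(100):                  # illustrative: grab 100 frames, then stop
    opennpy.sync_update()                 # advance both streams, as the loops above do
    depth, _ = opennpy.sync_get_depth()   # depth image; the second element is unused here
    rgb, _ = opennpy.sync_get_video()     # RGB image; the second element is unused here
    # ... process depth and rgb ...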
Example #21
def get_video():
    return frame_convert.video_cv(opennpy.sync_get_video()[0])
Example #22
def show_video():
    cv.ShowImage('Video', frame_convert.video_cv(opennpy.sync_get_video()[0]))
Example #23
def get_video():
    return opennpy.sync_get_video()[0]
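Examples #21-#23 each fetch or display a single frame. A hypothetical display loop built from the same pieces (opennpy, frame_convert, and the old cv bindings) might look like the sketch below; the per-frame sync_update(), the 10 ms delay, and the Escape-key exit are assumptions, not part of these helpers:

import cv                # old OpenCV bindings, as in the examples above
import frame_convert     # converter used by Examples #21 and #22
import opennpy

while True:
    opennpy.sync_update()                                         # advance to the next frame (assumption)
    frame = frame_convert.video_cv(opennpy.sync_get_video()[0])   # RGB frame -> displayable image
    cv.ShowImage('Video', frame)                                  # same window name as Example #22
    if cv.WaitKey(10) == 27:                                      # illustrative: Escape exits
        break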