Example 1
def once():
    # Advance to the next recorded frame and grab both depth images.
    dataset.advance()
    depthL, depthR = dataset.depthL, dataset.depthR
    # Threshold each depth image against its stored background to get a
    # foreground mask and bounding rectangle, then display both masks.
    maskL, rectL = preprocess.threshold_and_mask(depthL, config.bgL)
    maskR, rectR = preprocess.threshold_and_mask(depthR, config.bgR)
    show_mask("maskL", maskL.astype("f"), rectL)
    show_mask("maskR", maskR.astype("f"), rectR)

    pylab.waitforbuttonpress(0.01)
Example 2
def once():
    dataset.advance()
    depthL, depthR = dataset.depthL, dataset.depthR
    maskL, rectL = preprocess.threshold_and_mask(depthL, config.bgL)
    maskR, rectR = preprocess.threshold_and_mask(depthR, config.bgR)
    show_mask('maskL', maskL.astype('f'), rectL)
    show_mask('maskR', maskR.astype('f'), rectR)

    pylab.waitforbuttonpress(0.01)
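
Both variants are meant to be polled from an outer loop; the short
waitforbuttonpress timeout keeps the plotting window responsive between
frames. A minimal driver loop (a sketch, not part of the source) might be:

def main():
    # Keep pulling frames and refreshing the display until interrupted.
    while True:
        once()

if __name__ == '__main__':
    main()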
Example 3
def once():
    global depth, rgb
    # Either replay a recorded frame or grab a live one from the Kinect.
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    # Threshold against the stored background; skip the frame if no
    # foreground region is found.
    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    # Run the per-pixel classifier on the depth image and show its labeling,
    # restricted to the foreground mask.
    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Example 4
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
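
The rect returned by preprocess.threshold_and_mask is a pair of corners
((left, top), (right, bottom)), and the from_rect helper above crops a
full-frame array down to that window. A self-contained sketch of the same
convention, using only numpy and a made-up rectangle:

import numpy as np

# Same ((l, t), (r, b)) cropping convention as the from_rect helper above.
# The depth frame and rectangle here are dummy values, not Kinect output.
def from_rect(m, rect):
    (l, t), (r, b) = rect
    return m[t:b, l:r]

depth = np.zeros((480, 640), dtype='u2')     # Kinect-sized dummy depth frame
rect = ((100, 50), (300, 200))               # ((left, top), (right, bottom))
print(from_rect(depth, rect).shape)          # -> (150, 200): rows, then cols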
Example 5
def once():
    global depth
    global rgb
    # Grab a live depth and color frame from the Kinect.
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    global x,y,z,xyz

    if PREPROCESS:
        # Background-subtract the depth frame, then crop and mask so that
        # only foreground pixels are projected to real-world coordinates.
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l,t),(r,b) = rect
        depth_ = np.ascontiguousarray(depth[t:b,l:r])
        v,u = np.mgrid[t:b,l:r].astype('f')
        x,y,z = calibkinect.convertOpenNI2Real(depth_,u,v)
        x=x[mask[t:b,l:r]]
        y=y[mask[t:b,l:r]]
        z=z[mask[t:b,l:r]]

        rgb_ = rgb[t:b,l:r,:][mask[t:b,l:r],:]
    else:
        # No preprocessing: project the full frame to real-world coordinates.
        x,y,z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x,y,z = map(lambda _:_.flatten(), (x,y,z))
        rgb_ = rgb.reshape(-1,3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x,y,z)).reshape(-1,3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0],4),'f')
    rgba[:,3] = 1
    rgba[:,:3] = rgb_.astype('f')/256.0

    window.lookat = np.array([0,0,0])
    window.update_points(xyz_,rgba)
    window.clearcolor = [0,0,0,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example 6
def once():
    global depth
    global rgb
    opennpy.sync_update()
    depth, _ = opennpy.sync_get_depth()
    rgb, _ = opennpy.sync_get_video()

    global x, y, z, xyz

    if PREPROCESS:
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l, t), (r, b) = rect
        depth_ = np.ascontiguousarray(depth[t:b, l:r])
        v, u = np.mgrid[t:b, l:r].astype('f')
        x, y, z = calibkinect.convertOpenNI2Real(depth_, u, v)
        x = x[mask[t:b, l:r]]
        y = y[mask[t:b, l:r]]
        z = z[mask[t:b, l:r]]

        rgb_ = rgb[t:b, l:r, :][mask[t:b, l:r], :]
    else:
        x, y, z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x, y, z = map(lambda _: _.flatten(), (x, y, z))
        rgb_ = rgb.reshape(-1, 3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x, y, z)).reshape(-1, 3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0], 4), 'f')
    rgba[:, 3] = 1
    rgba[:, :3] = rgb_.astype('f') / 256.0

    window.lookat = np.array([0, 0, 0])
    window.update_points(xyz_, rgba)
    window.clearcolor = [0, 0, 0, 0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
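
The preprocessing branch builds per-pixel image coordinates for the cropped
window with np.mgrid before projecting to real-world coordinates; mgrid
yields the row grid (v) first and the column grid (u) second, both with the
same shape as the crop. A small standalone check of that layout, with dummy
bounds and only numpy:

import numpy as np

# np.mgrid[t:b, l:r] yields the row coordinates (v) first, then the column
# coordinates (u); both grids match the shape of the cropped window.
# The bounds are made up for illustration.
(l, t), (r, b) = (100, 50), (300, 200)
v, u = np.mgrid[t:b, l:r].astype('f')
print(v.shape, u.shape)          # (150, 200) (150, 200)
print(v[0, 0], u[0, 0])          # 50.0 100.0 -> top-left pixel of the crop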
Example 7
def once():
    if not FOR_REAL:
        dataset.advance()
        global depth
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect

    (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)

    # Compute the surface normals
    normals.normals_opencl(depth, mask, rect)

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()
    R_aligned = lattice.translation_opencl(R_oriented)

    global modelmat
    if modelmat is None:
        modelmat = R_aligned.copy()
    else:
        modelmat,_ = grid.nearest(modelmat, R_aligned)

    global face, Xo, Yo, Zo
    _,_,_,face = np.rollaxis(opencl.get_modelxyz(),1)
    Xo,Yo,Zo,_ = np.rollaxis(opencl.get_xyz(),1)

    global cx,cy,cz
    cx,cy,cz,_ = np.rollaxis(np.frombuffer(np.array(face).data,
                                           dtype='i1').reshape(-1,4),1)
    R,G,B = [np.abs(_).astype('f') for _ in (cx, cy, cz)]

    window.update_xyz(Xo,Yo,Zo,COLOR=(R,G,B,R*0+1))

    window.clearcolor = [1,1,1,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example 8
def once():
    if not FOR_REAL:
        dataset.advance()
        global depth
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect

    (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)

    # Compute the surface normals
    normals.normals_opencl(depth, mask, rect)

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()
    R_aligned = lattice.translation_opencl(R_oriented)

    global modelmat
    if modelmat is None:
        modelmat = R_aligned.copy()
    else:
        modelmat, _ = grid.nearest(modelmat, R_aligned)

    global face, Xo, Yo, Zo
    _, _, _, face = np.rollaxis(opencl.get_modelxyz(), 1)
    Xo, Yo, Zo, _ = np.rollaxis(opencl.get_xyz(), 1)

    global cx, cy, cz
    cx, cy, cz, _ = np.rollaxis(
        np.frombuffer(np.array(face).data, dtype='i1').reshape(-1, 4), 1)
    R, G, B = [np.abs(_).astype('f') for _ in (cx, cy, cz)]

    window.update_xyz(Xo, Yo, Zo, COLOR=(R, G, B, R * 0 + 1))

    window.clearcolor = [1, 1, 1, 0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
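
The cx, cy, cz channels are recovered by reinterpreting the raw bytes of the
packed face array as signed 8-bit values, four per element. A standalone
sketch of that numpy mechanism with made-up data (in the source the values
come from opencl.get_modelxyz()):

import numpy as np

# Reinterpret a float32 array's raw bytes as signed 8-bit components, four
# per element, as done for 'face' above. The values here are dummies.
packed = np.zeros(6, dtype='f4')
components = np.frombuffer(np.array(packed).data, dtype='i1').reshape(-1, 4)
print(components.shape)   # (6, 4): one signed byte per channel per element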
Example 9
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat
    (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)

    global n,w
    if 0:
        n,w = normals.normals_numpy(depth)
        show_normals(n, w, 'normals_numpy')

    if 0:
        n,w = normals.normals_c(depth)
        show_normals(n, w, 'normals_c')

    if 1:
        normals.opencl.set_rect(rect)
        dt = timeit.timeit(lambda:
                           normals.normals_opencl(depth, mask, rect).wait(),
                           number=1)

        #print dt
        nw = normals.opencl.get_normals()
        n,w = nw[:,:,:3], nw[:,:,3]
        #show_normals(n, w, 'normals_opencl')
        show_normals_sphere(n, w)

    pylab.waitforbuttonpress(0.01)
Example 10
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect, modelmat
    (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)

    global n, w
    if 0:
        n, w = normals.normals_numpy(depth)
        show_normals(n, w, 'normals_numpy')

    if 0:
        n, w = normals.normals_c(depth)
        show_normals(n, w, 'normals_c')

    if 1:
        normals.opencl.set_rect(rect)
        dt = timeit.timeit(
            lambda: normals.normals_opencl(depth, mask, rect).wait(), number=1)

        #print dt
        nw = normals.opencl.get_normals()
        n, w = nw[:, :, :3], nw[:, :, 3]
        #show_normals(n, w, 'normals_opencl')
        show_normals_sphere(n, w)

    pylab.waitforbuttonpress(0.01)
Example 11
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect
    try:
        (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)
    except IndexError:
        return

    normals.opencl.set_rect(rect)
    normals.normals_opencl(depth, mask, rect).wait()

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()

    if "R_correct" in globals():
        # Correct the lattice ambiguity for 90 degree rotations just by
        # using the previous estimate. This is good enough for illustrations
        # but global alignment is preferred (see hashalign)
        R_oriented, _ = grid.nearest(R_correct, R_oriented)

    R_aligned = lattice.translation_opencl(R_oriented)
    R_correct = R_aligned

    # Find the color based on the labeling from lattice
    global face, Xo, Yo, Zo
    _, _, _, face = np.rollaxis(opencl.get_modelxyz(), 1)

    global cx, cy, cz
    cx, cy, cz, _ = np.rollaxis(np.frombuffer(np.array(face).data, dtype="i1").reshape(-1, 4), 1) - 1
    global R, G, B
    R, G, B = [np.abs(_).astype("f") for _ in (cx, cy, cz)]
    if 0:
        G *= 0
        R *= 0
        B *= 0
    else:
        pass

    # Draw the points collected on a sphere
    nw = normals.opencl.get_normals()
    global n, w
    n, w = nw[:, :, :3], nw[:, :, 3]

    if 1:  # Point cloud position mode
        X, Y, Z, _ = np.rollaxis(opencl.get_xyz(), 1)
    else:
        X, Y, Z = n[:, :, 0], n[:, :, 1], n[:, :, 2]
        X, Y, Z = map(lambda _: _.copy(), (X, Y, Z))

    # Render the points in 'table space' but colored with the axes from flatrot
    window.update_xyz(X, Y, Z, COLOR=(R, G * 0, B, R * 0 + w.flatten()))
    window.clearcolor = [1, 1, 1, 0]
    window.Refresh()
    # pylab.imshow(1./depth)
    pylab.waitforbuttonpress(0.01)
Example 12
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect
    try:
        (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)
    except IndexError:
        return

    normals.opencl.set_rect(rect)
    normals.normals_opencl(depth, mask, rect).wait()

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()

    if 'R_correct' in globals():
        # Correct the lattice ambiguity for 90 degree rotations just by
        # using the previous estimate. This is good enough for illustrations
        # but global alignment is preferred (see hashalign)
        R_oriented, _ = grid.nearest(R_correct, R_oriented)

    R_aligned = lattice.translation_opencl(R_oriented)
    R_correct = R_aligned

    # Find the color based on the labeling from lattice
    global face, Xo, Yo, Zo
    _, _, _, face = np.rollaxis(opencl.get_modelxyz(), 1)

    global cx, cy, cz
    cx, cy, cz, _ = np.rollaxis(
        np.frombuffer(np.array(face).data, dtype='i1').reshape(-1, 4), 1) - 1
    global R, G, B
    R, G, B = [np.abs(_).astype('f') for _ in (cx, cy, cz)]
    if 0:
        G *= 0
        R *= 0
        B *= 0
    else:
        pass

    # Draw the points collected on a sphere
    nw = normals.opencl.get_normals()
    global n, w
    n, w = nw[:, :, :3], nw[:, :, 3]

    if 1:  # Point cloud position mode
        X, Y, Z, _ = np.rollaxis(opencl.get_xyz(), 1)
    else:
        X, Y, Z = n[:, :, 0], n[:, :, 1], n[:, :, 2]
        X, Y, Z = map(lambda _: _.copy(), (X, Y, Z))

    # Render the points in 'table space' but colored with the axes from flatrot
    window.update_xyz(X, Y, Z, COLOR=(R, G * 0, B, R * 0 + w.flatten()))
    window.clearcolor = [1, 1, 1, 0]
    window.Refresh()
    #pylab.imshow(1./depth)
    pylab.waitforbuttonpress(0.01)
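
The R_correct bookkeeping in the last two examples follows the idea in the
comment: the lattice orientation is only determined up to 90-degree rotations
about the vertical axis, so each new estimate is snapped to whichever
candidate lies closest to the previous frame's estimate. The sketch below
illustrates that idea in plain numpy; it is not the project's grid.nearest
implementation.

import numpy as np

# Illustration only: snap a new orientation estimate to whichever of its
# four 90-degree-about-vertical variants is closest to the previous one.
def rot_y(k):
    c, s = np.cos(k * np.pi / 2.0), np.sin(k * np.pi / 2.0)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

def snap_to_previous(R_prev, R_new):
    candidates = [R_new.dot(rot_y(k)) for k in range(4)]
    errs = [np.linalg.norm(C - R_prev) for C in candidates]
    return candidates[int(np.argmin(errs))]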