def update(dt=0):
    global projpts, rgb, depth
    depth, _ = freenect.sync_get_depth()
    rgb, _ = freenect.sync_get_video()
    q = depth
    X, Y = np.meshgrid(range(640), range(480))

    # YOU CAN CHANGE THIS AND RERUN THE PROGRAM!
    # Point cloud downsampling
    d = 4
    projpts = calibkinect.depth2xyzuv(q[::d, ::d], X[::d, ::d], Y[::d, ::d])
    refresh()
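A minimal, hypothetical consumer of the snippet above: depth2xyzuv returns an (xyz, uv) pair, so the projpts global can be unpacked directly. The update()/refresh() scaffolding is assumed to live elsewhere in the demo.

update()
xyz, uv = projpts        # (N, 3) 3D points and (N, 2) coordinates on the RGB image plane
print(xyz.shape, uv.shape)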
def calcPosition(dets):
    for k, d in enumerate(dets):
        #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        #    k, d.left(), d.top(), d.right(), d.bottom()))
        u, v = np.mgrid[d.left():d.right(), d.top():d.bottom()]
        #print("Height/y-pixel: {}".format(HEIGHT/(d.bottom()-d.top())))
        #print("Width/x-pixel: {}".format(WIDTH /(d.right()-d.left())))
        #print(" ")
        xyz, uv = calibkinect.depth2xyzuv(depth1[v, u], u, v)
        if len(xyz) != 0:
            #print(depth_dets[k-1].bottom())  #+= 20
            #depth_dets[k-1].set_bottom(depth_dets[k-1].bottom()+20)
            #print(depth_dets[k-1].bottom())
            #x_ind = np.argmin(abs(xyz[:,0]))
            #y_ind = len(xyz[:,1])/2  #np.argmin(abs(xyz[:,1]))
            z_ind = len(xyz[:, 2]) / 2  #np.argmin(abs(xyz[:,2]))

            x = np.median(xyz[:, 0])  #xyz[x_ind,0] #+ 0.03
            y = np.median(xyz[:, 1])  #xyz[y_ind,1] #+ 0.04
            z = np.median(xyz[:, 2])  #*(1-.270718) #xyz[z_ind,2] #- 0.04

            # Empirical calibration correction (hand-tuned offsets), then back to metres
            x = x*100 + 3.3 + z*9.01e-1
            y = y*100 + 15 - z*4.87
            z = z*100 - 44.6 - z*21
            x = x/100
            y = y/100
            z = z/100

            # dlib.rectangle expects integer pixel coordinates
            rect = dlib.rectangle(left=int(np.median(uv[:, 0]) - 5),
                                  right=int(np.median(uv[:, 0]) + 5),
                                  top=int(np.median(uv[:, 1]) - 5),
                                  bottom=int(np.median(uv[:, 1]) + 5))

            print("x [m]: {}".format(x))
            print("y [m]: {}".format(y))
            print("z [m]: {}".format(z))
            print(" ")
            return x, y, z, rect
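calcPosition() expects dlib detections and reads a module-level depth frame named depth1. A hypothetical caller might look like the sketch below; the detector setup and frame names are assumptions, not part of the original code.

import dlib
import freenect

detector = dlib.get_frontal_face_detector()   # standard dlib HOG face detector
rgb, _ = freenect.sync_get_video()
depth1, _ = freenect.sync_get_depth()         # module-level global read by calcPosition()
dets = detector(rgb, 1)                       # upsample once to catch smaller faces
if len(dets) > 0:
    x, y, z, rect = calcPosition(dets)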
#!/usr/bin/env python
# Test of mapping depth to video using calibkinect, which doesn't seem to work
import numpy as np
import freenect, calibkinect
import matplotlib.pyplot as plt

depth = freenect.sync_get_depth()[0]
video = freenect.sync_get_video()[0]
depth = np.where(depth == 2047, 0, depth)

_, uv = calibkinect.depth2xyzuv(depth)
mapping = uv.astype(np.dtype('int16')).reshape(480, 640, 2)

mapped = np.zeros((480, 640, 3), np.dtype('uint8'))
for i, row in enumerate(mapping):
    for j, point in enumerate(row):
        mapped[i, j] = video[point[0], point[1]]

plt.figure(1)
plt.imshow(depth, cmap=plt.cm.gray)
plt.figure(2)
plt.imshow(mapped)
plt.figure(3)
plt.imshow(video)
plt.show()
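The nested per-pixel loop above can be replaced by NumPy fancy indexing. A minimal sketch, assuming uv's first column is the horizontal coordinate u and its second is v (if so, the loop above indexes video with (u, v) instead of (v, u), which may be one reason it "doesn't seem to work"), and assuming one uv entry per depth pixel, as the original reshape already assumes.

import numpy as np
import freenect, calibkinect

depth = freenect.sync_get_depth()[0]
video = freenect.sync_get_video()[0]
depth = np.where(depth == 2047, 0, depth)        # zero out invalid readings

_, uv = calibkinect.depth2xyzuv(depth)
uv = np.rint(uv).astype(np.intp).reshape(480, 640, 2)

u = np.clip(uv[..., 0], 0, 639)                  # clamp to the RGB image bounds
v = np.clip(uv[..., 1], 0, 479)
mapped = video[v, u]                             # vectorized gather, shape (480, 640, 3)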
def point_cloud():
    ''' turn depth into raw data '''
    return depth2xyzuv(freenect.sync_get_depth()[0])
while True:
    depth, _ = freenect.sync_get_depth()
    rgb, _ = freenect.sync_get_video()
    depth = depth[::d, ::d]

    intdepth = depth.astype(int)                  # np.int is removed in recent NumPy
    diff = intdepth
    diff[np.fabs(intdepth - bgnd) < 10] = 0       # zero out pixels close to the background

    for i in range(3, 4):
        diff = median_filter(diff, size=(i, i))

    old_diff = diff.copy()
    nonz_idx = diff.nonzero()
    diff[nonz_idx] = prev[nonz_idx] * diff[nonz_idx]   # keep foreground seen in consecutive frames

    projpts = calibkinect.depth2xyzuv(diff, X, Y)
    points = projpts[0]
    if points.shape[0] < 15:
        cloud = None
    else:
        cloud = np.mean(points, axis=0)
        print(cloud)

    #cv.imshow('Current', depth_to_rgb(depth))
    cv.imshow('Diff', depth_to_rgb(diff))
    cv.waitKey(5)

    prev = old_diff
    prev[prev.nonzero()] = 1

    #print("Projected points")
    #refresh()
    #print("Drew image")
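The loop above uses d, X, Y, bgnd and prev without defining them. One plausible initialisation (an assumption, not part of the original) builds the background model by averaging a few depth frames at the same downsampling:

import numpy as np
import cv2 as cv                                  # provides the cv.imshow/cv.waitKey used above
from scipy.ndimage import median_filter
import freenect, calibkinect

d = 6                                             # downsampling factor (assumed)
X, Y = np.meshgrid(range(640), range(480))
X, Y = X[::d, ::d], Y[::d, ::d]                   # grids matched to the downsampled depth

frames = [freenect.sync_get_depth()[0][::d, ::d].astype(int) for _ in range(10)]
bgnd = np.mean(frames, axis=0)                    # per-pixel background depth
prev = np.zeros_like(bgnd)                        # foreground mask from the previous frame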
def get_data(self):
    (depth, _) = get_depth()
    (raw_data, _) = depth2xyzuv(depth)
    self.data = np.array(
        [point for point in raw_data[::4] if inBBox(point, self.bounds)])
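inBBox is not shown in the snippet above; a minimal hypothetical stand-in, assuming self.bounds is a ((xmin, ymin, zmin), (xmax, ymax, zmax)) pair of corners:

def inBBox(point, bounds):
    # True if the 3D point lies inside the axis-aligned box (hypothetical bounds layout)
    lo, hi = bounds
    return all(lo[i] <= point[i] <= hi[i] for i in range(3))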
def xyzuv(self):
    q = self.depth
    X, Y = numpy.meshgrid(range(KINECT_FRAME_WIDTH), range(KINECT_FRAME_HEIGHT))
    return calibkinect.depth2xyzuv(q, X, Y)
def loopcv():
    cv.NamedWindow("Depth_Map", cv.CV_WINDOW_AUTOSIZE)
    cv.SetMouseCallback("Depth_Map", on_mouse, None)
    print "Execute loopcv: click 3 pts on mirror, click 4 pts at screen corner"
    global mr3
    global sn4
    global sn4_ref

    while 1:
        (depth, _) = freenect.sync_get_depth()
        im = array2cv(depth.astype(np.uint8))
        #[warp] add rgb as img

        if pt is not None:
            print "=================="
            (x_d, y_d) = pt
            print "x=", x_d, " ,y=", y_d
            #print depth.shape
            # Watch out the indexing for depth: col,row = 480,640
            d_raw = np.array([depth[y_d, x_d]])
            u_d = np.array([x_d])
            v_d = np.array([y_d])
            print "d_raw= ", d_raw
            print "u_d= ", u_d
            print "v_d= ", v_d

            xyz, uv = calibkinect.depth2xyzuv(d_raw, u_d, v_d)
            print "XYZ=", xyz
            print "XYZonRGBplane=", uv

            cv.WaitKey(100)
            cv.Circle(im, (x_d, y_d), 4, (0, 0, 255, 0), -1, 8, 0)
            cv.Circle(im, (int(uv[0, 0]), int(uv[0, 1])), 2, (255, 255, 255, 0), -1, 8, 0)

            if (mr3 is None):
                mr3 = xyz
                #print mr3
            elif (mr3.shape[0] <= 2):   # append "2"+1 = 3 pts
                mr3 = np.append(mr3, xyz, axis=0)
                #print "append mr3=",mr3
                if (mr3.shape[0] == 3):
                    print "enough for mirror, click on screen"
                    #print "mr3.shape= ",mr3.shape
            elif (mr3.shape[0] == 3):
                if (sn4 is None):
                    sn4 = xyz
                    sn4_ref = MirrorReflection(mr3, xyz[0])
                elif (sn4.shape[0] <= 3):   # append "3"+1 = 4 pts
                    sn4 = np.append(sn4, xyz, axis=0)
                    sn4_ref = np.append(sn4_ref, MirrorReflection(mr3, xyz[0]), axis=0)
                    if (sn4.shape[0] == 4):   # sn4 now has all 4 pts
                        print "Total screen pts before reflection=", sn4
                        #print "sn4 shape= ",sn4.shape
                        print "Total screen pts after reflection=", sn4_ref
                        #print "sn4_ref shape= ",sn4_ref.shape
                if (mr3.shape[0] == 3 and sn4.shape[0] == 4):
                    print "go into Real Game: Virtual Mirror mode"
                    #print "mr3= ", mr3
                    #print "sn4_ref[0:3,:]= ", sn4_ref[0:3,:]
                else:
                    print "..."

        #for (x,y) in feat:
        #    print x, y, velx[y,x], vely[y,x]

        cv.ShowImage("Depth_Map", im)
        if cv.WaitKey(10) == 27:
            print "screen size after calibration: "
            print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0] - sn4_ref[1])
            print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1] - sn4_ref[2])
            print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2] - sn4_ref[3])
            print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3] - sn4_ref[0])
            break
        #update()

    if (sn4_ref is not None):
        cv.DestroyWindow("Depth_Map")
        VirtualMirror()
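Both loopcv() and VirtualMirror() call a MirrorReflection helper that is not included here. A hypothetical sketch of such a helper, reflecting a 3D point across the plane through the three clicked mirror points (the original project's version and return shape may differ):

def MirrorReflection(plane_pts, p):
    # Unit normal of the plane spanned by the three mirror points
    a, b, c = plane_pts[0], plane_pts[1], plane_pts[2]
    n = np.cross(b - a, c - a)
    n = n / np.linalg.norm(n)
    d = np.dot(p - a, n)                  # signed distance of p from the plane
    return np.array([p - 2.0 * d * n])    # shape (1, 3), so np.append(..., axis=0) works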
def VirtualMirror():
    cv.NamedWindow("RGB_remap", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Depth_remap", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow('dst', cv.CV_WINDOW_NORMAL)
    cv.SetMouseCallback("Depth_remap", on_mouse, None)

    print "Virtual Mirror"
    print "Calibrated 4 Screen corner= ", sn4_ref
    print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0] - sn4_ref[1])
    print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1] - sn4_ref[2])
    print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2] - sn4_ref[3])
    print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3] - sn4_ref[0])

    global head_pos
    global head_virtual
    global scene4_cross
    head_pos = np.array([-0.2, -0.2, 1.0])  #Head_detect()

    while 1:
        (depth, _) = freenect.sync_get_depth()
        (rgb, _) = freenect.sync_get_video()
        #print type(depth)
        img = array2cv(rgb[:, :, ::-1])
        im = array2cv(depth.astype(np.uint8))

        # modularize this part for update_on() and loopcv()
        #q = depth
        X, Y = np.meshgrid(range(640), range(480))
        d = 2   # downsampling if needed
        projpts = calibkinect.depth2xyzuv(depth[::d, ::d], X[::d, ::d], Y[::d, ::d])
        xyz, uv = projpts

        if tracking == 0:
            #*********************************
            if pt is not None:
                print "=================="
                (x_d, y_d) = pt
                print "x=", x_d, " ,y=", y_d
                #print depth.shape
                # Watch out the indexing for depth: col,row = 480,640
                d_raw = np.array([depth[y_d, x_d]])
                u_d = np.array([x_d])
                v_d = np.array([y_d])
                print "d_raw= ", d_raw
                print "u_d= ", u_d
                print "v_d= ", v_d

                head3D, head2D = calibkinect.depth2xyzuv(d_raw, u_d, v_d)
                print "XYZ=", head3D
                print "XYZonRGBplane=", head2D

                head_pos = head3D[0]
                #print "head_pos.shape",head_pos.shape
                print "head_pos= ", head_pos

                cv.WaitKey(100)
                cv.Circle(im, (x_d, y_d), 4, (0, 0, 255, 0), -1, 8, 0)
                cv.Circle(im, (int(head2D[0, 0]), int(head2D[0, 1])), 2, (255, 255, 255, 0), -1, 8, 0)
            #*********************************
        elif tracking == 1:
            # find the nearest point (nose) as reference for right eye position
            print "nose"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :] + (0.03, 0.04, 0.01)
            #print head_pos.shape
            #print head_pos
        elif tracking == 2:
            # find the closest point as eye position
            print "camera"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :]
            #print head_pos.shape
            #print head_pos
        else:
            print "please select a tracking mode"

        head_virtual = MirrorReflection(sn4_ref[0:3, :], head_pos)
        print "head_virtual= ", head_virtual

        rgbK = np.array([[520.97092069697146, 0.0, 318.40565581396697],
                         [0.0, 517.85544366622719, 263.46756370601804],
                         [0.0, 0.0, 1.0]])
        rgbD = np.array([[0.22464481251757576], [-0.47968370787671893], [0.0], [0.0]])
        irK = np.array([[588.51686020601733, 0.0, 320.22664144213843],
                        [0.0, 584.73028132692866, 241.98395817513071],
                        [0.0, 0.0, 1.0]])
        irD = np.array([[-0.1273506872313161], [0.36672476189160591], [0.0], [0.0]])

        mapu = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapv = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)

        cv.InitUndistortMap(rgbK, rgbD, mapu, mapv)
        cv.InitUndistortMap(irK, irD, mapx, mapy)

        if 1:
            rgb_remap = cv.CloneImage(img)
            cv.Remap(img, rgb_remap, mapu, mapv)
            depth_remap = cv.CloneImage(im)
            cv.Remap(im, depth_remap, mapx, mapy)

        scene4_cross = Cross4Pts.CrossPts(xyz, uv, head_pos, head_virtual, sn4_ref)

        #[warp] Add whole warping code here
        #[warp] points = Scene4Pts() as warping 4 pts
        # Flip the dst image!!!!!!!!!
        #ShowImage("rgb_warp", dst)

        # Within/out of the rgb range
        # Mapping Destination (width, height)=(x,y)
        # Warning: the order of pts is clockwise: pt1(L-T), pt2(R-T), pt3(R-B), pt4(L-B)
        #points = [(test[0,0],test[0,1]), (630.,300.), (700.,500.), (400.,470.)]
        points = [(scene4_cross[0, 0], scene4_cross[0, 1]),
                  (scene4_cross[1, 0], scene4_cross[1, 1]),
                  (scene4_cross[2, 0], scene4_cross[2, 1]),
                  (scene4_cross[3, 0], scene4_cross[3, 1])]
        # Warping the image without flipping (camera image)
        #npoints = [(0.,0.), (640.,0.), (640.,480.), (0.,480.)]
        # Warping the image with flipping (mirror flip image)
        npoints = [(640., 0.), (0., 0.), (0., 480.), (640., 480.)]

        mat = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(points, npoints, mat)

        #src = cv.CreateImage( cv.GetSize(img), cv.IPL_DEPTH_32F, 3 )
        src = cv.CreateImage(cv.GetSize(rgb_remap), cv.IPL_DEPTH_32F, 3)
        #cv.ConvertScale(img,src,(1/255.00))
        cv.ConvertScale(rgb_remap, src, (1 / 255.00))

        dst = cv.CloneImage(src)
        cv.Zero(dst)
        cv.WarpPerspective(src, dst, mat)

        #************************************************************************
        # Remap the rgb and depth image
        # Warping will use the remapped rgb image as src
        if 1:
            cv.ShowImage("RGB_remap", rgb_remap)    #rgb[200:440,300:600,::-1]
            cv.ShowImage("Depth_remap", depth_remap)
            cv.ShowImage("dst", dst)                # warped rgb image

        if cv.WaitKey(5) == 27:
            cv.DestroyWindow("RGB_remap")
            cv.DestroyWindow("Depth_remap")
            cv.DestroyWindow("dst")
            break
def VirtualMirror(): cv.NamedWindow("RGB_remap",cv.CV_WINDOW_NORMAL) cv.NamedWindow("Depth_remap",cv.CV_WINDOW_AUTOSIZE) cv.NamedWindow('dst', cv.CV_WINDOW_NORMAL) cv.SetMouseCallback( "Depth_remap", on_mouse, None) print "Virtual Mirror" print "Calibrated 4 Screen corner= ", sn4_ref print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0]-sn4_ref[1]) print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1]-sn4_ref[2]) print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2]-sn4_ref[3]) print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3]-sn4_ref[0]) global head_pos global head_virtual global scene4_cross head_pos = np.array([-0.2,-0.2,1.0]) #Head_detect() while 1: (depth,_) = freenect.sync_get_depth() (rgb,_) = freenect.sync_get_video() #print type(depth) img = array2cv(rgb[:,:,::-1]) im = array2cv(depth.astype(np.uint8)) #modulize this part for update_on() and loopcv() #q = depth X,Y = np.meshgrid(range(640),range(480)) d = 2 #downsampling if need projpts = calibkinect.depth2xyzuv(depth[::d,::d],X[::d,::d],Y[::d,::d]) xyz,uv = projpts if tracking == 0: #********************************* if pt is not None: print "==================" (x_d,y_d) = pt print "x=",x_d, " ,y=",y_d #print depth.shape #Watch out the indexing for depth col,row = 480,640 d_raw = np.array([depth[y_d,x_d]]) u_d = np.array([x_d]) v_d = np.array([y_d]) print "d_raw= ", d_raw print "u_d= ", u_d print "v_d= ", v_d head3D,head2D = calibkinect.depth2xyzuv(d_raw,u_d,v_d) print "XYZ=", head3D print "XYZonRGBplane=", head2D head_pos = head3D[0] #print "head_pos.shape",head_pos.shape print "head_pos= ",head_pos cv.WaitKey(100) cv.Circle(im, (x_d,y_d), 4, (0, 0, 255, 0), -1, 8, 0) cv.Circle(im, (int(head2D[0,0]),int(head2D[0,1])), 2, (255, 255, 255, 0), -1, 8, 0) #********************************* elif tracking == 1: #find the nearest point (nose) as reference for right eye position print "nose" inds = np.nonzero(xyz[:,2]>0.5) #print xyz.shape new_xyz = xyz[inds] #print new_xyz.shape close_ind = np.argmin(new_xyz[:,2]) head_pos = new_xyz[close_ind,:]+(0.03,0.04,0.01) #print head_pos.shape #print head_pos elif tracking == 2: #find the closest point as eye posiiton print "camera" inds = np.nonzero(xyz[:,2]>0.5) #print xyz.shape new_xyz = xyz[inds] #print new_xyz.shape close_ind = np.argmin(new_xyz[:,2]) head_pos = new_xyz[close_ind,:] #print head_pos.shape #print head_pos else: print "please select a tracking mode" head_virtual = MirrorReflection (sn4_ref[0:3,:],head_pos) print "head_virtual= ",head_virtual rgbK = np.array([[520.97092069697146,0.0,318.40565581396697], [0.0,517.85544366622719,263.46756370601804], [0.0,0.0,1.0]]) rgbD = np.array([[0.22464481251757576],[-0.47968370787671893],[0.0],[0.0]]) irK = np.array([[588.51686020601733,0.0,320.22664144213843], [0.0,584.73028132692866,241.98395817513071], [0.0,0.0,1.0]]) irD = np.array([[-0.1273506872313161], [0.36672476189160591], [0.0], [0.0]]) mapu = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) mapv = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) cv.InitUndistortMap(rgbK, rgbD, mapu, mapv) cv.InitUndistortMap(irK, irD, mapx, mapy) if 1: rgb_remap = cv.CloneImage(img) cv.Remap(img, rgb_remap, mapu, mapv) depth_remap = cv.CloneImage(im) cv.Remap(im, depth_remap, mapx, mapy) scene4_cross = Cross4Pts.CrossPts(xyz, uv, head_pos, head_virtual ,sn4_ref) #[warp] Add whole warpping code here #[warp] points = Scene4Pts() as warpping 4 pts #Flip the dst image!!!!!!!!! 
#ShowImage("rgb_warp", dst) #Within/out of the rgb range #Mapping Destination (width, height)=(x,y) #Warning: the order of pts in clockwise: pt1(L-T),pt2(R-T),pt3(R-B),pt4(L-B) #points = [(test[0,0],test[0,1]), (630.,300.), (700.,500.), (400.,470.)] points = [(scene4_cross[0,0],scene4_cross[0,1]), (scene4_cross[1,0],scene4_cross[1,1]), (scene4_cross[2,0],scene4_cross[2,1]), (scene4_cross[3,0],scene4_cross[3,1])] #Warping the image without flipping (camera image) #npoints = [(0.,0.), (640.,0.), (640.,480.), (0.,480.)] #Warping the image with flipping (mirror flip image) npoints = [(640.,0.), (0.,0.), (0.,480.), (640.,480.)] mat = cv.CreateMat(3, 3, cv.CV_32FC1) cv.GetPerspectiveTransform( points, npoints, mat); #src = cv.CreateImage( cv.GetSize(img), cv.IPL_DEPTH_32F, 3 ) src = cv.CreateImage( cv.GetSize(rgb_remap), cv.IPL_DEPTH_32F, 3 ) #cv.ConvertScale(img,src,(1/255.00)) cv.ConvertScale(rgb_remap,src,(1/255.00)) dst = cv.CloneImage( src ); cv.Zero(dst); cv.WarpPerspective(src, dst, mat); #************************************************************************ #Remap the rgb and depth image #Warping will use remap rgb image as src if 1: cv.ShowImage("RGB_remap",rgb_remap) #rgb[200:440,300:600,::-1] cv.ShowImage("Depth_remap",depth_remap) cv.ShowImage("dst", dst) #warp rgb image if cv.WaitKey(5)==27: cv.DestroyWindow("RGB_remap") cv.DestroyWindow("Depth_remap") cv.DestroyWindow("dst") break
def loopcv(): cv.NamedWindow("Depth_Map", cv.CV_WINDOW_AUTOSIZE) cv.SetMouseCallback( "Depth_Map", on_mouse, None) print "Execute loopcv: click 3 pts on mirror, click 4 pts at screen corner" global mr3 global sn4 global sn4_ref while 1: (depth,_) = freenect.sync_get_depth() im = array2cv(depth.astype(np.uint8)) #[warp]add rgb as img if pt is not None: print "==================" (x_d,y_d) = pt print "x=",x_d, " ,y=",y_d #print depth.shape #Watch out the indexing for depth col,row = 480,640 d_raw = np.array([depth[y_d,x_d]]) u_d = np.array([x_d]) v_d = np.array([y_d]) print "d_raw= ", d_raw print "u_d= ", u_d print "v_d= ", v_d xyz,uv = calibkinect.depth2xyzuv(d_raw,u_d,v_d) print "XYZ=", xyz print "XYZonRGBplane=", uv cv.WaitKey(100) cv.Circle(im, (x_d,y_d), 4, (0, 0, 255, 0), -1, 8, 0) cv.Circle(im, (int(uv[0,0]),int(uv[0,1])), 2, (255, 255, 255, 0), -1, 8, 0) if(mr3 is None): mr3=xyz #print mr3 elif(mr3.shape[0] <= 2): #append "2"+1= 3pts mr3=np.append(mr3,xyz,axis=0) #print "append mr3=",mr3 if(mr3.shape[0] == 3): print "enough for mirror, click on screen" #print "mr3.shape= ",mr3.shape elif(mr3.shape[0] == 3): if(sn4 is None): sn4 = xyz sn4_ref = MirrorReflection(mr3,xyz[0]) elif(sn4.shape[0] <= 3): #append "3"+1= 4pts sn4 = np.append(sn4, xyz, axis=0) sn4_ref = np.append(sn4_ref, MirrorReflection(mr3,xyz[0]), axis=0) if(sn4.shape[0] == 4):#sn4 have 4 pts actually print "Total screen pts before reflection=" ,sn4 #print "sn4 shape= ",sn4.shape print "Total screen pts after reflection=" ,sn4_ref #print "sn4_ref shape= ",sn4_ref.shape if(mr3.shape[0]==3 and sn4.shape[0]==4): print "go into Real Game: Virtual Mirror mode" #print "mr3= ", mr3 #print "sn4_ref[0:3,:]= ", sn4_ref[0:3,:] else: print "..." #for (x,y) in feat: #print x, y, velx[y,x], vely[y,x] cv.ShowImage("Depth_Map",im) if cv.WaitKey(10)==27: print "screen size after calibration: " print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0]-sn4_ref[1]) print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1]-sn4_ref[2]) print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2]-sn4_ref[3]) print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3]-sn4_ref[0]) break #update() if(sn4_ref is not None): cv.DestroyWindow("Depth_Map") VirtualMirror()