def get_cld(im1, im2): #im1 and im2 must be grayscale
    if show_rt:
        t = time.time()

    #compute Fundamental matrix
    F, pts1, pts2 = compute_F(im1, im2)

    #compute Essential matrix
    E = compute_E(F, im1, im2)

    #recover the rotation R and translation T from the Essential matrix
    R, T = compute_R_T(E)

    #get K
    K = compute_K(im1)

    #get projection from K, R, T
    P = compute_P(K, R, T)

    Tx = T[0] #x-component of the translation vector (the stereo baseline)

    #reshape the point arrays so that stereoRectifyUncalibrated accepts them
    pts1_n = pts1.reshape((pts1.shape[0] * 2, 1))
    pts2_n = pts2.reshape((pts2.shape[0] * 2, 1))

    #compute rectification transforms (image size is width x height)
    retval, rec1, rec2 = cv2.stereoRectifyUncalibrated(pts1_n, pts2_n, F, (len(im1[0]), len(im1)))

    if DEBUGGING:
        print 'rec1:\n', rec1
        print 'rec2:\n', rec2

    #apply rectification transforms (dsize is width x height)
    rec_im1 = cv2.warpPerspective(im1, rec1, (len(im1[0]), len(im1)))

    rec_im2 = cv2.warpPerspective(im2, rec2, (len(im2[0]), len(im2)))

    if UNIT_TESTING:
        cv2.imshow('im1', rec_im1)
        cv2.waitKey(0)

        cv2.imshow('im2', rec_im2)
        cv2.waitKey(0)

        '''
        h = rec_im1.shape[0]
        w = rec_im1.shape[1]
        dispim = np.zeros((h,2*w), dtype=np.uint8)
        dispim[:,0:w] = rec_im1
        dispim[:,w:] = rec_im2
        for jj in range(0,h,15):
            cv2.line(dispim, (0,jj), (2*w,jj), (255,255,255), 1)
        cv2.imshow('Sbs', dispim)
        cv2.waitKey(0)
        '''

    #get disparity map
    stereo = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET, ndisparities=16, SADWindowSize=15)

    #rec_im1 = cv2.cvtColor(rec_im1, cv2.COLOR_BGR2GRAY)
    #rec_im2 = cv2.cvtColor(rec_im2, cv2.COLOR_BGR2GRAY)

    disparity = stereo.compute(rec_im1, rec_im2) #/ 16

    disparityF = disparity.astype(float)
    maxv = np.max(disparityF.flatten())
    minv = np.min(disparityF.flatten())
    disparityF = 255.0*(disparityF-minv)/(maxv-minv)
    disparityU = disparityF.astype(np.uint8)

    print 'disparity.dtype:', disparity.dtype

    cv2.imwrite('disparity.jpg', disparityU)

    if UNIT_TESTING:
        cv2.imshow('disparity', disparityU)
        cv2.waitKey(0)

        plt.subplot(122), plt.imshow(disparityF)
        plt.show()


    #get perspective transform (Q)
    cx = len(im1[0]) / 2
    cy = len(im1) / 2
    cxp = cx

    Q = np.asarray([[1, 0, 0, -cx],
                    [0, 1, 0, -cy],
                    [0, 0, 0, f_pix],
                    [0, 0, -1/Tx, (cx-cxp)/Tx]])

    #reproject to 3d (use the metric disparity scaled out of 16x fixed point,
    #not the display-normalized uint8 map)
    im_3D = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16.0, Q)

    if show_rt:
        print '\nget_cld() runtime:', time.time() - t, 'secs\n'

    return im_3D
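The Q matrix above is assembled by hand from cx, cy, f_pix, and Tx. In the fully calibrated case, cv2.stereoRectify returns Q directly; a minimal sketch, assuming the same camera matrix K and distortion vector dist for both views (the helper name is illustrative):

import cv2

def q_from_calibration(K, dist, image_size, R, T):
    # image_size is (width, height); R, T relate the left camera to the right
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
        K, dist, K, dist, image_size, R, T, flags=cv2.CALIB_ZERO_DISPARITY)
    return Q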
Example #2
 def projectTo3D(self):
     #openCV to make into a 3D image for writing later
     print 'creating 3d projection...'
     self.pointCloud = cv2.reprojectImageTo3D(
                         self.disparityMap,
                         self.transformation,
                         handleMissingValues=0)
Example #3
    def DistanceMeasure(self,x,y):
            # Remap the images using the rectification maps
            img1_rectified = cv2.remap(self.left_image, camera_configs.left_map1, camera_configs.left_map2, cv2.INTER_LINEAR)
            img2_rectified = cv2.remap(self.right_image, camera_configs.right_map1, camera_configs.right_map2, cv2.INTER_LINEAR)

            # Convert the images to grayscale in preparation for StereoBM
            imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
            imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)

            # Two trackbars for tuning the parameters and inspecting the effect
            num = cv2.getTrackbarPos("num", "depth")
            blockSize = cv2.getTrackbarPos("blockSize", "depth")
            if blockSize % 2 == 0:
                blockSize += 1
            if blockSize < 5:
                blockSize = 5

            # Generate the disparity map (this uses SGBM; OpenCV also provides plain block matching via StereoBM, worth trying)
            stereo = cv2.StereoSGBM(minDisparity=16, numDisparities=128, SADWindowSize=3, uniquenessRatio=10,
                                    speckleWindowSize=100, speckleRange=16, disp12MaxDiff=-1,
                                    P1=8*3*3**2, P2=32*3*3**2, fullDP=False)
            disparity = stereo.compute(imgL, imgR)
            disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            # Reproject into 3D space; the z component gives the distance
            threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., camera_configs.Q)
            return threeD[y][x]  #numpy indexing is [row][col], i.e. [y][x]
Example #4
def compute_stereo_images(imgL, imgR, K, rvec, tvec, dist):
    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=16,
                                   P1=8 * 3 * window_size**2,
                                   P2=32 * 3 * window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)

    print('computing disparity...')
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print('generating 3d point cloud...', )
    h, w = imgL.shape[:2]
    P1, P2, Q = stereo_rectify(K, dist, K, dist, (h, w), rvec, tvec)
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
Example #5
        def output_fn(x):
            sp_l, disp = x

            cv2.imshow('sp_l', sp_l)
            cv2.imshow('disp', disp)

            raw_dist = cv2.reprojectImageTo3D((disp/16.).astype(np.float32), rectifier.Q, handleMissingValues=True)
            dist = raw_dist[:,:,2]
            if pcl:
                pcl_msg = toPointCloud(raw_dist, im_l)
                pcl_pub.publish(pcl_msg)
            cv2.imshow('dist', dist)

            params = {
                    'color' : 'yellow',
                    'min_dist' : 0.3,
                    'max_dist' : 1.5,
                    'min_area' : 0.003,
                    'max_area' : 1.0
                    }
            filtered, ctrs = apply_criteria(dist, sp_l, params)
            if len(ctrs) > 0:
                m = cv2.moments(ctrs[0]) # pull out biggest one--ish
                cX = int(m["m10"] / m["m00"])
                cY = int(m["m01"] / m["m00"])
                x,y,z = raw_dist[cY,cX]
                x,y,z = z,-x, -y
                obj_msg.header.stamp = rospy.Time.now()
                obj_msg.point.x = x
                obj_msg.point.y = y
                obj_msg.point.z = z
                obj_pub.publish(obj_msg)
            cv2.imshow('filtered', filtered)
Example #6
def threeD(disp, Q):
    # Compute the 3D coordinates of each pixel (in the left camera coordinate frame)
    points_3d = cv2.reprojectImageTo3D(disp, Q)

    points_3d = points_3d.reshape(points_3d.shape[0] * points_3d.shape[1], 3)

    X = points_3d[:, 0]
    Y = points_3d[:, 1]
    Z = points_3d[:, 2]

    #select and remove invalid points
    remove_idx1 = np.where(Z <= 0)
    remove_idx2 = np.where(Z > 15000)
    remove_idx3 = np.where(X > 10000)
    remove_idx4 = np.where(X < -10000)
    remove_idx5 = np.where(Y > 10000)
    remove_idx6 = np.where(Y < -10000)
    remove_idx = np.hstack((remove_idx1[0], remove_idx2[0], remove_idx3[0],
                            remove_idx4[0], remove_idx5[0], remove_idx6[0]))

    points_3d = np.delete(points_3d, remove_idx, 0)

    #compute the target point (here I take the median of the target region; choose whatever suits your case)
    if points_3d.any():
        x = np.median(points_3d[:, 0])
        y = np.median(points_3d[:, 1])
        z = np.median(points_3d[:, 2])
        targetPoint = [x, y, z]
    else:
        targetPoint = [0, 0, -1]  #the target region could not be identified

    return targetPoint
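The outlier rejection above can be expressed with a single boolean mask instead of six np.where calls; a minimal sketch under the same thresholds (the function name is illustrative):

import cv2
import numpy as np

def threeD_masked(disp, Q):
    # same filtering as above, written as one vectorized mask over X, Y, Z
    pts = cv2.reprojectImageTo3D(disp, Q).reshape(-1, 3)
    X, Y, Z = pts[:, 0], pts[:, 1], pts[:, 2]
    keep = (Z > 0) & (Z <= 15000) & (np.abs(X) <= 10000) & (np.abs(Y) <= 10000)
    pts = pts[keep]
    return np.median(pts, axis=0).tolist() if pts.size else [0, 0, -1]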
Example #7
def SaveDisparityImageToPLY(disparityImg, leftImg, perspectiveMatrix,
                            plyFilename):
    worldImg = cv2.reprojectImageTo3D(disparity=disparityImg,
                                      Q=perspectiveMatrix,
                                      handleMissingValues=True)
    SaveWorldImageToPLY(worldImg, leftImg, plyFilename)
    return
Example #8
def SGBM_update(val=0):
    global SGBM_num
    global SGBM_blockSize
    global threeD
    SGBM_blockSize = cv.getTrackbarPos('blockSize', 'SGNM_disparity')
    if SGBM_blockSize % 2 == 0:
        SGBM_blockSize += 1
    if SGBM_blockSize < 5:
        SGBM_blockSize = 5
    SGBM_stereo.setBlockSize(SGBM_blockSize)
    SGBM_num = cv.getTrackbarPos('num_disp', 'SGNM_disparity')
    num_disp = SGBM_num * 16
    SGBM_stereo.setNumDisparities(num_disp)

    SGBM_stereo.setUniquenessRatio(
        cv.getTrackbarPos('unique_Ratio', 'SGNM_disparity'))
    SGBM_stereo.setSpeckleWindowSize(
        cv.getTrackbarPos('spec_WinSize', 'SGNM_disparity'))
    SGBM_stereo.setSpeckleRange(
        cv.getTrackbarPos('spec_Range', 'SGNM_disparity'))
    SGBM_stereo.setDisp12MaxDiff(
        cv.getTrackbarPos('disp12MaxDiff', 'SGNM_disparity'))

    print('computing SGNM_disparity...')

    disp = SGBM_stereo.compute(imgL, imgR).astype(np.float32) / 16.0
    threeD = cv.reprojectImageTo3D(disp, Q)  # disp is already scaled out of 16x fixed point
    cv.imshow('left', imgL)
    cv.imshow('right', imgR)
    cv.imshow('SGNM_disparity', (disp - min_disp) / num_disp)
Example #9
File: camera.py Project: n800sau/roborep
 def do_disparity(self):
     if not (self.lframe is None or self.rframe is None):
         window_size = 3
         min_disp = 16
         num_disp = 112 - min_disp
         stereo = cv2.StereoSGBM(minDisparity=min_disp,
                                 numDisparities=num_disp,
                                 SADWindowSize=window_size,
                                 uniquenessRatio=10,
                                 speckleWindowSize=100,
                                 speckleRange=32,
                                 disp12MaxDiff=1,
                                 P1=8 * 3 * window_size**2,
                                 P2=32 * 3 * window_size**2,
                                 fullDP=False)
         disp = stereo.compute(self.lframe, self.rframe).astype(
             np.float32) / 16.0
         dbprint('generating 3d point cloud...')
         h, w = self.lframe.shape[:2]
         f = 0.8 * w  # guess for focal length
         Q = np.float32([
             [1, 0, 0, -0.5 * w],
             [0, -1, 0, 0.5 * h],  # turn points 180 deg around x-axis,
             [0, 0, 0, -f],  # so that y-axis looks up
             [0, 0, 1, 0]
         ])
         points = cv2.reprojectImageTo3D(disp, Q)
         colors = cv2.cvtColor(self.lframe, cv2.COLOR_BGR2RGB)
         mask = disp > disp.min()
         self.verts = points[mask]
         self.colors = colors[mask]
         self.disparity = (disp - min_disp) / num_disp
Example #10
    def measure_ball_dist(self,img_pair,ball_center):
        if ball_center==0:
            print("invalid ball center value")
            return 0
        img_left=img_pair[0]
        img_right=img_pair[1]
        img_left_rect=cv2.remap(img_left, self.Map_Left_1, self.Map_Left_2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        img_right_rect=cv2.remap(img_right, self.Map_Right_1, self.Map_Right_2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        imgl = cv2.cvtColor(img_left_rect, cv2.COLOR_BGR2GRAY)
        imgr = cv2.cvtColor(img_right_rect, cv2.COLOR_BGR2GRAY)

        disparity = self.stereo.compute(imgl,imgr)
        points=cv2.reprojectImageTo3D(disparity,self.Q,handleMissingValues=True)
        
        (b_x,b_y)=ball_center
        bp=points[b_y][b_x]

        if bp[2]>1000: #out of range
            print("got invalid ball dist")
            return 0 
        #units in m
        bp=bp*10
        #point to point distance
        dist=math.sqrt(math.pow(bp[0],2)+math.pow(bp[1],2)+math.pow(bp[2],2))
        return dist
Example #11
def Reprojection3D(image, disparity, f, b):
    Q = np.array([[1, 0, 0, -2964 / 2], [0, 1, 0, -2000 / 2], [0, 0, 0, f],
                  [0, 0, -1 / b, -124.343 / b]])

    points = cv2.reprojectImageTo3D(disparity, Q)
    mask = disparity > disparity.min()
    colors = image

    out_points = points[mask]
    out_colors = image[mask]

    verts = out_points.reshape(-1, 3)
    colors = out_colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])

    # note: PLY header lines must not be indented inside the string
    ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar blue
property uchar green
property uchar red
end_header
'''
    with open('stereo.ply', 'w') as fout:  # avoid shadowing the focal length f
        fout.write(ply_header % dict(vert_num=len(verts)))
        np.savetxt(fout, verts, '%f %f %f %d %d %d')
def StereoCompute(frame1, frame2, left_map1, left_map2, right_map1, right_map2,
                  Q):
    img1_rectified = cv2.remap(frame1, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(frame2, right_map1, right_map2,
                               cv2.INTER_LINEAR)
    imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)
    # imgL = img1_rectified
    # imgR = img2_rectified
    # Generate the disparity map with block matching (OpenCV also provides the SGBM/Semi-Global Block Matching algorithm, worth trying)
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=21)
    # stereo = cv2.StereoSGBM_create(minDisparity=0, numDisparities=16, blockSize=11, P1=8, P2=64)
    # stereo2 = cv2.StereoSGBM_create(minDisparity=0, numDisparities=16, blockSize=15, P1=8, P2=64)
    # print(cv2.StereoSGBM.getMode(stereo))
    disparity = stereo.compute(imgL, imgR)
    # disparity2 = stereo2.compute(imgL, imgR)

    disp = cv2.normalize(disparity,
                         disparity,
                         alpha=50,
                         beta=255,
                         norm_type=cv2.NORM_MINMAX,
                         dtype=cv2.CV_8U)
    # disp2 = cv2.normalize(disparity2, disparity2, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # Reproject into 3D space; the z component gives the distance
    # (OpenCV block matchers return 16x fixed-point disparity, so scale by 1/16)
    threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., Q)
    # print(threeD[120, 160, 2])

    return disp, threeD, img1_rectified, img2_rectified
Example #13
 def estimate_depth_map(self, disparity, Q, frame, metrics=True):
     """ Convert disparity map to depth map
     Arguments:
         frame (arr): left or right frame.
         disparity (arr): a disparity map with a shape of (w, h, 1)
         Q (arr): a 4x4 array with the following structure, 
                 [[1 0   0          -cx     ]
                  [0 1   0          -cy     ]
                  [0 0   0           f      ]
                  [0 0 -1/Tx (cx - cx')/Tx ]]
                 cx: is the principal point x in left image
                 cx': is the principal point x in right image
                 cy: is the principal point y in left image
                 f: is the focal lenth in left image
                 Tx: The x coordinate in Translation matrix  
         metrics (bool): if is true print by console the time of execution of depth map step.
     """
     if metrics:
         get_3D_points_time = time.time()
     disparity = disparity.astype(np.float32) / 16.0
     points = cv.reprojectImageTo3D(disparity, Q)
     colors = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
     mask = disparity > disparity.min()
     out_points = points[mask]
     out_colors = colors[mask]
     if metrics:
         print("3D points time: {} s".format(time.time() - get_3D_points_time))
     return out_points, out_colors
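A minimal sketch of the Q layout documented in the docstring above, assuming rectified cameras with principal points (cx, cy) and (cx', cy), focal length f in pixels, and baseline Tx; all names are illustrative:

import numpy as np

def make_q(cx, cy, cxp, f, Tx):
    # maps (x, y, disparity, 1) to homogeneous 3D, as in the docstring above
    return np.float32([[1, 0, 0, -cx],
                       [0, 1, 0, -cy],
                       [0, 0, 0, f],
                       [0, 0, -1.0 / Tx, (cx - cxp) / Tx]])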
def getPointCloud(dispMap, cals):
    #    p = pcl.PointCloud()
    validPoints = cv2.reprojectImageTo3D(
        dispMap, cals.qMatrix)[np.where(dispMap != -16)].squeeze()
    indices = []
    print(len(validPoints))

    for i in range(len(validPoints)):
        if (validPoints[i, 0] > 10 or validPoints[i, 0] < -10
                or validPoints[i, 1] > 10 or validPoints[i, 1] < -10
                or validPoints[i, 2] > 10 or validPoints[i, 2] < -10):
            indices.append(i)

    validPoints = np.delete(validPoints, indices, axis=0)
    print(len(validPoints))

    #    p.from_list(validPoints)
    #    fil = p.make_statistical_outlier_filter()
    #    fil.set_mean_k(100)
    #    fil.set_std_dev_mul_thresh (0.01)
    #    filtPoints = np.array(fil.filter().to_list())
    #    cloud = np.empty(len(filtPoints),dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
    #    cloud['x'] = filtPoints[:,0]
    #    cloud['y'] = filtPoints[:,1]
    #    cloud['z'] = filtPoints[:,2]
    #    cloud_element = PlyElement.describe(cloud,'vertex')
    return validPoints
def compute_disparity(img_l, img_r):

    rows, cols = img_l.shape[:2]
    color = (0, 255, 0)

    # rectification to find Q matrix
    Q = rectify(img_l, img_r)

    # Semi-global-block-matching
    stereo = SGBM()

    # computing disparity
    disparity = stereo.compute(img_l, img_r)

    # creating point cloud in ply form
    Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.CV_32F)
    colors = cv2.cvtColor(img_l, cv2.COLOR_BGR2RGB)
    mask = disparity > disparity.min()
    out_points = Image3D[mask]
    out_colors = colors[mask]
    out_fn = 'out.ply'
    pcl.write_ply('out.ply', out_points, out_colors)
    print('%s saved' % 'out.ply')

    return disparity
Example #16
    def reproject(self, img, disp):
        if len(img.shape) == 2:
            colors = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        else:
            colors = img

        # t = self.baseline
        # f = self.f
        # Q = np.float32([[1, 0, 0, -0.5*w],
        #                 [0, 1, 0, -0.5*h],
        #                 [0, 0, 0,      f],
        #                 [0, 0,-1/t,    0]])
        # print(Q)

        points = cv2.reprojectImageTo3D(disp, self.Q)
        #         mask = disp > disp.min()  # find pts with min depth
        #         mask = disp > 1

        points = points.reshape(-1, 3)
        colors = colors.reshape(-1, 3)
        disp = disp.reshape(-1)

        print(colors.shape, points.shape, disp.shape)

        # filter out NaN and inf (disparity == 0)
        mask = ((disp > disp.min()) & np.all(~np.isnan(points), axis=1)
                & np.all(~np.isinf(points), axis=1))

        out_points = points[mask]  # remove pts with no depth
        out_colors = colors[mask]  # remove pts with no depth

        # print(f'>> Generated 3d point cloud, {out_points.shape}')
        # print(f">> out_points {out_points.max()}, {out_points.min()}")

        return out_points, out_colors
    def GetPCLFromDisparity(self, disparity):
        Q = self.GetQ()
        pcl = cv2.reprojectImageTo3D(disparity,
                                     Q)  # returns shape (height,width,3)

        ## NOT NEEDED, as applying the mask will already reshape
        # Reshape from (height,width,3) to (height*width,3)
        #pcl = np.array(pcl).reshape(-1,1,3).squeeze()

        # Filter out invalid points
        mask = ((disparity < disparity.max()) & (disparity > disparity.min()) &
                (pcl[:, :, 0] > 2) & (np.isfinite(pcl[:, :, 0])) &
                (np.isfinite(pcl[:, :, 1])) & (np.isfinite(pcl[:, :, 2])))
        pcl = pcl[
            mask]  # returns shape ((height*width)-x,3) with x being the number of points filtered out by mask

        # If no color image was given, make all points black
        if self.color_image is None:
            pcl_c = np.append(
                pcl.transpose(), [np.zeros(pcl.shape[0])], axis=0).transpose(
                )  # format: [x, y, z, r, g, b] with r = g = b = 0 (black)
        else:
            pcl_c = np.append(pcl, self.color_image[mask],
                              axis=1)  # format: [x, y, z, r, g, b]
        return pcl_c
def main():
    # Set up GraycodePattern with params
    graycode = sl.GrayCodePattern_create(width_projector, height_projector)

    # setting white_thresh and black_thresh value
    graycode.setWhiteThreshold(white_thresh)
    graycode.setBlackThreshold(black_thresh)

    number_pattern_images = graycode.getNumberOfPatternImages()

    # Loading calibration parameters
    # Stereo rectify
    # Loading pattern images
    # Loading images (all white + all black) needed for shadows computation


    # Storage for pattern

    # Generate the all-white and all-black images needed for shadows mask computation
    # Setting pattern window on second monitor (the projector's one)

    # Decode
    print("Decoding pattern...")
    retval, disparity_map = graycode.decode(pattern_images, flags=sl.DECODE_3D_UNDERWORLD)
    print("Pattern decoded")

    # Compute the point cloud
    disparity_map = np.float32(disparity_map)
    point_cloud = cv2.reprojectImageTo3D(disparity_map)

    # Compute a mask to remove background
    _, thresholded_disp = cv2.threshold(disparity_map, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    dst = cv2.resize(thresholded_disp, (640, 480))
    cv2.imshow("threshold disp otsu", dst)
def project_into_3d(Q, depth):

    # get_mesh.main(imgL_BGR=imgL, Q=Q, disp= depth)
    return cv2.reprojectImageTo3D(
        disparity=depth,
        Q=Q,
    )
Example #20
def point_cloud(disparity_image, image_left, focal_length):
    """Create a point cloud from a disparity image and a focal length.

    Arguments:
      disparity_image: disparities in pixels.
      image_left: BGR-format left stereo image, to color the points.
      focal_length: the focal length of the stereo camera, in pixels.

    Returns:
      A string containing a PLY point cloud of the 3D locations of the
        pixels, with colors sampled from left_image. You may filter low-
        disparity pixels or noise pixels if you choose.
    """

    h, w = disparity_image.shape
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0, -1, 0,  0.5*h],  # turn points 180 deg around x-axis,
                    [0, 0, focal_length,     0],  # so that y-axis looks up
                    [0, 0, 0,      1]])
    points = cv2.reprojectImageTo3D(disparity_image, Q)
    colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
    mask = disparity_image > disparity_image.min()
    out_points = points[mask]
    out_colors = colors[mask]

    output = StringIO.StringIO()

    verts = np.hstack([out_points, out_colors])
    output.write(ply_header % dict(vert_num=len(verts)))
    np.savetxt(output, verts, '%f %f %f %d %d %d')

    return output.getvalue()
Example #21
def update():
    colors = ((1.0, 1.0, 1.0, 1.0))

#    out = cv2.imread('car_depth.png')
#    out = out.astype(float)
#    out = out/16
#    colors = cv2.imread('car.png')
#    colors = colors.astype(float)
#    colors = colors/255
#    ret, frame = cap.read()

    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    retL, left_img = capL.read()
    retR, right_img = capR.read()

#    left_img = frame[:,0:(width/2)]
#    right_img = frame[:,(width/2):width]
    disparity_img1 = df.disparity_with_filter(left_img, right_img, 32, 5)
    disparity_img2, max_val = df.disparity_without_filter(left_img, right_img, 64, 9)

    disparity_img1 = cv2.normalize(src=disparity_img1, dst=disparity_img1, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
    disparity_img1 = np.uint8(disparity_img1)
    depthMapImg = cv2.reprojectImageTo3D(disparity_img1, Q)

    colors = left_img.astype(float)
    colors = colors / 255
    sp2.setData(pos=np.array(depthMapImg, dtype=np.float64), color=colors, size=2)
Example #22
def pointCloud(disp, im, imgRigth, mtxL, distL, mtxR, distR, image_size, R, T, Q):

    im1 = im[:,:,0] #B
    im2 = im[:,:,1] #G
    im3 = im[:,:,2] #R

    points = cv2.reprojectImageTo3D(disp, Q) #points (X, Y, Z)

    im1, imgRightRemap, Q = rettificaImmagini(im1,imgRigth,mtxL, distL, mtxR, distR, image_size, R, T)
    im2, imgRightRemap, Q = rettificaImmagini(im2,imgRigth,mtxL, distL, mtxR, distR, image_size, R, T)
    im3, imgRightRemap, Q = rettificaImmagini(im3,imgRigth,mtxL, distL, mtxR, distR, image_size, R, T)

    im = cv2.merge([im1, im2, im3]) #now the rectified image is back in BGR

    colors = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) #colors (R, G, B)

    mask = disp > 0 # greater than where disp is -1, i.e. unknown
    coords = points[mask]
    colors = colors[mask]
    coords = coords.reshape(-1,3)
    colors = colors.reshape(-1,3)
    verts = np.hstack([coords, colors])
    '''this way each row of the file holds vertex position and color; does not work well with ParaView
    with open('out.ply', 'wb') as f:
        f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
        np.savetxt(f, verts, fmt='%f %f %f %d %d %d')  # %f, %d are the data types
    '''

    with open('out.ply', 'wb') as f:
        f.write((ply_header % dict(vert_num=len(coords))).encode('utf-8'))
        np.savetxt(f, coords, fmt='%f %f %f')
def SGBM_update(val=0):
    global threeD
    global SGBM_num
    global SGBM_blockSize
    SGBM_blockSize = cv2.getTrackbarPos('blockSize',
                                        'SGBM_disparity')  # read the value from the trackbar
    if SGBM_blockSize % 2 == 0:
        SGBM_blockSize += 1
    if SGBM_blockSize < 5:
        SGBM_blockSize = 5
    SGBM_stereo.setBlockSize(SGBM_blockSize)
    SGBM_num = cv2.getTrackbarPos('num_disp', 'SGBM_disparity')
    num_disp = SGBM_num * 16
    SGBM_stereo.setNumDisparities(num_disp)
    SGBM_stereo.setUniquenessRatio(
        cv2.getTrackbarPos('unique_Ratio', 'SGBM_disparity'))
    SGBM_stereo.setSpeckleWindowSize(
        cv2.getTrackbarPos('spec_WinSize', 'SGBM_disparity'))
    SGBM_stereo.setSpeckleRange(
        cv2.getTrackbarPos('spec_Range', 'SGBM_disparity'))
    SGBM_stereo.setDisp12MaxDiff(
        cv2.getTrackbarPos('disp12MaxDiff', 'SGBM_disparity'))

    print('computing SGBM_disparity...')

    disp = SGBM_stereo.compute(imgL, imgR).astype(np.float32) / 16.0
    threeD = cv2.reprojectImageTo3D(disp, camera_configs.Q)

    # cv2.imshow('left', imgL)
    # cv2.imshow('right', imgR)
    cv2.imshow('SGBM_disparity', (disp - min_disp) / num_disp)
    cv2.imwrite("./detect_img/SGBM_depth.jpg", (disp - min_disp) / num_disp)
def point_cloud(disparity_image, image_left, focal_length):
    """Create a point cloud from a disparity image and a focal length.
        Arguments:
        disparity_image: disparities in pixels.
        image_left: BGR-format left stereo image, to color the points.
        focal_length: the focal length of the stereo camera, in pixels.
        Returns:
        A string containing a PLY point cloud of the 3D locations of the
        pixels, with colors sampled from left_image. You may filter low-
        disparity pixels or noise pixels if you choose.
        """
    h, w = image_left.shape[:2]
    Q = np.float32([
        [1, 0, 0, w / 2],
        [0, -1, 0, h / 2],  # turn points 180 deg around x-axis,
        [0, 0, focal_length, 0],  # so that y-axis looks up
        [0, 0, 0, 1]
    ])

    # reproject image points to 3D space, compute the colors
    points = cv2.reprojectImageTo3D(disparity_image, Q)
    colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
    mask = disparity_image > disparity_image.min()
    out_points = points[mask]
    out_colors = colors[mask]

    # write PLY string data to StringIO object and return the contents
    cloud = StringIO.StringIO()
    verts = np.hstack([out_points, out_colors])
    cloud.write(PLY_HEADER % dict(vert_num=len(verts)))
    np.savetxt(cloud, verts, '%f %f %f %d %d %d')

    return cloud.getvalue()
    def get_registred_depth_rgb(self, depth, img):
        '''
        Returns the registered point cloud and image, transforming the camera's position into the world coordinate system
        :return: registered point cloud and image
        '''

        if depth is not None and img is not None:
            h, w = img.shape[:2]

            depth = np.array(depth, dtype=np.float32)

            # project points to 3D space
            points = cv2.reprojectImageTo3D(depth, self.depth_cam_mat())

            # transform coordinates to RGB camera coordinates
            points = np.dot(points, self.RGBCamParams['rot'].T)
            points = np.add(points, self.RGBCamParams['trans'])

            # handle invalid values
            points[depth >= depth.max()] = 0.0

            points = points.reshape(-1, w, 3)

            # project 3D points back to the image plane
            with np.errstate(divide='ignore', invalid='ignore'):
                x = np.array((points[:, :, 0] *
                              (self.RGBCamParams['fx'] / points[:, :, 2]) +
                              self.RGBCamParams['cx']),
                             dtype=np.int).clip(0, w - 1)
                y = np.array((points[:, :, 1] *
                              (self.RGBCamParams['fy'] / points[:, :, 2]) +
                              self.RGBCamParams['cy']),
                             dtype=np.int).clip(0, h - 1)

        return points, img[y, x]
Example #26
 def from_disparity(disp,
                    rgb=None,
                    Q=None,
                    fb=None,
                    roi=None,
                    clip=None,
                    scale=1.0):
     if rgb is not None:
         assert disp.shape[:2] == rgb.shape[:2]
     if Q is None:
         h, w = disp.shape[:2]
         if fb is None:
             fb = w * 0.8
         Q = np.float32([[1, 0, 0, -0.5 * w], [0, 1, 0, -0.5 * h],
                         [0, 0, 0, fb], [0, 0, 1, 0]])
     if roi is not None:
         x, y, w, h = roi
         Q[0, 3] += x * Q[0, 0]
         Q[1, 3] += y * Q[1, 1]
         disp = disp[y:y + h, x:x + w]
         rgb = rgb[y:y + h, x:x + w] if rgb is not None else None
     mask = disp > 0 if clip is None else \
         (disp > clip[0]) * (disp < clip[1])
     points = cv2.reprojectImageTo3D(disp, Q)[mask] * scale
     colors = cv2.cvtColor(
         rgb, cv2.COLOR_BGR2RGB)[mask] if rgb is not None else None
     return PointCloud(points, colors)
Example #27
def point_cloud(disparity_image, image_left, focal_length):
    """Create a point cloud from a disparity image and a focal length.
        Arguments:
        disparity_image: disparities in pixels.
        image_left: BGR-format left stereo image, to color the points.
        focal_length: the focal length of the stereo camera, in pixels.
        Returns:
        A string containing a PLY point cloud of the 3D locations of the
        pixels, with colors sampled from left_image. You may filter low-
        disparity pixels or noise pixels if you choose.
        """
    h, w = image_left.shape[:2]
    Q = np.float32([[1, 0,  0, w / 2],
                    [0, -1,  0,  h / 2],  # turn points 180 deg around x-axis,
                    [0, 0, focal_length,  0],  # so that y-axis looks up
                    [0, 0,  0,  1]])
                    
    # reproject image points to 3D space, compute the colors
    points = cv2.reprojectImageTo3D(disparity_image, Q)
    colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
    mask = disparity_image > disparity_image.min()
    out_points = points[mask]
    out_colors = colors[mask]
    
    # write PLY string data to StringIO object and return the contents
    cloud = StringIO.StringIO()
    verts = np.hstack([out_points, out_colors])
    cloud.write(PLY_HEADER % dict(vert_num=len(verts)))
    np.savetxt(cloud, verts, '%f %f %f %d %d %d')
    
    return cloud.getvalue()
def callback(data):
    disp_real = bridge.imgmsg_to_cv2(data.image,
                                     desired_encoding="passthrough")
    world_pts = cv2.reprojectImageTo3D(disp_real, Q, handleMissingValues=True)
    world_pts_msg = bridge.cv2_to_imgmsg(world_pts, "passthrough")
    world_pts_pub.publish(world_pts_msg)
    cv2.waitKey(1)
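A hedged sketch of how the callback above might be wired into a ROS node; the node and topic names are assumptions, and Q is expected to exist as a global loaded from calibration, as the callback implies:

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from stereo_msgs.msg import DisparityImage

bridge = CvBridge()
rospy.init_node('reproject_node')  # node name is illustrative
world_pts_pub = rospy.Publisher('/world_points', Image, queue_size=1)
rospy.Subscriber('/stereo/disparity', DisparityImage, callback)  # topic name is illustrative
rospy.spin()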
Example #29
    def calculate_depth_mm(self, disparity):
        localQ = self.Q.copy()
        #print(localQ)
        #print("Estimated distance between the two cameras: {:4.1f}mm".format(1/localQ[3,2]))

        # Rotate the image upside down for a clearer view in MeshLab
        localQ[1,:] = -1 * localQ[1,:]
        localQ[2,:] = -1 * localQ[2,:]

        # Calculate reprojected points
        points = cv2.reprojectImageTo3D(disparity, localQ)

        # Estimate distance
        # x_points_mm = np.polyval(self.distance_calibration_poly, -points[:,:,0])
        # y_points_mm = np.polyval(self.distance_calibration_poly, -points[:,:,1])
        if self.distance_calibration_invert:
            z_points_mm = np.polyval(self.distance_calibration_poly, -1/points[:,:,2])
        else:
            z_points_mm = np.polyval(self.distance_calibration_poly, -points[:,:,2])

        # Prepare infinity mask to avoid MeshLab issues
        # infinity_mask = disparity > disparity.min() # TODO check is this is always 0 or can change
        infinity_mask = disparity > 0
        # print("Minimum value: {}".format(disparity.min()))

        # Make global variable to be used by the tuner
        self.points = points
        #self.points = np.dstack((x_points_mm, y_points_mm, z_points_mm))
        self.infinity_mask = infinity_mask

        return points, infinity_mask, z_points_mm
Example #30
    def getDepth(self, image, image2):
          
        grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
        cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)
        
        grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1)
        cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY)
           
        [mat_w, mat_h] = self.size
        
        r = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
        r2 = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
        print type(r)
        print type(image)
        print type(self.map1x)
        print cv.GetSize(r)
        print cv.GetSize(self.map1x)
        cv.Remap(grayScaleFullImage, r, self.map1x, self.map1y)
        cv.Remap(grayScaleFullImage2, r2, self.map2x, self.map2y)
        
        cv.ShowImage("win3", r)
        cv.ShowImage("win4", r2)
        
        
        #stereo_match that comes in opencv
        
        # disparity range is tuned for 'aloe' image pair
        window_size = 3
        min_disp = 16
        num_disp = 112 - min_disp
        stereo = cv2.StereoSGBM(minDisparity=min_disp,
            numDisparities=num_disp,
            SADWindowSize=window_size,
            uniquenessRatio=10,
            speckleWindowSize=100,
            speckleRange=32,
            disp12MaxDiff=1,
            P1=8 * 3 * window_size ** 2,
            P2=32 * 3 * window_size ** 2,
            fullDP=False
        )
    
        print 'computing disparity...'
        disp = stereo.compute(np.asarray(r), np.asarray(r2)).astype(np.float32) / 16.0
    
        print 'generating 3d point cloud...'
        points = cv2.reprojectImageTo3D(disp, np.asarray(self.Q))
        
        
        colors = cv2.cvtColor(np.asarray(r), cv2.COLOR_GRAY2RGB)
        mask = disp > disp.min()
        out_points = points[mask]
        out_colors = colors[mask]
        # Resulting .ply file cam be easily viewed using MeshLab ( http://meshlab.sourceforge.net
        out_fn = 'out.ply'
        write_ply('out.ply', out_points, out_colors)
        print '%s saved' % 'out.ply'
    

        cv2.imshow('disparity', (disp - min_disp) / num_disp)
Example #31
def point_cloud(filteredImg, imgL):
    #https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0
    # Q was taken from steror_cam.yml
    Q = np.float32([[1., 0., 0., -4.6851158237457275e+02],
                    [0., 1., 0., -2.6415181350708008e+02],
                    [0., 0., 0., 5.3994586146833012e+02],
                    [0., 0., 1.6558455087828328e+01, 0.]])
    # This transformation matrix is derived from Prof. Didier Stricker's power point presentation on computer vision.
    # Link : https://ags.cs.uni-kl.de/fileadmin/inf_ags/3dcv-ws14-15/3DCV_lec01_camera.pdf

    # Reproject points into 3D
    points_3D = cv2.reprojectImageTo3D(filteredImg, Q)
    # Get color points
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    # Get rid of points with value 0 (i.e no depth)
    mask_map = filteredImg > filteredImg.min()
    # Mask colors and points.
    output_points = points_3D[mask_map]
    output_colors = colors[mask_map]
    # Define name for output file
    output_file = 'reconstructed.ply'
    # Generate point cloud
    print("\n Creating the output file... \n")
    create_output(output_points, output_colors, output_file)
Example #32
def distance(disparity):
    threeD = cv2.reprojectImageTo3D(disparity,
                                    camera_config.Q,
                                    handleMissingValues=True)
    # compensate for the 16x fixed-point scale of the raw disparity passed in
    threeD = threeD * 16

    return threeD
Example #33
def main(filteredImg,imgL,mini):
    disp=filteredImg
    min_disp = 16
    num_disp = 112-min_disp
    (h, w,_) = imgL.shape  
    mask=disp>mini
    Q=np.zeros((4,4))
    Q[0][0]=1
    Q[1][1]=-1
    Q[3][2]=1
    Q[0][3]=-0.5*w
    Q[1][3]=0.5*h
    Q[2][3]=0.8*w
    Q=np.float32(Q)
    points = cv.reprojectImageTo3D(disp, Q)
    colors = cv.cvtColor(imgL, cv.COLOR_BGR2RGB)


    
    out_p = points[mask]
    out_c = colors[mask]

    # equivalent integer mask, built with one vectorized comparison
    mask = np.zeros((disp.shape[0], disp.shape[1]), dtype=int)
    mask[disp > mini] = 1
    
    write(out_p, out_c)
def calculate_disparity_michael(imgL,
                                imgR,
                                window_size=2,
                                min_disp=16,
                                num_disp=160,
                                blockSize=2,
                                uniquenessRatio=1,
                                speckleRange=50,
                                speckleWindowSize=200,
                                disp12MaxDiff=200,
                                P1=600,
                                P2=2400,
                                preFilterCap=63,
                                show_disparity=False,
                                save_point_cloud=False):
    stereo = cv2.StereoSGBM_create(
        # Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
        minDisparity=min_disp,
        # Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
        numDisparities=num_disp,
        blockSize=window_size,
        uniquenessRatio=uniquenessRatio,
        speckleRange=speckleRange,
        speckleWindowSize=speckleWindowSize,
        disp12MaxDiff=disp12MaxDiff,
        P1=P1,
        P2=P2,
        preFilterCap=preFilterCap)
    # compute disparity
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
    if show_disparity:
        displayed_image = (disp - min_disp) / num_disp
        cv2.imshow(
            'disparity', displayed_image
        )  # for some reason when using imshow, the picture has to be normalized
    if save_point_cloud:
        h, w = imgL.shape[:2]
        f = 0.8 * w  # guess for focal length
        Q = np.float32([
            [1, 0, 0, -0.5 * w],
            [0, -1, 0, 0.5 * h],  # turn points 180 deg around x-axis,
            [0, 0, 0, -f],  # so that y-axis looks up
            [0, 0, 1, 0]
        ])
        points = cv2.reprojectImageTo3D(disp, Q)
        colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
        mask = disp > disp.min()
        out_points = points[mask]
        out_colors = colors[mask]
        # saving files
        cv2.imwrite(
            os.path.join("./data/disparity",
                         "frame_{}_disparity.jpg".format(counter)),
            (disp))  # but when saving, normalization is not necessary
        write_ply(
            os.path.join("./data/point_cloud",
                         "frame_{}_pointcloud.ply".format(counter)),
            out_points, out_colors)
    return disp
    def getPointsCloud(self, limits):
        mask = self.depth_map > (self.depth_map).min()

        points = cv2.reprojectImageTo3D(self.depth_map, self.Q)

        colors = cv2.cvtColor(self.imgL, cv2.COLOR_BGR2RGB)

        points_cloud = PointsCloud(points=points[mask], colors=colors[mask])

        return points_cloud.makeBoundingBox(limits)
Example #36
def get3dPoints(disp, Q):
    """
    Q is the disparity-to-depth mapping calibrated with OpenCV code and Sameep's parameters
    """
    points = cv2.reprojectImageTo3D(disp, Q)

    #possible to do other filters such as removing objects beyond certain distance, height filters etc
    #mask = disp > disp.min()
    
    return points
Example #37
    def doThings(self):
        sgbm = cv2.StereoSGBM()
        sgbm.SADWindowSize, numberOfDisparitiesMultiplier, sgbm.preFilterCap, sgbm.minDisparity, \
        sgbm.uniquenessRatio, sgbm.speckleWindowSize, sgbm.P1, sgbm.P2, \
        sgbm.speckleRange = [v for v,_ in self.params.itervalues()]
        sgbm.numberOfDisparities = numberOfDisparitiesMultiplier*16
        sgbm.disp12MaxDiff = -1
        sgbm.fullDP = False
        R1, R2, P1, P2, Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2, 
                                (self.top.shape[1],self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1, 
                                                         (self.top.shape[1],self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2, 
                                                               (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR)
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        top_small = cv2.resize(self.top_r, (self.top_r.shape[1]/2, self.top_r.shape[0]/2))
        bottom_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1]/2, self.bottom_r.shape[0]/2))
        cv2.imshow('top', top_small)
        cv2.imshow('bottom', bottom_small)

#        top_r = cv2.equalizeHist(top_r)
        top_r = cv2.blur(self.top_r, (5,5))
#        bottom_r = cv2.equalizeHist(bottom_r)
        bottom_r = cv2.blur(self.bottom_r, (5,5))
        dispTop = sgbm.compute(top_r.T, bottom_r.T).T
        dispTopPositive = dispTop
        dispTopPositive[dispTop<0] = 0
        disp8 = (dispTopPositive / (sgbm.numberOfDisparities * 16.) * 255).astype(np.uint8)
        disp_small = cv2.resize(disp8, (disp8.shape[1]/2, disp8.shape[0]/2))
        cv2.imshow(self.winname, disp_small)
        
        self.disp8 = disp8
        self.xyz = cv2.reprojectImageTo3D(dispTop, Q, handleMissingValues=True)
#        self.xyzrgb = np.zeros((self.xyz.shape[0],self.xyz.shape[1],4))
        
#        import struct
#        def color_to_float(color):
#            if color.size == 1:
#                color = [color]*3
#            rgb = (color[2] << 16 | color[1] << 8 | color[0]);
#            rgb_hex = hex(rgb)[2:-1]
#            s = '0'*(8-len(rgb_hex)) + rgb_hex.capitalize()
##            print color, rgb, hex(rgb)
#            rgb_float = struct.unpack('!f', s.decode('hex'))[0]
##            print rgb_float
#            return rgb_float
        
#        for i in range(self.xyz.shape[0]):
#            for j in range(self.xyz.shape[1]):
#                self.xyzrgb[i,j] = np.append(self.xyz[i,j], color_to_float(self.top[i,j])) 
        
Example #38
def stereoBMMatch(imgL, imgR, Q):
    gray_img1 = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY)
    gray_img2 = cv2.cvtColor(imgR, cv2.COLOR_RGB2GRAY)
    cv2.imshow('Image 1', gray_img1)
    cv2.imshow('Image 2', gray_img2)
    numDisp = 128
    stereoBM = cv2.StereoBM_create(numDisp, 11)
    disp = stereoBM.compute(gray_img1, gray_img2).astype(np.float32)
    cv2.imshow('Disparity', disp / numDisp)

    #SAVE 3D POINTS
    points = cv2.reprojectImageTo3D(disp, Q)
    return disp, points
def world_coordinates(u,v,left,right):
    # Load the undistortion and rectification transformation map
    path = '/home/ros/workspace/src/robotic_surgery/tip3d_detection/camera_calibration/calibration_data/'
    left_undistortion = np.load(path + 'left_undistortion.npy', mmap_mode='r')
    left_rectification = np.load(path + 'left_rectification.npy', mmap_mode='r')
    right_undistortion = np.load(path + 'right_undistortion.npy', mmap_mode='r')
    right_rectification = np.load(path + 'right_rectification.npy', mmap_mode='r')

    # Rectify left and right images
    left_rectified = cv2.remap(left, left_undistortion, left_rectification,
                               cv2.INTER_NEAREST)
    right_rectified = cv2.remap(right, right_undistortion, right_rectification,
                                cv2.INTER_NEAREST)

    # Specify parameters for the semi-global block matching algorithm
    stereo = cv2.StereoSGBM(minDisparity=16, numDisparities=96, SADWindowSize=3,
                            P1=216, P2=864, disp12MaxDiff=14, preFilterCap=100,
                            uniquenessRatio=15, speckleWindowSize=150,
                            speckleRange=1, fullDP=False)

    # Compute the disparity map
    disparity = stereo.compute(left_rectified, right_rectified)

    # Adjust the disparity map for clarity in depth
    disparity = disparity.astype(np.float32) / 16.0 

    # Disparity-to-depth mapping matrix
    Q = np.float32([[1, 0, 0, -0.5 * 640],
                    [0, -1, 0, 0.5 * 480],
                    [0, 0, 0, -0.8*640],
                    [0, 0, 1, 0]])

    # Compute the 3D world coordinates
    Image = cv2.reprojectImageTo3D(disparity, Q)

    # Identify the world coordinates of a certain point with left image
    # coordinates (u,v)
    camera_coord = Image[u,v]
    
    # Convert camera world coordinates to robot world coordinates
    robot_coord = [0.,0.,0.]
    distance_between_cameras = 9.1
    accurate_vertical_distance = 18
    vertical_distance_from_camera_to_the_board = 29.8
    tilted_distance_from_camera_to_the_board = 31.3
    centre_shift_to_right = 3.7
    centre_shift_forward = 73.8
    angle = math.acos(vertical_distance_from_camera_to_the_board/tilted_distance_from_camera_to_the_board)
    robot_coord[0] = (camera_coord[1]*math.sin(angle) - camera_coord[2]*math.cos(angle) + centre_shift_forward)/100
    robot_coord[1] = (camera_coord[0] - centre_shift_to_right)/100
    robot_coord[2] = (-(camera_coord[1]*math.cos(angle) + camera_coord[2]*math.sin(angle)) + accurate_vertical_distance)/100
    
    return robot_coord
Example #40
    def createPointClouds(self,
                          cameraRotationMatrix,
                          cameraTranslationVectorMatrix):

        #notify user of progress
        print("Reprojecting to 3D ...\n")

        #left image
        #convert data to numpy arrays
        self.leftImageForProjection = numpy.asarray(self.leftRectifiedImage)
        self.leftImageForProjectionBGR = cv2.cvtColor(self.leftImageForProjection, cv.CV_RGB2BGR)

        #create homogeneous transformation matrix to move the points from camera frame of reference to object frame of reference
        #create homogeneous rotation matrix from the rectification
        #leftHomogeneousRectificationMatrix = numpy.identity(4)
        #leftHomogeneousRectificationMatrix[0:3,0:3] = self.leftRectificationMatrix.transpose()
        #print("Left camera rectification matrix:\n{0}".format(self.leftRectificationMatrix))
        #print("left homogeneous rectification matrix:\n{0}".format(leftHomogeneousRectificationMatrix))
        #create homogeneous rotation matrix
        leftHomogeneousRotationMatrix = numpy.identity(4)
        leftHomogeneousRotationMatrix[0:3,0:3] = cameraRotationMatrix
        #print("Left camera rotation matrix:\n{0}".format(self.leftCameraRotationMatrix))
        #print("Left homogeneous rotation matrix:\n{0}".format(leftHomogeneousRotationMatrix))
        #create homogeneous translation matrix
        leftHomogeneousTranslationMatrix = numpy.identity(4)
        leftHomogeneousTranslationMatrix[0:3,3] = cameraTranslationVectorMatrix[:,0]
        #print("Left camera translation matrix:\n{0}".format(self.leftCameraTranslationVectorMatrix))
        #print("Left homogeneous translation matrix:\n{0}".format(leftHomogeneousTranslationMatrix))
        leftHomogeneousCameraTransformationMatrix = numpy.dot(leftHomogeneousTranslationMatrix, leftHomogeneousRotationMatrix)
        #print("Homogeneous transformation matrix:\n{0}".format(homogeneousTransformationMatrixFromCameraFrameToObjectFrame))

        #calculate depth from disparity
        self.left3DImageCameraReference = cv2.reprojectImageTo3D(self.leftDisparityMap, self.disparityToDepthMappingMatrix, handleMissingValues = False, ddepth = cv.CV_32FC1)

        #reshape images into 2D arrays
        self.leftDisparity2DArray = self.leftDisparityMap.reshape((1, -1))
        self.leftRgbImage2DArray = self.leftImageForProjectionBGR.reshape((-1, 3)).transpose()
        self.leftXyzImage2DArray = self.left3DImageCameraReference.reshape((-1, 3)).transpose()

        #create a mask for the matched points by checking against the minimum disparity value
        self.leftSuccessfulMatchMask = numpy.ma.getmask(numpy.ma.masked_outside(self.leftDisparity2DArray, float(-self.minDisparity), float(-self.minDisparity - self.numberOfDisparities)))

        #transform to object reference frame
        #create homogeneous points
        self.leftHomogeneousXyzPoints = numpy.concatenate((self.leftXyzImage2DArray, numpy.ones((1, self.leftXyzImage2DArray.shape[1]), dtype = numpy.float32)), axis = 0)
        #multiply by transformation matrix
        self.leftTransformedXyzImage2DArray = numpy.dot(leftHomogeneousCameraTransformationMatrix, self.leftHomogeneousXyzPoints)

    #end createPointClouds

#end stereo
Example #41
def computeDisparity(r_imgL, r_imgR, cameraObj):
    '''Compute the disparity between the left and right images
    @r_imgL = rectified left image
    @r_imgR = rectified right image
    @cameraObj = StereoCam object

    return = the disparity and the reprojected to 3D points
    '''
    # Calculate the disparity between the L and R images
    h, w = r_imgL.shape[:2]
    window_size = 9
    min_disp = 0  # 20
    max_disp = w / 8  # image width / 8
    num_disp = max_disp - min_disp 
    stereo = cv2.StereoSGBM(minDisparity=min_disp,
                            numDisparities=num_disp,
                            SADWindowSize=window_size,
                            uniquenessRatio=10,
                            speckleWindowSize=100,
                            speckleRange=32,
                            disp12MaxDiff=1,
                            preFilterCap=63,
                            P1=8 * 3 * window_size**2,
                            P2=32 * 3 * window_size**2,
                            fullDP=False
                            )
    print 'computing disparity...'

    # Normalize the values
    disp = stereo.compute(r_imgL, r_imgR).astype(
        np.float32) / 16. 

    # Update the Q matrix
    cx = cameraObj.M1[0][-1]
    cy = cameraObj.M1[1][-1]
    f =  np.mean([cameraObj.M1[0][0], cameraObj.M1[1][1]])
    Tx = cameraObj.T[0]
    cxp = cameraObj.M2[0][-1]
    q43 = (cx - cxp) / Tx

    Q = np.float32([[1, 0, 0, -cx],
                    [0, 1, 0, -cy],
                    [0, 0, 0, f],
                    [0, 0, -1./Tx, q43]])
    
    # Compute the 3D World Coordinates - these points are world coordinates
    # relative to the image's specific image frame
    points = cv2.reprojectImageTo3D(disp, Q)

    return disp, points
Example #42
def stereosgbm_match(imgL, imgR, fname, Q, min_disp, params, eps):
    pitch, yaw, height, posx, posy = params
    # disparity range is tuned for 'aloe' image pair
    window_size = 6
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
        numDisparities = num_disp,
        blockSize = 3,
        P1 = 8 * 3 * window_size ** 2,
        P2 = 32 * 3 * window_size ** 2,
        disp12MaxDiff = 1,
        uniquenessRatio = 10,
        speckleWindowSize = 100,
        speckleRange = 32
    )

    #print 'computing disparity...'
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16

    #print 'generating 3d point cloud...'
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    points = points[mask]; colors = colors[mask]

    #ROTATE the points to the world coordinate
    #print points[:3]
    #points = rotate(0.04, 0.176, points)
    points = rotate(pitch, yaw, points)
    #print points[:3]
    points = points + np.asarray((posx, posy, height))
    #print points[:3]
    mask = points[:, 2] > eps
    points = points[mask]
    colors = colors[mask]
    '''mask = points[:, 0] < 150
    points = points[mask]
    colors = colors[mask]'''

    if fname is not None:
        write_ply(fname, points, colors)
        print '%s saved' % fname

        cv2.imshow('left', imgL)
        cv2.imshow('disparity', (disp - min_disp) / num_disp)
        cv2.waitKey()
        cv2.destroyAllWindows()

    return points
Example #43
def get_points_cloud(filename, disparity, Q):

    left = cv2.imread(filename)

    print Q
    points = cv2.reprojectImageTo3D(disparity, Q)
    colors = cv2.cvtColor(left, cv2.COLOR_BGR2RGB)
    mask = disparity > disparity.min()
    out_points = points[mask]
    out_colors = colors[mask]

    verts = out_points.reshape(-1, 3)
    colors = out_colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])

    return verts
Example #44
    def clean_up(self):
        self.xyz = cv2.reprojectImageTo3D(self.disp, self.Q, handleMissingValues=True)
        self.xyz_mask = np.all(~np.isinf(self.xyz),2)        
        out_points = self.xyz[self.mask * self.roi_mask * self.xyz_mask]
#        self.top_r_col = cv2.cvtColor(self.top_r, cv2.COLOR_GRAY2RGB)
        self.top_r_rgb = cv2.cvtColor(self.top_r, cv2.COLOR_BGR2RGB)
        out_colors = self.top_r_rgb[self.mask * self.roi_mask * self.xyz_mask]
        nout = out_points.shape[0]
        self.xyzrgb = np.array([np.append(point, utility.color_to_float(color)) 
                       for point,color in zip(out_points, out_colors)])
              
#        import itertools
#        self.xyzrgb_valid = np.array([i for i in itertools.chain(*self.xyzrgb) if i[2] != 10000.]).astype(np.float32)
        axis_cloud = utility.draw_axis()
        utility.write_XYZRGB(utility.merge_pointclouds([axis_cloud, self.xyzrgb]), 'lobby.pcd')
        
        utility.write_ply('lobby.ply', out_points, out_colors)
Example #45
def main():
    print('loading images...')
    imgL = cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg')))  # downscale images for faster processing
    imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg')))

    # disparity range is tuned for 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112-min_disp
    stereo = cv.StereoSGBM_create(minDisparity = min_disp,
        numDisparities = num_disp,
        blockSize = 16,
        P1 = 8*3*window_size**2,
        P2 = 32*3*window_size**2,
        disp12MaxDiff = 1,
        uniquenessRatio = 10,
        speckleWindowSize = 100,
        speckleRange = 32
    )

    print('computing disparity...')
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print('generating 3d point cloud...',)
    h, w = imgL.shape[:2]
    f = 0.8*w                          # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0,-1, 0,  0.5*h], # turn points 180 deg around x-axis,
                    [0, 0, 0,     -f], # so that y-axis looks up
                    [0, 0, 1,      0]])
    points = cv.reprojectImageTo3D(disp, Q)
    colors = cv.cvtColor(imgL, cv.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    out_fn = 'out.ply'
    write_ply('out.ply', out_points, out_colors)
    print('%s saved' % 'out.ply')

    cv.imshow('left', imgL)
    cv.imshow('disparity', (disp-min_disp)/num_disp)
    cv.waitKey()

    print('Done')
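Several snippets above call a write_ply helper without defining it; a minimal sketch of such a helper, hypothetical but matching the '%f %f %f %d %d %d' vertex rows and the PLY header pattern used throughout:

import numpy as np

PLY_HEADER = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''

def write_ply(fn, verts, colors):
    # one "x y z r g b" row per vertex
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    rows = np.hstack([verts, colors])
    with open(fn, 'wb') as f:
        f.write((PLY_HEADER % dict(vert_num=len(rows))).encode('utf-8'))
        np.savetxt(f, rows, fmt='%f %f %f %d %d %d')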
Example #46
def get3dPoints():
    """
    Q is the disparity-to-depth mapping calibrated with OpenCV code and Sameep's parameters
    """

    Q = np.float32([[1, 0, 0, -1008.174053192139],
    [0, 1, 0, -469.5005378723145],
    [0, 0, 0, 2061.201143658453],
    [0, 0, 1.683177465418866, -0]])
    
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)

    #possible to do other filters such as removing objects beyond certain distance, height filters etc
    mask = disp > disp.min()
    
    #applying mask to points and colors
    out_points = points[mask]
    out_colors = colors[mask]
    return (out_points, out_colors)
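
A hard-coded Q like the one above normally comes out of cv2.stereoRectify, whose fifth return value is exactly this disparity-to-depth matrix. A sketch with hypothetical calibration inputs (in practice K1, D1, K2, D2, R, T come from cv2.stereoCalibrate; the baseline below is chosen so Q[3][2] lands near the 1.683 seen above):

import cv2
import numpy as np

K1 = K2 = np.float64([[2000, 0, 1008], [0, 2000, 470], [0, 0, 1]])
D1 = D2 = np.zeros(5)
R = np.eye(3)                    # rotation between the two cameras
T = np.float64([-0.594, 0, 0])   # baseline along x

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
    K1, D1, K2, D2, (2016, 940), R, T)
print(Q)   # same structure as the hand-entered matrix above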
Example #47
def stereo_process(imgL,imgR,outprefix):
    # disparity range is tuned for 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112-min_disp
    stereo = cv2.StereoSGBM(minDisparity = min_disp,
        numDisparities = num_disp,
        SADWindowSize = window_size,
        uniquenessRatio = 10,
        speckleWindowSize = 100,
        speckleRange = 32,
        disp12MaxDiff = 1,
        P1 = 8*3*window_size**2,
        P2 = 32*3*window_size**2,
        fullDP = False
    )

    print 'computing disparity...'
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print 'generating 3d point cloud...',
    h, w = imgL.shape[:2]
    f = 0.8*w                          # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0,-1, 0,  0.5*h], # turn points 180 deg around x-axis,
                    [0, 0, 0,     -f], # so that y-axis looks up
                    [0, 0, 1,      0]])
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    if outprefix:
        out_fn = outprefix + '.ply'
        write_ply(out_fn, out_points, out_colors)
        print '%s saved' % out_fn

    cv2.imshow('left', imgL)
    cv2.imshow('disparity', (disp-min_disp)/num_disp)
    cv2.waitKey()
    cv2.destroyAllWindows()
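
This example (and several below) use the OpenCV 2.4 cv2.StereoSGBM constructor, which was removed in OpenCV 3. The parameters map almost one-to-one onto cv2.StereoSGBM_create: SADWindowSize became blockSize and fullDP became the mode flag. A sketch of the equivalent modern call:

import cv2

window_size, min_disp, num_disp = 3, 16, 112 - 16
stereo = cv2.StereoSGBM_create(
    minDisparity=min_disp,
    numDisparities=num_disp,
    blockSize=window_size,            # was SADWindowSize
    uniquenessRatio=10,
    speckleWindowSize=100,
    speckleRange=32,
    disp12MaxDiff=1,
    P1=8 * 3 * window_size ** 2,
    P2=32 * 3 * window_size ** 2,
    mode=cv2.STEREO_SGBM_MODE_SGBM,   # fullDP=True maps to ..._MODE_HH
)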
Example #48
    def __SavePLY(self, disparity, image, Q):
        """Save the depth map into a PLY file."""
        # Reprojects a disparity image to 3D space.
        points = cv2.reprojectImageTo3D(disparity, Q)
        colors = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Creates a mask of the depth mapping matrix.
        mask = disparity > disparity.min()
        points = points[mask].reshape(-1, 3)
        colors = colors[mask].reshape(-1, 3)

        # Defines the output numpy array.
        output = np.hstack([points, colors])

        # Save the output file.
        with open(self.__path + "Outputs/Assignment3.ply", "w") as filename:
            filename.write(self.__plyHeader % dict(num = len(output)))
            np.savetxt(filename, output, "%f %f %f %d %d %d", newline="\n")            

        # End the PLY save process.
        self.__isSaving = False
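
The recurring mask = disparity > disparity.min() trick works because SGBM writes (minDisparity - 1) into pixels it could not match (after the /16 scaling), so the matrix minimum is the invalid-pixel sentinel whenever any pixel failed. Filtering against the sentinel directly is more robust, since min() misfires when every pixel matched; a short sketch:

import numpy as np

min_disp = 16                             # as passed to the matcher
disp = np.float32([[15, 20], [40, 15]])   # stands in for compute()/16 output

valid = disp > (min_disp - 1)             # explicit invalid-pixel filter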
Example #49
def point_cloud(disparity_image, image_left, focal_length):
    """Create a point cloud from a disparity image and a focal length.

    Arguments:
      disparity_image: disparities in pixels.
      image_left: BGR-format left stereo image, to color the points.
      focal_length: the focal length of the stereo camera, in pixels.

    Returns:
      A string containing a PLY point cloud of the 3D locations of the
        pixels, with colors sampled from image_left. You may filter low-
        disparity pixels or noise pixels if you choose.
    """

    h, w = image_left.shape[:2]

    Q = np.float32([[1, 0, 0, w / 2],
                    [0, -1, 0, h / 2],
                    [0, 0, focal_length, 0],
                    [0, 0, 0, 1]])
    points = cv2.reprojectImageTo3D(disparity_image, Q)
    colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
    mask = disparity_image > disparity_image.min()
    out_points = points[mask]
    out_colors = colors[mask]

    verts = out_points.reshape(-1, 3)
    colors = out_colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])

    result = ply_header % dict(vert_num=len(verts))
    for vert in verts:
        result += "%f " % vert[0]
        result += "%f " % vert[1]
        result += "%f " % vert[2]
        result += "%d " % vert[3]
        result += "%d " % vert[4]
        result += "%d\n" % vert[5]

    return result
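
Building the PLY body with += on a Python string is quadratic in the number of points. For real images it is cheaper to let numpy format the rows in one pass; a sketch producing the same text:

import io
import numpy as np

def verts_to_ply_body(verts):
    # verts: N x 6 array of x, y, z, r, g, b rows.
    buf = io.StringIO()
    np.savetxt(buf, verts, fmt='%f %f %f %d %d %d')
    return buf.getvalue()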
Example #50
def do_stereo(inputL,inputR,outfile):
    print 'loading images...'
    imgL = cv2.pyrDown(inputL)  # downscale images for faster processing
    imgR = cv2.pyrDown(inputR)

    # disparity range is tuned for 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112-min_disp
    stereo = cv2.StereoSGBM(minDisparity = min_disp,
        numDisparities = num_disp,
        SADWindowSize = window_size,
        uniquenessRatio = 10,
        speckleWindowSize = 100,
        speckleRange = 32,
        disp12MaxDiff = 1,
        P1 = 8*3*window_size**2,
        P2 = 32*3*window_size**2,
        fullDP = False
    )

    print 'computing disparity...'
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print 'generating 3d point cloud...',
    h, w = imgL.shape[:2]
    f = 0.8*w                          # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0,-1, 0,  0.5*h], # turn points 180 deg around x-axis,
                    [0, 0, 0,     -f], # so that y-axis looks up
                    [0, 0, 1,      0]])
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    write_ply(outfile, out_points, out_colors)
    print '%s saved' % outfile
Example #51
def stereo_match(imgL, imgR):
    # disparity range is tuned for 'aloe' image pair
    window_size = 15
    min_disp = 16
    num_disp = 96 - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=16,
                                   P1=8 * 3 * window_size ** 2,
                                   P2=32 * 3 * window_size ** 2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=150,
                                   speckleRange=32
                                   )

    # print('computing disparity...')
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    # print('generating 3d point cloud...',)
    h, w = imgL.shape[:2]
    f = 0.8 * w  # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5 * w],
                    [0, -1, 0, 0.5 * h],  # turn points 180 deg around x-axis,
                    [0, 0, 0, -f],  # so that y-axis looks up
                    [0, 0, 1, 0]])
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    append_ply_array(out_points, out_colors)

    disparity_scaled = (disp - min_disp) / num_disp
    disparity_scaled += abs(np.amin(disparity_scaled))
    disparity_scaled /= np.amax(disparity_scaled)
    disparity_scaled[disparity_scaled < 0] = 0
    return np.array(255 * disparity_scaled, np.uint8)
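
append_ply_array is not defined in this snippet. A plausible sketch (the name and module-level accumulators are assumptions) that collects points across frames for a single PLY dump at the end, reusing the write_ply sketch above:

import numpy as np

ply_points, ply_colors = [], []

def append_ply_array(points, colors):
    # Stash this frame's valid points; concatenate once when saving.
    ply_points.append(points.reshape(-1, 3))
    ply_colors.append(colors.reshape(-1, 3))

def flush_ply(fn):
    write_ply(fn, np.vstack(ply_points), np.vstack(ply_colors))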
Example #52
    def __SavePLY(self, disparity, image):
        # Check if the system is calibrated.
        if Configuration.Instance.Calibration.IsCalibrated:
            # Get a 4x4 disparity-to-depth mapping matrix.
            Q = StereoCameras.Instance.Parameters.Q

            # Reprojects a disparity image to 3D space.
            points = cv2.reprojectImageTo3D(disparity, Q)

            # Creates a mask of the depth mapping matrix.
            mask = disparity > disparity.min()
            points = points[mask].reshape(-1, 3)

            # Defines the output numpy array.
            output = points

            # Save the output file.
            with open("OpenCV3D.ply", "w") as filename:
                filename.write(OpenCV3D.___plyHeader % dict(num = len(output)))
                np.savetxt(filename, output, "%f %f %f", newline="\n")            
            
            # End the PLY save process.
            self.IsSaving = False
Example #53
    # (opening of this truncated example reconstructed to match the
    # identical StereoSGBM calls in Examples #47 and #50)
    window_size = 3
    min_disp = 16
    num_disp = 112-min_disp
    stereo = cv2.StereoSGBM(minDisparity = min_disp,
        numDisparities = num_disp,
        SADWindowSize = window_size,
        uniquenessRatio = 10,
        speckleWindowSize = 100,
        speckleRange = 32,
        disp12MaxDiff = 1,
        P1 = 8*3*window_size**2,
        P2 = 32*3*window_size**2,
        fullDP = False
    )

    print 'computing disparity...'
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print 'generating 3d point cloud...',
    h, w = imgL.shape[:2]
    f = 0.8*w                          # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0,-1, 0,  0.5*h], # turn points 180 deg around x-axis,
                    [0, 0, 0,     -f], # so that y-axis looks up
                    [0, 0, 1,      0]])
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    out_fn = 'out.ply'
    write_ply(out_fn, out_points, out_colors)
    print '%s saved' % out_fn

    cv2.imshow('left', imgL)
    cv2.imshow('disparity', (disp-min_disp)/num_disp)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #54
	cv2.rectangle(frameLeftRemaped, (validROI_left[0], validROI_left[1]), (validROI_left[0]+validROI_left[2], validROI_left[1]+validROI_left[3]), (0,255,0), 2)
	cv2.rectangle(frameRightRemaped, (validROI_right[0], validROI_right[1]), (validROI_right[0]+validROI_right[2], validROI_right[1]+validROI_right[3]), (0,255,0), 2)


	cv2.imshow('Cameras Undistorted', np.concatenate((frameLeftRemaped, frameRightRemaped), axis=1))

	cv2.imshow("disp", fin)

	# cv2.imshow("huh", frameLeftRemaped[validROI_left])

	if cv2.waitKey(1) & 0xFF == ord('q'):

		points = cv2.reprojectImageTo3D(disparity, calib_Q)
		colors = cv2.cvtColor(frameLeftRemaped, cv2.COLOR_BGR2RGB)
		mask = disparity > disparity.min()
		out_points = points[mask]
		out_colors = colors[mask]
		out_fn = 'out.ply'
		write_ply(out_fn, out_points, out_colors)
		print('%s saved' % out_fn)

		break
#---------------------------------#


cv2.destroyAllWindows() #@TODO
ch.releaseCameras()
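
The frameLeftRemaped/frameRightRemaped images imply rectification maps computed ahead of the capture loop. With OpenCV these are typically built once from the calibration via cv2.initUndistortRectifyMap and applied per frame with cv2.remap; a self-contained sketch with dummy calibration values:

import cv2
import numpy as np

w, h = 640, 480
K1 = np.float64([[700, 0, w / 2], [0, 700, h / 2], [0, 0, 1]])
D1 = np.zeros(5)
R1 = np.eye(3)                          # from cv2.stereoRectify in practice
P1 = np.hstack([K1, np.zeros((3, 1))])  # likewise

# Build the maps once...
map1, map2 = cv2.initUndistortRectifyMap(K1, D1, R1, P1, (w, h), cv2.CV_16SC2)

# ...then rectifying each frame is a cheap remap.
frame = np.zeros((h, w, 3), np.uint8)   # stands in for a captured frame
frameLeftRemaped = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR)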
Example #55
	def run(self):
		self.initialize_camera()
		time.sleep(1)
		cv.SetMouseCallback("RightCam", self.on_mouse)
		while True:
			#time.sleep(0.1)
			self.grab_frame()

			#flip image to give correct perspective
			#self.frame = cv2.flip(self.frame, 1)
		
			#scale
			#self.frame = cv2.resize(self.frame, (len(self.frame[0]) / self.scale_down, len(self.frame) / self.scale_down))
			

			# disparity range tuning
			window_size = 5
			min_disp = 16
			num_disp = 64 - min_disp
			stereo = cv2.StereoSGBM(minDisparity = min_disp,
				numDisparities = num_disp,
				SADWindowSize = window_size,
				uniquenessRatio = 15,
				speckleWindowSize = 120,
				speckleRange = 32,
				disp12MaxDiff = 1,
				P1 = 8*3*window_size**2,
				P2 = 32*3*window_size**2,
				fullDP = False
			)

			print 'computing disparity...'
			disp = stereo.compute(self.left_frame, self.right_frame).astype(np.float32) / 16.0
			
			print 'generating 3d point cloud...',
			h, w = self.left_frame.shape[:2]
			f = 0.8*w                          # guess for focal length
			Q = np.float32([[1, 0, 0, -0.5*w],
				            [0,-1, 0,  0.5*h], # turn points 180 deg around x-axis,
				            [0, 0, 0,     -f], # so that y-axis looks up
				            [0, 0, 1,      0]])
			points = cv2.reprojectImageTo3D(disp, Q)
			colors = cv2.cvtColor(self.left_frame, cv2.COLOR_BGR2RGB)
			mask = disp > disp.min()
			out_points = points[mask]
			out_colors = colors[mask]
			out_fn = 'out.ply'
			self.write_ply(out_fn, out_points, out_colors)
			print '%s saved' % out_fn
			

			# disp computed above is reused for display; no second compute() needed
			cv2.imshow("RightCam", self.right_frame)
			cv2.imshow("LeftCam", self.left_frame)
			cv2.imshow("Disparity",  (disp-min_disp)/num_disp)
			# Clean up everything before leaving
			if cv2.waitKey(50) == 27: # or cv2.waitKey() == 1048603:
				self.right_cam.release()
				self.left_cam.release()
				cv2.destroyAllWindows()
				break
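
One easy improvement to this loop: the matcher is rebuilt and compute() originally ran twice on every iteration. Constructing cv2.StereoSGBM once before while True and reusing the first disp makes each frame substantially cheaper; a sketch of the reshaped loop (grab_frames is a hypothetical stand-in for the capture code):

# Build the matcher once, before the capture loop.
stereo = cv2.StereoSGBM(minDisparity=min_disp, numDisparities=num_disp,
                        SADWindowSize=window_size, uniquenessRatio=15,
                        speckleWindowSize=120, speckleRange=32,
                        disp12MaxDiff=1, P1=8*3*window_size**2,
                        P2=32*3*window_size**2, fullDP=False)
while True:
    left_frame, right_frame = grab_frames()   # hypothetical frame grabber
    disp = stereo.compute(left_frame, right_frame).astype(np.float32) / 16.0
    cv2.imshow("Disparity", (disp - min_disp) / num_disp)   # reuse disp
    if cv2.waitKey(50) == 27:
        break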
Example #56
    @classmethod
    def get_3d(cls, disparity, disparity_to_depth_map):
        """Compute point cloud."""
        return cv2.reprojectImageTo3D(disparity, disparity_to_depth_map)
def reproject(disparity):
    # Build Q from the ROS camera-info pair and reproject the disparity map.
    mdl = image_geometry.StereoCameraModel()
    mdl.fromCameraInfo(images['left_info'], images['right_info'])
    pts = cv2.reprojectImageTo3D(disparity, mdl.Q)
    return pts
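
images['left_info'] and images['right_info'] are ROS sensor_msgs/CameraInfo messages; image_geometry.StereoCameraModel.fromCameraInfo derives Q from the pair. A sketch of how they are typically captured (node and topic names are assumptions):

import rospy
from sensor_msgs.msg import CameraInfo

images = {}

def make_cb(key):
    def cb(msg):
        images[key] = msg
    return cb

rospy.init_node('reproject_demo')
rospy.Subscriber('/stereo/left/camera_info', CameraInfo, make_cb('left_info'))
rospy.Subscriber('/stereo/right/camera_info', CameraInfo, make_cb('right_info'))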
Example #58
def savePointCloud(image, fileName):
   # (function opening reconstructed; the excerpt began mid-loop)
   f = open(fileName, "w")
   for x in range(image.shape[0]):
     for y in range(image.shape[1]):
        pt = image[x,y]
        if (math.isinf(pt[0]) or math.isnan(pt[0])):
          pass  # skip unprojectable pixels
        else:
          f.write("%f %f %f %s\n" % (pt[0], pt[1], pt[2]-1, rgb))
   f.close()

for arg in sys.argv[1:]:
  outputFile = arg   # was `cloud.txt = arg`, which assigns to an undefined name

client = MultirotorClient()

while True:
    rawImage = client.simGetImage(0, AirSimImageType.DepthPerspective)
    if (rawImage is None):
        print("Camera is not returning image, please check airsim for error messages")
        sys.exit(0)
    else:
        png = cv2.imdecode(np.frombuffer(rawImage, np.uint8) , cv2.IMREAD_UNCHANGED)
        gray = cv2.cvtColor(png, cv2.COLOR_BGR2GRAY)
        Image3D = cv2.reprojectImageTo3D(gray, projectionMatrix)
        savePointCloud(Image3D, outputFile)
        print("saved " + outputFile)
        sys.exit(0)

    key = cv2.waitKey(1) & 0xFF
    if key == 27 or key == ord('q') or key == ord('x'):
        break
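
The AirSim excerpt assumes outputFile, rgb, and projectionMatrix were defined above it. A sketch of plausible definitions (the matrix values are placeholders in the same Q layout used throughout, not AirSim's calibrated ones):

import numpy as np

outputFile = "cloud.txt"
rgb = "%d %d %d" % (0, 255, 0)   # one fixed colour per point

w, h, f = 256, 144, 128.0        # placeholder image size and focal length
projectionMatrix = np.float32([[1, 0, 0, -0.5 * w],
                               [0, -1, 0, 0.5 * h],
                               [0, 0, 0, -f],
                               [0, 0, 1, 0]])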
Example #59
    # (call opening reconstructed; the excerpt began mid-argument-list)
    stereo = cv2.StereoSGBM_create(minDisparity= min_disp,
                                   numDisparities= num_disp,
                                   blockSize= win_size,
                                   uniquenessRatio= 10,
                                   speckleWindowSize= 100,
                                   speckleRange= 32,
                                   disp12MaxDiff= 1,
                                   P1= 8*3*win_size**2,
                                   P2= 32*3*win_size**2,
                                   )

    print "\nComputing the disparity map..."
    disparity_map = stereo.compute(image_left, image_right).astype(np.float32) / 16.0

    print "\nGenerating the 3D map..."
    h, w = image_left.shape[:2]
    focal_length = 0.8*w

    # Perspective transformation matrix
    Q = np.float32([[1, 0, 0, -w/2.0],
                    [0,-1, 0, h/2.0],
                    [0, 0, 0, -focal_length],
                    [0, 0, 1, 0]])

    points_3D = cv2.reprojectImageTo3D(disparity_map, Q)
    colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
    mask_map = disparity_map > disparity_map.min()
    output_points = points_3D[mask_map]
    output_colors = colors[mask_map]

    print "\nCreating the output file..."
    create_output(output_points, output_colors, output_file)