Example #1
# Snippet assumes: import glob, cv2, numpy as np, matplotlib.pyplot as plt,
# and a global options object `opt` supplied by the surrounding project.
def calibration(**kwargs):
    for k, v in kwargs.items():
        setattr(opt, k, v)
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6 * 7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    objpoints_r = []
    imgpoints_r = []

    images = glob.glob('../left/*.jpg')
    images_r = glob.glob('../right/*.jpg')
    images.sort()
    images_r.sort()

    for fname, fname_r in zip(images, images_r):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        img_r = cv2.imread(fname_r)
        gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
        ret_r, corners_r = cv2.findChessboardCorners(gray_r, (7, 6), None)

        # If found, add object points, image points (after refining them)
        if ret and ret_r:
            objpoints.append(objp)
            objpoints_r.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            corners2_r = cv2.cornerSubPix(gray_r, corners_r, (11, 11),
                                          (-1, -1), criteria)
            imgpoints.append(corners2)
            imgpoints_r.append(corners2_r)

            # Draw and display the corners
            if opt.disp_calib:
                cv2.imshow('img', img)
                cv2.waitKey(500)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)
    img = cv2.imread('../left/left' + str(opt.sample) + '.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                      (w, h))
    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    if opt.disp_calib:
        cv2.imwrite('../calibresult/left' + str(opt.sample) + '.png', dst)

    ret, mtx_r, dist_r, rvecs, tvecs = cv2.calibrateCamera(
        objpoints_r, imgpoints_r, gray_r.shape[::-1], None, None)
    img_r = cv2.imread('../right/right' + str(opt.sample) + '.jpg')
    h, w = img_r.shape[:2]
    newcameramtx_r, roi = cv2.getOptimalNewCameraMatrix(
        mtx_r, dist_r, (w, h), 1, (w, h))
    # undistort
    dst_r = cv2.undistort(img_r, mtx_r, dist_r, None, newcameramtx_r)

    # crop the image
    x, y, w, h = roi
    dst_r = dst_r[y:y + h, x:x + w]
    if opt.disp_calib:
        cv2.imwrite('../calibresult/right' + str(opt.sample) + '.png', dst_r)

    if not opt.stereo_calib:
        exit(0)

    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
        cv2.stereoCalibrate(objpoints, imgpoints, imgpoints_r, mtx,
                            dist, mtx_r, dist_r, gray.shape[::-1])

    if opt.matlab:
        try:
            R = opt.R[opt.sample]
            T = opt.T[opt.sample]
        except KeyError:
            print('Please modify config to add R and T for ' + str(opt.sample))

    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
        cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
        gray.shape[::-1], R, T)

    # Note: the sixth positional argument of initUndistortRectifyMap is the
    # output map type (m1type), not an interpolation flag; cv2.INTER_NEAREST
    # is 0 here, which makes OpenCV fall back to its default map type.
    left_map1, left_map2 = cv2.initUndistortRectifyMap(cameraMatrix1,
                                                       distCoeffs1, R1, P1,
                                                       gray.shape[::-1],
                                                       cv2.INTER_NEAREST)
    right_map1, right_map2 = cv2.initUndistortRectifyMap(
        cameraMatrix2, distCoeffs2, R2, P2, gray.shape[::-1],
        cv2.INTER_NEAREST)

    img = cv2.imread('../left/left' + str(opt.sample) + '.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img = cv2.imread('../right/right' + str(opt.sample) + '.jpg')
    gray_r = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    imgL = cv2.remap(gray, left_map1, left_map2, cv2.INTER_LINEAR)
    imgR = cv2.remap(gray_r, right_map1, right_map2, cv2.INTER_LINEAR)

    if opt.disp_stereo_calib:
        cv2.imwrite(
            '../result/stereo_calibresult/left' + str(opt.sample) + '.png',
            imgL)
        cv2.imwrite(
            '../result/stereo_calibresult/right' + str(opt.sample) + '.png',
            imgR)

        plt.subplot(121)
        plt.title('left')
        plt.imshow(imgL, cmap='gray')
        plt.axis('off')
        plt.subplot(122)
        plt.title('right')
        plt.imshow(imgR, cmap='gray')
        plt.axis('off')
        plt.show()

    if not opt.disparity:
        exit(0)

    cv2.namedWindow("depth")
    cv2.namedWindow("disparity")
    cv2.moveWindow("depth", 0, 0)
    cv2.moveWindow("disparity", 600, 0)

    def callbackFunc(e, x, y, f, p):
        if e == cv2.EVENT_LBUTTONDOWN:
            print(threeD[y][x])

    cv2.setMouseCallback("depth", callbackFunc, None)

    stereo = cv2.StereoSGBM_create(numDisparities=16 * opt.num,
                                   blockSize=opt.blockSize)
    disparity = stereo.compute(imgL, imgR)

    disp = cv2.normalize(disparity,
                         disparity,
                         alpha=0,
                         beta=255,
                         norm_type=cv2.NORM_MINMAX,
                         dtype=cv2.CV_8U)
    # Reproject the image into 3D space; the z value gives the distance at each pixel
    threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., Q)

    cv2.imshow("disparity", disp)
    cv2.imshow("depth", imgL)

    key = cv2.waitKey(0)
    if key == ord("q"):
        exit(0)
    elif key == ord("s"):
        cv2.imwrite("../result/disparity/disparity" + opt.sample + ".png",
                    disp)
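
The mouse callback above prints the full reprojected 3D point under the cursor. For a plain metric depth map, the pinhole relation Z = fx * B / d can be applied directly; a minimal sketch (not part of the original function), assuming the names computed above and that T from stereoCalibrate is metric:

    # Hedged sketch: depth map from the fixed-point SGBM disparity.
    fx = newcameramtx[0, 0]             # focal length in pixels
    baseline = abs(float(T[0, 0]))      # stereo baseline, same units as T
    disp_f = disparity.astype(np.float32) / 16.0
    depth = np.zeros_like(disp_f)
    valid = disp_f > 0
    depth[valid] = fx * baseline / disp_f[valid]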
Example #2
while (cv2.waitKey(1) & 0xFF != ord('q')):
    left_frame = cv2.imread(
        '/home/deepl/Documents/wb/opencv-3.0.0/samples/data/aloeL.jpg')
    right_frame = cv2.imread(
        '/home/deepl/Documents/wb/opencv-3.0.0/samples/data/aloeR.jpg')
    # our operations on the frame come here
    gray_left = cv2.cvtColor(left_frame, cv2.COLOR_BGR2GRAY)
    gray_right = cv2.cvtColor(right_frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('left_Webcam', gray_left)
    cv2.imshow('right_Webcam', gray_right)
    block_size = 15  # defined up front so P1/P2 can reference it (was a NameError)
    stereo = cv2.StereoSGBM_create(
        minDisparity=1,
        numDisparities=16,
        blockSize=block_size,
        # uniquenessRatio = 10,
        speckleWindowSize=55,
        speckleRange=32,
        disp12MaxDiff=1,
        P1=8 * 3 * block_size**2,
        P2=32 * 3 * block_size**2)

    disparity = stereo.compute(gray_left, gray_right)
    disparity = cv2.normalize(disparity,
                              disparity,
                              alpha=0,
                              beta=255,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_8U)
    cv2.imshow('disparity', disparity)

cv2.destroyAllWindows()
Example #3
    # Truncated snippet: `speckleRange`, `window_size`, `uniquenessRatio`,
    # `min_disp`, `num_disp`, and the `update` callback are defined earlier
    # in the original file.
    speckleWindowSize = 3
    disp12MaxDiff = 200
    P1 = 600
    P2 = 2400
    #imgL = cv2.imread('images/dis1.jpg')
    #imgR = cv2.imread('images/dis2.jpg')
    imgL = cv2.imread(r'C:\Sunaoxue\IT_Project\ADD/test/1.jpg')  # raw strings avoid invalid escapes
    imgR = cv2.imread(r'C:\Sunaoxue\IT_Project\ADD/test/2.jpg')



    cv2.namedWindow('disparity')
    cv2.createTrackbar('speckleRange', 'disparity', speckleRange, 50, update)    
    cv2.createTrackbar('window_size', 'disparity', window_size, 21, update)
    cv2.createTrackbar('speckleWindowSize', 'disparity', speckleWindowSize, 200, update)
    cv2.createTrackbar('uniquenessRatio', 'disparity', uniquenessRatio, 50, update)
    cv2.createTrackbar('disp12MaxDiff', 'disparity', disp12MaxDiff, 250, update)
    stereo = cv2.StereoSGBM_create(
        minDisparity = min_disp,
        numDisparities = num_disp,
        blockSize = window_size,
        uniquenessRatio = uniquenessRatio,
        speckleRange = speckleRange,
        speckleWindowSize = speckleWindowSize,
        disp12MaxDiff = disp12MaxDiff,
        P1 = P1,
        P2 = P2
    )
    update()
    cv2.waitKey(100000)
Example #4
def feed_gray(cap):  # reconstructed head of the truncated helper
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray

window_size = 3
min_disp = 16
num_disp = 112 - min_disp
# stereo = cv2.StereoBM_create(16, 5)  # block-matching alternative (unused)
stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                               numDisparities=num_disp,  # must be divisible by 16 (was 3)
                               blockSize=window_size,
                               P1=0,
                               P2=0,
                               disp12MaxDiff=0,
                               preFilterCap=0,
                               uniquenessRatio=0,
                               speckleWindowSize=0,
                               speckleRange=0)


while True:
    # cap1 / cap2 are cv2.VideoCapture handles opened earlier in the original file.
    gray1 = feed_gray(cap1)
    gray2 = feed_gray(cap2)
    
    disparity = stereo.compute(gray1, gray2).astype(np.float32)/16.0
    disparity = (disparity-min_disp)/num_disp
    cv2.imshow("disparity", disparity)
    #plt.imshow(disparity,'gray')
Example #5
import cv2
import numpy as np
from matplotlib import pyplot as plt

img_idx = 0

imgL = cv2.imread(f'../imgs/rectified/left/{img_idx}.jpg')
imgR = cv2.imread(f'../imgs/rectified/right/{img_idx}.jpg')

# Disparity range tuning
window_size = 3
min_disp = 0
num_disp = 320 - min_disp

stereo = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=240,  # max_disp has to be divisible by 16, e.g. 192, 256
    blockSize=3,
    P1=8 * 3 * window_size**2,
    # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 works nicely
    P2=32 * 3 * window_size**2,
    disp12MaxDiff=1,
    uniquenessRatio=15,
    speckleWindowSize=0,
    speckleRange=2,
    preFilterCap=63,
    mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)
disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
# disparity = disparity.transpose()
plt.axis('off')
plt.imshow(disparity, 'gray')
plt.show()
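
As the comments above note, numDisparities must be divisible by 16 in the current OpenCV implementation. A small illustrative helper (not from the original) that rounds an arbitrary search range up to a valid value:

def valid_num_disparities(max_disp, min_disp=0):
    # Round (max_disp - min_disp) up to the next multiple of 16.
    span = max(max_disp - min_disp, 1)
    return ((span + 15) // 16) * 16

# valid_num_disparities(240) -> 240, valid_num_disparities(250) -> 256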
Example #6
import cv2
import numpy as np

max_disparity = 128
stereoProcessor = cv2.StereoSGBM_create(0, max_disparity,
                                        21)  # 21 is block size of neighbours


def calc_disparity_map(gl_img, gr_img):
    disparity = stereoProcessor.compute(gl_img, gr_img)

    dispNoiseFilter = 8  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity = (disparity / 16.)

    return disparity


def take_subarray(arr, x, y, w, h):
    arr_h, arr_w = arr.shape

    return arr[max(y, 0):min(y + h + 1, arr_h),
               max(x, 0):min(x + w + 1, arr_w)]


def non_zero_mean(disparities):
    # Guard against an all-zero (fully filtered) patch.
    nonzero = np.count_nonzero(disparities)
    return disparities.sum() / nonzero if nonzero else 0.0
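
A hedged usage sketch tying the three helpers above together; the file names and box coordinates are made up for illustration:

gl_img = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
gr_img = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)

disparity = calc_disparity_map(gl_img, gr_img)
box = take_subarray(disparity, 200, 150, 60, 40)  # (x, y, w, h) of an object
print('mean disparity in box:', non_zero_mean(box))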
Example #7
# Truncated snippet using the ZED SDK Python bindings (`sl` is the pyzed module).
def main():

    init = sl.InitParameters()
    init.coordinate_units = sl.UNIT.UNIT_METER
    cam = sl.Camera()
    status = cam.open(init)
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD
    mat1 = sl.Mat()
    mat2 = sl.Mat()

    # if len(sys.argv) == 1 :
    #     print('Please provide ZED serial number')
    #     exit(1)

    # # Open the ZED camera
    # cap = cv2.VideoCapture(0)
    # if cap.isOpened() == 0:
    #     exit(-1)

    # image_size = Resolution()
    # image_size.width = 1280
    # image_size.height = 720

    # # Set the video resolution to HD720
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, image_size.width*2)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, image_size.height)

    # serial_number = int(sys.argv[1])
    # calibration_file = download_calibration_file(serial_number)
    # if calibration_file  == "":
    #     exit(1)
    # print("Calibration file found. Loading...")

    # camera_matrix_left, camera_matrix_right, map_left_x, map_left_y, map_right_x, map_right_y = init_calibration(calibration_file, image_size)

    while True:
        # Get a new frame from camera
        #retval, frame = cap.read()
        # Extract left and right images from side-by-side
        #left_right_image = np.split(frame, 2, axis=1)
        # Display images
        err = cam.grab(runtime)
        cam.retrieve_image(mat1, sl.VIEW.VIEW_LEFT)
        cam.retrieve_image(mat2, sl.VIEW.VIEW_RIGHT)
        left_rect = mat1.get_data()
        right_rect = mat2.get_data()
        #cv2.imshow("left RAW", left_right_image[0])

        #left_rect = cv2.remap(left_right_image[0], map_left_x, map_left_y, interpolation=cv2.INTER_LINEAR)
        #right_rect = cv2.remap(left_right_image[1], map_right_x, map_right_y, interpolation=cv2.INTER_LINEAR)

        cv2.imshow("left RECT", left_rect)
        cv2.imshow("right RECT", right_rect)

        # Note: the matcher could be created once, outside the loop.
        stereo = cv2.StereoSGBM_create(minDisparity=1,
                                       numDisparities=128,
                                       blockSize=4,
                                       uniquenessRatio=1,
                                       speckleRange=3,
                                       speckleWindowSize=8,
                                       disp12MaxDiff=200,
                                       P1=600,
                                       P2=2400)
        # Alternative parameters kept from the original:
        # SADWindowSize = 6, uniquenessRatio = 10, speckleWindowSize = 100,
        # speckleRange = 32, disp12MaxDiff = 1, P1 = 8*3*3**2, P2 = 32*3*3**2,
        # fullDP = False

        disp = stereo.compute(left_rect, right_rect).astype(np.float32) / 16.0

        cv2.imshow("disparity", (disp-1)/(128))


        if cv2.waitKey(30) >= 0:
            break

    exit(0)
Example #8
def calculateDisparityWLS(imgL, imgR):
    #calculates the disparity image for a pair of stereo images combined with a WLS filter,
    #utilising the online tutorial https://docs.opencv.org/master/d3/d14/tutorial_ximgproc_disparity_filtering.html
    #(`max_disparity` and `crop_disparity` are module globals in the original file)

    #convert images to greyscale
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(8, 8))
    grayL = clahe.apply(grayL)
    grayR = clahe.apply(grayR)

    #raise to power to improve calculation
    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    #to improve contrast
    #grayL = cv2.equalizeHist(grayL)
    #grayR = cv2.equalizeHist(grayR)

    filterLambda = 80000
    sigma = 1.2

    #creates both matchers
    leftSide = cv2.StereoSGBM_create(minDisparity=0,
                                     numDisparities=max_disparity,
                                     blockSize=11)
    rightSide = cv2.ximgproc.createRightMatcher(leftSide)
    #initialises the wls filter
    wls = cv2.ximgproc.createDisparityWLSFilter(leftSide)
    wls.setLambda(filterLambda)
    wls.setSigmaColor(sigma)

    #computes both disparity images
    disparityL = leftSide.compute(grayL, grayR)
    disparityR = rightSide.compute(grayR, grayL)

    #applies speckle filtering to both disparity images
    dispNoiseFilter = 5
    cv2.filterSpeckles(disparityL, 0, 4000, max_disparity - dispNoiseFilter)
    cv2.filterSpeckles(disparityR, 0, 4000, max_disparity - dispNoiseFilter)

    #thresholds and scales for display
    _, disparityLScaled = cv2.threshold(disparityL, 0, max_disparity * 16,
                                        cv2.THRESH_TOZERO)
    disparityLShow = (disparityLScaled / 16).astype(np.uint8)

    #applies the wls filter to both disparity images to produce one image
    disparity = wls.filter(disparityL, grayL, None, disparityR)

    #further speckle filtering to the final image, thresholding and scaling for distance calculation
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)
    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16).astype(np.uint8)

    #crops unique sections of each camera
    if (crop_disparity):
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]

    return disparity_scaled
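
The function reads the module globals max_disparity and crop_disparity; a hedged usage sketch with assumed values and file names:

max_disparity = 128   # must be divisible by 16
crop_disparity = False

imgL = cv2.imread('left.png')
imgR = cv2.imread('right.png')
disparity = calculateDisparityWLS(imgL, imgR)
cv2.imshow('WLS disparity', disparity)
cv2.waitKey(0)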
Example #9
# Truncated snippet: `proj_file` and `cam_file` are set earlier in the original file.
proj_image = cv2.imread(proj_file, cv2.IMREAD_GRAYSCALE)
cam_image = cv2.imread(cam_file, cv2.IMREAD_GRAYSCALE)

# Make sure they are the same size.
assert proj_image.shape == cam_image.shape

# Set up parameters for stereo matching (see OpenCV docs at
# http://goo.gl/U5iW51 for details).
min_disparity = 0
max_disparity = 16
window_size = 11
param_P1 = 0
param_P2 = 20000

# Create a stereo matcher object
matcher = cv2.StereoSGBM_create(min_disparity, max_disparity, window_size,
                                param_P1, param_P2)

# Compute a disparity image. The actual disparity image is in
# fixed-point format and needs to be divided by 16 to convert to
# actual disparities.
disparity = matcher.compute(cam_image, proj_image) / 16.0

# Pop up the disparity image.
#cv2.imshow('Disparity', disparity/disparity.max())
#while fixKeyCode(cv2.waitKey(5)) < 0:
#    pass

f = 600
u0 = 320
v0 = 240
b = 0.05
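
The snippet breaks off after defining pinhole intrinsics (focal length f, principal point (u0, v0)) and baseline b. A minimal sketch (not part of the original) of the back-projection such values typically feed, using the disparity map from above:

import numpy as np

# Z = f * b / d,  X = (u - u0) * Z / f,  Y = (v - v0) * Z / f
d = np.asarray(disparity, dtype=np.float32)
valid = d > 0
Z = np.zeros_like(d)
Z[valid] = f * b / d[valid]
v_idx, u_idx = np.indices(d.shape)
X = (u_idx - u0) * Z / f
Y = (v_idx - v0) * Z / f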
Example #10
    # Truncated snippet: `minDisparity`, `numDisparities`, `SADWindowSize`,
    # `I1`, `I2`, `savepath`, `file`, and `i` come from earlier in the
    # original file (`cv` is OpenCV: import cv2 as cv).
    P1 = 8 * 3 * SADWindowSize ** 2
    P2 = 32 * 3 * SADWindowSize ** 2
    disp12MaxDiff = 10
    preFilterCap = 0
    uniquenessRatio = 1
    speckleWindowSize = 100
    speckleRange = 10
    
    imgL = cv.cvtColor(I1, cv.COLOR_BGR2GRAY)
    imgR = cv.cvtColor(I2, cv.COLOR_BGR2GRAY)
    
    stereo = cv.StereoSGBM_create(minDisparity = minDisparity, 
                                  numDisparities = numDisparities, 
                                  blockSize = SADWindowSize, P1 = P1,
                                  P2 = P2, disp12MaxDiff = disp12MaxDiff, 
                                  preFilterCap = preFilterCap,
                                  uniquenessRatio = uniquenessRatio, 
                                  speckleWindowSize = speckleWindowSize, 
                                  speckleRange = speckleRange,
                                  mode = cv.StereoSGBM_MODE_HH)
    disparity = stereo.compute(imgL, imgR).astype(np.float32)/16
    
    disparity[disparity < 0] = np.nan
    
    cv.imwrite(savepath+'/left_disparity_map'+file[2*i][-11:-5]+'.tiff', disparity)
    disparity[np.isnan(disparity)] = 0  # x == np.nan is always False, so use np.isnan
    dp = cv.normalize(disparity, disparity, 
                      alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
    cv.imwrite(savepath+'/imgL_'+file[2*i][-11:-5]+'.png', dp)

Example #11
# Truncated snippet: `net` (a YOLO DNN), getWebcamFrame(), and the matcher
# parameters (min_disp, num_disp, block_size, window_size, uniqR, speckWS,
# speckR) are defined earlier in the original file.
classes = []
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
kernel = np.ones((3, 3), np.uint8)

### SETTINGS FOR STEREO VISION AND FILTERING
stereoSGBM = cv2.StereoSGBM_create(
    minDisparity=min_disp,
    numDisparities=num_disp,
    blockSize=block_size,
    P1=8 * 3 * window_size**2,
    P2=32 * 3 * window_size**2,
    disp12MaxDiff=1,  ##no difference
    preFilterCap=0,  ##no difference
    uniquenessRatio=uniqR,
    speckleWindowSize=speckWS,
    speckleRange=speckR,
    #mode = modeT)
    mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

#height, width, channels = colored_left.shape ##for object detection
colored_left, colored_right = getWebcamFrame()
gray_left = cv2.cvtColor(colored_left,
                         cv2.COLOR_BGR2GRAY)  #have to work with gray images
gray_right = cv2.cvtColor(colored_right, cv2.COLOR_BGR2GRAY)

lmbda = 80000
sigma = 1.8
Example #12
def ZedCamera(Cam):
    # Truncated snippet: Thread (from threading), Get_Frame, and the `frames`
    # and `Zed_Disp` queues are defined elsewhere in the original file.
    ## From ZedCamera conf file
    #[LEFT_CAM_VGA]
    Lfx = 350.494
    Lfy = 350.494
    Lcx = 331.039
    Lcy = 189.714
    Lk1 = -0.174488
    Lk2 = 0.027323
    Lk3 = 0  ##

    #[RIGHT_CAM_VGA]
    Rfx = 349.663
    Rfy = 349.663
    Rcx = 335.323
    Rcy = 189.551
    Rk1 = -0.175561
    Rk2 = 0.0269139
    Rk3 = 0  ##

    vc = cv2.VideoCapture(Cam)

    #stereo
    Baseline = 63
    CV = 0.00281173
    RX = 0.0031709
    RZ = -0.000340478

    R = np.array(
        [[0.9999959891308698, 0.0003449348247654698, 0.002811181626984763],
         [-0.0003360191236988869, 0.9999949147414725, -0.00317136915914702],
         [-0.002812261247064596, 0.003170411828413505, 0.9999910197974362]])
    # wvga 1344x376 focal ~0.008mm, 2560x720
    retw = vc.set(3, 1344)
    reth = vc.set(4, 376)

    CameraMatrixL = np.array([[Lfx, 0, Lcx], [0, Lfy, Lcy], [0, 0, 1]])

    CameraMatrixR = np.array([[Rfx, 0, Rcx], [0, Rfy, Rcy], [0, 0, 1]])

    distCoeffsL = np.array([[Lk1], [Lk2], [0], [0], [0]])
    distCoeffsR = np.array([[Rk1], [Rk2], [0], [0], [0]])

    T = np.array([[63], [0], [RZ]])
    print "start Zed Get_Frame"
    # thread for GPS RX serial
    Frames_Thread = Thread(target=Get_Frame, args=(vc, ))
    print Frames_Thread.start()
    print "started"
    if vc.isOpened():  # try to get the first frame
        imgL, imgR, dh, dw = frames.get()

        rval = True
    else:
        rval = False

    # disparity settings
    stereo = cv2.StereoBM_create(numDisparities=0, blockSize=21)
    #stereoRectify(cameraMatrix_left, distCoeffs_left, cameraMatrix_right, distCoeffs_right, image_size, R, T,R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY, 0, image_size);
    h, w = (dh, dw)
    size = (w, h)
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(CameraMatrixL,
                                                      distCoeffsL,
                                                      CameraMatrixR,
                                                      distCoeffsR, size, R, T)
    print(R1)
    print(P1)
    #(cameraMatrix_left, distCoeffs_left, R1, P1, image_size)
    map_left_x, map_left_y = cv2.initUndistortRectifyMap(
        CameraMatrixL, distCoeffsL, R1, P1, size, cv2.CV_32F)
    #initUndistortRectifyMap(cameraMatrix_right, distCoeffs_right, R2, P2, image_size, CV_32FC1, map_right_x, map_right_y);
    map_right_x, map_right_y = cv2.initUndistortRectifyMap(
        CameraMatrixR, distCoeffsR, R2, P2, size, cv2.CV_32F)
    cameraMatrixL = P1
    cameraMatrixR = P2
    print(size)

    ## filter
    # FILTER Parameters
    lmbda = 80000
    sigma = 1.2
    visual_multiplier = 1.0
    # SGBM Parameters -----------------
    window_size = 3  # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely

    left_matcher = cv2.StereoSGBM_create(
        minDisparity=0,
        numDisparities=32,  # max_disp has to be divisible by 16, e.g. 192, 256
        blockSize=5,
        P1=8 * 3 * window_size**2,  # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 works nicely
        P2=32 * 3 * window_size**2,
        disp12MaxDiff=1,
        uniquenessRatio=15,
        speckleWindowSize=0,
        speckleRange=2,
        preFilterCap=63,
        mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

    right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
    wls_filter = cv2.ximgproc.createDisparityWLSFilter(
        matcher_left=left_matcher)
    wls_filter.setLambda(lmbda)
    wls_filter.setSigmaColor(sigma)

    while rval:
        if not frames.empty():
            try:
                imgL, imgR, dh, dw = frames.get()

            except Exception:
                pass

        RectImgL = cv2.remap(imgL, map_left_x, map_left_y,
                             3)  #cv2.cvtColor( xxx,cv2.COLOR_BGR2GRAY)
        RectImgR = cv2.remap(imgR, map_right_x, map_right_y,
                             3)  #cv2.cvtColor( XXX,cv2.COLOR_BGR2GRAY)

        displ = left_matcher.compute(RectImgR,
                                     RectImgL)  # .astype(np.float32)/16
        dispr = right_matcher.compute(RectImgL,
                                      RectImgR)  # .astype(np.float32)/16
        displ = np.int16(displ)
        dispr = np.int16(dispr)

        th, im_th = cv2.threshold(RectImgL, 70, 240, cv2.THRESH_BINARY)

        # Copy the thresholded image.
        im_floodfill = im_th.copy()

        # Mask used to flood filling.
        # Notice the mask needs to be 2 pixels larger than the image.
        h, w = im_th.shape[:2]
        mask = np.zeros((h + 2, w + 2), np.uint8)

        # Floodfill from point (0, 0)
        cv2.floodFill(im_floodfill, mask, (0, 0), 255)

        # Invert floodfilled image
        im_floodfill_inv = cv2.bitwise_not(im_floodfill)

        im_floodfill_inv_gray = cv2.cvtColor(im_floodfill_inv,
                                             cv2.COLOR_BGR2GRAY)

        im_Binery = 1 * (im_floodfill > 200)

        filteredImg = wls_filter.filter(
            displ, RectImgL, None, dispr)  # important to put "imgL" here!!!
        filt = filteredImg
        filteredImg = cv2.normalize(src=filteredImg,
                                    dst=filteredImg,
                                    beta=0,
                                    alpha=255,
                                    norm_type=cv2.NORM_MINMAX)
        filteredImg = np.uint8(filteredImg)
        masked = filteredImg  #/im_Binery
        ##       out =((63*2.8)/ (masked_image)/1.)
        Zed_Disp.queue.clear()
        Zed_Disp.put((masked, cv2.cvtColor(RectImgL, cv2.COLOR_BGR2GRAY)))
        #cv2.imshow('Disparity Map', erode/255. )
        #print((masked_image[h/2,w/2])*2) #63 base distance ofcameras 0.008mm is focal lenth estimate

    cv2.destroyAllWindows()
    vc.release()
Example #13
import numpy as np
import cv2
from matplotlib import pyplot as plt

imgL = cv2.imread('/Users/stutishukla/Downloads/data/tsucuba_left.png',cv2.IMREAD_GRAYSCALE)
imgR = cv2.imread('/Users/stutishukla/Downloads/data/tsucuba_right.png',cv2.IMREAD_GRAYSCALE) 

'''Referred code from :https://docs.opencv.org/trunk/d2/d85/classcv_1_1StereoSGBM.html'''
stereo = cv2.StereoSGBM_create(numDisparities=32, blockSize=20)
disparity = stereo.compute(imgL,imgR)
# imwrite cannot store the raw 16-bit signed disparity as JPEG; scale to 8-bit first
disparity_u8 = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
cv2.imwrite('/Users/stutishukla/Downloads/task2_images/task2_disparity.jpg', disparity_u8)

Example #14
import numpy as np
from sklearn.preprocessing import normalize
import cv2
import open3d as o3d

left = cv2.imread('left.png')
right = cv2.imread('right.png')

window_size = 3
left_matcher = cv2.StereoSGBM_create(minDisparity=-39,
                                     numDisparities=144,
                                     blockSize=5,
                                     P1=8 * 3 * window_size**2,
                                     P2=32 * 3 * window_size**2,
                                     disp12MaxDiff=1,
                                     uniquenessRatio=10,
                                     speckleWindowSize=100,
                                     speckleRange=32,
                                     preFilterCap=63,
                                     mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
#WLS Filter
lmbda = 80000
sigma = 1.7
visual_multiplier = 1.0
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(left, right)
dispr = right_matcher.compute(right, left)
Example #15
        # Truncated snippet: `imgL`, `imgR`, and `window_size` come from
        # earlier in the original file.
        min_disp = 16
        num_disp = 192 - min_disp * 2
        blockSize = window_size
        uniquenessRatio = 1
        speckleRange = 50
        speckleWindowSize = 200
        disp12MaxDiff = 200
        P1 = 600
        P2 = 2400
        stereo = cv2.StereoSGBM_create(
            # Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
            minDisparity=min_disp,
            # Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
            numDisparities=num_disp,
            blockSize=window_size,
            uniquenessRatio=uniquenessRatio,
            speckleRange=speckleRange,
            speckleWindowSize=speckleWindowSize,
            disp12MaxDiff=disp12MaxDiff,
            P1=P1,
            P2=P2,
        )
        # compute disparity
        disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

        # print(disp[100:150,120:160])
        displayed_image = (disp - min_disp) / num_disp
        cv2.imshow(
            'disparity', displayed_image
        )  # imshow renders float images on a [0, 1] scale, hence the normalization above
        h, w = imgL.shape[:2]
Example #16
# Truncated snippet: `disparity`, `Q`, `imgL`, and `imgR` come from earlier
# in the original file.
disp = cv2.normalize(disparity,
                     disparity,
                     alpha=0,
                     beta=255,
                     norm_type=cv2.NORM_MINMAX,
                     dtype=cv2.CV_8U)

threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., Q)

# cv2.imshow("left", imgl_rectified)
# cv2.imshow("right", imgr_rectified)
cv2.imshow("depth", disp)
cv2.waitKey()
cv2.destroyAllWindows()
'''SGBM'''
sbm = cv2.StereoSGBM_create(0, 16 * 3, 5)
disparity_sbm = sbm.compute(imgL, imgR)
disp_sbm = cv2.normalize(disparity_sbm,
                         disparity_sbm,
                         alpha=0,
                         beta=255,
                         norm_type=cv2.NORM_MINMAX,
                         dtype=cv2.CV_8U)

# cv2.imshow("left", imgl_rectified)
# cv2.imshow("right", imgr_rectified)
cv2.imshow("depth", disp_sbm)
cv2.waitKey()
cv2.destroyAllWindows()
Example #17
def main():
    # Truncated snippet: `args`, `algorithm`, the CV_MATCHER_* constants, and the
    # matcher parameters (min_disp, num_disp, block_size, window_size,
    # uniqness_ratio, speckle_window_size, speckle_range) are defined elsewhere
    # in the original file.

    if (args.visualise3D):
        init_xyz = pptk.rand(10, 3)
        visualiser = pptk.viewer(init_xyz)

    try:
        #load calibration file
        cal_xml = args.calibration_folder + '/stereo_calibration.xml'
        fs = cv2.FileStorage(cal_xml, flags=cv2.FILE_STORAGE_READ)
        Q = fs.getNode("Q").mat()
        print("Q\n", Q)
        fs.release()

        #load camera images
        left_fns = glob.glob(args.input_folder + '/' + args.left_wildcard)
        right_fns = glob.glob(args.input_folder + '/' + args.right_wildcard)
        pose_fns = glob.glob(args.input_folder + '/' + args.pose_wildcard)

        #check the same number of left and right images exist
        if (not (len(left_fns) == len(right_fns))):
            raise ValueError(
                "Should have the same number of left and right images")

        if (args.pose_transformation):
            if (not (len(left_fns) == len(pose_fns))):
                raise ValueError(
                    "Should have the same number of image as pose files")

        i = 0
        while i < len(left_fns):
            left_fn = left_fns[i]
            right_fn = right_fns[i]
            if (args.pose_transformation):
                pose_fn = pose_fns[i]
                print(pose_fn)
            print(left_fn)
            print(right_fn)

            left_fn_basename = os.path.splitext(os.path.basename(left_fn))[0]
            print(left_fn_basename)

            print("reading images...")
            #read left and right image from file list
            imgL = cv2.imread(left_fn, cv2.IMREAD_GRAYSCALE)
            imgR = cv2.imread(right_fn, cv2.IMREAD_GRAYSCALE)

            # Convert source image to unsigned 8 bit integer Numpy array
            arrL = np.uint8(imgL)
            arrR = np.uint8(imgR)

            print(arrL.shape)
            print(arrR.shape)

            print("stereo matching...")

            #generate disparity using stereo matching algorithms
            if algorithm == CV_MATCHER_BM:
                stereo = cv2.StereoBM_create(numDisparities=num_disp,
                                             blockSize=block_size)
                stereo.setMinDisparity(min_disp)
                stereo.setSpeckleWindowSize(speckle_window_size)
                stereo.setSpeckleRange(speckle_range)
                stereo.setUniquenessRatio(uniqness_ratio)
            elif algorithm == CV_MATCHER_SGBM:
                stereo = cv2.StereoSGBM_create(
                    minDisparity=min_disp,
                    numDisparities=num_disp,
                    blockSize=block_size,
                    P1=8 * 3 * window_size**2,
                    P2=32 * 3 * window_size**2,
                    disp12MaxDiff=1,
                    uniquenessRatio=uniqness_ratio,
                    speckleWindowSize=speckle_window_size,
                    speckleRange=speckle_range)

            disp = stereo.compute(arrL, arrR).astype(np.float32) / 16.0

            print("generating 3D...")
            #reproject disparity to 3D
            points = cv2.reprojectImageTo3D(disp, Q)

            print("saving disparity maps...")

            disp = (disp - min_disp) / num_disp
            cv2.imwrite(
                args.output_folder_disparity +
                "/{}_disparity_map.png".format(left_fn_basename), disp)

            if (args.visualise_disparity):
                #dispay disparity to window
                plt.imshow(disp)
                plt.show(block=False)
                plt.pause(0.1)

            #normalise disparity
            imask = disp > disp.min()
            disp_thresh = np.zeros_like(disp, np.uint8)
            disp_thresh[imask] = disp[imask]

            disp_norm = np.zeros_like(disp, np.uint8)
            cv2.normalize(disp,
                          disp_norm,
                          alpha=0,
                          beta=255,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_8U)

            cv2.imwrite(
                args.output_folder_disparity +
                "/{}_disparity_image.png".format(left_fn_basename), disp_norm)

            #format colour image from left camera for mapping to point cloud
            h, w = arrL.shape[:2]
            colors = cv2.cvtColor(arrL, cv2.COLOR_GRAY2RGB)  # arrL is single-channel, so GRAY2RGB (BGR2RGB would fail)
            mask = disp > disp.min()
            out_points = points[mask]
            out_colors = colors[mask]

            out_points_fn = args.output_folder_point_clouds + '/{}_point_cloud.ply'.format(
                left_fn_basename)
            out_points_transformed_fn = args.output_folder_point_clouds + '/{}_point_cloud_transformed.ply'.format(
                left_fn_basename)

            if (args.pose_transformation):
                print("transforming point cloud...")

                #extract pose from pose file
                pose_file = open(pose_fn, 'r')
                line = pose_file.readline().rstrip()
                pose = line.split(',')
                if (not (len(pose) == 7)):
                    error_msg = "Invalid number of values in pose data\nShould be in format [x,y,z,w,x,y,z]"
                    raise ValueError(error_msg)
                pose_np = np.array([float(pose[0]),float(pose[1]),float(pose[2]),\
                                    float(pose[3]),float(pose[4]),float(pose[5]),float(pose[6])])

                print("transformation:")
                print(pose_np)
                #get tranlation and quaternion
                pose_t = np.array(
                    [float(pose[0]),
                     float(pose[1]),
                     float(pose[2])])
                pose_q = np.array([
                    float(pose[4]),
                    float(pose[5]),
                    float(pose[6]),
                    float(pose[3])
                ])
                pose_matrix = t_q_to_matrix(pose_t, pose_q)
                #print("transformation matrix:")
                #print(pose_matrix)

                transformed_points = transform_points(out_points, pose_matrix)

            if (args.visualise3D):
                #visualise point cloud
                visualise_points(visualiser, out_points, out_colors)

            if (args.pose_transformation):
                print("saving point clouds...")
                write_ply(out_points_transformed_fn, transformed_points,
                          out_colors)
            else:
                print("saving point cloud...")
            write_ply(out_points_fn, out_points, out_colors)

            i += 1

        if (args.visualise3D):
            visualiser.close()
        if (args.visualise_disparity):
            plt.close()

    except KeyboardInterrupt:
        if (args.visualise3D):
            visualiser.close()
        if (args.visualise_disparity):
            plt.close()
        raise  # re-raise the original KeyboardInterrupt
Example #18
# Requires: os, pickle, cv2, numpy as np (and opencv-contrib for cv2.ximgproc).
def run(args, save_dir, camera_mtx_dir):

    left_cam_num = args.left_cam_num
    right_cam_num = args.right_cam_num

    CamL = cv2.VideoCapture(left_cam_num)
    CamR = cv2.VideoCapture(right_cam_num)

    cam_mtx_path = os.path.join(save_dir, camera_mtx_dir, args.load_cam_mtx)
    with open(cam_mtx_path, 'rb') as f:
        total_mtx = pickle.load(f)

    MLS = total_mtx['left_cam_mtx']
    dLS = total_mtx['left_cam_dist']
    MRS = total_mtx['right_cam_mtx']
    dRS = total_mtx['right_cam_dist']
    R = total_mtx['rotation_mtx']
    T = total_mtx['translation_mtx']

    img_shape = (int(CamL.get(3)), int(CamL.get(4)))
    kernel = np.ones((3, 3), np.uint8)

    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(MLS, dLS, MRS, dRS,
                                                      img_shape, R, T)

    Left_Stereo_Map = cv2.initUndistortRectifyMap(MLS, dLS, RL, PL, img_shape,
                                                  cv2.CV_16SC2)
    Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR, img_shape,
                                                   cv2.CV_16SC2)

    window_size = 3
    min_disp = 2
    num_disp = 130 - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=window_size,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32,
                                   disp12MaxDiff=5,
                                   P1=8 * 3 * window_size**2,
                                   P2=32 * 3 * window_size**2)

    # Used for the filtered image
    stereoR = cv2.ximgproc.createRightMatcher(
        stereo)  # Create another stereo for right this time

    # WLS FILTER Parameters
    lmbda = 80000
    sigma = 1.8

    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=stereo)
    wls_filter.setLambda(lmbda)
    wls_filter.setSigmaColor(sigma)

    window_start = (640, 60)

    cv2.namedWindow('left_img')
    cv2.moveWindow('left_img', *window_start)

    cv2.namedWindow('right_img')
    cv2.moveWindow('right_img', int(window_start[0] + CamL.get(3)),
                   window_start[1])

    while True:
        retR, frameR = CamR.read()
        retL, frameL = CamL.read()

        # Rectify the images for rotation and alignment
        Left_nice = cv2.remap(
            frameL, Left_Stereo_Map[0], Left_Stereo_Map[1], cv2.INTER_LANCZOS4,
            cv2.BORDER_CONSTANT, 0
        )  # Rectify the image using the calibration parameters found during initialisation
        Right_nice = cv2.remap(frameR, Right_Stereo_Map[0],
                               Right_Stereo_Map[1], cv2.INTER_LANCZOS4,
                               cv2.BORDER_CONSTANT, 0)

        grayR = cv2.cvtColor(Right_nice, cv2.COLOR_BGR2GRAY)
        grayL = cv2.cvtColor(Left_nice, cv2.COLOR_BGR2GRAY)

        disp = stereo.compute(grayL, grayR)
        dispL = disp
        dispR = stereoR.compute(grayR, grayL)
        dispL = np.int16(dispL)
        dispR = np.int16(dispR)

        # Using the WLS filter
        filteredImgL = wls_filter.filter(dispL, grayL, None, dispR)
        filteredImgL = cv2.normalize(src=filteredImgL,
                                     dst=filteredImgL,
                                     beta=0,
                                     alpha=255,
                                     norm_type=cv2.NORM_MINMAX)
        filteredImgL = np.uint8(filteredImgL)

        filteredImgR = wls_filter.filter(dispR, grayR, None, dispL)
        filteredImgR = cv2.normalize(src=filteredImgR,
                                     dst=filteredImgR,
                                     beta=0,
                                     alpha=255,
                                     norm_type=cv2.NORM_MINMAX)
        filteredImgR = np.uint8(filteredImgR)

        # Colors map
        filt_ColorL = cv2.applyColorMap(filteredImgL, cv2.COLORMAP_OCEAN)
        filt_ColorR = cv2.applyColorMap(filteredImgR, cv2.COLORMAP_OCEAN)

        # Show the result for the Depth_image
        cv2.imshow('left_img', filt_ColorL)
        cv2.imshow('right_img', filt_ColorR)

        # End the Programme
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    CamR.release()
    CamL.release()
    cv2.destroyAllWindows()
Example #19
# Truncated snippet: `image_folder_list`, `depth_folder_list`, and
# basename_to_image_path_list() are defined earlier in the original file.
image_folder_list = list(map(os.path.basename, image_folder_list))
depth_folder_list = list(map(os.path.basename, depth_folder_list))

train_set = list(
    set.intersection(set(image_folder_list), set(depth_folder_list)))
val_image_set = list(set.difference(set(image_folder_list), set(train_set)))
val_depth_set = list(set.difference(set(depth_folder_list), set(train_set)))
data_max = 21931
data_min = 0

window_size = 9
minDisparity = 1
stereo = cv2.StereoSGBM_create(blockSize=10,
                               numDisparities=32,
                               preFilterCap=10,
                               minDisparity=minDisparity,
                               P1=4 * 3 * window_size**2,
                               P2=32 * 3 * window_size**2)

disparity_max = 1008
disparity_min = -16

for j, basename in enumerate(train_set):
    if j == 0:
        left_img, right_img = basename_to_image_path_list(basename)
        print(basename)

        for i in range(len(left_img)):
            left_img_img = Image.open(left_img[i]).resize((212, 64))
            right_img_img = Image.open(right_img[i]).resize((212, 64))
            # load img and resize it.
Example #20
    def __init__(self, config, bus):
        super().__init__(config, bus)
        bus.register("artf", "dropped")
        self.verbose = False
        self.dump_dir = None  # optional debug output into a directory
        self.scan = None  # should later be initialized via super()
        self.depth = None  # more precise definition of the depth image
        self.width = None  # detect from incoming images
        self.look_for_artefacts = config.get('artefacts', [])
        self.estimate_distance = config.get('estimate_distance', False)

        window_size = 5
        min_disp = 16
        num_disp = 192 - min_disp
        blockSize = window_size
        uniquenessRatio = 7
        speckleRange = 3
        speckleWindowSize = 75
        disp12MaxDiff = 200
        P1 = 8 * 3 * window_size**2
        P2 = 32 * 3 * window_size**2
        self.stereo_calc = cv2.StereoSGBM_create(
            minDisparity=min_disp,
            numDisparities=num_disp,
            blockSize=window_size,
            uniquenessRatio=uniquenessRatio,
            speckleRange=speckleRange,
            speckleWindowSize=speckleWindowSize,
            disp12MaxDiff=disp12MaxDiff,
            P1=P1,
            P2=P2)
        self.Q = np.float32([
            [1, 0, 0, -0.5 * CAMERA_WIDTH],
            [0, -1, 0, 0.5 * CAMERA_HEIGHT],  # turn points 180 deg around x-axis,
            [0, 0, 0, CAMERA_FOCAL_LENGTH],   # so that y-axis looks up
            [0, 0, 1 / 0.42, 0]
        ])

        self.detectors = [
            {
                'artefact_name': 'cubesat',
                'detector_type': 'classifier',
                'classifier': cv2.CascadeClassifier(str(curdir / 'xml/cubesat.xml')),
                'min_size': 5,
                'max_size': 110,
                'subsequent_detects_required': 3
            },
            {
                'artefact_name': 'homebase',
                'detector_type': 'classifier',
                'classifier': cv2.CascadeClassifier(str(curdir / 'xml/homebase.xml')),
                'min_size': 20,
                'max_size': 400,
                'subsequent_detects_required': 3
            },
            {
                'artefact_name': 'basemarker',
                'detector_type': 'colormatch',
                'min_size': 10,
                'max_size': 500,
                'mask': [
                    CAMERA_HEIGHT // 2, CAMERA_HEIGHT, 0, CAMERA_WIDTH
                ],  # [Y,X] order, look only in lower half of the screen (avoid solar panels)
                'pixel_count_threshold': 100,
                'bbox_union_count': 1,
                'hue_max_difference': 10,
                'hue_match': 100,  # from RGB 007DBD
                'subsequent_detects_required': 3  # noise will add some of this color, wait for a consistent sequence
            },
            {
                'artefact_name': 'homebase',
                'detector_type': 'colormatch',
                'min_size': 20,
                'max_size': 700,
                'mask': None,
                'pixel_count_threshold': 400,
                'bbox_union_count': 5,
                'hue_max_difference': 10,
                'hue_match': 19,  # from RGB FFA616
                'subsequent_detects_required': 3
            },
            {
                'artefact_name': 'rover',
                'detector_type': 'colormatch',
                'min_size': 10,
                'max_size': 700,
                'mask': [180, CAMERA_HEIGHT, 0, CAMERA_WIDTH
                         ],  # [Y,X] order - only look in lower half of screen
                'pixel_count_threshold': 150,
                'bbox_union_count': 10,
                'hue_max_difference': 3,
                'hue_match': 27,  # from RGB FFA616
                'subsequent_detects_required': 1
            },
            {
                'artefact_name': 'excavator_arm',
                'detector_type': 'colormatch',
                'min_size': 10,
                'max_size': 200,
                'mask': [0, 120, 0, CAMERA_WIDTH],  # [Y,X] order
                'pixel_count_threshold': 150,
                'bbox_union_count': 3,
                'hue_max_difference': 3,
                'hue_match': 27,  # from RGB FFA616
                'subsequent_detects_required': 1
            }
        ]
        self.detect_sequences = {}
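
The constructor above pairs an SGBM matcher with a hand-built Q matrix; a hedged sketch of how the two are typically combined (the method name and inputs are assumptions, not part of the original class):

    def depth_from_pair(self, grayL, grayR):
        # Hypothetical helper: disparity -> 3D points using self.Q above.
        disp = self.stereo_calc.compute(grayL, grayR).astype(np.float32) / 16.0
        return cv2.reprojectImageTo3D(disp, self.Q)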
Example #21
# Requires: import os, cv2 (with opencv-contrib for cv2.ximgproc).
g = os.walk("./data/rectified")

for path, dir_list, file_list in g:
    for dir_name in dir_list:
        if dir_name == 'result':
            continue
        print('loading images...')
        left_np = cv2.imread(os.path.join(path, dir_name, 'im0.png'), 0)
        right_np = cv2.imread(os.path.join(path, dir_name, 'im1.png'), 0)

        window_size = 3
        left_matcher = cv2.StereoSGBM_create(
            blockSize=3,
            P1=8 * 3 * window_size ** 2,
            P2=32 * 3 * window_size ** 2,
            disp12MaxDiff=-1,
            uniquenessRatio=10,
            speckleWindowSize=100,
            speckleRange=2,
            mode=1
        )

        right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)

        left_disp = left_matcher.compute(left_np, right_np)
        right_disp = right_matcher.compute(right_np, left_np)

        wls_filter = cv2.ximgproc.createDisparityWLSFilter(left_matcher)
        wls_filter.setLambda(8000)
        wls_filter.setSigmaColor(1.2)
        wls_filter.setDepthDiscontinuityRadius(7)  # Normal value = 7
        wls_filter.setLRCthresh(24)
Example #22
import cv2
import numpy
from matplotlib import pyplot as plt

# cv2.namedWindow("frame")
# cv2.namedWindow("disparity")
# cv2.namedWindow("images")
# cv2.namedWindow("imgL")
capture = cv2.VideoCapture()
capture.open(0)

stereo = cv2.StereoSGBM_create(minDisparity=-2,
                               numDisparities=16,
                               blockSize=18)
low = numpy.array((0, 0, 255))
high = numpy.array((255, 255, 255))

scale = 2
i = 0
while i == 0:
    s, frame = capture.read()
    # frame = cv2.blur(frame, (10, 10))
    cv2.imshow("frame", frame)
    frame = cv2.resize(frame, (0, 0), fx=1 / scale, fy=1 / scale)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    imgL = gray[0:int(240 / scale), 0:int(320 / scale)].copy()
    imgR = gray[0:int(240 / scale), int(320 / scale):int(640 / scale)].copy()
    disparity = stereo.compute(imgL, imgR)
    qw = cv2.normalize(disparity, disparity, 0, 255, cv2.NORM_MINMAX,
                       cv2.CV_8U)
    bgr = cv2.cvtColor(qw, cv2.COLOR_GRAY2BGR)
Example #23
# (adjust parameters if needed - this will affect the speed of processing)

# uses a modified H. Hirschmuller algorithm [Hirschmuller, 2008] that differs (see opencv manual)
# parameters can be adjusted, current ones from [Hamilton / Breckon et al. 2013]

# FROM manual: stereoProcessor = cv2.StereoSGBM(numDisparities=128, SADWindowSize=21);

# From help(cv2): StereoBM_create(...)
#        StereoBM_create([, numDisparities[, blockSize]]) -> retval
#
#    StereoSGBM_create(...)
#        StereoSGBM_create(minDisparity, numDisparities, blockSize[, P1[, P2[,
# disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, mode]]]]]]]]) -> retval

max_disparity = 128
stereoProcessor = cv2.StereoSGBM_create(0, max_disparity, 21)

print("##please ignore the next error output, the rest is correct.##")

# Truncated snippet: `left_file_list` and `skip_forward_file_pattern` are
# defined earlier in the original file.
for filename_left in left_file_list:
    # skip forward to start a file we specify by timestamp (if this is set)

    if ((len(skip_forward_file_pattern) > 0)
            and not (skip_forward_file_pattern in filename_left)):
        continue
    elif ((len(skip_forward_file_pattern) > 0)
          and (skip_forward_file_pattern in filename_left)):
        skip_forward_file_pattern = ""

    # from the left image filename get the corresponding right image
Example #24
#    disparity = np.uint8(255 * (disparity - d_min) / (d_max - d_min))
# Truncated snippet: `left_img_remap`, `right_img_remap`, `left`, and the
# 'image' window with its trackbars are set up earlier in the original file.
while True:
    cv2.imshow("image", disparity)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
    max_disp = 16 * 9
    win_size = cv2.getTrackbarPos('win_size', 'image')
    min_disp = -16 * cv2.getTrackbarPos('max_disp1', 'image')

    num_disp = max_disp - min_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   uniquenessRatio=cv2.getTrackbarPos('uniquenessRatio', 'image'),
                                   speckleWindowSize=cv2.getTrackbarPos('speckleWindowSize', 'image'),
                                   speckleRange=cv2.getTrackbarPos('speckleRange', 'image'),
                                   disp12MaxDiff=cv2.getTrackbarPos('disp12MaxDiff', 'image'),
                                   P1=8 * 3 * win_size**2,
                                   P2=32 * 3 * win_size**2)
    disparity = stereo.compute(left_img_remap, right_img_remap).astype(np.float32)
    d_min = disparity.min()  # renamed from `min`/`max` to avoid shadowing the builtins
    d_max = disparity.max()
    disparity = np.uint8(255 * (disparity - d_min) / (d_max - d_min))

cv2.destroyAllWindows()
w = left.shape[1]
h = left.shape[0]
#focal_length = data["cameraMatrix1"][0][0]
focal_length = 0.8 * w
#Q = np.float32([[1, 0, 0, -w/2.0],
Example #25
# Truncated snippet: MRS, dRS, RR, PR, and ChessImaR come from the
# calibration steps earlier in the original file.
Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                               ChessImaR.shape[::-1],
                                               cv2.CV_16SC2)
#*******************************************
#***** Parameters for the StereoVision *****
#*******************************************

# Create StereoSGBM and prepare all parameters
window_size = 3
min_disp = 2
num_disp = 130 - min_disp
stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                               numDisparities=num_disp,
                               blockSize=window_size,
                               uniquenessRatio=10,
                               speckleWindowSize=100,
                               speckleRange=32,
                               disp12MaxDiff=5,
                               P1=8 * 3 * window_size**2,
                               P2=32 * 3 * window_size**2)

# Used for the filtered image
stereoR = cv2.ximgproc.createRightMatcher(
    stereo)  # Create another stereo for right this time

# WLS FILTER Parameters
lmbda = 80000
sigma = 1.8
visual_multiplier = 1.0

wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=stereo)
Example #26
import numpy as np
import cv2
from matplotlib import pyplot as plt

imgL = cv2.imread(r"C:\Users\9426224\Desktop\Data\SGBM\L.JPG")
imgR = cv2.imread(r"C:\Users\9426224\Desktop\Data\SGBM\R.JPG")

window_size = 3

stereo = cv2.StereoSGBM_create(minDisparity=0,
                               numDisparities=240,
                               blockSize=3,
                               P1=8 * 3 * window_size**2,
                               P2=32 * 3 * window_size**2,
                               disp12MaxDiff=1,
                               uniquenessRatio=15,
                               speckleWindowSize=0,
                               speckleRange=2,
                               preFilterCap=63,
                               mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

# a = stereo.compute(imgL, imgR)

disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
plt.figure(dpi=500)
plt.imshow(disparity, 'gray')
plt.show()
# cv2.imshow("1", disparity)
Example #27
    # Truncated snippet: the first trackbar call is cut off; judging by the
    # parallel code in Example #3 it sets up the speckleWindowSize slider.
    cv2.createTrackbar('speckleWindowSize', 'disparity', speckleWindowSize,
                       200, update)
    cv2.createTrackbar('uniquenessRatio', 'disparity', uniquenessRatio, 50,
                       update)
    cv2.createTrackbar('disp12MaxDiff', 'disparity', disp12MaxDiff, 250,
                       update)
    cv2.createTrackbar('P1', 'disparity', P1, 600, update)
    cv2.createTrackbar('P2', 'disparity', P2, 2400, update)

    stereo = cv2.StereoSGBM_create(
        minDisparity=min_disp,  # minimum possible disparity
        numDisparities=num_disp,  # maximum disparity minus minimum disparity
        blockSize=window_size,  # size of the matched block
        uniquenessRatio=uniquenessRatio,  # percentage margin for accepting a match as correct
        speckleRange=speckleRange,  # maximum disparity variation within each connected component
        speckleWindowSize=speckleWindowSize,  # maximum size of a connected region whose noise is still zeroed out
        disp12MaxDiff=disp12MaxDiff,  # maximum allowed difference between pixels of the two images
        P1=P1,  # penalty for a disparity change of 1 between neighbouring pixels
        P2=P2  # penalty for a disparity change of more than 1
    )

(imgsize, leftMapX, leftMapY, leftROI, rightMapX, rightMapY, rightROI, frame,
 frame2) = Calibrate2Cameras(0, 2)
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(2)
kernel_erode = np.ones((7, 7), np.uint8)
kernel_dilate = np.ones((5, 5), np.uint8)
savedMeanDelta_danger = 0
Example #28
	def update_disparity(self, depth_map = False, plot3d = False):
		#Verify disparity map is not being calculated
		if self.busy_disparity: return
		self.busy_disparity = True

		#Ensure both frames are valid
		if any(f is None for f in self.frames.values()):
			self.busy_disparity = False
			return

		#Fetch and rectify both frames
		img_l, img_r = self.frames['l'], self.frames['r']
		rect_l, rect_r = self.parentWindow.calibration.cw.rectify_image_pair(img_l, img_r)

		#Preprocessing
		if self.params['checkbox']['preprocessing']:
			rect_l = self.preprocessing_options[self.params['preprocessing']['currentTab']](rect_l)
			rect_r = self.preprocessing_options[self.params['preprocessing']['currentTab']](rect_r)

		#Create matcher and compute disparity map
		stereo = cv.StereoSGBM_create(**self.params['matcher'])
		disp = stereo.compute(rect_l, rect_r)

		#Pre-speckle
		if self.params['checkbox']['pre-speckle']:
			cv.filterSpeckles(disp, 0, self.params['filter']['pre_maxSpeckleSize'], self.params['filter']['pre_maxDiff'])

		#Blur filter
		if self.params['checkbox']['blur']:
			#Disparity to image
			_img = disp2img(disp, self.params['matcher']['minDisparity'], self.params['matcher']['numDisparities'])
			#Convert from float32 to uint8
			_img = cv.cvtColor(cv.normalize(_img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U), cv.COLOR_GRAY2BGR)

			#Filter image
			_img = self.blur_filter_options[self.params['filter']['blur_filter']['currentTab']](_img)

			#Convert from uint8 to float32
			_img = (cv.cvtColor(_img, cv.COLOR_BGR2GRAY) / 255).astype("float32")
			#Image to disparity
			disp = img2disp(_img, self.params['matcher']['minDisparity'], self.params['matcher']['numDisparities'])

		#WLS
		if self.params['checkbox']['wls']:
			right_disp = cv.ximgproc.createRightMatcher(stereo).compute(rect_r, rect_l)
			wls_filter = cv.ximgproc.createDisparityWLSFilter(stereo)
			wls_filter.setLambda(self.params['filter']['lambda'])
			wls_filter.setSigmaColor(self.params['filter']['sigma'])
			disp = wls_filter.filter(disp, rect_l, disparity_map_right=right_disp)

		#Post-speckle
		if self.params['checkbox']['post-speckle']:
			cv.filterSpeckles(disp, 0, self.params['filter']['post_maxSpeckleSize'], self.params['filter']['post_maxDiff'])

		#Normalize disparity to show as image
		disp_norm = disp2img(disp, self.params['matcher']['minDisparity'], self.params['matcher']['numDisparities'])
		#Crop if requested
		if self.params['checkbox']['crop']: disp_norm = self._crop_img(disp_norm)
		#Show
		cv.imshow("Disparity map", disp_norm)

		#Depth map
		if depth_map:
			Q = self.parentWindow.calibration.cw.rectification_results['disparity2depth_mappingMatrix']
			if self.params['checkbox']['crop']: disp = self._crop_img(disp)
			show_depth_map(disp, Q)

		#If 3dplot is requested
		if plot3d:
			#Fetch perspective transformation matrix from the calibration process
			Q = self.parentWindow.calibration.cw.rectification_results['disparity2depth_mappingMatrix']

			#Crop both disparity and image if requested
			if self.params['checkbox']['crop']:
				disp = self._crop_img(disp)
				rect_l = self._crop_img(rect_l)

			self.plot3d_options[self.params['3dconf']['currentTab']](rect_l, disp, Q)

		self.busy_disparity = False
Example #29
windowNameD = "Stereo Disparity"  # window name

################################################################################

# set up defaults for stereo disparity calculation

max_disparity = 128
window_size = 21

stereoProcessor = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=max_disparity,  # max_disp has to be divisible by 16, e.g. 192, 256
    blockSize=window_size,
    #P1=8 * window_size ** 2,       # 8*number_of_image_channels*SADWindowSize*SADWindowSize
    #P2=32 * window_size ** 2,      # 32*number_of_image_channels*SADWindowSize*SADWindowSize
    #disp12MaxDiff=1,
    #uniquenessRatio=15,
    #speckleWindowSize=0,
    #speckleRange=2,
    #preFilterCap=63,
    mode=cv2.STEREO_SGBM_MODE_HH)

# set up left->right + right->left matching and
# weighted least squares filtering (not used by default)

left_matcher = stereoProcessor
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)

wls_lmbda = 800
wls_sigma = 1.2
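
The listing stops right after the WLS parameters; a hedged sketch of the filtering step they set up (grayL and grayR are hypothetical rectified grayscale frames):

# Assumed continuation: compute both disparities and apply the WLS filter.
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(wls_lmbda)
wls_filter.setSigmaColor(wls_sigma)

displ = left_matcher.compute(grayL, grayR)
dispr = right_matcher.compute(grayR, grayL)
filtered = wls_filter.filter(displ, grayL, disparity_map_right=dispr)
cv2.imshow(windowNameD, (filtered / 16.0).clip(0, max_disparity) / max_disparity)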
Example #30
    def __init__(self, stereo_mode, topic_to, camera_matrix_file,
                 translation_vector_file, camera_size):
        # Load Camera matrix, Translation vector
        camera_matrix = np.load(camera_matrix_file)
        translation_vector = np.load(translation_vector_file)
        # Read focal length
        self._FOCAL_LENGTH = camera_matrix[0, 0]
        # Read baseline distance
        self._STEREO_BASELINE = -translation_vector[0] * 2.54

        # Camera frame width and height
        self._CAMERA_WIDTH = camera_size[0]
        self._CAMERA_HEIGHT = camera_size[1]

        # Each Frame width and height
        self._FRAME_WIDTH = self._CAMERA_WIDTH // 2
        self._FRAME_HEIGHT = self._CAMERA_HEIGHT

        self._BLOCK_SIZE = 5
        self._MIN_DISPARITY = 0
        self._NUM_DISPARITIES = 160
        self._LAMBDA = 80000
        self._SIGMA = 1.8

        # StereoBM
        if stereo_mode == "StereoBM":
            self._left_stereo_matcher = cv2.StereoBM_create()
            self._left_stereo_matcher.setMinDisparity(self._MIN_DISPARITY)
            self._left_stereo_matcher.setNumDisparities(self._NUM_DISPARITIES)
            self._left_stereo_matcher.setBlockSize(self._BLOCK_SIZE)
            self._left_stereo_matcher.setSpeckleRange(32)
            self._left_stereo_matcher.setSpeckleWindowSize(100)
            self._left_stereo_matcher.setUniquenessRatio(15)
            self._left_stereo_matcher.setDisp12MaxDiff(1)

        # StereoSGBM
        elif stereo_mode == "StereoSGBM":
            # Left stereo matcher
            self._left_stereo_matcher = cv2.StereoSGBM_create(
                minDisparity=self._MIN_DISPARITY,
                numDisparities=self._NUM_DISPARITIES,
                blockSize=self._BLOCK_SIZE,
                P1=8 * 3 * self._BLOCK_SIZE**2,
                P2=32 * 3 * self._BLOCK_SIZE**2,
                disp12MaxDiff=1,
                uniquenessRatio=15,
                speckleWindowSize=100,
                speckleRange=32,
                preFilterCap=63,
                mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

        # Right stereo matcher
        self._right_stereo_matcher = cv2.ximgproc.createRightMatcher(
            self._left_stereo_matcher)
        # Create WLS filter
        self._wls_filter = cv2.ximgproc.createDisparityWLSFilter(
            matcher_left=self._left_stereo_matcher)
        self._wls_filter.setLambda(self._LAMBDA)
        self._wls_filter.setSigmaColor(self._SIGMA)

        self._bridge = CvBridge()
        # Create Colored disparity publisher
        self._object_distance_frame_publisher = rospy.Publisher(
            topic_to, CompressedImage, queue_size=1)