Example #1
def compute_disparity(left_image, right_image, maximum_disparity, noise_filter,
                      width):
    """
    Input:
    - left image and rectified right image, maximum disparity value
    - noise filter: increase to be more aggressive
    - width: image width, used when cropping the result
    Output:
    - disparity between the images, scaled appropriately
    """
    # convert to grayscale (as the disparity matching works on grayscale)
    grayL, grayR = convert_to_grayscale([left_image, right_image])

    # perform preprocessing - raise to the power, as this subjectively appears
    # to improve subsequent disparity calculation
    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    # compute disparity image from undistorted and rectified stereo images
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)
    disparity = stereoProcessor.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)
    cv2.filterSpeckles(disparity, 0, 4000, maximum_disparity - noise_filter)

    # threshold the disparity so that it goes from 0 to max disparity
    _, disparity = cv2.threshold(disparity, 0, maximum_disparity * 16,
                                 cv2.THRESH_TOZERO)

    # scale the disparity to 8-bit for viewing
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # crop the area not seen by *both* cameras and the area with the car bonnet
    disparity_scaled = utils.crop_image(disparity_scaled, 0, 390, 135, width)

    return disparity_scaled
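Note: this snippet relies on module-level objects defined elsewhere in its project (`cv2`, `np`, `stereoProcessor`, plus the author's `convert_to_grayscale` and `utils.crop_image` helpers). A minimal sketch of the globals it appears to assume, modelled on the SGBM setup used in the later examples; the exact parameters of the original module may differ:

import cv2
import numpy as np

# assumed module-level setup (mirrors the values used in the later examples, not the author's exact config)
max_disparity = 128  # numDisparities must be divisible by 16
stereoProcessor = cv2.StereoSGBM_create(0, max_disparity, 21)  # minDisparity, numDisparities, blockSize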
Example #2
def disparity(grayL, grayR, max_disparity, crop_disparity):
    # compute disparity image from undistorted and rectified stereo images
    # that we have loaded
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)
    disparity = stereoProcessor.compute(grayL, grayR)
    # filter out noise and speckles (adjust parameters as needed)
    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)
    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available
    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)
    # crop the disparity to chop out the left part where there is no disparity
    # (as this area is not seen by both cameras) and also
    # chop out the bottom area (where we see the front of the car bonnet)
    if (crop_disparity):
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]
    # rescale to the full 0->255 range based on the number
    # of disparities in use for the stereo part
    disparity_scaled = (disparity_scaled * (256. / max_disparity)).astype(
        np.uint8)
    return disparity_scaled
Example #3
def disparity_map(l_img_g, r_img_g, WLS=False):

    max_disparity = 128
    left_matcher = cv2.StereoSGBM_create(0, max_disparity, 21)

    if WLS:
        right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
        lmbda = 80000
        sigma = 1.2
        wls_filter = cv2.ximgproc.createDisparityWLSFilter(
            matcher_left=left_matcher)
        wls_filter.setLambda(lmbda)
        wls_filter.setSigmaColor(sigma)

    disparityL = left_matcher.compute(l_img_g, r_img_g)
    if WLS:
        disparityR = right_matcher.compute(r_img_g, l_img_g)  # the right matcher takes (right, left)
        disparity = wls_filter.filter(disparityL, l_img_g, None, disparityR)
    else:
        disparity = disparityL

    dispNoiseFilter = 5
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    disparity_scaled = disparity_scaled[0:390]

    return disparity_scaled
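`cv2.ximgproc` (which provides `createRightMatcher` and the WLS disparity filter) ships with the opencv-contrib-python package rather than the base OpenCV build. A hedged usage sketch for the function above, with placeholder file names (assumes `import cv2`):

# hypothetical usage - the file names are placeholders
l_img_g = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
r_img_g = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

disp_sgbm = disparity_map(l_img_g, r_img_g)            # SGBM only
disp_wls = disparity_map(l_img_g, r_img_g, WLS=True)   # SGBM + WLS smoothing (needs opencv-contrib)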
Example #4
    def create_disparity_map(self,
                             left_img,
                             right_img,
                             is_rectify_enabled=True):

        # left_img = cv2.imread(left_img_path, cv2.IMREAD_GRAYSCALE)
        # right_img = cv2.imread(right_img_path, cv2.IMREAD_GRAYSCALE)

        if is_rectify_enabled:
            left_img_rect, right_img_rect = self.rectification(
                left_img, right_img)  # Rectification using Homography
            # print("rectified")
        else:
            left_img_rect = left_img
            right_img_rect = right_img

        self.disparity = self.stereoProcessor.compute(left_img_rect,
                                                      right_img_rect)
        cv2.filterSpeckles(self.disparity, 0, 60, self.max_disparity)

        _, self.disparity = cv2.threshold(self.disparity, 0,
                                          self.max_disparity * 16,
                                          cv2.THRESH_TOZERO)
        disparity_scaled = (self.disparity / 16.).astype(np.uint8)
        # frame = cv2.applyColorMap(disparity_scaled, cv2.COLORMAP_HOT)
        # frame[frame > 200] = 0
        # cv2.imshow("Scaled Disparity stream", frame)

        disparity_colour_mapped = cv2.applyColorMap(
            (disparity_scaled * (256. / self.max_disparity)).astype(np.uint8),
            cv2.COLORMAP_HOT)
        cv2.imshow("Disparity", disparity_colour_mapped)
        cv2.imshow("rectified left", left_img_rect)
        cv2.imshow("rectified right", right_img_rect)
def calculate_disparity(left_image, right_image):
    grayL = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)

    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    # compute disparity image from undistorted and rectified stereo images
    # that we have loaded
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)

    disparity = STEREO_PROCESSOR.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)

    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, MAX_DISPARITY - dispNoiseFilter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available
    #
    _, disparity = cv2.threshold(disparity, 0, MAX_DISPARITY * 16,
                                 cv2.THRESH_TOZERO)
    disparity = (disparity / 16.).astype(np.uint8)

    return disparity
Example #6
def get_distance_disparity(filename_left):
    # gets the left and right filenames
    full_path_filename_left, full_path_filename_right = get_lr_pair(
        filename_left)

    # returns the images from the filenames
    imgL, imgR = get_lr_images(filename_left, full_path_filename_left,
                               full_path_filename_right)
    # adds preprocessing to the images
    grayL, grayR = lr_preprocessing(imgL, imgR)
    disparity = distanceProcessor.compute(grayL, grayR)

    dispNoiseFilter = 5  # increase for more aggressive filtering
    # remove disparity noise
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    width = np.size(disparity_scaled, 1)
    disparity_scaled = disparity_scaled[0:390, 135:width]

    return disparity_scaled
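Example #7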
def FilterSpeckles(root, suffix):
    """
    Batch processing: run cv2.filterSpeckles over every image in root that ends with suffix.
    """
    if not os.path.isdir(root):
        print("[Err]: invalid root.")
        return

    for f_name in os.listdir(root):
        if not f_name.endswith(suffix):
            continue

        f_path = root + "/" + f_name
        img = cv2.imread(f_path, cv2.IMREAD_UNCHANGED)
        if img is None:
            print("[Err]: empty image.")
            return

        if len(img.shape) == 2:
            H, W = img.shape
        elif len(img.shape) == 3:
            H, W, N = img.shape
        maxSpeckleSize = int((H * W) / 810.0 + 0.5)
        maxDiff = 10

        # ------------- remove speckles
        out = copy.deepcopy(img)
        cv2.filterSpeckles(out, 0, maxSpeckleSize, maxDiff)
        # -------------

        # save output
        out_name = f_name.replace(".jpg", "_filter.jpg")
        out_path = root + "/" + out_name
        cv2.imwrite(out_path, out)
        print("%s exported" % (out_name))
def edit_disparity(disparity, max_disparity, dispNoiseFilter):
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)
    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)
    if crop_disparity:
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]
    return disparity_scaled
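Example #9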
def filter_speckles(disparity):
    """
    Filter out noise and speckles.
    """
    disparity_noise_filter = 5
    max_speckle_size = 4000

    cv2.filterSpeckles(disparity, 0, max_speckle_size,
                       max_disparity - disparity_noise_filter)
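For reference, `cv2.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff)` modifies an 8-bit or 16-bit single-channel disparity image in place: it groups neighbouring pixels whose values differ by at most maxDiff into blobs, and any blob containing at most maxSpeckleSize pixels is overwritten with newVal. A small synthetic demonstration (not taken from the original project):

import cv2
import numpy as np

disp = np.full((10, 10), 5 * 16, dtype=np.int16)  # uniform disparity of 5, in SGBM-style x16 fixed point
disp[4:6, 4:6] = 90 * 16                          # a 4-pixel outlier blob (a "speckle")
cv2.filterSpeckles(disp, 0, 20, 2 * 16)           # blobs of <= 20 px that jump by > 2 disparities -> 0
print(disp[4:6, 4:6])                             # the speckle has been painted with newVal (0), in place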
Example #10
def calc_disparity_map(gl_img, gr_img):
    disparity = stereoProcessor.compute(gl_img, gr_img)

    dispNoiseFilter = 8  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity = (disparity / 16.)

    return disparity
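Example #11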
def compute_disparity(filepath, M1, d1, M2, d2, R, T, img_shape, window_size):
    R1, R2, P1, P2, Q, valid_pix_roi1, _valid_pix_roi2 = cv2.stereoRectify(
        M1, d1, M2, d2, img_shape, R, T, alpha=-1)

    map1_x, map1_y = cv2.initUndistortRectifyMap(M1, d1, R1, P1, img_shape,
                                                 cv2.CV_32FC1)
    map2_x, map2_y = cv2.initUndistortRectifyMap(M2, d2, R2, P2, img_shape,
                                                 cv2.CV_32FC1)

    img_left = glob.glob(filepath + '/rectified_left/*.jpg')
    img_right = glob.glob(filepath + '/rectified_right/*.jpg')

    img_left.sort()
    img_right.sort()

    for i, fname in enumerate(img_left):
        img_l = cv2.imread(img_left[i])
        img_r = cv2.imread(img_right[i])
        gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
        gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

        undistorted_rectified_l = cv2.remap(gray_l, map1_x, map1_y,
                                            cv2.INTER_LINEAR)
        undistorted_rectified_r = cv2.remap(gray_r, map2_x, map2_y,
                                            cv2.INTER_LINEAR)

        max_disparity = 128
        #        window_size = 21
        #        num = 5
        stereo_processor = cv2.StereoSGBM_create(
            0, max_disparity, window_size, 8 * window_size * window_size,
            32 * window_size * window_size)
        #        stereo_processor = cv2.StereoBM_create(16 * num, window_size)

        disparity = stereo_processor.compute(undistorted_rectified_l,
                                             undistorted_rectified_r)
        cv2.filterSpeckles(disparity, 0, 4000, 128)

        disparity_scaled = (disparity / 16.).astype(np.uint8) + abs(
            disparity.min())
        #        disparity_scaled = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

        threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., Q)

        cv2.imshow('img_l', undistorted_rectified_l)
        cv2.imshow('img_r', undistorted_rectified_r)

        #display disparity
        cv2.imshow('img_disparity', disparity_scaled)

        cv2.waitKey(0)

        cv2.destroyAllWindows()
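The `threeD` array returned by `cv2.reprojectImageTo3D` is H x W x 3: `threeD[y, x]` holds the (X, Y, Z) coordinates of that pixel in the calibration units, and pixels with no valid disparity come out with extremely large (or infinite) Z. An illustrative follow-on using the loop's `threeD` and `disparity` arrays (assumes numpy as np; not part of the original script):

# Z channel = depth; keep only pixels that had a positive disparity
depth = threeD[:, :, 2]
valid = (disparity > 0)
print("median scene depth:", np.median(depth[valid]))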
Example #12
def depth_map(imgL, imgR):
    """ Depth map calculation. Works with SGBM and WLS. Need rectified images,
    #returns depth map ( left to right disparity ) """
    # SGBM Parameters -----------------
    window_size = wsize_.get()  # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely

    stereo = cv.StereoSGBM_create(
        minDisparity=minD_.get(),
        numDisparities=numD_.get(),  # max_disp has to be dividable by 16 f. E. HH 192, 256
        blockSize=window_size,
        P1=P1__.get() * 3 * window_size,
        P2=P2__.get() * 3 * window_size,
        disp12MaxDiff=d12Maxd_.get(),
        uniquenessRatio=uniqR_.get(),
        speckleWindowSize=speckWsize_.get(),
        speckleRange=speckR_.get(),
        preFilterCap=preFC_.get(),
        mode=cv.STEREO_SGBM_MODE_SGBM_3WAY
    )
    
    leftGr = cv.cvtColor(imgL, cv.COLOR_BGR2GRAY)
    rightGr = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)
##    cv.imshow("Gray Left", leftGr)
##    cv.imshow("Gray Right", rightGr)
    
    disparity = stereo.compute(leftGr, rightGr)#.astype(np.float32)/16
    
    cv.filterSpeckles(disparity, 0, 32, numD_.get())
    Threshold_Demo(0,disparity,disparity)
    #_, disparity = cv.threshold(disparity, 0, numD_.get(), cv.THRESH_TOZERO)
    # Call the function to initialize
    
    disparity_scaled = (disparity / 16.).astype(np.uint8)
##    # FILTER Parameters
##    lmbda = 80000
##    sigma = 1.3
##    visual_multiplier = 6
##
##    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
##    wls_filter.setLambda(lmbda)
##
##    wls_filter.setSigmaColor(sigma)
##    displ = left_matcher.compute(imgL, imgR)  # .astype(np.float32)/16
##    dispr = right_matcher.compute(imgR, imgL)  # .astype(np.float32)/16
##    displ = np.int16(displ)
##    dispr = np.int16(dispr)
##    filteredImg = wls_filter.filter(displ, imgL, None, dispr)  # important to put "imgL" here!!!
##
##    filteredImg = cv.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);
##    filteredImg = np.uint8(filteredImg)

    return disparity_scaled
Example #13
def postprocess(leftDisparity, rightDisparity, originalLeftImage):
    ''' Post processing on disparity map for more accurate readings '''
    disparity = weightedLeastSquareFilter.filter(
        leftDisparity, originalLeftImage, None, rightDisparity)

    noiseFilter = 5
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - noiseFilter)

    _, disparity = cv2.threshold(
        disparity, 0, max_disparity * 16, cv2.THRESH_TOZERO)
    disparity = (disparity / 16.).astype(np.uint8)

    return disparity
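`weightedLeastSquareFilter`, `max_disparity` and the two matchers are globals defined elsewhere in the original project. A minimal sketch of the setup this function appears to assume, modelled on the WLS configuration in Examples #3 and #22; the lambda and sigma values here are illustrative rather than the author's:

import cv2

max_disparity = 128
leftMatcher = cv2.StereoSGBM_create(0, max_disparity, 21)
rightMatcher = cv2.ximgproc.createRightMatcher(leftMatcher)

weightedLeastSquareFilter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=leftMatcher)
weightedLeastSquareFilter.setLambda(80000)     # smoothness strength (illustrative value)
weightedLeastSquareFilter.setSigmaColor(1.2)   # edge sensitivity (illustrative value)

# leftDisparity / rightDisparity would then come from:
#   leftDisparity = leftMatcher.compute(grayL, grayR)
#   rightDisparity = rightMatcher.compute(grayR, grayL)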
Example #14
def get_disparity(l_image, r_image):
    """ Calculates the image disparity between a left and right image.

        Parameters:
            l_image (numpy.array) - The left image.
            r_image (numpy.array) - The right image.

        Attributes:
            stereo_processor (cv2.StereoSGBM) - A modified SGBM algorithm.
            l_gray (numpy.array)              - The left grayscale image.
            r_gray (numpy.array)              - The right grayscale image.
            disparity (numpy.array)           - The image disparity.
            disparity_scaled (numpy.array)    - The image disparity scaled.

        Returns:
            The disparity between the left and right images.

    """

    # Initialise the modified implementation of the SGBM algorithm.
    stereo_processor = cv2.StereoSGBM_create(0, params.MAX_DISPARITY, 21)

    # Get the left and right grayscale images.
    l_gray = cv2.cvtColor(l_image, cv2.COLOR_BGR2GRAY)
    r_gray = cv2.cvtColor(r_image, cv2.COLOR_BGR2GRAY)

    # Subjectively appears to improve subsequent disparity calculation.
    l_gray = np.power(l_gray, 0.75).astype('uint8')
    r_gray = np.power(r_gray, 0.75).astype('uint8')

    # Compute the disparity between the left and right images.
    disparity = stereo_processor.compute(l_gray, r_gray)

    # Filter out the salt and pepper noise.
    cv2.filterSpeckles(disparity, 0, 4000,
                       params.MAX_DISPARITY - params.DISPARITY_NOISE_FILTER)

    # Scale the disparity to 8-bit for viewing.
    _, disparity = cv2.threshold(disparity, 0, params.MAX_DISPARITY * 16,
                                 cv2.THRESH_TOZERO)

    # Divide by 16 and convert to 8-bit image.
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # Display the image.
    output_img = cv2.resize(disparity_scaled, (960, 520))
    cv2.imshow("disparity",
               (output_img * (256. / params.MAX_DISPARITY)).astype(np.uint8))
    key = cv2.waitKey(200)

    return disparity_scaled
Example #15
def computeDisparity(img_rect1, img_rect2):
    max_disparity = 128
    min_disparity = 0
    num_disparities = max_disparity - min_disparity  # divisible by 16
    window_size = 21  # odd number in 3--11 range
    stereoProcessor = cv2.StereoSGBM_create(min_disparity, num_disparities,
                                            window_size)
    disparity = stereoProcessor.compute(img_rect1, img_rect2)

    cv2.filterSpeckles(disparity, 0, 400, max_disparity - 5)
    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16).astype(np.uint8)
    return disparity_scaled
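Example #16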
def get_disparity(left_image, right_image):
    disparity = stereoProcessor.compute(left_image, right_image)
    # filter out noise and speckles (adjust parameters as needed)

    disp_noise_filter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - disp_noise_filter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available
    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    scaled = (disparity / 16.).astype(np.uint8)
    return scaled
Example #17
def calculateDisparity(filename_left, master_path_to_dataset, skip_forward_file_pattern):

    # Create paths to read files

    full_path_directory_left = os.path.join(master_path_to_dataset, directory_to_cycle_left)
    full_path_directory_right = os.path.join(master_path_to_dataset, directory_to_cycle_right)

    filename_right = filename_left.replace("_L", "_R")
    full_path_filename_left = os.path.join(full_path_directory_left, filename_left)
    full_path_filename_right = os.path.join(full_path_directory_right, filename_right)

    if ('.png' in filename_left) and os.path.isfile(full_path_filename_right):

        # Read in images

        imgL = cv2.imread(full_path_filename_left, cv2.IMREAD_COLOR)

        imgR = cv2.imread(full_path_filename_right, cv2.IMREAD_COLOR)

        # Convert to greyscale

        grayL = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
        grayR = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)

        grayL = np.power(grayL, 0.75).astype('uint8')
        grayR = np.power(grayR, 0.75).astype('uint8')

        # Compute disparity

        disparity = stereoProcessor.compute(grayL,grayR)

        # Filter noise

        dispNoiseFilter = 5
        cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

        _, disparity = cv2.threshold(disparity,0, max_disparity * 16, cv2.THRESH_TOZERO)
        disparity_scaled = (disparity / 16.).astype(np.uint8)

        # Return file's disparity image

        return disparity_scaled
    return None

#cv2.imshow("disparity", calculateDisparity('1506942473.484027_L.png', "../TTBB-durham-02-10-17-sub10", ""))
#cv2.waitKey()
Example #18
def compute_disparity(imgL, imgR, display=False):
    # setup the disparity stereo processor to find a maximum of 128 disparity values
    # (adjust parameters if needed - this will affect processing speed)

    grayL = imgL
    grayR = imgR
    if imgL.ndim == 3:  # if not already grayscale (i.e. the image still has a channel dimension)
        grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
        grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    max_disparity = 128
    stereoProcessor = cv2.StereoSGBM_create(0, max_disparity, 21)

    # compute disparity image from undistorted and rectified stereo images
    # that we have loaded
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)
    disparity = stereoProcessor.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)

    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # display image (scaling it to the full 0->255 range based on the number
    # of disparities in use for the stereo part)

    if display:
        cv2.imshow("disparity", (disparity_scaled *
                                 (255. / max_disparity)).astype(np.uint8))
        cv2.waitKey(0)
    return disparity_scaled
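Example #19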
def stereo_disparity(filepath_left, filepath_right, crop_disparity=False):
    """Calculate the stereo disparity between two images

    Args:
        filepath_left (string): location of the left image
        filepath_right (string): location of the right image
        crop_disparity (bool): crop the disparity to chop out the left part with
            no disparity (as this area is not seen by both cameras) and the
            bottom area (where we see the front of the car bonnet)

    Returns:
        disparity: the stereo disparity of the two images
    """
    max_disparity = 128
    stereoProcessor = cv2.StereoSGBM_create(0, max_disparity, 21)

    imgL = cv2.imread(filepath_left, cv2.IMREAD_COLOR)
    imgR = cv2.imread(filepath_right, cv2.IMREAD_COLOR)

    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    # preprocess images by raising to a power - subjectively better response
    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    disparity = stereoProcessor.compute(grayL, grayR)

    # apply a noise filter
    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    if (crop_disparity):
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]

    output_image = (disparity_scaled * (256. / max_disparity)).astype(np.uint8)

    return output_image
Example #20
    def readText(self,
                 image,
                 ocr_config,
                 thresh_val,
                 speck_size=1,
                 scale_x=1,
                 scale_y=1,
                 border_size=100,
                 shrink_border=5,
                 invert=False,
                 name=False,
                 debug=False):

        # convert the input image to grayscale for better reading
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # threshold the image to prepare it for OCR reading
        ret, image = cv2.threshold(image, thresh_val, 255, cv2.THRESH_BINARY)

        # filter out unexpected pixel speckles
        cv2.filterSpeckles(image, 0, speck_size, speck_size)

        # invert image colours if necessary
        if invert:
            image = cv2.bitwise_not(image)

        # compute the new dimensions for the image to counteract small natural dimensions
        width = int(image.shape[1] * scale_x)
        height = int(image.shape[0] * scale_y)

        # re-scale the image
        image = cv2.resize(image, (width, height),
                           interpolation=cv2.INTER_AREA)

        # trim the white border from around the image, to increase OCR quality
        image = self._trimBorder(image, border_size, shrink_border, name)

        if debug:
            cv2.imwrite('./debug_data/debug_text.png', image)

        # call PyTesseract reader function and return the found text
        return pytesseract.image_to_string(image,
                                           lang='eng',
                                           config=ocr_config)
Example #21
def run(batch_idx, left_path, right_path):
    model.eval()

    a = cv2.imread(left_path, cv2.IMREAD_COLOR)
    b = cv2.imread(right_path, cv2.IMREAD_COLOR)
    sgm = cv2.StereoSGBM_create(
        0,    # minDisparity
        128,  # numDisparities
        11,   # blockSize
        uniquenessRatio=15,
    )

    disp = sgm.compute(a, b)
    cv2.filterSpeckles(disp, 0, 40, 128)
    _, disp = cv2.threshold(disp, 0, 128 * 16, cv2.THRESH_TOZERO)
    disp_scaled = (disp / 16.)

    display_and_save(batch_idx, disp_scaled, 0, 0)
    print(batch_idx)
Example #22
def Disparity(img0, img1):
    # The cv2.StereoSGBM function was used here.
    # source: http://timosam.com/python_opencv_depthimage
    # adapted

    window_size = 11
    stereoE = cv2.StereoSGBM_create(minDisparity=16, numDisparities=80,
                                    blockSize=window_size,
                                    P1=968,
                                    P2=3872,
                                    disp12MaxDiff=1,
                                    uniquenessRatio=15,
                                    speckleWindowSize=150,
                                    speckleRange=2,
                                    preFilterCap=63,
                                    mode=1)
    
    stereoD = cv2.ximgproc.createRightMatcher(stereoE)

    # FILTER Parameters
    lmbda = 8000
    sigma = 2
    visual_multiplier = 1.0
    
    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=stereoE)
    wls_filter.setLambda(lmbda)
    wls_filter.setSigmaColor(sigma)

    displ = stereoE.compute(img0, img1)
    dispr = stereoD.compute(img1, img0) 
    displ = np.int16(displ)
    dispr = np.int16(dispr)

    filteredImg = wls_filter.filter(displ, img0, None, dispr) 
    cv2.filterSpeckles(filteredImg, 16, 4000, 256) 

    filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
    filteredImg = np.uint8(filteredImg)

    return filteredImg



    
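Example #23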
def Test():
    """
    """
    dir = "E:/office/dense/stereo/depth_maps/dslr_images_undistorted"
    # dir = "F:/ETH3DResults/pipes/result/depth_maps"
    # img_path = dir + "/" + "DSC_0635.JPG.geometric.bin.jpg"
    img_path = dir + "/" + "DSC_0219.JPG.geometric.bin_win6.jpg"

    if not os.path.isfile(img_path):
        print("[Err]: invalid file path")
        return

    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    if img is None:
        print("[Err]: empty image.")
        return

    RATIO = 0.35

    # H, W = img.shape
    if len(img.shape) == 2:
        H, W = img.shape
    elif len(img.shape) == 3:
        H, W, N = img.shape

    img_rs = cv2.resize(img, (int(W * RATIO), int(H * RATIO)),
                        interpolation=cv2.INTER_LINEAR)
    cv2.imshow("img", img_rs)

    maxSpeckleSize = int((H * W) / 810.0 + 0.5)
    maxDiff = 10

    # ------------- remove speckles
    out = copy.deepcopy(img)
    cv2.filterSpeckles(out, 0, maxSpeckleSize, maxDiff)  # no wonder uint8 can be processed too...
    # -------------

    out_rs = cv2.resize(out, (int(W * RATIO), int(H * RATIO)),
                        interpolation=cv2.INTER_LINEAR)
    cv2.imshow("filtered", out_rs)
    cv2.waitKey()
Example #24
def get_depth_points(imgL, imgR, is_sparse, use_fg_mask):

    # convert to grayscale

    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    # Different methods based on sparse and dense implementations
    if is_sparse:
        grayL = preprocess_sparse(grayL)
        grayR = preprocess_sparse(grayR)
        disparity = get_sparse_disp(grayL, grayR)

    else:
        grayL = preprocess_dense(grayL)
        grayR = preprocess_dense(grayR)
        # Disparity using left and right matching + wls filter
        disparity = disp_with_wls_filtering(grayL, grayR)
        dispNoiseFilter = 10  # increase for more aggressive filtering
        # filter out noise and speckles (adjust parameters as needed)
        cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

        if use_fg_mask:
            fg_mask = get_fg_mask(imgL)
            fg_mask = post_process_for_bg(fg_mask)
            # Only keep foreground values
            disparity = (fg_mask / 255).astype(np.uint8) * disparity

    _, disparity_scaled = cv2.threshold(disparity, 0, max_disparity * 16,
                                        cv2.THRESH_TOZERO)

    if is_sparse:
        disparity_scaled = disparity
    else:
        disparity_scaled = (disparity / 16).astype(np.uint8)

    cv2.imshow('Dense disparity',
               (disparity_scaled * (256 / max_disparity)).astype(np.uint8))
    points = project_disparity_to_2d_with_depth(disparity_scaled,
                                                max_disparity)
    return points
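`project_disparity_to_2d_with_depth` is the author's helper and is not shown here. For context, the depth it recovers comes from the standard stereo relation Z = f * B / d (focal length in pixels times camera baseline, divided by disparity). A minimal sketch with hypothetical calibration constants; these are placeholders, not the dataset's real values:

# hypothetical calibration - replace with the real camera parameters
focal_length_px = 400.0  # focal length, in pixels
baseline_m = 0.21        # distance between the two cameras, in metres

def disparity_to_depth(disparity_px):
    # pinhole stereo relation: Z = f * B / d (undefined where d == 0)
    return (focal_length_px * baseline_m) / disparity_px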
Example #25
def calculateDisparity(imgL, imgR):
    #calculates the disparity image for a pair of stereo images

    stereoProcessor = cv2.StereoSGBM_create(minDisparity=0,
                                            numDisparities=max_disparity,
                                            blockSize=21)

    #convert images to greyscale
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    #to reduce the impact of variable illumination
    clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(8, 8))
    grayL = clahe.apply(grayL)
    grayR = clahe.apply(grayR)

    #grayL = cv2.equalizeHist(grayL)
    #grayR = cv2.equalizeHist(grayR)

    #raise to power to improve calculation
    grayL = np.power(grayL, 0.75).astype('uint8')
    grayR = np.power(grayR, 0.75).astype('uint8')

    disparity = stereoProcessor.compute(grayL, grayR)

    #reduce noise
    dispNoiseFilter = 5
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16,
                                 cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    #crops unique sections of each camera
    if (crop_disparity):
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]

    return disparity_scaled
Example #26
def disparity(illumeL, illumeR):
    # convert to grayscale (as the disparity matching works on grayscale)
    grayL = cv2.cvtColor(illumeL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(illumeR, cv2.COLOR_BGR2GRAY)

    # compute disparity image from the undistorted and rectified stereo images
    # that we have loaded (returned scaled by 16)

    disparity = stereoProcessor.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)

    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16, cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # crop the disparity to chop out the left part where there is no disparity
    # (as this area is not seen by both cameras) and also
    # chop out the bottom area (where we see the front of the car bonnet)

    if crop_disparity:
        width = np.size(disparity_scaled, 1)
        disparity_scaled = disparity_scaled[0:390, 135:width]

    # rescale to the full 0->255 range based on the number
    # of disparities in use for the stereo part

    disparity_scaled = (disparity_scaled * (256. / max_disparity)).astype(np.uint8)
        
    return disparity_scaled
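Example #27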
    # remember to convert to grayscale (as the disparity matching works on grayscale)
    # N.B. need to do for both as both are 3-channel images

    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    # compute disparity image from undistorted and rectified stereo images
    # that we have loaded
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)

    disparity = stereoProcessor.compute(grayL, grayR)

    # filter out noise and speckles (adjust parameters as needed)

    dispNoiseFilter = 5  # increase for more aggressive filtering
    cv2.filterSpeckles(disparity, 0, 4000, max_disparity - dispNoiseFilter)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16, cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # display image (scaling it to the full 0->255 range based on the number
    # of disparities in use for the stereo part)

    cv2.imshow("disparity", (disparity_scaled * (255. / max_disparity)).astype(np.uint8))
Example #28
    # remember to convert to grayscale (as the disparity matching works on grayscale)

    grayL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)

    # undistort and rectify based on the mappings (could improve interpolation and image border settings here)
    # N.B. mapping works independent of the number of image channels

    undistorted_rectifiedL = cv2.remap(grayL, mapL1, mapL2, cv2.INTER_LINEAR)
    undistorted_rectifiedR = cv2.remap(grayR, mapR1, mapR2, cv2.INTER_LINEAR)

    # compute disparity image from undistorted and rectified versions
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)

    disparity = stereoProcessor.compute(undistorted_rectifiedL, undistorted_rectifiedR)
    cv2.filterSpeckles(disparity, 0, 4000, 128)

    # scale the disparity to 8-bit for viewing

    disparity_scaled = (disparity / 16.).astype(np.uint8) + abs(disparity.min())

    # display image

    cv2.imshow(windowNameL, undistorted_rectifiedL)
    cv2.imshow(windowNameR, undistorted_rectifiedR)

    # display disparity

    cv2.imshow(windowNameD, disparity_scaled)

    # start the event loop - essential
    # remember to convert to grayscale (as the disparity matching works on grayscale)

    grayL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)

    # undistort and rectify based on the mappings (could improve interpolation and image border settings here)
    # N.B. mapping works independent of the number of image channels

    undistorted_rectifiedL = cv2.remap(grayL, mapL1, mapL2, cv2.INTER_LINEAR)
    undistorted_rectifiedR = cv2.remap(grayR, mapR1, mapR2, cv2.INTER_LINEAR)

    # compute disparity image from undistorted and rectified versions
    # (which for reasons best known to the OpenCV developers is returned scaled by 16)

    disparity = stereoProcessor.compute(undistorted_rectifiedL, undistorted_rectifiedR)
    cv2.filterSpeckles(disparity, 0, 40, max_disparity)

    # scale the disparity to 8-bit for viewing
    # divide by 16 and convert to 8-bit image (then range of values should
    # be 0 -> max_disparity) but in fact is (-1 -> max_disparity - 1)
    # so we fix this also using an initial threshold between 0 and max_disparity
    # as disparity=-1 means no disparity available

    _, disparity = cv2.threshold(disparity, 0, max_disparity * 16, cv2.THRESH_TOZERO)
    disparity_scaled = (disparity / 16.).astype(np.uint8)

    # display image

    cv2.imshow(windowNameL, undistorted_rectifiedL)
    cv2.imshow(windowNameR, undistorted_rectifiedR)
	
	rvalLeft, frameLeft = vcLeft.retrieve()

	rvalRight, frameRight = vcRight.retrieve()
	
	frameLeftNew = cv2.cvtColor(frameLeft, cv2.COLOR_BGR2GRAY)

	frameRightNew = cv2.cvtColor(frameRight, cv2.COLOR_BGR2GRAY)

	leftRectified = cv2.remap(frameLeftNew, leftMapX, leftMapY, cv2.INTER_LINEAR)

	rightRectified = cv2.remap(frameRightNew, rightMapX, rightMapY, cv2.INTER_LINEAR)

	disparity = stereo.compute(leftRectified, rightRectified)

	cv2.filterSpeckles(disparity, 0, 6000, maximumDisparities)

	cv2.imshow("Normalized Disparity", (disparity / 16.0 - minimumDisparities) / maximumDisparities)
	
	cv2.imshow("Left Camera", leftRectified)

	cv2.imshow("Right Camera", rightRectified)
		

	key = cv2.waitKey(10) # Give the frame some time

	if key == 27:

		break

print("Finished!")
Example #31
        if True:
            marker_img = np.zeros(
                roi_g_0.shape)  # create black image for display

            # draw circles
            # cv2.circle(marker_img, (int(np.round(by0)), int(np.round(bx0))), 10, 255, 20)
            cv2.circle(marker_img, (int(np.round(gy0)), int(np.round(gx0))),
                       30, 128, 40)

            cv2.imshow('Markers', marker_img)
            cv2.imshow('roi', roi_g_0)
        """-------------------------------------------------------------------------"""
        """Disparity map calculation"""
        """-------------------------------------------------------------------------"""
        disparity_map = stereo_disparity.compute(img0_rm, img1_rm)
        cv2.filterSpeckles(disparity_map, 0, 64, 32)  #filter out noise
        # cv2.filterSpeckles(disparity_map, 0, 512, 32)

        # compute disparity image from undistorted and rectified versions
        # (which for reasons best known to the OpenCV developers is returned scaled by 16)
        # credit goes to Toby Breckon, Durham University, UK for sharing this caveat
        # disparity_scaled = disparity_map
        # set_trace()
        disparity_scaled = (disparity_map / 16.).astype(np.float32) + abs(
            disparity_map.min())
        # set_trace()

        displ = left_matcher.compute(img0, img1).astype(np.float32) / 16.
        dispr = right_matcher.compute(img0, img1).astype(np.float32) / 16.
        displ = np.int16(displ)
        dispr = np.int16(dispr)