Example #1
def test_disparity_with_bm(drive=11, frame=0):
    import cv2

    xyd = load_disparity_points(drive, frame, color=True)
    disp = np.zeros(image_shape, dtype=np.uint8)
    for x, y, d in np.round(xyd).astype(int):
        disp[y, x] = d

    # compare with block matching
    left, right = load_stereo_frame(drive, frame, color=False)

    ndisp = 128
    bm = cv2.createStereoBM(numDisparities=ndisp, blockSize=9)
    bm.setPreFilterSize(41)
    bm.setPreFilterCap(31)
    bm.setTextureThreshold(20)
    bm.setUniquenessRatio(10)
    bm.setSpeckleWindowSize(100)
    bm.setSpeckleRange(32)

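    # compute() returns CV_16S disparities scaled by 16, hence the division by 16 below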
    bm_disp = bm.compute(left, right) / 16

    # compute difference
    diff = np.zeros(image_shape)
    for x, y, d in np.round(xyd).astype(int):
        if bm_disp[y, x] >= 0:
            diff[y, x] = bm_disp[y, x] - d

    # downsample for visualization purposes
    diff2 = np.zeros((image_shape[0] // 2, image_shape[1] // 2))
    for i in range(diff2.shape[0]):
        for j in range(diff2.shape[1]):
            r = diff[2 * i:2 * i + 2, 2 * j:2 * j + 2].flatten()
            ind = np.argmax(np.abs(r))
            diff2[i, j] = r[ind]

    # custom colormap
    from matplotlib.colors import LinearSegmentedColormap
    g = 1.0
    cdict1 = {
        'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)),
        'green': ((0.0, g, g), (0.5, 0.0, 0.0), (1.0, g, g)),
        'blue': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 0.0, 0.0)),
    }
    cmap = LinearSegmentedColormap('BlueRed', cdict1)

    plt.figure(1)
    plt.clf()
    plt.subplot(311)
    plt.imshow(disp)
    plt.colorbar()
    plt.subplot(312)
    plt.imshow(bm_disp)
    plt.colorbar()
    plt.subplot(313)
    plt.imshow(diff2, cmap=cmap, interpolation='nearest')
    plt.colorbar()

    plt.show()
Example #2
 def test_451(self):
     # 45.1 basics
     imgL = cv2.imread('tsukuba_l.png', 0)
     imgR = cv2.imread('tsukuba_r.png', 0)
     stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
     disparity = stereo.compute(imgL, imgR)
     plt.imshow(disparity, 'gray')
     plt.show()
     print("")
Example #3
    def processImage(self, imleft, imright):

        left = self.bridgeleft.imgmsg_to_cv2(imleft)
        right = self.bridgeright.imgmsg_to_cv2(imright)

        stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
        disp = stereo.compute(left, right)
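        # Assumption: compute() returns 16-bit fixed-point disparities (scaled by 16);
        # scale to 8-bit here so the "mono8" encoding below matches the published image.
        disp = cv2.convertScaleAbs(disp, alpha=1.0 / 16.0)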

        image_ros_msg = self.bridge.cv2_to_imgmsg(disp, "mono8")
        self.disp_pub.publish(image_ros_msg)
Example #4
     def __init__(self):

         self.bridge = CvBridge()
         self.stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
         self.disp_pub = rospy.Publisher("disparity", Image, queue_size=1)

         # queue_size=1 keeps only the newest message on each topic, so stale frames are dropped
         self.imleft_sub = rospy.Subscriber("/left/image_rect_color", Image, self.imleft, queue_size=1)

         self.imright_sub = rospy.Subscriber("/right/image_rect_color", Image, self.imright, queue_size=1)

         self.last_right_image = None
         self.last_left_image = None
         
         rospy.loginfo("disparity initialized")
Example #5
comp = imgComp(leftImg, rightImg)
array = comp.getPatternArray(leftImg, blockSize)[0]
#print array
leftGrey = cv2.cvtColor(leftImg, cv2.COLOR_BGR2GRAY)
rightGrey = cv2.cvtColor(rightImg, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(leftGrey, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(leftImg, contours, -1, (0, 255, 0), 3)
#cv2.imshow('taa',leftGrey)
#cv2.imshow('taa',leftImg)
#cv2.waitKey(0)

#comp.comparePatternToImg(array,rightImg,blockSize, viewRange = 1, tolerance = 15, \
#CameraDistanceX = -13, CameraDistanceY = 10)

stereo = cv2.createStereoBM(numDisparities=16, blockSize=5)
disparity = stereo.compute(leftGrey, rightGrey)
plt.imshow(disparity, 'gray')
plt.show()

end = time.time()
print("FINISHED: RUN TIME:", end - start)

### calculate the proper cameraDistance and test it with code

### NEXT: work out some equations for occlusion
## Group objects together by color (a rough sketch follows below)
### segregate each contour, then match contours of exactly the same color into one plane;
##### if a plane is interrupted, the object is partly behind another one, but how far behind is still unclear
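
# A rough sketch of the color-grouping idea above (hypothetical helper, not part of the
# original pipeline): bucket contours by their mean BGR color so contours sharing a
# color can later be treated as one plane.
import cv2
import numpy as np

def group_contours_by_color(img, contours, quant=32):
    groups = {}
    for cnt in contours:
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.drawContours(mask, [cnt], -1, 255, thickness=-1)  # filled mask of this contour
        b, g, r, _ = cv2.mean(img, mask=mask)                 # mean BGR inside the contour
        key = (int(b) // quant, int(g) // quant, int(r) // quant)
        groups.setdefault(key, []).append(cnt)
    return groups

# e.g. group_contours_by_color(leftImg, contours) reuses the contours found above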
Example #6
def test_disparity_with_bm(drive=11, frame=0):
    # cv2.createStereoBM seems to be absent from current version of opencv
    import cv2
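    # Assumption: on OpenCV >= 3 the factory is named cv2.StereoBM_create; this
    # hypothetical shim falls back to it when cv2.createStereoBM is missing.
    if not hasattr(cv2, 'createStereoBM'):
        cv2.createStereoBM = cv2.StereoBM_create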

    xyd = load_disparity_points(drive, frame, color=True)
    disp = np.zeros(image_shape, dtype=np.uint8)
    for x, y, d in np.round(xyd).astype(int):
        disp[y, x] = d

    # compare with block matching
    left, right = load_stereo_frame(drive, frame, color=False)

    ndisp = 128
    bm = cv2.createStereoBM(numDisparities=ndisp, blockSize=9)
    bm.setPreFilterSize(41)
    bm.setPreFilterCap(31)
    bm.setTextureThreshold(20)
    bm.setUniquenessRatio(10)
    bm.setSpeckleWindowSize(100)
    bm.setSpeckleRange(32)

    bm_disp = bm.compute(left, right) / 16

    # compute difference
    diff = np.zeros(image_shape)
    for x, y, d in np.round(xyd).astype(int):
        if bm_disp[y, x] >= 0:
            diff[y, x] = bm_disp[y, x] - d

    # downsample for visualization purposes
    diff2 = np.zeros((image_shape[0] // 2, image_shape[1] // 2))
    for i in range(diff2.shape[0]):
        for j in range(diff2.shape[1]):
            r = diff[2*i:2*i+2, 2*j:2*j+2].flatten()
            ind = np.argmax(np.abs(r))
            diff2[i, j] = r[ind]

    # custom colormap
    from matplotlib.colors import LinearSegmentedColormap
    g = 1.0
    cdict1 = {
        'red':   ((0.0, 0.0, 0.0),
                  (0.5, 0.0, 0.0),
                  (1.0, 1.0, 1.0)),
        'green': ((0.0, g, g),
                  (0.5, 0.0, 0.0),
                  (1.0, g, g)),
        'blue':  ((0.0, 1.0, 1.0),
                  (0.5, 0.0, 0.0),
                  (1.0, 0.0, 0.0)),
    }
    cmap = LinearSegmentedColormap('BlueRed', cdict1)

    plt.figure(1)
    plt.clf()
    plt.subplot(311)
    plt.imshow(disp)
    plt.colorbar()
    plt.subplot(312)
    plt.imshow(bm_disp)
    plt.colorbar()
    plt.subplot(313)
    plt.imshow(diff2, cmap=cmap, interpolation='nearest')
    plt.colorbar()

    plt.show()
Example #7
import numpy as np
import cv2
from matplotlib import pyplot as plt

imgL = cv2.imread('tsukuba_l.png', 0)
imgR = cv2.imread('tsukuba_r.jpg', 0)

stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL, imgR)
plt.imshow(disparity, 'gray')
plt.show()
Example #8
 def reconstruct(self):
     stereo = cv2.createStereoBM(numDisparities=32, blockSize=9)
     disparity = stereo.compute(self.image_left, self.image_right)
     plt.imshow(disparity, 'gray')
     plt.show()
Example #9
if vc3.isOpened():  # try to get the first frame
    rval3, frame3 = vc3.read()
else:
    rval3 = False
print(rval3)

if vc2.isOpened():  # try to get the first frame
    rval2, frame2 = vc2.read()
else:
    rval2 = False
print(rval2)

cv2.imwrite('Left.png', frame3)
cv2.imwrite('Right.png', frame2)

stereo = cv2.createStereoBM(numDisparities=64, blockSize=25)
#stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
frame2_new = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
frame3_new = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)

#print(frame2_new)
#print('')
#print(frame3_new)
disparity = stereo.compute(frame3_new, frame2_new)
plt.imshow(disparity, 'gray')
plt.show()

## Got my left and right frames

## TRY APPLYING PIXEL-SHIFT COMPARISON: the geometry of the left/right pixel difference gives depth
# Larger pixel shift == object is closer (a minimal sketch follows below)
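
# A minimal sketch of the pixel-shift idea above, using assumed (not calibrated) values:
# the focal length f_px and baseline_m below are hypothetical, not from a real rig.
# depth = f * B / disparity, so a larger pixel shift means a closer object.
import numpy as np

f_px = 700.0        # assumed focal length in pixels
baseline_m = 0.06   # assumed distance between the two cameras in metres
disp_px = disparity.astype(np.float32) / 16.0   # StereoBM output is scaled by 16
valid = disp_px > 0
depth_m = np.zeros_like(disp_px)
depth_m[valid] = f_px * baseline_m / disp_px[valid]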
Example #10
import cv2 as cv 
import matplotlib.pyplot as plt 

img_L = cv.imread("IMG_20171206_143323.jpg",0)
img_R = cv.imread("IMG_20171206_143323.jpg",0)
 
#stereo = cv.StereoBM(cv.STEREO_BM_BASIC_PRESET,ndisparities=16, SADWindowSize=15)
stereo = cv.createStereoBM(numDisparities=16, blockSize=15)  # OpenCV 3.0 function
disparity = stereo.compute(img_L, img_R)
 
plt.subplot(121), plt.imshow(img_L, 'gray'), plt.title('img_left'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(disparity, 'gray'), plt.title('disparity'), plt.xticks([]), plt.yticks([])
plt.show()
Example #11
def filterImg(r1,r2,p1,p2,q):
    imgL = cv2.imread('c1/capture_1.jpg',0)
    imgR = cv2.imread('c2/capture_1.jpg',0)

    stereo = cv2.createStereoBM(numDisparities=16,blockSize=15)
    disparity = stereo.compute(imgL, imgR)

    #Median Blur
    print "Bluring..."
    imgL = cv2.medianBlur(imgL,9)
    imgR = cv2.medianBlur(imgR,9)

    filterImg = cv2.imread('filter2.png', 0)
    imgL = cv2.resize(imgL, (0,0), fx = .79, fy = .79)
    imgR = cv2.resize(imgR, (0,0), fx = .79, fy = .79)
    filterImg = cv2.resize(filterImg,(0,0), fx = 1, fy=.75)

    print "Starting dft..."

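    # (added note) fftshift centers the zero-frequency component, so the filter image
    # loaded above can be applied as a centered frequency-domain mask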
    dft_shiftL = numpy.fft.fftshift(cv2.dft(numpy.float32(imgL), flags = cv2.DFT_COMPLEX_OUTPUT))
    dft_shiftR = numpy.fft.fftshift(cv2.dft(numpy.float32(imgR), flags = cv2.DFT_COMPLEX_OUTPUT))

    rows, cols = imgL.shape
    crow, ccol = rows/2, cols/2
    mask = numpy.zeros((rows, cols,2), numpy.uint8)
    #filterImg = filterImg[484:1564,64:1984]
    mask[:,:,0] = filterImg[:,:]
    mask[:,:,1] = filterImg[:,:]


    img_backL = cv2.idft(numpy.fft.ifftshift(dft_shiftL*mask))
    img_backR = cv2.idft(numpy.fft.ifftshift(dft_shiftR*mask))

    img_backR = cv2.magnitude(img_backR[:,:,0], img_backR[:,:,1])
    img_backR = cv2.normalize(img_backR, 0, 255, cv2.NORM_MINMAX)
    img_backL = cv2.magnitude(img_backL[:,:,0], img_backL[:,:,1])
    img_backL = cv2.normalize(img_backL, 0, 255, cv2.NORM_MINMAX)

    print "Thresholding..."
    retR,img_threshR = cv2.threshold(img_backR,.135,255,cv2.THRESH_BINARY)
    retL,img_threshL = cv2.threshold(img_backL,.135,255,cv2.THRESH_BINARY)
    #cv2.namedWindow('thresholding', cv2.WINDOW_NORMAL)
    #cv2.resizeWindow('thresholding',cols/2, rows/2)
    #cv2.imshow("thresholding", img_thresh)


    print "Erosion and Dilation..."
    eltC = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    eltR = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    
    eroR = cv2.dilate(img_threshR,eltC, iterations=10)
    diaR = cv2.erode(eroR,eltR, iterations=30)
    diaR = cv2.dilate(diaR,eltR,iterations=15)

    eroL = cv2.dilate(img_threshL,eltC, iterations=10)
    diaL = cv2.erode(eroL, eltR, iterations=30)
    diaL = cv2.dilate(diaL, eltR, iterations=15)

    #cv2.namedWindow('DilationErosion', cv2.WINDOW_NORMAL)
    #cv2.resizeWindow('DilationErosion',cols/2, rows/2)
    #cv2.imshow("DilationErosion", dia)
    mask = numpy.zeros((rows+2,cols+2), numpy.uint8)
    #flood fill the corners
    print "Floodfill..."
    cv2.floodFill(diaR, mask, (0,0), 255)
    cv2.floodFill(diaR, mask, (2047, 1535), 255)
    cv2.floodFill(diaR, mask, (0, 1535), 255)
    cv2.floodFill(diaR, mask, (2047, 0), 255)

    cv2.floodFill(diaL, mask, (0,0), 255)
    cv2.floodFill(diaL, mask, (2047, 1535), 255)
    cv2.floodFill(diaL, mask, (0, 1535), 255)
    cv2.floodFill(diaL, mask, (2047, 0), 255)
    print "starting line detection..."
    #crop and do region
    cpR = 255 - diaR
    cpL = 255 - diaL
    

    size = numpy.size(cpL)
    skelR = numpy.zeros(cpR.shape,numpy.float32) #uint8
    skelL = numpy.zeros(cpL.shape,numpy.float32) #uint8
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    finished = False

    print "Starting skeletonization"
    while(not finished):
        eroded = cv2.erode(cpL,kernel)
        temp = cv2.dilate(eroded,kernel)
        temp = cv2.subtract(cpL,temp)
        skelL = cv2.bitwise_or(skelL, temp)
        cpL = eroded.copy()

        #zeros = size - cv2.countNonZero(img)
        #print zeros
        print cv2.countNonZero(cpL)
        if cv2.countNonZero(cpL)<10:
            finished = True
    print "Skeletonization 50%..."
    finished = False
    while(not finished):
        eroded = cv2.erode(cpR,kernel)
        temp = cv2.dilate(eroded,kernel)
        temp = cv2.subtract(cpR,temp)
        skelR = cv2.bitwise_or(skelR, temp)
        cpR = eroded.copy()

        #zeros = size - cv2.countNonZero(img)
        #print zeros
        print cv2.countNonZero(cpR)
        if cv2.countNonZero(cpR)<10:
            finished = True
            
    #veinL = skelL
    #veinR = skelR
    #vein = vein.astype(numpy.uint8)
    #over = cv2.addWeighted(img, .5, vein, .5, 0.0)
    #cv2.namedWindow('Vein', cv2.WINDOW_NORMAL)
    #cv2.resizeWindow('Vein',cols/2, rows/2)
    #cv2.imshow("Vein", vein)
    #cv2.waitKey()
    
                
    #if the x are out of tolerance, highlight next section to indicate curve
    #points = cv2.findNonZero(vein)
    #points = cv2.findNonZero(vein)
    #change skel to rgb
    #finds the longest line that should be the vein and eliminates the other noise
    #may not be needed
    imgPtsL = longestLine(imgL, rows, cols)
    imgPtsR = longestLine(imgR, rows, cols)
    cXL = 0 
    cYL = 0
    cXR = 0
    cYR = 0
    numOfPts = len(imgPtsL)
    for img in range(0, numOfPts):
        cXL = cXL + (imgPtsL[img])[0] 
        cYL = cYL + (imgPtsL[img])[1]
        cXR = cXR + (imgPtsR[img])[0]
        cYR = cYR + (imgPtsR[img])[1]
    cXL = cXL/numOfPts
    cYL = cYL/numOfPts
    cXR = cXR/numOfPts
    cYR = cYR/numOfPts
    
    #this code is for displaying the vein line on the images. 
    #veinImage(skel) #if set to true, will return image that can be displayed
    
    print "Starting 3D Point Reconstruction..."
    #Finding real world point with disparity map (trajectory is in mm)
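    # (added note) with Q the 4x4 reprojection matrix from stereoRectify (assumed to be
    # the q parameter of this function): [X, Y, Z, W] = Q . [x, y, disparity(x, y), 1],
    # and the metric point is (X/W, Y/W, Z/W)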
    stereo = cv2.StereoBM_create(numDisparities = 16, blockSize = 15)
    disparity = stereo.compute(imgL, imgR)
    # compute() returns fixed-point disparities scaled by 16; sample it at the vein centroid
    difference = disparity[int(cYL), int(cXL)] / 16.0
    vectorPts = [cXL, cYL, difference, 1]
    realWorldPts = numpy.dot(q, vectorPts)  # q is assumed to be the reprojection matrix Q
    realWorldPts_Normalized = [realWorldPts[0]/realWorldPts[3], realWorldPts[1]/realWorldPts[3], realWorldPts[2]/realWorldPts[3]]
    print "Real world point in mm:"
    print realWorldPts_Normalized

    #points = Q*[x,y,disparity(x,y),1]  # might need to transpose Q
    #finalPoint = [points[0]/points[3],points[1]/points[3],points[2]/points[3]]
    return realWorldPts_Normalized