Example #1
def remap(img1, map1x, map1y, img2, map2x, map2y):

    rimg1 = cv2.remap(img1, map1x, map1y,
                      interpolation=cv2.INTER_LINEAR,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0,0,0,0))
    rimg2 = cv2.remap(img2, map2x, map2y,
                      interpolation=cv2.INTER_LINEAR,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0,0,0,0))

    # Put a red background on the invalid values
    # TODO: Return a mask for valid/invalid values
    # TODO: There is aliasing happening on the image borders. We should
    # invalidate a margin around the border so we're sure we have only valid
    # pixels
    '''
    if len(img1.shape) == 2: # grayscale
        # print "Grayscale for remap"
        rimg1[rimg1[:,:,3] == 0,:] = (255,0,0,255)
        rimg2[rimg2[:,:,3] == 0,:] = (255,0,0,255)
    elif len(img1.shape) == 3: # RGB
        # print "Color for remap"
        rimg1[rimg1[:,:,3] == 0,:] = (255,0,0,255)
        rimg2[rimg2[:,:,3] == 0,:] = (255,0,0,255)
    elif len(img1.shape) == 4: # RGBA
        # print "Color (RGBA) for remap"
        rimg1[rimg1[:,:,3] == 0,:] = (255,0,0,255)
        rimg2[rimg2[:,:,3] == 0,:] = (255,0,0,255)
    else:
        print str(len(img1.shape)) + " image size / type (remap)?"
    '''
        
    return rimg1, rimg2
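
The TODO above asks for an explicit valid/invalid mask instead of abusing the alpha channel. A minimal sketch of that idea (not from the original project): remap an all-valid mask through the same maps with a zero border, so anything sampled from outside the source comes back invalid; the one-pixel erosion is a guess at handling the border aliasing mentioned in the TODO.

import cv2
import numpy as np

def remap_valid_mask(src_shape, mapx, mapy):
    # All-valid mask the size of the source; out-of-source samples become 0.
    mask = np.full(src_shape[:2], 255, np.uint8)
    warped = cv2.remap(mask, mapx, mapy,
                       interpolation=cv2.INTER_NEAREST,
                       borderMode=cv2.BORDER_CONSTANT,
                       borderValue=0)
    # Shave a one-pixel margin to drop aliased border pixels (see TODO above).
    return cv2.erode(warped, np.ones((3, 3), np.uint8)) > 0

# Illustrative usage against the function above:
#   rimg1[~remap_valid_mask(img1.shape, map1x, map1y)] = (255, 0, 0, 255)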
Example #2
    def __stereo(self, left, right):
        # return self.undistortLeft(left), self.undistortRight(right)
        if not self.maps_read:
            # clear init flag
            self.maps_read = True
            # use stereoRectify to calculate what we need to rectify stereo images
            M1 = self.info["cameraMatrix1"]
            d1 = self.info["distCoeffs1"]
            M2 = self.info["cameraMatrix2"]
            d2 = self.info["distCoeffs2"]
            size = self.info['imageSize']
            R = self.info['R']
            T = self.info['T']
            h, w = size[:2]
            R1, R2, self.P1, self.P2, self.Q, roi1, roi2 = cv2.stereoRectify(M1, d1, M2, d2, (w,h), R, T, alpha=self.alpha)

            # these return undistortion and rectification maps which are both
            # stored in maps_x for camera 1 and 2
            self.maps_1 = cv2.initUndistortRectifyMap(M1, d1, R1, self.P1, (w,h), cv2.CV_16SC2)  # CV_32F?
            self.maps_2 = cv2.initUndistortRectifyMap(M2, d2, R2, self.P2, (w,h), cv2.CV_16SC2)

        # return self.__fix2(left, self.maps_1), self.__fix2(right, self.maps_2)
        inter = cv2.INTER_LANCZOS4
        return (cv2.remap(left, self.maps_1[0], self.maps_1[1], inter),
                cv2.remap(right, self.maps_2[0], self.maps_2[1], inter))
Example #3
def draw_rect(K, d, train_frame, R, T, name):
	# perform the rectification (OpenCV expects the image size as (width, height))
	size = train_frame.shape[1::-1]
	R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, d, K, d, size, R, T, alpha=1)
	mapx1, mapy1 = cv2.initUndistortRectifyMap(K, d, R1, K, size, cv2.CV_32F)
	mapx2, mapy2 = cv2.initUndistortRectifyMap(K, d, R2, K, query_frame.shape[1::-1], cv2.CV_32F)
	img_rect1 = cv2.remap(train_bckp, mapx1, mapy1, cv2.INTER_LINEAR)
	img_rect2 = cv2.remap(query_bckp, mapx2, mapy2, cv2.INTER_LINEAR)

	# draw the images side by side
	total_size = (max(img_rect1.shape[0], img_rect2.shape[0]), img_rect1.shape[1] + img_rect2.shape[1],3)
	img = np.zeros(total_size, dtype=np.uint8)
	img[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
	img[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2
	 
	# draw horizontal lines every 25 px across the side-by-side image
	for i in range(20, img.shape[0], 25):
		cv2.line(img, (0, i), (img.shape[1], i), (255, 0, 0))
	

	h1, w1 = train_frame.shape[:2]
	h2, w2 = query_frame.shape[:2]
	org_imgs = np.zeros((max(h1, h2), w1+w2,3), np.uint8)
	org_imgs[:h1, :w1] = train_bckp
	org_imgs[:h2, w1:w1+w2] = query_bckp
	for m in good:
		# draw the keypoints
		# print m.queryIdx, m.trainIdx, m.distance
		color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
		cv2.line(org_imgs, (int(train_keypoints[m.queryIdx].pt[0]), int(train_keypoints[m.queryIdx].pt[1])) , (int(query_keypoints[m.trainIdx].pt[0] + w1), int(query_keypoints[m.trainIdx].pt[1])), color)
		cv2.circle(org_imgs, (int(train_keypoints[m.queryIdx].pt[0]), int(train_keypoints[m.queryIdx].pt[1])) , 5, color, 1)
		cv2.circle(org_imgs, (int(query_keypoints[m.trainIdx].pt[0] + w1), int(query_keypoints[m.trainIdx].pt[1])) , 5, color, 1)
	cv2.imshow('original', org_imgs)
	cv2.imshow(name, img)
Example #4
    def on_update(self):
        if not self.update:
            return False

        self.camera.grab()
        left_image, right_image = self.camera.retrieve()

        if self.rectify_check.get_active():
            left_image = cv2.remap(left_image, self.left_maps[0],
                                   self.left_maps[1], cv2.INTER_LINEAR)
            right_image = cv2.remap(right_image, self.right_maps[0],
                                    self.right_maps[1], cv2.INTER_LINEAR)

        if self.notebook.get_current_page() == 0:
            disparity_image = vision.stereobm(self.config,
                                              left_image, right_image)
        elif self.notebook.get_current_page() == 1:
            disparity_image = vision.stereosgbm(self.config,
                                                left_image, right_image)
        elif self.notebook.get_current_page() == 2:
            disparity_image = vision.stereovar(self.config,
                                               left_image, right_image)

        # Redraw the windows
        self.left_view.update(left_image)
        self.right_view.update(right_image)
        self.depth_view.update_depth(disparity_image)

        return True
Example #5
def imgChangeUsingRemap(img, topLeft1, bottomRight1, topLeft2, bottomRight2):
    def difference(point1, point2):
        return point2[0] - point1[0], point2[1] - point1[1]

    diff1 = difference(topLeft1, bottomRight1)
    diff2 = difference(topLeft2, bottomRight2)

    map_x = np.zeros(img.shape[:2], np.float32)
    map_y = np.zeros(img.shape[:2], np.float32)

    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            map_x[i, j] = j / (diff2[1] / diff1[1])
            map_y[i, j] = i / (diff2[0] / diff1[0])

    img = cv2.remap(img, map_x, map_y, cv2.INTER_CUBIC)

    #cv2.imshow('Scaled', img)

    diff3 = difference(topLeft1, topLeft2)

    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            map_x[i, j] = j - diff3[0]
            map_y[i, j] = i - diff3[1]

    img = cv2.remap(img, map_x, map_y, cv2.INTER_CUBIC)

    return img
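
The per-pixel loops above are O(H*W) in Python; the same maps can be built in one call with np.indices. A sketch assuming the same (row, col) ordering of diff1/diff2 as the function above; the float() cast guards against integer division under Python 2.

import numpy as np

def scale_maps_vectorized(shape, diff1, diff2):
    # Same scaling maps as the first loop in imgChangeUsingRemap, no Python loops.
    rows, cols = np.indices(shape[:2], dtype=np.float32)
    map_x = cols / (float(diff2[1]) / diff1[1])
    map_y = rows / (float(diff2[0]) / diff1[0])
    return map_x, map_y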
Example #6
def spherical(img, mask, pos=None, scale=None):
    size = img.shape
    if not scale:
        scale = [np.pi/2, np.pi/2]
    if not pos:
        pos = [0, 0]
    pos[0] = scale[0] - pos[0]
    pos[1] = scale[1] - pos[1]
    scale[0] = size[0]*.5 / scale[0] # old coor' = old coor / scale - pos
    scale[1] = size[1]*.5 / scale[1]
    # old region
    ymin, ymax = -pos[1], size[1]/scale[1]-pos[1]
    xmin, xmax = -pos[0], size[0]/scale[0]-pos[0]
    # new_y = sin(y)
    # new_x = sin(x)*cos(y)
    siny_max = np.sin(ymax)
    siny_min = np.sin(ymin)
    sinx_max = np.sin(xmax)
    sinx_min = np.sin(xmin)
    if ymin <= 0 and ymax >= 0:
        cosy_max = 1
    else:
        cosy_max = max(np.cos(ymax), np.cos(ymin))
    cosy_min = min(np.cos(ymax), np.cos(ymin))
    ymin = siny_min
    ymax = siny_max
    xmin = min(sinx_min*cosy_min, sinx_min*cosy_max)
    xmax = max(sinx_max*cosy_min, sinx_max*cosy_max)

    scale2 = [0, 0] # new coor' = new coor / scale2 - pos2
    pos2 = [0, 0]
    scale2[0] = 1. * size[0] / (xmax-xmin)
    scale2[1] = 1. * size[1] / (ymax-ymin)
    pos2[0] = -xmin
    pos2[1] = -ymin

    map = np.zeros([size[0], size[1], 2], np.float32)
    # y' = u' / scale2 - pos2
    # x' = v' / scale2 - pos2
    # y' = sin(y)
    # x' = sin(x)cos(y)
    # x = u / scale - pos
    # y = v / scale - pos
    # 
    # y = arcsin(y')
    # x = arcsin(x'/(sqrt(1-y'^2)))
    #
    # u = f(u', v'), v = g(u', v')
    for i in range(size[0]):
        for j in range(size[1]):
            x2 = i / scale2[0] - pos2[0]
            y2 = j / scale2[1] - pos2[1]
            x = np.arcsin(x2/np.sqrt(1-y2**2)) 
            y = np.arcsin(y2)
            u = (x+pos[0]) * scale[0]
            v = (y+pos[1]) * scale[1]
            map[i,j,0] = u
            map[i,j,1] = v
    return cv2.remap(img, map[:,:,1], map[:,:,0], cv2.INTER_CUBIC), cv2.remap(mask, map[:,:,1], map[:,:,0], cv2.INTER_NEAREST)
Example #7
def getDisparity(stereo, img1, img2, mapx1, mapy1, mapx2, mapy2):
    dst1 = cv2.remap(img1, mapx1, mapy1, cv2.INTER_LINEAR)
    dst2 = cv2.remap(img2, mapx2, mapy2, cv2.INTER_LINEAR)
    gray1 = cv2.cvtColor(dst1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(dst2, cv2.COLOR_BGR2GRAY)
    disparity = stereo.compute(gray1, gray2)/16
    # disparity = cv2.medianBlur(disparity, 5)
    return disparity
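
A hedged usage sketch for getDisparity. The matcher constructor below is the modern (OpenCV 3.x+) API, and the matrices M1/d1/R1/P1 etc. are assumptions, standing in for outputs of a stereoRectify call like the ones in the other examples.

# Illustrative setup; all calibration inputs are assumptions.
stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
mapx1, mapy1 = cv2.initUndistortRectifyMap(M1, d1, R1, P1, (w, h), cv2.CV_32FC1)
mapx2, mapy2 = cv2.initUndistortRectifyMap(M2, d2, R2, P2, (w, h), cv2.CV_32FC1)
disparity = getDisparity(stereo, img1, img2, mapx1, mapy1, mapx2, mapy2)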
Example #8
def stereo_rectify(img1, img2, mapfn, qfn):
    urmaps = np.load(mapfn)
    Q = np.load(qfn)
    imgL = cv2.remap(img1, urmaps[0], urmaps[1], cv2.INTER_LINEAR)
    imgR = cv2.remap(img2, urmaps[2], urmaps[3], cv2.INTER_LINEAR)
    #cv2.imshow('Image L', imgL); cv2.imshow('Image R', imgR)
    #cv2.waitKey(0)
    return imgL, imgR, Q
Example #9
 def inPlaceRemap(self, frame):
     """
     Corrects the distortion in 'frame' using the x and y maps computed by getMap()
     Unlike remap(), the correction is done in place on 'frame', and the method
     returns None.
     
     :param frame: The frame to undistort
     """
     # cv2.remap cannot run truly in place, so write the result back into frame.
     frame[:] = cv2.remap(frame, self.mapX, self.mapY, CameraCalibration.INTERP_METHOD)
Example #10
    def doThings(self):
        sgbm = cv2.StereoSGBM()
        sgbm.SADWindowSize, numberOfDisparitiesMultiplier, sgbm.preFilterCap, sgbm.minDisparity, \
        sgbm.uniquenessRatio, sgbm.speckleWindowSize, sgbm.P1, sgbm.P2, \
        sgbm.speckleRange = [v for v,_ in self.params.itervalues()]
        sgbm.numberOfDisparities = numberOfDisparitiesMultiplier*16
        sgbm.disp12MaxDiff = -1
        sgbm.fullDP = False
        R1, R2, P1, P2, Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2, 
                                (self.top.shape[1],self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1, 
                                                         (self.top.shape[1],self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2, 
                                                               (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR)
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        top_small = cv2.resize(self.top_r, (self.top_r.shape[1]/2,self.top_r.shape[0]/2))
        bottom_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1]/2,self.bottom_r.shape[0]/2))
        cv2.imshow('top', top_small)
        cv2.imshow('bottom', bottom_small)
        
#        top_r = cv2.equalizeHist(top_r)
        top_r = cv2.blur(self.top_r, (5,5))
#        bottom_r = cv2.equalizeHist(bottom_r)
        bottom_r = cv2.blur(self.bottom_r, (5,5))
        dispTop = sgbm.compute(top_r.T, bottom_r.T).T
        dispTopPositive = dispTop
        dispTopPositive[dispTop<0] = 0
        disp8 = (dispTopPositive / (sgbm.numberOfDisparities * 16.) * 255).astype(np.uint8)
        disp_small = cv2.resize(disp8, (disp8.shape[1]/2, disp8.shape[0]/2))
        cv2.imshow(self.winname, disp_small)
        
        self.disp8 = disp8
        self.xyz = cv2.reprojectImageTo3D(dispTop, Q, handleMissingValues=True)
#        self.xyzrgb = np.zeros((self.xyz.shape[0],self.xyz.shape[1],4))
        
#        import struct
#        def color_to_float(color):
#            if color.size == 1:
#                color = [color]*3
#            rgb = (color[2] << 16 | color[1] << 8 | color[0]);
#            rgb_hex = hex(rgb)[2:-1]
#            s = '0'*(8-len(rgb_hex)) + rgb_hex.capitalize()
##            print color, rgb, hex(rgb)
#            rgb_float = struct.unpack('!f', s.decode('hex'))[0]
##            print rgb_float
#            return rgb_float
        
#        for i in range(self.xyz.shape[0]):
#            for j in range(self.xyz.shape[1]):
#                self.xyzrgb[i,j] = np.append(self.xyz[i,j], color_to_float(self.top[i,j])) 
        
Example #11
def world_coordinates(u,v,left,right):
    # Load the undistortion and rectification transformation map
    path = '/home/ros/workspace/src/robotic_surgery/tip3d_detection/camera_calibration/calibration_data/'
    left_undistortion = np.load(path + 'left_undistortion.npy', mmap_mode='r')
    left_rectification = np.load(path + 'left_rectification.npy', mmap_mode='r')
    right_undistortion = np.load(path + 'right_undistortion.npy', mmap_mode='r')
    right_rectification = np.load(path + 'right_rectification.npy', mmap_mode='r')

    # Rectify left and right images
    left_rectified = cv2.remap(left, left_undistortion, left_rectification,
                               cv2.INTER_NEAREST)
    right_rectified = cv2.remap(right, right_undistortion, right_rectification,
                                cv2.INTER_NEAREST)

    # Specify parameters for the semi-global block matching algorithm
    stereo = cv2.StereoSGBM(minDisparity=16, numDisparities=96, SADWindowSize=3,
                            P1=216, P2=864, disp12MaxDiff=14, preFilterCap=100,
                            uniquenessRatio=15, speckleWindowSize=150,
                            speckleRange=1, fullDP=False)

    # Compute the disparity map
    disparity = stereo.compute(left_rectified, right_rectified)

    # Adjust the disparity map for clarity in depth
    disparity = disparity.astype(np.float32) / 16.0 

    # Disparity-to-depth mapping matrix
    Q = np.float32([[1, 0, 0, -0.5 * 640],
                    [0, -1, 0, 0.5 * 480],
                    [0, 0, 0, -0.8*640],
                    [0, 0, 1, 0]])

    # Compute the 3D world coordinates
    Image = cv2.reprojectImageTo3D(disparity, Q)

    # Identify the world coordinates of a certain point with left image
    # coordinates (u,v)
    camera_coord = Image[u,v]
    
    # Convert camera world coordinates to robot world coordinates
    robot_coord = [0.,0.,0.]
    distance_between_cameras = 9.1
    accurate_vertical_distance = 18
    vertical_distance_from_camera_to_the_board = 29.8
    tilted_distance_from_camera_to_the_board = 31.3
    centre_shift_to_right = 3.7
    centre_shift_forward = 73.8
    angle = math.acos(vertical_distance_from_camera_to_the_board/tilted_distance_from_camera_to_the_board)
    robot_coord[0] = (camera_coord[1]*math.sin(angle) - camera_coord[2]*math.cos(angle) + centre_shift_forward)/100
    robot_coord[1] = (camera_coord[0] - centre_shift_to_right)/100
    robot_coord[2] = (-(camera_coord[1]*math.cos(angle) + camera_coord[2]*math.sin(angle)) + accurate_vertical_distance)/100
    
    return robot_coord
Example #12
def rectify_images(img1, x1, img2, x2, K, d, F, shearing=False):
    imsize = (img1.shape[1], img1.shape[0])
    H1, H2 = epipolar.rectify_uncalibrated(x1, x2, F, imsize)

    #x1_1d = np.empty((2*x1.shape[1],), dtype=float)
    #x1_1d[0::2] = x1[0,:]
    #x1_1d[1::2] = x1[1,:]

    #x2_1d = np.empty((2*x2.shape[1],), dtype=float)
    #x2_1d[0::2] = x2[0,:]
    #x2_1d[1::2] = x2[1,:]

    #success, cvH1, cvH2 = cv2.stereoRectifyUncalibrated(x1_1d, x2_1d, F, imsize)


    if shearing:
        S = epipolar.rectify_shearing(H1, H2, imsize)
        H1 = S.dot(H1)

    rH = la.inv(K).dot(H1).dot(K)
    lH = la.inv(K).dot(H2).dot(K)

    # TODO: lRect or rRect for img1/img2 ??
    map1x, map1y = cv2.initUndistortRectifyMap(K, d, rH, K, imsize,
                                               cv.CV_16SC2)
    map2x, map2y = cv2.initUndistortRectifyMap(K, d, lH, K, imsize,
                                               cv.CV_16SC2)

    # Convert the images to RGBA (add an axis with 4 values)
    img1 = np.tile(img1[:,:,np.newaxis], [1,1,4])
    img1[:,:,3] = 255
    img2 = np.tile(img2[:,:,np.newaxis], [1,1,4])
    img2[:,:,3] = 255

    rimg1 = cv2.remap(img1, map1x, map1y,
                      interpolation=cv.CV_INTER_LINEAR,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0,0,0,0))
    rimg2 = cv2.remap(img2, map2x, map2y,
                      interpolation=cv.CV_INTER_LINEAR,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0,0,0,0))

    # Put a red background on the invalid values
    # TODO: Return a mask for valid/invalid values
    # TODO: There is aliasing happening on the image borders. We should
    # invalidate a margin around the border so we're sure we have only valid
    # pixels
    rimg1[rimg1[:,:,3] == 0,:] = (255,0,0,255)
    rimg2[rimg2[:,:,3] == 0,:] = (255,0,0,255)

    return rimg1, rimg2
Example #13
def doStereo(imgL, imgR, params):
    """
    Parameters tuned for q50 car images.

    Parameters:
    minDisparity - Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
    numDisparities - Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
    SADWindowSize - Matched block size. It must be an odd number >=1. Normally, it should be somewhere in the 3..11 range.
    P1 - The first parameter controlling the disparity smoothness. See below.
    P2 - The second parameter controlling the disparity smoothness. The larger the values are, the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor pixels. The algorithm requires P2 > P1. See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown (like 8*number_of_image_channels*SADWindowSize*SADWindowSize and 32*number_of_image_channels*SADWindowSize*SADWindowSize, respectively).
    disp12MaxDiff - Maximum allowed difference (in integer pixel units) in the left-right disparity check. Set it to a non-positive value to disable the check.
    preFilterCap - Truncation value for the prefiltered image pixels. The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. The result values are passed to the Birchfield-Tomasi pixel cost function.
    uniquenessRatio - Margin in percentage by which the best (minimum) computed cost function value should "win" the second best value to consider the found match correct. Normally, a value within the 5-15 range is good enough.
    speckleWindowSize - Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
    speckleRange - Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough.
    fullDP - Set it to true to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to false.
    """
    imsize = (1280, 960) 
    (R1, R2, P1, P2, Q, size1, size2, map1x, map1y, map2x, map2y) = computeStereoRectify(params)

    imgRectL = cv2.remap(imgL, map1x, map1y, 
                interpolation=cv.CV_INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue = (0,0,0,0))
        
    imgRectR = cv2.remap(imgR, map2x, map2y, 
                interpolation=cv.CV_INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue = (0,0,0,0))
    """
    window_size = 1
    min_disp = 0
    num_disp = 64
    stereo = cv2.StereoSGBM(minDisparity = min_disp,
        numDisparities = num_disp,
        SADWindowSize = window_size,
        uniquenessRatio = 30,
        speckleWindowSize = 80,
        speckleRange = 1,
        disp12MaxDiff = 1,
        P1 = 8*3*window_size**2,
        P2 = 128*3*window_size**2,
        fullDP = True
    )
    """
    imgRectL = cv2.cvtColor(imgRectL, cv2.COLOR_RGB2GRAY)
    imgRectR = cv2.cvtColor(imgRectR, cv2.COLOR_RGB2GRAY)
    stereo = cv2.StereoBM(preset=cv.CV_STEREO_BM_NARROW,
            SADWindowSize=35)
    print 'computing stereo...'
    disp = stereo.compute(imgRectL, imgRectR).astype(np.float32) / 16.0
    return (disp, Q, R1, R2)
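
The commented-out block above uses the legacy cv2.StereoSGBM constructor. A rough modern equivalent (OpenCV 3.x+), applying the P1/P2 rule of thumb quoted in the docstring, might look like this; the parameter values are illustrative, not tuned.

window_size = 3
stereo = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=64,             # must be divisible by 16
    blockSize=window_size,         # SADWindowSize in the old API
    P1=8 * 3 * window_size ** 2,   # 8 * channels * blockSize^2
    P2=32 * 3 * window_size ** 2,  # 32 * channels * blockSize^2
    disp12MaxDiff=1,
    uniquenessRatio=10,
    speckleWindowSize=100,
    speckleRange=1)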
Example #14
def corregir(src,u,v):
  global l,m,u0,v0,n,U,V,w,ThetaR,PhiR
  # Project the points from the VCA image plane onto the unit sphere
  sph = cv2.remap(src,U,V,cv2.INTER_LINEAR)
  # Compute the rotation matrix that points the sphere at a given point (u,v)
  MR = findMR(u,v,l,m,u0,v0)
  # Map the points from the rotated sphere back to the original sphere
  Theta,Phi = map_sphrot(n,MR)
  # Project the points from the original sphere onto the rotated one
  sphrot = cv2.remap(sph,Theta,Phi,cv2.INTER_LINEAR)
  # Project the points from the rotated sphere onto the virtual PTZ image plane
  ptzv = cv2.remap(sphrot,ThetaR,PhiR,cv2.INTER_LINEAR)
  return ptzv
Example #15
    def UndistortImages(self, left, right):
        """Method used to undistorte the input stereo images."""
        # Prepares the external parameters.
        maps = {}
        for index, parameter in zip(range(2), CaptureManager.Instance.Parameters):
            maps[index] = parameter.Maps

        # Applies a generic geometrical transformation to each stereo image.
        leftUndistort  = cv2.remap(left,  maps[0][0], maps[0][1], cv2.INTER_LINEAR)
        rightUndistort = cv2.remap(right, maps[1][0], maps[1][1], cv2.INTER_LINEAR)

        # Returns the undistorted images.
        return leftUndistort, rightUndistort
Example #16
File: helios.py Project: thouis/Helios
 def warp(self, im, repeat=False):
     ybase, xbase = np.mgrid[:im.shape[0], :im.shape[1]]
     if repeat:
         return cv2.remap(im,
                          (xbase + self.u).astype(np.float32),
                          (ybase + self.v).astype(np.float32),
                          cv2.INTER_CUBIC,
                          borderMode=cv2.BORDER_REPLICATE)
     return cv2.remap(im,
                      (xbase + self.u).astype(np.float32),
                      (ybase + self.v).astype(np.float32),
                      cv2.INTER_CUBIC,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=np.nan)
Example #17
 def remap_inv_opencv(self,pts_fwd,img,img_wrapped_inv,
                      interp_method=cv2.INTER_CUBIC):
     if img.shape != img_wrapped_inv.shape:
         raise ValueError
     if img.dtype != img_wrapped_inv.dtype:
         raise ValueError(img.dtype , img_wrapped_inv.dtype)
     if img.dtype == np.float64:
         raise NotImplementedError( img.dtype)
     pts_fwd.gpu2cpu()
     map1=pts_fwd.cpu[:,0].astype(np.float32).reshape(img.shape[:2])
     map2=pts_fwd.cpu[:,1].astype(np.float32).reshape(img.shape[:2])         
     cv2.remap(src=img.cpu, map1=map1,map2=map2,
               interpolation=interp_method,dst=img_wrapped_inv.cpu) 
     img_wrapped_inv.cpu2gpu()     
Example #18
    def __init__(self, params, winname, top, bottom):
        self.top = top
        self.bottom = bottom
        
        top_small = cv2.resize(top, (top.shape[1] / 2, top.shape[0] / 2))
        bottom_small = cv2.resize(bottom, (bottom.shape[1] / 2, bottom.shape[0] / 2))
        cv2.imshow('top', top_small)
        cv2.imshow('bottom', bottom_small)

        extrinsic_filepath = config.PROJPATH + 'extrinsics.yml'
        intrinsic_filepath = config.PROJPATH + 'intrinsics.yml'
        self.R = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R'))
        self.T = np.asarray(cv2.cv.Load(extrinsic_filepath, name='T'))
        self.R1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R1'))
        self.R2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R2'))
        self.P1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P1'))
        self.P2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P2'))
        self.Q = np.asarray(cv2.cv.Load(extrinsic_filepath, name='Q'))
        self.M1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M1'))
        self.M2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M2'))
        self.D1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D1'))
        self.D2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D2'))
        
        self.do_tune = config.TUNE_DISPARITY_MAP
        
        R1, R2, P1, P2, self.Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2,
                        (self.top.shape[1], self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1,
                                    (self.top.shape[1], self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2,
                                (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR)
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        
        top_r_small = cv2.resize(self.top_r, (self.top_r.shape[1] / 2, self.top_r.shape[0] / 2))
        bottom_r_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1] / 2, self.bottom_r.shape[0] / 2))
        cv2.imshow('top rectified', top_r_small)
        cv2.imshow('bottom rectified', bottom_r_small)
        
        tx1,ty1,tx2,ty2 = topValidRoi
        bx1,by1,bx2,by2 = bottomValidRoi
        self.roi = (max(tx1, bx1), max(ty1, by1), min(tx2, bx2), min(ty2, by2))
        self.top_r = cv2.blur(self.top_r, (5, 5))
        self.bottom_r = cv2.blur(self.bottom_r, (5, 5))
#        top_r = cv2.equalizeHist(self.top_r)
#        bottom_r = cv2.equalizeHist(self.bottom_r)
        
        super(SGBMTuner, self).__init__(params, winname)
Example #19
def StereoRectification( calibration, left_image, right_image, display = False ) :
	# Remap the images
	left_image = cv2.remap( left_image, calibration['left_map'][0], calibration['left_map'][1], cv2.INTER_LINEAR )
	right_image = cv2.remap( right_image, calibration['right_map'][0], calibration['right_map'][1], cv2.INTER_LINEAR )
	# Display the rectified images
	if display :
		# Print ROI
		cv2.rectangle( left_image, calibration['ROI1'][:2], calibration['ROI1'][2:], (0,0,255), 2 )
		cv2.rectangle( right_image, calibration['ROI2'][:2], calibration['ROI2'][2:], (0,0,255), 2 )
		# Print lines
		for i in range( 0, left_image.shape[0], 32 ) :
			cv2.line( left_image, (0, i), (left_image.shape[1], i), (0, 255, 0), 2 )
			cv2.line( right_image, (0, i), (right_image.shape[1], i), (0, 255, 0), 2 )
	# Return the rectified images
	return left_image, right_image
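
A sketch of how the calibration dictionary consumed above might be assembled; the key names come from the function, everything else is an assumption. Note that the function indexes each ROI as a corner pair, so the (x, y, w, h) ROIs returned by cv2.stereoRectify would need converting.

calibration = {
    'left_map': cv2.initUndistortRectifyMap(M1, d1, R1, P1, image_size, cv2.CV_16SC2),
    'right_map': cv2.initUndistortRectifyMap(M2, d2, R2, P2, image_size, cv2.CV_16SC2),
}
x, y, w, h = roi1
calibration['ROI1'] = (x, y, x + w, y + h)  # corner form expected by cv2.rectangle
x, y, w, h = roi2
calibration['ROI2'] = (x, y, x + w, y + h)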
Example #20
File: opt_flow.py Project: 2693/opencv
def warp_flow(img, flow):
    h, w = flow.shape[:2]
    flow = -flow
    flow[:,:,0] += np.arange(w)
    flow[:,:,1] += np.arange(h)[:,np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res
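
A plausible usage sketch (not part of the original sample): feed warp_flow with dense Farneback flow between two consecutive frames. The numeric parameters are the usual OpenCV sample defaults; prev_frame/next_frame are assumed BGR frames.

prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
next_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
warped = warp_flow(prev_frame, flow)  # prev_frame resampled along the flow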
Example #21
def undistort(mtx, dist, objpoints, imgpoints, rvecs, tvecs, img):
    # read in an image of choice
    # useful for reading in and segmenting the board to make Hough lines easier
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    # undistort
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
    dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    mean_error = 0
    for i in xrange(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        mean_error += error

    print "total error: ", mean_error / len(objpoints)
Example #22
File: img_proc.py Project: netanelw/debook
def stretch_to_rect(I,Ipage,box):
    (H,W) = get_box_size(box)
    grid_x, grid_y = np.mgrid[0:H, 0:W]
    destination = np.array([[0,0], [0,H/2-1], [0,H-1],
                      [W/2-1,0],[W/2-1,H/2-1],[W/2-1,H-1],
                      [W-1,0],[W-1,H/2-1],[W-1,H-1]])
    source = find_grid_extremes(Ipage,box)
    
    for p in source:
        cv2.circle(I, (p[0], p[1]), 3, (0, 255, 0), -1)
        p[0], p[1] = p[1], p[0]  # swap the coordinate order
    for p in destination:
        p[0], p[1] = p[1], p[0]
    showImageDebug(I)
        
    grid_z = griddata(destination, source, (grid_x, grid_y), method='cubic')
    map_x = np.append([], [ar[:,1] for ar in grid_z]).reshape(H,W)
    map_y = np.append([], [ar[:,0] for ar in grid_z]).reshape(H,W)
    map_x_32 = map_x.astype('float32')
    map_y_32 = map_y.astype('float32')
    orig = I
    warped = cv2.remap(orig, map_x_32, map_y_32, cv2.INTER_CUBIC)
    cv2.imshow('result',I)
    cv2.imshow('warped',warped)
Example #23
def dewarp(imagedir):
    # Loading from json file
    C = CameraParams.fromfile(os.path.join(imagedir, "params.json"))
    K = C.K
    D = C.D
    print("Loaded camera parameters from " + os.path.join(imagedir, "params.json"))

    for f in file_list(imagedir, ['jpg', 'jpeg', 'png']):
        print(f)
        colour = cv2.imread(f)
        grey = cv2.cvtColor(colour, cv2.COLOR_BGR2GRAY)

        h, w = grey.shape[:2]
        newcameramtx, roi=cv2.getOptimalNewCameraMatrix(K, D, (w,h), 1, (w,h))
        mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, newcameramtx, (w,h), 5)
        dewarped = cv2.remap(grey, mapx, mapy, cv2.INTER_LINEAR)

        x, y, w, h = roi
        dewarped = dewarped[y:y+h, x:x+w]
        grey = cv2.resize(grey, (0,0), fx=0.5, fy=0.5) 
        dewarped = cv2.resize(dewarped, (0,0), fx=0.5, fy=0.5) 

        cv2.imshow("Original", grey )
        cv2.imshow("Dewarped", dewarped)
        cv2.waitKey(-1)
Example #24
def undistort(path, imagesArr, K, d):

    print '\n-------- Undistort Images ---------'

    for fname in imagesArr:
        print 'Undistorting', os.path.basename(fname),
        
        img = cv2.imread(fname)
        if img is None:
            print ' -  Failed to load.'
            continue
        
        h, w = img.shape[:2]
        
        # Calculate new optimal matrix to avoid losing pixels in the edges after undistortion
        new_matrix, roi = cv2.getOptimalNewCameraMatrix(K, d, (w, h), 1)

        # Generate undistorted image
        #newimg = cv2.undistort(img, K, d, None, new_matrix)
        
        
        # Alternative undistortion via remapping
        mapx, mapy = cv2.initUndistortRectifyMap(K, d, None, new_matrix, (w, h), cv2.CV_32FC1)
        newimg = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

        # Output undistorted image to the same location with postfix '_undistorted'
        f = os.path.basename(fname).split('.')
        newimg_path = os.path.join(path, ".".join(f[:-1]) + '_undistorted.' + f[-1])
        cv2.imwrite(newimg_path, newimg)
        print '----->', newimg_path

    print 'Undistorted', len(imagesArr), 'images.'
    print '-------- End of Undistortion ---------\n'
Example #25
def distorted_scenecamera_image_to_angle_image(image):
	# TODO: The mapping should be cached (and optimized)
	import cv2
	w, h = SCENECAMERA_DIMENSIONS
	
	cx, cy = zip(*itertools.product(*zip([0, 0], [w-1, h-1])))

	ch, cp = distorted_scenecamera_to_angles(cx, cy)
	hs, he = np.min(ch), np.max(ch)
	ps, pe = np.min(cp), np.max(cp)

	P, H = np.mgrid[0:h:1.0, 0:w:1.0]

	H /= w/(he - hs)
	H += hs
	
	P /= h/(pe - ps)
	P += ps
	
	
	x, y = angles_to_distorted_scenecamera(H.flatten(), P.flatten())
	
	X = x.reshape((h, w)).astype(np.float32)
	Y = y.reshape((h, w)).astype(np.float32)
	dst = cv2.remap(image, X, Y, cv2.INTER_CUBIC)
	
	return dst[::-1], [hs, he, ps, pe]
Example #26
 def run(self, image):
     print(self.mess)
     self.image = utils.rotateImage(image, self.rotationAngle)
     if self.UndistMapX is not None and self.UndistMapY is not None:
         self.imageUndistorted = cv2.remap(self.image.astype(np.uint8), \
             self.UndistMapX, self.UndistMapY, cv2.INTER_CUBIC)
     return self.imageUndistorted
Example #27
    def update(self, queue):
        self.camera = PiCamera()
        self.image = None
        self.camera.resolution = (w, h)
        self.camera.framerate = 60
        self.camera.brightness = 70
        self.camera.contrast = 100
        self.rawCapture = PiRGBArray(self.camera, size=(w, h))

        time.sleep(0.1)

        mtx = np.matrix([[313.1251541, 0., 157.36763381],
                         [0., 311.84837219, 130.36209271],
                         [0., 0., 1.]])
        dist = np.matrix([[-0.42159111, 0.44966352, -0.00877638, 0.00070653, -0.43508731]])
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))

        self.mapx, self.mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
        self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
        self.stopped = False

        for f in self.stream:
            self.image = cv2.remap(f.array, self.mapx, self.mapy, cv2.INTER_LINEAR)
            self.rawCapture.truncate(0)

            if not queue.full():
                queue.put(self.image, False)

            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return
Example #28
    def undistort(self, img):
        """
        Undistorts an image based on the camera model.
        :param img: Distorted input image
        :return: Undistorted image
        """
        R = np.eye(3)

        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            np.array(self.K),
            np.array(self.D),
            R,
            np.array(self.K),
            self.resolution,
            cv2.CV_16SC2,
        )

        undistorted_img = cv2.remap(
            img,
            map1,
            map2,
            interpolation=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT,
        )

        return undistorted_img
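
Reusing K as the new camera matrix, as above, keeps the original focal length and can crop a wide fisheye view heavily. A hedged variation using the fisheye module's helper to trade crop against black borders; K, D, and resolution are assumed to mirror the class attributes above.

# balance=0.0 keeps only valid pixels; balance=1.0 keeps the whole view.
new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
    K, D, resolution, np.eye(3), balance=0.5)
map1, map2 = cv2.fisheye.initUndistortRectifyMap(
    K, D, np.eye(3), new_K, resolution, cv2.CV_16SC2)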
Example #29
def applyOpticalFlow(img, flow):
    h, w = flow.shape[:2]
    base = np.dstack(np.meshgrid(np.arange(w), np.arange(h)))
    pixel_map = np.array(base - flow, dtype=np.float32)

    res = cv2.remap(img, pixel_map, None, cv2.INTER_LINEAR)
    return res
Example #30
File: camera.py Project: rp3d/ciclop
	def captureImage(self, mirror=False, flush=False, flushValue=1):
		""" If mirror is set to True, the image will be displayed as a mirror,
		otherwise it will be displayed as the camera sees it """
		if self.isConnected:
			self.reading = True
			if flush:
				for i in xrange(0, flushValue):
					self.capture.read() #grab()

			ret, image = self.capture.read()
			self.reading = False
			if ret:
				if self.useDistortion and \
				   self.cameraMatrix is not None and \
				   self.distortionVector is not None and \
				   self.distCameraMatrix is not None:
					mapx, mapy = cv2.initUndistortRectifyMap(self.cameraMatrix, self.distortionVector,
															 R=None, newCameraMatrix=self.distCameraMatrix,
															 size=(self.width, self.height), m1type=5)
					image = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)
				image = cv2.transpose(image)
				if not mirror:
					image = cv2.flip(image, 1)
				self._success()
				return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
			else:
				self._fail()
				return None
		else:
			self._fail()
			return None
Example #31
    cv2.CALIB_FIX_PRINCIPAL_POINT)

rectifl, rectifr, projl, projr, disparityToDepthMap, roil, roir = cv2.stereoRectify(
    mtxl, distl, mtxr, distr, (1280, 720), R, T, None, None, None, None, None,
    cv2.CALIB_ZERO_DISPARITY)

mapXl, mapYl = cv2.initUndistortRectifyMap(mtxl, distl, rectifl, projl,
                                           (1280, 720), cv2.CV_32FC1)

mapXr, mapYr = cv2.initUndistortRectifyMap(mtxr, distr, rectifr, projr,
                                           (1280, 720), cv2.CV_32FC1)

framel = cv2.imread('./images/chessboard-5l.jpg')
framer = cv2.imread('./images/chessboard-5r.jpg')

undistorted_rectifiedl = cv2.remap(framel, mapXl, mapYl, cv2.INTER_LINEAR)
undistorted_rectifiedr = cv2.remap(framer, mapXr, mapYr, cv2.INTER_LINEAR)

cv2.imwrite('./generated/undistorted_rectifiedl.jpg', undistorted_rectifiedl)
cv2.imwrite('./generated/undistorted_rectifiedr.jpg', undistorted_rectifiedr)

np.savez_compressed('./generated/calibration',
                    imageSize=(1280, 720),
                    leftMapX=mapXl,
                    leftMapY=mapYl,
                    leftROI=roil,
                    rightMapX=mapXr,
                    rightMapY=mapYr,
                    rightROI=roir,
                    leftCamMtx=mtxl,
                    rightCamMtx=mtxr,
Example #32
import cv2 as cv
import numpy as np

# Remap a matrix (here the mapping is an identity copy)
img = np.random.randint(0, 256, size=[6, 6], dtype=np.uint8)
w, h = img.shape  # image size: shape[0] is the vertical size, shape[1] the horizontal size
x = np.zeros((w, h), np.float32)
y = np.zeros((w, h), np.float32)
for i in range(w):
    for j in range(h):
        x.itemset((i, j), j)  # x maps columns (width), y maps rows (height)
        y.itemset((i, j), i)
rst = cv.remap(img, x, y, cv.INTER_LINEAR)  # remap (here: copy) the matrix into a new one
print("image=\n", img)
print("rst=\n", rst)

# Copy an image via remapping
image = cv.imread("C:/Users/silencht/Desktop/Opencv_Learn/img/lena.png")
w, h = image.shape[:2]  # image size
print(w, h)
x = np.zeros((w, h), np.float32)
y = np.zeros((w, h), np.float32)
for i in range(w):
    for j in range(h):
        x.itemset((i, j), j)  # x maps columns (width), y maps rows (height)
        y.itemset((i, j), i)
rst = cv.remap(image, x, y, cv.INTER_LINEAR)  # remap (here: copy) the image into a new one
cv.imshow("image", image)
cv.imshow("rst", rst)
cv.waitKey()
cv.destroyAllWindows()
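
The itemset loops above spell out what the maps mean; in practice np.meshgrid builds them in one call. A small sketch: the same identity copy, plus a vertical flip obtained by reversing the y map.

rows, cols = image.shape[:2]
x, y = np.meshgrid(np.arange(cols, dtype=np.float32),
                   np.arange(rows, dtype=np.float32))
copy_rst = cv.remap(image, x, y, cv.INTER_LINEAR)             # identity copy
flip_rst = cv.remap(image, x, rows - 1 - y, cv.INTER_LINEAR)  # vertical flip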
Example #33
stickin = time.time()

while (1):
    ret, img = cap.read()
    (lower, upper) = ([0, 0, 130], [255, 255, 255])
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    mask = cv2.inRange(img, lower, upper)
    img = cv2.bitwise_and(img, img, mask=mask)
    # ############## color extraction on img @ input: img, output: img
    img = cv2.resize(img, DIM)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM,
                                                     cv2.CV_16SC2)
    img = cv2.remap(img,
                    map1,
                    map2,
                    interpolation=cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT)
    # ############## distortion correction on img @ input: img, output: img
    cv2.imshow('img', img)
    cv2.imshow('rotate', rotate(img, 10))
    # cv2.imshow('imgcopy', img[0:160, 0:90])
    # img = img[0:90, 0:90]
    img = rotate(img, 10)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    wtf, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    # cv2.imshow('erzhi', img)
    # ############## grayscale + binarization of img @ input: img, output: img
    cany = cv2.GaussianBlur(img, (13, 13), 0)
    edges = cv2.Canny(cany, 50, 70, apertureSize=3)
    # ############## Canny transform of img @ input: img, output: edges (img itself unchanged)
Example #34
def frontalize(img, proj_matrix, ref_U, eyemask):
    ACC_CONST = 800
    img = img.astype('float32')
    print("query image shape:", img.shape)

    bgind = np.sum(np.abs(ref_U), 2) == 0
    # count the number of times each pixel in the query is accessed
    threedee = np.reshape(ref_U, (-1, 3), order='F').transpose()
    temp_proj = proj_matrix * np.vstack(
        (threedee, np.ones((1, threedee.shape[1]))))
    temp_proj2 = np.divide(temp_proj[0:2, :], np.tile(temp_proj[2, :], (2, 1)))

    bad = np.logical_or(
        temp_proj2.min(axis=0) < 1, temp_proj2[1, :] > img.shape[0])
    bad = np.logical_or(bad, temp_proj2[0, :] > img.shape[1])
    bad = np.logical_or(bad, bgind.reshape((-1), order='F'))
    bad = np.asarray(bad).reshape((-1), order='F')

    temp_proj2 -= 1

    badind = np.nonzero(bad > 0)[0]
    temp_proj2[:, badind] = 0

    ind = np.ravel_multi_index(
        (np.asarray(temp_proj2[1, :].round(), dtype='int64'),
         np.asarray(temp_proj2[0, :].round(), dtype='int64')),
        dims=img.shape[:-1],
        order='F')

    synth_frontal_acc = np.zeros(ref_U.shape[:-1])
    ind_frontal = np.arange(0, ref_U.shape[0] * ref_U.shape[1])

    c, ic = np.unique(ind, return_inverse=True)
    bin_edges = np.r_[-np.Inf, 0.5 * (c[:-1] + c[1:]), np.Inf]
    count, bin_edges = np.histogram(ind, bin_edges)
    synth_frontal_acc = synth_frontal_acc.reshape(-1, order='F')
    synth_frontal_acc[ind_frontal] = count[ic]
    synth_frontal_acc = synth_frontal_acc.reshape((320, 320), order='F')
    synth_frontal_acc[bgind] = 0
    synth_frontal_acc = cv2.GaussianBlur(synth_frontal_acc, (15, 15),
                                         30.,
                                         borderType=cv2.BORDER_REPLICATE)

    #remap
    mapX = temp_proj2[0, :].astype(np.float32)
    mapY = temp_proj2[1, :].astype(np.float32)

    mapX = np.reshape(mapX, (-1, 320), order='F')
    mapY = np.reshape(mapY, (-1, 320), order='F')

    frontal_raw = cv2.remap(img, mapX, mapY, cv2.INTER_CUBIC)

    frontal_raw = frontal_raw.reshape((-1, 3), order='F')
    frontal_raw[badind, :] = 0
    frontal_raw = frontal_raw.reshape((320, 320, 3), order='F')

    # which side has more occlusions?
    midcolumn = int(np.round(ref_U.shape[1] / 2))  # int: used as an index/size below
    sumaccs = synth_frontal_acc.sum(axis=0)
    sum_left = sumaccs[0:midcolumn].sum()
    sum_right = sumaccs[midcolumn + 1:].sum()
    sum_diff = sum_left - sum_right

    if np.abs(sum_diff) > ACC_CONST:  # one side is occluded
        ones = np.ones((ref_U.shape[0], midcolumn))
        zeros = np.zeros((ref_U.shape[0], midcolumn))
        if sum_diff > ACC_CONST:  # left side of face has more occlusions
            weights = np.hstack((zeros, ones))
        else:  # right side of face has more occlusions
            weights = np.hstack((ones, zeros))
        weights = cv2.GaussianBlur(weights, (33, 33),
                                   60.5,
                                   borderType=cv2.BORDER_REPLICATE)

        # apply soft symmetry to use whatever parts are visible in the occluded side
        synth_frontal_acc /= synth_frontal_acc.max()
        weight_take_from_org = 1. / np.exp(0.5 + synth_frontal_acc)
        weight_take_from_sym = 1 - weight_take_from_org

        weight_take_from_org = np.multiply(weight_take_from_org,
                                           np.fliplr(weights))
        weight_take_from_sym = np.multiply(weight_take_from_sym,
                                           np.fliplr(weights))

        weight_take_from_org = np.tile(
            weight_take_from_org.reshape(320, 320, 1), (1, 1, 3))
        weight_take_from_sym = np.tile(
            weight_take_from_sym.reshape(320, 320, 1), (1, 1, 3))
        weights = np.tile(weights.reshape(320, 320, 1), (1, 1, 3))

        denominator = weights + weight_take_from_org + weight_take_from_sym
        frontal_sym = np.multiply(frontal_raw, weights) + np.multiply(
            frontal_raw, weight_take_from_org) + np.multiply(
                np.fliplr(frontal_raw), weight_take_from_sym)
        frontal_sym = np.divide(frontal_sym, denominator)

        # exclude eyes from symmetry
        frontal_sym = np.multiply(frontal_sym, 1 - eyemask) + np.multiply(
            frontal_raw, eyemask)
        frontal_raw[frontal_raw > 255] = 255
        frontal_raw[frontal_raw < 0] = 0
        frontal_raw = frontal_raw.astype('uint8')
        frontal_sym[frontal_sym > 255] = 255
        frontal_sym[frontal_sym < 0] = 0
        frontal_sym = frontal_sym.astype('uint8')
    else:  # both sides are occluded pretty much to the same extent -- do not use symmetry
        frontal_sym = frontal_raw
    return frontal_raw, frontal_sym
Example #35
class disparity_track:

  def __init__(self):
    f = open('L_proto2.pckl', 'rb')
    self.Left_Stereo_Map = pickle.load(f)
    f.close()
    f = open('R_proto2.pckl', 'rb')
    self.Right_Stereo_Map = pickle.load(f)
    f.close()
    f = open('FundamentalMat.pckl', 'rb')
    self.F = pickle.load(f)
    f.close()
    f = open('FundamentalMat_mask.pckl', 'rb')
    self.mask = pickle.load(f)
    f.close()

    
    self.left_point_pub = rospy.Publisher("left_point", PointStamped, queue_size=5)
    self.left_point_pub_min = rospy.Publisher("left_point_min", PointStamped, queue_size=5)
    self.left_point_pub_mid = rospy.Publisher("left_point_mid", PointStamped, queue_size=5)
    self.left_point_pub_max = rospy.Publisher("left_point_max", PointStamped, queue_size=5)
    self.left_point_clound = rospy.Publisher("point_clound", PointCloud, queue_size=5)

    self.pub_list = [self.left_point_pub_min,self.left_point_pub_mid,self.left_point_pub_max]
  
    self.bridge = CvBridge()
    
    self.image_sub = rospy.Subscriber("/left_cam/image_raw",Image,self.left_callback)
    self.image_sub = rospy.Subscriber("/right_cam/image_raw",Image,self.right_callback)
    
    
    self.lower_threshold = np.array([0, 0, 246])
    self.upper_threshold = np.array([255, 255, 255])
    
    self.disparity_ratio = 211.4
    self.center_x = (640.0/2.0) # half x pixels
    self.center_y = (480.0/2.0) # half y pixels
    self.Kinv = np.matrix([[861.7849547322785, 0, 320.0], 
                           [0, 848.6931340450212, 240.0],
                           [0, 0, 1]]).I # K inverse
    
    self.circles_1 = None
    self.circles_2 = None
    
    self.para1L = 200
    self.para2L = 100
    self.para1R = 200
    self.para2R = 100

    cv2.namedWindow("Control"); # Threshold Controller window
    cv2.createTrackbar("Disparity ratio", "Control", int(self.disparity_ratio*10), 5000, self.updateDisparity)
    cv2.createTrackbar('para1L','Control',166,255,self.updatepara1L)
    cv2.createTrackbar('para2L','Control',45,255,self.updatepara2L)
    cv2.createTrackbar('para1R','Control',166,255,self.updatepara1R)
    cv2.createTrackbar('para2R','Control',47,255,self.updatepara2R)
    cv2.createTrackbar('H_low','Control',0,255,self.updateLowH)
    cv2.createTrackbar('S_low','Control',0,255,self.updateLowS)
    cv2.createTrackbar('V_low','Control',255,255,self.updateLowV)
    cv2.createTrackbar('H_High','Control',180,255,self.updateHighH)
    cv2.createTrackbar('S_High','Control',255,255,self.updateHighS)
    cv2.createTrackbar('V_High','Control',255,255,self.updateHighV)


    self.last_left_image_pos = np.array([0.0, 0.0])

  def updateLowH(self, value):
    self.lower_threshold[0] = value
  def updateHighH(self, value):
    self.upper_threshold[0] = value
  def updateLowS(self, value):
    self.lower_threshold[1] = value
  def updateHighS(self, value):
    self.upper_threshold[1] = value
  def updateLowV(self, value):
    self.lower_threshold[2] = value
  def updateHighV(self, value):
    self.upper_threshold[2] = value
  def updateDisparity(self, value):
    self.disparity_ratio = float(value) * 0.1
  def updatepara1L(self, value):
    self.para1L = value
  def updatepara2L(self, value):
    self.para2L = value
  def updatepara1R(self, value):
    self.para1R = value
  def updatepara2R(self, value):
    self.para2R = value

  def left_callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError, e:
      print e
    # Use keyword args: cv2.remap's fifth positional slot is dst, not borderMode.
    cv_image = cv2.remap(cv_image, self.Left_Stereo_Map[0], self.Left_Stereo_Map[1],
                         cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, self.lower_threshold, self.upper_threshold)
    mask = cv2.morphologyEx(mask,cv2.MORPH_DILATE, np.ones((3,3),np.uint8),iterations = 3)
    
    res = cv2.bitwise_and(cv_image,cv_image, mask= mask )
    cv2.imshow("test",res)

    cv_image = res
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 3)
    rows = gray.shape[0]
    
    self.circles_1 = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8,
                                param1=self.para1L, param2=self.para2L,
                                minRadius=0, maxRadius=0)
    self.points_L = []  
                             
    if self.circles_1 is not None:
      for circle in self.circles_1[0]:
        self.points_L.append(circle[0:3])
    self.points_L = np.array(self.points_L, dtype = int)
    
    #print(self.points_L)
    if len(self.points_L) != 0:
      self.line_estimation(self.points_L.T,0.8,cv_image,True)
    cv_image = self.draw(self.circles_1,cv_image)
    self.cv_image_L = cv_image
    
    cv2.imshow("detected circles Left", cv_image)
    k = cv2.waitKey(3) & 0xFF
    if k == 113 or k == 27:
      rospy.signal_shutdown("User Exit")
Example #36
ys = np.array(ys)
rx, ry = xs - w, ys - h
r_ = (rx ** 2 + ry ** 2) ** 0.5
dist = ((h ** 2) + (w ** 2)) ** 0.5

cv2.imshow("Frame", img)

while 1:
    factor = (cv2.getTrackbarPos("Distortion_Factor", "Frame") + 1) / 100.0
    r = r_ / (dist / factor)
    theta = np.zeros(w * h * 4)
    theta[np.where(r == 0)] = 1.0
    indices = np.where(r != 0)
    theta[indices] = np.arctan(r[indices]) / r[indices]
    sxs, sys = rx * theta + w / 2, ry * theta + h / 2
    a = np.reshape(sxs, (-1, w * 2)).astype(np.float32)
    b = np.reshape(sys, (-1, w * 2)).astype(np.float32)
    dst = cv2.remap(img, a, b, cv2.INTER_LINEAR)
    cv2.imshow("Frame", dst)
    if cv2.waitKey(1) == 27:
        break

cap = cv2.VideoCapture("/home/jaiama/Auto_Camera_Calibration/data_videos/vlc-record-2019-05-17-10h41m37s-GP030106.MP4-.mp4")
while 1:
    ret, frame = cap.read()
    dst = cv2.remap(frame, a, b, cv2.INTER_LINEAR)
    cv2.imshow("Frame", dst)
    if cv2.waitKey(1) == 27:
        break

Example #37
    # newCameraMtxL, roiL = cv.getOptimalNewCameraMatrix(Rc['M1'], Rc['dist1'], (wl, hl), 1, (wl, hl))
    # udImgL = cv.undistort(imgL, Rc['M1'], Rc['dist1'], None, newCameraMtxL)

    rectify_scale = 0  # 0=full crop, 1=no crop
    R1, R2, P1, P2, Q, roi1, roi2 = cv.stereoRectify(Rc["M1"],
                                                     Rc["dist1"],
                                                     Rc["M2"],
                                                     Rc["dist2"], (640, 480),
                                                     Rc["R"],
                                                     Rc["T"],
                                                     alpha=rectify_scale)
    left_maps = cv.initUndistortRectifyMap(Rc["M1"], Rc["dist1"], R1, P1,
                                           (640, 480), cv.CV_16SC2)
    right_maps = cv.initUndistortRectifyMap(Rc["M2"], Rc["dist2"], R2, P2,
                                            (640, 480), cv.CV_16SC2)
    udImgL = cv.remap(imgL, left_maps[0], left_maps[1], cv.INTER_LANCZOS4)
    udImgR = cv.remap(imgR, right_maps[0], right_maps[1], cv.INTER_LANCZOS4)

    udImgR = cv.cvtColor(udImgR, cv.COLOR_BGR2GRAY)  # converting to grayScale
    udImgL = cv.cvtColor(udImgL, cv.COLOR_BGR2GRAY)
    good = []
    pts1 = []
    pts2 = []
    sift = cv.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT

    kp1, des1 = sift.detectAndCompute(udImgL, None)
    kp2, des2 = sift.detectAndCompute(udImgR, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
Example #38
u1 = max(img1.shape[1] - 1, u1_im_)
ur = np.arange(u0, u1 + 1)
v0 = min(0, v0_im_)
v1 = max(img1.shape[0] - 1, v1_im_)
vr = np.arange(v0, v1 + 1)
cw = u1 - u0 + 1
ch = v1 - v0 + 1
print(u0, u1, v0, v1, ch, cw)

u, v = np.meshgrid(ur, vr)

u = np.float32(u)
v = np.float32(v)  # remap requires the map matrices to be CV_32F
warped_img1 = cv2.remap(img1,
                        u,
                        v,
                        cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_REFLECT_101)
mask1 = np.ones((img1.shape[0], img1.shape[1]))
warped_mask1 = cv2.remap(mask1, u, v, cv2.INTER_LINEAR)

z_ = H[2, 0] * u + H[2, 1] * v + H[2, 2]
map_x = (H[0, 0] * u + H[0, 1] * v + H[0, 2]) / z_
map_y = (H[1, 0] * u + H[1, 1] * v + H[1, 2]) / z_
map_x = np.float32(map_x)
map_y = np.float32(map_y)
warped_img2 = cv2.remap(img2,
                        map_x,
                        map_y,
                        cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_REFLECT_101)
Example #39
def Depth_map():
    global cam_ON, cam_EYE
    Cam_coefficient = '/home/pi/Desktop/PyCharm/Cam_coefficients.npz'

    try:
        calibration = np.load(Cam_coefficient, allow_pickle=False)
        flag = True
        print("Loading camera coefficients from cache file at {0}".format(
            Cam_coefficient))
        imageSize = tuple(calibration["imageSize"])
        leftMapX = calibration["leftMapX"]
        leftMapY = calibration["leftMapY"]
        leftROI = tuple(calibration["leftROI"])
        rightMapX = calibration["rightMapX"]
        rightMapY = calibration["rightMapY"]
        rightROI = tuple(calibration["rightROI"])
        Q = calibration["dispartityToDepthMap"]
    except IOError:
        print("Cache file at {0} not found".format(Cam_coefficient))
        flag = False

    if flag:
        cam_EYE = 'L'
        cam_ON = camera.Open("PI", cam_EYE)
        cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
        # capture image
        img_L = camera.Capture(cam_ON, TYPE)
        # img_L = cv2.flip(img_L, -1)
        gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)
        cam_ON.close()

        cam_EYE = 'R'
        cam_ON = camera.Open("PI", cam_EYE)
        cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
        # capture image
        # time.sleep(0.1)
        img_R = camera.Capture(cam_ON, TYPE)
        # img_R = cv2.flip(img_L, -1)
        gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)
        cam_ON.close()

        fixedLeft = cv2.remap(gray_L, leftMapX, leftMapY, cv2.INTER_LINEAR)
        cv2.imshow('fixedLeft', fixedLeft)
        # cv2.imwrite('/home/pi/Desktop/PyCharm/fixedLeft.jpg', fixedLeft)
        fixedRight = cv2.remap(gray_R, rightMapX, rightMapY, cv2.INTER_LINEAR)
        cv2.imshow('fixedRight', fixedRight)
        # cv2.imwrite('/home/pi/Desktop/PyCharm/fixedRight.jpg', fixedRight)

        # depth: original depth map, used to calculate 3D distance
        # depth_no: normalised depth map, used for display
        # depth_tho: normalised depth map with threshold applied, used to optimise
        depth, depth_no, depth_tho = Depth_cal(fixedLeft, fixedRight, leftROI,
                                               rightROI)
        # calculate the real world distance
        threeD = cv2.reprojectImageTo3D(depth.astype(np.float32) / 16., Q)
        dis_set = cal_3d_dist(depth_tho, threeD)

        for item in dis_set:
            cv2.drawContours(depth_tho, [item[0]], -1, 255, 2)
            cv2.putText(depth_tho, "%.2fcm" % item[1],
                        (item[2][0], item[2][1] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 255, 3)

        cv2.imshow("depth", depth_no)
        cv2.imshow("depth_threshold", depth_tho)
        key = cv2.waitKey(-1)
        if key == 27:
            cv2.destroyAllWindows()
Example #40
 def apply_displacement_field_to_image(image, disp_field_map):
     trans_map_i, trans_map_j = displacement_map_to_transformation_maps(
         disp_field_map)
     misaligned_image = cv2.remap(image, trans_map_j, trans_map_i,
                                  cv2.INTER_CUBIC)
     return misaligned_image
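
displacement_map_to_transformation_maps is not shown in this snippet. A hypothetical reconstruction, assuming disp_field_map has shape (H, W, 2) holding (di, dj) pixel offsets, would add the offsets to an identity grid and return float32 maps in the (i, j) order the call above expects.

import numpy as np

def displacement_map_to_transformation_maps(disp_field_map):
    # Hypothetical: absolute sampling maps = identity grid + displacement.
    rows, cols = np.indices(disp_field_map.shape[:2], dtype=np.float32)
    trans_map_i = rows + disp_field_map[..., 0]
    trans_map_j = cols + disp_field_map[..., 1]
    return trans_map_i, trans_map_j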
Example #41
File: temp.py Project: adam99goat/lab_code
I2=cv.imread(path2)


R1, R2, P1, P2, Q, validPixROI1, validPixROI2=\
    cv.stereoRectify(cameraMatrix1,distCoeffs1,
                     cameraMatrix2,distCoeffs2,imageSize,R,T,
                     flags=cv.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0,0))
left_map1, left_map2=cv.initUndistortRectifyMap(cameraMatrix1,
                                                distCoeffs1,R1,P1,imageSize,cv.CV_16SC2)
right_map1, right_map2=cv.initUndistortRectifyMap(cameraMatrix2,
                                                  distCoeffs2,R2,P2,imageSize,cv.CV_16SC2)




I1_rectified=cv.remap(I1,left_map1,left_map2,cv.INTER_LINEAR)
I2_rectified=cv.remap(I2,right_map1,right_map2,cv.INTER_LINEAR)

imgL = cv.cvtColor(I1_rectified, cv.COLOR_BGR2GRAY)
imgR = cv.cvtColor(I2_rectified, cv.COLOR_BGR2GRAY)

 
cv.imwrite("/Users/zhouying/Desktop/I1_rectified.png", I1_rectified)
cv.imwrite("/Users/zhouying/Desktop/I2_rectified.png", I2_rectified)

minDisparity = 0
numDisparities = 192
SADWindowSize = 5
# P1 = 8 * 3 * SADWindowSize * SADWindowSize
P1 = 1
# P2 = 32 * 3 * SADWindowSize * SADWindowSize
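The example stops mid-setup. A minimal sketch of the matcher it appears to be building, assuming the parameters above (P2 mirrors the commented-out formula; numpy as np is assumed imported):

P2 = 32 * 3 * SADWindowSize * SADWindowSize
stereo = cv.StereoSGBM_create(minDisparity=minDisparity,
                              numDisparities=numDisparities,
                              blockSize=SADWindowSize,
                              P1=P1, P2=P2)
disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0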
Example #42
File: normal.py Project: lizhuoren320/code
        flags.READ_FAIL_CTR = 0
        flags.frame_id += 1

    if 0 == flags.frame_id % cfgs.FIND_CHESSBOARD_DELAY_MOD:
        current = data_t(raw_frame)
        history.append(current)

    if len(history) >= cfgs.N_CALIBRATE_SIZE and history.updated:
        normal.update(history.get_corners(), raw_frame.shape[1::-1])
        calib = normal.data
        calib.map1, calib.map2 = cv2.initUndistortRectifyMap(
            calib.camera_mat, calib.dist_coeff, np.eye(3, 3), calib.camera_mat,
            raw_frame.shape[1::-1], cv2.CV_16SC2)

    if len(history) >= cfgs.N_CALIBRATE_SIZE:
        undist_frame = cv2.remap(raw_frame, calib.map1, calib.map2,
                                 cv2.INTER_LINEAR)
        cv2.imshow("undist_frame", undist_frame)

    cv2.imshow("raw_frame", raw_frame)
    key = cv2.waitKey(1)
    if key == 27: break

with open('undistort.py', 'w+') as f:
    script = """
import cv2
import numpy as np
camera_mat = np.array({})
dist_coeff = np.array({})
frame_shape = None
def undistort(frame):
    global frame_shape
Example #43
    def get_interest_region(self, scale_img, cv_kpts, standardize=True):
        """Get the interest region around a keypoint.
        Args:
            scale_img: DoG image in the scale space.
            cv_kpts: A list of OpenCV keypoints.
            standardize: (True by default) Whether to standardize patches as network inputs.
        Returns:
            all_patches: standardized patches around the keypoints, or None
                if no patches were extracted.
        """
        batch_input_grid = []
        all_patches = []
        bs = 30  # limited by OpenCV remap implementation
        for idx, cv_kpt in enumerate(cv_kpts):
            # preprocess
            if self.pyr_off:
                scale = 1
            else:
                _, _, scale = self.unpack_octave(cv_kpt)
            size = cv_kpt.size * scale * 0.5
            ptf = (cv_kpt.pt[0] * scale, cv_kpt.pt[1] * scale)
            ori = 0 if self.ori_off else (360. - cv_kpt.angle) * (np.pi / 180.)
            radius = np.round(self.sift_descr_scl_fctr * size * np.sqrt(2) *
                              (self.sift_descr_width + 1) * 0.5)
            radius = np.minimum(radius,
                                np.sqrt(np.sum(np.square(scale_img.shape))))
            # construct affine transformation matrix.
            affine_mat = np.zeros((3, 2), dtype=np.float32)
            m_cos = np.cos(ori) * radius
            m_sin = np.sin(ori) * radius
            affine_mat[0, 0] = m_cos
            affine_mat[1, 0] = m_sin
            affine_mat[2, 0] = ptf[0]
            affine_mat[0, 1] = -m_sin
            affine_mat[1, 1] = m_cos
            affine_mat[2, 1] = ptf[1]
            # get input grid.
            input_grid = np.matmul(self.output_grid, affine_mat)
            input_grid = np.reshape(input_grid, (-1, 1, 2))
            batch_input_grid.append(input_grid)

            if (len(batch_input_grid) != 0 and
                    len(batch_input_grid) % bs == 0) or idx == len(cv_kpts) - 1:
                # sample image pixels.
                batch_input_grid_ = np.concatenate(batch_input_grid, axis=0)
                patches = cv2.remap(scale_img.astype(np.float32),
                                    batch_input_grid_,
                                    None,
                                    interpolation=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_REPLICATE)
                patches = np.reshape(
                    patches,
                    (len(batch_input_grid), self.patch_size, self.patch_size))
                # standardize patches.
                if standardize:
                    patches = (patches - np.mean(patches, axis=(1, 2), keepdims=True)) / \
                        (np.std(patches, axis=(1, 2), keepdims=True) + 1e-8)
                all_patches.append(patches)
                batch_input_grid = []
        if len(all_patches) != 0:
            all_patches = np.concatenate(all_patches, axis=0)
        else:
            all_patches = None
        return all_patches
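Note that the remap call above passes the sampling grid as map1 with map2=None: cv2.remap also accepts a single two-channel float32 map of absolute (x, y) source coordinates, which is what lets this method sample many patches in one call. A minimal standalone sketch of that calling convention:

import cv2
import numpy as np

img = np.random.rand(64, 64).astype(np.float32)
# a 32x32 grid of absolute (x, y) source coordinates, reshaped to the same
# (N, 1, 2) layout as batch_input_grid_ above
grid = np.stack(np.meshgrid(np.arange(32, dtype=np.float32),
                            np.arange(32, dtype=np.float32)), axis=-1)
grid = grid.reshape(-1, 1, 2)
patch = cv2.remap(img, grid, None, interpolation=cv2.INTER_LINEAR)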
Example #44
class StereoCalibration(object):
    def __init__(self, filepath):
        
        # termination criteria
        self.criteria = (cv2.TERM_CRITERIA_EPS +
                         cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        self.criteria_cal = (cv2.TERM_CRITERIA_EPS +
                             cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-5)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        self.objp = np.zeros((6*8, 3), np.float32)
        self.objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        self.objpoints = []  # 3d point in real world space
        self.imgpoints_l = []  # 2d points in image plane.
        self.imgpoints_r = []  # 2d points in image plane.

        self.cal_path = filepath
        

        images_right = glob.glob('right*.png')
        images_left = glob.glob('left*.jpg')
        images_left.sort()
        images_right.sort()
        c=0
        s=0

        for i, fname in enumerate(images_right):
            img_l = cv2.imread(images_left[i])
            img_r = cv2.imread(images_right[i])

            gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(gray_l, (8, 6), None)
            ret_r, corners_r = cv2.findChessboardCorners(gray_r, (8, 6), None)

            # If found, add object points, image points (after refining them)
            

            if ret_l and ret_r:
                self.objpoints.append(self.objp)
                rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11),
                                      (-1, -1), self.criteria)
                self.imgpoints_l.append(corners_l)

                # Draw and display the corners
                ret_l = cv2.drawChessboardCorners(img_l, (8, 6),
                                                  corners_l, ret_l)
                
                print(images_left[i])
                
                c+=1

            #if ret_r is True:
                rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11),
                                      (-1, -1), self.criteria)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                ret_r = cv2.drawChessboardCorners(img_r, (8, 6),
                                                  corners_r, ret_r)
                
                print(images_right[i])
                
                s+=1
                
        
        rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_l, gray_l.shape[::-1], None, None)
        rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_r, gray_l.shape[::-1], None, None)

        self.camera_model= self.stereo_calibrate(gray_l.shape[::-1])

        

    def stereo_calibrate(self, dims):
        flags = 0
        flags |= cv2.CALIB_FIX_INTRINSIC
        flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
        flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        flags |= cv2.CALIB_FIX_ASPECT_RATIO
        flags |= cv2.CALIB_ZERO_TANGENT_DIST
        flags |= cv2.CALIB_RATIONAL_MODEL
        flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        flags |= cv2.CALIB_FIX_K3
        flags |= cv2.CALIB_FIX_K4
        flags |= cv2.CALIB_FIX_K5

        stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                                cv2.TERM_CRITERIA_EPS, 100, 1e-5)
        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            self.objpoints, self.imgpoints_l,
            self.imgpoints_r, self.M1, self.d1, self.M2,
            self.d2, dims,
            criteria=stereocalib_criteria, flags=flags)

        



        print('Intrinsic_mtx_1', M1)
        print('dist_1', d1)
        print('Intrinsic_mtx_2', M2)
        print('dist_2', d2)        
        print('R', R)
        print('T', T)
        print('E', E)
        print('F', F)

        (leftRectification, rightRectification, leftProjection,
         rightProjection, dispartityToDepthMap, leftROI,
         rightROI) = cv2.stereoRectify(M1, d1, M2, d2, (640, 480), R, T,
                                       None, None, None, None, None,
                                       cv2.CALIB_ZERO_DISPARITY, alpha=0)

        leftMapX, leftMapY = cv2.initUndistortRectifyMap(M1, d1, leftRectification,leftProjection, dims, cv2.CV_32FC1)
        rightMapX, rightMapY = cv2.initUndistortRectifyMap(M2, d2, rightRectification,rightProjection, dims, cv2.CV_32FC1)



        stereoMatcher = cv2.StereoBM_create()

        leftFrame = cv2.imread('left47.jpg')
        rightFrame = cv2.imread('right47.png')

        fixedLeft = cv2.remap(leftFrame, leftMapX, leftMapY, cv2.INTER_LINEAR)
        fixedRight = cv2.remap(rightFrame, rightMapX, rightMapY,cv2.INTER_LINEAR)
        cv2.imshow('leftRect',fixedLeft)
        cv2.waitKey(0)
        cv2.imshow('rightRect',fixedRight)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        

     

        # for i in range(len(self.r1)):
        #     print("--- pose[", i+1, "] ---")
        #     self.ext1, _ = cv2.Rodrigues(self.r1[i])
        #     self.ext2, _ = cv2.Rodrigues(self.r2[i])
        #     print('Ext1', self.ext1)
        #     print('Ext2', self.ext2)

        print('')

        camera_model = dict([('M1', M1), ('M2', M2), ('dist1', d1),
                            ('dist2', d2), ('rvecs1', self.r1),
                            ('rvecs2', self.r2), ('R', R), ('T', T),
                            ('E', E), ('F', F)])

        cv2.destroyAllWindows()
        return camera_model
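The loader at the top of this section reads keys such as leftMapX and dispartityToDepthMap from a cache file. A sketch (an assumption, not part of the original class) of how stereo_calibrate could write that cache with np.savez_compressed, reusing the variable names above and the original's "dispartityToDepthMap" spelling so the two snippets match:

np.savez_compressed('calibration_cache.npz',
                    imageSize=dims,
                    leftMapX=leftMapX, leftMapY=leftMapY, leftROI=leftROI,
                    rightMapX=rightMapX, rightMapY=rightMapY,
                    rightROI=rightROI,
                    dispartityToDepthMap=dispartityToDepthMap)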
Example #45
# Call the camera.
videoCapture = cv2.VideoCapture(cv2.CAP_DSHOW + 1)
camera_width = 1280
camera_height = 720
videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width * 2)
videoCapture.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

while (videoCapture.isOpened()):
    ret, frame = videoCapture.read()
    if not ret:
        break
    frameL = frame[:, camera_width:, :]
    frameR = frame[:, :camera_width, :]
    # Rectify the images for rotation and alignment.
    # Note: the border arguments must be passed by keyword, otherwise they
    # are consumed as the optional dst parameter.
    leftNice = cv2.remap(frameL, mapXL, mapYL, cv2.INTER_LANCZOS4,
                         borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    rightNice = cv2.remap(frameR, mapXR, mapYR, cv2.INTER_LANCZOS4,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    # Convert the images from color(BGR) to gray.
    grayL = cv2.cvtColor(leftNice, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(rightNice, cv2.COLOR_BGR2GRAY)
    stereo = cv2.StereoSGBM_create(minDisparity=minDisp,
                                   numDisparities=numDisp,
                                   blockSize=windowSize,
                                   P1=8 * 3 * windowSize**2,
                                   P2=32 * 3 * windowSize**2,
                                   disp12MaxDiff=5,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)
    # Compute the 2 images for the DepthImage
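    # The example stops here; a plausible continuation (an assumption, not
    # the original code): compute the disparity and normalise it for display.
    disparity = stereo.compute(grayL, grayR).astype(np.float32) / 16.0
    disp_vis = cv2.normalize(disparity, None, 0, 255,
                             cv2.NORM_MINMAX).astype(np.uint8)
    cv2.imshow('disparity', disp_vis)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break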
Example #46
img = cv2.imread(images[0])
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
print(img.shape)

print("------------------使用undistort函数-------------------")
dst = cv2.undistort(img,mtx,dist,None,newcameramtx)
x,y,w,h = roi
print(roi)
dstc = dst[y:y+h,x:x+w]
cv2.imwrite('calibresultu.jpg', dstc)
print("方法一:dst的大小为:", dstc.shape)
 
print("-------------------使用重映射的方式-----------------------")
mapx,mapy = cv2.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w,h),5)  # 获取映射方程
dst = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)      # 重映射
x,y,w,h = roi
dstd = dst[y:y+h,x:x+w]
cv2.imwrite('calibresultm.jpg', dstd)
print("方法二:dst的大小为:", dstd.shape)        # 图像比方法一的小

print("-------------------计算反向投影误差-----------------------")
tot_error = 0
for i in range(0,len(obj_points)):
    img_points2, _ = cv2.projectPoints(obj_points[i],rvecs[i],tvecs[i],mtx,dist)
    error = cv2.norm(img_points[i],img_points2, cv2.NORM_L2)/len(img_points2)
    tot_error += error
 
mean_error = tot_error/len(obj_points)
print("total error: ", tot_error)
print("mean error: ", mean_error)
Example #47
        imgNotGood = fname

cv.destroyAllWindows()

if npatternfound > 1:
    print(npatternfound, " good images found")
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints,
                                                      gray.shape[::-1], None,
                                                      None)
    img = cv.imread('/home/farman/Downloads/object1.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                     (w, h))
    mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                            (w, h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    print(roi)
    dst = dst[y:y + h, x:x + w]
    cv.imwrite('calibresult.png', dst)
    print(ret)
    print('calibration matrix')
    print(mtx)
    print('distortion coefficients')
    print(dist)
    total_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                         dist)
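        # The example is cut off here; presumably the loop accumulates the
        # reprojection error exactly as in the previous example (an assumption):
        error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
        total_error += error
    print("mean error: ", total_error / len(objpoints))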
Example #48
        print("--------------------------------------")
        myStart = time.time()

        # grab an image from the camera
        file = camera.getFileName()
        imageBW = camera.getImage()
        
        # we're out of images
        if imageBW is None:
            break
            
        # AprilDetect, after accounting for distortion  (if fisheye)
        if camParams['fisheye']:
            dim1 = imageBW.shape[:2][::-1]  #dim1 is the dimension of input image to un-distort
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, numpy.eye(3), K, dim1, cv2.CV_16SC2)
            undistorted_img = cv2.remap(imageBW, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
           
            tags = at_detector.detect(undistorted_img, True, camParams['cam_params'], args.tagSize/1000)
        else:
            tags = at_detector.detect(imageBW, True, camParams['cam_params'], args.tagSize/1000)
            
        if file:
            print("File: {0}".format(file))
        
        # get time to capture and convert
        print("Time to capture and detect = {0:.3f} sec, found {1} tags".format(time.time() - myStart, len(tags)))
        
        for tag in tags:
                        
            tagpos = getPos(getTransform(tag))
            tagrot = getRotation(getTransform(tag))
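A performance note: map1 and map2 above depend only on K, D and the frame size, so on a fixed camera they could be computed once before the frame loop instead of on every iteration. A sketch using the example's own names (first_imageBW is a hypothetical first frame used to read the size):

# once, before the frame loop:
dim1 = first_imageBW.shape[:2][::-1]
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, numpy.eye(3), K,
                                                 dim1, cv2.CV_16SC2)
# then, inside the loop, only the cheap remap remains:
# undistorted_img = cv2.remap(imageBW, map1, map2,
#                             interpolation=cv2.INTER_LINEAR,
#                             borderMode=cv2.BORDER_CONSTANT)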
Example #49
    def renderfile(self, outpath="Stabilized.mp4", out_size=(1920, 1080)):

        out = cv2.VideoWriter(outpath, -1, 29.97, (1920 * 2, 1080))
        crop = (int((self.width - out_size[0]) / 2),
                int((self.height - out_size[1]) / 2))

        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 21 * 30)

        i = 0
        while (True):
            # Read next frame
            success, frame = self.cap.read()

            frame_num = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
            print("FRAME: {}, IDX: {}".format(frame_num, i))

            if success:
                i += 1

            if i > 1000:
                break

            if success and i > 0:

                frame_undistort = cv2.remap(frame,
                                            self.map1,
                                            self.map2,
                                            interpolation=cv2.INTER_LINEAR,
                                            borderMode=cv2.BORDER_CONSTANT)

                frame_out = self.undistort.get_rotation_map(
                    frame_undistort, self.stab_transform[frame_num - 1])

                # Fix border artifacts
                frame_out = frame_out[crop[1]:crop[1] + out_size[1],
                                      crop[0]:crop[0] + out_size[0]]
                frame_undistort = frame_undistort[crop[1]:crop[1] +
                                                  out_size[1],
                                                  crop[0]:crop[0] +
                                                  out_size[0]]

                #out.write(frame_out)
                #print(frame_out.shape)

                # If the image is too big, resize it.
                #%if(frame_out.shape[1] > 1920):
                #		frame_out = cv2.resize(frame_out, (int(frame_out.shape[1]/2), int(frame_out.shape[0]/2)));

                size = np.array(frame_out.shape)
                frame_out = cv2.resize(frame_out, (int(size[1]), int(size[0])))

                frame = cv2.resize(frame_undistort,
                                   ((int(size[1]), int(size[0]))))
                concatted = cv2.resize(cv2.hconcat([frame_out, frame], 2),
                                       (1920 * 2, 1080))
                out.write(concatted)
                cv2.imshow("Before and After", concatted)
                cv2.waitKey(5)

        # When everything done, release the capture
        out.release()
Example #50
    def get_video_transform_mtx(self, videopath, background=None,
                output_dir=None, correct_fisheye=False, 
                fisheye_kwargs={}, overwrite=False):

        # ---------------------------------- Set up ---------------------------------- #
        # Check video is good
        check_file_exists(videopath, raise_error=True)

        # Get output directory and save name and check if it exists
        if output_dir is None:
            output_dir = os.path.split(videopath)[0]
        video_name = os.path.split(videopath)[-1].split(".")[0]
        save_path = os.path.join(output_dir, video_name+"_transform_mtx.npy")

        if save_path in list_dir(output_dir) and not overwrite:
            print("A transform matrix already exists, loading it")
            return np.load(save_path)

        # Get background
        if background is None:
            background = get_background(videopath)

        # Check if we need to apply fisheye correction
        if correct_fisheye:
            raise NotImplementedError("Sorry, fisheye correction not ready")
            maps = np.load(fisheye_map_location)
            map1 = maps[:, :, 0:2]
            map2 = maps[:, :, 2]*0

            bg_copy = cv2.copyMakeBorder(
                background, y_offset,
                int((map1.shape[0] - background.shape[0]) - y_offset),
                x_offset,
                int((map1.shape[1] - background.shape[1]) - x_offset),
                cv2.BORDER_CONSTANT, value=0)

            bg_copy = cv2.remap(bg_copy, map1, map2,
                                interpolation=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=0)
            bg_copy = bg_copy[y_offset:-int((map1.shape[0] - background.shape[0]) - y_offset),
                                x_offset:-int((map1.shape[1] - background.shape[1]) - x_offset)]
        else:
            bg_copy = background.copy()


        # ------------------------------ Initialize GUI ------------------------------ #
        # initialize clicked point arrays
        background_data = dict(bg_copy=bg_copy, clicked_points=np.array(([], [])).T) # [background_copy, np.array(([], [])).T]
        arena_data = dict(temp=[], points=[])  # [[], np.array(([], [])).T]

        # add 1-2-3-4 markers to model arena to show where points need to go
        for i, point in enumerate(self.points.astype(np.uint32)):
            self.arena = cv2.circle(self.arena, (point[0], point[1]), 3, 255, -1)
            self.arena = cv2.circle(self.arena, (point[0], point[1]), 4, 0, 1)
            cv2.putText(self.arena, str(i+1), tuple(point), 0, .55, 150, thickness=2)

            point = np.reshape(point, (1, 2))
            arena_data['points'] = np.concatenate((arena_data['points'], point)) # Append point to arena data

        # initialize windows
        cv2.startWindowThread()
        cv2.namedWindow('background')
        cv2.imshow('background', bg_copy)
        cv2.namedWindow('model arena')
        cv2.imshow('model arena', arena)

        # create functions to react to clicked points
        cv2.setMouseCallback('background', self.select_transform_points, background_data)  # Mouse callback

        
        # --------------------------- User interaction loop -------------------------- #
        # take in clicked points until all points are clicked or user presses 'q'
        while True: 
            cv2.imshow('background', bg_copy)

            number_clicked_points = background_data['clicked_points'].shape[0]
            if number_clicked_points == len(self.points):
                break
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

        # perform projective transform and register background
        M = cv2.estimateRigidTransform(background_data['clicked_points'], arena_data['points'], False)
        registered_background = cv2.warpAffine(bg_copy, M, background.shape[::-1])

        # Start user interaction to refine the matrix
        M = self.get_mtrx_user_interaction()

        return M
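Note that cv2.estimateRigidTransform used above was removed in OpenCV 4; on current builds the closest replacement is cv2.estimateAffinePartial2D:

# OpenCV 4.x equivalent of the estimateRigidTransform call above:
# M, inliers = cv2.estimateAffinePartial2D(
#     background_data['clicked_points'].astype(np.float32),
#     arena_data['points'].astype(np.float32))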
Example #51
    if len(self.points_L) != 0:
      self.line_estimation(self.points_L.T,0.8,cv_image,True)
    cv_image = self.draw(self.circles_1,cv_image)
    self.cv_image_L = cv_image
    
    cv2.imshow("detected circles Left", cv_image)
    k = cv2.waitKey(3) & 0xFF
    if k == 113 or k == 27:
      rospy.signal_shutdown("User Exit")

  def right_callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)
    # border arguments must be passed by keyword, or they are consumed as dst
    cv_image = cv2.remap(cv_image, self.Right_Stereo_Map[0],
                         self.Right_Stereo_Map[1], cv2.INTER_LANCZOS4,
                         borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 3)
    rows = gray.shape[0]
    self.circles_2 = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8,
                                param1=self.para1R, param2=self.para2R,
                                minRadius=0, maxRadius=0)
    self.points_R = []                           
    if self.circles_2 is not None:
      for circle in self.circles_2[0]:
        self.points_R.append(circle[0:3])
    self.points_R = np.array(self.points_R, dtype = int)

    if len(self.points_R) != 0:
      self.line_estimation(self.points_R.T,0.9,cv_image,False)
    cv_image = self.draw(self.circles_2,cv_image)
Example #52
def unwarp(img, xmap, ymap):
    # apply the unwarping map to our image
    output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
    result = Image(output, cv2image=True)
    return result
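The xmap/ymap consumed by unwarp are built elsewhere; one common construction unrolls an annular 360-degree mirror image into a rectangular panorama. A sketch with illustrative names (cx, cy: mirror centre; r_inner, r_outer: donut radii), not the original code:

import numpy as np

def build_unwarp_maps(cx, cy, r_inner, r_outer, out_w, out_h):
    # one output column per angle, one output row per radius
    thetas = np.linspace(0, 2 * np.pi, out_w, dtype=np.float32)
    radii = np.linspace(r_inner, r_outer, out_h, dtype=np.float32)
    theta_grid, r_grid = np.meshgrid(thetas, radii)
    xmap = (cx + r_grid * np.cos(theta_grid)).astype(np.float32)
    ymap = (cy + r_grid * np.sin(theta_grid)).astype(np.float32)
    return xmap, ymap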
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)
markerLength = 250  # Here, our measurement unit is millimeters.
arucoParams = aruco.DetectorParameters_create()

imgDir = "imgSequence/6x6_1000-0"  # Specify the image directory
imgFileNames = [os.path.join(imgDir, fn) for fn in next(os.walk(imgDir))[2]]
nbOfImgs = len(imgFileNames)

count = 0
for i in range(0, nbOfImgs):
    img = cv2.imread(imgFileNames[i], cv2.IMREAD_COLOR)
    # Enable the following 2 lines to save the original images.
    # filename = "original" + str(i).zfill(3) +".jpg"
    # cv2.imwrite(filename, img)
    imgRemapped = cv2.remap(img, map1, map2, cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_CONSTANT)  # for fisheye remapping
    imgRemapped_gray = cv2.cvtColor(
        imgRemapped,
        cv2.COLOR_BGR2GRAY)  # aruco.detectMarkers() requires gray image
    filename = "remappedgray" + str(i).zfill(3) + ".jpg"
    cv2.imwrite(filename, imgRemapped_gray)

    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        imgRemapped_gray, aruco_dict, parameters=arucoParams)  # Detect aruco
    if ids is not None:  # if aruco marker detected
        rvec, tvec, trash = aruco.estimatePoseSingleMarkers(
            corners, markerLength, camera_matrix,
            dist_coeffs)  # posture estimation from a single marker
        imgWithAruco = aruco.drawDetectedMarkers(imgRemapped, corners, ids,
                                                 (0, 255, 0))
        imgWithAruco = aruco.drawAxis(
def lane_pipline():
    start = time.time()
    end = time.time()

    #pts1 = np.float32([[90,1], [230,1], [320,203], [1, 203]])
    pts1 = np.float32([[115, 108], [190, 108], [264, 197], [61, 198]])
    pts2 = np.float32([[100,200], [200, 200], [200, 300], [100, 300]])
    erode_k = np.uint8([[0,0,1,0,0],[0,0,1,0,0],[1,1,1,1,1],[0,0,1,0,0],[0,0,1,0,0]])
    M = cv2.getPerspectiveTransform(pts1,pts2)
    idx = 0
    mapx = np.load('caliberation/caliberation/mapx.npy')
    mapy = np.load('caliberation/caliberation/mapy.npy')

    # capture frames from the camera
    fushi = np.ones((320,300), np.uint8)*255
    mask = np.ones((240,320), np.uint8)*255
    mask = cv2.remap(mask, mapx, mapy, cv2.INTER_NEAREST)
    mask = cv2.warpPerspective(mask, M,(300, 320))
    mask = cv2.erode(mask, erode_k)
    mask = cv2.erode(mask, erode_k)
    for i in range(1, 299):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        end = time.time()
        print(1 / (end - start))
        start = time.time()
        idx = i
        image = cv2.imread('data_all/%s.jpg' %str(idx),1)
        undis_image = cv2.remap(image, mapx, mapy, cv2.INTER_NEAREST)
        img_gray = cv2.cvtColor(undis_image, cv2.COLOR_BGR2GRAY)
        img_cont = np.copy(img_gray)
        r, thresh = cv2.threshold(img_cont, 130, 255, cv2.THRESH_BINARY)
        img, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        max_contour = 0
        max_cnt = 0
        for contour in contours:
            #img_cont = cv2.drawContours(thresh, contour, -1, (0, 0, 125), 3)
            area = cv2.contourArea(contour)
            if(area > max_contour):
                max_contour = area
                max_cnt = contour
        zeros = np.zeros_like(image)
        #print max_cnt
        img_cont = cv2.drawContours(zeros, max_cnt, -1, (255,0,0),cv2.FILLED)


        # cut out image
        #image = ii.image_cut_out(image, 0, 320, 0, 200)

        #test_image = cv2.imread('shit.jpg', 1)

        #rows, cols, ch = test_image.shape

        cv2.warpPerspective(img_gray, M,(300, 320),fushi,cv2.INTER_CUBIC)
        kernel = np.ones((5,5), np.float32) / 25
        #fushi = cv2.equalizeHist(fushi)
        test = np.zeros((300,320,3),np.uint8)
        dst = cv2.filter2D(fushi, -1, kernel)
        #fushi = cv2.filter2D(fushi, -1, kernel)
        #print dst

        dst = cv2.Canny(dst, 40, 90)
        thresh = cv2.dilate(thresh, erode_k)
        thresh = cv2.dilate(thresh, erode_k)
        #dst = cv2.dilate(dst,erode_k)
        dst = cv2.bitwise_and(dst,dst, mask=mask)
        #dst = cv2.bitwise_and(dst,dst, mask=thresh)
        #dst = cv2.dilate(dst, erode_k)
        dst = cv2.dilate(dst, erode_k)
        #dst = cv2.warpPerspective(dst, M, (200,320))
        #lines = cv2.HoughLines(dst,1, np.pi/180, 1)
        #print lines


        ret = line_fit(dst, window_size=40, line_margin=200, vertical_margin=150)

        #plt.figure(1)
        #plt.axis([0, 300, 320, 0])
        co = 0.1
        left_fit = ret['left_fit']
        left_fit = np.poly1d(left_fit)
        right_fit = ret['right_fit']
        right_fit = np.poly1d(right_fit)
        left_y = left_fit(np.array(range(0,300)))
        left_x = np.array(range(0,300))
        right_y = right_fit(np.array(range(0, 300)))
        right_x = np.array(range(0, 300))
        #plt.plot(left_y, left_x,".")
        #plt.plot(ret['leftx'], ret['lefty'], "*")
        #plt.plot(right_y, right_x,".")
        #plt.plot(ret['rightx'], ret['righty'], "*")
        left_pts = np.stack((left_y, left_x), axis=1)
        right_pts = np.stack((right_y, right_x), axis=1)
        left_pts = left_pts.astype(np.int32)
        right_pts = right_pts.astype(np.int32)
        # print left_pts
        fushi = cv2.polylines(fushi, [left_pts], False, (255, 0, 0),1)
        fushi = cv2.polylines(fushi, [right_pts], False, (0, 0, 255), 1)
        try:
            for l in ret:
                # print l.points
                plt.plot(l.points[:, 0], l.points[:, 1], ".")
                co = co + 0.1
                #plt.plot(rlx, rly, "b.")
        except:
            pass

        cv2.imshow("Frame", dst)
        cv2.imshow("origin", fushi)
        #cv2.imshow("boundary", )
        cv2.imshow("lane_find", thresh)



        key = cv2.waitKey(100) & 0xFF

        # clear the stream in preparation for the next frame
        #rawCapture.truncate(0)
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            cv2.destroyAllWindows()
            break
Example #55
    if not in_front_of_both_cameras(first_inliers, second_inliers, R, T):
        print "ucuncu"
        # Third choice: R = U * Wt * Vt, T = u_3
        R = U.dot(W.T).dot(Vt)
        T = U[:, 2]
        print "dorduncu"
        if not in_front_of_both_cameras(first_inliers, second_inliers, R, T):

            # Fourth choice: R = U * Wt * Vt, T = -u_3
            T = - U[:, 2]

#perform the rectification
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, d, K2, d, first_img.shape[:2], R, T, alpha=1.0)
mapx1, mapy1 = cv2.initUndistortRectifyMap(K, d, R1, P1, first_img.shape[:2], cv2.CV_32F)
mapx2, mapy2 = cv2.initUndistortRectifyMap(K2, d, R2, P2, second_img.shape[:2], cv2.CV_32F)
img_rect1 = cv2.remap(first_img, mapx1, mapy1, cv2.INTER_LINEAR)
img_rect2 = cv2.remap(second_img, mapx2, mapy2, cv2.INTER_LINEAR)
print(mapx2)
# draw the images side by side
total_size = (max(img_rect1.shape[0], img_rect2.shape[0]), img_rect1.shape[1] + img_rect2.shape[1], 3)
img = np.zeros(total_size, dtype=np.uint8)
img[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
img[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2

# draw horizontal lines every 25 px across the side-by-side image
for i in range(20, img.shape[0], 25):
    cv2.line(img, (0, i), (img.shape[1], i), (110, 110, 110))

img = cv2.resize(img,(0,0), fx=0.5, fy=0.5)

cv2.imshow('rectified', img)
Example #56
    def update(self, event=None):
        if self.mag_msg is None:
            return

        if not self.is_dirty:
            return
        self.is_dirty = False

        rospy.loginfo("update")

        mag = self.bridge.imgmsg_to_cv2(self.mag_msg)
        print "input type", mag.shape, mag.dtype
        if mag.dtype == np.uint8:
            mag = mag / 255.0
        if len(mag.shape) > 2:
            # TODO(lucasw) convert to grayscale properly
            mag = mag[:, :, 0]

        # log scaling of image in y
        # TODO(lucasw) also rescale also?  Maybe the incoming image can be lower
        # resolution, but the remapped one can be 2048 high or some other parameter defined height?
        width = mag.shape[1]
        height = mag.shape[0]
        xr = np.arange(0.0, width, dtype=np.float32).reshape(1, -1)
        map_x = np.repeat(xr, height, axis=0)
        yr = np.arange(0.0, height, dtype=np.float32).reshape(-1, 1)
        if False:
            yr_max = np.max(yr)
            yr_min = np.min(yr)
            rospy.loginfo(str(yr_min) + " " + str(yr_max))  # + " " + str(div))
            div = np.log10(yr_max + 1)
            yr = (np.log10(yr + 1) * yr_max) / div
        elif False:
            yr = (10**(yr / yr_max))
            yr = (yr - np.min(yr)) / np.max(yr) * (yr_max - yr_min) + yr_min

        rospy.loginfo(yr)
        map_y = np.repeat(yr, width, axis=1)

        self.mag = cv2.remap(mag, map_x, map_y, cv2.INTER_LINEAR)
        cv2.imwrite("test.png", self.mag * 255)

        # want to have lower frequencies on the bottom of the image,
        # but the istft expects the opposite.
        mag = np.flipud(self.mag)

        phase = None
        if self.phase_msg is not None:
            phase = np.flipud(self.bridge.imgmsg_to_cv2(self.phase_msg))
            if phase.shape != mag.shape:
                rospy.logwarn(str(phase.shape) + '!=' + str(mag.shape))
                phase = None

        if phase is None:
            phase = mag * 0.0

        # TODO(lucasw) where did the 4 come from?
        mag = np.exp(mag * 4) - 1.0
        zxx = mag * np.exp(1j * phase)

        to, x_unfiltered = signal.istft(zxx,
                                        fs=self.fs,
                                        input_onesided=self.onesided)

        # filter out the DC and the higher frequencies
        # could lean on the producer of the image to do that though
        if self.do_bandpass:
            xo = self.butter_bandpass_filter(x_unfiltered,
                                             lowcut=self.lowcut,
                                             highcut=self.highcut,
                                             fs=self.fs,
                                             order=self.bandpass_order)
        else:
            xo = x_unfiltered

        # TODO(lucasw) notch out 2.0-2.5 KHz

        # TODO(lucasw) does this produce smoother audio?
        if self.second_pass:
            nperseg = zxx.shape[0] * 2 - 1
            print('nperseg', nperseg)
            f2, t2, zxx2 = signal.stft(xo,
                                       fs=self.fs,
                                       nperseg=nperseg,
                                       return_onesided=self.onesided)
            print('max frequency', np.max(f2))
            print(zxx2.shape)

            t, x = signal.istft(zxx2, fs=self.fs, input_onesided=self.onesided)

            zmag = np.abs(zxx2)
            print(zmag)
            print('z min max', np.min(zmag), np.max(zmag))
            logzmag = np.log10(zmag + 1e-10)
            logzmag -= np.min(logzmag)
            zangle = np.angle(zxx2)
        else:
            t = to
            x = xo

        # TODO(lucasw) move the normalization down stream
        # also if the max is < 1.0 then that is probably desired
        x_max = np.max(np.abs(x))
        if x_max != 0.0:
            x = x / x_max
        print(x.shape, x.dtype)
        msg = Audio()
        msg.data = x.tolist()
        msg.sample_rate = self.fs
        self.pub.publish(msg)
        # TODO(lucasw) need to be notified by loop audio when it loops
        # so can only update a little before then
        rospy.sleep(len(msg.data) / float(self.fs) * 0.4)
Example #57
def elastic_transform(image,
                      alpha,
                      sigma,
                      alpha_affine,
                      interpolation=cv2.INTER_LINEAR,
                      border_mode=cv2.BORDER_REFLECT_101,
                      value=None,
                      random_state=None,
                      approximate=False):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    height, width = image.shape[:2]

    # Random affine
    center_square = np.float32((height, width)) // 2
    square_size = min((height, width)) // 3
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size
    ])
    pts2 = pts1 + random_state.uniform(
        -alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    matrix = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image,
                           matrix, (width, height),
                           flags=interpolation,
                           borderMode=border_mode,
                           borderValue=value)

    if approximate:
        # Approximate computation: smooth the displacement map with a large
        # enough kernel. On large images (512+) this is approximately 2x faster.
        dx = (random_state.rand(height, width).astype(np.float32) * 2 - 1)
        cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
        dx *= alpha

        dy = (random_state.rand(height, width).astype(np.float32) * 2 - 1)
        cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
        dy *= alpha
    else:
        dx = np.float32(
            gaussian_filter(
                (random_state.rand(height, width) * 2 - 1), sigma) * alpha)
        dy = np.float32(
            gaussian_filter(
                (random_state.rand(height, width) * 2 - 1), sigma) * alpha)

    x, y = np.meshgrid(np.arange(width), np.arange(height))

    mapx = np.float32(x + dx)
    mapy = np.float32(y + dy)

    return cv2.remap(image, mapx, mapy, interpolation, borderMode=border_mode)
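A short usage sketch (parameter values are illustrative; the non-approximate branch additionally needs scipy.ndimage.gaussian_filter, imported here):

import cv2
import numpy as np
from scipy.ndimage import gaussian_filter  # used by the non-approximate branch

img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
warped = elastic_transform(img, alpha=120, sigma=6, alpha_affine=10,
                           approximate=True)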
Example #58
i = 0
while (True):
    # Read next frame
    success, frame = cap.read()
    if success:
        i += 1

    if i > 2000:
        break

    if success and i > 300:

        frame_undistort = cv2.remap(frame,
                                    map1,
                                    map2,
                                    interpolation=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT)

        # Apply affine wrapping to the given frame
        frame_stabilized = undistort.get_rotation_map(frame_undistort,
                                                      stab_transform[i])

        # crop edges

        frame_out = crop_img = frame_stabilized[crop_start[1]:crop_start[1] +
                                                out_size[1],
                                                crop_start[0]:crop_start[0] +
                                                out_size[0]]
        out.write(frame_out)
        #print(frame_out.shape)
Example #59
    def postprocess(self,
                    binary_seg_result,
                    instance_seg_result=None,
                    min_area_threshold=100,
                    source_image=None,
                    data_source='tusimple'):
        """

        :param binary_seg_result:
        :param instance_seg_result:
        :param min_area_threshold:
        :param source_image:
        :param data_source:
        :return:
        """
        # convert binary_seg_result
        binary_seg_result = np.array(binary_seg_result * 255, dtype=np.uint8)

        # apply image morphology operations to fill in holes and remove small areas
        morphological_ret = _morphological_process(binary_seg_result,
                                                   kernel_size=5)

        connect_components_analysis_ret = _connect_components_analysis(
            image=morphological_ret)

        labels = connect_components_analysis_ret[1]
        stats = connect_components_analysis_ret[2]
        for index, stat in enumerate(stats):
            if stat[4] <= min_area_threshold:
                idx = np.where(labels == index)
                morphological_ret[idx] = 0

        # apply embedding features cluster
        mask_image, lane_coords = self._cluster.apply_lane_feats_cluster(
            binary_seg_result=morphological_ret,
            instance_seg_result=instance_seg_result)

        if mask_image is None:
            return {
                'mask_image': None,
                'fit_params': None,
                'source_image': None,
            }

        # lane line fit
        fit_params = []
        src_lane_pts = []  # lane pts every single lane
        for lane_index, coords in enumerate(lane_coords):
            if data_source == 'tusimple':
                tmp_mask = np.zeros(shape=(720, 1280), dtype=np.uint8)
                tmp_mask[tuple((np.int_(coords[:, 1] * 720 / 256),
                                np.int_(coords[:, 0] * 1280 / 512)))] = 255
            else:
                raise ValueError('Wrong data source now only support tusimple')
            tmp_ipm_mask = cv2.remap(tmp_mask,
                                     self._remap_to_ipm_x,
                                     self._remap_to_ipm_y,
                                     interpolation=cv2.INTER_NEAREST)
            nonzero_y = np.array(tmp_ipm_mask.nonzero()[0])
            nonzero_x = np.array(tmp_ipm_mask.nonzero()[1])

            fit_param = np.polyfit(nonzero_y, nonzero_x, 2)
            fit_params.append(fit_param)

            [ipm_image_height, ipm_image_width] = tmp_ipm_mask.shape
            plot_y = np.linspace(10, ipm_image_height, ipm_image_height - 10)
            fit_x = fit_param[0] * plot_y**2 + fit_param[
                1] * plot_y + fit_param[2]
            # fit_x = fit_param[0] * plot_y ** 3 + fit_param[1] * plot_y ** 2 + fit_param[2] * plot_y + fit_param[3]

            lane_pts = []
            for index in range(0, plot_y.shape[0], 5):
                src_x = self._remap_to_ipm_x[
                    int(plot_y[index]),
                    int(np.clip(fit_x[index], 0, ipm_image_width - 1))]
                if src_x <= 0:
                    continue
                src_y = self._remap_to_ipm_y[
                    int(plot_y[index]),
                    int(np.clip(fit_x[index], 0, ipm_image_width - 1))]
                src_y = src_y if src_y > 0 else 0

                lane_pts.append([src_x, src_y])

            src_lane_pts.append(lane_pts)

        # tusimple test data samples points along the y axis every 10 pixels
        source_image_width = source_image.shape[1]
        for index, single_lane_pts in enumerate(src_lane_pts):
            single_lane_pt_x = np.array(single_lane_pts, dtype=np.float32)[:,
                                                                           0]
            single_lane_pt_y = np.array(single_lane_pts, dtype=np.float32)[:,
                                                                           1]
            if data_source == 'tusimple':
                start_plot_y = 240
                end_plot_y = 720
            else:
                raise ValueError('Wrong data source now only support tusimple')
            step = int(math.floor((end_plot_y - start_plot_y) / 10))
            for plot_y in np.linspace(start_plot_y, end_plot_y, step):
                diff = single_lane_pt_y - plot_y
                fake_diff_bigger_than_zero = diff.copy()
                fake_diff_smaller_than_zero = diff.copy()
                fake_diff_bigger_than_zero[np.where(diff <= 0)] = float('inf')
                fake_diff_smaller_than_zero[np.where(diff > 0)] = float('-inf')
                idx_low = np.argmax(fake_diff_smaller_than_zero)
                idx_high = np.argmin(fake_diff_bigger_than_zero)

                previous_src_pt_x = single_lane_pt_x[idx_low]
                previous_src_pt_y = single_lane_pt_y[idx_low]
                last_src_pt_x = single_lane_pt_x[idx_high]
                last_src_pt_y = single_lane_pt_y[idx_high]

                if previous_src_pt_y < start_plot_y or last_src_pt_y < start_plot_y or \
                        fake_diff_smaller_than_zero[idx_low] == float('-inf') or \
                        fake_diff_bigger_than_zero[idx_high] == float('inf'):
                    continue

                interpolation_src_pt_x = (abs(previous_src_pt_y - plot_y) * previous_src_pt_x +
                                          abs(last_src_pt_y - plot_y) * last_src_pt_x) / \
                                         (abs(previous_src_pt_y - plot_y) + abs(last_src_pt_y - plot_y))
                interpolation_src_pt_y = (abs(previous_src_pt_y - plot_y) * previous_src_pt_y +
                                          abs(last_src_pt_y - plot_y) * last_src_pt_y) / \
                                         (abs(previous_src_pt_y - plot_y) + abs(last_src_pt_y - plot_y))

                if interpolation_src_pt_x > source_image_width or interpolation_src_pt_x < 10:
                    continue

                lane_color = self._color_map[index].tolist()
                cv2.circle(
                    source_image,
                    (int(interpolation_src_pt_x), int(interpolation_src_pt_y)),
                    5, lane_color, -1)
        ret = {
            'mask_image': mask_image,
            'fit_params': fit_params,
            'source_image': source_image,
        }

        return ret
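The _remap_to_ipm_x/_remap_to_ipm_y grids are not built in this snippet. A sketch of one way to derive them from an inverse perspective matrix (the helper name, M_inv, and the sizes are illustrative assumptions, not the original code):

import cv2
import numpy as np

def build_ipm_remap(M_inv, ipm_width, ipm_height):
    # for every destination (IPM) pixel, compute the source-image coordinate
    xs, ys = np.meshgrid(np.arange(ipm_width, dtype=np.float32),
                         np.arange(ipm_height, dtype=np.float32))
    dst_pts = np.stack([xs, ys], axis=-1).reshape(-1, 1, 2)
    src_pts = cv2.perspectiveTransform(dst_pts, M_inv)
    src_pts = src_pts.reshape(ipm_height, ipm_width, 2)
    return src_pts[..., 0], src_pts[..., 1]  # remap_to_ipm_x, remap_to_ipm_y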
Example #60

mtx, dist, newcameramtx, roi = s.calibParam()

while (1):
    _, frame = cap.read()

    ##### Calibration
    h, w = frame.shape[:2]
    h2 = int(h / 2)
    w2 = int(w / 2)
    cv2.circle(frame, (w2, h2), 5, (0, 255, 255), -1)
    ##### Method 2: Remapping
    # undistort
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                             (w, h), 5)
    dst = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    frame = dst
    ##### END CALIBRATION

    cv2.imshow('Post-calib', frame)

    k = cv2.waitKey(5) & 0xFF

    if k == 27:
        break

cv2.destroyAllWindows()
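Note that the loop above recomputes mapx/mapy on every frame even though mtx, dist and newcameramtx never change. Since the maps depend only on those constants and the frame size, they can be built once before the loop so that only the cheap remap runs per frame. Sketch:

# once, before the capture loop (reads the size from a first frame):
_, frame = cap.read()
h, w = frame.shape[:2]
mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                         (w, h), 5)
while True:
    _, frame = cap.read()
    dst = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)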