Example #1
def dewarp(image, window, clicked_corners):
    debug = cv.CloneImage(image)

    # draw a green outline around the clicked corners for debugging
    cv.PolyLine(debug, [[
        clicked_corners[0], clicked_corners[1], clicked_corners[3],
        clicked_corners[2]
    ]], True, cv.RGB(0, 255, 0), 7)

    cv.ShowImage(window, debug)
    cv.WaitKey()

    # Assemble a rotated rectangle out of that info
    #rot_box = cv.MinAreaRect2(corners)
    enc_box = cv.BoundingRect(clicked_corners)
    new_corners = [(0, 0), (enc_box[2] - 1, 0), (0, enc_box[3] - 1),
                   (enc_box[2] - 1, enc_box[3] - 1)]

    warp_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.GetPerspectiveTransform(clicked_corners, new_corners, warp_mat)

    rotated = cv.CloneImage(image)
    cv.WarpPerspective(image, rotated, warp_mat)

    cv.ShowImage(window, rotated)
    cv.WaitKey(10)

    return rotated
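
For comparison, the modern cv2 API expresses the same four-point dewarp much more compactly. A minimal sketch, assuming the corners arrive in the same (top-left, top-right, bottom-left, bottom-right) order used above:

import cv2
import numpy as np

def dewarp_cv2(image, clicked_corners):
    # source corners in (tl, tr, bl, br) order, matching dewarp() above
    src = np.float32(clicked_corners)
    x, y, w, h = cv2.boundingRect(src)
    dst = np.float32([(0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1)])
    warp_mat = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, warp_mat, (w, h))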
Example #2
    def fix_perspective(self, meter_corners):
        """
        Warp the gas meter region so it fills the whole image with corrected perspective
        """

        image_top_left = (0, 0)
        image_top_right = (self.image.width, 0)
        image_bottom_left = (0, self.image.height)
        image_bottom_right = (self.image.width, self.image.height)

        image_corners = (image_top_left, image_top_right, image_bottom_right,
                         image_bottom_left)

        # Make transformation matrix
        transform_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(meter_corners, image_corners,
                                   transform_matrix)

        # Fix gas meter perspective
        fixed_perspective = self.image.transformPerspective(
            transform_matrix).resize(w=self.RESIZE_TO_WIDTH,
                                     h=self.RESIZE_TO_HEIGHT).smooth()

        self._save_debug_image(fixed_perspective, "fixed_perspective")

        return fixed_perspective
Example #3
def get_card(color_capture, corners):
    # destination corners (tl, tr, br, bl) for a 224x311 card-sized patch
    target = [(0, 0), (223, 0), (223, 310), (0, 310)]
    mat = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.GetPerspectiveTransform(corners, target, mat)
    warped = cv.CloneImage(color_capture)
    cv.WarpPerspective(color_capture, warped, mat)
    # restrict the returned image to the card region
    cv.SetImageROI(warped, (0, 0, 223, 310))
    return warped
Example #4
    def update_transform(self):
        map_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(map(tuple, self.points[0].points.values),
                                   map(tuple, self.points[1].points.values),
                                   map_mat)
        flags = cv.CV_WARP_FILL_OUTLIERS
        cv.WarpPerspective(self.im_in, self.im_out, map_mat, flags=flags)
        imshow(self.im_out, axis=self.axes[1], show_axis=True)
        self.refresh()
Example #5
def backf(hypH, im, found):
    """Map the canonical corners back through the inverted hypothesis and
    perspective homographies, returning refined corner positions in im."""
    (code, corners, pattern) = found
    persp = cv.CreateMat(3, 3, cv.CV_32FC1)
    fc = [corners[i,0] for i in range(4)]
    cv.GetPerspectiveTransform(fc, sizcorners, persp)
    cc = cv.Reshape(cv.fromarray(numpy.array(sizcorners).astype(numpy.float32)), 2)
    t1 = cv.CreateMat(4, 1, cv.CV_32FC2)
    t2 = cv.CreateMat(4, 1, cv.CV_32FC2)
    _persp = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Invert(persp, _persp)
    _hypH = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Invert(hypH, _hypH)

    cv.PerspectiveTransform(cc, t1, _hypH)
    cv.PerspectiveTransform(t1, t2, _persp)
    return [t2[i,0] for i in range(4)]
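
Both this example and Example #13 below rely on module-level siz and sizcorners globals that the snippets don't show. A plausible reading, assuming sizcorners simply holds the four corners of the canonical siz x siz marker image (the side length here is illustrative):

siz = 448  # illustrative side length of the canonical marker image
sizcorners = [(0, 0), (siz - 1, 0), (siz - 1, siz - 1), (0, siz - 1)]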
Example #6
    def calc_transform(self, points, correction):
        # canonical destination corners: tl, bl, tr, br of the camera frame
        src = [(0, 0), (0, 0), (0, 0), (0, 0)]
        dst = [(0, 0), (0, CAMERA_SIZE[1]), (CAMERA_SIZE[0], 0), CAMERA_SIZE]

        # match each destination corner to the nearest detected point,
        # then shift it by the per-corner correction
        for i in range(4):
            minimum = sum([j**2 for j in CAMERA_SIZE])  # upper bound on squared distance
            for j in range(4):
                distance = sum([(dst[i][k] - points[j][k])**2
                                for k in range(2)])
                if distance < minimum:
                    minimum = distance
                    src[i] = tuple(
                        [points[j][k] + correction[i][k] for k in range(2)])

        transform = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(src, dst, transform)
        return transform
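
A hypothetical usage sketch: calc_transform matches each canonical camera corner to the nearest detected point, shifts it by the per-corner correction, and returns the 3x3 warp matrix. CAMERA_SIZE is the module-level (width, height) tuple the method references; the detector instance, resolution, and point values below are illustrative only:

CAMERA_SIZE = (640, 480)  # illustrative camera resolution

# four detected corners in arbitrary order, with no per-corner correction
points = [(632, 471), (5, 3), (628, 2), (4, 477)]
correction = [(0, 0), (0, 0), (0, 0), (0, 0)]
transform = detector.calc_transform(points, correction)  # 3x3 CV_32FC1 matrix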
Example #7
    def __init__(self):
        self.transform = cv.CreateMat(3, 3, cv.CV_64FC1)
        # each data line: image_x image_y object_x object_y z_rotation
        f = open('transformation_points.txt', 'r')
        image_plane = []
        object_plane = []
        count = 0
        zRot_total = 0.0
        for line in f:
            if line.startswith("#"):
                continue
            transforms = line.split()
            if len(transforms) != 5:
                continue
            image_plane.append((float(transforms[0]), float(transforms[1])))
            object_plane.append((float(transforms[2]), float(transforms[3])))
            zRot_total += float(transforms[4])
            count += 1
        f.close()
        # GetPerspectiveTransform requires exactly four point pairs
        cv.GetPerspectiveTransform(image_plane, object_plane, self.transform)
        self.zRot_offset = zRot_total / count

        self.pub = rospy.Publisher(topic_name, Tag_Positions)
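
The loader expects transformation_points.txt to contain '#' comment lines plus data lines of five whitespace-separated floats: image x, image y, object x, object y, and a z-rotation sample. Since cv.GetPerspectiveTransform takes exactly four point pairs, the file should hold exactly four data lines; a hypothetical example:

# image_x image_y  object_x object_y  z_rot
12.0   34.0    0.0  0.0   1.5
600.0  30.0    1.0  0.0   1.7
610.0  450.0   1.0  1.0   1.6
15.0   455.0   0.0  1.0   1.4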
Example #8
def NormalizeImage(cvmat, clip_rect, perspective_points):
    u'''Normalize the image to make it easier to read'''
    # extract the LCD region
    lcd = cv.CreateMat(clip_rect.height, clip_rect.width, cv.CV_8UC3)
    cv.GetRectSubPix(cvmat, lcd, (clip_rect.cx, clip_rect.cy))

    # convert to grayscale
    grayed = cv.CreateMat(lcd.height, lcd.width, cv.CV_8UC1)
    cv.CvtColor(lcd, grayed, cv.CV_BGR2GRAY)

    # adaptive thresholding
    filtered = cv.CreateMat(grayed.height, grayed.width, cv.CV_8UC1)
    cv.AdaptiveThreshold(
        grayed,
        filtered,
        255,
        adaptive_method=cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv.CV_THRESH_BINARY,
        blockSize=15,
    )

    # correct the perspective distortion
    transformed = cv.CreateMat(grayed.height, grayed.width, filtered.type)
    matrix = cv.CreateMat(3, 3, cv.CV_32F)
    cv.GetPerspectiveTransform(
        ((perspective_points.tl.x, perspective_points.tl.y),
         (perspective_points.tr.x, perspective_points.tr.y),
         (perspective_points.bl.x, perspective_points.bl.y),
         (perspective_points.br.x, perspective_points.br.y)),
        ((0, 0), (filtered.width, 0), (0, filtered.height),
         (filtered.width, filtered.height)), matrix)
    cv.WarpPerspective(
        filtered,
        transformed,
        matrix,
        flags=cv.CV_WARP_FILL_OUTLIERS,
        fillval=255,
    )

    return transformed
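
NormalizeImage only needs attribute access on its two region arguments, so lightweight namedtuples are enough to drive it. A hypothetical usage sketch (all names and values below are illustrative):

from collections import namedtuple

# hypothetical containers matching the attribute access above
ClipRect = namedtuple('ClipRect', ['cx', 'cy', 'width', 'height'])
Point = namedtuple('Point', ['x', 'y'])
Quad = namedtuple('Quad', ['tl', 'tr', 'bl', 'br'])

clip = ClipRect(cx=160, cy=120, width=320, height=240)
quad = Quad(tl=Point(10, 8), tr=Point(310, 12),
            bl=Point(6, 230), br=Point(315, 236))
binarized = NormalizeImage(source_mat, clip, quad)  # source_mat: an 8UC3 CvMat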
Example #9
mat = cv.CreateMat(3, 3, cv.CV_32FC1)
#mat = cv.CreateMat(3, 3, cv.IPL_DEPTH_8U)
gray = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(img, gray, cv.CV_RGB2GRAY)

#points  = [(50.,50.), (50.,100.), (100.,100.), (100.,50.)]
#npoints = [(20.,30.), (30.,150.), (160.,170.), (200.,20.)]

#points = [(20.,30.), (30.,150.), (160.,170.), (200.,20.)]
#npoints  = [(50.,50.), (50.,100.), (100.,100.), (100.,50.)]

points = [(20., 30.), (30., 150.), (160., 170.), (200., 20.)]
# destination corners matched to the source order (tl, bl, br, tr); the
# original list repeated (640., 480.), which makes the transform degenerate
npoints = [(0., 0.), (0., 480.), (640., 480.), (640., 0.)]

cv.GetPerspectiveTransform(points, npoints, mat)
#cv.CvtColor( img, gray, cv.CV_RGB2GRAY );

src = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32F, 3)
#fimg = cv.CreateImage( cv.GetSize(img), cv.IPL_DEPTH_8U, 3 )

cv.ConvertScale(img, src, (1 / 255.00))
#cv.ConvertScale(gray,src,(1/255.00))

dst = cv.CloneImage(src)
cv.Zero(dst)

cv.WarpPerspective(src, dst, mat)

while 1:
    cv.ShowImage("original", img)
    # HighGUI needs a WaitKey call to actually render; Esc exits
    if cv.WaitKey(10) == 27:
        break
Example #10
def Calibration():
    global image

    if from_video:
        if from_camera:
            capture = cv.CaptureFromCAM(0)
        else:
            capture = cv.CaptureFromFile(videofile)


        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

        if from_camera:
            ## wait through 40 frames: the picture at the very start is always
            ## kind of weird, so give the camera time to stabilize
            for n in range(40):
                ## QueryFrame is just GrabFrame and RetrieveFrame combined into
                ## one function call
                image = cv.QueryFrame(capture)
                ##cv.GrabFrame(capture)
                ##image = cv.CloneImage(cv.RetrieveFrame(capture))

            ## need to clone this image: otherwise once the capture gets
            ## released we won't have access to the image anymore
            image = cv.CloneImage(cv.QueryFrame(capture))
        else:
            ## if we're capturing from a video file, just take the first frame
            image = cv.QueryFrame(capture)
    else:
        image = cv.LoadImage("dartboard_cam1.bmp", cv.CV_LOAD_IMAGE_COLOR)

    #data we need for calibration:
    #mapping for a perspective transform
    global mapping
    #center of the dartboard in x, y form
    global center_dartboard
    #initial angle of the 20 - 1 points divider
    global ref_angle
    #the radii of the rings, there are 6 of them
    global ring_radius
    global calibrationComplete
    calibrationComplete = False

    #for grabbing the user's clicks
    global points

    #either grab data from file or user
    while not calibrationComplete:
        #Read calibration file, if exists
        if os.path.isfile("calibrationData.pkl"):
            try:
                #for grabbing key presses in the python window showing the image
                global keyPressEvent
                keyPressEvent = Event()
                global keyPress
                #for synchronizing the image window thread
                global windowReady
                windowReady = Event()
                #for synchronizing the drawing
                global drawingFinished
                drawingFinished = Event()

                #start a fresh set of points
                points = []

                calFile = open('calibrationData.pkl', 'rb')
                calData = CalibrationData()
                calData = pickle.load(calFile)
                #load the data into the global variables
                points.append(calData.top)
                points.append(calData.bottom)
                points.append(calData.left)
                points.append(calData.right)  # index 3
                init_point_arr = calData.init_point_arr
                center_dartboard = calData.center_dartboard
                ref_angle = calData.ref_angle
                # the six ring radii
                ring_radius = list(calData.ring_radius[:6])
                #close the file once we are done reading the data
                calFile.close()

                #copy image for old calibration data
                new_image = cv.CloneImage(image)

                #have the image in another window and thread
                t = Thread(target=CalibrationWindowThread2, args=(new_image, ))
                t.start()
                #wait for the image window to setup
                windowReady.wait()

                #now draw them out:
                #*******************1. Transform image******************************
                newtop = (round(new_image.height / 2),
                          round(new_image.height * 0.20))
                newbottom = (round(new_image.height / 2),
                             round(new_image.height * 0.80))
                #Note: the height is smaller than the width
                newleft = (round(new_image.height * 0.20),
                           round(new_image.height / 2))
                newright = (round(new_image.height * 0.80),
                            round(new_image.height / 2))

                mapping = cv.CreateMat(3, 3, cv.CV_32FC1)

                #get a fresh new image
                new_image = cv.CloneImage(image)

                cv.GetPerspectiveTransform(
                    [points[0], points[1], points[2], points[3]],
                    [newtop, newbottom, newleft, newright], mapping)
                cv.WarpPerspective(image, new_image, mapping)
                cv.ShowImage(prev_calibration_window, new_image)
                #*******************************************************************

                #********************2.Draw points dividers*************************
                #find initial angle of the 20-1 divider
                tempX_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
                #correct the point with respect to the center
                cv.mSet(tempX_mat, 0, 0,
                        init_point_arr[0] - center_dartboard[0])
                tempY_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
                #adjust the origin of y
                cv.mSet(
                    tempY_mat, 0, 0, init_point_arr[1] -
                    (new_image.height - center_dartboard[1]))
                init_mag_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
                init_angle_reversed_mat = cv.CreateMat(1, 1, cv.CV_32FC1)

                #each point region is 360/20 = 18 degrees wide
                cv.CartToPolar(tempX_mat,
                               tempY_mat,
                               init_mag_mat,
                               init_angle_reversed_mat,
                               angleInDegrees=True)

                #display dividers
                current_point = (int(round(init_point_arr[0])),
                                 int(round(init_point_arr[1])))
                next_angle = cv.CreateMat(1, 1, cv.CV_32FC1)
                cv.mSet(next_angle, 0, 0, 360 - ref_angle)
                temp_angle = 360.0 - ref_angle
                #draw point dividers counterclockwise, just like how angle is calculated, arctan(y/x)
                for i in range(0, 20):
                    cv.Line(new_image, center_dartboard, current_point,
                            cv.CV_RGB(0, 0, 255), 1, 8)
                    #calculate the cartesian coordinate of the next point divider
                    temp_angle = 360.0 - temp_angle
                    temp_angle += 18.0
                    if temp_angle >= 360.0:
                        temp_angle -= 360.0
                    #make temp_angle reversed
                    temp_angle = 360.0 - temp_angle
                    #print temp_angle
                    cv.mSet(next_angle, 0, 0, temp_angle)

                    cv.PolarToCart(init_mag_mat,
                                   next_angle,
                                   tempX_mat,
                                   tempY_mat,
                                   angleInDegrees=True)

                    #current_point = []
                    #adjust the cartesian points
                    current_point = (int(
                        round(cv.mGet(tempX_mat, 0, 0) + center_dartboard[0])),
                                     int(
                                         round(
                                             cv.mGet(tempY_mat, 0, 0) +
                                             (new_image.height -
                                              center_dartboard[1]))))
                    #print current_point

                cv.ShowImage(prev_calibration_window, new_image)
                #*************************************************************************

                #**********************3. Draw rings**************************************
                for i in range(0, 6):
                    #display the rings
                    cv.Circle(new_image, center_dartboard, ring_radius[i],
                              cv.CV_RGB(0, 255, 0), 1, 8)

                cv.ShowImage(prev_calibration_window, new_image)
                #*************************************************************************

                #we are finished drawing, signal
                drawingFinished.set()

                #wait for key press
                print "Previous calibration data detected. Would you like to keep this calibration data? Press 'y' for yes"
                #wait indefinitely for a key press
                keyPressEvent.wait()

                #ASCII 121 is character 'y'
                if keyPress == 121:
                    #we are good with the previous calibration data
                    calibrationComplete = True
                else:
                    calibrationComplete = False
                    #delete the calibration file and start over
                    os.remove("calibrationData.pkl")

            #corrupted file
            except EOFError as err:
                print err

        #Manual calibration
        else:
            #use two events to emulate wait for mouse click event
            global e
            global key
            e = Event()
            key = Event()

            #start a fresh set of points
            points = []

            #copy image for manual calibration
            new_image = cv.CloneImage(image)

            t = Thread(target=CalibrationWindowThread, args=(new_image, ))
            t.start()

            print "Please select the center of the 20 points outermost rim."
            e.wait()
            e.clear()

            cv.Circle(new_image, points[0], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the center of the 3 points outermost rim."
            e.wait()
            e.clear()

            cv.Circle(new_image, points[1], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the center of the 11 points outermost rim."
            e.wait()
            e.clear()

            cv.Circle(new_image, points[2], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the center of the 6 points outermost rim."
            e.wait()
            e.clear()

            cv.Circle(new_image, points[3], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            #calculate the desired circle dimensions
            newtop = (round(new_image.height / 2),
                      round(new_image.height * 0.20))
            newbottom = (round(new_image.height / 2),
                         round(new_image.height * 0.80))
            #Note: the height is smaller than the width
            newleft = (round(new_image.height * 0.20),
                       round(new_image.height / 2))
            newright = (round(new_image.height * 0.80),
                        round(new_image.height / 2))

            mapping = cv.CreateMat(3, 3, cv.CV_32FC1)

            #get a fresh new image
            new_image = cv.CloneImage(image)

            cv.GetPerspectiveTransform(
                [points[0], points[1], points[2], points[3]],
                [newtop, newbottom, newleft, newright], mapping)
            cv.WarpPerspective(image, new_image, mapping)
            cv.ShowImage(window_name, new_image)

            print "The dartboard image has now been normalized."
            print ""

            center_dartboard = []
            print "Please select the middle of the dartboard. i.e. the middle of the double bull's eye"
            e.wait()
            e.clear()
            center_dartboard = points[4]
            center_dartboard = (int(round(center_dartboard[0])),
                                int(round(center_dartboard[1])))

            cv.Circle(new_image, center_dartboard, 3, cv.CV_RGB(255, 0, 0), 2,
                      8)
            cv.ShowImage(window_name, new_image)

            init_point_arr = []
            print "Please select the outermost intersection of the 20 points and 1 ponit line."
            e.wait()
            e.clear()
            init_point_arr = points[5]

            cv.Circle(new_image, init_point_arr, 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            #find initial angle of the 20-1 divider
            tempX_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
            #correct the point with respect to the center
            cv.mSet(tempX_mat, 0, 0, init_point_arr[0] - center_dartboard[0])
            tempY_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
            #adjust the origin of y
            cv.mSet(
                tempY_mat, 0, 0,
                init_point_arr[1] - (new_image.height - center_dartboard[1]))
            init_mag_mat = cv.CreateMat(1, 1, cv.CV_32FC1)
            init_angle_reversed_mat = cv.CreateMat(1, 1, cv.CV_32FC1)

            #each point region is 360/20 = 18 degrees wide
            cv.CartToPolar(tempX_mat,
                           tempY_mat,
                           init_mag_mat,
                           init_angle_reversed_mat,
                           angleInDegrees=True)

            ref_angle = 360.0 - cv.mGet(init_angle_reversed_mat, 0, 0)
            global ref_mag
            ref_mag = cv.mGet(init_mag_mat, 0, 0)

            #print cv.mGet(init_mag_mat, 0, 0)
            #print "Initial angle"
            #print init_angle_val

            #display dividers
            current_point = (int(round(init_point_arr[0])),
                             int(round(init_point_arr[1])))
            next_angle = cv.CreateMat(1, 1, cv.CV_32FC1)
            cv.mSet(next_angle, 0, 0, 360 - ref_angle)
            temp_angle = 360.0 - ref_angle
            #draw point dividers counterclockwise, just like how angle is calculated, arctan(y/x)
            for i in range(0, 20):
                cv.Line(new_image, center_dartboard, current_point,
                        cv.CV_RGB(0, 0, 255), 1, 8)
                #calculate the cartesian coordinate of the next point divider
                temp_angle = 360.0 - temp_angle
                temp_angle += 18.0
                if temp_angle >= 360.0:
                    temp_angle -= 360.0
                #make temp_angle reversed
                temp_angle = 360.0 - temp_angle
                #print temp_angle
                cv.mSet(next_angle, 0, 0, temp_angle)

                cv.PolarToCart(init_mag_mat,
                               next_angle,
                               tempX_mat,
                               tempY_mat,
                               angleInDegrees=True)

                #current_point = []
                #adjust the cartesian points
                current_point = (
                    int(round(cv.mGet(tempX_mat, 0, 0) + center_dartboard[0])),
                    int(
                        round(
                            cv.mGet(tempY_mat, 0, 0) +
                            (new_image.height - center_dartboard[1]))))
                #print current_point

            cv.ShowImage(window_name, new_image)

            ring_arr = []
            print "Please select the first ring (any point). i.e. the ring that encloses the double bull's eye."
            e.wait()
            e.clear()
            ring_arr.append(points[6])

            cv.Circle(new_image, points[6], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the second ring (any point). i.e. the ring that encloses the bull's eye."
            e.wait()
            e.clear()
            ring_arr.append(points[7])

            cv.Circle(new_image, points[7], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the third ring (any point). i.e. the closer ring that encloses the triple score region."
            e.wait()
            e.clear()
            ring_arr.append(points[8])

            cv.Circle(new_image, points[8], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the fourth ring (any point). i.e. the further ring that encloses the triple score region."
            e.wait()
            e.clear()
            ring_arr.append(points[9])

            cv.Circle(new_image, points[9], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the fifth ring (any point). i.e. the closer ring that encloses the double score region."
            e.wait()
            e.clear()
            ring_arr.append(points[10])

            cv.Circle(new_image, points[10], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            print "Please select the sixth ring (any point). i.e. the further ring that encloses the double score region."
            e.wait()
            e.clear()
            ring_arr.append(points[11])

            cv.Circle(new_image, points[11], 3, cv.CV_RGB(255, 0, 0), 2, 8)
            cv.ShowImage(window_name, new_image)

            ring_radius = []
            for i in range(0, 6):
                #find the radius of the ring
                ring_radius.append(
                    int(
                        math.sqrt((ring_arr[i][0] - center_dartboard[0])**2 +
                                  (ring_arr[i][1] - center_dartboard[1])**2)))
                #display the rings
                cv.Circle(new_image, center_dartboard, ring_radius[i],
                          cv.CV_RGB(0, 255, 0), 1, 8)

            cv.ShowImage(window_name, new_image)

            e.wait()

            #destroy calibration window
            key.set()

            #save valuable calibration data into a structure
            calData = CalibrationData()
            calData.top = points[0]
            calData.bottom = points[1]
            calData.left = points[2]
            calData.right = points[3]
            calData.center_dartboard = center_dartboard
            calData.init_point_arr = init_point_arr
            calData.ref_angle = ref_angle
            calData.ring_radius = ring_radius

            #write the calibration data to a file
            calFile = open("calibrationData.pkl", "wb")
            pickle.dump(calData, calFile, 0)
            calFile.close()

            calibrationComplete = True
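
The pickle round-trip above implies a simple container; a minimal sketch of CalibrationData, assuming it is just a bag of the attributes read and written here:

class CalibrationData:
    """Plain container persisted via pickle (attributes as used above)."""
    def __init__(self):
        self.top = None               # clicked rim point at the 20-point segment
        self.bottom = None            # clicked rim point at the 3-point segment
        self.left = None              # clicked rim point at the 11-point segment
        self.right = None             # clicked rim point at the 6-point segment
        self.center_dartboard = None  # (x, y) center of the double bull's eye
        self.init_point_arr = None    # outermost intersection of the 20/1 divider
        self.ref_angle = None         # initial angle of the 20-1 points divider
        self.ring_radius = None       # list of the six ring radii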
Example #11
    def image_filter(self, cv_image, info, copy=None):
        image = cv_image

        #Only works on a grayscale image
        gray_image = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, gray_image, cv.CV_BGR2GRAY)
        print "Called with mode: %s" % self.mode
        if self.mode == "default" or self.mode == "save_h":
            print "Computing homography matrix from checkerboard"
            #Get the width and height of the board
            board_w = self.cols
            board_h = self.rows
            #Area of the board = "board_n"
            board_n = board_w * board_h
            board_sz = (board_w, board_h)
            #This needs to be fed with a "height", so it knows how high up the perspective transform should be.
            #I've found for the wide_stereo cameras, a value of -15 works well. For the prosilica, -40. Don't ask me why
            init_height = self.height
            #Uses openCV to find the checkerboard
            (found, corners) = cv.FindChessboardCorners(
                image, board_sz,
                (cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_FILTER_QUADS))
            if (not found):
                print "Couldn't acquire checkerboard, only found 0 of %d corners\n" % board_n
                gr = cv.CloneImage(image)
                cv.CvtColor(gray_image, gr, cv.CV_GRAY2BGR)
                return gr
            #We need subpixel accuracy, so we tell it where the corners are and it refines them.
            #(11, 11) is the half-size of the search window; (-1, -1) means no dead zone in its center.
            cv.FindCornerSubPix(
                gray_image, corners, (11, 11), (-1, -1),
                (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
            #Pull out the Image Points (2d pixel locations of the checkerboard corners in the image)
            #and the Object Points (2d locations of those corners on the checkerboard itself)
            objPts = point_array(4)
            imgPts = point_array(4)
            objPts[0] = (0, 0)
            objPts[1] = (board_w - 1, 0)
            objPts[2] = (0, board_h - 1)
            objPts[3] = (board_w - 1, board_h - 1)
            imgPts[0] = corners[0]
            imgPts[1] = corners[board_w - 1]
            imgPts[2] = corners[(board_h - 1) * board_w]
            imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1]

            #Use GetPerspectiveTransform to populate our Homography matrix
            H = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.GetPerspectiveTransform(objPts, imgPts, H)
            #Since we don't get any z information from this, we populate H[2,2] with our hard-coded height
            H[2, 2] = init_height
            if self.mode == "save_h":
                print "Saving Homography matrix to %s" % self.matrix_location
                cv.Save(self.matrix_location, H)
        else:
            print "Loading Homography matrix from %s" % self.matrix_location
            H = cv.Load(self.matrix_location)
        birds_image = cv.CloneImage(image)
        #birds_image = cv.CreateImage((image.width*3,image.height*3),8,3)
        #Uses the homography matrix to warp the perspective.
        cv.WarpPerspective(
            image, birds_image, H, cv.CV_INTER_LINEAR +
            cv.CV_WARP_INVERSE_MAP + cv.CV_WARP_FILL_OUTLIERS)
        #Note: If you need to undo the transformation, you can simply invert H and call cv.WarpPerspective again.
        return birds_image
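
The closing note can be made concrete. A minimal sketch in the same legacy cv API, assuming H is the homography computed above and birds_image the warped result:

# invert the homography and warp back with the same flags
H_inv = cv.CreateMat(3, 3, cv.CV_32FC1)
cv.Invert(H, H_inv)
restored = cv.CloneImage(birds_image)
cv.WarpPerspective(birds_image, restored, H_inv,
                   cv.CV_INTER_LINEAR + cv.CV_WARP_INVERSE_MAP +
                   cv.CV_WARP_FILL_OUTLIERS)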
Example #12
    def process_frame(self, frame):
        self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, self.debug_frame)
        cv.Copy(self.debug_frame, og_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)  # channel of interest (was 3 before competition, 2 at competition)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(binary, binary,
                             255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV,
                             self.adaptive_thresh_blocksize,
                             self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap
        )

        lines = []

        for line in raw_lines:
            lines.append(line)

        # Group lines with similar endpoints, keeping the longer of each pair

        for line1 in lines[:]:
            for line2 in lines[:]:
                if line1 in lines and line2 in lines and line1 != line2:
                    if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \
                       math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \
                       math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \
                       math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)
                    elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \
                         math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \
                         math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \
                         math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)

        self.hough_corners = []
        for line in lines:
            self.hough_corners.append(line[0])
            self.hough_corners.append(line[1])

        for corner1 in self.hough_corners[:]:
            for corner2 in self.hough_corners[:]:
                if corner1 is not corner2 and corner1 in self.hough_corners and corner2 in self.hough_corners:
                    if math.fabs(corner1[0] - corner2[0]) < self.max_corner_range4 and \
                       math.fabs(corner1[1] - corner2[1]) < self.max_corner_range4:
                        corner1 = [(corner1[0] + corner2[0]) / 2, (corner1[1] + corner2[1]) / 2]
                        self.hough_corners.remove(corner2)

        for line1 in lines:
            #cv.Line(self.debug_frame,line1[0],line1[1], (0,0,255), 10, cv.CV_AA, 0)
            for line2 in lines:
                if line1 is not line2:
                    self.find_corners(line1, line2)

        for corner1 in self.corners:
            for corner2 in self.corners:
                if math.fabs(corner1[1][0] - corner2[1][0]) < self.max_corner_range2 and \
                   math.fabs(corner1[1][1] - corner2[1][1]) < self.max_corner_range2 and \
                   math.fabs(corner1[2][0] - corner2[2][0]) < self.max_corner_range2 and \
                   math.fabs(corner1[2][1] - corner2[2][1]) < self.max_corner_range2 and \
                   math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and \
                   math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2:
                    pt1 = (int(corner1[0][0]), int(corner1[0][1]))
                    pt4 = (int(corner2[0][0]), int(corner2[0][1]))
                    pt3 = (int(corner1[1][0]), int(corner1[1][1]))
                    pt2 = (int(corner1[2][0]), int(corner1[2][1]))
                    #line_color = (0, 255, 0)
                    #cv.Line(self.debug_frame,pt1,pt2, line_color, 10, cv.CV_AA, 0)                  
                    #cv.Line(self.debug_frame,pt1,pt3, line_color, 10, cv.CV_AA, 0)
                    #cv.Line(self.debug_frame,pt4,pt2, line_color, 10, cv.CV_AA, 0)                  
                    #cv.Line(self.debug_frame,pt4,pt3, line_color, 10, cv.CV_AA, 0)
                    new_bin = Bin(pt1, pt2, pt3, pt4)
                    new_bin.id = self.bin_id
                    self.bin_id += 1
                    if math.fabs(line_distance(pt1, pt2) - line_distance(pt3, pt4)) < self.parallel_sides_length_thresh and \
                       math.fabs(line_distance(pt1, pt3) - line_distance(pt2, pt4)) < self.parallel_sides_length_thresh:
                        self.Bins.append(new_bin)
                        print "new_bin"

                elif (math.fabs(corner1[1][0] - corner2[2][0]) < self.max_corner_range2 and
                      math.fabs(corner1[1][1] - corner2[2][1]) < self.max_corner_range2 and
                      math.fabs(corner1[2][0] - corner2[1][0]) < self.max_corner_range2 and
                      math.fabs(corner1[2][1] - corner2[1][1]) < self.max_corner_range2 and
                      math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and
                      math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2):
                    continue

        self.corners = []
        self.final_corners = self.sort_corners()  # results unused; experimental (corners seen twice should be exactly the ones we want, but this was unreliable)
        self.sort_bins()
        self.update_bins()
        self.group_bins()
        self.draw_bins()

        for corner in self.hough_corners:
            line_color = [255, 0, 0]
            cv.Circle(self.debug_frame, corner, 15, (255, 0, 0), 2, 8, 0)

        for line in lines:
            line_color = [255, 0, 0]
            cv.Line(self.debug_frame, line[0], line[1], line_color, 5, cv.CV_AA, 0)
            #cv.Circle(self.debug_frame, line[0], 15, (255,0,0), 2,8,0)
            #cv.Circle(self.debug_frame, line[1], 15, (255,0,0), 2,8,0)

        #Output bins
        self.output.bins = self.Bins
        anglesum = 0
        for bins in self.output.bins:
            bins.theta = (bins.center[0] - frame.width / 2) * 37 / (frame.width / 2)
            bins.phi = -1 * (bins.center[1] - frame.height / 2) * 36 / (frame.height / 2)
            anglesum += bins.angle
            # bins.orientation = bins.angle
        if len(self.output.bins) > 0:
            self.output.orientation = anglesum / len(self.output.bins)
        else:
            self.output.orientation = None
        self.return_output()

        svr.debug("Bins", self.debug_frame)
        svr.debug("Original", og_frame)

        #BEGIN SHAPE PROCESSING

        #constants
        img_width = 128
        img_height = 256

        number_x = 23
        number_y = 111
        number_w = 82
        number_h = 90

        bin_thresh_blocksize = 11
        bin_thresh = 1.9

        red_significance_threshold = 0.4

        #load templates - run once, accessible to number processor

        number_templates = [
            (10, cv.LoadImage("number_templates/10.png")),
            (16, cv.LoadImage("number_templates/16.png")),
            (37, cv.LoadImage("number_templates/37.png")),
            (98, cv.LoadImage("number_templates/98.png")),
        ]

        #Begin Bin Contents Processing

        for bin in self.Bins:
            #Take the bin's corners, and get an image containing an img_width x img_height rectangle of it
            transf = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.GetPerspectiveTransform(
                [bin.corner1, bin.corner2, bin.corner3, bin.corner4],
                [(0, 0), (0, img_height), (img_width, 0), (img_width, img_height)],
                transf
            )
            bin_image = cv.CreateImage((img_width, img_height), 8, 3)
            cv.WarpPerspective(frame, bin_image, transf)

            #AdaptiveThreshold to get a black-and-white image highlighting the number
            #(still works better than my yellow-vs-red threshold attempt)
            hsv = cv.CreateImage(cv.GetSize(bin_image), 8, 3)
            bin_thresh_image = cv.CreateImage(cv.GetSize(bin_image), 8, 1)
            cv.CvtColor(bin_image, hsv, cv.CV_BGR2HSV)
            cv.SetImageCOI(hsv, 3)
            cv.Copy(hsv, bin_thresh_image)
            cv.SetImageCOI(hsv, 0)
            cv.AdaptiveThreshold(bin_thresh_image, bin_thresh_image,
                                 255,
                                 cv.CV_ADAPTIVE_THRESH_MEAN_C,
                                 cv.CV_THRESH_BINARY_INV,
                                 bin_thresh_blocksize,
                                 bin_thresh,
            )
            kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
            cv.Erode(bin_thresh_image, bin_thresh_image, kernel, 1)
            cv.Dilate(bin_thresh_image, bin_thresh_image, kernel, 1)

            #Here we loop through all four templates and figure out which one we think is most likely.
            #The comparison counts corresponding pixels that are non-zero in both images (both_count)
            #and corresponding pixels that differ (diff_count). diff_count/both_count is our
            #"unconfidence" ratio: the lower it is, the more confident we are.
            #There are two nearly identical pieces of code within this loop: one checks the bin
            #right-side-up, the other checks it flipped 180 degrees.
            last_thought_number = -1
            last_unconfidence_ratio = number_w * number_h + 2
            for i in range(0, len(number_templates)):
                both_count = 0
                diff_count = 0
                this_number_image = number_templates[i][1]
                for y in range(0, number_h):
                    for x in range(0, number_w):
                        if (bin_thresh_image[y + number_y, x + number_x] != 0) and (this_number_image[y, x][0] != 0):
                            both_count += 1
                        elif (bin_thresh_image[y + number_y, x + number_x] != 0) or (this_number_image[y, x][0] != 0):
                            diff_count += 1
                if both_count == 0:
                    unconfidence_ratio = number_w * number_h + 1  # max unconfidence
                else:
                    unconfidence_ratio = 1.0 * diff_count / both_count
                if unconfidence_ratio < last_unconfidence_ratio:
                    last_thought_number = number_templates[i][0]
                    last_unconfidence_ratio = unconfidence_ratio
                both_count = 0
                diff_count = 0
                for y in range(0, number_h):
                    for x in range(0, number_w):
                        if (bin_thresh_image[img_height - number_y - 1 - y, img_width - number_x - 1 - x] != 0) and (
                                this_number_image[y, x][0] != 0):
                            both_count += 1
                        elif (bin_thresh_image[img_height - number_y - 1 - y, img_width - number_x - 1 - x] != 0) or (
                                this_number_image[y, x][0] != 0):
                            diff_count += 1
                if both_count == 0:
                    unconfidence_ratio = number_w * number_h + 1  # max unconfidence
                else:
                    unconfidence_ratio = 1.0 * diff_count / both_count
                if unconfidence_ratio < last_unconfidence_ratio:
                    last_thought_number = number_templates[i][0]
                    last_unconfidence_ratio = unconfidence_ratio

            print str(last_thought_number) + " | " + str(last_unconfidence_ratio)

            try:  # check whether this bin already has a number assigned
                bin.number_unconfidence_ratio
            except AttributeError:
                bin.number_unconfidence_ratio = last_unconfidence_ratio
                bin.number = last_thought_number
                print "Set Speed Limit Number"
            else:
                if last_unconfidence_ratio < bin.number_unconfidence_ratio:
                    bin.number_unconfidence_ratio = last_unconfidence_ratio
                    if bin.number == last_thought_number:
                        print "More Confident on Same Number: Updated"
                    else:
                        print "More Confident on Different Number: Updated"
                        bin.number = last_thought_number
Example #13
def refinecorners(im, found):
    """ For a found marker, return the refined corner positions """
    t0 = time.time()
    (code,corners,pattern) = found
    persp = cv.CreateMat(3, 3, cv.CV_32FC1)
    fc = [corners[i,0] for i in range(4)]
    cv.GetPerspectiveTransform(fc, sizcorners, persp)
    cim = cv.CreateMat(siz, siz, cv.CV_8UC1)
    cv.WarpPerspective(im, cim, persp, flags = cv.CV_INTER_LINEAR|cv.CV_WARP_FILL_OUTLIERS, fillval = 255)

    unit = siz / 14.
    hunit = unit / 2
    def nearest1(x, y):
        ix = int((x + hunit) / unit)
        iy = int((y + hunit) / unit)
        if (2 <= ix < 13) and (2 <= iy < 13):
            nx = int(unit * ix)
            ny = int(unit * iy)
            return (nx, ny)
        else:
            return (0,0)

    def nearest(x, y):
        """ Return all grid points within sqrt(2) units of (x,y), closest first """
        close = []
        for ix in range(2, 14):
            for iy in range(2, 14):
                (nx, ny) = (unit * ix, unit * iy)
                d = l2(x, y, nx, ny)
                close.append((d, (nx, ny)))
        return [p for (d,p) in sorted(close) if d < 2*unit*unit]

    corners = strongest(cim)
    pool = [((x,y), nearest(x, y)) for (x, y) in corners]

    ga = dict([(x+y,((x,y),P)) for ((x,y),P) in pool])
    gb = dict([(x-y,((x,y),P)) for ((x,y),P) in pool])
    hyp = [ga[min(ga)], ga[max(ga)],
           gb[min(gb)], gb[max(gb)]]

    aL = [a for (a,bs) in hyp]
    oldcorners = cv.fromarray(numpy.array(corners).astype(numpy.float32))
    oldcorners = cv.Reshape(oldcorners, 2)
    newcorners = cv.CreateMat(len(corners), 1, cv.CV_32FC2)
    best = (9999999, None)
    for bL in itertools.product(*[bs for (a,bs) in hyp]):
        hypH = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(aL, bL, hypH)
        cv.PerspectiveTransform(oldcorners, newcorners, hypH)
        error = 0
        for i in range(newcorners.rows):
            (x,y) = newcorners[i,0]
            (nx, ny) = nearest1(x, y)
            error += l2(x, y, nx, ny)
        best = min(best, (error, hypH))
        if error < 1000:
            break
    # print "took", time.time() - t0, best[0]
    if best[0] < 2500:
        pose = best[1]
        return backf(pose, im, found)
    else:
        return None
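
refinecorners and backf also assume an l2 distance helper and a strongest() corner detector that aren't shown. A plausible l2, consistent with the thresholds above comparing against squared units (2*unit*unit):

def l2(x, y, nx, ny):
    # squared euclidean distance
    return (x - nx) ** 2 + (y - ny) ** 2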
Example #14
def VirtualMirror():
    cv.NamedWindow("RGB_remap", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Depth_remap", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow('dst', cv.CV_WINDOW_NORMAL)
    cv.SetMouseCallback("Depth_remap", on_mouse, None)
    print "Virtual Mirror"
    print "Calibrated 4 Screen corner= ", sn4_ref
    print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0] - sn4_ref[1])
    print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1] - sn4_ref[2])
    print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2] - sn4_ref[3])
    print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3] - sn4_ref[0])
    global head_pos
    global head_virtual
    global scene4_cross
    head_pos = np.array([-0.2, -0.2, 1.0])  #Head_detect()

    while 1:
        (depth, _) = freenect.sync_get_depth()
        (rgb, _) = freenect.sync_get_video()
        #print type(depth)
        img = array2cv(rgb[:, :, ::-1])
        im = array2cv(depth.astype(np.uint8))
        #modulize this part for update_on() and loopcv()
        #q = depth
        X, Y = np.meshgrid(range(640), range(480))
        d = 2  #downsampling factor, if needed
        projpts = calibkinect.depth2xyzuv(depth[::d, ::d], X[::d, ::d],
                                          Y[::d, ::d])
        xyz, uv = projpts

        if tracking == 0:
            #*********************************
            if pt is not None:
                print "=================="
                (x_d, y_d) = pt
                print "x=", x_d, " ,y=", y_d
                #print depth.shape
                #watch out: depth is indexed [row, col], i.e. [480, 640]
                d_raw = np.array([depth[y_d, x_d]])
                u_d = np.array([x_d])
                v_d = np.array([y_d])

                print "d_raw= ", d_raw
                print "u_d= ", u_d
                print "v_d= ", v_d
                head3D, head2D = calibkinect.depth2xyzuv(d_raw, u_d, v_d)
                print "XYZ=", head3D
                print "XYZonRGBplane=", head2D

                head_pos = head3D[0]
                #print "head_pos.shape",head_pos.shape
                print "head_pos= ", head_pos
                cv.WaitKey(100)
                cv.Circle(im, (x_d, y_d), 4, (0, 0, 255, 0), -1, 8, 0)
                cv.Circle(im, (int(head2D[0, 0]), int(head2D[0, 1])), 2,
                          (255, 255, 255, 0), -1, 8, 0)

            #*********************************
        elif tracking == 1:
            #find the nearest point (nose) as reference for right eye position
            print "nose"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :] + (0.03, 0.04, 0.01)
            #print head_pos.shape
            #print head_pos

        elif tracking == 2:
            #find the closest point as eye position
            print "camera"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :]
            #print head_pos.shape
            #print head_pos

        else:
            print "please select a tracking mode"

        head_virtual = MirrorReflection(sn4_ref[0:3, :], head_pos)
        print "head_virtual= ", head_virtual

        rgbK = np.array([[520.97092069697146, 0.0, 318.40565581396697],
                         [0.0, 517.85544366622719, 263.46756370601804],
                         [0.0, 0.0, 1.0]])
        rgbD = np.array([[0.22464481251757576], [-0.47968370787671893], [0.0],
                         [0.0]])
        irK = np.array([[588.51686020601733, 0.0, 320.22664144213843],
                        [0.0, 584.73028132692866, 241.98395817513071],
                        [0.0, 0.0, 1.0]])
        irD = np.array([[-0.1273506872313161], [0.36672476189160591], [0.0],
                        [0.0]])

        mapu = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapv = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)

        cv.InitUndistortMap(rgbK, rgbD, mapu, mapv)
        cv.InitUndistortMap(irK, irD, mapx, mapy)

        if 1:
            rgb_remap = cv.CloneImage(img)
            cv.Remap(img, rgb_remap, mapu, mapv)

            depth_remap = cv.CloneImage(im)
            cv.Remap(im, depth_remap, mapx, mapy)

        scene4_cross = Cross4Pts.CrossPts(xyz, uv, head_pos, head_virtual,
                                          sn4_ref)
        #[warp] Add the whole warping code here
        #[warp] points = Scene4Pts() as the four warping points
        #Flip the dst image!
        #ShowImage("rgb_warp", dst)

        #Within/out of the rgb range
        #Mapping Destination (width, height)=(x,y)

        #Warning: points run clockwise: pt1 (top-left), pt2 (top-right), pt3 (bottom-right), pt4 (bottom-left)
        #points = [(test[0,0],test[0,1]), (630.,300.), (700.,500.), (400.,470.)]
        points = [(scene4_cross[0, 0], scene4_cross[0, 1]),
                  (scene4_cross[1, 0], scene4_cross[1, 1]),
                  (scene4_cross[2, 0], scene4_cross[2, 1]),
                  (scene4_cross[3, 0], scene4_cross[3, 1])]
        #Warping the image without flipping (camera image)
        #npoints  = [(0.,0.), (640.,0.), (640.,480.), (0.,480.)]
        #Warping the image with flipping (mirror flip image)
        npoints = [(640., 0.), (0., 0.), (0., 480.), (640., 480.)]
        mat = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(points, npoints, mat)

        #src = cv.CreateImage( cv.GetSize(img), cv.IPL_DEPTH_32F, 3 )
        src = cv.CreateImage(cv.GetSize(rgb_remap), cv.IPL_DEPTH_32F, 3)
        #cv.ConvertScale(img,src,(1/255.00))
        cv.ConvertScale(rgb_remap, src, (1 / 255.00))

        dst = cv.CloneImage(src)
        cv.Zero(dst)
        cv.WarpPerspective(src, dst, mat)
        #************************************************************************

        #Remap the rgb and depth image
        #Warping will use remap rgb image as src

        if 1:
            cv.ShowImage("RGB_remap", rgb_remap)  #rgb[200:440,300:600,::-1]
            cv.ShowImage("Depth_remap", depth_remap)
            cv.ShowImage("dst", dst)  #warp rgb image

        if cv.WaitKey(5) == 27:
            cv.DestroyWindow("RGB_remap")
            cv.DestroyWindow("Depth_remap")
            cv.DestroyWindow("dst")
            break