Example #1
    def __Augmentation(self):
        """Projects an augmentation object over the chessboard pattern."""
        # Load the camera.
        cameraID = 0
        SIGBTools.VideoCapture(cameraID,
                               SIGBTools.CAMERA_VIDEOCAPTURE_640X480_30FPS)

        # Read each frame from input camera.
        while True:
            # Read the current image from the camera.
            image = SIGBTools.read()

            # Finds the positions of internal corners of the chessboard.
            corners = SIGBTools.FindCorners(image, False)
            if corners is not None:
                pass  # TODO: project the augmentation here (see the sketch after this example).

            # Show the final processed image.
            cv2.imshow("Augmentation", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
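The `pass` above is where the augmentation belongs. A minimal sketch of one way to fill it in, assuming the corner layout of Example #4 (9x6 pattern, outer corners at indices 0, 8, 45, 53) and a hypothetical texture path; this is an illustration, not SIGBTools' own augmentation:

    # Hypothetical completion of the skeleton above (assumptions: corner
    # indices follow Example #4; the texture path is illustrative).
    texture = cv2.imread("Images/ITULogo.png")
    th, tw = texture.shape[:2]
    textureCorners = np.asarray([[0, th], [0, 0], [tw, th], [tw, 0]], dtype=np.float32)
    outerCorners = np.asarray([corners[0], corners[8], corners[45], corners[53]])
    homography, _ = cv2.findHomography(textureCorners, outerCorners)
    h, w = image.shape[:2]
    overlay = cv2.warpPerspective(texture, homography, (w, h))
    image = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)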
Example #2
    def __RealisticTextureMap(self):
        # Load videodata.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the boxes colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # BGR.

        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Draw each colored rectangle on the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            # Show the final processed image.
            cv2.imshow("Ground Floor", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
Example #3
def createHomography():
    img1 = cv2.imread('Images/ITUMap.bmp')

    fn = "GroundFloorData/sunclipds.avi"
    cap = cv2.VideoCapture(fn)

    # Read the first frame of the sequence.
    _, img2 = cap.read()

    print(SIGBTools.getHomographyFromMouse(img2, img1, 6))
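The homography printed above is usually saved for the later examples that load one from disk (e.g. homography1.npy in Example #6). A minimal sketch, assuming getHomographyFromMouse returns an (H, points) pair as elsewhere in these examples:

    H, _ = SIGBTools.getHomographyFromMouse(img2, img1, 6)
    np.save("Outputs/homography1.npy", H)  # output path is illustrative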
Example #4
    def __TextureMapGridSequence(self):
        """Skeleton for texturemapping on a video sequence."""
        # Load videodata.
        filename = self.__path + "Videos/Grid05.mp4"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        outputSize = (1280, 720)
        recorder = SIGBTools.RecordingVideos(
            self.__path + "Outputs/TextureMapGridSequence_Grid05.wmv",
            size=outputSize)

        # Load texture mapping image.
        texture = cv2.imread(self.__path + "Images/ITULogo.png")
        texture = cv2.pyrDown(texture)

        # Define the number and indices of inner corners per chessboard row and column.
        patternSize = (9, 6)
        idx = [0, 8, 45, 53]

        # Read each frame from input video.
        h, w = texture.shape[0:2]
        textureCorners = np.asarray([[0, h], [0, 0], [w, h], [w, 0]])
        while True:
            # Read the current image from a video file.
            image = SIGBTools.read()
            # Blurs an image and downsamples it.
            image = cv2.pyrDown(image)

            # Finds the positions of internal corners of the chessboard.
            corners = SIGBTools.FindCorners(image, False)
            if corners is not None:
                corners = np.asarray([
                    corners[idx[0]], corners[idx[1]], corners[idx[2]],
                    corners[idx[3]]
                ])
                homography, _ = cv2.findHomography(textureCorners, corners)
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(texture, homography, (w, h))
                image = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)

            imagecopy = image.copy()
            imagecopy = cv2.resize(imagecopy, outputSize)
            SIGBTools.write(imagecopy)
            # Show the final processed image.
            cv2.imshow("Image", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
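Since exactly four correspondences are used above, cv2.getPerspectiveTransform would compute the same homography exactly (no estimation needed); a minimal alternative sketch, assuming both point sets are converted to float32:

    src = np.asarray(textureCorners, dtype=np.float32)          # 4x2
    dst = np.asarray(corners, dtype=np.float32).reshape(4, 2)   # 4x2
    homography = cv2.getPerspectiveTransform(src, dst)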
Example #5
    def __CalibrateCamera(self):
        """Main function used for calibrating a common webcam."""
        # Load the camera.
        cameraID = 0
        SIGBTools.VideoCapture(cameraID, SIGBTools.CAMERA_VIDEOCAPTURE_640X480_30FPS)

        # Calibrate the connected camera.
        SIGBTools.calibrate()

        # Close all allocated resources.
        SIGBTools.release()
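SIGBTools.calibrate() hides the actual OpenCV calibration; a minimal standalone sketch of the classic chessboard calibration it presumably wraps (grayViews, a list of grayscale pattern views, is an assumption):

    import numpy as np
    import cv2

    patternSize = (9, 6)
    # One 3D object point per inner corner, on the z = 0 plane.
    objp = np.zeros((patternSize[0] * patternSize[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:patternSize[0], 0:patternSize[1]].T.reshape(-1, 2)

    objPoints, imgPoints = [], []
    for gray in grayViews:  # assumed: grayscale views of the pattern
        found, corners = cv2.findChessboardCorners(gray, patternSize)
        if found:
            objPoints.append(objp)
            imgPoints.append(corners)

    h, w = grayViews[0].shape
    rms, K, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
        objPoints, imgPoints, (w, h), None, None)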
Example #6
    def __ShowFloorTrackingData(self):
        # Load videodata.
        filename = self.__path + "Videos/ITUStudent.avi"
        image2 = cv2.imread(self.__path + "Images/ITUMap.png")
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        SIGBTools.RecordingVideos("C:\\Code\\IIAML\\Project3\\Assignments\\_02\\Outputs\\MapLocation.wmv")
        
        # Load homography
        homography = np.load(self.__path + "Outputs/homography1.npy")
        
        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the boxes colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # BGR.

        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Draw each colored rectangle on the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            point2 = self.__calcHomogenousCoordinates(boxes[2][1], homography)
            # Show the final processed image.
            # Live tracking
            image2_updated = image2.copy()
            cv2.circle(image2_updated, (int(point2[0]), int(point2[1])), 10, (0, 255, 0), -1)
            cv2.imshow("Map", image2_updated)
            # Drawing
            #cv2.circle(image2, (int(point2[0]), int(point2[1])), 3, (0, 255, 0), -1)
            #cv2.imshow("Map", image2)
            
            cv2.imshow("Ground Floor", image)
            SIGBTools.write(image2_updated)
            
            #self.__showPointsOnFrameOfView(image, points)            
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        SIGBTools.close()
        cv2.waitKey(2000)
        cv2.imwrite(self.__path + "Outputs/mapImage.png", image2)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
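__calcHomogenousCoordinates is not shown in this example; a minimal sketch of what it plausibly does (apply the homography and dehomogenize), offered as an assumption rather than the original helper:

    def __calcHomogenousCoordinates(self, point, homography):
        # Map an (x, y) image point through H and divide by w.
        p = np.dot(homography, np.array([point[0], point[1], 1.0]))
        return p / p[2]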
Example #7
    def __Calibrate(self, leftCorners, rightCorners):
        """Calibrate the stereo camera for each new detected pattern."""
        # Get the object points; the outer vector contains one element per pattern view.
        objectPoints = SIGBTools.CalculatePattern()

        # <006> Insert the pattern detection results in three vectors.
        self.__LeftCorners.append(leftCorners)
        self.__RightCorners.append(rightCorners)
        self.__ObjectPoints.append(objectPoints)

        # <007> Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
        path = "./Framework/VideoCaptureDevices/CalibrationData/"
        
        for index, parameter in zip(range(2), CaptureManager.Instance.Parameters):
            parameter.K = np.load(path + "Camera_" + str(index) + "_cameraMatrix.npy")
            parameter.DistCoeffs = np.load(path + "Camera_" + str(index) + "_distCoeffs.npy")
        
        # Calibrates the stereo camera.
        R, t = SIGBTools.calibrateStereoCameras(self.__LeftCorners, self.__RightCorners, self.__ObjectPoints)

        # <011> Computes rectification transforms for each head of a calibrated stereo camera.
        SIGBTools.StereoRectify(R, t)

        # <012> Computes the undistortion and rectification transformation maps.
        SIGBTools.UndistortRectifyMap()

        # End the calibration process.
        self.__isCalibrating = False
        self.__isUndistort = True

        # Pause for 1 second so the user can see the processed images.
        cv2.waitKey(1000)
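SIGBTools.calibrateStereoCameras is a wrapper; a minimal sketch of the cv2.stereoCalibrate call it presumably performs (OpenCV 3+ signature; K1/d1/K2/d2 stand for the per-camera intrinsics loaded above, and the 640x480 image size matches the capture setup):

    retval, K1, d1, K2, d2, R, t, E, F = cv2.stereoCalibrate(
        self.__ObjectPoints, self.__LeftCorners, self.__RightCorners,
        K1, d1, K2, d2, (640, 480),
        flags=cv2.CALIB_FIX_INTRINSIC)  # keep the per-camera intrinsics fixed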
Example #8
def texturemapGroundFloor(SequenceInputFile):
    '''
    get four points in the map and overlay a logo on the sequence
    '''
    sequence, I2, retval = getImageSequence(SequenceInputFile)
    I1 = cv2.imread('Images/ITULogo.jpg')
    H, Points = SIGBTools.getHomographyFromMouse(I1, I2, -4)  # get 4 points from mouse input
    h, w, d = I2.shape
    if(retval):
        cv2.imshow("Overlayed Image", I2)
    print("SPACE: Run/Pause")
    print("Q or ESC: Stop")
    running = True
    while(retval):
        ch = cv2.waitKey(1)
        # video controls
        if(ch == 32):  # Spacebar: toggle run/pause.
            running = not running
        if ch == 27:
            break
        if(ch == ord('q')):
            break
        if(running):
            retval, I2 = sequence.read()
            if(retval):  # if there is an image
                overlay = cv2.warpPerspective(I1, H, (w, h))  # get the perspective image for overlaying on the video
                M = cv2.addWeighted(I2, 0.5, overlay, 0.5, 0)  # overlay the video with the image
                cv2.imshow("Overlayed Image", M)  # show the result
Example #9
def texturemapGroundFloor():
    """
    Place the texture on every frame of the clip
    """
    fn = 'data/GroundFloorData/SunClipDS.avi'
    cap = cv2.VideoCapture(fn)

    texture = cv2.imread('data/Images/ITULogo.jpg')
    texture = cv2.pyrDown(texture)
    
    mTex,nTex,t = texture.shape
    
    running, imgOrig = cap.read()
    mI,nI,t = imgOrig.shape

    H,Points  = SIGBTools.getHomographyFromMouse(texture,imgOrig,-1)
    h,w,d = imgOrig.shape
    
    while(running):
        running, imgOrig = cap.read()
        if(running):
            h,w,d = imgOrig.shape
            overlay = cv2.warpPerspective(texture, H,(w, h))
            M = cv2.addWeighted(imgOrig, 0.5, overlay, 0.5,0)
            cv2.imshow("Overlayed",M)
            cv2.waitKey(1)
Example #10
def calibrationExample():
    camNum = 0  # The number of the camera to calibrate
    nPoints = 5  # number of images used for the calibration (space presses)
    patternSize = (9, 6)  #size of the calibration pattern
    saveImage = 'calibrationShoots'

    calibrated, camera_matrix, dist_coefs, rms = SIGBTools.calibrateCamera(
        camNum, nPoints, patternSize, saveImage)
    K = camera_matrix
    cam1 = Camera(np.hstack((K, np.dot(K, np.array([[0], [0], [-1]])))))
    cam1.factor()
    #Factor projection matrix into intrinsic and extrinsic parameters
    print("K=", cam1.K)
    print("R=", cam1.R)
    print("t", cam1.t)

    if (calibrated):
        capture = cv2.VideoCapture(camNum)
        running = True
        while running:
            running, img = capture.read()
            imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            ch = cv2.waitKey(1)
            if (ch == 27) or (ch == ord('q')):  #ESC
                running = False
            img = cv2.undistort(img, camera_matrix, dist_coefs)
            found, corners = cv2.findChessboardCorners(imgGray, patternSize)
            if (found != 0):
                cv2.drawChessboardCorners(img, patternSize, corners, found)
            cv2.imshow("Calibrated", img)
Example #11
def DisplayTraceStatic(homography):
    trackingData = loadtxt(projectResources + "GroundFloorData/trackingdata.dat")
    trackingData = SIGBTools.toHomogenious(trackingData)
    #rotationCW90 = n.matrix([[n.cos(90), n.sin(90), 0],[-n.sin(90), n.cos(90) ,0],[0,0,1]])
    #homography = homography * rotationCW90
    
    transformedPoints = list()
    
    for p in trackingData:
        currentPoint = n.matrix([p[2], p[3], 1]).T
        hp = n.dot(homography, currentPoint)
        transformedPoints.append(hp / hp[2, 0])  # normalize the homogeneous coordinate
        
    I = cv2.imread(projectResources + "Images/ITUMap.bmp")
    drawI = I.copy()
    fig = figure(1) 
    ax1  = subplot(1,2,1) 
    ax2  = subplot(1,2,2) 
    ax1.imshow(I) 
    ax2.imshow(drawI)
    ax1.axis('image') 
    ax1.axis('off')  
    fig.hold('on')
    
    for p in transformedPoints:
        subplot(1,2,1)
        plot(p[0],p[1],'rx')
        cv2.circle(drawI,(int(p[0]),int(p[1])),2,(0,255,0),1)
    
    ax2.cla()
    ax2.imshow(drawI)
    draw() #update display: updates are usually defered 
    show()
    cv2.imwrite("drawImage.jpg", drawI)
    
    return transformedPoints
Example #12
def DisplayTraceDynamic(homography):
    fn = projectResources + "GroundFloorData/sunclipds.avi"
    sequence = cv2.VideoCapture(fn)
    
    floorPlan = cv2.imread(projectResources + "Images/ITUMap.bmp")
    
    trackingData = loadtxt(projectResources + "GroundFloorData/trackingdata.dat")
    trackingData = SIGBTools.toHomogenious(trackingData)
    m,n = trackingData.shape
    
    drawI = floorPlan.copy()
    running, cap  = sequence.read()
    
    for p in trackingData:
        running, cap = sequence.read()
        if(running):
            cv2.imshow("Sequence", cap)
            cv2.waitKey(1)
            currentPoint = matrix([p[2], p[3], 1]).T
            hPoint = dot(homography, currentPoint)
            hPoint = hPoint / hPoint[2, 0]  # normalize the homogeneous coordinate
            cv2.circle(drawI,(int(hPoint[0]),int(hPoint[1])),2,(0,255,0),1)
            cv2.imshow("Plan", drawI)
            cv2.waitKey(1)
            #print hPoint
    return
Example #13
def showFloorTrackingData():
    #Load videodata
    fn = "GroundFloorData/sunclipds.avi"
    cap = cv2.VideoCapture(fn)
    # Read the first frame.
    running, imgOrig = cap.read()
    M=cv2.imread("images/ituMap.bmp")
    H,W,Z=M.shape
    writer = cv2.VideoWriter('Map.avi', cv.CV_FOURCC('D','I','V','3'), 10.0, (W,H), True)    
    G=imgOrig
    dataFile = np.loadtxt('GroundFloorData/trackingdata.dat')
    m,n = dataFile.shape
    fig = figure()
    H,points=SIGBTools.getHomographyFromMouse(G, M, N=4)
    for k in range(m):
        running, imgOrig = cap.read() 
        if(running):
            boxes= frameTrackingData2BoxData(dataFile[k,:])
            boxColors = [(255,0,0),(0,255,0),(0,0,255)]
            Hp=displayTrace(H, dataFile, fig, m, boxes[2])
            for i in range(0,1):
                print("THE POINTS: " + str(points))
                print("THE HOMOGRAPHY: " + str(H))
                print("PLOT COORDINATES: " + str(Hp[i][0]) + " , " + str(Hp[i][1]) + " , " + str(Hp[i][2]))
                cv2.circle(M,(int(Hp[i][0]),int(Hp[i][1])),1,(255,0,0),10)
            for j in range(0,3):
                aBox = boxes[j]
                cv2.rectangle(imgOrig, aBox[0], aBox[1], boxColors[j])
                #cv2.circle(M,(int(Hp[1][0]),int(Hp[1][1])),2,(0,255,0),10)
            cv2.imshow("boxes",imgOrig);
            cv2.imshow("map", M)
            temp=M
            writer.write(temp)
            cv2.waitKey(1)
Example #14
def calibrationExample():
    camNum =0           # The number of the camera to calibrate
    nPoints = 5         # number of images used for the calibration (space presses)
    patternSize=(9,6)   #size of the calibration pattern
    saveImage = False

    calibrated, camera_matrix,dist_coefs,rms = SIGBTools.calibrateCamera(camNum,nPoints,patternSize,saveImage)
    K = camera_matrix
    cam1 =Camera( np.hstack((K,np.dot(K,np.array([[0],[0],[-1]])) )) )
    cam1.factor()
    #Factor projection matrix into intrinsic and extrinsic parameters
    print("K=", cam1.K)
    print("R=", cam1.R)
    print("t", cam1.t)
    
    if (calibrated):
        capture = cv2.VideoCapture(camNum)
        running = True
        while running:
            running, img =capture.read()
            imgGray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            
            ch = cv2.waitKey(1)
            if(ch==27) or (ch==ord('q')): #ESC
                running = False
            img=cv2.undistort(img, camera_matrix, dist_coefs )
            found,corners=cv2.findChessboardCorners(imgGray, patternSize  )
            if (found!=0):
                cv2.drawChessboardCorners(img, patternSize, corners,found)
            cv2.imshow("Calibrated",img)
Example #15
def textureMapGroundFloor():
    #create H_T_G from first frame of sequence
    texture = cv2.imread('Images/ITULogo.jpg')

    fn = "GroundFloorData/sunclipds.avi"
    sequence = cv2.VideoCapture(fn)
    running, frame = sequence.read()

    h_t_g, calibration_points = SIGBTools.getHomographyFromMouse(texture, frame, -4)
    print(h_t_g)
    #fig = figure()
    while running:
        running, frame = sequence.read()

        if not running:
            return

        #texture map
        h,w,d = frame.shape
        warped_texture = cv2.warpPerspective(texture, h_t_g,(w, h))
        result = cv2.addWeighted(frame, .7, warped_texture, .3, 50)

        #display
        cv2.imshow("Texture Mapping", result)
        cv2.waitKey(1)
Example #16
def texturemapGridSequence():
    """ Skeleton for texturemapping on a video sequence"""
    fn = 'data/GridVideos/grid1.mp4'
    cap = cv2.VideoCapture(fn)
    drawContours = True

    texture = cv2.imread('data/Images/ITULogo.jpg')
    texture = cv2.pyrDown(texture)

    mTex,nTex,t = texture.shape

    # Use the corners of the texture (x = columns = nTex, y = rows = mTex).
    srcPoints = [(0.0, 0.0), (float(nTex), 0.0), (float(nTex), float(mTex)), (0.0, float(mTex))]
    
    # Read the first frame.
    running, imgOrig = cap.read()
    mI,nI,t = imgOrig.shape

    cv2.imshow("win2",imgOrig)

    pattern_size = (9, 6)

    idx = [0,8,53,45]
    while(running):
        # Read the next frame.
        running, imgOrig = cap.read()
        if(running):
            imgOrig = cv2.pyrDown(imgOrig)
            gray = cv2.cvtColor(imgOrig,cv2.COLOR_BGR2GRAY)

            m,n = gray.shape

            found, corners = cv2.findChessboardCorners(gray, pattern_size)
            if found:
                term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
              #  cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
              #  cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)
                # Get the points based on the chessboard
                dstPoints = []
                for t in idx:
                    dstPoints.append((int(corners[t,0,0]),int(corners[t,0,1])))
                    #cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))
                
                H = SIGBTools.estimateHomography(srcPoints,dstPoints)
                
                overlay = cv2.warpPerspective(texture, H,(n, m))
                
                M = cv2.addWeighted(imgOrig, 0.9, overlay, 0.9,0)
                
                cv2.imshow("win2",M)
            else:
                cv2.imshow("win2",imgOrig)
            cv2.waitKey(1)
Example #17
    def __TextureMapObjectSequence(self):
        """Poor implementation of simple TextureMap."""
        # Load videodata.
        filename = self.__path + "Videos/Scene01.mp4"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        drawContours = True

        # Load texture mapping image.
        texture = cv2.imread(self.__path + "Images/ITULogo.png")

        # Read each frame from input video.
        while True:
            # Skip ahead 20 frames in the video.
            for t in range(20):
                # Read the current image from a video file.
                image = SIGBTools.read()

            # Try to detect an object in the input image.
            squares = SIGBTools.DetectPlaneObject(image)

            # Check the corner of detected object.
            for sqr in squares:
                # Do the texture map here (see the sketch after this example).
                # TODO
                pass

            # Draws contours outlines or filled contours.
            if drawContours and len(squares) > 0:
                cv2.drawContours(image, squares, -1, (0, 255, 0), 3)

            # Show the final processed image.
            cv2.imshow("Detection", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
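The TODO above is where the texture map belongs; a minimal sketch, assuming each sqr holds four corner points ordered clockwise from the top-left (the ordering is an assumption):

    # Hypothetical completion of the TODO above.
    th, tw = texture.shape[:2]
    textureCorners = np.float32([[0, 0], [tw, 0], [tw, th], [0, th]])
    squareCorners = np.float32(sqr).reshape(4, 2)
    homography = cv2.getPerspectiveTransform(textureCorners, squareCorners)
    h, w = image.shape[:2]
    overlay = cv2.warpPerspective(texture, homography, (w, h))
    image = cv2.addWeighted(image, 0.7, overlay, 0.3, 0)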
Example #18
def simpleTextureMap():
    I1 = cv2.imread('Images/ITULogo.jpg')
    I2 = cv2.imread('Images/ITUMap.bmp')
    cv2.namedWindow("Overlayed Image")
    #Print Help
    H,Points  = SIGBTools.getHomographyFromMouse(I1,I2,4)
    h, w,d = I2.shape
    overlay = cv2.warpPerspective(I1, H,(w, h))
    M = cv2.addWeighted(I2, 0.5, overlay, 0.5,0)

    cv2.imshow("Overlayed Image",M)
    cv2.waitKey(0)
Example #19
    def __TextureMapping(self):
        """Apply a texture mapping on an augmented object."""
        # Creates a window to show the stereo images.
        cv2.namedWindow("Original", cv2.WINDOW_AUTOSIZE)

        # Load the video capture device.
        SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        # Repetition statement for analyzing each captured image.
        while True:
            # Grab the video frames.
            image = SIGBTools.read()

            # Find the pattern in the image.
            corners = SIGBTools.FindCorners(image, False)

            # Apply the augmented object.
            if corners is not None:
                image = self.__Augmentation(corners, image)

            # Check what the user wants to do.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            
            # Record video
            #resultFile = self.__path + "/Videos/TextureMapping02.wmv"
            #size = image.shape
            #SIGBTools.RecordingVideos(resultFile, size=(size[1], size[0]))
            #SIGBTools.write(image)

            # Show the final processed image.
            cv2.imshow("Original", image)

        # Wait 2 seconds before finishing the method.
        SIGBTools.close()
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
Example #20
def texturemapGridSequence():
    cap = cv2.VideoCapture("/Users/wzdziechowski/Desktop/IA2Resources/GridVideos/grid1.mp4")

    texture = cv2.imread("/Users/wzdziechowski/Desktop/IA2Resources/Images/ITULogo.jpg")
    texture = cv2.pyrDown(texture)
    # find texture corners (start top left follow clockwise)
    mTex, nTex, t = texture.shape
    textureCorners = [(0.0, 0.0), (float(nTex), 0.0), (float(nTex), float(mTex)), (0.0, float(mTex))]  # x = columns = nTex

    running, imgOrig = cap.read()

    pattern_size = (9, 6)

    idx = [0, 8, 53, 45]
    while running:

        imgOrig = cv2.pyrDown(imgOrig)
        h, w, d = imgOrig.shape
        gray = cv2.cvtColor(imgOrig, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, pattern_size)

        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
            cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)

            #    for t in idx:
            #        cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))

            # found image chessboard corners (start top left follow clockwise)
            chessCorners = [
                (corners[0, 0, 0], corners[0, 0, 1]),
                (corners[8, 0, 0], corners[8, 0, 1]),
                (corners[53, 0, 0], corners[53, 0, 1]),
                (corners[45, 0, 0], corners[45, 0, 1]),
            ]

            # Convert to openCV format
            ip1 = np.array([[x, y] for (x, y) in textureCorners])
            ip2 = np.array([[x, y] for (x, y) in chessCorners])

            # find homography
            H = SIGBTools.estimateHomography(ip1, ip2)

            # do the same as for the simple texture (add the images weighted)
            overlay = cv2.warpPerspective(texture, H, (w, h))
            imgOrig = cv2.addWeighted(imgOrig, 1, overlay, 0.5, 0)

        cv2.imshow("win2", imgOrig)
        cv2.waitKey(1)
        running, imgOrig = cap.read()
    return None
Example #21
def texturemapGroundFloor():
    I1 = cv2.imread('Images/ITULogo.jpg')
    cap = cv2.VideoCapture("GroundFloorData/sunclipds.avi")
    running, I2 = cap.read()
    H, points = SIGBTools.getHomographyFromMouse(I1,I2,-4)
    h, w, d = I2.shape
    overlay = cv2.warpPerspective(cv2.transpose(I1), H,(w, h))
    while(running):
        M = cv2.addWeighted(I2, 1.0, overlay, 0.5,0)
        cv2.imshow("Texture", M)
        cv2.waitKey(1)
        running, I2 = cap.read()
    return
Example #22
def texturemapGroundFloor():
    I1 = cv2.imread("Images/ITULogo.jpg")
    cap = cv2.VideoCapture("GroundFloorData/sunclipds.avi")
    running, I2 = cap.read()
    H, points = SIGBTools.getHomographyFromMouse(I1, I2, -4)
    h, w, d = I2.shape
    overlay = cv2.warpPerspective(cv2.transpose(I1), H, (w, h))
    while running:
        M = cv2.addWeighted(I2, 1.0, overlay, 0.5, 0)
        cv2.imshow("Texture", M)
        cv2.waitKey(1)
        running, I2 = cap.read()
    return
Example #23
def simpleTextureMap():

    I1 = cv2.imread('Images/ITULogo.jpg')
    I2 = cv2.imread('Images/ITUMap.bmp')

    #Print Help
    H,Points  = SIGBTools.getHomographyFromMouse(I1,I2,4)
    h, w,d = I2.shape
    overlay = cv2.warpPerspective(I1, H,(w, h))
    M = cv2.addWeighted(I2, 0.5, overlay, 0.5,0)

    cv2.imshow("Overlayed Image",M)
    cv2.waitKey(0)
Example #24
    def __EpipolarGeometry(self):
        """Define the epipolar geometry between stereo cameras."""
        # Creates a window to show the stereo images.
        cv2.namedWindow("Stereo",  cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback("Stereo", self.__FMEyeMouseEvent)

        # Load two video capture devices.
        SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        SIGBTools.VideoCapture(1, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        # Repetition statement for analyzing each captured image.
        while True:
            # Check if the fundamental matrix process is running.
            if not self.__isFrozen:
                # Grab the video frames.
                leftImage, rightImage = SIGBTools.read()
                # Combine two stereo images in only one window.
                self.__Image = self.__CombineImages(leftImage, rightImage, 1)

            # Check what the user wants to do.
            inputKey = cv2.waitKey(1)
            # Esc or letter "q" key.
            if inputKey == 27 or inputKey == ord("q"):
                break
            # Letter "f" key.
            elif inputKey == ord("f"):
                self.__isFrozen = not self.__isFrozen

            # Show the final processed image.
            cv2.imshow("Stereo", self.__Image)

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
Example #25
def texturemapGridSequence():
    cap = cv2.VideoCapture('/Users/wzdziechowski/Desktop/IA2Resources/GridVideos/grid1.mp4')

    texture = cv2.imread('/Users/wzdziechowski/Desktop/IA2Resources/Images/ITULogo.jpg')
    texture = cv2.pyrDown(texture)
    #find texture corners (start top left follow clockwise)
    mTex,nTex,t = texture.shape
    textureCorners = [(0., 0.), (float(nTex), 0.), (float(nTex), float(mTex)), (0., float(mTex))]  # x = columns = nTex

    running, imgOrig = cap.read()

    pattern_size = (9, 6)

    idx = [0,8,53,45]
    while(running):

        imgOrig = cv2.pyrDown(imgOrig)
        h, w, d = imgOrig.shape
        gray = cv2.cvtColor(imgOrig,cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, pattern_size)

        if found:
            term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
            cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)
            
        #    for t in idx:
        #        cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))

            #found image chessboard corners (start top left follow clockwise)
            chessCorners = [(corners[0,0,0],corners[0,0,1]),(corners[8,0,0],corners[8,0,1]),(corners[53,0,0],corners[53,0,1]),(corners[45,0,0],corners[45,0,1])]

            #Convert to openCV format
            ip1 = np.array([[x,y] for (x,y) in textureCorners])
            ip2 = np.array([[x,y] for (x,y) in chessCorners])
            
            

            #find homography
            H = SIGBTools.estimateHomography(ip1, ip2)

            #do the same as for the simple texture (add the images weighted)
            overlay = cv2.warpPerspective(texture, H,(w, h))
            imgOrig = cv2.addWeighted(imgOrig, 1, overlay ,0.5,0)

        cv2.imshow("win2",imgOrig)
        cv2.waitKey(1)
        running, imgOrig = cap.read()
    return None
Example #26
def texturemapGroundFloor():
    sequence = projectResources + "GroundFloorData/sunclipds.avi"
    texture = cv2.imread(projectResources + 'Images/ITULogo.jpg')
    sequence = cv2.VideoCapture(sequence)
    texture = texture.copy()
    running, firstFrame = sequence.read()
    H, points = SIGBTools.getHomographyFromMouse(texture,firstFrame,-4)
    h, w, d = firstFrame.shape
    overlay = cv2.warpPerspective(cv2.transpose(texture), H,(w, h))
    while(running):
        running, cap = sequence.read()
        if(running):
            M = cv2.addWeighted(cap, 1.0, overlay, 0.5, 0)
            cv2.imshow("Texture", M)
            cv2.waitKey(1)
    return
Example #27
def texturemapGroundFloor(G,S,N):
    fn = "GroundFloorData/SunClipDS.avi"
    cap = cv2.VideoCapture(fn)
    running, imgOrig = cap.read()
    cv2.namedWindow("output")
    # Temporary: use the first frame of the sequence as the source image G.
    G = imgOrig

    for i in S:
        img = cv2.imread(i)
        H, Points = SIGBTools.getHomographyFromMouse(G, img, N)
        h, w, d = img.shape
        overlay = cv2.warpPerspective(G, H, (w, h))
        M = cv2.addWeighted(img, 0.5, overlay, 0.5, 0)
        cv2.imshow("output", M)
    cv2.imshow("output",imgOrig)
    cv2.waitKey(0)
Example #28
def findMaxGradientValueOnNormal(gradient_magnitude, gradient_orientation, p1, p2, normal_orientation):
    # Get integer coordinates on the straight line between p1 and p2.
    pts = SIGBTools.getLineCoordinates(p1, p2)
    values = gradient_magnitude[pts[:, 1], pts[:, 0]]
    #orientations = gradient_orientation[pts[:,1],pts[:,0]]
    #normal_angle = np.arctan2(normal_orientation[1], normal_orientation[0]) * (180 / math.pi)

    # orientation_difference = abs(orientations - normal_angle)
    # print(orientation_difference[0:10])
    # max_index = 0 #np.argmax(values)
    # max_value = 0
    # for index in range(len(values)):
    #     if orientation_difference[index] < 20:
    #         if values[index] > max_value:
    #             max_index = index
    #             max_value = values[index]
    #print(orientations[max_index], normal_angle)

    # Return the line point with the strongest gradient magnitude.
    max_index = np.argmax(values)
    return pts[max_index]
Example #29
    def __SimpleTextureMap(self):
        """Example of how linear texture mapping can be done using OpenCV."""
        # Read the input images.
        image1 = cv2.imread(self.__path + "Images/ITULogo.png")
        image2 = cv2.imread(self.__path + "Images/ITUMap.png")

        # Estimate the homography.
        H, points = SIGBTools.GetHomographyFromMouse(image1, image2, 4)

        # Draw the homography transformation.
        h, w = image2.shape[0:2]
        overlay = cv2.warpPerspective(image1, H, (w, h))
        result = cv2.addWeighted(image2, 0.5, overlay, 0.5, 0)

        # Show the result image.
        cv2.imshow("SimpleTextureMap", result)
        cv2.waitKey(0)

        # Close all allocated resources.
        cv2.destroyAllWindows()
Example #30
def simpleTextureMap():

    sequence = cv2.VideoCapture("GroundFloorData/SunClipDS.avi")
    retval, I1 = sequence.read()


#    I1 = cv2.imread('Images/ITULogo.jpg')
    I2 = cv2.imread('Images/ITUMap.bmp')

    # Print Help
    H, Points = SIGBTools.getHomographyFromMouse(I1, I2, 4)

    print(H)

    h, w, d = I2.shape
    overlay = cv2.warpPerspective(I1, H, (w, h))
    M = cv2.addWeighted(I2, 0.5, overlay, 0.5, 0)

    cv2.imshow("Overlayed Image", M)
    cv2.waitKey(0)
Example #31
def findEllipseContour(gray, xc, yc, r, img):
    nPts = 60
    C = (xc, yc)
    gx, gy, gm, gd, res = getGradientImageInfo(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
    circleRadius = 40
    #P = getCircleSamples(center=C, radius=circleRadius, nPoints=nPts)
    t = 0
    P = SIGBTools.getCircleSamples(center=C, radius=r, nPoints=nPts)
    newPupils = np.zeros((nPts, 1, 2)).astype(np.float32)
    newIris = np.zeros((nPts, 1, 2)).astype(np.float32)
    for (x, y, dx, dy) in P:
        cord1 = x + r * dx  #+(+x*dx)/math.pi
        cord2 = y + r * dy  #+(y*dy)/math.pi
        cord1, cord2 = coordCheck(cord1, cord2, len(gray), len(gray[0]))
        maxValue = findMaxGradientValueOnNormal(gm, gd, res, (C[0], C[1]), (cord1, cord2))
        newPupils[t] = maxValue
        cv2.circle(img, (int(maxValue[0]), int(maxValue[1])), 1, (255, 255, 0), 4)
        dist = math.sqrt(math.pow(C[0] - x * dx, 2) + math.pow(C[1] - y * dy, 2))
        nx = x + x / 2 * dx
        ny = y + y / 2 * dy
        nx, ny = coordCheck(nx, ny, len(gray), len(gray[0]))
        maxValue = findMaxGradientValueOnNormal(gm, gd, res, (cord1, cord2), (nx, ny))
        cv2.line(img, (int(x), int(y)), (int(maxValue[0]), int(maxValue[1])), (55, 55, 55), 1)
        newIris[t] = maxValue
        t = t + 1
        cv2.circle(img, (int(maxValue[0]), int(maxValue[1])), 1, (0, 255, 255), 4)

    squares = cv2.fitEllipse(newIris)
    ax = int(squares[0][0])
    bx = int(squares[0][1])
    cx = int((squares[1][0] + squares[1][1]) / 4)
    cv2.circle(img, (ax, bx), cx, (0, 255, 255), 4)
    squares2 = cv2.fitEllipse(newPupils)
    ax2 = int(squares2[0][0])
    bx2 = int(squares2[0][1])
    cx2 = int((squares2[1][0] + squares2[1][1]) / 4)
    cv2.circle(img, (ax2, bx2), cx2, (125, 125, 0), 4)
    return img
Example #32
def findMaxGradientValueOnNormal(gm, gd, resolution, p1, p2):
    pts = SIGBTools.getLineCoordinates(p1, p2)
    temp = pts / resolution
    normVal = gm[temp[:, 1], temp[:, 0]]
    found = False
    temp = len(normVal)
    accuracy = 6
    bestAngle = 360.0
    if temp < accuracy:
        accuracy = temp
    while (not found) and accuracy > 1:
        maxValueIndexes = np.where(normVal == max(normVal))
        maxValueIndex = maxValueIndexes[0][0]
        maxX = pts[maxValueIndex][0]
        maxY = pts[maxValueIndex][1]
        vect = np.subtract(p1, p2)
        endX = vect[0]
        endY = vect[1]
        length = math.sqrt(math.pow(endX, 2) + math.pow(endY, 2))
        p1NormX, p1NormY = endX / length, endY / length
        length = math.sqrt(math.pow(maxX, 2) + math.pow(maxY, 2))
        p2NormX, p2NormY = maxX / length, maxY / length
        dot = np.dot((p1NormX, p1NormY), (p2NormX, p2NormY))
        angle = math.degrees(math.acos(dot))
        if angle > 180:
            angle = math.fabs(angle - 360)
        if angle < 20:
            found = True
            retX, retY = maxX, maxY
            return retX, retY
        else:
            if angle < bestAngle:
                bestAngle = angle
                retX, retY = maxX, maxY
            normVal = np.delete(normVal, maxValueIndex, axis=0)
        accuracy = accuracy - 1
    return retX, retY
Example #33
def AugmentImages():
    #Loading calibration matrix
    K = np.load('Results/PMatrix.npy') 
    #setting pattern size
    pattern_size = (9,6)
    #loading calibration images
    L_CP = cv2.imread('Results/L_CP.jpg') #Frontal view
    #Getting cube points from cubePoints.py
    cubePoints = cube.cube_points([0,0,0.1],0.1)
    
    I1 = cv2.imread('Results/calibrationShoots1.jpg')
    I2 = cv2.imread('Results/calibrationShoots2.jpg')
    I3 = cv2.imread('Results/calibrationShoots3.jpg')
    I4 = cv2.imread('Results/calibrationShoots4.jpg')
    I5 = cv2.imread('Results/calibrationShoots5.jpg')
    
    Images = [I1, I2, I3, I4 ,I5]
    ImageH = [] #homographies from frontal view to H respectively
    
    #Getting chesscorners for frontal view
    Fgray = cv2.cvtColor(L_CP,cv2.COLOR_BGR2GRAY)
    Ffound, Fcorners = cv2.findChessboardCorners(Fgray, pattern_size)
    FchessCorners = [(Fcorners[0,0,0],Fcorners[0,0,1]),(Fcorners[8,0,0],Fcorners[8,0,1]),(Fcorners[45,0,0],Fcorners[45,0,1]),(Fcorners[53,0,0],Fcorners[53,0,1])]
    #To open CV format
    FchessCorners = np.array([[x,y] for (x,y) in FchessCorners])
    
    #Getting chesscorners for the rest of pics
    for I in Images:
        #converting to gray for better contrast
        gray = cv2.cvtColor(I,cv2.COLOR_BGR2GRAY)
        #finding all corners on chessboard
        found, corners = cv2.findChessboardCorners(gray, pattern_size)
        #picking utmost corners
        IchessCorners = [(corners[0,0,0],corners[0,0,1]),(corners[8,0,0],corners[8,0,1]),(corners[45,0,0],corners[45,0,1]),(corners[53,0,0],corners[53,0,1])]
        #To openCV format
        IchessCorners = np.array([[x,y] for (x,y) in IchessCorners])
        H,mask = cv2.findHomography(FchessCorners, IchessCorners)
        ImageH.append(H)
        
    cam1 = SIGBTools.Camera(hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    box_cam1 = cam1.project(SIGBTools.toHomogenious(cubePoints[:,:5]))
    
    cam2 = SIGBTools.Camera(dot(ImageH[3],cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    box_cam2 = cam2.project(SIGBTools.toHomogenious(cubePoints))
    print(box_cam2)
    #figure()
    #imshow(I4) 
    #plot(box_cam2[0,:],box_cam2[1,:],linewidth=3)
    #show()
    p=box_cam2
    
    ''' Drawing the box manually '''
    #bottom
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][2]),int(p[1][2])),(255,255,0),2)
    cv2.line(I4, (int(p[0][2]), int(p[1][2])), (int(p[0][3]),int(p[1][3])),(255,255,0),2)
    cv2.line(I4, (int(p[0][3]), int(p[1][3])), (int(p[0][4]),int(p[1][4])),(255,255,0),2)
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][4]),int(p[1][4])),(255,255,0),2)
    
    #connecting lines
    cv2.line(I4, (int(p[0][4]), int(p[1][4])), (int(p[0][5]),int(p[1][5])),(255,255,0),2)
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][6]),int(p[1][6])),(255,255,0),2)
    cv2.line(I4, (int(p[0][2]), int(p[1][2])), (int(p[0][7]),int(p[1][7])),(255,255,0),2)
    cv2.line(I4, (int(p[0][3]), int(p[1][3])), (int(p[0][8]),int(p[1][8])),(255,255,0),2)
    
    #top
    cv2.line(I4, (int(p[0][5]), int(p[1][5])), (int(p[0][6]),int(p[1][6])),(255,255,0),2)
    cv2.line(I4, (int(p[0][6]), int(p[1][6])), (int(p[0][7]),int(p[1][7])),(255,255,0),2)
    cv2.line(I4, (int(p[0][7]), int(p[1][7])), (int(p[0][8]),int(p[1][8])),(255,255,0),2)
    cv2.line(I4, (int(p[0][8]), int(p[1][8])), (int(p[0][9]),int(p[1][9])),(255,255,0),2)
    
    cv2.imshow('Dupa',I4)
    cv2.waitKey(10000)
    
    return
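cube_points comes from the cubePoints module mentioned in the comments; a sketch of the well-known helper from Solem's "Programming Computer Vision", which this code appears to follow (the first five projected points form the bottom square):

    def cube_points(c, wid):
        # Creates points for plotting a cube (the first five points are the
        # bottom square; some sides are repeated to close the plotted lines).
        p = []
        # bottom
        p.append([c[0]-wid, c[1]-wid, c[2]-wid])
        p.append([c[0]-wid, c[1]+wid, c[2]-wid])
        p.append([c[0]+wid, c[1]+wid, c[2]-wid])
        p.append([c[0]+wid, c[1]-wid, c[2]-wid])
        p.append([c[0]-wid, c[1]-wid, c[2]-wid])  # same as first, closes the square
        # top
        p.append([c[0]-wid, c[1]-wid, c[2]+wid])
        p.append([c[0]-wid, c[1]+wid, c[2]+wid])
        p.append([c[0]+wid, c[1]+wid, c[2]+wid])
        p.append([c[0]+wid, c[1]-wid, c[2]+wid])
        p.append([c[0]-wid, c[1]-wid, c[2]+wid])  # same as first, closes the square
        # vertical sides
        p.append([c[0]-wid, c[1]-wid, c[2]+wid])
        p.append([c[0]-wid, c[1]+wid, c[2]-wid])
        p.append([c[0]-wid, c[1]+wid, c[2]+wid])
        p.append([c[0]+wid, c[1]+wid, c[2]-wid])
        p.append([c[0]+wid, c[1]+wid, c[2]+wid])
        p.append([c[0]+wid, c[1]-wid, c[2]-wid])
        p.append([c[0]+wid, c[1]-wid, c[2]+wid])
        return np.array(p).T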
Example #34
    def __ShowFloorTrackingData(self):
        # Load videodata.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the boxes colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # BGR.
        outputSize = (1280, 960)
        recorder = SIGBTools.RecordingVideos(self.__path +
                                             "Outputs/MapLocation.wmv",
                                             size=outputSize)

        dataPoints = list()
        # Read each frame from input video and draw the rectangles on it.
        first = True
        homography = None
        itumap = cv2.imread(self.__path + "Images/ITUMap.png")
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Estimate the homography based on the first frame.
            if first:
                h, _ = SIGBTools.GetHomographyFromMouse(image, itumap)
                homography = h
                first = False
                np.save('homography1.npy', homography)

            # Draw each colored rectangle on the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])

            # Transform center of the feet box - box1 is feet box
            box1 = boxes[1][0]
            box2 = boxes[1][1]
            center = [(box1[0] + 0.5 * (box2[0] - box1[0])),
                      (box1[1] + 0.5 * (box2[1] - box1[1]))]
            # Reshape the center to be as expected
            center = np.array([center], dtype=np.float32)
            center = np.array([center])
            transformedCenter = cv2.perspectiveTransform(center, homography)
            dataPoints.append(transformedCenter)

            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            # Show the final processed image.
            #Draw current position
            c = transformedCenter[0][0]
            itumapCopy = itumap.copy()
            cv2.circle(itumapCopy, (int(c[0]), int(c[1])), 1, (0, 255, 0), -1)
            cv2.imshow("ITU Map", itumapCopy)

            cv2.imshow("Ground Floor", image)

            height, width, _ = image.shape
            resizedMap = cv2.resize(
                itumapCopy, (width, height))  # resize to the same dimensions
            stacked = np.concatenate((image, resizedMap), axis=0)  # stack vertically
            output = cv2.resize(stacked, outputSize)
            SIGBTools.write(output)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        imageMap = itumap.copy()
        for point in dataPoints:
            p = point[0][0]  #due to point being wrapped
            cv2.circle(imageMap, (int(p[0]), int(p[1])), 1, (0, 255, 0), -1)

        cv2.imwrite(self.__path + "Outputs/mapImage.png", imageMap)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
Example #35
    def __TextureMapGroundFloor(self):
        """Places a texture on the ground floor for each input image."""
        # Load videodata.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        outputSize = (640, 480)
        recorder = SIGBTools.RecordingVideos(
            self.__path + "Outputs/TextureMapGroundFloor.wmv", size=outputSize)

        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the boxes colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # BGR.

        homography = None
        overlay = None
        itulogo = cv2.imread(self.__path + "Images/ITULogo.png")
        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()
            if homography is None:
                h, _ = SIGBTools.GetHomographyFromMouse(itulogo, image, -4)
                homography = h
                np.save('homography2.npy', homography)
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(itulogo, homography, (w, h))

            image = cv2.addWeighted(image, 0.8, overlay, 0.2, 0)

            # Draw each colored rectangle on the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            imagecopy = image.copy()
            imagecopy = cv2.resize(imagecopy, outputSize)
            SIGBTools.write(imagecopy)
            # Show the final processed image.
            cv2.imshow("Ground Floor", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
Example #36
def AugmentImages():
    # Loading calibration matrix
    K = np.load("Results/PMatrix.npy")
    # setting pattern size
    pattern_size = (9, 6)
    # loading calibration images
    L_CP = cv2.imread("Results/L_CP.jpg")  # Frontal view
    # Getting cube points from cubePoints.py
    cubePoints = cube.cube_points([0, 0, 0.1], 0.1)

    I1 = cv2.imread("Results/calibrationShoots1.jpg")
    I2 = cv2.imread("Results/calibrationShoots2.jpg")
    I3 = cv2.imread("Results/calibrationShoots3.jpg")
    I4 = cv2.imread("Results/calibrationShoots4.jpg")
    I5 = cv2.imread("Results/calibrationShoots5.jpg")

    Images = [I1, I2, I3, I4, I5]
    ImageH = []  # homographies from frontal view to H respectively

    # Getting chesscorners for frontal view
    Fgray = cv2.cvtColor(L_CP, cv2.COLOR_BGR2GRAY)
    Ffound, Fcorners = cv2.findChessboardCorners(Fgray, pattern_size)
    FchessCorners = [
        (Fcorners[0, 0, 0], Fcorners[0, 0, 1]),
        (Fcorners[8, 0, 0], Fcorners[8, 0, 1]),
        (Fcorners[45, 0, 0], Fcorners[45, 0, 1]),
        (Fcorners[53, 0, 0], Fcorners[53, 0, 1]),
    ]
    # To open CV format
    FchessCorners = np.array([[x, y] for (x, y) in FchessCorners])

    # Getting chesscorners for the rest of pics
    for I in Images:
        # converting to gray for better contrast
        gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
        # finding all corners on chessboard
        found, corners = cv2.findChessboardCorners(gray, pattern_size)
        # picking utmost corners
        IchessCorners = [
            (corners[0, 0, 0], corners[0, 0, 1]),
            (corners[8, 0, 0], corners[8, 0, 1]),
            (corners[45, 0, 0], corners[45, 0, 1]),
            (corners[53, 0, 0], corners[53, 0, 1]),
        ]
        # To openCV format
        IchessCorners = np.array([[x, y] for (x, y) in IchessCorners])
        H, mask = cv2.findHomography(FchessCorners, IchessCorners)
        ImageH.append(H)

    cam1 = SIGBTools.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    box_cam1 = cam1.project(SIGBTools.toHomogenious(cubePoints[:, :5]))

    cam2 = SIGBTools.Camera(dot(ImageH[3], cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)
    box_cam2 = cam2.project(SIGBTools.toHomogenious(cubePoints))
    print(box_cam2)
    # figure()
    # imshow(I4)
    # plot(box_cam2[0,:],box_cam2[1,:],linewidth=3)
    # show()
    p = box_cam2

    """ Drawing the box manually """
    # bottom
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][2]), int(p[1][2])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][2]), int(p[1][2])), (int(p[0][3]), int(p[1][3])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][3]), int(p[1][3])), (int(p[0][4]), int(p[1][4])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][4]), int(p[1][4])), (255, 255, 0), 2)

    # connecting lines
    cv2.line(I4, (int(p[0][4]), int(p[1][4])), (int(p[0][5]), int(p[1][5])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][1]), int(p[1][1])), (int(p[0][6]), int(p[1][6])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][2]), int(p[1][2])), (int(p[0][7]), int(p[1][7])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][3]), int(p[1][3])), (int(p[0][8]), int(p[1][8])), (255, 255, 0), 2)

    # top
    cv2.line(I4, (int(p[0][5]), int(p[1][5])), (int(p[0][6]), int(p[1][6])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][6]), int(p[1][6])), (int(p[0][7]), int(p[1][7])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][7]), int(p[1][7])), (int(p[0][8]), int(p[1][8])), (255, 255, 0), 2)
    cv2.line(I4, (int(p[0][8]), int(p[1][8])), (int(p[0][9]), int(p[1][9])), (255, 255, 0), 2)

    cv2.imshow("Dupa", I4)
    cv2.waitKey(10000)

    return
Example #37
def realisticTexturemap(scale=1,point=(200,200)):
    #Load in a homography Hg->m, found by selecting 4 corresponding points,
    #and saved from the floor-mapping method
    Hgm = np.load("Homography.good.npy")

    #A simple attempt to get mouse inputs and display images using matplotlib
    T = cv2.imread('data/Images/ITULogo.jpg')
    mp = cv2.imread('data/Images/Ground.jpg')
    M = cv2.imread('data/Images/ITUMap.bmp')

    T = copy(cv2.cvtColor(T,cv2.COLOR_BGR2RGB))
    mp = copy(cv2.cvtColor(mp,cv2.COLOR_BGR2RGB))

    #make figure and two subplots
    fig = figure(1) 
    ax1  = subplot(1,2,1) 
    ax2  = subplot(1,2,2) 
    ax3  = subplot(1,2,2) 
    ax1.imshow(mp) 
    ax2.imshow(T)
    ax1.axis('image') 
    ax1.axis('off') 
    # User selects a point
    point = fig.ginput(1)
    fig.hold('on')
    
    n,m,o = shape(mp)
    n1,m1,o1 = shape(T) 
    
    # Make homogeneous coordinate from selected point (Still in G)
    point = np.matrix([point[0][0],point[0][1],1]).T
    
    #Find the new point based on previously calculated homography
    #(the one saved to a file)
    pointPrime = Hgm * point
    pointPrime = normalize(pointPrime) # now a point in M (which is the same plane as T)
   
    # We need 4 four points in the destination image. (actually the map)
    # We "make up" these points, by choosing 4 corners of a (made up) rectangle
    # The points lie close to each other, to reduce risk of making a bad
    # homography. 

    aspect = float(m1) / n1  # aspect ratio of texture/logo
    
    delta = 3 #side length of made up rectangle
    x = pointPrime[0][0]
    y = pointPrime[1][0]
    
    # New points calculated from aspect ratio and projected point
    # (These are the made up points) (origin is top-left, x axis is vertical)
    # (x,y)           |    (x,y+(delta*aspect)
    #-------------------------------------------------
    # (x+delta, y)    |    (x+delta, y+(delta*aspect)

    newPoints = [
                [x,y],   
                [x,y+(delta*aspect)],   
                [x+delta,y+(delta*aspect)],   
                [x+delta,y]   
            ]
    # Texture points are the just the corners of the texture
    TPoints = [
                [0,0],
                [0,n1],
                [m1,n1],
                [m1,0]
            ]

    # The center of the texture
    TCenterX = (TPoints[0][0] + TPoints[3][0]) / 2
    TCenterY = (TPoints[0][1] + TPoints[1][1]) / 2
    TCenter = (TCenterX,TCenterY)
   
    # A new homography is estimated based on the new points and the
    # texture points. This will go from Texture to Map
    H_MT = SIGBTools.estimateHomography(newPoints, TPoints)
     
    # The homography going from Groundfloor to Texture
    #Hg->m * Hm->t is Hg->t. we take the inverse of that, Ht->g
    H_TG = Hgm.dot(H_MT).I
    
    # phtg becomes the T's origin, projected to G
    phtg = normalize(H_TG.dot(np.matrix([0,0,1]).T))
    #sigurtVector (really?) is the translation vector from
    # the projected texture origin, to where the user clicked in G
    sigurtVector = point - phtg

    # sigurtMat is a translation matrix that applies sigurtVector, i.e. it
    # translates the projected T so that T's top-left corner lands at the
    # clicked point
    sigurtMat = np.matrix([
            [1.,0.,sigurtVector[0][0]],
            [0.,1.,sigurtVector[1][0]],
            [0.,0.,1.]
            ])

    # normalize (sigurtMat * (Ht->g * centerOfTInT)) is the center of T, projected and
    # moved so that T's origin would be at the clicked point in G
    # It is therefore also the vector from G's origin to the center of T in G
    GCenter = normalize(sigurtMat * H_TG * np.matrix([TCenter[0],TCenter[1],1]).T)

    # The vector from T's center (in G) to G's origin
    vectorToOrigin = [GCenter[0][0]*-1,GCenter[1][0]*-1]

    #The matrix that will move the projected texture's center to G's origin
    sigurtFlyt = np.matrix([
            [1,0,vectorToOrigin[0]],
            [0,1,vectorToOrigin[1]],
            [0,0,1]
        ])

    #scaling matrix, as specified by method parameter
    sigurtScale = np.matrix([
            [scale,0,0],
            [0,scale,0],
            [0,0,1]
        ])

    # (sigurtScale * sigurtFlyt) : make a matrix that moves T in G to G's origin
    # sigurtFlyt.I * (sigurtScale * sigurtFlyt) : this matrix will move the
    # texture back again.
    scaleMat = sigurtFlyt.I * (sigurtScale * sigurtFlyt)

    cv2.circle(mp, (int(GCenter[0]), int(GCenter[1])), 10, (255, 0, 0))
    #(sigurtMat.dot(H_TG)) will project T to G, so that the corner of T is at
    #the clicked point
    #scaleMat.dot(sigurtMat.dot(H_TG) will do that, and move T to G's origin,
    #scale it and move it back
    warp = cv2.warpPerspective(T, scaleMat.dot(sigurtMat.dot(H_TG)), (m,n))

    mp = cv2.addWeighted(mp, 0.9, warp, 0.9, 0)
     
    ax1.imshow(mp)
    ax2.imshow(warp)
    draw() #update display: updates are usually defered 
    show()
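normalize() is used throughout this example but never shown; a minimal sketch under the assumption that it simply dehomogenizes a 3x1 np.matrix:

    def normalize(p):
        # Scale a homogeneous point so its last coordinate becomes 1.
        return p / p[2, 0]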
Example #38
def augmentImage(imagesNr):
    patternSize=(9,6)   #size of the calibration pattern
    camNum =0           # The number of the camera to calibrate
    camera_matrix = np.load('PMatrix.npy')
    dist_coefs = np.load('distCoef.npy')
    #loads the video
    capture = cv2.VideoCapture(camNum)
    running = True
    idx = np.array([1,7,37,43])
    #while the video is running
    while running:
        images=[]
        #Gets all calibrated images.
        for i in range(imagesNr):
            fileName = "CalibrationImage" + str(i+1) + ".jpg"
            images.append(cv2.imread(fileName))
        
        running, img =capture.read()
        imgGray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #find chessBoardCorners
        found,corners=cv2.findChessboardCorners(imgGray, patternSize)
        if (found!=0): #if the corners are found.
            srcPoints=[]
            for i in idx:
                srcPoints.append(corners[i][0]) # store points
            for i in range(len(images)):
                imgGray=cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)
                nothing,calCorners=cv2.findChessboardCorners(imgGray, patternSize) #for each calibration image find the corners and paint.
                calPoints = []
                width=(srcPoints[0][0]-srcPoints[1][0])/(srcPoints[1][0]-srcPoints[1][1])
                for j in idx:
                    calPoints.append(calCorners[j][0])
                ip1 = np.array([[x,y] for (x,y) in srcPoints])#convert points for both src and cal points to np.array().
                ip2 =np.array([[x,y] for (x,y) in calPoints])#convert points for both src and cal points to np.array().
                H, mask =cv2.findHomography(ip1, ip2)#finds the homography from the two point sets.
                cam1 = Camera( np.hstack((camera_matrix,np.dot(camera_matrix,np.array([[0],[0],[-1]])))))
                cam2 = Camera(np.dot(H,cam1.P))
                A = dot(np.linalg.inv(camera_matrix),cam2.P[:,:3])
                A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
                cam2.P[:,:3] = dot(camera_matrix,A)
                box = cube_points((0, 0, 0.1), 0.1)
                box_cam1=cam1.project(SIGBTools.toHomogenious(box[:,:5]))
                box_cam2=cam2.project(SIGBTools.toHomogenious(box))
                linePoints=[]
                for j in range(len(box_cam2[0]) // 2):
                    p1 = (int(box_cam2[0][j]), int(box_cam2[1][j]))
                    linePoints.append(p1)
                    cv2.circle(img,p1 , 5, (0,0,255))
                    cv2.circle(images[i],p1 , 5, (0,0,255))
                for j in range(len(linePoints)):
                    for k in range(len(linePoints)):
                        cv2.line(img, linePoints[j], linePoints[k], (0,0,255))
                        cv2.line(images[i], linePoints[j], linePoints[k], (0,0,255))
                fileName = 'AugmentedCalibrationImage' + str(i+1) + ".jpg"
                cv2.imwrite(fileName, images[i])
                #print("HOMOGRAPHY FOR IMAGE"+str(j)+": "+str(H))
        ch = cv2.waitKey(1)
        if(ch==27) or (ch==ord('q')): #ESC
            running = False
        cv2.namedWindow("Augmented Image")
        cv2.imshow("Augmented Image", img)
Example #39
    def __StereoCamera(self):
        """Define the epipolar geometry between stereo cameras."""
        # Load two video capture devices.
        SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        SIGBTools.VideoCapture(1, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        
        # Calibrate each individual camera.
        SIGBTools.calibrate()

        # Creates a window to show the stereo images.
        cv2.namedWindow("Stereo",  cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback("Stereo", self.__SCEyeMouseEvent)
        self.__Disparity = np.zeros((1, 1, 1))

        # Repetition statement for analyzing each captured image.
        while True:
            # Grab the video frames.
            leftImage, rightImage = SIGBTools.read()

            # Find the pattern in the image.
            leftCorners  = SIGBTools.FindCorners(leftImage)
            rightCorners = SIGBTools.FindCorners(rightImage)

            # Check if the calibration process is running.
            if self.__isCalibrating:
                # If both patterns have been recognized, start the calibration process.
                if leftCorners is not None and rightCorners is not None:
                    self.__Calibrate(leftCorners, rightCorners)
                # Otherwise, stop the calibration process.
                else:
                    self.__isCalibrating = False

            # Combine two stereo images in only one window.
            self.__Image = self.__CombineImages(leftImage, rightImage, 0.5)

            # Undistort the stereo images.
            if self.__isUndistort:
                leftUndistort, rightUndistort = SIGBTools.UndistortImages(leftImage, rightImage)
                self.__Undistort = self.__CombineImages(leftUndistort, rightUndistort, 0.5)
                if self.__isDepth:
                    self.__Disparity = self.__DepthMap(leftUndistort, rightUndistort)

            # Check what the user wants to do.
            inputKey = cv2.waitKey(1)
            # Esc or letter "q" key.
            if inputKey == 27 or inputKey == ord("q"):
                break
            # Space key.
            elif inputKey == 32:
                self.__isCalibrating = True
            # Letter "s" key.
            elif inputKey == ord("s") and self.__isDepth:
                print "s pressed"
                self.__isSaving = True
            elif inputKey == ord("d"):
                if not self.__isDepth:
                    print "opening depth"
                    # Creates a window to show the depth map.
                    cv2.namedWindow("DepthMap", cv2.WINDOW_AUTOSIZE)
                    cv2.createTrackbar("minDisparity", "DepthMap", 1, 32, self.__SetMinDisparity)
                    cv2.createTrackbar("blockSize",    "DepthMap", 1,  5, self.__SetNothing)
                    self.__isDepth = True
                else:
                    cv2.destroyWindow("DepthMap")
                    self.__isDepth = False

            # Show the final processed image.
            cv2.imshow("Stereo", self.__Image)
            if self.__isUndistort:
                cv2.imshow("Undistort", self.__Undistort)
                if self.__isDepth:
                    cv2.imshow("DepthMap", self.__Disparity)

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
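# The __DepthMap helper is not shown in this listing. A minimal sketch of the
# block-matching disparity computation it presumably wraps, assuming the
# OpenCV 3.x API and single-channel (grayscale) undistorted inputs;
# numDisparities must be a positive multiple of 16 and blockSize an odd value:
import cv2
import numpy as np

def ComputeDisparity(leftGray, rightGray, numDisparities=64, blockSize=15):
    stereo = cv2.StereoBM_create(numDisparities=numDisparities, blockSize=blockSize)
    disparity = stereo.compute(leftGray, rightGray)  # 16x fixed-point values.
    # Normalize to [0, 255] so the map can be displayed with cv2.imshow.
    return cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)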
Example #40
0
    def __TextureMapGroundFloor(self):
        """Places a texture on the ground floor for each input image."""
        # Load videodata.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        
        # Needs a full (absolute) path.
        SIGBTools.RecordingVideos("C:\\Code\\IIAML\\Project3\\Assignments\\_02\\Outputs\\TextureMapGroundFloor.wmv")
        
        # ======================================================
        # Read homography from ground to map.
        H_g2m = np.load(self.__path + "Outputs/homography1.npy")
        
        # Read the input images.
        image1 = cv2.imread(self.__path + "Images/ITULogo.PNG")
        image2 = cv2.imread(self.__path + "Images/ITUMap.png")
        
        # Estimate the homography from image to map.
        H_i2m, points = SIGBTools.GetHomographyFromMouse(image1, image2, -4)
        
        # Calculate the homography from image to ground:
        # H_i2g = H_m2g * H_i2m, where H_m2g = inv(H_g2m).
        H_i2g = np.dot(np.linalg.inv(H_g2m), H_i2m)
        
        np.save(self.__path + "Outputs/homography2.npy", H_i2g)
        # ========================================================
        
        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length   = dataFile.shape[0]

        # Define the boxes colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # BGR.
        images = []
        
        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Draw each color rectangle in the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])
            
            
            # ========================================================
            # Draw the homography transformation.
            h, w    = image.shape[0:2]
            overlay = cv2.warpPerspective(image1, H_i2g, (w, h))
            result  = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)
            #images.append(result)
            SIGBTools.write(result)
            # ========================================================

            # Show the final processed image.
            cv2.imshow("Camera", result)
            
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        
        
        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        SIGBTools.close()
        cv2.destroyAllWindows()
        SIGBTools.release()
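# For reference, applying a 3x3 homography such as H_i2g to a single 2D point
# means multiplying in homogeneous coordinates and dividing by the last
# component. A minimal sketch (the helper name is illustrative, not part of
# SIGBTools):
import numpy as np

def ApplyHomography(H, x, y):
    p = np.dot(H, np.array([x, y, 1.0]))
    return (p[0] / p[2], p[1] / p[2])  # Dehomogenize.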
def AugumentSequence():
    # Load the calibration (intrinsic) matrix.
    K = np.load("Results/PMatrix.npy")
    # Set the chessboard pattern size.
    pattern_size = (9, 6)
    # Load the calibration image (frontal view).
    L_CP = cv2.imread("Results/L_CPSeq.jpg")
    # Get the cube points from cubePoints.py.
    cubePoints = cube.cube_points([0, 0, 0.1], 0.1)
    # Get the chessboard corners of the frontal view.
    Fgray = cv2.cvtColor(L_CP, cv2.COLOR_BGR2GRAY)
    Ffound, Fcorners = cv2.findChessboardCorners(Fgray, pattern_size)
    FchessCorners = [
        (Fcorners[0, 0, 0], Fcorners[0, 0, 1]),
        (Fcorners[8, 0, 0], Fcorners[8, 0, 1]),
        (Fcorners[45, 0, 0], Fcorners[45, 0, 1]),
        (Fcorners[53, 0, 0], Fcorners[53, 0, 1]),
    ]
    # To OpenCV format (Nx2 array).
    FchessCorners = np.array([[x, y] for (x, y) in FchessCorners])
    # Open the camera that provides the sequence images.
    cap = cv2.VideoCapture(0)
    running, I = cap.read()
    # Get the camera model for the first (frontal) view.
    cam1 = SIGBTools.Camera(np.hstack((K, np.dot(K, np.array([[0], [0], [-1]])))))

    # imSize = np.shape(I)
    # videoWriter = cv2.VideoWriter("sequence.mp4", cv.FOURCC('D','I','V','X'), 30,(imSize[1], imSize[0]),True)

    while running:
        running, I = cap.read()
        # Convert to grayscale for the corner detection.
        gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, pattern_size)

        if found:
            # Pick the four outermost corners.
            IchessCorners = [
                (corners[0, 0, 0], corners[0, 0, 1]),
                (corners[8, 0, 0], corners[8, 0, 1]),
                (corners[45, 0, 0], corners[45, 0, 1]),
                (corners[53, 0, 0], corners[53, 0, 1]),
            ]
            # To OpenCV format (Nx2 array).
            IchessCorners = np.array([[x, y] for (x, y) in IchessCorners])
            # Find the homography between the frontal image and the current frame.
            H, mask = cv2.findHomography(FchessCorners, IchessCorners)
            # Transform the camera view and re-orthogonalize the rotation part.
            camFrame = SIGBTools.Camera(np.dot(H, cam1.P))
            A = np.dot(np.linalg.inv(K), camFrame.P[:, :3])
            A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1])]).T
            camFrame.P[:, :3] = np.dot(K, A)
            # Get the cube projection points.
            box_projection = camFrame.project(SIGBTools.toHomogenious(cubePoints))
            # Draw the box.
            p = box_projection
            """ Drawing the box manually """
            # bottom
            cv2.line(I, (int(p[0][1]), int(p[1][1])), (int(p[0][2]), int(p[1][2])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][2]), int(p[1][2])), (int(p[0][3]), int(p[1][3])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][3]), int(p[1][3])), (int(p[0][4]), int(p[1][4])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][1]), int(p[1][1])), (int(p[0][4]), int(p[1][4])), (255, 255, 0), 2)

            # connecting lines
            cv2.line(I, (int(p[0][4]), int(p[1][4])), (int(p[0][5]), int(p[1][5])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][1]), int(p[1][1])), (int(p[0][6]), int(p[1][6])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][2]), int(p[1][2])), (int(p[0][7]), int(p[1][7])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][3]), int(p[1][3])), (int(p[0][8]), int(p[1][8])), (255, 255, 0), 2)

            # top
            cv2.line(I, (int(p[0][5]), int(p[1][5])), (int(p[0][6]), int(p[1][6])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][6]), int(p[1][6])), (int(p[0][7]), int(p[1][7])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][7]), int(p[1][7])), (int(p[0][8]), int(p[1][8])), (255, 255, 0), 2)
            cv2.line(I, (int(p[0][8]), int(p[1][8])), (int(p[0][9]), int(p[1][9])), (255, 255, 0), 2)

            cv2.imshow("Augumentation", I)
            cv2.waitKey(1)

    return
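# The three lines that rebuild camFrame.P above are the plane-induced
# pose-from-homography trick used throughout these examples: K^-1 * (H * P1)
# yields the first two rotation columns and the translation up to scale, and
# the missing third column is recovered as r3 = r1 x r2. A minimal standalone
# sketch (the helper name is illustrative; a production version would also
# normalize the scale of r1 and r2):
import numpy as np

def CameraMatrixFromHomography(K, H, P1):
    P2 = np.dot(H, P1)
    A = np.dot(np.linalg.inv(K), P2[:, :3])
    A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1])]).T
    P2[:, :3] = np.dot(K, A)
    return P2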
Example #43
0
    def Start(self):
        """Start the eye tracking system."""
        # Show the menu.
        self.__Menu()
        # Read a video file.
        filename = raw_input("\n\tType a filename from \"Inputs\" folder: ")
        #filename = "eye01.avi"
        filepath = self.__path + "/Inputs/" + filename
        if not os.path.isfile(filepath):
            print "\tInvalid filename!"
            time.sleep(1)
            return

        # Show the menu.
        self.__Menu()

        # Define windows for displaying the results and create trackbars.
        self.__SetupWindowSliders()

        # Load the video file.
        SIGBTools.VideoCapture(filepath)

        # Shows the first frame.
        self.OriginalImage = SIGBTools.read()
        self.FrameNumber = 1
        self.__UpdateImage()

        # Initial variables.
        saveFrames = False

        # Read each frame from input video.
        while True:            
            # Extract the values of the sliders.
            sliderVals = self.__GetSliderValues()

            # Read the keyboard selection.
            ch = cv2.waitKey(1)

            # Select regions in the input images.
            if ch is ord("m"):
                if not sliderVals["Running"]:
                    roiSelector = SIGBTools.ROISelector(self.OriginalImage)
                    
                    points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200))
                    if regionSelected:
                        self.LeftTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]]
                            
                    points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200))
                    if regionSelected:
                        self.RightTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]]
                    
            # Recording a video file.
            elif ch is ord("s"):
                if saveFrames:
                    SIGBTools.close()
                    saveFrames = False
                else:
                    resultFile = raw_input("\n\tType a filename (result.wmv): ")
                    resultFile = self.__path + "/Outputs/" + resultFile
                    if os.path.isfile(resultFile):
                        print "\tThis file exist! Try again."
                        time.sleep(1)
                        self.__Menu()
                        continue
                    elif not resultFile.endswith(".wmv"):
                        print "\tThis format is not supported! Try again."
                        time.sleep(1)
                        self.__Menu()
                        continue
                    self.__Menu()
                    size = self.OriginalImage.shape
                    SIGBTools.RecordingVideos(resultFile, fps=30.0, size=(size[1], size[0]))
                    saveFrames = True

            # Spacebar to stop or start the video.
            elif ch == 32:
                cv2.setTrackbarPos("Stop/Start", "TrackBars", not sliderVals["Running"])

            # Restart the video.
            elif ch is ord("r"):
                # Release all connected videos/cameras.
                SIGBTools.release()
                time.sleep(0.5)

                # Load the video file.
                SIGBTools.VideoCapture(filepath)

                # Shows the first frame.
                self.OriginalImage = SIGBTools.read()
                self.FrameNumber = 1
                self.__UpdateImage()

            # Quit the eye tracking system.
            elif ch == 27 or ch == ord("q"):
                break

            # Check if the video is running.
            if sliderVals["Running"]:
                self.OriginalImage = SIGBTools.read()
                self.FrameNumber += 1
                self.__UpdateImage()

                if saveFrames:
                    SIGBTools.write(self.ResultImage)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
        if saveFrames:
            SIGBTools.close()
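# Note on the template cropping in Example #43: NumPy indexes images as
# [row, column], i.e. [y, x], so a rectangle from corner (x0, y0) to (x1, y1)
# is cut out as image[y0:y1, x0:x1] -- exactly the points[0]/points[1] slicing
# used for LeftTemplate and RightTemplate above.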
Example #44
0
def augmentImages():
    '''
    We augment arbitrary images with the chessboard with a cube
    using a two-camera approach
    '''
    # load calibration
    cam_calibration, distCoef = loadCameraCalibration()

    # choose points on the chessboard pattern
    idx = np.array([1, 7, 37, 43])

    # load calibration pattern and transform the image
    calibration_pattern = cv2.imread("Images/CalibrationPattern.png")
    calibration_pattern = cv2.resize(calibration_pattern, (640, 480))
    calibration_pattern = cv2.cvtColor(calibration_pattern, cv2.COLOR_BGR2GRAY)

    # get corners from the calibration pattern
    found, calibrationCorners = cv2.findChessboardCorners(calibration_pattern, (9, 6))

    # load images to be augmented
    images = []
    for i in range(1, 8):
        images.append(cv2.imread("Solutions/cam_calibration{}.jpg".format(i)))

    # augment the images one by one
    for image_id, image in enumerate(images):

        # find the same corners as we had found previously in the
        # chessboard pattern itself, only this one is in the video
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (9, 6))

        if not found:
            continue

        # load up coords in the respective images
        imagePoints = []
        calibrationPoints = []
        for i in idx:
            p = corners[i][0]
            cp = calibrationCorners[i][0]
            imagePoints.append(p)
            calibrationPoints.append(cp)

        imagePoints = np.array(imagePoints)
        calibrationPoints = np.array(calibrationPoints)

        # cv2.imshow("calibration image", calibration_pattern)

        # Create the 1st camera; this one is looking at the pattern image.
        cam1 = Camera(np.hstack((cam_calibration, np.dot(cam_calibration, np.array([[0], [0], [-1]])))))
        cam1.factor()

        # Create the cube
        cube = cube_points([0, 0, 0.1], 0.3)

        # Project the bottom square of the cube, this will transform
        # point coordinates from the object space to the calibration
        # world space where the camera looks
        calibration_rect = cam1.project(SIGBTools.toHomogenious(cube[:, :5]))

        # Calculate the homography from the corners in the calibration image
        # to the same points in the image that we want to project the cube to
        homography = SIGBTools.estimateHomography(calibrationPoints, imagePoints)

        # Transform the rect from the calibration image world space to the final
        # image world space (kept for reference; not used further below).
        transRect = SIGBTools.normalizeHomogenious(np.dot(homography, calibration_rect))

        # Create the second camera, looking into the world of the final image
        cam2 = Camera(np.dot(homography, cam1.P))

        # Recalculate the projection matrix
        calibrationInverse = np.linalg.inv(cam_calibration)
        rot = np.dot(calibrationInverse, cam2.P[:, :3])

        # Reassemble the rotation-translation matrix [r1 r2 r3 | t], with r3 = r1 x r2.
        r1, r2, t = tuple(np.hsplit(rot, 3))
        r3 = np.cross(r1.T, r2.T).T
        rotationTranslationMatrix = np.hstack((r1, r2, r3, t))

        # Create the projection matrix.
        cam2.P = np.dot(cam_calibration, rotationTranslationMatrix)
        cam2.factor()

        # project the cube using the 2nd camera
        cube = cube_points([0, 0, 0.1], 0.3)
        box = cam2.project(SIGBTools.toHomogenious(cube))

        for i in range(1, 17):
            x1 = box[0, i - 1]
            y1 = box[1, i - 1]
            x2 = box[0, i]
            y2 = box[1, i]
            cv2.line(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)

        # save image
        cv2.imwrite("Solutions/augmentation{}.png".format(image_id), image)

        cv2.imshow("Test", image)
        cv2.waitKey(0)
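# loadCameraCalibration is not part of this listing. A minimal sketch, assuming
# the intrinsic matrix and distortion coefficients were saved with np.save
# under the same file names used by the first example above:
import numpy as np

def loadCameraCalibration():
    camera_matrix = np.load("PMatrix.npy")
    dist_coefs = np.load("distCoef.npy")
    return camera_matrix, dist_coefs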
def update(img):
    image = copy(img)

    if Undistorting:  # Use the stored camera matrix and distortion coefficients to undistort the image.
        ''' <004> Here Undistoret the image'''
        image = cv2.undistort(image, camera_matrix, distortionCoefficient)

    if ProcessFrame:
        ''' <005> Here Find the Chess pattern in the current frame'''
        drawContours = True
        mI, nI, channels = image.shape
        pattern_size = (9, 6)
        idx = [0, 8, 45, 53]
        image = cv2.pyrDown(image)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        patternFound, corners = cv2.findChessboardCorners(gray, pattern_size)
        if patternFound:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
            # cv2.drawChessboardCorners(image, pattern_size, corners, patternFound)

            for t in idx:
                cv2.circle(image, (int(corners[t, 0, 0]), int(corners[t, 0, 1])), 10, (255, t, t))
                    

        if patternFound:  # Guard on the detection result; 'corners' is undefined otherwise.
            ''' <006> Here Define the cameraMatrix P=K[R|t] of the current frame'''
            points1 = []
            for t in idx:
                points1.append((int(img_points_first[t, 0]), int(img_points_first[t, 1])))

            points2 = []
            for t in idx:
                points2.append((int(corners[t, 0, 0]), int(corners[t, 0, 1])))

            # Chain the homographies: H_1_cs maps the chessboard coordinate system
            # to the first frame, H_2_1 maps the first frame to the current one.
            H_2_1 = SIGBTools.estimateHomography(points1, points2)
            H_2_cs = np.dot(H_2_1, H_1_cs)

            # Calculate the camera matrix for the current view; r3 = r1 x r2
            # (note the final .T so that the columns are stacked correctly).
            A = np.dot(np.linalg.inv(camera_matrix), H_2_cs)
            A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1]), A[:, 2]]).T
            P2_Method1 = np.dot(camera_matrix, A)
            
            

            if ShowText:
                ''' <011> Here show the distance between the camera origin and the world origin in the image'''
                # Draw the frame number in the corner.
                cv2.putText(image, "frame: " + str(frameNumber), (20, 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))

            ''' <008> Here Draw the world coordinate system in the image'''            
            
            if TextureMap:
                ''' <010> Here Do he texture mapping and draw the texture on the faces of the cube'''

                ''' <012>  calculate the normal vectors of the cube faces and draw these normal vectors on the center of each face'''

                ''' <013> Here Remove the hidden faces'''  


            if ProjectPattern:                  
                ''' <007> Here Test the camera matrix of the current view by projecting the pattern points''' 
            
        

            if WireFrame:                      
                ''' <009> Here Project the box into the current camera image and draw the box edges''' 
    
    cv2.namedWindow('Web cam')
    cv2.imshow('Web cam', image)
    global result
    result = copy(image)
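# A minimal sketch for testing P2_Method1 (task <007>): project known 3D
# chessboard points with the estimated camera matrix and draw them. The helper
# name and the objectPoints argument are illustrative, not from the original
# code:
import numpy as np

def projectPoints(P, objectPoints):
    """Project an Nx3 array of world points with a 3x4 camera matrix P."""
    X = np.hstack((objectPoints, np.ones((len(objectPoints), 1)))).T  # 4xN homogeneous.
    x = np.dot(P, X)
    return (x[:2] / x[2]).T  # Nx2 pixel coordinates.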
Example #46
0
    def __TextureMapGridSequence(self):
        """Skeleton for texturemapping on a video sequence."""
        # Load videodata.
        filename = self.__path + "Videos/Grid01.mp4"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        SIGBTools.RecordingVideos("C:\\ITU programming\\IIAML\\Project3\\Assignments\\_02\\Outputs\\TextureMapGridSequenceGrid01.wmv")

        # Load texture mapping image.
        texture = cv2.imread(self.__path + "Images/ITULogo.png")
        texture = cv2.pyrDown(texture)

        # Define the number and ids of inner corners per a chessboard row and column.
        patternSize = (9, 6)
        idx = [53, 45, 8, 0]

        # Read each frame from input video.
        while True:
            # Read the current image from a video file.
            image = SIGBTools.read()
            # Blurs an image and downsamples it.
            image = cv2.pyrDown(image)

            # Finds the positions of internal corners of the chessboard.
            corners = SIGBTools.FindCorners(image)
            if corners is not None:
                # ====================================================
                # Find corner points image
                corner_points = []
                for i, point in enumerate(corners[idx]):
                    corner_points.append(point[0].astype(int).tolist())
                corner_points = np.array(corner_points)
                
                # Corner points texture
                corner_points_texture = np.array([[0,0], 
                                                  [texture.shape[1]-1,0],
                                                  [0,texture.shape[0]-1],
                                                  [texture.shape[1]-1,texture.shape[0]-1]],
                                                 dtype=int)
                
                # Calculate the homography (with exactly four correspondences,
                # cv2.getPerspectiveTransform on float32 points would work as well).
                H = cv2.findHomography(corner_points_texture, corner_points)[0]
                
                # Draw the homography transformation.
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(texture, H, (w, h))
                image  = cv2.addWeighted(image, 1, overlay, 1, 0)
                # ====================================================
            
            # Record and show the final processed image.
            SIGBTools.write(image)
            cv2.imshow("Image", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Stop recording and wait 2 seconds before finishing the method.
        SIGBTools.close()
        cv2.waitKey(2000)
        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
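# The addWeighted call above adds the warped texture at full weight, which can
# wash out the frame where the two images overlap. A minimal alternative
# sketch (a hypothetical helper, not part of SIGBTools): copy texture pixels
# only where the warped overlay has content, instead of blending.
import numpy as np

def OverlayTexture(image, overlay):
    """Replace image pixels wherever the warped overlay is non-black."""
    mask = overlay.sum(axis=2) > 0
    result = image.copy()
    result[mask] = overlay[mask]
    return result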