Example 1
    def __TextureMapGridSequence(self):
        """Skeleton for texturemapping on a video sequence."""
        # Load videodata.
        filename = self.__path + "Videos/Grid05.mp4"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        outputSize = (1280, 720)
        recorder = SIGBTools.RecordingVideos(
            self.__path + "Outputs/TextureMapGridSequence_Grid05.wmv",
            size=outputSize)

        # Load texture mapping image.
        texture = cv2.imread(self.__path + "Images/ITULogo.png")
        texture = cv2.pyrDown(texture)

        # Define the number of inner corners per chessboard row and column, and the indices of the four outer corners.
        patternSize = (9, 6)
        idx = [0, 8, 45, 53]

        # Read each frame from input video.
        h, w = texture.shape[0:2]
        textureCorners = np.asarray([[0, h], [0, 0], [w, h], [w, 0]])
        while True:
            # Read the current image from a video file.
            image = SIGBTools.read()
            # Blurs an image and downsamples it.
            image = cv2.pyrDown(image)

            # Finds the positions of internal corners of the chessboard.
            corners = SIGBTools.FindCorners(image, False)
            if corners is not None:
                corners = np.asarray([
                    corners[idx[0]], corners[idx[1]], corners[idx[2]],
                    corners[idx[3]]
                ])
                homography, _ = cv2.findHomography(textureCorners, corners)
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(texture, homography, (w, h))
                image = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)

            imagecopy = image.copy()
            imagecopy = cv2.resize(imagecopy, outputSize)
            SIGBTools.write(imagecopy)
            # Show the final processed image.
            cv2.imshow("Image", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
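
Note: the core of the example above is pairing the four texture corners with the four outer chessboard corners before calling cv2.findHomography. A minimal standalone sketch of that call (the image path and destination points are placeholders, not values from the example):

import cv2
import numpy as np

texture = cv2.imread("ITULogo.png")            # placeholder texture image
h, w = texture.shape[:2]

# Texture corners, listed in the same order as the destination points below.
src = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
# Four made-up destination points standing in for detected chessboard corners.
dst = np.float32([[100, 50], [400, 80], [420, 300], [90, 280]])

H, _ = cv2.findHomography(src, dst)
warped = cv2.warpPerspective(texture, H, (640, 480))
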
Example 2
    def __TextureMapGroundFloor(self):
        """Places a texture on the ground floor for each input image."""
        # Load video data.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        outputSize = (640, 480)
        recorder = SIGBTools.RecordingVideos(
            self.__path + "Outputs/TextureMapGroundFloor.wmv", size=outputSize)

        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the box colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # BGR.

        homography = None
        overlay = None
        itulogo = cv2.imread(self.__path + "Images/ITULogo.png")
        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()
            if homography is None:
                h, _ = SIGBTools.GetHomographyFromMouse(itulogo, image, -4)
                homography = h
                np.save('homography2.npy', homography)
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(itulogo, homography, (w, h))

            image = cv2.addWeighted(image, 0.8, overlay, 0.2, 0)

            # Draw each colored rectangle in the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            imagecopy = image.copy()
            imagecopy = cv2.resize(imagecopy, outputSize)
            SIGBTools.write(imagecopy)
            # Show the final processed image.
            cv2.imshow("Ground Floor", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
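
Note: the overlay in this example is blended into each frame with cv2.addWeighted, which computes a per-pixel weighted sum. A minimal sketch with synthetic images (the shapes are only illustrative; the 0.8/0.2 weights are the ones used above):

import cv2
import numpy as np

frame = np.full((480, 640, 3), 200, dtype=np.uint8)    # plain grey frame
overlay = np.zeros((480, 640, 3), dtype=np.uint8)      # black overlay

# blended = frame * 0.8 + overlay * 0.2 + 0, saturated to the uint8 range.
blended = cv2.addWeighted(frame, 0.8, overlay, 0.2, 0)
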
Example 3
    def __ShowFloorTrackingData(self):
        # Load video data.
        filename = self.__path + "Videos/ITUStudent.avi"
        image2 = cv2.imread(self.__path + "Images/ITUMap.png")
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        SIGBTools.RecordingVideos("C:\\Code\\IIAML\\Project3\\Assignments\\_02\\Outputs\\MapLocation.wmv")
        
        # Load homography
        homography = np.load(self.__path + "Outputs/homography1.npy")
        
        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length   = dataFile.shape[0]

        # Define the box colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # BGR.

        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Draw each colored rectangle in the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            point2 = self.__calcHomogenousCoordinates(boxes[2][1], homography)
            # Show the final processed image.
            # Live tracking
            image2_updated = image2.copy()
            cv2.circle(image2_updated, (int(point2[0]), int(point2[1])), 10, (0, 255, 0), -1)
            cv2.imshow("Map", image2_updated)
            # Drawing
            #cv2.circle(image2, (int(point2[0]), int(point2[1])), 3, (0, 255, 0), -1)
            #cv2.imshow("Map", image2)
            
            cv2.imshow("Ground Floor", image)
            SIGBTools.write(image2_updated)
            
            #self.__showPointsOnFrameOfView(image, points)            
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        SIGBTools.close()
        cv2.waitKey(2000)
        cv2.imwrite(self.__path + "Outputs/mapImage.png", image2)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
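
Note: this example relies on a private helper, __calcHomogenousCoordinates, that is not part of the listing. Assuming it simply maps one pixel through the homography, a plausible sketch (the name and signature here are an assumption, not the original implementation) could look like this:

import numpy as np

def calc_homogeneous_coordinates(point, homography):
    # Lift the (x, y) pixel to homogeneous coordinates, apply the 3x3 homography,
    # and divide by the third component to return to image coordinates.
    p = np.array([point[0], point[1], 1.0])
    q = homography.dot(p)
    return q[:2] / q[2]
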
Example 4
    def __ShowFloorTrackingData(self):
        # Load video data.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length = dataFile.shape[0]

        # Define the box colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # BGR.
        outputSize = (1280, 960)
        recorder = SIGBTools.RecordingVideos(self.__path +
                                             "Outputs/MapLocation.wmv",
                                             size=outputSize)

        dataPoints = list()
        # Read each frame from input video and draw the rectangles on it.
        first = True
        homography = None
        itumap = cv2.imread(self.__path + "Images/ITUMap.png")
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Estimate the homography based on the first frame.
            if first:
                h, _ = SIGBTools.GetHomographyFromMouse(image, itumap)
                homography = h
                first = False
                np.save('homography1.npy', homography)

            # Draw each colored rectangle in the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])

            # Transform the center of the feet box (boxes[1] is the feet box).
            box1 = boxes[1][0]
            box2 = boxes[1][1]
            center = [(box1[0] + 0.5 * (box2[0] - box1[0])),
                      (box1[1] + 0.5 * (box2[1] - box1[1]))]
            # Reshape to a (1, 1, 2) float32 array, as expected by cv2.perspectiveTransform.
            center = np.array([[center]], dtype=np.float32)
            transformedCenter = cv2.perspectiveTransform(center, homography)
            dataPoints.append(transformedCenter)

            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])

            # Show the final processed image.
            # Draw the current position on a copy of the map.
            c = transformedCenter[0][0]
            itumapCopy = itumap.copy()
            cv2.circle(itumapCopy, (int(c[0]), int(c[1])), 1, (0, 255, 0), -1)
            cv2.imshow("ITU Map", itumapCopy)

            cv2.imshow("Ground Floor", image)

            height, width, _ = image.shape
            resizedMap = cv2.resize(
                itumapCopy, (width, height))  # resize the map to the frame size
            stacked = np.concatenate((image, resizedMap), axis=0)  # stack frame and map vertically
            output = cv2.resize(stacked, outputSize)
            SIGBTools.write(output)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)

        imageMap = itumap.copy()
        for point in dataPoints:
            p = point[0][0]  # unwrap the (1, 1, 2) transformed point
            cv2.circle(imageMap, (int(p[0]), int(p[1])), 1, (0, 255, 0), -1)

        cv2.imwrite(self.__path + "Outputs/mapImage.png", imageMap)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.close()
        SIGBTools.release()
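
Note: Example 4 reshapes the foot-box center to a (1, 1, 2) float32 array because cv2.perspectiveTransform works on arrays of points rather than a single (x, y) pair. A minimal sketch with an identity homography as a placeholder:

import cv2
import numpy as np

H = np.eye(3)                               # placeholder homography
pts = np.float32([[[320.0, 400.0]]])        # one point, shape (1, 1, 2)
out = cv2.perspectiveTransform(pts, H)
x, y = out[0][0]                            # unwrap the transformed point
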
Example 5
    def __TextureMapGridSequence(self):
        """Skeleton for texturemapping on a video sequence."""
        # Load videodata.
        filename = self.__path + "Videos/Grid01.mp4"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

        SIGBTools.RecordingVideos("C:\\ITU programming\\IIAML\\Project3\\Assignments\\_02\\Outputs\\TextureMapGridSequenceGrid01.wmv")

        # Load texture mapping image.
        texture = cv2.imread(self.__path + "Images/ITULogo.png")
        texture = cv2.pyrDown(texture)

        # Define the number of inner corners per chessboard row and column, and the indices of the four outer corners.
        patternSize = (9, 6)
        idx = [53, 45, 8, 0]

        # Read each frame from input video.
        while True:
            # Read the current image from a video file.
            image = SIGBTools.read()
            # Blurs an image and downsamples it.
            image = cv2.pyrDown(image)

            # Finds the positions of internal corners of the chessboard.
            corners = SIGBTools.FindCorners(image)
            if corners is not None:
                # ====================================================
                # Find the corner points in the image.
                corner_points = []
                for point in corners[idx]:
                    corner_points.append(point[0].astype(int).tolist())
                corner_points = np.array(corner_points)
                
                # Corner points of the texture.
                corner_points_texture = np.array([[0,0], 
                                                  [texture.shape[1]-1,0],
                                                  [0,texture.shape[0]-1],
                                                  [texture.shape[1]-1,texture.shape[0]-1]],
                                                 dtype=int)
                
                # Calculate homography
                H = cv2.findHomography(corner_points_texture, corner_points)[0]
                
                # Draw the homography transformation.
                h, w = image.shape[0:2]
                overlay = cv2.warpPerspective(texture, H, (w, h))
                image  = cv2.addWeighted(image, 1, overlay, 1, 0)
                # ====================================================
            
            # Show the final processed image.
            SIGBTools.write(image)
            cv2.imshow("Image", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # Wait 2 seconds before finishing the method.
        SIGBTools.close()
        cv2.waitKey(2000)
        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
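
Note: the indexing corners[idx] above assumes SIGBTools.FindCorners returns the usual (N, 1, 2) array produced by cv2.findChessboardCorners, so a list of indices picks four corners at once. A small sketch with a fabricated corner array:

import numpy as np

corners = np.zeros((54, 1, 2), dtype=np.float32)   # fabricated (N, 1, 2) corner array
idx = [53, 45, 8, 0]

picked = corners[idx]        # fancy indexing keeps shape (4, 1, 2)
points = picked[:, 0, :]     # drop the singleton axis -> shape (4, 2)
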
Example 6
    def __TextureMapGroundFloor(self):
        """Places a texture on the ground floor for each input image."""
        # Load video data.
        filename = self.__path + "Videos/ITUStudent.avi"
        SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
        
        # Needs full path
        SIGBTools.RecordingVideos("C:\\Code\\IIAML\\Project3\\Assignments\\_02\\Outputs\\TextureMapGroundFloor.wmv")
        
        # ======================================================
        # Read homography from ground to map.
        H_g2m = np.load(self.__path + "Outputs/homography1.npy")
        
        # Read the input images.
        image1 = cv2.imread(self.__path + "Images/ITULogo.png")
        image2 = cv2.imread(self.__path + "Images/ITUMap.png")
        
        # Estimate the homography from image to map.
        H_i2m, points = SIGBTools.GetHomographyFromMouse(image1, image2, -4)
        
        # Calculate homography from image to ground.
        H_i2g = np.dot(np.linalg.inv(H_g2m), H_i2m)
        
        np.save(self.__path + "Outputs/homography2.npy", H_i2g)
        # ========================================================
        
        # Load tracking data.
        dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
        length   = dataFile.shape[0]

        # Define the box colors.
        boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # BGR.
        images = []
        
        # Read each frame from input video and draw the rectangles on it.
        for i in range(length):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Draw each colored rectangle in the image.
            boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
            for j in range(3):
                box = boxes[j]
                cv2.rectangle(image, box[0], box[1], boxColors[j])
            
            
            # ========================================================
            # Draw the homography transformation.
            h, w    = image.shape[0:2]
            overlay = cv2.warpPerspective(image1, H_i2g, (w, h))
            result  = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)
            #images.append(result)
            SIGBTools.write(result)
            # ========================================================

            # Show the final processed image.
            cv2.imshow("Camera", result)
            
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        
        
        # Wait 2 seconds before finishing the method.
        cv2.waitKey(2000)
        SIGBTools.close()
        # Close all allocated resources.
        cv2.destroyAllWindows()
        
        SIGBTools.release()
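
Note: the line H_i2g = np.dot(np.linalg.inv(H_g2m), H_i2m) chains two mappings: inverting ground->map gives map->ground, and composing it with logo->map yields logo->ground. A small numeric sanity check with made-up matrices (the real ones come from the saved file and the mouse selection):

import numpy as np

# Made-up homographies, only to illustrate the composition.
H_g2m = np.array([[1.0, 0.1, 5.0], [0.0, 1.2, 3.0], [0.0, 0.0, 1.0]])
H_i2m = np.array([[0.8, 0.0, 12.0], [0.1, 0.9, 7.0], [0.0, 0.0, 1.0]])

H_i2g = np.linalg.inv(H_g2m).dot(H_i2m)

# Going logo -> ground and then ground -> map reproduces logo -> map.
assert np.allclose(H_g2m.dot(H_i2g), H_i2m)
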
Example 7
    def Start(self):
        """Start the eye tracking system."""
        # Show the menu.
        self.__Menu()
        # Read a video file.
        filename = raw_input("\n\tType a filename from \"Inputs\" folder: ")
        #filename = "eye01.avi"
        filepath = self.__path + "/Inputs/" + filename
        if not os.path.isfile(filepath):
            print "\tInvalid filename!"
            time.sleep(1)
            return

        # Show the menu.
        self.__Menu()

        # Define windows for displaying the results and create trackbars.
        self.__SetupWindowSliders()

        # Load the video file.
        SIGBTools.VideoCapture(filepath)

        # Shows the first frame.
        self.OriginalImage = SIGBTools.read()
        self.FrameNumber = 1
        self.__UpdateImage()

        # Initial variables.
        saveFrames = False

        # Read each frame from input video.
        while True:            
            # Extract the values of the sliders.
            sliderVals = self.__GetSliderValues()

            # Read the keyboard selection.
            ch = cv2.waitKey(1)

            # Select regions in the input images.
            if ch is ord("m"):
                if not sliderVals["Running"]:
                    roiSelector = SIGBTools.ROISelector(self.OriginalImage)
                    
                    points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200))
                    if regionSelected:
                        self.LeftTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]]
                            
                    points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200))
                    if regionSelected:
                        self.RightTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]]
                    
            # Recording a video file.
            elif ch is ord("s"):
                if saveFrames:
                    SIGBTools.close()
                    saveFrames = False
                else:
                    resultFile = raw_input("\n\tType a filename (result.wmv): ")
                    resultFile = self.__path + "/Outputs/" + resultFile
                    if os.path.isfile(resultFile):
                        print "\tThis file exist! Try again."
                        time.sleep(1)
                        self.__Menu()
                        continue
                    elif not resultFile.endswith("wmv"):
                        print "\tThis format is not supported! Try again."
                        time.sleep(1)
                        self.__Menu()
                        continue
                    self.__Menu()
                    size = self.OriginalImage.shape
                    SIGBTools.RecordingVideos(resultFile, fps=30.0, size=(size[1], size[0]))
                    saveFrames = True

            # Spacebar to stop or start the video.
            elif ch == 32:
                cv2.setTrackbarPos("Stop/Start", "TrackBars", not sliderVals["Running"])

            # Restart the video.
            elif ch is ord("r"):
                # Release all connected videos/cameras.
                SIGBTools. release()
                time.sleep(0.5)

                # Load the video file.
                SIGBTools.VideoCapture(filepath)

                # Shows the first frame.
                self.OriginalImage = SIGBTools.read()
                self.FrameNumber = 1
                self.__UpdateImage()

            # Quit the eye tracking system.
            elif ch == 27 or ch == ord("q"):
                break

            # Check if the video is running.
            if sliderVals["Running"]:
                self.OriginalImage = SIGBTools.read()
                self.FrameNumber += 1
                self.__UpdateImage()

                if saveFrames:
                    SIGBTools.write(self.ResultImage)

        # Close all allocated resources.
        cv2.destroyAllWindows()
        SIGBTools.release()
        if saveFrames:
            SIGBTools.close()
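
Note: the template extraction above slices the frame as image[y0:y1, x0:x1]; NumPy indexes rows (y) before columns (x), while the ROI selector returns (x, y) points. A small sketch with a synthetic frame (the coordinates are arbitrary):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # synthetic 640x480 frame
p0, p1 = (100, 50), (220, 130)                    # (x, y) corners of a selection

# Rows (y) come first, columns (x) second.
template = frame[p0[1]:p1[1], p0[0]:p1[0]]
print(template.shape)                             # (80, 120, 3)
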