def __Augmentation(self): """Projects an augmentation object over the chessboard pattern.""" # Load the camera. cameraID = 0 SIGBTools.VideoCapture(cameraID, SIGBTools.CAMERA_VIDEOCAPTURE_640X480_30FPS) # Read each frame from input camera. while True: # Read the current image from the camera. image = SIGBTools.read() # Finds the positions of internal corners of the chessboard. corners = SIGBTools.FindCorners(image, False) if corners is not None: pass # Show the final processed image. cv2.imshow("Augmentation", image) if cv2.waitKey(1) & 0xFF == ord("q"): break # Wait 2 seconds before finishing the method. cv2.waitKey(2000) # Close all allocated resources. cv2.destroyAllWindows() SIGBTools.release()
def __RealisticTextureMap(self):
    """Draws the three tracked bounding boxes over each frame of the ground-floor video."""
    # Open the input video file.
    videoFile = self.__path + "Videos/ITUStudent.avi"
    SIGBTools.VideoCapture(videoFile, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

    # Load the per-frame tracking data (one row per frame).
    trackingData = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
    numFrames = trackingData.shape[0]

    # One BGR color per tracked box.
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]

    # Draw the tracked rectangles on every frame.
    for frameIdx in range(numFrames):
        # Read the next frame from the video file.
        frame = SIGBTools.read()

        # Convert the raw tracking row into three corner-point pairs.
        boxes = SIGBTools.FrameTrackingData2BoxData(trackingData[frameIdx, :])
        for boxIdx in range(3):
            topLeft, bottomRight = boxes[boxIdx][0], boxes[boxIdx][1]
            cv2.rectangle(frame, topLeft, bottomRight, colors[boxIdx])

        # Display the processed frame; "q" quits early.
        cv2.imshow("Ground Floor", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Pause briefly before cleaning up.
    cv2.waitKey(2000)

    # Release windows and video resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __TextureMapGridSequence(self):
    """Texture-maps the ITU logo onto the chessboard of a video sequence.

    Reads Grid05.mp4, finds the chessboard in each (downsampled) frame,
    warps the logo onto the board via a homography and records the result.
    """
    # Load videodata.
    filename = self.__path + "Videos/Grid05.mp4"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    outputSize = (1280, 720)
    recorder = SIGBTools.RecordingVideos(
        self.__path + "Outputs/TextureMapGridSequence_Grid05.wmv",
        size=outputSize)

    # Load texture mapping image (halved once to speed up the warping).
    texture = cv2.imread(self.__path + "Images/ITULogo.png")
    texture = cv2.pyrDown(texture)

    # Define the number and ids of inner corners per a chessboard row and column.
    patternSize = (9, 6)
    idx = [0, 8, 45, 53]  # The four outer corners of the 9x6 inner-corner grid.

    # Texture corners, ordered to match the chessboard corner ids above.
    h, w = texture.shape[0:2]
    textureCorners = np.asarray([[0, h], [0, 0], [w, h], [w, 0]])

    while True:
        # Read the current image from a video file.
        image = SIGBTools.read()

        # Blurs an image and downsamples it.
        image = cv2.pyrDown(image)

        # Finds the positions of internal corners of the chessboard.
        corners = SIGBTools.FindCorners(image, False)
        if corners is not None:
            # BUGFIX: the mapping must only run when the pattern was found;
            # the original indexed `corners` even when it was None.
            quad = np.asarray([corners[idx[0]], corners[idx[1]],
                               corners[idx[2]], corners[idx[3]]])
            homography, _ = cv2.findHomography(textureCorners, quad)

            # Warp the texture onto the image plane and blend it in.
            h, w = image.shape[0:2]
            overlay = cv2.warpPerspective(texture, homography, (w, h))
            image = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)

        # Record the processed frame at the recorder's output size.
        imagecopy = cv2.resize(image.copy(), outputSize)
        SIGBTools.write(imagecopy)

        # Show the final processed image; "q" quits.
        cv2.imshow("Image", image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.close()
    SIGBTools.release()
def __CalibrateCamera(self):
    """Main function used for calibrating a common webcam."""
    # Open the default webcam (device 0) at 640x480 / 30 FPS.
    SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480_30FPS)

    # Run the SIGBTools calibration routine on the connected camera.
    SIGBTools.calibrate()

    # Release the camera resources.
    SIGBTools.release()
def __TextureMapGroundFloor(self):
    """Places a texture on the ground floor for each input image.

    The user selects the floor region on the first frame; the resulting
    homography is saved and used to blend the ITU logo into every frame,
    which is recorded together with the tracking boxes.
    """
    # Load videodata.
    filename = self.__path + "Videos/ITUStudent.avi"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    outputSize = (640, 480)
    recorder = SIGBTools.RecordingVideos(
        self.__path + "Outputs/TextureMapGroundFloor.wmv",
        size=outputSize)

    # Load tracking data.
    dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
    length = dataFile.shape[0]

    # Define the boxes colors (BGR).
    boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]

    homography = None
    overlay = None
    itulogo = cv2.imread(self.__path + "Images/ITULogo.png")

    # Read each frame from input video and draw the rectangules on it.
    for i in range(length):
        # Read the current image from a video file.
        image = SIGBTools.read()

        if homography is None:
            # Let the user pick the floor region once, on the first frame.
            h, _ = SIGBTools.GetHomographyFromMouse(itulogo, image, -4)
            homography = h
            # BUGFIX: save next to the other outputs instead of the CWD,
            # consistent with every other homography save/load in this file.
            np.save(self.__path + "Outputs/homography2.npy", homography)
            # The overlay is static (fixed homography and frame size), so
            # warp the logo once instead of once per frame.
            h, w = image.shape[0:2]
            overlay = cv2.warpPerspective(itulogo, homography, (w, h))

        image = cv2.addWeighted(image, 0.8, overlay, 0.2, 0)

        # Draw each color rectangule in the image.
        boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
        for j in range(3):
            box = boxes[j]
            cv2.rectangle(image, box[0], box[1], boxColors[j])

        # Record the processed frame at the recorder's output size.
        imagecopy = cv2.resize(image.copy(), outputSize)
        SIGBTools.write(imagecopy)

        # Show the final processed image; "q" quits early.
        cv2.imshow("Ground Floor", image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.close()
    SIGBTools.release()
def __ShowFloorTrackingData(self):
    """Shows the tracking boxes on the floor video and the mapped position on the ITU map.

    Uses the previously saved ground-to-map homography to project the
    tracked feet position onto the map, records the live map view and
    writes the final map image to disk.
    """
    # Load videodata.
    filename = self.__path + "Videos/ITUStudent.avi"
    image2 = cv2.imread(self.__path + "Images/ITUMap.png")
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    # BUGFIX: build the recorder path from self.__path instead of a
    # machine-specific absolute Windows path.
    SIGBTools.RecordingVideos(self.__path + "Outputs/MapLocation.wmv")

    # Load the ground-to-map homography estimated earlier.
    homography = np.load(self.__path + "Outputs/homography1.npy")

    # Load tracking data.
    dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
    length = dataFile.shape[0]

    # Define the boxes colors (BGR).
    boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]

    # Read each frame from input video and draw the rectangules on it.
    for i in range(length):
        # Read the current image from a video file.
        image = SIGBTools.read()

        # Draw each color rectangule in the image.
        boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
        for j in range(3):
            box = boxes[j]
            cv2.rectangle(image, box[0], box[1], boxColors[j])

        # Map the tracked point (corner of the third box) onto the map.
        point2 = self.__calcHomogenousCoordinates(boxes[2][1], homography)

        # Live tracking: draw the current position on a fresh copy of the map.
        image2_updated = image2.copy()
        cv2.circle(image2_updated, (int(point2[0]), int(point2[1])), 10, (0, 255, 0), -1)
        cv2.imshow("Map", image2_updated)
        cv2.imshow("Ground Floor", image)
        SIGBTools.write(image2_updated)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Close the recorder and wait 2 seconds before finishing the method.
    SIGBTools.close()
    cv2.waitKey(2000)
    cv2.imwrite(self.__path + "Outputs/mapImage.png", image2)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __TextureMapObjectSequence(self):
    """Poor implementation of simple TextureMap.

    Scans the video in bursts of 20 frames, detecting planar objects and
    outlining them; the actual texture mapping is still a TODO.
    """
    # Load videodata.
    filename = self.__path + "Videos/Scene01.mp4"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    drawContours = True

    # Load texture mapping image.
    texture = cv2.imread(self.__path + "Images/ITULogo.png")

    # Read each frame from input video.
    isRunning = True
    while isRunning:
        # Jump for each 20 frames in the video.
        for t in range(20):
            # Read the current image from a video file.
            image = SIGBTools.read()

            # Try to detect an object in the input image.
            squares = SIGBTools.DetectPlaneObject(image)

            # Check the corner of detected object.
            for sqr in squares:
                # Do texturemap here!!!!
                # TODO
                pass

            # Draws contours outlines or filled contours.
            if drawContours and len(squares) > 0:
                cv2.drawContours(image, squares, -1, (0, 255, 0), 3)

            # Show the final processed image.
            cv2.imshow("Detection", image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                # BUGFIX: propagate the quit request to the outer loop;
                # the original break only left the inner for-loop, so
                # "q" never actually stopped the method.
                isRunning = False
                break

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __TextureMapping(self):
    """Apply a texture mapping on an augmented object."""
    # Window used to display the processed camera frames.
    cv2.namedWindow("Original", cv2.WINDOW_AUTOSIZE)

    # Open the default camera.
    SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

    # Analyze each captured frame until the user quits.
    while True:
        # Grab the next video frame.
        frame = SIGBTools.read()

        # Look for the chessboard pattern (without drawing the corners).
        detectedCorners = SIGBTools.FindCorners(frame, False)

        # Render the augmented object when the pattern is visible.
        if detectedCorners is not None:
            frame = self.__Augmentation(detectedCorners, frame)

        # Quit on "q".
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Record video
        #resultFile = self.__path + "/Videos/TextureMapping02.wmv"
        #size = frame.shape
        #SIGBTools.RecordingVideos(resultFile, size=(size[1], size[0]))
        #SIGBTools.write(frame)

        # Show the final processed image.
        cv2.imshow("Original", frame)

    # Close the recorder (if any was opened) and pause briefly.
    SIGBTools.close()
    cv2.waitKey(2000)

    # Release windows and camera resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __EpipolarGeometry(self):
    """Define the epipolar geometry between stereo cameras."""
    # Window for the combined stereo view, with the mouse callback used
    # during the fundamental-matrix estimation.
    cv2.namedWindow("Stereo", cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback("Stereo", self.__FMEyeMouseEvent)

    # Open both capture devices.
    SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    SIGBTools.VideoCapture(1, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

    # Analyze each captured frame pair.
    while True:
        # Only grab new frames while the view is not frozen for the
        # fundamental-matrix estimation.
        if not self.__isFrozen:
            leftImage, rightImage = SIGBTools.read()
            # Combine the stereo pair into a single window image.
            self.__Image = self.__CombineImages(leftImage, rightImage, 1)

        # Handle the user's keyboard input.
        key = cv2.waitKey(1)
        if key == 27 or key == ord("q"):
            # Esc or "q": leave the loop.
            break
        elif key == ord("f"):
            # "f": toggle the frozen state.
            self.__isFrozen = not self.__isFrozen

        # Show the final processed image.
        cv2.imshow("Stereo", self.__Image)

    # Pause briefly before cleaning up.
    cv2.waitKey(2000)

    # Release windows and camera resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __ShowFloorTrackingData(self):
    """Draws the tracking boxes on the video and plots the walked path on the ITU map.

    The floor-to-map homography is estimated interactively on the first
    frame, saved to disk, and then used to project the feet position of
    the tracked person onto the map for every frame.
    """
    # Load videodata.
    filename = self.__path + "Videos/ITUStudent.avi"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

    # Load tracking data.
    dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
    length = dataFile.shape[0]

    # Define the boxes colors (BGR).
    boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]

    outputSize = (1280, 960)
    recorder = SIGBTools.RecordingVideos(self.__path + "Outputs/MapLocation.wmv",
                                         size=outputSize)
    dataPoints = list()

    # Read each frame from input video and draw the rectangules on it.
    homography = None
    itumap = cv2.imread(self.__path + "Images/ITUMap.png")
    for i in range(length):
        # Read the current image from a video file.
        image = SIGBTools.read()

        # Estimate the homography based on the first frame only.
        if homography is None:
            h, _ = SIGBTools.GetHomographyFromMouse(image, itumap)
            homography = h
            # BUGFIX: save next to the other outputs instead of the CWD,
            # where the other methods in this file load it from.
            np.save(self.__path + "Outputs/homography1.npy", homography)

        # Draw each color rectangule in the image.
        boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])

        # Center of the feet box (boxes[1] is the feet box).
        box1 = boxes[1][0]
        box2 = boxes[1][1]
        center = [(box1[0] + 0.5 * (box2[0] - box1[0])),
                  (box1[1] + 0.5 * (box2[1] - box1[1]))]

        # Reshape to the (1, 1, 2) float32 layout cv2.perspectiveTransform expects.
        center = np.array([[center]], dtype=np.float32)
        transformedCenter = cv2.perspectiveTransform(center, homography)
        dataPoints.append(transformedCenter)

        for j in range(3):
            box = boxes[j]
            cv2.rectangle(image, box[0], box[1], boxColors[j])

        # Draw current position on a fresh copy of the map.
        c = transformedCenter[0][0]
        itumapCopy = itumap.copy()
        cv2.circle(itumapCopy, (int(c[0]), int(c[1])), 1, (0, 255, 0), -1)
        cv2.imshow("ITU Map", itumapCopy)
        cv2.imshow("Ground Floor", image)

        # Stack the frame above the (resized) map and record the result.
        height, width, _ = image.shape
        resizedMap = cv2.resize(itumapCopy, (width, height))
        sideBySide = np.concatenate((image, resizedMap), axis=0)
        output = cv2.resize(sideBySide, outputSize)
        SIGBTools.write(output)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)

    # Draw the whole walked path on the map and save it.
    imageMap = itumap.copy()
    for point in dataPoints:
        p = point[0][0]  # Unwrap the (1, 1, 2) transform result.
        cv2.circle(imageMap, (int(p[0]), int(p[1])), 1, (0, 255, 0), -1)
    cv2.imwrite(self.__path + "Outputs/mapImage.png", imageMap)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.close()
    SIGBTools.release()
def __TextureMapGridSequence(self):
    """Texture-maps the ITU logo onto the chessboard of a video sequence.

    Reads Grid01.mp4, finds the chessboard in each (downsampled) frame,
    warps the logo onto the board via a homography and records the result.
    """
    # Load videodata.
    filename = self.__path + "Videos/Grid01.mp4"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    # BUGFIX: build the recorder path from self.__path instead of a
    # machine-specific absolute Windows path.
    SIGBTools.RecordingVideos(self.__path + "Outputs/TextureMapGridSequenceGrid01.wmv")

    # Load texture mapping image (halved once to speed up the warping).
    texture = cv2.imread(self.__path + "Images/ITULogo.png")
    texture = cv2.pyrDown(texture)

    # Define the number and ids of inner corners per a chessboard row and column.
    patternSize = (9, 6)
    idx = [53, 45, 8, 0]  # The four outer corners of the 9x6 inner-corner grid.

    # Corner points of the texture, ordered to match the chessboard ids
    # above; loop-invariant, so computed once outside the loop.
    corner_points_texture = np.array(
        [[0, 0],
         [texture.shape[1] - 1, 0],
         [0, texture.shape[0] - 1],
         [texture.shape[1] - 1, texture.shape[0] - 1]], dtype=int)

    # Read each frame from input video.
    while True:
        # Read the current image from a video file.
        image = SIGBTools.read()

        # Blurs an image and downsamples it.
        image = cv2.pyrDown(image)

        # Finds the positions of internal corners of the chessboard.
        corners = SIGBTools.FindCorners(image)
        if corners is not None:
            # Extract the four outer corner points detected in the image.
            corner_points = np.array([point[0].astype(int).tolist()
                                      for point in corners[idx]])

            # Calculate homography from the texture onto the image plane.
            H = cv2.findHomography(corner_points_texture, corner_points)[0]

            # Draw the homography transformation.
            h, w = image.shape[0:2]
            overlay = cv2.warpPerspective(texture, H, (w, h))
            image = cv2.addWeighted(image, 1, overlay, 1, 0)

        # Record and show the final processed image; "q" quits.
        SIGBTools.write(image)
        cv2.imshow("Image", image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Close the recorder and wait 2 seconds before finishing the method.
    SIGBTools.close()
    cv2.waitKey(2000)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __TextureMapGroundFloor(self):
    """Places a texture on the ground floor for each input image.

    Chains the saved ground-to-map homography with an interactively
    estimated image-to-map homography to obtain the image-to-ground
    mapping, then blends the warped logo into every recorded frame.
    """
    # Load videodata.
    filename = self.__path + "Videos/ITUStudent.avi"
    SIGBTools.VideoCapture(filename, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    # BUGFIX: build the recorder path from self.__path instead of a
    # machine-specific absolute Windows path (which also contained an
    # unescaped backslash).
    SIGBTools.RecordingVideos(self.__path + "Outputs/TextureMapGroundFloor.wmv")

    # Read homography from ground to map.
    H_g2m = np.load(self.__path + "Outputs/homography1.npy")

    # Read the input images. BUGFIX: lowercase ".png" extension,
    # consistent with the other methods loading the same file (the
    # uppercase variant fails on case-sensitive file systems).
    image1 = cv2.imread(self.__path + "Images/ITULogo.png")
    image2 = cv2.imread(self.__path + "Images/ITUMap.png")

    # Estimate the homography from image to map.
    H_i2m, points = SIGBTools.GetHomographyFromMouse(image1, image2, -4)

    # Calculate homography from image to ground: i2g = inv(g2m) . i2m.
    H_i2g = np.dot(np.linalg.inv(H_g2m), H_i2m)
    np.save(self.__path + "Outputs/homography2.npy", H_i2g)

    # Load tracking data.
    dataFile = np.loadtxt(self.__path + "Inputs/trackingdata.dat")
    length = dataFile.shape[0]

    # Define the boxes colors (BGR).
    boxColors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]

    # The warped logo is static (fixed homography and frame size), so it
    # is computed once on the first frame instead of once per frame.
    overlay = None

    # Read each frame from input video and draw the rectangules on it.
    for i in range(length):
        # Read the current image from a video file.
        image = SIGBTools.read()

        # Draw each color rectangule in the image.
        boxes = SIGBTools.FrameTrackingData2BoxData(dataFile[i, :])
        for j in range(3):
            box = boxes[j]
            cv2.rectangle(image, box[0], box[1], boxColors[j])

        # Draw the homography transformation.
        if overlay is None:
            h, w = image.shape[0:2]
            overlay = cv2.warpPerspective(image1, H_i2g, (w, h))
        result = cv2.addWeighted(image, 0.5, overlay, 0.5, 0)
        SIGBTools.write(result)

        # Show the final processed image; "q" quits early.
        cv2.imshow("Camera", result)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)
    SIGBTools.close()

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def __StereoCamera(self):
    """Define the epipolar geometry between stereo cameras.

    Interactive loop over a stereo camera pair. Keys:
      Esc / "q" — quit; Space — start stereo calibration (runs while the
      chessboard is visible in both views); "s" — request saving while the
      depth map is shown; "d" — toggle the depth-map window and trackbars.
    """
    # Load two video capture devices.
    SIGBTools.VideoCapture(0, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)
    SIGBTools.VideoCapture(1, SIGBTools.CAMERA_VIDEOCAPTURE_640X480)

    # Calibrate each individual camera.
    SIGBTools.calibrate()

    # Creates a window to show the stereo images.
    cv2.namedWindow("Stereo", cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback("Stereo", self.__SCEyeMouseEvent)
    # Placeholder disparity image shown before the first depth map is computed.
    self.__Disparity = np.zeros((1, 1, 1))

    # Repetition statement for analyzing each captured image.
    while True:
        # Grab the video frames (one from each camera).
        leftImage, rightImage = SIGBTools.read()

        # Find the pattern in the image.
        leftCorners = SIGBTools.FindCorners(leftImage)
        rightCorners = SIGBTools.FindCorners(rightImage)

        # Check if the calibration process is running.
        if self.__isCalibrating:
            # If both pattern have been recognized, start the calibration process.
            if leftCorners is not None and rightCorners is not None:
                self.__Calibrate(leftCorners, rightCorners)
            # Otherwise, stop the calibrations process.
            else:
                self.__isCalibrating = False

        # Combine two stereo images in only one window.
        self.__Image = self.__CombineImages(leftImage, rightImage, 0.5)

        # Undistort the stereo images.
        if self.__isUndistort:
            leftUndistort, rightUndistort = SIGBTools.UndistortImages(leftImage, rightImage)
            self.__Undistort = self.__CombineImages(leftUndistort, rightUndistort, 0.5)
            # Depth estimation only runs on the undistorted pair.
            if self.__isDepth:
                self.__Disparity = self.__DepthMap(leftUndistort, rightUndistort)

        # Check what the user wants to do.
        inputKey = cv2.waitKey(1)
        # Esc or letter "q" key.
        if inputKey == 27 or inputKey == ord("q"):
            break
        # Space key: start the stereo calibration process.
        elif inputKey == 32:
            self.__isCalibrating = True
        # Letter "s" key: request a save (handled elsewhere via __isSaving).
        elif inputKey == ord("s") and self.__isDepth:
            print "s pressed"
            self.__isSaving = True
        # Letter "d" key: toggle the depth-map window.
        elif inputKey == ord("d"):
            if not self.__isDepth:
                print "opening depth"
                # Creates a window to show the depth map.
                cv2.namedWindow("DepthMap", cv2.WINDOW_AUTOSIZE)
                cv2.createTrackbar("minDisparity", "DepthMap", 1, 32, self.__SetMinDisparity)
                cv2.createTrackbar("blockSize", "DepthMap", 1, 5, self.__SetNothing)
                self.__isDepth = True
            else:
                cv2.destroyWindow("DepthMap")
                self.__isDepth = False

        # Show the final processed image.
        cv2.imshow("Stereo", self.__Image)
        if self.__isUndistort:
            cv2.imshow("Undistort", self.__Undistort)
        if self.__isDepth:
            cv2.imshow("DepthMap", self.__Disparity)

    # Wait 2 seconds before finishing the method.
    cv2.waitKey(2000)

    # Close all allocated resources.
    cv2.destroyAllWindows()
    SIGBTools.release()
def Start(self): """Start the eye tracking system.""" # Show the menu. self.__Menu() # Read a video file. filename = raw_input("\n\tType a filename from \"Inputs\" folder: ") #filename = "eye01.avi" filepath = self.__path + "/Inputs/" + filename if not os.path.isfile(filepath): print "\tInvalid filename!" time.sleep(1) return # Show the menu. self.__Menu() # Define windows for displaying the results and create trackbars. self.__SetupWindowSliders() # Load the video file. SIGBTools.VideoCapture(filepath) # Shows the first frame. self.OriginalImage = SIGBTools.read() self.FrameNumber = 1 self.__UpdateImage() # Initial variables. saveFrames = False # Read each frame from input video. while True: # Extract the values of the sliders. sliderVals = self.__GetSliderValues() # Read the keyboard selection. ch = cv2.waitKey(1) # Select regions in the input images. if ch is ord("m"): if not sliderVals["Running"]: roiSelector = SIGBTools.ROISelector(self.OriginalImage) points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200)) if regionSelected: self.LeftTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]] points, regionSelected = roiSelector.SelectArea("Select eye corner", (400, 200)) if regionSelected: self.RightTemplate = self.OriginalImage[points[0][1]:points[1][1],points[0][0]:points[1][0]] # Recording a video file. elif ch is ord("s"): if saveFrames: SIGBTools.close() saveFrames = False else: resultFile = raw_input("\n\tType a filename (result.wmv): ") resultFile = self.__path + "/Outputs/" + resultFile if os.path.isfile(resultFile): print "\tThis file exist! Try again." time.sleep(1) self.__Menu() continue elif not resultFile.endswith("wmv"): print "\tThis format is not supported! Try again." time.sleep(1) self.__Menu() continue self.__Menu() size = self.OriginalImage.shape SIGBTools.RecordingVideos(resultFile, fps=30.0, size=(size[1], size[0])) saveFrames = True # Spacebar to stop or start the video. 
elif ch is 32: cv2.setTrackbarPos("Stop/Start", "TrackBars", not sliderVals["Running"]) # Restart the video. elif ch is ord("r"): # Release all connected videos/cameras. SIGBTools. release() time.sleep(0.5) # Load the video file. SIGBTools.VideoCapture(filepath) # Shows the first frame. self.OriginalImage = SIGBTools.read() self.FrameNumber = 1 self.__UpdateImage() # Quit the eye tracking system. elif ch is 27 or ch is ord("q"): break # Check if the video is running. if sliderVals["Running"]: self.OriginalImage = SIGBTools.read() self.FrameNumber += 1 self.__UpdateImage() if saveFrames: SIGBTools.write(self.ResultImage) # Close all allocated resources. cv2.destroyAllWindows() SIGBTools.release() if saveFrames: SIGBTools.close()