def grab_images(video_file, frame_inc=100, delay=100):
    """Walk through the entire video, saving one JPEG every `frame_inc` frames.

    video_file -- path handed to init_video() to obtain a capture object
    frame_inc  -- number of frames to advance between saved images
    delay      -- milliseconds to wait between displayed frames
    Returns None on any failure (bad file or unreadable first frame).
    """
    my_video = init_video(video_file)
    if my_video is None:  # FIX: identity test instead of `!= None`
        return None
    cpt = 0
    img = cv2.QueryFrame(my_video)
    if img is None:
        return None
    cv2.NamedWindow("Vid", cv2.CV_WINDOW_AUTOSIZE)
    nFrames = int(
        cv2.GetCaptureProperty(my_video, cv2.CV_CAP_PROP_FRAME_COUNT))
    while cpt < nFrames:
        # Advance frame_inc frames, keeping only the last one read.
        for _ in range(frame_inc):
            img = cv2.QueryFrame(my_video)
            cpt += 1
        cv2.ShowImage("Vid", img)
        out_name = str(cpt) + ".jpg"
        cv2.SaveImage(out_name, img)
        # FIX: print() call instead of Python-2 print statement
        print(out_name, nFrames)  # progress: saved frame / total frames
        cv2.WaitKey(delay)
def display_video(my_video, frame_inc=100, delay=100):
    """Display every `frame_inc`-th frame of the video in a window.

    Dumb playback used to see if everything is working fine.
    my_video  -- cv2 capture object
    frame_inc -- number of increments between each frame displayed
    delay     -- time delay (ms) between each image
    Returns None if the first frame cannot be read.
    """
    cpt = 0
    img = cv2.QueryFrame(my_video)
    if img is None:  # FIX: identity test instead of `!= None`
        return None
    cv2.NamedWindow("Vid", cv2.CV_WINDOW_AUTOSIZE)
    nFrames = int(
        cv2.GetCaptureProperty(my_video, cv2.CV_CAP_PROP_FRAME_COUNT))
    while cpt < nFrames:
        for _ in range(frame_inc):
            img = cv2.QueryFrame(my_video)
            # BUG FIX: original read `cpt + 1` (a no-op expression), so the
            # counter never advanced and the while loop never terminated.
            cpt += 1
        cv2.ShowImage("Vid", img)
        cv2.WaitKey(delay)
def found_face(self):
    """Grab one frame from the camera and run face detection on it.

    Returns False when the camera or face-finding is disabled, or when no
    frame could be read; otherwise returns the result of the detect_* helper.
    """
    # global frame_copy
    if (not self.camera_is_on()) or (not self.find_face_is_on()):
        return False
    self.flushCameraBuffer()  # this reduces the frame delay
    frame = cv.QueryFrame(self.capture)
    if frame is None:
        self.close_camera()
        return False
    # NOTE(review): `frame` cannot be falsy here after the None check above —
    # this branch looks unreachable; confirm before removing.
    if not frame:
        cv.WaitKey(0)
    # Lazily allocate the working copy matching the frame's geometry.
    if not self.frame_copy:
        self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                         cv.IPL_DEPTH_8U, frame.nChannels)
    # Top-left origin can be copied directly; bottom-left must be flipped.
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Copy(frame, self.frame_copy)
    else:
        cv.Flip(frame, self.frame_copy, 0)
    if self.showVideo:
        result = self.detect_and_draw(self.frame_copy)
    else:
        result = self.detect_no_draw(self.frame_copy)
    cv.WaitKey(10)
    return result
def run(self):
    """Main capture loop: watch the webcam, start recording when motion is
    detected and stop 10 seconds after the last detection."""
    started = time.time()
    while True:
        currentframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp of the frame
        self.processImage(currentframe)  # Process the image
        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                # Ignore motion during the first seconds after startup so the
                # webcam can adjust luminosity etc.
                # NOTE(review): the original comment said 5 s but the code
                # uses 10 — confirm which is intended.
                if instant > started + 10:
                    print "Something is moving !"
                    if self.doRecord:  # set isRecording=True only if we record a video
                        self.isRecording = True
            cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255),
                            (0, 255, 0), 1, 2, cv.CV_FILLED)
        else:
            if instant >= self.trigger_time + 10:  # Record during 10 seconds
                print "Stop recording"
                self.isRecording = False
            else:
                cv.PutText(currentframe,
                           datetime.now().strftime("%b %d, %H:%M:%S"),
                           (25, 30), self.font, 0)  # Put date on the frame
                cv.WriteFrame(self.writer, currentframe)  # Write the frame
        if self.show:
            cv.ShowImage("Image", currentframe)
        c = cv.WaitKey(1) % 0x100
        if c == 27 or c == 10:  # Break if user enters 'Esc'.
            break
def __init__(self, threshold=8, doRecord=True, showWindows=True):
    """Set up the motion detector: open the webcam, grab an initial frame
    and allocate the grayscale working buffers.

    threshold   -- detection threshold exposed on the trackbar (0-100)
    doRecord    -- whether to record the moving object to a video file
    showWindows -- whether to show the display window and trackbar
    """
    # NOTE(review): the CamelCase functions used here (CaptureFromCAM,
    # QueryFrame, CreateMat, ...) belong to the legacy `cv` API, not `cv2` —
    # confirm the cv2 alias actually exposes them in this environment.
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # Either or not record the moving object
    self.show = showWindows  # Either or not show the 2 windows
    self.frame = None
    self.capture = cv2.CaptureFromCAM(0)
    self.frame = cv2.QueryFrame(
        self.capture)  # Take a frame to init recorder
    if doRecord:
        self.initRecorder()
    self.frame1gray = cv2.CreateMat(self.frame.height, self.frame.width,
                                    cv2.CV_8U)  # Gray frame at t-1
    cv2.CvtColor(self.frame, self.frame1gray, cv2.CV_RGB2GRAY)
    # Will hold the thresholded result
    self.res = cv2.CreateMat(self.frame.height, self.frame.width, cv2.CV_8U)
    self.frame2gray = cv2.CreateMat(self.frame.height, self.frame.width,
                                    cv2.CV_8U)  # Gray frame at t
    self.width = self.frame.width
    self.height = self.frame.height
    self.nb_pixels = self.width * self.height
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # Hold timestamp of the last detection
    if showWindows:
        cv2.NamedWindow("Image")
        cv2.CreateTrackbar("Detection treshold: ", "Image", self.threshold,
                           100, self.onChange)
def __init__(self, threshold=1, doRecord=True, showWindows=True):
    """Set up contour-based motion detection on the webcam.

    threshold   -- percentage of changed surface that counts as motion
    doRecord    -- whether to record the moving object
    showWindows -- whether to show the display window and trackbar
    """
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # Either or not record the moving object
    self.show = showWindows  # Either or not show the 2 windows
    self.frame = None
    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(
        self.capture)  # Take a frame to init recorder
    if doRecord:
        self.initRecorder()
    # Working buffers: 8-bit single-channel for thresholding, 32-bit float
    # 3-channel for the running average of the scene.
    self.gray_frame = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_8U,
                                     1)
    self.average_frame = cv.CreateImage(cv.GetSize(self.frame),
                                        cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None
    self.surface = self.frame.width * self.frame.height  # total pixel count
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # Hold timestamp of the last detection
    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image", self.threshold,
                          100, self.onThresholdChange)
def run(self):
    """Main loop: grab frames, detect motion, optionally record it.

    Recording starts when motion is seen (after a 5 s warm-up) and stops
    10 seconds after the last trigger.
    """
    started = time.time()
    while True:
        curframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp of the frame
        self.processImage(curframe)  # Process the image
        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                # Wait 5 seconds after the webcam start for luminosity
                # adjusting etc.
                if instant > started + 5:
                    print("Something is moving !")
                    if self.doRecord:  # set isRecording=True only if we record a video
                        self.isRecording = True
        else:
            if instant >= self.trigger_time + 10:  # Record during 10 seconds
                print("Stop recording")
                self.isRecording = False
            else:
                cv.PutText(curframe,
                           datetime.now().strftime("%b %d, %H:%M:%S"),
                           (25, 30), self.font, 0)  # Put date on the frame
                cv.WriteFrame(self.writer, curframe)  # Write the frame
        if self.show:
            cv.ShowImage("Image", curframe)
            cv.ShowImage("Res", self.res)
        # The current gray frame becomes the previous one for next iteration.
        cv.Copy(self.frame2gray, self.frame1gray)
        c = cv.WaitKey(1)
        if c == 27 or c == 1048603:  # Break if user enters 'Esc'.
            break
def repeat():
    """Grab one camera frame, convert it to grayscale and stream the raw
    pixel bytes to stdout; on ESC print the frame count and exit."""
    global capture
    global camera_index
    global count
    frame = cv2.GetMat(cv2.QueryFrame(capture))
    # assumes the camera delivers 640x480 BGR frames — TODO confirm
    framegray = cv2.CreateMat(480, 640, cv2.CV_8UC1)
    cv2.CvtColor(frame, framegray, cv2.CV_BGR2GRAY)
    # Raw grayscale bytes go straight to stdout (e.g. for piping).
    sys.stdout.write(framegray.tostring())
    c = cv2.WaitKey(1)
    if c == 27:  # ESC
        print(count)
        sys.exit()
def __init__(self, parent=None):
    """Build the widget: open camera 1, size the widget to one frame and
    start a 50 ms repaint timer.

    NOTE(review): `parent` is accepted but never forwarded to
    QWidget.__init__ — confirm that is intentional.
    """
    QWidget.__init__(self)
    self._capture = cv2.CreateCameraCapture(1)
    # Take one frame to query height
    frame = cv2.QueryFrame(self._capture)
    self.setMinimumSize(frame.width, frame.height)
    self.setMaximumSize(self.minimumSize())  # min == max -> fixed size
    self._frame = None
    self._image = self._build_image(frame)
    # Paint every 50 ms
    self._timer = QTimer(self)
    self._timer.timeout.connect(self.queryFrame)
    self._timer.start(50)
def take_preview_image(self):
    """Capture a single webcam frame and store it as the preview image."""
    if not self.camera_is_on():
        self.use_camera()
    # Drain stale buffered frames so the grabbed image is current.
    self.flushCameraBuffer()
    img = cv.QueryFrame(self.capture)
    if img is None:
        self.close_camera()
        return
    self.create_folder_if_not_exist()
    preview_path = os.path.join(PICTURE_PATH, "current.jpg")
    cv.SaveImage(preview_path, img)  # this is the preview image
def take_snapshot(self):
    """Grab a webcam frame, save it as a timestamped capture plus the
    preview image, and return the capture's file name."""
    if not self.camera_is_on():
        self.use_camera()
    # Drain stale buffered frames so the grabbed image is current.
    self.flushCameraBuffer()
    img = cv.QueryFrame(self.capture)
    self.create_folder_if_not_exist()
    stamp = time.strftime("%y_%m_%d_%H_%M_%S")
    image_name = "capture_%s.jpg" % stamp
    cv.SaveImage(os.path.join(PICTURE_PATH, image_name), img)
    # Refresh the preview image as well.
    cv.SaveImage(os.path.join(PICTURE_PATH, "current.jpg"), img)
    return image_name
def run(self):
    """Track a purple object in the camera feed and mark its centre."""
    while True:
        img = cv.QueryFrame(self.capture)
        # blur the source image to reduce color noise
        cv.Smooth(img, img, cv.CV_BLUR, 3)
        # convert the image to hsv(Hue, Saturation, Value) so its
        # easier to determine the color to track(hue)
        hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
        # limit all pixels that don't match our criteria, in this case we are
        # looking for purple but if you want you can adjust the first value in
        # both tuples which is the hue range(120,140). OpenCV uses 0-180 as
        # a hue range for the HSV color model
        thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)
        # determine the objects moments and check that the area is large
        # enough to be our object
        moments = cv.Moments(thresholded_img, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        # there can be noise in the video so ignore objects with small areas
        if (area > 100000):
            # determine the x and y coordinates of the center of the object
            # we are tracking by dividing the 1, 0 and 0, 1 moments by the area
            x = cv.GetSpatialMoment(moments, 1, 0) / area
            y = cv.GetSpatialMoment(moments, 0, 1) / area
            # print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area)
            # create an overlay to mark the center of the tracked object
            overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
            cv.Add(img, overlay, img)
            # add the thresholded image back to the img so we can see what was
            # left after it was applied
            cv.Merge(thresholded_img, None, None, None, img)
        # display the image
        cv.ShowImage(color_tracker_window, img)
        if cv.WaitKey(10) == 27:  # ESC quits
            break
def __init__(self, ceil=8, doRecord=True, showWindows=True):
    """Motion detector reading from a video file instead of the webcam.

    ceil        -- detection threshold exposed on the trackbar (0-100)
    doRecord    -- whether to record the moving object
    showWindows -- whether to show the display window and trackbar
    """
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # Either or not record the moving object
    self.show = showWindows  # Either or not show the 2 windows
    self.frame = None
    # self.capture=cv.CaptureFromCAM(0)
    self.capture = cv.VideoCapture("./videos_tcc/pessoas.h264")
    # NOTE(review): a VideoCapture object has no .shape and cannot be passed
    # to cv2.resize or cv.QueryFrame — the next three statements appear to
    # expect a numpy frame (i.e. capture.read()); confirm and rework.
    newx, newy = self.capture.shape[1] / 4, self.capture.shape[
        0] / 4  # new size (w,h)
    self.frame = cv2.resize(self.capture, (newx, newy))
    self.frame = cv.QueryFrame(
        self.capture)  # Take a frame to init recorder
    if doRecord:
        self.initRecorder()
    self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width,
                                   cv.CV_8U)  # Gray frame at t-1
    cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)
    # Will hold the thresholded result
    self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)
    self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width,
                                   cv.CV_8U)  # Gray frame at t
    self.width = self.frame.width
    self.height = self.frame.height
    self.nb_pixels = self.width * self.height
    self.ceil = ceil
    self.isRecording = False
    self.trigger_time = 0  # Hold timestamp of the last detection
    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Mytrack", "Image", self.ceil, 100, self.onChange)
def queryFrame(self):
    """Grab the next camera frame, rebuild the cached image and repaint."""
    self._image = self._build_image(cv.QueryFrame(self._capture))
    self.update()
def CamGui():
    """Optical-flow demo: track good features from the webcam and draw the
    accumulated motion trails on every frame until ESC is pressed."""
    capture = cv.VideoCapture(0)
    width = int(capture.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv.CAP_PROP_FRAME_HEIGHT))
    # NOTE(review): this mixes the modern VideoCapture API with legacy
    # functions (CreateImage, QueryFrame, CvtColor, ...) — a cv.VideoCapture
    # cannot be passed to cv.QueryFrame; confirm which API this file targets.
    prev_gray = cv.CreateImage((width, height), 8, 1)
    gray = cv.CreateImage((width, height), 8, 1)
    # Will hold the pyr frame at t-1
    prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
    currPyr = cv.CreateImage((height / 3, width + 8), 8,
                             cv.CV_8UC1)  # idem at t
    max_count = 500  # max features to track
    qLevel = 0.01  # feature quality level
    minDist = 10  # min distance between features
    prev_points = []  # Points at t-1
    curr_points = []  # Points at t
    lines = []  # To keep all the lines overtime
    while True:
        frame = cv.QueryFrame(capture)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
        output = cv.CloneImage(frame)
        prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count,
                                             qLevel, minDist)
        curr_points, status, err = cv.CalcOpticalFlowPyrLK(
            prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
        # If points status are ok and distance not negligible keep the point
        k = 0
        for i in range(len(curr_points)):
            # Manhattan distance between the point at t-1 and at t.
            nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
                abs(int(prev_points[i][1]) - int(curr_points[i][1]))
            if status[i] and nb > 2:
                # Compact the surviving points in place.
                prev_points[k] = prev_points[i]
                curr_points[k] = curr_points[i]
                k += 1
        prev_points = prev_points[:k]
        curr_points = curr_points[:k]
        # At the end only interesting points are kept
        # Draw all the previously kept lines otherwise they would be lost
        # the next frame
        for (pt1, pt2) in lines:
            cv.Line(frame, pt1, pt2, (255, 255, 255))
        # Draw the lines between each points at t-1 and t
        for prevpoint, point in zip(prev_points, curr_points):
            prevpoint = (int(prevpoint[0]), int(prevpoint[1]))
            cv.Circle(frame, prevpoint, 15, 0)
            point = (int(point[0]), int(point[1]))
            cv.Circle(frame, point, 3, 255)
            cv.Line(frame, prevpoint, point, (255, 255, 255))
            # Append current lines to the lines list
            lines.append((prevpoint, point))
        cv.Copy(gray, prev_gray)  # Put the current frame prev_gray
        prev_points = curr_points
        cv.ShowImage("The Video", frame)
        # cv.WriteFrame(writer, frame)
        c = cv.WaitKey(1)
        if c == 27:  # Esc on Windows
            break
if __name__ == '__main__':
    # create window and move to screen position
    # NOTE(review): both the legacy and the modern window-creation call are
    # made for the same window — only one of the two lines should be needed.
    cv2.NamedWindow('Camera', cv2.CV_WINDOW_AUTOSIZE)
    cv2.namedWindow('Camera', cv2.WINDOW_AUTOSIZE)
    if len(sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = cv2.CreateCameraCapture(0)
    # ### check that capture device is OK
    if not capture:
        print("Error opening capture device")
        sys.exit(1)
    # ### capture the 1st frame to get some properties on it
    frame = cv2.QueryFrame(capture)
    # ### get size of the frame
    frame_size = cv2.GetSize(frame)
    gray = cv2.CreateImage(frame_size, 8, 1)
    # Down-scaled buffer used to speed up the cascade detection.
    small_img = cv2.CreateImage(
        (int(frame_size[0] / image_scale), int(frame_size[1] / image_scale)),
        8, 1)
    cascade = cv2.Load(cascade_name)
    while 1:  # do forever
        # capture the current image
        frame = cv2.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break
from time import time as timer
import tensorflow as tf
import numpy as np
import sys
import cv2 as cv
import os

# Play back a video file frame by frame at (roughly) its native frame rate.
vidFile = cv.CaptureFromFile('Test_Avi')
nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)
# Delay between frames in ms, derived from the frame rate.
waitPerFrameInMillisec = int(1 / fps * 1000 / 1)
print('Num. Frames = ', nFrames)
print('Frame Rate = ', fps, ' frames per sec')
# BUG FIX: `xrange` is Python 2 only; this script already uses print() calls,
# so use `range` for Python 3 compatibility.
for f in range(nFrames):
    frameImg = cv.QueryFrame(vidFile)
    cv.ShowImage("My Video Window", frameImg)
    cv.WaitKey(waitPerFrameInMillisec)
# When playing is done, delete the window
# NOTE: this step is not strictly necessary,
# when the script terminates it will close all windows it owns anyways
cv.DestroyWindow("My Video Window")
def run(self):
    """Motion-detection loop: compare each frame against a running average
    of the scene and report when the changed contour surface exceeds
    self.ceil percent of the image."""
    # Capture first frame to get size
    frame = cv.QueryFrame(self.capture)
    frame_size = cv.GetSize(frame)
    width = frame.width
    height = frame.height
    surface = width * height  # Surface area of the image
    cursurface = 0  # Hold the current surface that have changed
    grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
    difference = None
    while True:
        color_image = cv.QueryFrame(self.capture)
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                  0)  # Remove false positives
        if not difference:
            # For the first time put values in difference, temp and
            # moving_average
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
        else:
            cv.RunningAvg(color_image, moving_average, 0.020,
                          None)  # Compute the average
        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1.0, 0.0)
        # Minus the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)
        # Convert the image so that it can be thresholded
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)
        cv.Dilate(grey_image, grey_image, None, 18)  # to get object blobs
        cv.Erode(grey_image, grey_image, None, 10)  # trim them back down
        # Find contours
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(grey_image, storage, cv.CV_RETR_EXTERNAL,
                                   cv.CV_CHAIN_APPROX_SIMPLE)
        backcontours = contours  # Save contours
        while contours:  # For all contours compute the area
            cursurface += cv.ContourArea(contours)
            contours = contours.h_next()
        avg = (
            cursurface * 100
        ) / surface  # Calculate the average of contour area on the total size
        if avg > self.ceil:
            print("Something is moving !")
            # print avg,"%"
        cursurface = 0  # Put back the current surface to 0
        # Draw the contours on the image
        _red = (0, 0, 255)  # Red for external contours
        _green = (0, 255, 0)  # Green internal contours
        levels = 1  # 1 contours drawn, 2 internal contours as well, 3 ...
        cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                        cv.CV_FILLED)
        cv.ShowImage("Target", color_image)
        # Listen for ESC or ENTER key
        c = cv.WaitKey(7) % 0x100
        if c == 27 or c == 10:
            break
def get_img(capture):
    """Return the next frame read from `capture`."""
    return cv2.QueryFrame(capture)
#coding=utf-8 import cv2 as cv import time if __name__ == '__main__': cv.namedWindow("camera", 1) #开启ip摄像头 video = "http://*****:*****@172.16.200.214:8081/" capture = cv.VideoCapture(video) num = 0 while True: img = cv.QueryFrame(capture) cv.ShowImage("camera", img) #按键处理,注意,焦点应当在摄像头窗口,不是在终端命令行窗口 key = cv.WaitKey(10) if key == 27: #esc键退出 print('esc break...') break if key == ord(' '): #保存一张图像 num = num + 1 filename = "frames_%s.jpg" % num cv.SaveImage(filename, img) del (capture)
def run(self):
    """Track one red and one blue object in the camera feed, draw the line
    between their centres and display the angle of that line."""
    # initiate font
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
    # instantiate images
    hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
    threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    i = 0
    writer = cv.CreateVideoWriter('angle_tracking.avi',
                                  cv.CV_FOURCC('M', 'J', 'P', 'G'), 30,
                                  cv.GetSize(hsv_img), 1)
    while True:
        # capture the image from the cam
        img = cv.QueryFrame(self.capture)
        # convert the image to HSV
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
        # threshold the image to isolate two colors; red wraps around hue 0,
        # so two ranges are combined
        cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160),
                    threshold_img1)  # red
        cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160),
                    threshold_img1a)  # red again
        cv.Add(threshold_img1, threshold_img1a,
               threshold_img1)  # this is combining the two limits for red
        cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100),
                    threshold_img2)  # blue
        # determine the moments of the two objects
        threshold_img1 = cv.GetMat(threshold_img1)
        threshold_img2 = cv.GetMat(threshold_img2)
        moments1 = cv.Moments(threshold_img1, 0)
        moments2 = cv.Moments(threshold_img2, 0)
        area1 = cv.GetCentralMoment(moments1, 0, 0)
        area2 = cv.GetCentralMoment(moments2, 0, 0)
        # initialize x and y
        x1, y1, x2, y2 = (1, 2, 3, 4)
        coord_list = [x1, y1, x2, y2]
        # NOTE(review): this loop rebinds the loop variable only — it does
        # NOT zero x1..y2 as apparently intended; confirm.
        for x in coord_list:
            x = 0
        # there can be noise in the video so ignore objects with small areas
        if (area1 > 200000):
            # x and y coordinates of the center of the object is found by
            # dividing the 1,0 and 0,1 moments by the area
            x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
            y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)
            # draw circle
            cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)
            # write x and y position
            cv.PutText(img, str(x1) + ', ' + str(y1), (x1, y1 + 20), font,
                       255)  # Draw the text
        if (area2 > 100000):
            # x and y coordinates of the center of the object is found by
            # dividing the 1,0 and 0,1 moments by the area
            x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
            y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)
            # draw circle
            cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)
            cv.PutText(img, str(x2) + ', ' + str(y2), (x2, y2 + 20), font,
                       255)  # Draw the text
            cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
            # draw line and angle
            cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1),
                    (100, 100, 100, 100), 4, cv.CV_AA)
            x1 = float(x1)
            y1 = float(y1)
            x2 = float(x2)
            y2 = float(y2)
            angle = int(math.atan((y1 - y2) / (x2 - x1)) * 180 / math.pi)
            cv.PutText(img, str(angle),
                       (int(x1) + 50, (int(y2) + int(y1)) / 2), font, 255)
        # cv.WriteFrame(writer,img)
        # display frames to users
        cv.ShowImage('Target', img)
        cv.ShowImage('Threshold1', threshold_img1)
        cv.ShowImage('Threshold2', threshold_img2)
        cv.ShowImage('hsv', hsv_img)
        # Listen for ESC or ENTER key
        c = cv.WaitKey(7) % 0x100
        if c == 27 or c == 10:
            break
    cv.DestroyAllWindows()