def main():
    # Render an animation via drawImage() and encode it twice: once at a
    # rate derived from s ("original.avi") and once at the configured
    # playback fps ("output.avi").
    # NOTE(review): n, width, height, img_name, getArgs and drawImage are
    # module-level names defined elsewhere in the file -- confirm.
    global degree_value
    global pi
    global s
    global fps
    # Bail out early if argument parsing reported failure.
    if getArgs() == 1:
        return
    # Angular step per segment (n is a module-level global, read only).
    degree_value = 2 * pi / n
    #drawImage(0)
    #return
    # Writer 1: fourcc 0 (uncompressed) at 7*s frames per second.
    writer = cv.CreateVideoWriter("original.avi", 0, 7 * s, (width, height))
    # Writer 2: same codec at the normal playback rate.
    writer2 = cv.CreateVideoWriter("output.avi", 0, fps, (width, height))
    nFrames = int(14 * s)
    # First pass: advance the phase by 2*pi/7 each frame.
    addon = pi * 2 / 7
    c = addon
    drawImage(0)
    for i in range(nFrames):
        c = c + addon
        drawImage(c)
        # drawImage() saves its result under img_name; reload for encoding.
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer, img2)
    # Second pass: two seconds of output at fps, phase step 2*pi*s/fps.
    nFrames = int(2 * fps)
    addon2 = pi * 2 * s / fps
    c2 = addon2
    drawImage(0)
    for j in range(nFrames):
        c2 = c2 + addon2
        drawImage(c2)
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer2, img2)
def initGrab(self):
    """Capture the first desktop frame, open the AVI writer, and start
    both the periodic grab timer and the single-shot stop timer."""
    # Grab the screen region and wrap the raw RGB bytes in an IplImage.
    screenshot = ImageGrab.grab(self.geometry)
    frame = cv.CreateImageHeader(screenshot.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(frame, screenshot.tostring())
    # PIL delivers RGB; OpenCV writers expect BGR.
    cv.CvtColor(frame, frame, cv.CV_RGB2BGR)
    codec = cv.CV_FOURCC('D', 'I', 'V', 'X')
    frame_rate = 25
    w, h = cv.GetSize(frame)
    #print w, h
    self.writer = cv.CreateVideoWriter('out3.avi', codec, frame_rate,
                                       (int(w), int(h)), 1)
    cv.WriteFrame(self.writer, frame)
    self.frames_count = 1
    # Periodic timer: grab a new frame every 40 ms (25 fps).
    grab_timer = QtCore.QTimer()
    grab_timer.setInterval(1000 / 25)
    grab_timer.timeout.connect(self.grabFrame)
    grab_timer.start()
    self.timer = grab_timer
    # Single-shot timer that ends the capture after total_time ms.
    self.stopTimer = QtCore.QTimer()
    self.stopTimer.setInterval(self.total_time)
    self.stopTimer.timeout.connect(self.stopCapture)
    self.stopTimer.setSingleShot(True)
    self.stopTimer.start()
def initGrabQt(self):
    # Grab the Qt widget, convert its 32-bit RGBA buffer to 3 channels,
    # open the AVI writer, and start the capture timers.
    image_qt = QtGui.QPixmap.grabWidget(self.view).toImage()
    image_qt_size = (image_qt.size().width(), image_qt.size().height())
    # QImage memory is 4 channels (RGBA); the writer needs 3.
    cv_im_4chan = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 4)
    cv_im = cv.CreateImage(image_qt_size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im_4chan, image_qt.bits().asstring(image_qt.numBytes()))
    # NOTE(review): this leaves the frame in RGB order, not the BGR that
    # OpenCV writers usually expect -- red/blue may be swapped in the
    # output file; confirm against the recorded video.
    cv.CvtColor(cv_im_4chan, cv_im, cv.CV_RGBA2RGB)
    fourcc = cv.CV_FOURCC('D','I','V','X')
    fps = 25
    width, height = cv.GetSize(cv_im)
    self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps,
                                       (int(width), int(height)), 1)
    cv.WriteFrame(self.writer, cv_im)
    # Periodic timer: grab a new frame every 40 ms (25 fps).
    timer = QtCore.QTimer()
    time_interval = 1000 / 25
    timer.setInterval(time_interval)
    timer.timeout.connect(self.grabFrameQt)
    timer.start()
    self.timer = timer
    # Single-shot timer that stops the capture after total_time ms.
    self.stopTimer = QtCore.QTimer()
    self.stopTimer.setInterval(self.total_time)
    self.stopTimer.timeout.connect(self.stopCapture)
    self.stopTimer.setSingleShot(True)
    self.stopTimer.start()
def initRecorder(self): #Create the recorder codec = cv.CV_FOURCC('M', 'J', 'P', 'G') #('W', 'M', 'V', '2') now = datetime.now().strftime("%b-%d_%H%M%S") print now + ".wmv" self.writer=cv.CreateVideoWriter( now +".wmv", codec, 5, cv.GetSize(self.frame), 1) #FPS set to 5 because it seems to be the fps of my cam but should be ajusted to your needs self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font
def __init__(self, threshold=70, showWindows=True):
    # Motion detector: grabs one frame from the default camera, crops a
    # region of interest, and prepares grey-scale working buffers plus a
    # video writer for recording detections.
    #
    # threshold   -- pixel-difference sensitivity (lower = more sensitive)
    # showWindows -- whether the two preview windows are displayed
    self.writer = None
    self.font = None
    self.show = showWindows  # Whether or not to show the 2 windows
    self.frame = None
    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # Take a frame to init recorder
    # Crop to the region of interest (rows 1:100, cols 540:640).
    # NOTE(review): slicing an IplImage like this relies on the legacy cv
    # binding's 2-D indexing -- confirm the result still exposes the
    # .height/.width attributes used below.
    self.frame = self.frame[1:100, 540:640]
    self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)  # Gray frame at t-1
    cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)
    # Will hold the thresholded result
    self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)
    self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)  # Gray frame at t
    self.width = self.frame.width
    self.height = self.frame.height
    self.nb_pixels = self.width * self.height
    self.threshold = threshold
    self.trigger_time = 0  # Holds timestamp of the last detection
    codec = cv.CV_FOURCC('M', 'J', 'P', 'G')  # ('W', 'M', 'V', '2')
    # FPS set to 5 because it seems to be the fps of my cam but should be
    # adjusted to your needs.
    self.writer = cv.CreateVideoWriter(
        datetime.now().strftime("%b-%d_%H_%M_%S") + ".wmv",
        codec, 5, cv.GetSize(self.frame), 1)
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)  # Creates a font
def initRecorder(self):
    """Create the DIVX recorder (named after the current time) and the
    font used for overlays."""
    fourcc = cv.CV_FOURCC('D', 'I', 'V', 'X')  # alternative: ('W', 'M', 'V', '2')
    out_name = datetime.now().strftime("%b-%d_%H_%M_%S") + ".avi"
    # FPS set to 20 because it seems to be the fps of my cam but should
    # be adjusted to your needs.
    self.writer = cv.CreateVideoWriter(out_name, fourcc, 20,
                                       cv.GetSize(self.frame), 1)
    # Overlay font.
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)
def initRecorder(self):
    # Create the recorder
    codec = cv.CV_FOURCC('M', 'J', 'P', 'G')
    # FPS is set to 8; adjust to match your camera's actual frame rate.
    # (The original comment claimed 30, which did not match the code.)
    self.writer = cv.CreateVideoWriter(
        datetime.now().strftime("%Y%m%d_%H%M%S") + ".wmv",
        codec, 8, cv.GetSize(self.frame), 1)
    # Large font for the countdown overlay.
    self.countdownFont = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 3, 3, 0, 5, 8)
    # Smaller font for timestamps.
    self.timeFont = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)
def initRecorder(self):
    # Create the recorder; output goes to the camera_output directory.
    codec = cv.CV_FOURCC('M', 'J', 'P', 'G')
    # FPS set to 5 because it seems to be the fps of my cam but should
    # be adjusted to your needs.
    self.writer = cv.CreateVideoWriter(
        datetime.now().strftime("/home/pi/python_programs/camera_output/" + "%b-%d_%H_%M_%S") + ".wmv",
        codec, 5, cv.GetSize(self.frame), 1)
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)  # Creates a font

# Comments:
""" By changing threshold in def __init__(..,self.threshold = 15,...) you control sensitivity of image capture"""
"""e.g threshold = 15 is more sensitive in threshold = 25"""
# end

# NOTE(review): the statements below reference `self` but no enclosing
# method `def` is visible in this chunk -- this looks like a fragment of
# a capture routine whose header was lost; confirm against the full file.
self.captureCounter += 1
img = cv.QueryFrame(self.capture)
filename = "capture" + str(self.captureCounter)
file = "/home/pi/python_programs/camera_output/" + filename + ".png"
cv.SaveImage(file, img)
def normalize_video_lenght(i_name, o_name, fps, length, verbose=False): capture = cv.CaptureFromFile(i_name) width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) original_fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) original_frames_number = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT) final_frames_number = int(fps * length) if verbose: print '\ninput video: ', i_name print 'size: %s:%s' % (width, height), ' fps:', original_fps, 'frames:', \ original_frames_number, 'estimated length:', float(original_frames_number)/original_fps print '\noutput video: ', o_name print 'size: %s:%s' % (width, height), ' fps:', fps, 'frames:', \ final_frames_number, 'estimated length:', float(final_frames_number)/fps, '\n' my_fourcc = cv.CV_FOURCC('m', 'p', 'g', '2') writer = cv.CreateVideoWriter(o_name, my_fourcc, fps, (width, height)) diff = final_frames_number - original_frames_number step = operation = None if diff > 0: step = int(original_frames_number / diff) operation = expand_video elif diff < 0: step = int(final_frames_number / abs(diff)) operation = reduce_video if step == 0: print 'The desired final length is too short' return 1 result = operation(capture, writer, final_frames_number, step, verbose) if verbose: print 'A total of', result, 'frames were removed/duplicated from the original video.' return 0
def initGrab(self):
    # Grab the first frame from the Qt view, set up the video writer,
    # and start the periodic frame-grab timer. The commented
    # time.clock()/time.time() pairs are leftover instrumentation.
    start = time.clock()
    elapsed = time.clock()
    elapsed -= start
    #print "Time spent in (Qt image grab) is: %0.3f ms\n" % (elapsed * 1000)
    image_qt = QtGui.QPixmap.grabWidget(self.view)
    image_qt_i = image_qt.toImage()
    # Force a packed 24-bit layout, then swap R/B so the raw buffer is
    # in BGR order as OpenCV expects.
    i2 = image_qt_i.convertToFormat(QtGui.QImage.Format_RGB888)
    i3 = i2.rgbSwapped()
    i3_bits = i3.bits()
    image_qt_size = (i3.size().width(), i3.size().height())
    #image = ImageGrab.grab(self.geometry)
    cv_im = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, i3_bits.asstring(i3.numBytes()))
    fourcc = cv.CV_FOURCC('D', 'I', 'V', 'X')
    fps = 25
    width, height = cv.GetSize(cv_im)
    #print width, height
    self.writer = cv.CreateVideoWriter('out3.avi', int(fourcc), fps,
                                       (int(width), int(height)), 1)
    start = time.time()
    cv.WriteFrame(self.writer, cv_im)
    elapsed = time.time()
    elapsed -= start
    #print "Time spent in (Write Frame) is:%0.3f ms \n" % (elapsed * 1000)
    self.frames_count = 1
    # Grab subsequent frames every 40 ms (25 fps).
    timer = QtCore.QTimer()
    time_interval = 1000 / 25
    timer.setInterval(time_interval)
    timer.timeout.connect(self.grabFrame)
    timer.start()
    self.timer = timer
def setupVideoWriter(self, fileName, framesPerSecond, iuyMode = False):
    """Create the video writer for this capture session.

    fileName        -- output video path
    framesPerSecond -- frame rate to encode at
    iuyMode         -- when True, use the raw I420 codec instead of MJPG
                       (the I420 output was observed to look strange)
    """
    # Idiom fix: test truthiness directly instead of `== True`.
    if iuyMode:
        fourcc = cv.CV_FOURCC('I', '4', '2', '0')  # Strange output
    else:
        fourcc = cv.CV_FOURCC('M', 'J', 'P', 'G')
    self._videoWriter = cv.CreateVideoWriter(
        fileName, fourcc, framesPerSecond,
        (self._internalResolutionX, self._internalResolutionY), 1)
# Read the source video's codec and frame-rate properties.
codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC)
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
# Duration calculation.
# NOTE(review): duration in seconds is normally nbFrames / fps;
# (nbFrames * fps) / 1000 looks dimensionally wrong -- confirm intent.
duration = (nbFrames * fps) / 1000
print 'Num. Frames = ', nbFrames  # nbFrames is the total frame count
print 'Frame Rate = ', fps, 'fps'  # fps is the file's frame rate
print 'Duration = ', duration, 'sec'
print 'codec = ', codec
# Define the output files.
out_list = ['airport', 'hall', 'office', 'pedestrian', 'smoke']
out_f = 'data/noshake_static/' + out_list[file_num] + '/yu_foreground' + '.avi'
out_m = 'data/noshake_static/' + out_list[file_num] + '/yu_mask' + '.avi'
# out_foreground = cv2.VideoWriter(out_f, -1, 30.0, (height, width))  # newer cv2 video-writing API
# out_mask = cv2.VideoWriter(out_m, -1, 30.0, (height, width))
# writer=cv.CreateVideoWriter("output.avi", cv.CV_FOURCC("D", "I", "V", "X"), 5, cv.GetSize(temp), 1)
out_foreground = cv.CreateVideoWriter(out_f, int(codec), int(fps), (width, height), 1)  # Create writer with same parameters
out_mask = cv.CreateVideoWriter(out_m, int(codec), int(fps), (width, height), 1)  # Create writer with same parameters
# On linux I used to take "M","J","P","G" as fourcc
# print(frame1.height, frame1.width)
# Intermediate working buffers: frame1gray and frame2gray.
frame1gray = cv.CreateMat(height, width, cv.CV_8U)  # CreateMat(rows, cols, type)
cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY)  # cv.CvtColor(src, dst, code): source image, destination image, conversion code
res = cv.CreateMat(height, width, cv.CV_8U)
frame2gray = cv.CreateMat(height, width, cv.CV_8U)
# frame2gray = np.array([height, width, cv2.CV_8U])
# gray = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)
w = width
def sd_loop(self):
    """ The main seizure detector loop - call this function to start
    the seizure detector.
    """
    # NOTE(review): the body indentation below was reconstructed from a
    # collapsed single-line source -- verify nesting against the original.
    self.timeSeries = []  # array of times that data points were collected.
    self.maxFreq = None
    if (self.X11):
        cv.NamedWindow('Seizure_Detector', cv.CV_WINDOW_AUTOSIZE)
        cv.CreateTrackbar('FeatureTrackbar', 'Seizure_Detector', 0,
                          self.MAX_COUNT, self.onTrackbarChanged)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8)
    # Intialise the video input source
    # ('camera' - may be a file or network stream though).
    #camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
    #camera = cv.CaptureFromFile("../testcards/testcard.mpg")
    #camera = cv.CaptureFromFile("/home/graham/laura_sample.mpeg")
    camera = cv.CaptureFromCAM(0)
    # Set the VideoWriter that produces the output video file.
    frameSize = (640, 480)
    videoFormat = cv.FOURCC('p', 'i', 'm', '1')
    # videoFormat = cv.FOURCC('l','m','p','4')
    vw = cv.CreateVideoWriter(self.videoOut, videoFormat,
                              self.outputfps, frameSize, 1)
    if (vw == None):
        print "ERROR - Failed to create VideoWriter...."
    # Get the first frame.
    last_analysis_time = datetime.datetime.now()
    last_feature_search_time = datetime.datetime.now()
    last_frame_time = datetime.datetime.now()
    frame = cv.QueryFrame(camera)
    print "frame="
    print frame
    # Main loop - repeat forever
    while 1:
        # Carry out initialisation, memory allocation etc. if necessary
        if self.image is None:
            self.image = cv.CreateImage(cv.GetSize(frame), 8, 3)
            self.image.origin = frame.origin
            grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
            pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
            # self.features = []
        # copy the captured frame to our self.image object.
        cv.Copy(frame, self.image)
        # create a grey version of the image
        cv.CvtColor(self.image, grey, cv.CV_BGR2GRAY)
        # Look for features to track.
        if self.need_to_init:
            #cv.ShowImage ('loop_grey',grey)
            self.initFeatures(grey)
            self.timeSeries = []
            self.maxFreq = None
            last_analysis_time = datetime.datetime.now()
            self.need_to_init = False
        # Now track the features, if we have some.
        if self.features != []:
            # we have points to track, so track them and add them to
            # our time series of positions.
            self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                prev_grey, grey, prev_pyramid, pyramid,
                self.features,
                (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
                self.flags)
            self.timeSeries.append((last_frame_time, self.features))
            print "Features..."
            # Lost features get status 0; park them at (-1, -1).
            for featNo in range(len(self.features)):
                if (status[featNo] == 0):
                    self.features[featNo] = (-1, -1)
                print status[featNo], self.features[featNo]
            # and plot them.
            for featNo in range(len(self.features)):
                pointPos = self.features[featNo]
                cv.Circle(self.image,
                          (int(pointPos[0]), int(pointPos[1])),
                          3, (0, 255, 0, 0), -1, 8, 0)
                # Alarm state 2 = full alarm (thick red ring),
                # state 1 = warning (thin red ring).
                if (self.alarmActive[featNo] == 2):
                    cv.Circle(self.image,
                              (int(pointPos[0]), int(pointPos[1])),
                              10, (0, 0, 255, 0), 5, 8, 0)
                if (self.alarmActive[featNo] == 1):
                    cv.Circle(self.image,
                              (int(pointPos[0]), int(pointPos[1])),
                              10, (0, 0, 255, 0), 2, 8, 0)
                # there will be no maxFreq data until we have
                # run doAnalysis for the first time.
                if (not self.maxFreq == None):
                    msg = "%d-%3.1f" % (featNo, self.maxFreq[featNo])
                    cv.PutText(self.image, msg,
                               (int(pointPos[0] + 5), int(pointPos[1] + 5)),
                               font, (255, 255, 255))
            # end of for loop over features
        else:
            #print "Oh no, no features to track, and you haven't told me to look for more."
            # no features, so better look for some more...
            self.need_to_init = True
        # Is it time to analyse the captured time series.
        if ((datetime.datetime.now() - last_analysis_time).total_seconds()
                > self.Analysis_Period):
            if (len(self.timeSeries) > 0):
                self.doAnalysis()
                self.doAlarmCheck()
                last_analysis_time = datetime.datetime.now()
            else:
                # print "Not doing analysis - no time series data..."
                a = True  # placeholder so the else branch is non-empty
        # Is it time to re-acquire the features to track.
        if ((datetime.datetime.now() - last_feature_search_time).total_seconds()
                > self.Feature_Search_Period):
            print "resetting..."
            last_feature_search_time = datetime.datetime.now()
            self.need_to_init = True
        # save current data for use next time around.
        prev_grey, grey = grey, prev_grey
        prev_pyramid, pyramid = pyramid, prev_pyramid
        # we can now display the image
        if (self.X11):
            cv.ShowImage('Seizure_Detector', self.image)
        cv.WriteFrame(vw, self.image)
        # handle events
        c = cv.WaitKey(10)
        if c == 27:
            # user has press the ESC key, so exit
            break
        # Control frame rate by pausing if we are going too fast.
        frameTime = (datetime.datetime.now() - last_frame_time)\
            .total_seconds()
        actFps = 1.0 / frameTime
        if (frameTime < 1 / self.inputfps):
            cv.WaitKey(1 + int(1000. * (1. / self.inputfps - frameTime)))
        # Grab the next frame
        last_frame_time = datetime.datetime.now()
        frame = cv.QueryFrame(camera)
import cv2.cv as cv capture = cv.CaptureFromCAM(0) temp = cv.QueryFrame(capture) writer = cv.CreateVideoWriter("output.avi", cv.CV_FOURCC('D', 'I', 'V', 'X'), 15, cv.GetSize(temp), 1) count = 0 while count < 50: print count image = cv.QueryFrame(capture) cv.WriteFrame(writer, image) cv.ShowImage("image_windows", image) cv.WaitKey(1) count += 1
# Record 50 frames from the default webcam to output.avi using the raw
# DIB codec at 5 fps, with a live preview window.
import cv2.cv as cv

capture = cv.CaptureFromCAM(0)
# One throwaway frame just to learn the camera's frame size.
temp = cv.QueryFrame(capture)
writer = cv.CreateVideoWriter("output.avi", cv.CV_FOURCC("D", "I", "B", " "), 5, cv.GetSize(temp), 1)  #On linux I used to take "M","J","P","G" as fourcc
count = 0
while count < 50:
    print count
    image = cv.QueryFrame(capture)
    cv.WriteFrame(writer, image)
    cv.ShowImage('Image_Window', image)
    cv.WaitKey(1)
    count += 1
# NOTE(review): the triple-quoted block below is disabled code; its
# closing quotes are not visible in this chunk -- confirm it is
# terminated later in the file. Content preserved verbatim.
''' capture = cv.CaptureFromFile('img/mic.avi') nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)) width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)) fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS) codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC) wait = int(1/fps * 1000/1) #Compute the time to wait between each frame query duration = (nbFrames * fps) / 1000 #Compute duration print 'Num. Frames = ', nbFrames
# import cv2.cv as cv
# capture = cv.CaptureFromCAM(0)
# temp = cv.QueryFrame(capture)
# writer = cv.CreateVideoWriter("output.avi", cv.CV_FOURCC("M","J","P","G"), 25, cv.GetSize(temp), 1)
# count = 0
# while count < 500:
#     image = cv.QueryFrame(capture)
#     cv.WriteFrame(writer, image)
#     cv.ShowImage('Image_Window',image)
#     cv.WaitKey(1)
#     count += 1
# coding=utf-8
# Record from the default webcam to output.avi (MJPG, 25 fps) until the
# user presses 'q' in the preview window.
import cv2
import cv2.cv as cv

capture = cv.CaptureFromCAM(0)
# One throwaway frame just to learn the camera's frame size.
temp = cv.QueryFrame(capture)
writer = cv.CreateVideoWriter("output.avi", cv.CV_FOURCC("M", "J", "P", "G"), 25, cv.GetSize(temp), 1)
while 1:
    image = cv.QueryFrame(capture)
    cv.WriteFrame(writer, image)
    cv.ShowImage('Image_Window', image)
    # BUG FIX: the original called both cv.WaitKey(1) and cv2.waitKey(1)
    # each iteration; the first call could consume the 'q' key event so
    # the quit check below never saw it. A single waitKey call both
    # pumps the GUI event loop and tests the key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break