def startCameras(self):
    """Open the two attached webcams and force both to the module-level
    width/height frame size."""
    self.video1 = cv.CaptureFromCAM(0)
    self.video2 = cv.CaptureFromCAM(1)
    # Apply the same frame geometry to both devices.
    for cam in (self.video1, self.video2):
        cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
def repeat():
    """Grab and display one frame from the current camera; press "n" while
    the window has focus to cycle to the next camera index."""
    global capture  # reassigned below when the camera index changes
    global camera_index

    frame = cv.QueryFrame(capture)
    cv.ShowImage("w1", frame)

    c = cv.WaitKey(1)
    # BUG FIX: cv.WaitKey returns an int key code, but the original compared
    # it to the string "n", which can never be equal — the camera-switch
    # branch was unreachable. Compare against ord("n") instead.
    if c == ord("n"):
        camera_index += 1  # try the next camera index
        capture = cv.CaptureFromCAM(camera_index)
        if not capture:  # the next camera index didn't work: reset to 0
            camera_index = 0
            capture = cv.CaptureFromCAM(camera_index)
def get_frame(self): self.frame = cv.QueryFrame(self.capture) self.c = cv.WaitKey(1) if (self.c == "n" ): #in "n" key is pressed while the popup window is in focus self.camera_index += 1 #try the next camera index self.capture = cv.CaptureFromCAM(camera_index) if not self.capture: #if the next camera index didn't work, reset to 0. self.camera_index = 0 self.capture = cv.CaptureFromCAM(camera_index) jpegImg = Image.fromstring("RGB", cv.GetSize(self.frame), self.frame.tostring()) retStr = jpegImg.tostring("jpeg", "RGB") print "Compressed Size = ", len(retStr) return retStr
def __init__(self, parent=None):
    """Fixed-size 640x480 widget that polls camera 0 on a timer and runs
    face detection on the incoming frames."""
    QWidget.__init__(self)
    self.setMinimumSize(640, 480)
    self.setMaximumSize(self.minimumSize())

    # Hooks a user can register to interact with the camera image and the
    # detected faces before the widget displays the frame.
    self.image_callback = None
    self.face_callback = None

    # Frame buffer (matching the widget size/depth/channels), scratch
    # storage, the capture device and the face-detection cascade.
    self.frame = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
    self.storage = cv.CreateMemStorage()
    self.capture = cv.CaptureFromCAM(0)
    self.face_cascade = cv.Load(CSC_PATH + "haarcascade_frontalface_alt.xml")

    # Face detection runs once every fd_wait_frames frames.
    self.fd_wait_frames = 1
    self._fd_wait = self.fd_wait_frames

    # Show an initial frame, then refresh every 75 ms.
    self._query_frame()
    self.timer = QTimer(self)
    self.timer.timeout.connect(self._query_frame)
    self.timer.start(75)
def __init__(self, threshold=70, showWindows=True):
    """Open camera 0, crop to the watched region, allocate the grayscale
    scratch buffers and create the timestamped video writer."""
    self.writer = None
    self.font = None
    self.show = showWindows  # whether the two debug windows are displayed
    self.frame = None

    self.capture = cv.CaptureFromCAM(0)
    # Take a frame to init the recorder, then crop to the region of
    # interest (rows 1:100, columns 540:640).
    self.frame = cv.QueryFrame(self.capture)
    self.frame = self.frame[1:100, 540:640]

    rows, cols = self.frame.height, self.frame.width
    self.frame1gray = cv.CreateMat(rows, cols, cv.CV_8U)  # gray frame at t-1
    cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)
    self.res = cv.CreateMat(rows, cols, cv.CV_8U)         # thresholded result
    self.frame2gray = cv.CreateMat(rows, cols, cv.CV_8U)  # gray frame at t

    self.width = cols
    self.height = rows
    self.nb_pixels = self.width * self.height
    self.threshold = threshold
    self.trigger_time = 0  # timestamp of the last detection

    codec = cv.CV_FOURCC('M', 'J', 'P', 'G')  # ('W', 'M', 'V', '2')
    # FPS fixed at 5 — seems to match the camera, adjust to your needs.
    self.writer = cv.CreateVideoWriter(
        datetime.now().strftime("%b-%d_%H_%M_%S") + ".wmv",
        codec, 5, cv.GetSize(self.frame), 1)
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)
def _cameraInitialize(self, camType):
    """Open the configured camera and start its frame-streaming thread.

    camType "usb" opens OpenCV camera 0; "rpi" opens a PiCamera.  Either
    path spawns a daemon thread running the matching RpiWSHandler loop.
    Exits the process on camera failure.
    """
    if camType == "usb":
        try:
            self.capture = cv.CaptureFromCAM(0)
            cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH)
            cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT)
            # Grab one frame so ImageProcess is initialized with real data.
            img = cv.QueryFrame(self.capture)
            self.ImageProcess = ImageProcess(img)  # initialize ImageProcess
            # Daemon thread pumping frames through RpiWSHandler.loop.
            self.t = Thread(target=RpiWSHandler.loop, args=(self, ))
            self.t.setDaemon(True)
            self.t.start()
        except Exception as e:
            print "Error:", e
            sys.exit(-1)
    elif camType == "rpi":
        import picamera
        import io
        try:
            self.camera = picamera.PiCamera()
            self.camera.resolution = (IMAGE_WIDTH, IMAGE_HEIGHT)
            self.camera.framerate = FPS
            self.camera.led = False
            time.sleep(2)  # let auto-exposure/white balance settle
            self.stream = io.BytesIO()  # in-memory buffer for captured frames
            # Daemon thread pumping frames through RpiWSHandler.rloop.
            self.t = Thread(target=RpiWSHandler.rloop, args=(self, ))
            self.t.setDaemon(True)
            self.t.start()
        except picamera.PiCameraError as e:
            print e
            sys.exit(-1)
def __init__(self, threshold=8, doRecord=True, showWindows=True):
    """Set up the camera, grayscale scratch buffers and (optionally) the
    recorder and debug windows for frame-difference motion detection."""
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # whether to record the moving object
    self.show = showWindows   # whether to show the two debug windows
    self.frame = None

    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # one frame to size the buffers
    if doRecord:
        self.initRecorder()

    rows, cols = self.frame.height, self.frame.width
    self.frame1gray = cv.CreateMat(rows, cols, cv.CV_8U)  # gray frame at t-1
    cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)
    self.res = cv.CreateMat(rows, cols, cv.CV_8U)         # thresholded result
    self.frame2gray = cv.CreateMat(rows, cols, cv.CV_8U)  # gray frame at t

    self.width = cols
    self.height = rows
    self.nb_pixels = self.width * self.height
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image",
                          self.threshold, 100, self.onChange)
def init_camera():
    """Open camera 0 and return the capture handle; abort on failure."""
    cam = cv.CaptureFromCAM(0)
    if not cam:
        # BUG FIX: sys.stdout is a file object, not callable — the original
        # sys.stdout("...") raised TypeError instead of printing. Use write().
        sys.stdout.write("Error Initializing Camera! Aborting.\n")
        sys.exit(1)
    return cam
def initialize_motion_detection(self):
    """Initialize objects used for motion detection."""
    self.camera = cv.CaptureFromCAM(0)
    # cv.NamedWindow(self.windowName, cv.CV_WINDOW_AUTOSIZE)

    # Lower the camera resolution to 160x120 to keep processing cheap.
    height = 120
    width = 160
    cv.SetCaptureProperty(self.camera, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    cv.SetCaptureProperty(self.camera, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    # Grab one frame so the buffers below match the actual stream geometry.
    self.colorFrame = cv.QueryFrame(self.camera)
    self.imageHeight = self.colorFrame.height
    self.imageWidth = self.colorFrame.width
    self.numPixels = self.imageHeight * self.imageWidth
    depth = self.colorFrame.depth
    frameSize = cv.GetSize(self.colorFrame)

    # Single-channel scratch images for the frame-differencing pipeline.
    self.previousGrayFrame = cv.CreateImage(frameSize, depth, 1)
    self.currentGrayFrame = cv.CreateImage(frameSize, depth, 1)
    self.resultImage = cv.CreateImage(frameSize, depth, 1)

    # Seed the "previous" frame with a denoised grayscale of the first grab.
    cv.CvtColor(self.colorFrame, self.previousGrayFrame, cv.CV_RGB2GRAY)
    self.previousGrayFrame = self.reduce_image_noise(self.previousGrayFrame)
def __init__(self, threshold=20, showWindows=False):
    """Prepare camera 0 and the scratch images for contour-based motion
    detection; switches the working directory to <root>/etc/video."""
    self.writer = None
    self.font = None
    self.show = showWindows  # whether the two debug windows are displayed
    self.frame = None

    # Work out of <project root>/etc/video (recordings land there).
    root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(os.path.join(root_path, "etc", "video"))

    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # one frame to size the buffers

    frame_size = cv.GetSize(self.frame)
    self.gray_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    self.average_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image",
                          self.threshold, 100, self.onChange)
def __init__(self, parent): wx.Panel.__init__(self, parent, -1) #magic to stop the flickering def SetCompositeMode(self, on=True): exstyle = win32api.GetWindowLong(self.GetHandle(), win32con.GWL_EXSTYLE) if on: exstyle |= win32con.WS_EX_COMPOSITED else: exstyle &= ~win32con.WS_EX_COMPOSITED win32api.SetWindowLong(self.GetHandle(), win32con.GWL_EXSTYLE, exstyle) SetCompositeMode(self, True) self.capture = cv.CaptureFromCAM(0) # turn on the webcam img = cv.QueryFrame( self.capture ) # Convert the raw image data to something wxpython can handle. cv.CvtColor(img, img, cv.CV_BGR2RGB) # fix color distortions self.bmp = wx.BitmapFromBuffer(img.width, img.height, img.tostring()) sbmp = wx.StaticBitmap(self, -1, bitmap=self.bmp) # Display the resulting image self.playTimer = wx.Timer(self, self.TIMER_PLAY_ID) wx.EVT_TIMER(self, self.TIMER_PLAY_ID, self.onNextFrame) fps = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FPS) if fps != 0: self.playTimer.Start(1000 / fps) # every X ms else: self.playTimer.Start(1000 / 15) # assuming 15 fps
def __init__(self): """ O construtor obtem a referencia da webcam e cria uma janela para exibir as imagens. """ # Variavel que vai definir o estado do monitoramento. self.estado = True # Obtendo a referencia da captura da webCam. self.webCam = cv.CaptureFromCAM(0) # Obtendo a imagem atual da webCam. self.imagem_atual = cv.QueryFrame(self.webCam) if self.imagem_atual is None: stderr.write('A Web Cam esta desligada. Por favor ligue-a\n') exit() else: # Cria uma nova imagem que sera utilizada para descobrir os contornos na imagem_atual. self.imagem_cinza = cv.CreateImage(cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_8U, 1) # Cria uma nova imagem que sera utilizada para converter a imagem atual em 32F. self.imagem_auxiliar = cv.CreateImage(cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_32F, 3) # Imagem sera utilizada para guardar a diferenca entre a imagem atual e anterior. self.imagem_diferenca = None # Obtendo a area total da imagem da webCam. self.area = self.imagem_atual.width * self.imagem_atual.height self.area_corrente = 0 self.imagem_diferenca = cv.CloneImage(self.imagem_atual) self.imagem_anterior = cv.CloneImage(self.imagem_atual) # Tenho que converter a imagem_atual em 32F para poder calcular a media em "RuningAvg". cv.Convert(self.imagem_atual, self.imagem_auxiliar)
def __init__(self, threshold=25, doRecord=True, showWindows=True):
    """Open camera 0, allocate the averaging/diff images and optionally
    start the recorder and the debug windows."""
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # whether to record the moving object
    self.show = showWindows   # whether to show the two debug windows
    self.frame = None

    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # one frame to size the buffers
    if doRecord:
        self.initRecorder()

    frame_size = cv.GetSize(self.frame)
    self.gray_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    self.average_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image",
                          self.threshold, 100, self.onChange)
def __init__(self):
    """Open camera 0 and create the preview windows."""
    # For file playback instead of the camera:
    # self.capture = cv.CaptureFromFile("E:\Documents and Settings\Owner\My Documents\Downloads\Walking Slowly in Front of People.mp4")
    self.capture = cv.CaptureFromCAM(0)
    # "BG2" is intentionally left disabled.
    for window in ("Target", "BG1", "BG3"):
        cv.NamedWindow(window, 1)
def __init__(self, threshold=1):
    """Initialise timers, monitoring state and the camera buffers used for
    motion detection."""
    self.timeSinceLastMoved = None
    self.timeSinceLastLog = time.time()
    self.timeSinceClean = time.time()
    self.writer = None
    self.font = None
    self.frame = None

    # In case logging needs to be redirected to a file:
    # self.log = sys.stdout
    # self.logFile = open("logFile.log", "w")
    # sys.stdout = self.logFile

    self.isMonitorOn = True  # monitor on/off

    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # one frame to size the buffers

    frame_size = cv.GetSize(self.frame)
    self.gray_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    self.average_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.trigger_time = 0  # timestamp of the last detection
def Camera(filepath):
    """Preview camera 0 via the old cv API while repeatedly saving cv2
    snapshots to *filepath*; press ESC in the window to stop."""
    cv.NamedWindow(":)", cv.CV_WINDOW_AUTOSIZE)
    capture = cv.CaptureFromCAM(0)
    out_path = filepath  # renamed: `file` shadowed the builtin
    camera_port = 0
    # ramp_frames = 30
    # cv2.VideoCapture only needs the index of a camera port.
    camera = cv2.VideoCapture(camera_port)

    def repeat():
        # Show the latest preview frame in the ":)" window.
        cv.ShowImage(":)", cv.QueryFrame(capture))

    def get_image():
        # read() is the easiest way to pull a full image from a VideoCapture.
        retval, im = camera.read()
        return im

    while True:
        repeat()
        # c = cv.WaitKey(10)
        snapshot = get_image()
        cv2.imwrite(out_path, snapshot)
        if cv.WaitKey(10) == 27:  # ESC
            break
def init_camera(rx=640, ry=480):
    """Set up the UVC camera, open it at rx x ry and return the handle.

    rx, ry: requested frame width and height in pixels.
    """
    setup_uvc_camera()
    camera = cv.CaptureFromCAM(0)
    # BUG FIX: the property constants live in the cv module (as used
    # everywhere else in this file); the bare CV_CAP_PROP_* names in the
    # original raised NameError.
    cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, rx)
    cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, ry)
    return camera
def main_program(usn): #capture from inbuilt camera i.e web cam cap = cv.CaptureFromCAM(0) while (True): #frame for video img = cv.QueryFrame(cap) #display the image from frame cv.ShowImage("image", img) #wait for response k = cv.WaitKey(10) #if esc is pressed the break if k == 27: break #if 'c' is pressed then save the image elif k == ord('c'): time = datetime.now() cv.SaveImage("face/database/image.png", img) cv.DestroyAllWindows() os.system("python face.py") path = "c:/python27/face/database/image.png" if os.path.exists(path): #renaming the file with the usn along with the current date stmp for making it unique newPath = "c:/python27/face/database/" + usn + str( time.month) + str(time.day) + str(time.hour) + str( time.minute) + str(time.second) + str( time.microsecond) + ".png" #path1="c:python27/face/database/"+usn+".png" cmd = os.rename(path, newPath) print "image captured, resized and renamed successfully" #cv.ShowImage("image","face/database"+usn+exp+".png") else: print "no face detected......image deleted"
def __init__(self, camera=0):
    """Open webcam *camera* (default 0); raise if it is not accessible."""
    self.cam = cv.CaptureFromCAM(camera)
    if self.cam:
        return
    raise Exception("Camera not accessible.")
def capture():
    """Grab a single frame from camera 0 and save it under a random
    UUID-based filename."""
    cam = cv.CaptureFromCAM(0)
    filename = str(uuid.uuid4())
    # The original while-True loop broke after one iteration, so a single
    # grab-and-save is equivalent.
    img = cv.QueryFrame(cam)
    cv.SaveImage(filename + ".jpg", img)
    cv.DestroyAllWindows()
def __init__(self, mode=1, name="w1", capture=1): print name if mode == 1: cv.StartWindowThread() cv.NamedWindow(name, cv.CV_WINDOW_AUTOSIZE) self.camera_index = 0 self.name = name if capture == 1: self.capture = cv.CaptureFromCAM(self.camera_index)
def __init__(self):
    """Open the capture source: a video file when given as argv[1],
    otherwise one (or two) webcams forced to the module-level
    width/height."""
    if len(sys.argv) > 1:
        self.writer = None
        self.capture = cv.CaptureFromFile(sys.argv[1])
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
    else:
        fps = 60  # 15
        is_color = True
        self.capture = cv.CaptureFromCAM(0)
        # cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640 );
        # cv.SetCaptureProperty( self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480 );
        # NOTE(review): width/height are presumably module-level globals — confirm.
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, width);
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height);
        # Second camera, same geometry.
        self.capture2 = cv.CaptureFromCAM(1)
        cv.SetCaptureProperty(self.capture2, cv.CV_CAP_PROP_FRAME_WIDTH, width);
        cv.SetCaptureProperty(self.capture2, cv.CV_CAP_PROP_FRAME_HEIGHT, height);
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
    self.writer = None
    # Earlier recording experiments, kept for reference:
    # self.writer = cv.CreateVideoWriter("/dev/shm/test1.mp4", cv.CV_FOURCC('D', 'I', 'V', 'X'), fps, frame_size, is_color )
    # self.writer = cv.CreateVideoWriter("test2.mpg", cv.CV_FOURCC('P', 'I', 'M', '1'), fps, frame_size, is_color )
    # self.writer = cv.CreateVideoWriter("test3.mp4", cv.CV_FOURCC('D', 'I', 'V', 'X'), fps, cv.GetSize(frame), is_color )
    # self.writer = cv.CreateVideoWriter("test4.mpg", cv.CV_FOURCC('P', 'I', 'M', '1'), fps, (320, 240), is_color )
    # These both gave no error message, but saved no file:
    # ##self.writer = cv.CreateVideoWriter("test5.h263i", cv.CV_FOURCC('I', '2', '6', '3'), fps, cv.GetSize(frame), is_color )
    # ##self.writer = cv.CreateVideoWriter("test6.fli", cv.CV_FOURCC('F', 'L', 'V', '1'), fps, cv.GetSize(frame), is_color )
    # Can't play this one:
    # ##self.writer = cv.CreateVideoWriter("test7.mp4", cv.CV_FOURCC('D', 'I', 'V', '3'), fps, cv.GetSize(frame), is_color )
    # 320x240 15fpx in DIVX is about 4 gigs per day.
    frame = cv.QueryFrame(self.capture)
    cv.NamedWindow("Target", 1)
def main():
    """Watch a serial line for an alarm trigger; while armed, capture
    webcam frames and upload them to Dropbox."""
    #~ APP_KEY = 'vz2q46h298i5bqy'
    #~ APP_SECRET = 'f8xid4xf294wrv1'
    #~ ACCESS_TYPE = 'app_folder'
    # NOTE(review): APP_KEY/APP_SECRET/ACCESS_TYPE, `ser` (serial port) and
    # elinsGetDate are presumably defined at module level — confirm.
    data_serial = "text"
    width = 1000
    height = 700
    window_name = "test"
    number = 100
    delay = 5
    line_type = cv.CV_AA  # change it to 8 to see non-antialiased graphics
    cpture = cv.CaptureFromCAM(1)
    cv.SetCaptureProperty(cpture, cv.CV_CAP_PROP_FRAME_WIDTH, 640 / 2)
    cv.SetCaptureProperty(cpture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480 / 2)
    ulang = 1
    # OAuth handshake: the user must open the printed URL, authorize the
    # app, then press Enter at the raw_input() prompt.
    sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
    request_token = sess.obtain_request_token()
    url = sess.build_authorize_url(request_token)
    print "url:", url
    raw_input()
    access_token = sess.obtain_access_token(request_token)
    # NOTE(review): this rebinds the imported `client` module name to a
    # DropboxClient instance — works once, but shadows the module.
    client = client.DropboxClient(sess)
    print "linked account:", client.account_info()
    nomorfile = 0
    namafile = "initiasinama"
    aktiv = 0
    ser.timeout = 5
    print "standby"
    while (ulang == 1):
        image = cv.QueryFrame(cpture)
        cv.NamedWindow(window_name, 1)
        cv.ShowImage(window_name, image)
        data = ser.read(1)  # poll one byte from the serial alarm line
        if (data == "1"):  # alarm tripped -> start uploading frames
            print("alarm aktiv")
            aktiv = 1
        if (data == "0"):  # password authenticated -> disarm
            print("password terautentifikasi")
            aktiv = 0
            print("standby")
        if (aktiv == 1):
            cv.SaveImage("photo.jpg", image)
            # NOTE(review): file opened in text mode and closed only after
            # the upload; "rb" plus a with-block would be safer — confirm.
            f = open('photo.jpg')
            namafile = "photo" + elinsGetDate() + ".jpg"
            nomorfile = nomorfile + 1
            response = client.put_file(namafile, f)
            f.close()
            print "uploaded: ", namafile
def main():
    """Stream 240x180 frames from camera 0 over UDP via twisted."""
    cam = cv.CaptureFromCAM(0)
    for prop, value in ((cv.CV_CAP_PROP_FRAME_WIDTH, 240),
                        (cv.CV_CAP_PROP_FRAME_HEIGHT, 180)):
        cv.SetCaptureProperty(cam, prop, value)
    video = videoStore(cam)
    video.start()
    time.sleep(1)  # give the capture thread a head start
    server = VideoServer(video)
    reactor.listenUDP(0, server)
def __init__(self, camera_num=0, xmin=0, xmax=300, ymin=0, ymax=300): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax self.cam = cv.CaptureFromCAM(camera_num) print "W:", cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_FRAME_WIDTH) print "H:", cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_FRAME_HEIGHT) print "M:", cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_MODE)
def __init__(self):
    """Open camera 0 and prepare state and the window for the optical-flow
    visualisation."""
    self.capture = cv.CaptureFromCAM(0)

    # Flow drawing parameters: sampling step, arrow scale and colour.
    self.mv_step = 16
    self.mv_scale = 1.5
    self.mv_color = (0, 255, 0)

    # Filled in once the first frame pair has been processed.
    self.cflow = None
    self.flow = None

    cv.NamedWindow("Optical Flow", 1)
    print("Press ESC - quit the program\n")
def __init__(self):
    """Photo sensor backed by webcam 0; discards 30 warm-up frames."""
    AbstractSensor.__init__(self)
    from cv2 import cv
    self.type = 'Photo'
    self.units = 'jpeg'
    # TODO configurable camera port
    self._camera = cv.CaptureFromCAM(0)
    # Ramp up: discard initial frames so the camera settles.
    logging.debug('Ramping up Camera')
    # TODO configure ramp up
    for _ in xrange(30):
        cv.QueryFrame(self._camera)
def __init__(self): self.capture = cv.CaptureFromCAM(0) cv.NamedWindow("CamShiftDemo", 1) cv.NamedWindow("Backprojection", 1) cv.NamedWindow("Histogram", 1) cv.SetMouseCallback( "CamShiftDemo", self.on_mouse) #Instantiate call back for mouse event self.drag_start = None # Set to (x,y) when mouse starts drag self.track_window = None # Set to rect when the mouse drag finishes
def click_save_img(frames, path):
    """Capture *frames* consecutive frames from camera 0, saving them as
    pic00000.jpg, pic00001.jpg, ... under *path*, pausing `delay` seconds
    (module-level) between grabs."""
    cam = cv.CaptureFromCAM(0)
    idx = 0
    while idx < frames:
        img = cv.QueryFrame(cam)
        # cv.ShowImage("camera", img)
        cv.SaveImage(path + 'pic{:>05}.jpg'.format(idx), img)
        time.sleep(delay)
        # if cv.WaitKey(10) == 27:
        #     break
        idx += 1
def getImageFromCam():
    """Grab one 320x240 frame from camera 0, round-trip it through
    img.png on disk, and return it as a PIL Image."""
    cap = cv.CaptureFromCAM(0)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    frame = cv.QueryFrame(cap)
    cv.SaveImage('img.png', frame)
    return Image.open('img.png')