def __init__(self, window):
    # Initialize member variables ---------------------------------------------------
    super(PrototypeScene, self).__init__(window)
    self.__sceneID = IScene.SceneID.PROTOTYPE_SCENE
    self.__sceneInfo = SceneInfo()
    self.__bestEmotion = BestEmotion()
    self.__window = window  # window that hosts this scene
    self.__initFlags()  # turn the scene-exit flag OFF
    self.__sceneEventId = SceneEventID.SceneEventID()  # initialize the scene event
    self.__videoCapture = VideoCapture.VideoCapture(
        window, self.__sceneInfo.videoType)
    # Initialize the show/hide flags for frames and overlays
    self.__initDrawFlags()
    self.__faces = faces.Faces(
        self.__videoCapture.getCaptureImage())  # initialize the face list class
    self.__emotionImages = EmotionImages.EmotionImages(
        self.__window.getWidth(), self.__window.getHeight())
    # Initialize the decoration class
    self.__decorator = Decoration.Decoration(self.__window.getWidth(),
                                             self.__window.getHeight())
def main(videoPath="", verbose=False, videoWidth=0, videoHeight=0,
         fontScale=1.0, inference=False, confidenceLevel=0.8):
    global hubManager
    global videoCapture

    try:
        print("\nPython %s\n" % sys.version)
        print("Yolo Capture Azure IoT Edge Module. Press Ctrl-C to exit.")

        with VideoCapture(videoPath, verbose, videoWidth, videoHeight,
                          fontScale, inference, confidenceLevel) as videoCapture:
            try:
                hubManager = HubManager(10000, IoTHubTransportProvider.MQTT, False)
                AppState.init(hubManager)
            except IoTHubError as iothub_error:
                print("Unexpected error %s from IoTHub" % iothub_error)
                return

            videoCapture.start()

    except KeyboardInterrupt:
        print("Camera capture module stopped")
def video_capture(resolution):
    try:
        import VideoCapture
    except ImportError:
        pass
    else:
        capture = VideoCapture.Device()
        capture.setResolution(int(resolution[0]), int(resolution[1]))
        return VideoCaptureManager(capture, resolution)
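# Not from the source: a small usage sketch for video_capture() above. It assumes
# the caller handles the implicit None returned when the VideoCapture module
# cannot be imported.
manager = video_capture(("640", "480"))
if manager is None:
    print("VideoCapture is not installed; no capture manager was created")
else:
    print("capture manager ready")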
def __init__(self):
    if camera_in_use():
        print 'WARNING: camera in use, not loading imager'
        raise Exception('Camera in use')
    if not VC:
        raise Exception('Failed to import VC')
    self.cam = VC.Device()
    # Some devices' first image is special, throw it away
    self.cam.getImage()
    # Give some time for the device to come up
    time.sleep(1)
def __init__(self, device=None, camres=(320, 240)):
    """Initializes a CamEyeTracker instance

    arguments
    None

    keyword arguments
    device   -- a string or an integer, indicating either a device name
                (e.g. '/dev/video0') or a device number (e.g. 0); None can be
                passed too, in which case Setup will autodetect a usable
                device (default = None)
    camres   -- the resolution of the webcam, e.g. (640, 480)
                (default = (320, 240))
    """
    global vcAvailable

    if vcAvailable == False:
        # select a device if none was selected
        if device == None:
            available = available_devices()
            if available == []:
                raise Exception(
                    "Error in camtracker.CamEyeTracker.__init__: no available camera devices found (did you forget to plug it in?)"
                )
            else:
                device = available[0]  # use the first available device
        # start the webcam
        self.cam = pygame.camera.Camera(device, camres, 'RGB')
        self.cam.start()
    else:
        self.cam = VideoCapture.Device()

    # get the webcam resolution (get_size not available on all systems)
    try:
        self.camres = self.cam.get_size()
    except:
        self.camres = camres

    # default settings
    self.settings = {'pupilcol': (0, 0, 0),
                     'threshold_1': 100,
                     'threshold_2': 2,
                     'nonthresholdcol': (100, 100, 255, 255),
                     'pupilpos': (-1, -1),
                     'spotpos': (-1, -1),
                     'pupilrect': pygame.Rect(self.camres[0] / 2 - 50,
                                              self.camres[1] / 2 - 25, 100, 50),
                     'pupilbounds': [0, 0, 0, 0],
                     'spotbounds': [0, 0, 0, 0],
                     '': None}
def load_camera_list(self):
    i = 0
    while 1:
        try:
            c = VideoCapture.Device(i)
            self.camera_list.append({
                'name': c.getDisplayName(),
                'device': c
            })
            del c
            i += 1
        except:
            break
def __init__(self, device_number=0):
    """Creates an instance and connects to the specified VideoCapture device.

    :param device_number: The number of the camera device to use. For more
        information please consult the VideoCapture documentation.
    """
    try:
        import VideoCapture
    except ImportError:
        SMOBaseObject.debug_handler.out(
            "Cannot import VideoCapture which is required for this camera type"
        )
        raise
    self._device = VideoCapture.Device(device_number)
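# Not from the source: a minimal sketch of the underlying VideoCapture calls on
# their own, using only methods that appear elsewhere in these snippets
# (Device, getDisplayName, getImage, saveSnapshot).
import VideoCapture

device = VideoCapture.Device(0)      # first capture device
print(device.getDisplayName())       # human-readable device name
image = device.getImage()            # returns a PIL image
device.saveSnapshot('snapshot.jpg', quality=75)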
def __init__(self, root, cols=6, rows=4, radius=50, video_source=0,
             file_name="training_data.csv"):
    self.root = root
    self.cols = cols
    self.rows = rows
    self.circle_radius = radius
    self.video_source = video_source
    self.vid = VideoCapture.MyVideoCapture(self.video_source)
    self.eye_tracker = EyeTracking.EyeTracker(self.vid)
    self.calib_rdy = 0
    self.calibrated = False

    cv2.namedWindow('image')
    cv2.createTrackbar('threshold', 'image', 42, 255, self.set_thresh)

    self.left_eye = []
    self.file_name = file_name
    self.screen_width = self.root.winfo_screenwidth()
    self.screen_height = self.root.winfo_screenheight()
    self.circles = []
    self.left = []
    self.right = []
    self.color_ind = 2
    self.clicked = "cyan"
    self.eye_detected = "green"
    self.no_detection = "red"
    self.data = []
    self.col_scale = (self.screen_width - (self.circle_radius * 2)) / self.cols
    self.row_scale = (self.screen_height - (self.circle_radius * 2)) / self.rows
    self.pcol_scale = 0
    self.pcol_offset = 0
    self.prow_scale = 0
    self.prow_offset = 0
    # center, top, left, right, bottom
    self.coords = [0, 0, 0, 0, 0]

    self.canvas = Canvas(self.root, width=self.screen_width,
                         height=self.screen_height, bg="white")
    self.canvas.bind("<Button-1>", self.mouse_pressed)
    self.init_circles()
    self.canvas.pack()

    self.delay = 1
    self.update()
    self.root.mainloop()
def takePhotoByCamera(self, devnum=0):
    '''
    :param devnum: int. VideoCapture enumerates the available video capture
        devices on your system. If you have more than one device, specify the
        desired one here. The device number starts from 0.
    '''
    picPath = os.path.join(rootPath, 'report')
    timeNow = time.strftime("%Y-%m-%d_%H-%M-%S")
    camera = VideoCapture.Device(devnum=devnum)
    camera.saveSnapshot(picPath + "\\" + '%s.png' % timeNow,
                        quality=75, timestamp=0, boldfont=1)
    # print(picPath + "\\" + '%s.png' % timeNow)
    return picPath + "\\" + '%s.png' % timeNow
def __init__(self, window, window_title, video_source=0):
    self.window = window
    self.window.title(window_title)
    self.video_source = video_source

    # open video source (by default this will try to open the computer webcam)
    self.vid = VideoCapture.MyVideoCapture(self.video_source)

    # Create a canvas that can fit the above video source size
    self.canvas = tkinter.Canvas(window, width=self.vid.width,
                                 height=self.vid.height)
    self.canvas.pack()

    # After it is called once, the update method will be automatically
    # called every delay milliseconds
    self.delay = 15
    self.finder = Finder.FeatureFinder()
    self.update()

    self.window.mainloop()
def open(self):
    self._cam = VideoCapture.Device(self.camera_number)

    # Capture a throwaway frame in order to get the resolution
    # and bytes per pixel
    buffer, self._width, self._height = self._cam.getBuffer()
    # integer division so itemsize is an int for the comparisons below
    itemsize = len(buffer) // (self._width * self._height * 3)

    # Pick an appropriate dtype and cache it
    if itemsize == 1:
        self._dtype = N.uint8
    elif itemsize == 2:
        self._dtype = N.uint16
    elif itemsize == 4:
        self._dtype = N.uint32
    else:
        raise CameraError(
            "Unsupported bytes per pixel '{}'".format(itemsize),
            self.camera_number)
def btTakeImage(self):
    student_id = str(self.txt.get()).upper()
    name = str(self.txt2.get())
    if (name.isalpha() or (' ' in name)):
        mycamera = VideoCapture.MyVideoCapture(0)
        if not mycamera.vid.isOpened():
            res = "Unable to open this camera"
            self.message.configure(text=res)
        else:
            sql3 = Sqlite3Brower.Sqlite3(name, student_id)
            if sql3.checkStudentId():
                res = 'Student ID already exists'
            else:
                mycamera.getFrame()
                sql3.insertStudent()
                res = "Images Taken for ID: " + student_id
                self.flag = True
            self.message.configure(text=res)
    elif not name.isalpha():
        res = "Please Enter correct information"
        self.message.configure(text=res)
def on_callback_query(msg):
    query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
    print('Callback Query:', query_id, from_id, query_data)

    keyboard = InlineKeyboardMarkup(inline_keyboard=[
        [InlineKeyboardButton(text='Take another photo',
                              callback_data='take_photo')],
    ])

    if query_data == 'take_photo':
        camera = VideoCapture.Device()
        camera.setResolution(1920, 1080)
        image = camera.getImage()
        image = image.rotate(90, expand=True)

        temp_file = BytesIO()
        image.save(temp_file, "jpeg")
        temp_file.seek(0)
        bot.sendPhoto(from_id, temp_file, reply_markup=keyboard)
        temp_file.close()

        camera = None
def __init__(self, parent):
    global mself
    global cam
    global zoom
    mself = self
    zoom = 5
    self._init_ctrls(parent)
    try:
        cam = VideoCapture.Device(devnum=Frame1.devid)
        print 'cam init'
        time.sleep(2)
        # Discard the first frames; some devices need a few grabs to warm up
        im = cam.getImage()
        im = cam.getImage()
        im = cam.getImage()
        print 'cam start'
        self.timer1.Start(601)
    except:
        wx.MessageBox("Failed to connect to the video device; please check the "
                      "connection and the device number setting",
                      "Error", wx.OK)
def btTakeAttendance(self):
    mycamera = VideoCapture.MyVideoCapture(0)
    if not mycamera.vid.isOpened():
        res = "Unable to open this camera"
        self.message.configure(text=res)
    else:
        profile = mycamera.reconigizer()
        date = datetime.datetime.fromtimestamp(time.time()).strftime('%d-%m-%Y')
        exists = os.path.isfile("Attendance\Attendance_" + date + ".csv")
        col_names = ['ID', 'STID', 'Name', 'Time']
        if exists:
            with open("Attendance\Attendance_" + date + ".csv", 'a+') as csvFile1:
                writer = csv.writer(csvFile1)
                for student in profile:
                    self.tv.insert('', 0, text=student[1],
                                   value=(str(student[2]), date, student[3]))
                    writer.writerow(student)
            csvFile1.close()
        else:
            with open("Attendance\Attendance_" + date + ".csv", 'a+') as csvFile1:
                writer = csv.writer(csvFile1)
                writer.writerow(col_names)
                for student in profile:
                    self.tv.insert('', 0, text=student[1],
                                   value=(str(student[2]), date, student[3]))
                    writer.writerow(student)
            csvFile1.close()
        res = "Attendance complete"
        self.message.configure(text=res)
def __init__(self, device=None, camres=(640, 480)):
    """Initializes a CamEyeTracker instance
    """
    global vcAvailable

    if vcAvailable == False:
        # select a device if none was selected
        if device == None:
            available = available_devices()
            if available == []:
                raise Exception(
                    "Error in camtracker.CamEyeTracker.__init__: no available camera devices found (did you forget to plug it in?)"
                )
            else:
                device = available[0]
        # start the webcam
        self.cam = pygame.camera.Camera(device, camres, 'RGB')
        self.cam.start()
    else:
        self.cam = VideoCapture.Device()

    # get the webcam resolution (get_size not available on all systems)
    try:
        self.camres = self.cam.get_size()
    except:
        self.camres = camres

    # default settings
    self.settings = {'pupilcol': (0, 0, 0),
                     'threshold': 60,
                     'nonthresholdcol': (100, 100, 255, 255),
                     'pupilpos': (-1, -1),
                     'pupilrect': pygame.Rect(self.camres[0] / 2 - 50,
                                              self.camres[1] / 2 - 25, 100, 50),
                     'pupilbounds': [0, 0, 0, 0],
                     '': None}
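# Not from the source: a minimal usage sketch for the constructor above. The module
# name `camtracker` is taken from the error message in __init__; exposing the class
# there is an assumption.
import camtracker

tracker = camtracker.CamEyeTracker(device=0, camres=(640, 480))
print(tracker.camres)                  # falls back to the requested resolution if get_size() is unavailable
print(tracker.settings['pupilrect'])   # default pupil search rectangle centred on the image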
def main():
    parser = ConfigParser.ConfigParser()
    parser.read('webcam-server.ini')

    # Start the image capture stuff
    devnum = int(parser.get('httpd', 'devnum'))
    quality = int(parser.get('httpd', 'quality'))
    cam = VideoCapture.Device(devnum=devnum)
    VideoCaptureRequestHandler.SharedCamera = cam
    VideoCaptureRequestHandler.Quality = quality

    # Start the web server
    address = parser.get('httpd', 'address')
    port = int(parser.get('httpd', 'port'))
    server_address = (address, port)
    httpd = BaseHTTPServer.HTTPServer(server_address, VideoCaptureRequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
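# Not from the source: a hypothetical "webcam-server.ini" matching the parser.get()
# calls in main() above. The section and key names come from the code; the values
# are only placeholders.
#
#   [httpd]
#   devnum = 0
#   quality = 75
#   address = 0.0.0.0
#   port = 8080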
def main_loop(args):
    print(args)

    settings = Settings.Settings(args)
    history = History.History(settings)
    connection = Connection.Connection(settings, history)
    #if connection.failed: return -1
    if connection.hard_stop:
        return -1

    cropscoordinates = CropsCoordinates.CropsCoordinates(settings, history)
    videocapture = VideoCapture.VideoCapture(settings, history)
    evaluation = Evaluation.Evaluation(settings, connection, cropscoordinates,
                                       history)
    attentionmodel = AttentionModel.AttentionModel(settings, cropscoordinates,
                                                   evaluation, history)
    postprocess = Postprocess.Postprocess(settings, history)
    renderer = Renderer.Renderer(settings, history)
    debugger = Debugger.Debugger(settings, cropscoordinates, evaluation)

    settings.save_settings()
    settings.set_debugger(debugger)

    for frame, next_frames, frame_number in videocapture.frame_generator_thread_loading():
        settings.frame_number = frame_number
        print("frame: ", frame[2])
        for i in range(len(next_frames)):
            print("next_frames", i, ": ", next_frames[i][2], next_frames[i][0],
                  next_frames[i][2:])

        attention_coordinates = cropscoordinates.get_crops_coordinates('attention')
        #debugger.debug_coordinates_in_frame(attention_coordinates, frame[1],'attention')

        attention_evaluation = evaluation.evaluate_attention_with_precomputing(
            frame_number, attention_coordinates, frame, 'attention', next_frames)
        # attention_evaluation starts in attention crop space (the frame is downscaled
        # for attention evaluation so that 608x608 crops can be cut from it easily)

        projected_evaluation = cropscoordinates.project_evaluation_back(
            attention_evaluation, 'attention')
        #debugger.debug_evaluation_to_bboxes_after_reprojection(projected_evaluation, frame[1], 'attention', 'afterRepro')
        # projected_evaluation is now in original image space

        evaluation_coordinates = cropscoordinates.get_crops_coordinates('evaluation')
        # evaluation_coordinates are in evaluation space (the frame is downscaled for
        # regular evaluation so that 608x608 crops can be cut from it easily)
        #debugger.debug_coordinates_in_frame(evaluation_coordinates, frame[1], 'evaluation')

        active_coordinates = attentionmodel.get_active_crops_intersections(
            projected_evaluation, evaluation_coordinates, frame)
        #debugger.debug_coordinates_in_frame(active_coordinates, frame[1], 'evaluation', "__"+str(settings.frame_number)+'activeonly')

        if len(active_coordinates) == 0:
            print("Nothing left active - that's possibly ok, skip")
            renderer.render([], frame)
            history.report_skipped_final_evaluation(frame_number)
            continue

        final_evaluation = evaluation.evaluate(active_coordinates, frame,
                                               'evaluation', frame_number)
        # final_evaluation is in evaluation space

        projected_final_evaluation = cropscoordinates.project_evaluation_back(
            final_evaluation, 'evaluation')
        # projected back to original space

        projected_active_coordinates = cropscoordinates.project_coordinates_back(
            active_coordinates, 'evaluation')

        processed_evaluations = postprocess.postprocess(
            projected_active_coordinates, projected_final_evaluation)
        #debugger.debug_evaluation_to_bboxes_after_reprojection(processed_evaluations, frame[1], 'finalpostprocessed'+frame[0][-8:-4])

        renderer.render(processed_evaluations, frame)

        history.tick_loop(frame_number, True)

    history.save_whole_history_and_settings()
import integrate
import VideoCapture
import time
import cv2
import numpy
import serial
import pandas as pd

cap = VideoCapture.VideoCapture('http://192.168.42.129:8080/video')
crtwt = 0
inv = 0
quant = [0 for i in range(5)]
ser = serial.Serial('COM3')
ds = pd.read_csv("product.csv")
ds = ds.as_matrix()

while True:
    frame = cap.read()
    cv2.imshow("jjk", frame)
    cv2.waitKey(1)
    wt = integrate.getWT(ser)
    wtchng = wt - crtwt
    if integrate.getMotion(frame) == 1:
        pred1 = integrate.getML(frame)
        pred2 = integrate.getQR(frame)
        if pred2 == -1:
            pred2 = pred1
        if pred2 == -1:
            continue
        if abs(wtchng) < 15 and pred2 == -1:
            continue
        if wtchng < 15:
            # object is being removed
capture_started = [False]
direction = [0, 0, 0, 0]
capture_start_flag = [False]
is_capture_running = [False]
key = [0]

# detecting the key and controlling the car run in two separate threads
key_detect_in_thread = threading.Thread(target=KeyControlCar.key_detect,
                                        args=(direction, ))
key_detect_in_thread.start()

key_control_car_in_thread = threading.Thread(
    target=KeyControlCar.key_control_car,
    args=(
        direction,
        capture_started,
        capture_start_flag,
        is_capture_running,
        key,
    ))
key_control_car_in_thread.start()

# video capture
while not capture_start_flag[0]:
    time.sleep(.1)

VideoCapture.capture(is_capture_running, capture_started, key)

key_detect_in_thread.join()
key_control_car_in_thread.join()
print("Yeah, finished!!")
def run(capture, captured_img_save_folder, face_img_save_folder):
    print datetime.datetime.now(),
    captured_img_filepath, img = VideoCapture.WriteImage(
        capture, captured_img_save_folder)
    face_detect_result = FaceDetection.FaceDetect(
        img, os.path.basename(captured_img_filepath.split(".jpg")[0]),
        face_img_save_folder)
    if face_detect_result is None:
        print "OK"
        return
    print "%d faces detected" % len(face_detect_result)


def CheckIsExist(folder):
    if not os.path.isdir(folder):
        os.mkdir(folder)


if __name__ == "__main__":
    captured_img_save_folder = "./CapturedImg"
    face_img_save_folder = "./FaceImg"
    CheckIsExist(captured_img_save_folder)
    CheckIsExist(face_img_save_folder)
    capture = VideoCapture.TurnOnCam()
    while True:
        run(capture, captured_img_save_folder, face_img_save_folder)
        time.sleep(1)
def OnButton(self, event):
    # Photo-capture button handler
    self.cam = VideoCapture.Device()  # initialize the camera
    self.cam.saveSnapshot('Photo.jpg')  # save the snapshot
    self.timer.Start(100)  # timer event interval (ms)
def __initCameraCapture(self):
    self.__videoCapture = VideoCapture.VideoCapture(
        self.__window, self.__sceneInfo.videoType)
def GetCSV(self, master):
    if (parameter.RecoCap.isOpened()):
        parameter.RecoCap.release()
    # pass the callable itself as the thread target rather than calling it here
    thread = threading.Thread(target=VideoCapture.FacialFeatureCollection)
    thread.daemon = 1
    thread.start()
    master.switch_frame(StartPage)
def defineUserInterface(self):
    node.Node.defineUserInterface(self)

    self.continuous = uidata.Boolean('Continuous', False, 'rw')
    self.acquiremethod = uidata.Method('Acquire', self.uiAcquire)
    self.stopmethod = uidata.Method('Stop', self.uiStop)
    self.uiwebcamimage = uidata.PILImage('Image', None)

    container = uidata.LargeContainer('Webcam')
    container.addObjects((self.uiwebcamimage, self.continuous,
                          self.acquiremethod, self.stopmethod))
    self.uicontainer.addObject(container)
    self.stopmethod.disable()


if __name__ == '__main__':
    import sys
    sys.coinit_flags = 0
    import pythoncom
    import Image
    import MrcImagePlugin
    import cStringIO

    webcam = VideoCapture.Device()
    image = webcam.getImage()
    stream = cStringIO.StringIO()
    image.save(stream, 'jpeg')
    buffer = stream.getvalue()
    stream.close()
    bar = Image.open(cStringIO.StringIO(buffer))
    bar.show()
def __init__(self, id, session, managerlocation, **kwargs):
    node.Node.__init__(self, id, session, managerlocation, **kwargs)
    self.webcam = VideoCapture.Device()
    self.stop = False
    self.defineUserInterface()
    self.start()
# determine the desired image format
ext = ext.lower()
if ext == '.jpg' or ext == '.jpeg':
    format = 'JPEG'
elif ext == '.gif':
    format = 'GIF'
elif ext == '.bmp':
    format = 'BMP'
else:
    raise ValueError, 'unsupported image format'

if testWithoutVideoCapture:
    from PIL import Image
else:
    import VideoCapture
    cam = VideoCapture.Device(devnum=devnum)

if verbose:
    uploadedimages = 0
    totaluploadtime = 0

firsttime = 1
sessionstarttime = int(time.time())
while 1:
    try:
        recenttime = now = int(time.time())
        upload(firsttime=firsttime, uploadstarttime=now)
        firsttime = 0
        while now == recenttime or (now - sessionstarttime) % interval:
            now = int(time.time())
            time.sleep(0.2)
    except KeyboardInterrupt:
        csv_writer.writerow(rowx)
    rowy = [cursorx / 1919, cursory / 1079]
    with open("ytrain2.csv", 'a+', newline='') as write_obj:
        csv_writer = writer(write_obj)
        csv_writer.writerow(rowy)
    return [frame, rowx]


def nothing(val):
    pass


cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 42, 255, nothing)

vs = VideoCapture.MyVideoCapture()
sp = seqpose.SEQP()
et = EyeTracker(vid=vs, seqp=sp)
s = Seq.SEQ()

while True:
    thresh_val = cv2.getTrackbarPos('threshold', 'image')
    et.pupil_thresh = thresh_val
    # print(pyautogui.position())
    save = win32api.GetAsyncKeyState(0x20)
    # start_time = time.time()
    f = et.mainloop(save)
    if len(f) != 0:
        print(np.shape(f[1]))
        p = s.predict(np.array([f[1]]))
        x = p[0][0] * 1919
        y = p[0][1] * 1079
# Besides OpenCV, the VideoCapture library can also be used
import VideoCapture
import tkinter

root = tkinter.Tk()
dev = VideoCapture.Device(0, 1)
dev.getBuffer()
root.mainloop()
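# Not from the source: a minimal sketch of polling frames from the same device and
# showing them in the Tk window. It assumes Device.getImage() returns a PIL image
# (as it does in the snippets above) and that Pillow's ImageTk is available.
import tkinter
import VideoCapture
from PIL import ImageTk

root = tkinter.Tk()
dev = VideoCapture.Device(0)
label = tkinter.Label(root)
label.pack()

def update_frame():
    frame = dev.getImage()        # grab the current frame as a PIL image
    photo = ImageTk.PhotoImage(frame)
    label.configure(image=photo)
    label.image = photo           # keep a reference so Tk does not discard it
    root.after(30, update_frame)  # poll again in ~30 ms

update_frame()
root.mainloop()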