def __init__(self, config, bus):
    self.input_thread = Thread(target=self.run_input, daemon=True)
    self.bus = bus
    bus = config['bus']
    ports = config['ports']
    # to find the proper bus and ports numbers run:
    #   python -c "import uvc; from pprint import pprint; pprint(uvc.device_list())"
    self.sleep = config.get('sleep', 0.1)
    self.cap = None
    dev_list = uvc.device_list()
    for dev in dev_list:
        if dev['bus_number'] == bus and dev['bus_ports'] == ports:
            self.cap = uvc.Capture(dev["uid"])
            self.cap.frame_mode = self.cap.avaible_modes[0]
            self.cap.bandwidth_factor = 1.2
            # when incomplete pictures are being received, increase bandwidth_factor
            # when a "no space left on device" error occurs, decrease bandwidth_factor
            # for more info see https://docs.pupil-labs.com/#jpeg-size-estimation-and-custom-video-backends
            return
    import pprint
    error = "camera not found on bus %i and ports %s\n" % (bus, str(ports))
    error += "available cameras:\n"
    error += pprint.pformat(uvc.device_list())
    self.bus.report_error(error)
    print(error, file=sys.stderr)
def __new__(cls, cam_name, *more):
    """
    Class creator which prevents a UvcCapture object from being created if we can't connect to the
    specified camera. It also finds the UID of the camera for us, so we can choose the camera more
    easily through its name or index.
    :param cam_name: Either the desired camera name or the index in the list of available devices to connect to
    :param more: Any other params that would be passed on to the uvc.Capture creator
    :return: A new object connected to the specified camera, or None if there was an error (eg: camera not found).
    """
    cam_uid = ""
    if isinstance(cam_name, str):  # str -> Find by camera name
        for d in uvc.device_list():  # From the available devices list, find the one we want
            if d['name'] == cam_name:
                cam_uid = d['uid']  # And figure out its UID
                break
    elif isinstance(cam_name, int):  # int -> Find by camera index
        if cam_name < len(uvc.device_list()):
            cam_uid = uvc.device_list()[cam_name]['uid']  # If index is within bounds, figure out camera's UID

    if cam_uid == "":  # This only happens if we couldn't find the camera from the information we were provided
        logging.warning("Couldn't find camera '{}' in the list of available UVC devices, "
                        "aborting creation of {} object.".format(cam_name, UvcCapture))
        return None  # Don't allow the creation of the object -> Return None

    obj = super(UvcCapture, cls).__new__(cls, more)  # Create a new object by calling the parent constructor
    try:
        super(UvcCapture, obj).__init__(cam_uid)  # Initialize the object using the camera UID (uvc.Capture opens the device)
        return obj  # Finally return the newly created object if everything went fine
    except:
        logging.error("Something went wrong opening camera '{}', is it already open by another process? "
                      "Aborting creation of {} object.".format(cam_name, UvcCapture))
        return None  # In case of an error (eg: the camera is already in use) return None
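A minimal usage sketch for the UvcCapture class above, assuming it subclasses uvc.Capture as the super() calls suggest; the camera name used here is purely illustrative and will differ on your system.

# Hedged usage sketch for UvcCapture.__new__ above; "Pupil Cam1 ID2" is an illustrative
# name, not a device guaranteed to exist.
import logging
import uvc

logging.basicConfig(level=logging.INFO)

cap = UvcCapture("Pupil Cam1 ID2")   # select by name
if cap is None:
    cap = UvcCapture(0)              # fall back to the first enumerated device
if cap is not None:
    # frame_mode, avaible_modes and get_frame_robust are inherited from uvc.Capture
    # (older pyuvc releases really spell the attribute 'avaible_modes')
    cap.frame_mode = cap.avaible_modes[0]
    frame = cap.get_frame_robust()
    print(frame.img.shape)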
def check_hw_ts_support(self):
    # hw timestamping:
    # uvc supports Start of Exposure hardware timestamping for UVC capture devices.
    # These HW timestamps are excellent reference times and are preferred over software
    # timestamps, which only denote the availability of frames to the user.
    # However, not all uvc cameras report valid hw timestamps, notably the Microsoft HD-6000.
    # Because every device in use needs to properly implement hw timestamping for it to be useful,
    # but we cannot know what device the other process is using and the user may select a different
    # capture device during runtime, we use some fuzzy logic to determine whether hw timestamping
    # should be employed.
    return False  # NOTE: this early return disables the heuristic below; it is kept for reference
    blacklist = ["Microsoft", "HD-6000"]
    qualifying_devices = ["C930e", "Integrated Camera", "USB 2.0 Camera"]
    attached_devices = [c.name for c in device_list()]
    if any(qd in self.name for qd in qualifying_devices):
        use_hw_ts = True
        logger.info("Capture device: '%s' supports HW timestamping. Using hardware timestamps." % self.name)
    else:
        use_hw_ts = False
        logger.info("Capture device: '%s' is not known to support HW timestamping. Using software timestamps." % self.name)

    for d in attached_devices:
        if any(bd in d for bd in blacklist):
            logger.info("Capture device: '%s' detected as attached device. Falling back to software timestamps" % d)
            use_hw_ts = False
    return use_hw_ts
def check_hw_ts_support(self):
    # hw timestamping:
    # uvc supports Start of Exposure hardware timestamping for UVC capture devices.
    # These HW timestamps are excellent reference times and are preferred over software
    # timestamps, which only denote the availability of frames to the user.
    # However, not all uvc cameras report valid hw timestamps, notably the Microsoft HD-6000.
    # Because every device in use needs to properly implement hw timestamping for it to be useful,
    # but we cannot know what device the other process is using and the user may select a different
    # capture device during runtime, we use some fuzzy logic to determine whether hw timestamping
    # should be employed.
    blacklist = ["Microsoft", "HD-6000"]
    qualifying_devices = ["C930e", "Integrated Camera", "USB 2.0 Camera"]
    attached_devices = [c.name for c in device_list()]
    if any(qd in self.name for qd in qualifying_devices):
        use_hw_ts = True
        logger.info("Capture device: '%s' supports HW timestamping. Using hardware timestamps." % self.name)
    else:
        use_hw_ts = False
        logger.info("Capture device: '%s' is not known to support HW timestamping. Using software timestamps." % self.name)

    for d in attached_devices:
        if any(bd in d for bd in blacklist):
            logger.info("Capture device: '%s' detected as attached device. Falling back to software timestamps" % d)
            use_hw_ts = False
    return use_hw_ts
def run(self):
    self.capturing.value = 1
    dev_list = uvc.device_list()
    cap = uvc.Capture(dev_list[self.source]['uid'])
    self.__setup_eye_cam(cap)
    cap.frame_mode = self.mode
    attempt, attempts = 0, 6
    gamma, color, flip = 1, True, False
    while attempt < attempts:
        try:
            frame = cap.get_frame(2.0)
            img = self.__adjust_gamma(frame.bgr, gamma)
            img = self.__cvtBlackWhite(img, color)
            img = self.__flip_img(img, flip)
            if img is not None:
                attempt = 0
                shared_img = self.__get_shared_np_array(img)
                np.copyto(shared_img, img)
        except Exception as e:
            traceback.print_exc()
            cap = self.__reset_mode(cap)
            attempt += 1
        if self.pipe.poll():
            msg = self.pipe.recv()
            if msg == "stop":
                break
            elif msg == "gamma":
                gamma = self.pipe.recv()
            elif msg == "color":
                color = self.pipe.recv()
            elif msg == "flip":
                flip = self.pipe.recv()
    self.capturing.value = 0
    print("camera", self.source, "closed")
def gui_init_cam_by_uid(requested_id):
    for cam in uvc.device_list():
        if cam["uid"] == requested_id:
            self.re_init(requested_id)
            return
    logger.warning("could not reinit capture, src_id not valid anymore")
    return
def __init__(self, name):
    """Note: all class members which are not process variables should be prefixed with _."""
    # initial image; the height, width and number of planes can be approximate
    h, w, p = 4, 3, 3
    image = np.arange(h * w * p).astype('uint8').reshape(h, w, p)
    pars = {
        'count': LDO('R', 'Image count', [0]),
        'image': LDO('R', 'Image', image),
        'sleep': LDO('RWE', 'Sleep time between image acquisitions', [1.], units='s', opLimits=(0.02, 10)),
        'shape': LDO('R', 'Frame shape Y,X,Planes', [0, 0, 0]),
        'fps': LDO('R', 'Frames/s', [0]),
        'imgps': LDO('R', 'Images/s', [0], units='img/s'),
        'subscribe': LDO('RWE', 'Subscribe to image', ['On'], legalValues=['On', 'Off']),
    }
    super().__init__(name, pars)

    dev_list = uvc.device_list()
    print(dev_list)
    self._cap = uvc.Capture(dev_list[0]["uid"])
    print(f'available modes: {self._cap.avaible_modes}')
    frame_mode = (640, 480, 30)
    try:
        self._cap.frame_mode = (640, 480, 30)
        #self._cap.frame_mode = (960, 720, 15)
        self.fps.value[0] = self._cap.frame_mode[2]
    except Exception as e:
        printw(f'Failed to setup frame mode {frame_mode}: {e}')

    thread = threading.Thread(target=self._state_machine)
    thread.daemon = False
    thread.start()
def gui_init_cam_by_uid(requested_id):
    for cam in uvc.device_list():
        if cam['uid'] == requested_id:
            self.re_init(requested_id)
            return
    logger.warning("could not reinit capture, src_id not valid anymore")
    return
def check_mode_availability(self, source, mode):
    dev_list = uvc.device_list()
    cap = uvc.Capture(dev_list[source]['uid'])
    if mode not in cap.avaible_modes:
        m = cap.avaible_modes[0]
        mode = (m[1], m[0], m[2])
        self.shared_array = self.create_shared_array(mode)
        self.mode = mode
    return mode
def run(self):
    while not self.done:
        logging.info("Scanning for new devices...")
        dev_list = uvc.device_list()     # Find available devices
        if self.dev_list != dev_list:    # If a new device was found or an old device was disconnected
            self.dev_list = dev_list     # Update the list of devices
            self.sig_new_dev.emit()      # And fire a signal
            print("Compatible devices found:\n\t" + str(self.dev_list))
        self.sleep(self.REFRESH_RATE)
def _re_init_capture_by_name(self, name):
    for x in range(4):
        devices = device_list()
        for d in devices:
            if d['name'] == name:
                logger.info("Found device.%s." % name)
                self.re_init_capture(d['uid'])
                return
        logger.warning('Could not find Camera %s during re-initialization.' % name)
        sleep(1.5)
    raise CameraCaptureError('Could not find Camera %s during re-initialization.' % name)
def __reset_mode(self, cap):
    print("resetting...")
    mode = cap.frame_mode
    cap.close()
    time.sleep(0.5)
    dev_list = uvc.device_list()
    cap2 = uvc.Capture(dev_list[self.source]['uid'])
    print("Trying mode:", mode)
    cap2.frame_mode = mode
    cap2.bandwidth_factor = 1.3
    return cap2
def gui_init_cam_by_uid(requested_id):
    if requested_id is None:
        self.re_init_capture(None)
    else:
        for cam in uvc.device_list():
            if cam['uid'] == requested_id:
                if is_accessible(requested_id):
                    self.re_init_capture(requested_id)
                else:
                    logger.error("The selected Camera is already in use or blocked.")
                return
        logger.warning("could not reinit capture, src_id not valid anymore")
    return
def __set_fps_modes(self):
    self.fps_res, self.modes = {}, {}
    dev_list = uvc.device_list()
    cap = uvc.Capture(dev_list[self.source]['uid'])
    for i in range(len(cap.avaible_modes)):
        mode = cap.avaible_modes[i]
        fps = mode[2]
        if fps not in self.fps_res.keys():
            self.fps_res[fps] = []
            self.modes[fps] = []
        resolution = str(mode[0]) + " x " + str(mode[1])
        self.modes[fps].append(mode)
        self.fps_res[fps].append(resolution)
    if self.mode not in cap.avaible_modes:
        self.mode = sorted(cap.avaible_modes)[0]
    cap.close()
def __new__(cls, cam_name, cam_vend_id=-1, cam_prod_id=-1, cam_serial_num=None, *more):
    """
    Class creator which prevents a UvcCapture object from being created if we can't connect to the
    specified camera. It also finds the UID of the camera for us, so we can choose the camera more
    easily through its name or index.
    :param cam_name: If str, it's the desired camera name; if int, the index in the list of available devices to connect to
    :param cam_vend_id: If specified (-1=any), it indicates the vendor id of the camera we want to connect to.
    :param cam_prod_id: If specified (-1=any), it indicates the product id of the camera we want to connect to.
    :param cam_serial_num: If specified (None=any), it indicates the serial number of the camera we want to connect to.
        This is particularly useful when we use more than one camera with the same USB camera name, vendor ID, product ID...
    :param more: Any other params that would be passed on to the uvc.Capture creator
    :return: UvcCapture object connected to the specified camera, or None if there was an error (eg: camera not found)
    """
    cam = None
    device_list = uvc.device_list()
    if isinstance(cam_name, int):  # cam_name is int -> Find by camera index
        if cam_name < len(device_list):
            cam = device_list[cam_name]  # If index is within bounds, use that device, but check for additional constraints
            if (cam_vend_id >= 0 and cam['idVendor'] != cam_vend_id) or \
               (cam_prod_id >= 0 and cam['idProduct'] != cam_prod_id) or \
               (cam_serial_num is not None and cam['serialNumber'] != cam_serial_num):
                logging.info("Camera with index cam_name={} doesn't match additional constraints "
                             "(vend_id={}, prod_id={}, serial_num={})".format(cam_name, cam_vend_id, cam_prod_id, cam_serial_num))
                cam = None  # Additional constraints not met, don't use this device
        else:
            logging.info("Provided camera index (cam_name={}) out of range (there are only {} devices)".format(cam_name, len(device_list)))
    else:
        for d in device_list:  # From the available devices list, find the one we want
            if d['name'] == cam_name and \
               (cam_vend_id < 0 or cam_vend_id == d['idVendor']) and \
               (cam_prod_id < 0 or cam_prod_id == d['idProduct']) and \
               (cam_serial_num is None or cam_serial_num == d['serialNumber']):
                cam = d
                break

    if cam is None:  # This only happens if we couldn't find the camera from the information we were provided
        logging.warning("Couldn't find camera {} in the list of available UVC devices, aborting creation of {} object.".format(
            UvcCapture.get_human_friendly_identifier(cam_name, cam_vend_id, cam_prod_id, cam_serial_num), UvcCapture))
        return None  # Don't allow the creation of the object -> Return None

    obj = super(UvcCapture, cls).__new__(cls, more)  # Create a new UvcCapture object by calling the parent constructor
    obj.vend_id = cam['idVendor']
    obj.prod_id = cam['idProduct']
    obj.serial_num = cam['serialNumber']
    obj.provided_vend_id = (cam_vend_id >= 0)
    obj.provided_prod_id = (cam_prod_id >= 0)
    obj.provided_serial_num = (cam_serial_num is not None)
    try:
        super(UvcCapture, obj).__init__(cam['uid'])  # Initialize the object using the camera UID (uvc.Capture opens the device)
        return obj  # Finally return the newly created object if everything went fine
    except:
        logging.error("Something went wrong opening camera {}, is it already open by another process? "
                      "Aborting creation of {} object.".format(
                          UvcCapture.get_human_friendly_identifier(cam_name, cam_vend_id, cam_prod_id, cam_serial_num), UvcCapture))
        return None  # In case of an error (eg: the camera is already in use) return None
def __init__(self, uid_or_device_index, use_uvc: bool):
    if use_uvc:
        import uvc
        lg.info('using uvc as backend')
        devices = uvc.device_list()
        lg.info('there are {} camera devices, devices:'.format(len(devices)))
        select_device = None
        for d in devices:
            lg.info(d)
            if d['uid'] == uid_or_device_index:
                select_device = d
        lg.info('opening {}'.format(select_device['uid']))
        self.cap = uvc.Capture(select_device['uid'])
    else:
        lg.info('using opencv as camera backend')
        self.cap = cv2.VideoCapture(uid_or_device_index)
        #self.cap.set(cv2.CAP_PROP_EXPOSURE, 30)
        assert self.cap.isOpened(), 'can not open {}'.format(uid_or_device_index)
def __init__(self, name=None):
    QQuickImageProvider.__init__(self, QQuickImageProvider.Pixmap)
    QObject.__init__(self)
    self.__image = self.to_QPixmap(cv2.imread("../UI/imgs/test.jpg"))
    self.__np_img = None
    self.name = name
    self.capturing = Value('i', 0)
    self.dev_list = uvc.device_list()
    self.fps_res = {}
    self.modes = {}
    self.mode = None          # --> subclassed property
    self.shared_array = None  # --> subclassed property
    self.source = None
    self.cap = None
    self.pipe, self.child = Pipe()
    self.cam_process = None
    self.vid_process = None
    self.cam_thread = None
    self.paused = False
    self.gamma = 1.0
    self.color = True
def init_gui(self, sidebar):
    # let's define some helper functions:
    def gui_load_defaults():
        for c in self.capture.controls:
            try:
                c.value = c.def_val
            except:
                pass

    def gui_update_from_device():
        for c in self.capture.controls:
            c.refresh()

    def gui_init_cam_by_uid(requested_id):
        for cam in uvc.device_list():
            if cam["uid"] == requested_id:
                self.re_init(requested_id)
                return
        logger.warning("could not reinit capture, src_id not valid anymore")
        return

    # create the menu entry
    self.menu = ui.Growing_Menu(label="Camera Settings")
    cameras = uvc.device_list()
    camera_names = [c["name"] for c in cameras]
    camera_ids = [c["uid"] for c in cameras]
    self.menu.append(
        ui.Selector(
            "uid",
            self,
            selection=camera_ids,
            labels=camera_names,
            label="Capture Device",
            setter=gui_init_cam_by_uid,
        )
    )

    # hardware_ts_switch = ui.Switch('use_hw_ts',self,label='use hardware timestamps')
    # hardware_ts_switch.read_only = True
    # self.menu.append(hardware_ts_switch)

    sensor_control = ui.Growing_Menu(label="Sensor Settings")
    sensor_control.collapsed = False
    image_processing = ui.Growing_Menu(label="Image Post Processing")
    image_processing.collapsed = True

    sensor_control.append(
        ui.Selector("frame_rate", self, selection=self.capture.frame_rates, label="Frames per second")
    )

    for control in self.capture.controls:
        c = None
        ctl_name = control.display_name

        # now we add controls
        if control.d_type == bool:
            c = ui.Switch("value", control, label=ctl_name, on_val=control.max_val, off_val=control.min_val)
        elif control.d_type == int:
            c = ui.Slider(
                "value", control, label=ctl_name, min=control.min_val, max=control.max_val, step=control.step
            )
        elif type(control.d_type) == dict:
            selection = [value for name, value in control.d_type.iteritems()]
            labels = [name for name, value in control.d_type.iteritems()]
            c = ui.Selector("value", control, label=ctl_name, selection=selection, labels=labels)
        else:
            pass
        # if control['disabled']:
        #     c.read_only = True
        # if ctl_name == 'Exposure, Auto Priority':
        #     # the control should always be off. we set it to 0 on init (see above)
        #     c.read_only = True
        if c is not None:
            if control.unit == "processing_unit":
                image_processing.append(c)
            else:
                sensor_control.append(c)

    self.menu.append(sensor_control)
    self.menu.append(image_processing)
    self.menu.append(ui.Button("refresh", gui_update_from_device))
    self.menu.append(ui.Button("load defaults", gui_load_defaults))
    self.sidebar = sidebar
    # add below general settings
    self.sidebar.insert(1, self.menu)
def __init__(self):
    rospy.init_node('{}_node'.format(DEFAULT_CAMERA_NAME), argv=sys.argv)
    self.image_topic = rospy.get_param('~image', DEFAULT_IMAGE_TOPIC)
    self.camera_topic = rospy.get_param('~camera', DEFAULT_CAMERA_TOPIC)
    self.calibration = rospy.get_param('~calibration', '')
    self.encoding = rospy.get_param('~encoding', 'mono8')
    self.width = rospy.get_param('~width', DEFAULT_WIDTH)
    self.height = rospy.get_param('~height', DEFAULT_HEIGHT)
    self.fps = rospy.get_param('~fps', DEFAULT_FPS)
    self.cam_prod_id = rospy.get_param('~cam_prod_id', 9760)
    self.contrast = rospy.get_param('~contrast', 0)
    self.sharpness = rospy.get_param('~sharpness', 1)
    self.gamma = rospy.get_param('~gamma', 80)
    self.exposure_abs = rospy.get_param('~exposure', 50)
    self.brightness = rospy.get_param('~brightness', 1)
    self.hue = rospy.get_param('~hue', 0)
    self.saturation = rospy.get_param('~saturation', 0)
    self.pwr_line_freq = rospy.get_param('~pwr_line_freq', 1)
    self.backlight_comp = rospy.get_param('~backlight_comp', 0)
    self.auto_exposure = rospy.get_param('~auto_exposure', 8)
    self.auto_exp_prio = rospy.get_param('~auto_exp_prio', 0)
    self.white_bal_auto = rospy.get_param('~white_bal_auto', True)
    self.white_bal = rospy.get_param('~white_bal', 4600)
    # possible values to set can be found by running
    #   rosrun uvc_camera uvc_camera_node _device:=/dev/video0
    # see http://github.com/ros-drivers/camera_umd.git

    dev_list = uvc.device_list()
    for i in range(0, len(dev_list)):
        print(dev_list[i])
        rospy.loginfo('available device %i: idProd: %i - comparing to idProd: %i'
                      % (i, int(dev_list[i]["idProduct"]), self.cam_prod_id))
        if i == self.cam_prod_id:  # int(dev_list[i]["idProduct"]) == self.cam_prod_id:
            rospy.loginfo("connecting to camera idProd: %i, device %i" % (self.cam_prod_id, i))
            self.cap = uvc.Capture(dev_list[i]["uid"])
            # self.cap.set(CV_CAP_PROP_CONVERT_RGB, false)
            rospy.loginfo("successfully connected to camera %i" % i)

    rospy.loginfo('starting cam at %ifps with %ix%i resolution' % (self.fps, self.width, self.height))
    self.cap.frame_mode = (self.width, self.height, self.fps)
    frame = self.cap.get_frame_robust()

    self.controls_dict = dict([(c.display_name, c) for c in self.cap.controls])
    self.controls_dict['Brightness'].value = self.brightness  # [-64,64], not 0 (no effect)!
    self.controls_dict['Contrast'].value = self.contrast                      # [0,95]
    self.controls_dict['Hue'].value = self.hue                                # [-2000,2000]
    self.controls_dict['Saturation'].value = self.saturation                  # [0,100]
    self.controls_dict['Sharpness'].value = self.sharpness                    # [1,100]
    self.controls_dict['Gamma'].value = self.gamma                            # [80,300]
    self.controls_dict['Power Line frequency'].value = self.pwr_line_freq     # 1: 50Hz, 2: 60Hz
    self.controls_dict['Backlight Compensation'].value = self.backlight_comp  # True or False
    self.controls_dict['Absolute Exposure Time'].value = self.exposure_abs    # [78,10000], requires Auto Exposure Mode set to 1
    self.controls_dict['Auto Exposure Mode'].value = self.auto_exposure       # 1: manual, 8: aperture priority
    self.controls_dict['Auto Exposure Priority'].value = self.auto_exp_prio
    self.controls_dict['White Balance temperature,Auto'].value = self.white_bal_auto
    self.controls_dict['White Balance temperature'].value = self.white_bal    # [2800,6500]

    rospy.loginfo("These camera settings will be applied:")
    for c in self.cap.controls:
        rospy.loginfo('%s: %i' % (c.display_name, c.value))

    self.manager = CameraInfoManager(cname=DEFAULT_CAMERA_NAME,
                                     url='file://' + self.calibration,
                                     namespace=DEFAULT_CAMERA_NAME)
    self.manager.loadCameraInfo()  # Needs to be called before getter!
    self.camera_info = self.manager.getCameraInfo()
    self.bridge = CvBridge()
    self.image_publisher = rospy.Publisher(self.image_topic, Image, queue_size=1)
    self.camera_publisher = rospy.Publisher(self.camera_topic, CameraInfo, queue_size=1)
    self.seq = 0
    self.counter = 0
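The control names hard-coded above ('Brightness', 'Gamma', 'Absolute Exposure Time', ...) and their valid ranges vary from camera to camera, so it can help to enumerate what a device actually exposes before applying values. A small sketch, assuming only the pyuvc control attributes already used in these examples (display_name, value, min_val, max_val); it is not part of the original node:

# Hedged sketch: list the UVC controls of the first enumerated camera.
import uvc

dev_list = uvc.device_list()
cap = uvc.Capture(dev_list[0]["uid"])
for c in cap.controls:
    # min_val/max_val may not apply to every control type, so read them defensively
    print(c.display_name, c.value, getattr(c, 'min_val', None), getattr(c, 'max_val', None))
cap.close()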
from __future__ import print_function

import uvc
import logging

logging.basicConfig(level=logging.INFO)

dev_list = uvc.device_list()
print(dev_list)
cap = uvc.Capture(dev_list[0]["uid"])

# Uncomment the following lines to configure the Pupil 200Hz IR cameras:
# controls_dict = dict([(c.display_name, c) for c in cap.controls])
# controls_dict['Auto Exposure Mode'].value = 1
# controls_dict['Gamma'].value = 200

print(cap.avaible_modes)
for x in range(10):
    print(x)
    cap.frame_mode = (640, 480, 30)
    for x in range(100):
        frame = cap.get_frame_robust()
        print(frame.img.shape)
        # cv2.imshow("img", frame.gray)
        # cv2.waitKey(1)
cap = None
def init_gui(self, sidebar):
    # let's define some helper functions:
    def gui_load_defaults():
        for c in self.capture.controls:
            try:
                c.value = c.def_val
            except:
                pass

    def set_size(new_size):
        self.frame_size = new_size
        menu_conf = self.menu.configuration
        self.deinit_gui()
        self.init_gui(self.sidebar)
        self.menu.configuration = menu_conf

    def gui_update_from_device():
        for c in self.capture.controls:
            c.refresh()

    def gui_init_cam_by_uid(requested_id):
        if requested_id is None:
            self.re_init_capture(None)
        else:
            for cam in uvc.device_list():
                if cam['uid'] == requested_id:
                    if is_accessible(requested_id):
                        self.re_init_capture(requested_id)
                    else:
                        logger.error("The selected Camera is already in use or blocked.")
                    return
            logger.warning("could not reinit capture, src_id not valid anymore")
        return

    # create the menu entry
    self.menu = ui.Growing_Menu(label='Camera Settings')
    cameras = uvc.device_list()
    camera_names = ['Fake Capture'] + [c['name'] for c in cameras]
    camera_ids = [None] + [c['uid'] for c in cameras]
    self.menu.append(ui.Selector('uid', self, selection=camera_ids, labels=camera_names,
                                 label='Capture device', setter=gui_init_cam_by_uid))

    sensor_control = ui.Growing_Menu(label='Sensor Settings')
    sensor_control.append(ui.Info_Text("Do not change these during calibration or recording!"))
    sensor_control.collapsed = False
    image_processing = ui.Growing_Menu(label='Image Post Processing')
    image_processing.collapsed = True

    sensor_control.append(ui.Selector('frame_size', self, setter=set_size,
                                      selection=self.capture.frame_sizes, label='Resolution'))
    sensor_control.append(ui.Selector('frame_rate', self,
                                      selection=self.capture.frame_rates, label='Frame rate'))

    for control in self.capture.controls:
        c = None
        ctl_name = control.display_name

        # now we add controls
        if control.d_type == bool:
            c = ui.Switch('value', control, label=ctl_name, on_val=control.max_val, off_val=control.min_val)
        elif control.d_type == int:
            c = ui.Slider('value', control, label=ctl_name, min=control.min_val, max=control.max_val, step=control.step)
        elif type(control.d_type) == dict:
            selection = [value for name, value in control.d_type.iteritems()]
            labels = [name for name, value in control.d_type.iteritems()]
            c = ui.Selector('value', control, label=ctl_name, selection=selection, labels=labels)
        else:
            pass
        # if control['disabled']:
        #     c.read_only = True
        # if ctl_name == 'Exposure, Auto Priority':
        #     # the control should always be off. we set it to 0 on init (see above)
        #     c.read_only = True
        if c is not None:
            if control.unit == 'processing_unit':
                image_processing.append(c)
            else:
                sensor_control.append(c)

    self.menu.append(sensor_control)
    if image_processing.elements:
        self.menu.append(image_processing)
    self.menu.append(ui.Button("refresh", gui_update_from_device))
    self.menu.append(ui.Button("load defaults", gui_load_defaults))
    self.sidebar = sidebar
    # add below general settings
    self.sidebar.insert(1, self.menu)
error = "camera not found on bus %i and ports %s\n" % (bus, str(ports)) error += "avalable cameras:\n" error += pprint.pformat(uvc.device_list()) self.bus.report_error(error) print(error, file=sys.stderr) def start(self): self.input_thread.start() def join(self, timeout=None): self.input_thread.join(timeout=timeout) def run_input(self): if self.cap is None: return while self.bus.is_alive(): frame = self.cap.get_frame_robust() self.bus.publish('jpg', bytes(frame.jpeg_buffer)) self.bus.sleep(self.sleep) self.cap.close() def request_stop(self): self.bus.shutdown() if __name__ == "__main__": from pprint import pprint pprint(uvc.device_list()) # vim: expandtab sw=4 ts=4
def init_gui(self, sidebar):
    # let's define some helper functions:
    def gui_load_defaults():
        for c in self.capture.controls:
            try:
                c.value = c.def_val
            except:
                pass

    def set_size(new_size):
        self.frame_size = new_size
        menu_conf = self.menu.configuration
        self.deinit_gui()
        self.init_gui(self.sidebar)
        self.menu.configuration = menu_conf

    def gui_update_from_device():
        for c in self.capture.controls:
            c.refresh()

    def gui_init_cam_by_uid(requested_id):
        if requested_id is None:
            self.re_init_capture(None)
        else:
            for cam in uvc.device_list():
                if cam['uid'] == requested_id:
                    if is_accessible(requested_id):
                        self.re_init_capture(requested_id)
                    else:
                        logger.error("The selected Camera is already in use or blocked.")
                    return
            logger.warning("could not reinit capture, src_id not valid anymore")
        return

    # create the menu entry
    self.menu = ui.Growing_Menu(label='Camera Settings')
    cameras = uvc.device_list()
    camera_names = ['Fake Capture'] + [c['name'] for c in cameras]
    camera_ids = [None] + [c['uid'] for c in cameras]
    self.menu.append(
        ui.Selector('uid', self, selection=camera_ids, labels=camera_names,
                    label='Capture Device', setter=gui_init_cam_by_uid))

    sensor_control = ui.Growing_Menu(label='Sensor Settings')
    sensor_control.append(ui.Info_Text("Do not change these during calibration or recording!"))
    sensor_control.collapsed = False
    image_processing = ui.Growing_Menu(label='Image Post Processing')
    image_processing.collapsed = True

    sensor_control.append(
        ui.Selector('frame_size', self, setter=set_size,
                    selection=self.capture.frame_sizes, label='Resolution'))
    sensor_control.append(
        ui.Selector('frame_rate', self,
                    selection=self.capture.frame_rates, label='Framerate'))

    for control in self.capture.controls:
        c = None
        ctl_name = control.display_name

        # now we add controls
        if control.d_type == bool:
            c = ui.Switch('value', control, label=ctl_name, on_val=control.max_val, off_val=control.min_val)
        elif control.d_type == int:
            c = ui.Slider('value', control, label=ctl_name, min=control.min_val, max=control.max_val, step=control.step)
        elif type(control.d_type) == dict:
            selection = [value for name, value in control.d_type.iteritems()]
            labels = [name for name, value in control.d_type.iteritems()]
            c = ui.Selector('value', control, label=ctl_name, selection=selection, labels=labels)
        else:
            pass
        # if control['disabled']:
        #     c.read_only = True
        # if ctl_name == 'Exposure, Auto Priority':
        #     # the control should always be off. we set it to 0 on init (see above)
        #     c.read_only = True
        if c is not None:
            if control.unit == 'processing_unit':
                image_processing.append(c)
            else:
                sensor_control.append(c)

    self.menu.append(sensor_control)
    if image_processing.elements:
        self.menu.append(image_processing)
    self.menu.append(ui.Button("refresh", gui_update_from_device))
    self.menu.append(ui.Button("load defaults", gui_load_defaults))
    self.sidebar = sidebar
    # add below general settings
    self.sidebar.insert(1, self.menu)
def init_gui(self, sidebar):
    # let's define some helper functions:
    #def gui_load_defaults():
    #    for c in self.capture.controls:
    #        try:
    #            c.value = c.def_val
    #        except:
    #            pass

    #def gui_update_from_device():
    #    for c in self.capture.controls:
    #        c.refresh()

    def gui_init_cam_by_uid(requested_id):
        for cam in uvc.device_list():
            if cam['uid'] == requested_id:
                self.re_init(requested_id)
                return
        logger.warning("could not reinit capture, src_id not valid anymore")
        return

    # create the menu entry
    self.menu = ui.Growing_Menu(label='Camera Settings')
    cameras = uvc.device_list()
    camera_names = [c['name'] for c in cameras]
    camera_ids = [c['uid'] for c in cameras]
    self.menu.append(
        ui.Selector('uid', self, selection=camera_ids, labels=camera_names,
                    label='Capture Device', setter=gui_init_cam_by_uid))

    # hardware_ts_switch = ui.Switch('use_hw_ts',self,label='use hardware timestamps')
    # hardware_ts_switch.read_only = True
    # self.menu.append(hardware_ts_switch)

    sensor_control = ui.Growing_Menu(label='Sensor Settings')
    sensor_control.collapsed = False
    image_processing = ui.Growing_Menu(label='Image Post Processing')
    image_processing.collapsed = True

    #sensor_control.append(ui.Selector('frame_rate',self, selection=self.capture.frame_rates,label='Frames per second'))
    #for control in self.capture.controls:
    #    c = None
    #    ctl_name = control.display_name
    #    # now we add controls
    #    if control.d_type == bool:
    #        c = ui.Switch('value',control,label=ctl_name, on_val=control.max_val, off_val=control.min_val)
    #    elif control.d_type == int:
    #        c = ui.Slider('value',control,label=ctl_name,min=control.min_val,max=control.max_val,step=control.step)
    #    elif type(control.d_type) == dict:
    #        selection = [value for name,value in control.d_type.iteritems()]
    #        labels = [name for name,value in control.d_type.iteritems()]
    #        c = ui.Selector('value',control, label=ctl_name, selection=selection,labels=labels)
    #    else:
    #        pass
    #    # if control['disabled']:
    #    #     c.read_only = True
    #    # if ctl_name == 'Exposure, Auto Priority':
    #    #     # the control should always be off. we set it to 0 on init (see above)
    #    #     c.read_only = True
    #    if c is not None:
    #        if control.unit == 'processing_unit':
    #            image_processing.append(c)
    #        else:
    #            sensor_control.append(c)

    self.menu.append(sensor_control)
    self.menu.append(image_processing)
    #self.menu.append(ui.Button("refresh",gui_update_from_device))
    #self.menu.append(ui.Button("load defaults",gui_load_defaults))
    self.sidebar = sidebar
    # add below general settings
    self.sidebar.insert(1, self.menu)
def run(self):
    self.running = True
    while self.running:
        world_capture = None
        eye_capture = None
        try:
            # Setup camera captures
            dev_list = uvc.device_list()
            world_capture = uvc.Capture(dev_list[1]["uid"])
            eye_capture = uvc.Capture(dev_list[0]["uid"])

            # Setup resolutions
            width = 1280
            height = 720
            self.cam_center = (int(width / 2), int(height / 2))
            world_capture.frame_mode = (width, height, 60)
            eye_capture.frame_mode = (640, 480, 60)

            # Aruco
            self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            self.aruco_params = aruco.DetectorParameters_create()

            # Initialize variables
            last_pupil_count = 1
            eye_closed_start = None
            selected_marker_id = -1

            while self.running:
                # Blink detection
                eye_frame = eye_capture.get_frame(1)
                eye_bgr = eye_frame.bgr
                eye_gray = eye_frame.gray
                pupil_count = self.process_eye_frame(eye_bgr, eye_gray)
                cv2.imshow('eye_frame', eye_bgr)
                if last_pupil_count == 1 and pupil_count == 0:
                    last_pupil_count = 0
                    eye_closed_start = time.time()
                elif last_pupil_count == 0 and pupil_count >= 1:
                    last_pupil_count = 1
                    t = time.time() - eye_closed_start
                    if t > 0.42:
                        self.blink_action(selected_marker_id)

                # Marker detection
                world_frame = world_capture.get_frame(1)
                world_bgr = world_frame.bgr
                world_gray = world_frame.gray
                selected_marker = self.process_world_frame(world_bgr, world_gray)
                cv2.imshow('world_frame', world_bgr)
                if pupil_count == 1:
                    curr_id = selected_marker[0] if selected_marker is not None else -1
                    if curr_id != selected_marker_id:
                        selected_marker_id = curr_id
                        self.select_action(selected_marker_id)

                # Exit if the user presses 'q'
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    self.running = False
        except KeyboardInterrupt:
            self.running = False
        except Exception:
            cv2.destroyAllWindows()
            if world_capture is not None:
                world_capture.close()
            if eye_capture is not None:
                eye_capture.close()
            #self.retry_dialog(traceback.format_exc())
            input('Press ENTER to restart...')
    cv2.destroyAllWindows()
def read_inputs(self):
    dev_list = uvc.device_list()
    for i in range(len(dev_list)):
        name = dev_list[i]['name']
        self.cameras[i] = name
def device_list():
    """
    Returns a list of currently available devices.
    :return: List containing currently available devices
    """
    return uvc.device_list()
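Each entry returned by uvc.device_list() is a dict; the keys used across these examples include 'name', 'uid', 'bus_number', 'bus_ports', 'idVendor', 'idProduct' and 'serialNumber'. A small sketch, picking a device by name and falling back to the first one; the camera name is illustrative:

# Hedged sketch using the thin device_list() wrapper defined above.
import uvc

devices = device_list()
wanted = "Pupil Cam1 ID2"  # illustrative name, adjust for your hardware
match = next((d for d in devices if d['name'] == wanted), None)
chosen = match if match is not None else (devices[0] if devices else None)
if chosen is not None:
    cap = uvc.Capture(chosen['uid'])
    print(cap.name, cap.avaible_modes)
    cap.close()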
try:
    from multiprocessing import Process, forking_enable, freeze_support
except ImportError:
    try:
        # python3
        from multiprocessing import Process, set_start_method, freeze_support

        def forking_enable(_):
            set_start_method('spawn')
    except ImportError:
        # python2 macos
        from billiard import Process, forking_enable, freeze_support

import numpy as np

dev_list = uvc.device_list()


def test_cap(i, mode=(640, 480, 30), format='bgr', bandwidth_factor=1.3):
    print("started cap: %s\n" % i)
    cap = uvc.Capture(dev_list[i]['uid'])
    # cap.print_info()
    cap.bandwidth_factor = bandwidth_factor
    cap.frame_mode = mode
    title = cap.name + ' - ' + str(mode) + ' - ' + format
    ts = time()
    while True:
        frame = cap.get_frame_robust()
        print("%s - %s" % (title, time() - ts))
        ts = time()
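A hedged sketch of how test_cap above might be driven: one process per detected camera. This is an assumption based on the Process/forking_enable/freeze_support imports at the top of the example, not code from the original project; note that each worker streams frames indefinitely until interrupted.

if __name__ == '__main__':
    freeze_support()
    forking_enable(0)  # the python3 shim above ignores the argument and selects 'spawn'
    procs = [Process(target=test_cap, args=(i,)) for i in range(len(dev_list))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()  # workers loop forever; stop them with Ctrl+C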
def main():
    # Begin capturing video. You can modify which video source to use with VideoCapture's argument.
    # It's currently set to be your webcam.

    # print device list
    dev_list = uvc.device_list()
    print(dev_list)
    worldCapture = uvc.Capture(dev_list[1]["uid"])
    eyeCapture = uvc.Capture(dev_list[0]["uid"])

    # Resolution
    width = 1280
    height = 720
    midX = int(width / 2)
    midY = int(height / 2)
    worldCapture.frame_mode = (width, height, 60)
    eyeCapture.frame_mode = (640, 480, 60)

    # Blink detection variables
    eyeState = 1
    eyeTime = None

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    while True:
        # To quit this program press q.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

        frame = worldCapture.get_frame_robust()

        # Eye tracking stuff per eyeFrame
        eyeFrame = eyeCapture.get_frame_robust()
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        retval, thresholded = cv2.threshold(eyeFrame.gray, 40, 255, 0)
        cv2.imshow("threshold", thresholded)

        closed = cv2.erode(cv2.dilate(thresholded, kernel, iterations=1), kernel, iterations=1)
        cv2.imshow("closed", closed)

        thresholded, contours, hierarchy = cv2.findContours(closed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        drawing = np.copy(eyeFrame.bgr)
        cv2.drawContours(drawing, contours, -1, (255, 0, 0), 2)

        i = 0
        # Calc and draw contours
        for contour in contours:
            area = cv2.contourArea(contour)
            if area < 100:
                continue
            bounding_box = cv2.boundingRect(contour)
            extend = area / (bounding_box[2] * bounding_box[3])

            # reject the contours with big extend
            if extend > 0.8:
                continue

            i += 1

            # calculate contour center and draw a dot there
            m = cv2.moments(contour)
            if m['m00'] != 0:
                center = (int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))
                cv2.circle(drawing, center, 3, (0, 255, 0), -1)

            # fit an ellipse around the contour and draw it into the image
            try:
                ellipse = cv2.fitEllipse(contour)
                cv2.ellipse(drawing, box=ellipse, color=(0, 255, 0))
            except:
                pass

        if eyeState == 1 and i == 0:
            eyeState = 0
            eyeTime = time.time()
        elif eyeState == 0 and i == 1:
            eyeState = 1
            t = time.time() - eyeTime
            if t > 0.3:
                print("BLINK DETECTION!")
                sock.sendto('SELECT'.encode(), ('127.0.0.1', 8898))

        # show the frame
        cv2.imshow("Drawing", drawing)

        image_gray = np.array(frame.gray)
        image = np.array(frame.bgr)
        qr_codes = decode(image)

        # Draw the center
        cv2.rectangle(image, (midX - 1, midY - 1), (midX + 1, midY + 1), (255, 255, 255), 3)

        # Pupil tracking
        # QR Code processing
        if len(qr_codes) > 0:
            qrs = []
            for qr in qr_codes:
                tmp = qr.rect
                cv2.rectangle(image, (tmp.left, tmp.top),
                              (tmp.left + tmp.width, tmp.top + tmp.height), (255, 0, 0), 3)

                # Draw the middle of the qr code
                qrX = tmp.left + int(tmp.width / 2)
                qrY = tmp.top + int(tmp.height / 2)
                cv2.rectangle(image, (qrX - 1, qrY - 1), (qrX + 1, qrY + 1), (255, 255, 255), 3)

                distance = round(math.hypot(qrX - midX, qrY - midY))
                print(qr)
                qrs.append((distance, qr))

            selected = sorted(qrs, key=lambda x: x[0])[0][1]
            print(selected.data)

            # Send color data of the selected qr code to helm.
            qr_values = str(selected.data).split(',')
            sock.sendto(qr_values[1].encode(), ('127.0.0.1', 8899))
            sock.sendto(qr_values[0].encode(), ('127.0.0.1', 8898))
            print(selected)
            cv2.rectangle(image, (selected.rect.left, selected.rect.top),
                          (selected.rect.left + selected.rect.width, selected.rect.top + selected.rect.height),
                          (0, 255, 0), 3)

        # Displays the current frame
        cv2.imshow('Current', image)