Esempio n. 1
0
    def __init__(self, g_pool, frame_size, frame_rate, name=None, preferred_names=(), uid=None, uvc_controls={}):
        """Open a UVC capture device by uid or by (partial) name.

        Tries `uid` directly when given; otherwise scans connected devices for a
        substring match against `name`/`preferred_names`. Falls back to "ghost
        mode" (no images supplied) if nothing could be opened.
        """
        import platform

        super().__init__(g_pool)
        self.uvc_capture = None
        self._restart_in = 3
        # at least one way to identify the device must be provided
        assert name or preferred_names or uid

        if platform.system() == 'Windows':
            self.verify_drivers()

        self.devices = uvc.Device_List()

        devices_by_name = {dev['name']: dev for dev in self.devices}

        # if uid is supplied we init with that
        if uid:
            try:
                self.uvc_capture = uvc.Capture(uid)
            except uvc.OpenError:
                # BUG FIX: report the uid that was tried, not preferred_names
                # (which may be empty on this code path)
                logger.warning("No available camera found that matched {}".format(uid))
            except uvc.InitError:
                logger.error("Camera failed to initialize.")
            except uvc.DeviceNotFoundError:
                logger.warning("No camera found that matched {}".format(uid))

        # otherwise we use name or preferred_names
        else:
            if name:
                preferred_names = (name,)
            assert preferred_names

            # try to init by name (substring match against connected devices)
            for name in preferred_names:
                for d_name in devices_by_name.keys():
                    if name in d_name:
                        uid_for_name = devices_by_name[d_name]['uid']
                        try:
                            self.uvc_capture = uvc.Capture(uid_for_name)
                        except uvc.OpenError:
                            logger.info("{} matches {} but is already in use or blocked.".format(uid_for_name, name))
                        except uvc.InitError:
                            logger.error("Camera failed to initialize.")
                        else:
                            break
                # BUG FIX: stop scanning once a capture was acquired; previously
                # only the inner loop broke, so a later preferred name could
                # open a second device and overwrite self.uvc_capture
                if self.uvc_capture:
                    break

        # check if we were successful
        if not self.uvc_capture:
            logger.error("Init failed. Capture is started in ghost mode. No images will be supplied.")
            self.name_backup = preferred_names
            self.frame_size_backup = frame_size
            self.frame_rate_backup = frame_rate
            self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
        else:
            self.configure_capture(frame_size, frame_rate, uvc_controls)
            self.name_backup = (self.name,)
            self.frame_size_backup = frame_size
            self.frame_rate_backup = frame_rate
Esempio n. 2
0
    def __init__(self,
                 g_pool,
                 frame_size,
                 frame_rate,
                 name=None,
                 preferred_names=None,
                 uid=None,
                 uvc_controls={},
                 **settings):
        """Open a UVC capture device by uid or by exact device name.

        Raises:
            AvalilabeDeviceNotFoundError: no matching or available camera.
            InitialisationError: a camera was found but failed to initialize.
        """
        super(UVC_Source, self).__init__(g_pool)
        self.uvc_capture = None
        self.devices = uvc.Device_List()
        devices_by_name = {dev['name']: dev for dev in self.devices}
        # (removed the unused devices_by_uid mapping)

        #if uid is supplied we init with that
        if uid:
            try:
                self.uvc_capture = uvc.Capture(uid)
            except uvc.OpenError:
                # BUG FIX: report the uid that failed, not preferred_names
                # (which may be None on this code path)
                raise AvalilabeDeviceNotFoundError(
                    "No avalilable camera found that matched %s" % uid)
            except uvc.InitError:
                raise InitialisationError("Camera failed to initialize.")
            except uvc.DeviceNotFoundError:
                raise AvalilabeDeviceNotFoundError(
                    "No camera found that matched %s" % uid)

        #otherwise we use name or preferred_names
        else:
            if name:
                preferred_names = [name]

            assert preferred_names

            #try to init by name (exact match against connected devices)
            for name in preferred_names:
                if name in devices_by_name:
                    uid_for_name = devices_by_name[name]['uid']
                    try:
                        self.uvc_capture = uvc.Capture(uid_for_name)
                    except uvc.OpenError:
                        logger.info(
                            "%s matches %s but is already in use or blocked." %
                            (uid_for_name, name))
                    except uvc.InitError:
                        raise InitialisationError(
                            "Camera failed to initialize.")
                    else:
                        break
            #check if we were successful
            if not self.uvc_capture:
                raise AvalilabeDeviceNotFoundError(
                    "No avalilable camera found that matched %s" %
                    preferred_names)
        self.update_capture(frame_size, frame_rate, uvc_controls)
Esempio n. 3
0
    def __init__(self, name):
        """Create the image-serving device: declare process variables, open the
        first detected UVC camera, and start the acquisition state machine.

        Note: All class members, which are not process variables should
        be prefixed with _
        """
        # initial image; the height, width and number of planes may be approximate
        h, w, p = 4, 3, 3
        image = np.arange(h * w * p).astype('uint8').reshape(h, w, p)

        pars = {
          'count':  LDO('R','Image count', [0]),
          'image':  LDO('R','Image', image),
          'sleep':  LDO('RWE','Sleep time between image acquisitions',[1.],
            units='s', opLimits=(0.02,10)),
          'shape':  LDO('R','Frame shape Y,X,Planes', [0,0,0]),
          'fps':    LDO('R','Frames/s', [0]),
          'imgps':  LDO('R','Images/s', [0], units='img/s'),
          'subscribe': LDO('RWE','Subscribe to image', ['On'],legalValues\
            = ['On','Off']),
        }
        super().__init__(name, pars)

        # open the first camera found
        dev_list = uvc.device_list()
        print(dev_list)
        self._cap = uvc.Capture(dev_list[0]["uid"])
        print(f'available modes: {self._cap.avaible_modes}')
        frame_mode = (640, 480, 30)
        try:
            # BUG FIX: apply the named mode instead of repeating the literal,
            # so the error message below always matches what was attempted
            self._cap.frame_mode = frame_mode
            #self._cap.frame_mode = (960, 720, 15)
            self.fps.value[0] = self._cap.frame_mode[2]
        except Exception as e:
            printw(f'Failed to setup frame mode {frame_mode}: {e}')

        # background acquisition loop; non-daemon so it keeps the process alive
        thread = threading.Thread(target=self._state_machine)
        thread.daemon = False
        thread.start()
Esempio n. 4
0
def test_cap(i, mode=(640, 480, 30), format='bgr', bandwidth_factor=1.3):
    """Open device `i` from the module-level dev_list and print per-frame
    latency in a loop.

    When USE_CV2 is set, also shows a live preview; pressing ESC (27) exits.
    Without USE_CV2 the loop runs until interrupted.
    """
    print("started cap: %s\n" % i)
    cap = uvc.Capture(dev_list[i]['uid'])
    # cap.print_info()
    cap.bandwidth_factor = bandwidth_factor
    cap.frame_mode = mode

    title = cap.name + ' - ' + str(mode) + ' - ' + format
    ts = time()
    while True:
        frame = cap.get_frame_robust()
        print("%s - %s" % (title, time() - ts))
        ts = time()

        if USE_CV2:
            # BUG FIX: `data` was unbound for formats other than bgr/gray,
            # crashing cv2.imshow with NameError; default to bgr
            if format == 'gray':
                data = frame.gray
            else:
                data = frame.bgr

            cv2.imshow(title, data)
            k = cv2.waitKey(1)
            if k == 27:
                break

    # BUG FIX: release the device handle explicitly instead of relying on GC
    cap.close()
Esempio n. 5
0
    def __init__(self, config, bus):
        """Locate the camera on the configured USB bus/ports and open it.

        config keys: 'bus' (USB bus number), 'ports' (USB port chain),
        optional 'sleep' (poll interval in seconds, default 0.1).
        `bus` (the parameter) is the application message bus used for
        error reporting — distinct from the USB bus number in config.
        """
        self.input_thread = Thread(target=self.run_input, daemon=True)
        self.bus = bus

        # BUG FIX (readability): the USB bus number no longer shadows the
        # message-bus parameter stored in self.bus above
        usb_bus = config['bus']
        ports = config['ports']
        # to find proper bus and ports numbers run
        # python -c "import uvc; from pprint import pprint; pprint(uvc.device_list())"

        self.sleep = config.get('sleep', 0.1)
        self.cap = None

        dev_list = uvc.device_list()
        for dev in dev_list:
            if dev['bus_number'] == usb_bus and dev['bus_ports'] == ports:
                self.cap = uvc.Capture(dev["uid"])
                self.cap.frame_mode = self.cap.avaible_modes[0]
                self.cap.bandwidth_factor = 1.2
                # when incomplete pictures are being received, increase bandwidth_factor
                # when "no space left on device" error, decrease bandwidth_factor
                # for more info see https://docs.pupil-labs.com/#jpeg-size-estimation-and-custom-video-backends
                return

        # no matching device: report on the message bus and stderr
        import pprint
        error = "camera not found on bus %i and ports %s\n" % (usb_bus, str(ports))
        error += "avalable cameras:\n"
        error += pprint.pformat(uvc.device_list())
        self.bus.report_error(error)
        print(error, file=sys.stderr)
Esempio n. 6
0
 def run(self):
     """Capture loop: grab frames, apply gamma/black-white/flip adjustments,
     and publish each image to shared memory until a "stop" message arrives
     or too many consecutive failures occur."""
     self.capturing.value = 1
     dev_list = uvc.device_list()
     cap = uvc.Capture(dev_list[self.source]['uid'])
     self.__setup_eye_cam(cap)
     cap.frame_mode = self.mode
     # give up after `attempts` consecutive failures; a good frame resets the counter
     attempt, attempts = 0, 6
     gamma, color, flip = 1, True, False
     while attempt < attempts:
         try:
             # 2.0 s timeout on frame acquisition
             frame = cap.get_frame(2.0)
             img   = self.__adjust_gamma(frame.bgr, gamma)
             img   = self.__cvtBlackWhite(img, color)
             img   = self.__flip_img(img, flip)
             if img is not None:
                 attempt = 0
                 shared_img = self.__get_shared_np_array(img)
                 np.copyto(shared_img, img)
         except Exception as e:
             # any capture/processing failure: reset the camera and count the attempt
             traceback.print_exc()
             cap = self.__reset_mode(cap)
             attempt += 1
         # control channel: "stop" ends the loop; parameter commands are
         # followed by a second message carrying the new value
         if self.pipe.poll():
             msg = self.pipe.recv()
             if msg == "stop":
                 break
             elif msg == "gamma":
                 gamma = self.pipe.recv()
             elif msg == "color":
                 color = self.pipe.recv()
             elif msg == "flip":
                 flip = self.pipe.recv()
     self.capturing.value = 0
     print("camera", self.source, "closed")
Esempio n. 7
0
    def __init__(self, uvc_id):
        """Bridge a local UVC camera onto the network: open the capture,
        join the pyre group, and bind the notification/data/command sockets."""
        super(Bridge, self).__init__()

        # sequence counters for outgoing data and notification messages
        self.data_seq = 0
        self.note_seq = 0

        # init capture
        self.cap = uvc.Capture(uvc_id)
        logger.info('Initialised uvc device {}'.format(self.cap.name))

        # init pyre: peer name is hostname plus the camera-name suffix,
        # joined to the shared GROUP
        self.network = Pyre(socket.gethostname() + self.cap.name[-4:])
        self.network.join(GROUP)
        self.network.start()
        logger.info('Bridging under "{}"'.format(self.network.name()))

        # init sensor sockets: bind to wildcard ports, advertised under the
        # pyre endpoint's public address
        ctx = zmq.Context()
        generic_url = 'tcp://*:*'
        public_ep = self.network.endpoint()
        self.note, self.note_url = self.bind(ctx, zmq.PUB, generic_url,
                                             public_ep)
        # data socket keeps only the most recent frame (high-water mark of 1)
        self.data, self.data_url = self.bind(ctx,
                                             zmq.PUB,
                                             generic_url,
                                             public_ep,
                                             set_hwm=1)
        self.cmd, self.cmd_url = self.bind(ctx, zmq.PULL, generic_url,
                                           public_ep)
Esempio n. 8
0
def test_cap(i, mode=(640, 480, 30), format='bgr', bandwidth_factor=1.3):
    """Open device `i` from the module-level dev_list and print per-frame
    latency forever (no preview; see the commented snippet below)."""
    print("started cap: %s\n" % i)

    cap = uvc.Capture(dev_list[i]['uid'])
    # cap.print_info()
    cap.bandwidth_factor = bandwidth_factor
    cap.frame_mode = mode
    title = cap.name + ' - ' + str(mode) + ' - ' + format

    last = time()
    while True:
        cap.get_frame_robust()
        print("%s - %s" % (title, time() - last))
        last = time()

        # opencv preview, disabled by default:
        #
        # if format == 'bgr':
        #     data = frame.bgr
        # elif format == 'gray':
        #     data = frame.gray
        # cv2.imshow(title,data)
        # k = cv2.waitKey(1)
        # if k == 27:
        #     break

    cap = None
Esempio n. 9
0
 def re_init_capture(self, uid):
     """Swap the running capture for device `uid`, keeping the current
     frame size/rate and rebuilding the GUI around it."""
     size = self.uvc_capture.frame_size
     fps = self.uvc_capture.frame_rate
     self.deinit_gui()
     self.uvc_capture.close()
     self.uvc_capture = uvc.Capture(uid)
     self.update_capture(size, fps)
     self.init_gui()
Esempio n. 10
0
 def _re_init_capture(self, uid):
     """Reopen the capture on device `uid`, carrying over the current
     frame size/rate and UVC control values, then refresh the menu."""
     size = self.uvc_capture.frame_size
     fps = self.uvc_capture.frame_rate
     controls = self._get_uvc_controls()
     self.uvc_capture.close()
     self.uvc_capture = uvc.Capture(uid)
     self.configure_capture(size, fps, controls)
     self.update_menu()
Esempio n. 11
0
 def check_mode_availability(self, source, mode):
     """Return `mode` if the camera at index `source` supports it; otherwise
     fall back to the first supported mode (with width/height swapped for
     the shared array layout) and update self.mode/self.shared_array."""
     dev_list = uvc.device_list()
     cap = uvc.Capture(dev_list[source]['uid'])
     try:
         if mode not in cap.avaible_modes:
             m = cap.avaible_modes[0]
             mode = (m[1], m[0], m[2])
             self.shared_array = self.create_shared_array(mode)
             self.mode = mode
     finally:
         # BUG FIX: the probe capture was never closed, leaking the device
         # handle (cf. __set_fps_modes, which does close its probe capture)
         cap.close()
     return mode
Esempio n. 12
0
 def __reset_mode(self, cap):
     """Close a misbehaving capture and reopen the same device with the
     previous frame mode and a fresh bandwidth factor."""
     print("resetting...")
     previous_mode = cap.frame_mode
     cap.close()
     time.sleep(0.5)
     devices = uvc.device_list()
     fresh_cap = uvc.Capture(devices[self.source]['uid'])
     print("Trying mode:", previous_mode)
     fresh_cap.frame_mode = previous_mode
     fresh_cap.bandwidth_factor = 1.3
     return fresh_cap
Esempio n. 13
0
    def init_capture(self,uid,size,fps):
        """Open the capture device (or a Fake_Capture when uid is None),
        store size/fps, and apply per-camera UVC setting quirks.

        Args:
            uid:  device uid, or None for a fake capture.
            size: requested frame size.
            fps:  requested frame rate.
        """
        if uid is not None:
            self.capture = uvc.Capture(uid)
        else:
            self.capture = Fake_Capture()
        self.uid = uid

        self.frame_size = size
        self.frame_rate = fps

        # The Logitech C930e's timestamps arrive ~0.1 s ahead; compensate.
        if 'C930e' in self.capture.name:
            logger.debug('Timestamp offset for c930 applied: -0.1sec')
            self.ts_offset = -0.1
        else:
            self.ts_offset = 0.0

        #UVC setting quirks:
        controls_dict = {c.display_name: c for c in self.capture.controls}
        try:
            controls_dict['Auto Focus'].value = 0
        except KeyError:
            pass

        if "Pupil Cam1" in self.capture.name or "USB2.0 Camera" in self.capture.name:
            self.capture.bandwidth_factor = 1.3

            if "ID0" in self.capture.name or "ID1" in self.capture.name:
                try:
                    # NOTE(review): the original comment states priority = 1
                    # causes reduced framerates and corrupt timestamps, yet the
                    # code sets 1 (a sibling implementation sets 0) — confirm
                    # the intended value before changing it.
                    controls_dict['Auto Exposure Priority'].value = 1
                except KeyError:
                    pass

                try:
                    controls_dict['Absolute Exposure Time'].value = 59
                except KeyError:
                    pass
        # (removed a trailing 'Auto Focus' reset that exactly duplicated the
        #  one above; nothing between them touches that control)
Esempio n. 14
0
 def __set_fps_modes(self):
     """Probe the camera at index self.source and group its supported modes
     by frame rate: self.modes[fps] -> list of (w, h, fps) tuples,
     self.fps_res[fps] -> matching "W x H" labels. Resets self.mode to the
     lowest supported mode if the current one is unavailable."""
     self.fps_res, self.modes = {}, {}
     dev_list = uvc.device_list()
     cap = uvc.Capture(dev_list[self.source]['uid'])
     # iterate modes directly instead of indexing by range(len(...))
     for mode in cap.avaible_modes:
         fps = mode[2]
         if fps not in self.fps_res:
             self.fps_res[fps] = []
             self.modes[fps] = []
         resolution = str(mode[0]) + " x " + str(mode[1])
         self.modes[fps].append(mode)
         self.fps_res[fps].append(resolution)
     if self.mode not in cap.avaible_modes:
         self.mode = sorted(cap.avaible_modes)[0]
     # release the probe capture
     cap.close()
Esempio n. 15
0
def my_name(i,
            title,
            mode=(640, 480, 120),
            format='bgr',
            bandwidth_factor=1.3):
    """Stream camera `i` from the module-level dev_list, writing frames to the
    matching video writer and showing a preview window positioned per role.
    ESC (27) closes all windows and exits the process."""
    # get the ID of the camera and define it as an object
    cap = uvc.Capture(dev_list[i]['uid'])

    # set the bandwidth factor
    # to know more about what it is https://github.com/pupil-labs/pupil-docs/blob/master/developer-docs/usb-bandwidth-sync.md
    cap.bandwidth_factor = bandwidth_factor
    cap.frame_mode = mode

    while True:
        # grab the next frame (blocking, with retries)
        frame = cap.get_frame_robust()
        data = frame.img

        if title == 'LEFT EYE CAPTURE':
            # horizontal flip (flipCode=1 mirrors around the vertical axis)
            # to orient the video in the right view
            data = cv2.flip(data, 1)

            out_left_eye.write(data)
            cv2.imshow(title, data)
            cv2.moveWindow(title, 60, 0)

        if title == 'RIGHT EYE CAPTURE':
            # vertical flip (flipCode=0 mirrors around the horizontal axis)
            # to orient the video in the right view
            data = cv2.flip(data, 0)

            out_right_eye.write(data)
            cv2.imshow(title, data)
            cv2.moveWindow(title, np.size(data, 1) + 70, 0)

        if title == 'MOUTH CAPTURE':
            # vertical flip (flipCode=0 mirrors around the horizontal axis)
            # to orient the video in the right view
            data = cv2.flip(data, 0)

            out_mouth.write(data)
            cv2.imshow(title, data)
            cv2.moveWindow(title, int((np.size(data, 1) / 2) + 60),
                           np.size(data, 0) + 40)

        k = cv2.waitKey(1)
        if k == 27:
            cv2.destroyAllWindows()
            exit(0)
Esempio n. 16
0
    def __init__(self, uid, timebase=None):
        """Open the capture for `uid` and set up timestamping.

        Args:
            uid: device uid to open.
            timebase: None for the system timebase, or an object exposing a
                `.value` attribute (e.g. ctypes c_double) for an app-wide
                adjustable timebase. Anything else falls back to the system
                timebase with an error logged.
        """
        # BUG FIX (idiom): compare against None with `is`, not `==`
        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif hasattr(timebase, 'value'):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error(
                "Invalid timebase variable type. Will use default system timebase"
            )
            self.timebase = c_double(0)

        self.use_hw_ts = self.check_hw_ts_support()
        self._last_timestamp = self.get_now()
        self.capture = uvc.Capture(uid)
        self.uid = uid
        # The Logitech C930e's timestamps arrive ~0.1 s ahead; compensate.
        if 'C930e' in self.capture.name:
            logger.debug('Timestamp offset for c930 applied: -0.1sec')
            self.ts_offset = -0.1
        else:
            self.ts_offset = 0.0

        if "USB 2.0 Camera" in self.capture.name:
            self.capture.bandwidth_factor = 1.2

        logger.debug('avaible modes %s' % self.capture.avaible_modes)

        # UVC setting quirks, applied best-effort per available control
        controls_dict = {c.display_name: c for c in self.capture.controls}
        try:
            controls_dict['Auto Focus'].value = 0
        except KeyError:
            pass
        try:
            # Auto Exposure Priority = 1 leads to reduced framerates under low light and corrupt timestamps.
            controls_dict['Auto Exposure Priority'].value = 0
        except KeyError:
            pass

        self.sidebar = None
        self.menu = None
Esempio n. 17
0
    def init_capture(self,uid):
        """Open the capture device (or a Fake_Capture when uid is None) and
        apply per-camera UVC setting quirks.

        Args:
            uid: device uid, or None for a fake capture.
        """
        self.uid = uid

        if uid is not None:
            self.capture = uvc.Capture(uid)
        else:
            self.capture = Fake_Capture()

        # The Logitech C930e's timestamps arrive ~0.1 s ahead; compensate.
        # (fixed the stray over-indentation of this branch's body)
        if 'C930e' in self.capture.name:
            logger.debug('Timestamp offset for c930 applied: -0.1sec')
            self.ts_offset = -0.1
        else:
            self.ts_offset = 0.0

        #UVC setting quirks:
        controls_dict = dict([(c.display_name,c) for c in self.capture.controls])
        try:
            controls_dict['Auto Focus'].value = 0
        except KeyError:
            pass

        if "Pupil Cam1" in self.capture.name or "USB2.0 Camera" in self.capture.name:
            self.capture.bandwidth_factor = 1.8
            if "ID0" in self.capture.name or "ID1" in self.capture.name:
                self.capture.bandwidth_factor = 1.3
                try:
                    controls_dict['Auto Exposure Priority'].value = 1
                except KeyError:
                    pass
                try:
                    controls_dict['Saturation'].value = 0
                except KeyError:
                    pass
                try:
                    controls_dict['Absolute Exposure Time'].value = 63
                except KeyError:
                    pass
            # (removed a duplicate 'Auto Focus' reset — the control is already
            #  set to 0 above and nothing in between changes it)
Esempio n. 18
0
 def __init__(self, uid_or_device_index, use_uvc: bool):
     """Open a camera either via pyuvc (by device uid) or via OpenCV
     (by device index), depending on `use_uvc`.

     Raises:
         ValueError: use_uvc is True but no device matches the uid.
         AssertionError: OpenCV could not open the given index.
     """
     if use_uvc:
         import uvc
         lg.info('using uvc as backend')
         devices = uvc.device_list()
         lg.info('there are {} camera devices, devices:'.format(
             len(devices)))
         select_device = None
         for d in devices:
             lg.info(d)
             if d['uid'] == uid_or_device_index:
                 select_device = d
         # BUG FIX: previously a missing match crashed with an opaque
         # TypeError on select_device['uid']; fail with a clear message
         if select_device is None:
             raise ValueError(
                 'no uvc device found with uid {}'.format(uid_or_device_index))
         lg.info('opening {}'.format(select_device['uid']))
         self.cap = uvc.Capture(select_device['uid'])
     else:
         lg.info('using opencv as camera backend')
         self.cap = cv2.VideoCapture(uid_or_device_index)
         #self.cap.set(cv2.CAP_PROP_EXPOSURE,30)
         assert self.cap.isOpened(), 'can not open {}'.format(
             uid_or_device_index)
Esempio n. 19
0
    def re_init(self, uid, size=(640, 480), fps=30):
        """Reopen the capture on device `uid`, preserving the current frame
        size/rate and the GUI menu configuration across the swap.

        Note: the `size`/`fps` parameters are currently unused — the values
        read from the old capture are applied instead.
        """
        # remember the old capture's mode before dropping it
        current_size = self.capture.frame_size
        current_fps = self.capture.frame_rate

        self.capture = None
        #recreate the bar with new values
        menu_conf = self.menu.configuration
        self.deinit_gui()

        self.use_hw_ts = self.check_hw_ts_support()
        self.capture = uvc.Capture(uid)
        self.uid = uid

        self.frame_size = current_size
        self.frame_rate = current_fps
        # best-effort UVC quirk: disable autofocus if the control exists
        controls_dict = dict([(c.display_name, c)
                              for c in self.capture.controls])
        try:
            controls_dict['Auto Focus'].value = 0
        except KeyError:
            pass
        # try:
        #     # exposure_auto_priority == 1
        #     # leads to reduced framerates under low light and corrupt timestamps.
        #     self.capture.set_control(controls_dict['Exposure, Auto Priority']['id'], 0)
        # except KeyError:
        #     pass

        # rebuild the GUI and restore its previous configuration
        self.init_gui(self.sidebar)
        self.menu.configuration = menu_conf

        # The Logitech C930e's timestamps arrive ~0.1 s ahead; compensate.
        if 'C930e' in self.capture.name:
            logger.debug('Timestamp offset for c930 applied: -0.1sec')
            self.ts_offset = -0.1
        else:
            self.ts_offset = 0.0
Esempio n. 20
0
 def _init_capture(self, uid):
     """Open a fresh capture on `uid`, restore the backed-up frame
     size/rate and UVC controls, and refresh the menu."""
     self.uvc_capture = uvc.Capture(uid)
     self.configure_capture(
         self.frame_size_backup,
         self.frame_rate_backup,
         self._get_uvc_controls(),
     )
     self.update_menu()
Esempio n. 21
0
    def __init__(
        self,
        g_pool,
        frame_size,
        frame_rate,
        name=None,
        preferred_names=(),
        uid=None,
        uvc_controls={},
        check_stripes=True,
        exposure_mode="manual",
        *args,
        **kwargs,
    ):
        """Open a UVC capture device by uid or by (partial) device name.

        Tries `uid` directly when given; otherwise scans connected devices for
        a substring match against `name`/`preferred_names`. Falls back to a
        disconnected state (no images supplied) if nothing could be opened.
        """
        super().__init__(g_pool, *args, **kwargs)
        self.uvc_capture = None
        self._last_ts = None
        self._restart_in = 3
        # at least one way to identify the device must be provided
        assert name or preferred_names or uid

        if platform.system() == "Windows":
            self.verify_drivers()

        self.devices = uvc.Device_List()

        devices_by_name = {dev["name"]: dev for dev in self.devices}

        # if uid is supplied we init with that
        if uid:
            try:
                self.uvc_capture = uvc.Capture(uid)
            except uvc.OpenError:
                logger.warning(
                    "No avalilable camera found that matched {}".format(
                        preferred_names))
            except uvc.InitError:
                logger.error("Camera failed to initialize.")
            except uvc.DeviceNotFoundError:
                logger.warning(
                    "No camera found that matched {}".format(preferred_names))

        # otherwise we use name or preferred_names
        else:
            if name:
                preferred_names = (name, )
            assert preferred_names

            # try to init by name (substring match); stop at the first success
            for name in preferred_names:
                for d_name in devices_by_name.keys():
                    if name in d_name:
                        uid_for_name = devices_by_name[d_name]["uid"]
                        try:
                            self.uvc_capture = uvc.Capture(uid_for_name)
                            break
                        except uvc.OpenError:
                            logger.info(
                                f"{uid_for_name} matches {name} but is already in use "
                                "or blocked.")
                        except uvc.InitError:
                            logger.error("Camera failed to initialize.")
                if self.uvc_capture:
                    break

        # checkframestripes will be initialized accordingly in configure_capture()
        self.enable_stripe_checks = check_stripes
        self.exposure_mode = exposure_mode
        self.stripe_detector = None
        self.preferred_exposure_time = None

        # check if we were successful
        if not self.uvc_capture:
            logger.error(
                "Could not connect to device! No images will be supplied.")
            self.name_backup = preferred_names
            self.frame_size_backup = frame_size
            self.frame_rate_backup = frame_rate
            self.exposure_time_backup = None
            self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                               self.frame_size)
        else:
            self.configure_capture(frame_size, frame_rate, uvc_controls)
            self.name_backup = (self.name, )
            self.frame_size_backup = frame_size
            self.frame_rate_backup = frame_rate
            # remember the current absolute exposure time, if the camera has one
            controls_dict = {
                c.display_name: c for c in self.uvc_capture.controls
            }
            try:
                self.exposure_time_backup = controls_dict[
                    "Absolute Exposure Time"].value
            except KeyError:
                self.exposure_time_backup = None

        self.backup_uvc_controls = {}
Esempio n. 22
0
 def _init_capture(self, uid, backup_uvc_controls=None):
     """Open a fresh capture on `uid`, restore the backed-up frame size/rate
     and the given UVC controls, and refresh the menu.

     Args:
         uid: device uid to open.
         backup_uvc_controls: control values to restore; defaults to empty.
     """
     # BUG FIX: replaced the mutable default argument ({}) with a None
     # sentinel so a shared dict can never leak state across calls
     if backup_uvc_controls is None:
         backup_uvc_controls = {}
     self.uvc_capture = uvc.Capture(uid)
     self.configure_capture(self.frame_size_backup, self.frame_rate_backup,
                            backup_uvc_controls)
     self.update_menu()
Esempio n. 23
0
# Display a live uvc camera preview in a pygame window (Python 2 script).
import logging
import pygame
from pygame.locals import *

logging.basicConfig(level=logging.INFO)

# window size and capture mode (width, height, fps)
SCREEN_SIZE = (1280, 720)
CAP_MODE = (1280, 720, 30)  # TODO: how to add int to tuple?

pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('wtfcam')

# open the first detected device
dev_list = uvc.device_list()
print dev_list
cap = uvc.Capture(dev_list[0]['uid'])  # TODO: choosable camera
print cap.avaible_modes

cap.frame_mode = CAP_MODE
while True:
    # window close -> exit
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit(0)
    frame = cap.get_frame_robust()
    # TODO: fails with anything else than RGB
    # NOTE(review): BGR bytes are handed to pygame labelled 'RGB', so red and
    # blue channels appear swapped — confirm whether this is intended
    surf = pygame.image.fromstring(frame.bgr.tostring(), SCREEN_SIZE, 'RGB',
                                   True).convert()
    screen.blit(surf, (0, 0))
    pygame.display.update()
    pygame.time.delay(100)
import tensorflow.contrib.eager as tfe

import torch
import Model_UNet_Segmentation_PupilLabs
import numpy as np
import time

opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
conf = tf.ConfigProto(gpu_options=opts)
tfe.enable_eager_execution(config=conf)
device = torch.device("cuda")
logging.basicConfig(level=logging.INFO)

dev_list = uvc.device_list()
print(dev_list)
cap_world = uvc.Capture(dev_list[1]["uid"])
cap_right = uvc.Capture(dev_list[0]["uid"])
cap_left = uvc.Capture(dev_list[2]["uid"])

# Uncomment the following lines to configure the Pupil 200Hz IR cameras:
# controls_dict = dict([(c.display_name, c) for c in cap.controls])
# controls_dict['Auto Exposure Mode'].value = 1
# controls_dict['Gamma'].value = 200
print(cap_world.avaible_modes)
cap_world.frame_mode = (1280, 720, 60)
cap_left.frame_mode = (320, 240, 120)
cap_right.frame_mode = (320, 240, 120)

model = Model_UNet_Segmentation_PupilLabs.UNet4f16ch_seg_reg()
print('Loading Model....')
model.load_state_dict(
Esempio n. 25
0
        lock.release()  # release object
        print(f"key input:{key}")
        if key == TERMINATE:
            print("Closing key_check...")
            time.sleep(1)
            break


# Camera open
devices = uvc.Device_List()
if not devices:
    print("EyeTracker not found")
    sys.exit(1)
# attach the two Pupil eye cameras by exact name: ID0 -> cap[0], ID1 -> cap[1]
for i in range(len(devices)):
    if devices[i]['name'] == 'Pupil Cam1 ID0':
        cap[0] = uvc.Capture(devices[i]['uid'])
        cap[0].frame_mode = (640, 480, 60)
    if devices[i]['name'] == 'Pupil Cam1 ID1':
        cap[1] = uvc.Capture(devices[i]['uid'])
        cap[1].frame_mode = (640, 480, 60)

# Eye detector init
trk = EyeDetector.alloc(len(EYES))

# for FPS calculation: fps is recomputed every max_count frames
tm = cv2.TickMeter()
tm.start()
count = 0
max_count = 100
fps = 0
Esempio n. 26
0
    def __init__(self):
        """ROS camera node: read parameters, open the matching uvc camera,
        apply UVC controls, and set up calibration + image/camera publishers."""
        rospy.init_node('{}_node'.format(DEFAULT_CAMERA_NAME), argv=sys.argv)

        # topic names, calibration file and basic stream parameters
        self.image_topic = rospy.get_param('~image', DEFAULT_IMAGE_TOPIC)
        self.camera_topic = rospy.get_param('~camera', DEFAULT_CAMERA_TOPIC)
        self.calibration = rospy.get_param('~calibration', '')
        self.encoding = rospy.get_param('~encoding', 'mono8')
        self.width = rospy.get_param('~width', DEFAULT_WIDTH)
        self.height = rospy.get_param('~height', DEFAULT_HEIGHT)
        self.fps = rospy.get_param('~fps', DEFAULT_FPS)
        self.cam_prod_id = rospy.get_param('~cam_prod_id', 9760)

        # UVC control values to apply once the camera is open
        self.contrast = rospy.get_param('~contrast', 0)
        self.sharpness = rospy.get_param('~sharpness', 1)
        self.gamma = rospy.get_param('~gamma', 80)
        self.exposure_abs = rospy.get_param('~exposure', 50)  #50 -
        self.brightness = rospy.get_param('~brightness', 1)
        self.hue = rospy.get_param('~hue', 0)
        self.saturation = rospy.get_param('~saturation', 0)
        self.pwr_line_freq = rospy.get_param('~pwr_line_freq', 1)
        self.backlight_comp = rospy.get_param('~backlight_comp', 0)
        self.auto_exposure = rospy.get_param('~auto_exposure', 8)
        self.auto_exp_prio = rospy.get_param('~auto_exp_prio', 0)
        self.white_bal_auto = rospy.get_param('~white_bal_auto', True)
        self.white_bal = rospy.get_param('~white_bal', 4600)

        # possible values to set can be found by running "rosrun uvc_camera uvc_camera_node _device:=/dev/video0", see http://github.com/ros-drivers/camera_umd.git
        dev_list = uvc.device_list()
        for i in range(0, len(dev_list)):
            print dev_list[i]
            rospy.loginfo(
                'available device %i: idProd: %i   -   comparing to idProd: %i'
                % (i, int(dev_list[i]["idProduct"]), self.cam_prod_id))
            # NOTE(review): this compares the loop INDEX i to cam_prod_id,
            # while the log line and the commented-out condition suggest
            # comparing the device's idProduct; with the default 9760 no
            # device will ever match and self.cap stays unset — confirm
            # whether cam_prod_id is meant as an index or a product id.
            if i == self.cam_prod_id:  #int(dev_list[i]["idProduct"]) == self.cam_prod_id:
                rospy.loginfo("connecting to camera idProd: %i, device %i" %
                              (self.cam_prod_id, i))
                self.cap = uvc.Capture(dev_list[i]["uid"])
                #self.cap.set(CV_CAP_PROP_CONVERT_RGB, false)
                rospy.loginfo("successfully connected to camera %i" % i)
        rospy.loginfo('starting cam at %ifps with %ix%i resolution' %
                      (self.fps, self.width, self.height))
        self.cap.frame_mode = (self.width, self.height, self.fps)
        # grab one frame so the mode is actually negotiated before configuring
        frame = self.cap.get_frame_robust()
        self.controls_dict = dict([(c.display_name, c)
                                   for c in self.cap.controls])
        # apply all configured UVC controls (valid ranges noted per control)
        self.controls_dict[
            'Brightness'].value = self.brightness  #10 #[-64,64], not 0 (no effect)!!
        self.controls_dict['Contrast'].value = self.contrast  #0 #[0,95]
        self.controls_dict['Hue'].value = self.hue  #[-2000,2000]
        self.controls_dict['Saturation'].value = self.saturation  #[0,100]
        self.controls_dict['Sharpness'].value = self.sharpness  #1 #[1,100]
        self.controls_dict['Gamma'].value = self.gamma  #[80,300]
        self.controls_dict[
            'Power Line frequency'].value = self.pwr_line_freq  #1:50Hz, 2:60Hz
        self.controls_dict[
            'Backlight Compensation'].value = self.backlight_comp  #True or False
        self.controls_dict[
            'Absolute Exposure Time'].value = self.exposure_abs  #[78,10000] set Auto Exposure Mode to 1
        self.controls_dict[
            'Auto Exposure Mode'].value = self.auto_exposure  #1:manual, 8:apperturePriority
        self.controls_dict[
            'Auto Exposure Priority'].value = self.auto_exp_prio  #1:manual, 8:apperturePriority
        self.controls_dict[
            'White Balance temperature,Auto'].value = self.white_bal_auto
        self.controls_dict[
            'White Balance temperature'].value = self.white_bal  #[2800,6500]
        rospy.loginfo("These camera settings will be applied:")
        for c in self.cap.controls:
            rospy.loginfo('%s: %i' % (c.display_name, c.value))

        # camera calibration info loaded from the given file (if any)
        self.manager = CameraInfoManager(cname=DEFAULT_CAMERA_NAME,
                                         url='file://' + self.calibration,
                                         namespace=DEFAULT_CAMERA_NAME)
        self.manager.loadCameraInfo()  # Needs to be called before getter!
        self.camera_info = self.manager.getCameraInfo()
        self.bridge = CvBridge()
        # publishers for the image stream and the camera info
        self.image_publisher = rospy.Publisher(self.image_topic,
                                               Image,
                                               queue_size=1)
        self.camera_publisher = rospy.Publisher(self.camera_topic,
                                                CameraInfo,
                                                queue_size=1)
        self.seq = 0
        self.counter = 0
# if tookPicture:
#     print(tookPicture, [len(image) if image is not None else 0])
#     cv2.imshow("Test Picture", image)  # displays captured image
#     cv2.imwrite("test.jpg", image)
# else:
#     print('Couldn\'t take a picture')
cv2.findTransformECC()

from __future__ import print_function
import uvc
import logging
logging.basicConfig(level=logging.INFO)

# Enumerate attached UVC cameras and open the first one.
dev_list = uvc.device_list()
print(dev_list)
cap = uvc.Capture(dev_list[0]['uid'])

# Uncomment the following lines to configure the Pupil 200Hz IR cameras:
# controls_dict = dict([(c.display_name, c) for c in cap.controls])
# controls_dict['Auto Exposure Mode'].value = 1
# controls_dict['Gamma'].value = 200

# `avaible_modes` (sic) is the attribute name exposed by pyuvc itself.
print(cap.avaible_modes)
# BUGFIX: the original reused `x` for both loops, so the inner loop
# shadowed the outer counter; distinct names restore clarity.
for round_idx in range(10):
    print(round_idx)
    cap.frame_mode = (640, 480, 30)
    for _ in range(100):
        frame = cap.get_frame_robust()
        print(frame.img.shape)
        # cv2.imshow("img", frame.gray)
        # cv2.waitKey(1)
def my_name(i,
            title,
            mode=(640, 480, 60),
            format='gray',
            bandwidth_factor=1.3):
    """Stream JPEG-compressed frames from UVC camera *i* over TCP.

    Captures frames from the i-th device in the module-level ``dev_list``,
    flips and JPEG-encodes each frame, sends it on the socket selected by
    *title*, and shows a local preview window. Runs until ESC is pressed
    in the preview window, then tears down and exits the process.

    Parameters
    ----------
    i : int
        Index into the module-level ``dev_list`` of UVC devices.
    title : str
        One of 'LEFT EYE CAPTURE', 'RIGHT EYE CAPTURE' or 'MOUTH CAPTURE';
        selects the socket, flip orientation and preview-window slot. Any
        other value captures frames but neither transmits nor displays
        them (matching the original behaviour).
    mode : tuple
        (width, height, fps) capture mode.
    format : str
        Unused; kept only for backward compatibility with existing callers.
    bandwidth_factor : float
        USB bandwidth factor; see
        https://github.com/pupil-labs/pupil-docs/blob/master/developer-docs/usb-bandwidth-sync.md
    """
    # get the ID of the camera and define it as an object
    cap = uvc.Capture(dev_list[i]['uid'])
    cap.bandwidth_factor = bandwidth_factor
    cap.frame_mode = mode

    # Route by title: (socket, cv2.flip code, horizontal preview slot).
    # The size-header sockets (sock1/sock3/sock5) were commented out in
    # the original and remain unused.
    if title == 'LEFT EYE CAPTURE':
        active_sock, flip_code, window_slot = sock2, 1, 0
        active_sock.connect((TCP_IP, TCP_PORT2))
        print(title)
    elif title == 'RIGHT EYE CAPTURE':
        active_sock, flip_code, window_slot = sock4, 0, 1
        active_sock.connect((TCP_IP, TCP_PORT4))
        print(title)
    elif title == 'MOUTH CAPTURE':
        active_sock, flip_code, window_slot = sock6, 0, 2
        active_sock.connect((TCP_IP, TCP_PORT6))
        print(title)
    else:
        active_sock = flip_code = window_slot = None

    # JPEG quality 45 (~55% compression); loop-invariant, so built once.
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 45]

    while True:
        # capture every frame from the video
        frame = cap.get_frame_robust()
        data = frame.img

        if active_sock is not None:
            # flip just to orient the video in the right view
            data = cv2.flip(data, flip_code)

            # encode the image as a jpg compressed before transmission
            result, imgencode = cv2.imencode('.jpg', data, encode_param)
            payload = np.array(imgencode)
            # BUGFIX: ndarray.tostring() is a deprecated alias of
            # tobytes(); identical bytes, future-proof spelling.
            blob = payload.tobytes()
            # The 16-byte length header of the original was computed but
            # never sent (its send() call was commented out), so it is
            # omitted here.
            # BUGFIX: send() may transmit only part of the buffer;
            # sendall() guarantees the whole frame goes out.
            active_sock.sendall(blob)

            # Local preview, windows tiled side by side per window_slot.
            decimg = cv2.imdecode(payload, 1)
            cv2.imshow('CLIENT ' + title, decimg)
            cv2.moveWindow('CLIENT ' + title,
                           window_slot * (np.size(decimg, 1) + 10),
                           np.size(decimg, 0) + 40)

        k = cv2.waitKey(1)
        if k == 27:  # ESC
            cv2.destroyAllWindows()
            # BUGFIX: the original called sock.close() on an undefined
            # name `sock`; close the socket that was actually used.
            if active_sock is not None:
                active_sock.close()
            exit(0)
# Esempio n. 29 (Italian: "Example no. 29" — scraped-snippet separator, not code)
# 0
from time import time, sleep
from datetime import datetime
import os
import numpy as np

# print(uvc.is_accessible(0))
dev_list = uvc.device_list()
# BUGFIX: the original used Python-2-only `print` statements, which are
# syntax errors under Python 3 and clash with the
# `from __future__ import print_function` used elsewhere in this file;
# converted to print() calls throughout.
print("Compatible devices found:\n\t" + str(dev_list))
cam_uid = ''
for d in dev_list:
    if d['name'] == "USB 2.0 Camera":
        cam_uid = d['uid']
if cam_uid == '':
    print("No compatible cameras found, exiting :(")
    exit()
cap = uvc.Capture(cam_uid)
for c in cap.controls:
    # if 'Focus' in c.display_name:
    #     c.value = c.def_val
    if 'Auto Exposure Mode' in c.display_name:
        # reset auto-exposure to the hardware default
        c.value = c.def_val
    # if 'Absolute Exposure Time' in c.display_name:
    #     c.value = c.def_val
    print("{} = {} {} (def:{}, min:{}, max:{})".format(c.display_name,
                                                       str(c.value),
                                                       str(c.unit),
                                                       str(c.def_val),
                                                       str(c.min_val),
                                                       str(c.max_val)))
# `avaible_modes` (sic) is the attribute name pyuvc exposes.
print(cap.name + " has the following available modes:\n\t" +
      str(cap.avaible_modes))
# Esempio n. 30 (Italian: "Example no. 30" — scraped-snippet separator, not code)
# 0
from __future__ import print_function
import uvc
import logging

logging.basicConfig(level=logging.INFO)

dev_list = uvc.device_list()
print(dev_list)
cap = uvc.Capture(dev_list[0]["uid"])

# Uncomment the following lines to configure the Pupil 200Hz IR cameras:
# controls_dict = dict([(c.display_name, c) for c in cap.controls])
# controls_dict['Auto Exposure Mode'].value = 1
# controls_dict['Gamma'].value = 200

print(cap.avaible_modes)
for x in range(10):
    print(x)
    cap.frame_mode = (640, 480, 30)
    for x in range(100):
        frame = cap.get_frame_robust()
        print(frame.img.shape)
        # cv2.imshow("img",frame.gray)
        # cv2.waitKey(1)
cap = None