Esempio n. 1
0
class WebCamStream(object):
    """
    A class that handles threaded video streaming through a USB webcam.
    based on imutils library for python: https://github.com/jrosebr1/imutils

    A daemon thread continuously grabs frames from the capture device;
    read() blocks until a fresh frame has been captured.
    """

    def __init__(self, src=0):
        """
        Initialize an object that controls a video
        stream from a USB Webcam on a separate thread.
        Args:
          src: the source for the USB webcam, 0 is the default camera
        """
        self.src = src
        self.stream = VideoCapture(src)  # initialize video source
        self.grabbed, self.frame = self.stream.read()  # grab initial frame
        self.stopped = False
        self.has_frame = Event()
        self.thread = None  # background reader thread, created by start()

    def start(self):
        """
        Start the video stream on a separate daemon thread once the
        capturing device is opened.
        """
        while not self.stream.isOpened():  # wait for I/O to come online
            sleep(8)  # 8 seconds = 1 successful re-open attempt
            self.stream.open(self.src)  # attempt to open again
        # Keep a reference to the thread so stop() can join it and
        # guarantee no read() is in flight when the device is released.
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True  # don't keep the process alive
        self.thread.start()

    def update(self):
        """
        Continuously update the stream with the most recent frame
        until stopped.
        """
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
            if self.grabbed:
                self.has_frame.set()  # notify waiting readers

    def read(self):
        """
        Read the current frame in the video stream.
        Blocks until a frame has been captured since the last call.
        Returns:
          The most recent frame captured
        """
        self.has_frame.wait()
        frame = self.frame
        self.has_frame.clear()
        return frame

    def stop(self):
        """
        Stop the video stream and release the capturing device.
        """
        self.stopped = True
        # BUG FIX: previously the device was released while the update
        # thread could still be blocked inside stream.read(), which can
        # crash the capture backend. Join the reader thread first (with a
        # timeout in case the device read hangs).
        if self.thread is not None:
            self.thread.join(timeout=2)
        # Wake any caller blocked in read() so it cannot deadlock after
        # the stream is stopped; it receives the last captured frame.
        self.has_frame.set()
        self.stream.release()  # close capturing device
Esempio n. 2
0
class Capture(QThread):
    """Qt thread wrapper around the default webcam (device 0)."""

    def __init__(self, window):
        """Open camera 0 at 640x480 and remember the parent window."""
        super(Capture, self).__init__(window)
        self.window = window
        self.capturing = False
        self.cam = VideoCapture(0)
        # Property ids 3/4 are the OpenCV frame width/height properties.
        self.cam.set(3, 640)
        self.cam.set(4, 480)

    def takePhoto(self):
        """Grab and return one frame, reopening the device if it was closed.

        NOTE(review): the success flag from read() is discarded, so the
        returned frame may be None if the grab failed -- confirm callers
        tolerate that.
        """
        if not self.cam.isOpened():
            self.cam.open(0)
            waitKey(5)  # give the backend a moment to settle
        grabbed, frame = self.cam.read()
        waitKey(1)
        return frame
Esempio n. 3
0
class OpenCVCamera(BaseCamera):
    """Camera backend built on an OpenCV ``VideoCapture`` (V4L device).

    Frames are centre-cropped to a square of side ``min(w, h)`` and
    returned as RGB PIL images.
    """

    def __init__(self, videoSource, dims=(640, 480)):
        """
        Args:
          videoSource: device index or path handed to VideoCapture.open().
          dims: requested (width, height) capture resolution.
        """
        w, h = dims
        self.w = w
        self.h = h
        # Frame centre and half-side of the square centre crop.
        # (// instead of int(x / 2): same result for these positive
        # dimensions, and the idiomatic integer division.)
        self.midX = w // 2
        self.midY = h // 2
        self.cropDim2 = min(w, h) // 2

        self.videoSource = videoSource
        self.vcap = VideoCapture()

    def open(self):
        """Open the capture device and configure its resolution.

        Raises:
          RuntimeError: if the device cannot be opened or does not
            support the requested resolution.
        """
        if not self.vcap.isOpened() and not self.vcap.open(
                self.videoSource, CAP_V4L):
            raise RuntimeError('Could not open camera')

        # Minimize the frame buffer to always receive frames in realtime.
        self.vcap.set(CAP_PROP_BUFFERSIZE, 1)
        self.vcap.set(CAP_PROP_FRAME_WIDTH, self.w)
        self.vcap.set(CAP_PROP_FRAME_HEIGHT, self.h)

        if (self.vcap.get(CAP_PROP_FRAME_WIDTH) != self.w
                or self.vcap.get(CAP_PROP_FRAME_HEIGHT) != self.h):
            raise RuntimeError('Resolution not supported')

    def close(self):
        """Release the capture device."""
        self.vcap.release()

    @property
    def resolution(self):
        """Original and cropped frame dimensions as a nested dict."""
        return {
            'original': {
                'width': self.w,
                'height': self.h
            },
            'crop': {
                'width': self.cropDim2 * 2,
                'height': self.cropDim2 * 2
            }
        }

    def read(self):
        """Read one frame, centre-crop it, and return it as an RGB PIL image.

        Raises:
          IOError: if no frame could be read from the device.
        """
        (success, frame) = self.vcap.read()
        # Idiom fix: `if not success` instead of `success == False`.
        if not success:
            raise IOError('Frame could not be read from source')

        crop = frame[self.midY - self.cropDim2:self.midY + self.cropDim2,
                     self.midX - self.cropDim2:self.midX + self.cropDim2]
        # cv2 uses BGR instead of RGB by default.
        return Image.fromarray(cvtColor(crop, COLOR_BGR2RGB))

    def encodeJPG(self, pilImage):
        """Encode a PIL image as JPEG and return the raw bytes."""
        jpg = BytesIO()
        pilImage.save(jpg, format='JPEG')
        return jpg.getvalue()
class Capture(QThread):
    """QThread-backed capture helper for the default webcam."""

    def __init__(self, window):
        """Initialise the thread and open camera 0 at 640x480."""
        super(Capture, self).__init__(window)
        self.window = window
        self.capturing = False
        self.cam = VideoCapture(0)
        # 3 and 4 are the OpenCV property ids for frame width and height.
        self.cam.set(3, 640)
        self.cam.set(4, 480)

    def takePhoto(self):
        """Return a single frame, reopening the camera first if needed.

        NOTE(review): the read() success flag is ignored; on a failed
        grab the returned frame is None -- verify callers handle that.
        """
        if not self.cam.isOpened():
            self.cam.open(0)
            waitKey(5)
        ok, frame = self.cam.read()
        waitKey(1)
        return frame
Esempio n. 5
0
def find_camera_ids():
    """Probe capture ids and return the set that yield a readable frame.

    Iterates ids ``0 .. CAMERA_CAPTURES_COUNT - 1``, points a scratch
    ``VideoCapture`` at each one, and keeps the ids whose first read
    produces a non-None frame.

    :return: Set of valid ids
    """
    valid_ids = set()
    probe = VideoCapture()  # one reusable capture object for all probes

    for candidate in range(CAMERA_CAPTURES_COUNT):
        probe.open(candidate)        # point the capture at this id
        _, frame = probe.read()      # try to fetch a single frame
        if frame is not None:        # a real camera delivered data
            valid_ids.add(candidate)
        probe.release()              # free the device for the next probe

    # Drop the scratch capture object explicitly.
    del probe

    return valid_ids
Esempio n. 6
0
class Camera(object):
    ''' Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dict {propID : value}
        to check corresponding propIDs check
        opencv documentation under the term VideoCapture.
        They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # items() iterates identically on both Python 2 and 3.
            for propID, value in dic.items():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        ''' Read frame from camera.

        Returns
        --------
        frame : np.array or None
            frame from camera, or None if the capture is not opened
        '''
        # BUG FIX: isOpened is a method; the old code tested the bound
        # method object itself (`self.cap.isOpened`), which is always
        # truthy, so the fallback branch could never run.
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            # BUG FIX: `print '...'` is Python-2-only syntax; the
            # parenthesised call works on both interpreters.
            print('Cap is not opened.')
            return None

    def set(self, **kwargs):
        ''' Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.items():
            self.cap.set(propID, value)

    def close(self):
        ''' Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        ''' Reopens cap on the index stored at construction time.

        NOTE(review): the cameraIndex argument is accepted but ignored
        (self.camera is used instead); kept for backward compatibility
        with existing callers.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
Esempio n. 7
0
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the inference wrapper and load the IR model.
    infer_network = Network()
    # Set Probability threshold for detections
    # prob_threshold = args.prob_threshold

    ### Load the model through `infer_network` ###
    infer_network.load_model(args.model)

    # The network input shape drives the preprocessing resize.
    input_shape = infer_network.get_input_shape()

    ### Handle the input stream ###
    # BUG FIX: VideoCapture(args.input) already opens the source; the
    # old extra open(args.input) call released and reopened the device
    # for no reason.
    input_stream = VideoCapture(args.input)

    # Output stream setup.
    fourcc = VideoWriter_fourcc(*'mp4v')
    frames = 24  # assumed output frame rate -- TODO confirm against source fps
    # Property ids 3/4 are frame width/height. VideoWriter needs the exact
    # input size; without it cv gets a buffer size error and crashes.
    width = int(input_stream.get(3))
    height = int(input_stream.get(4))

    out = VideoWriter('/app/out/out.mp4', fourcc, frames, (width, height))

    ### Loop until stream is over ###
    while input_stream.isOpened():

        ### Read from the video capture ###
        flag, frame = input_stream.read()
        if not flag:
            break  # end of stream
        key_pressed = waitKey(60)

        ### Pre-process the image as needed ###
        preprocessed_frame = preprocessing(frame, input_shape)

        ### Start asynchronous inference for request slot 0 ###
        infer_network.exec_net(preprocessed_frame, 0)

        ### Wait for the result ###
        # Parameter is the request number, not a wait time.
        status = infer_network.wait(0)

        ### Get the results of the inference request ###
        if status == 0:
            output_shape = infer_network.get_output(0)
            drawn_frame = draw_boxes(frame, output_shape, args, width, height)
            out.write(drawn_frame)
            # Stream the raw frame bytes to stdout (e.g. for FFMPEG).
            sys.stdout.buffer.write(drawn_frame)
            sys.stdout.flush()

        ### TODO: Extract any desired stats from the results ###

        ### TODO: Calculate and send relevant information on ###
        ### current_count, total_count and duration to the MQTT server ###
        ### Topic "person": keys of "count" and "total" ###
        ### Topic "person/duration": key of "duration" ###

        ### TODO: Send the frame to the FFMPEG server ###

        ### Write an output image if `single_image_mode` ###
        # imwrite('out/output.png', frame)

        # Break if escape key pressed (27 == ESC).
        if key_pressed == 27:
            break

    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    input_stream.release()
    destroyAllWindows()
Esempio n. 8
0
class Camera(object):
    ''' Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dict {propID : value}
        to check corresponding propIDs check
        opencv documentation under the term VideoCapture.
        They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            # BUG FIX: dict.iteritems() is Python-2-only; items() works
            # on both Python 2 and 3.
            for propID, value in dic.items():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        ''' Read frame from camera.

        Returns
        --------
        frame : np.array or None
            frame from camera, or None if the capture is not opened
        '''
        # BUG FIX: the old code tested the bound method object
        # `self.cap.isOpened` (always truthy) instead of calling it,
        # making the else branch unreachable.
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            # BUG FIX: Python-2 print statement replaced with the
            # portable function-call form.
            print('Cap is not opened.')
            return None

    def set(self, **kwargs):
        ''' Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.items():
            self.cap.set(propID, value)

    def close(self):
        ''' Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        ''' Reopens cap on the construction-time index.

        NOTE(review): cameraIndex is accepted but unused (self.camera is
        used instead); kept for backward compatibility with callers.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
Esempio n. 9
0
class Camera(SingletonConfigurable):
    """Camera wrapper streaming frames through a GStreamer
    ``nvarguscamerasrc`` pipeline on a background thread.

    The latest (optionally undistorted / warped / cropped) frame is
    published through the observable ``value`` trait.
    """

    value = traitlets.Any()

    # config
    width = traitlets.Integer(default_value=224).tag(config=True)
    height = traitlets.Integer(default_value=224).tag(config=True)
    fps = traitlets.Integer(default_value=21).tag(config=True)
    capture_width = traitlets.Integer(default_value=3280).tag(config=True)
    capture_height = traitlets.Integer(default_value=2464).tag(config=True)
    argusmode = traitlets.Integer(default_value=-1).tag(config=True)
    flipmode = traitlets.Integer(default_value=0).tag(config=True)
    autostart = traitlets.Bool(default_value=True).tag(config=True)
    extraconfig = traitlets.Unicode(default_value="").tag(config=True)

    def __init__(self, *args, **kwargs):
        self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
        super(Camera, self).__init__(*args, **kwargs)

        # Post-processing state; everything disabled until the
        # corresponding enable_* method is called.
        self.undistort = False
        self.undistorter = None
        self.undistort_dim = None
        self.undistort_k = None
        self.undistort_d = None
        self.undistort_balance = 0
        self.undistort_dim2 = None
        self.undistort_dim3 = None
        self.undistort_map1 = None
        self.undistort_map2 = None
        self.crop_x1 = None
        self.crop_y1 = None
        self.crop_x2 = None
        self.crop_y2 = None
        self.warp = False
        # BUG FIX: self.warper was never initialised, so calling
        # disable_undistort() or post_process_image() before any
        # enable_* method raised AttributeError.
        self.warper = None

        if self.argusmode >= 0:
            vw, vh, vf = self._get_argus_mode(self.argusmode)
            self.capture_width = vw
            self.capture_height = vh
            self.fps = vf
        if self.width == 0 or self.height == 0:
            # Zero output size means "use the full capture resolution".
            self.width = self.capture_width
            self.height = self.capture_height

        try:
            self.cap = VideoCapture(self._gst_str(), CAP_GSTREAMER)

            re, image = self.cap.read()

            if not re:
                raise RuntimeError('Could not read image from camera.')

            self.value = image
            if self.autostart:
                self.start()
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            self.stop()
            raise RuntimeError(
                'Could not initialize camera.  Please see error trace.')

        atexit.register(self.stop)

    def _capture_frames(self):
        """Read frames until the capture fails (e.g. after release())."""
        while True:
            re, image = self.cap.read()
            if re:
                self.value = self.post_process_image(image)
            else:
                break

    def _gst_str(self):
        """Build the GStreamer pipeline string from the current config."""
        return 'nvarguscamerasrc %s ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=%d ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
            self.extraconfig, self.capture_width, self.capture_height,
            self.fps, self.flipmode, self.width, self.height)

    def _get_argus_mode(self, mode):
        """Map an Argus sensor mode id to (width, height, fps).

        NOTE(review): unknown modes fall through and return None, which
        makes the tuple unpacking in __init__ raise TypeError -- confirm
        whether that is the intended failure mode.
        """
        if mode == 0:
            return 3280, 2464, 21
        elif mode == 1:
            return 3280, 1848, 28
        elif mode == 2:
            return 1920, 1080, 30
        elif mode == 3:
            return 1280, 720, 60
        elif mode == 4:
            return 1280, 720, 120

    def start(self):
        """(Re)open the capture if needed and launch the reader thread."""
        if not self.cap.isOpened():
            self.cap.open(self._gst_str(), CAP_GSTREAMER)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the portable spelling (available since Python 2.6).
        if not hasattr(self, 'thread') or not self.thread.is_alive():
            self.thread = threading.Thread(target=self._capture_frames)
            self.thread.start()

    def stop(self):
        """Release the capture; the reader thread then exits and is joined."""
        if hasattr(self, 'cap'):
            self.cap.release()
        if hasattr(self, 'thread'):
            self.thread.join()

    def restart(self):
        """Stop and start the stream again."""
        self.stop()
        self.start()

    def enable_undistort(self, balance=0.0, dim2=None, dim3=None):
        """Turn on fisheye undistortion.

        Uses manually pre-loaded calibration (undistort_dim/_k/_d) when
        present, otherwise looks up a calibration for the current size.
        """
        self.undistort_balance = balance
        self.undistort_dim2 = dim2
        self.undistort_dim3 = dim3
        # allow the caller to load up the required parameters manually
        # BUG FIX: `!= None` on numpy calibration arrays produces an
        # element-wise boolean array whose truth value is ambiguous
        # (raises ValueError); use identity tests against None.
        if (self.undistort_dim is not None and self.undistort_k is not None
                and self.undistort_d is not None):
            self.undistort = True
        else:
            fK, fD = get_fisheye(self.width, self.height)
            if fK is not None and fD is not None:
                self.undistort_dim = (self.width, self.height)
                self.undistort_k = fK
                self.undistort_d = fD
                self.undistort = True
            else:
                self.undistort = False
        if self.undistort:
            self.undistorter = FisheyeUndistorter(self.undistort_dim,
                                                  self.undistort_k,
                                                  self.undistort_d,
                                                  bal=self.undistort_balance,
                                                  dim2=self.undistort_dim2,
                                                  dim3=self.undistort_dim3)
            self.warper = None  # reset the warper

    def disable_undistort(self):
        """Turn off fisheye undistortion."""
        self.undistort = False
        self.warper = None  # reset the warper

    def enable_warp(self, horizon=0.0, angle=45, vstretch=1.8):
        """Enable perspective warping; the warper itself is built lazily
        in post_process_image from the actual frame size."""
        self.warp = True
        self.warp_horizon = horizon
        self.warp_angle = angle
        self.warp_vstretch = vstretch
        self.warper = None

    def disable_warp(self):
        """Disable perspective warping."""
        self.warp = False
        self.warper = None

    def enable_crop(self, x1, y1, x2=None, y2=None, width=None, height=None):
        """Enable cropping.

        Give exactly one of (x2, width) and exactly one of (y2, height).

        Raises:
          ValueError: when an axis has both or neither of its extent args.
        """
        self.crop_x1 = x1
        self.crop_y1 = y1
        # Identity comparisons with None instead of ==/!= (PEP 8; also
        # safe should any argument ever be an array-like).
        if ((x2 is not None and width is not None)
                or (x2 is None and width is None)
                or (y2 is not None and height is not None)
                or (y2 is None and height is None)):
            self.crop_x1 = None
            self.crop_y1 = None
            raise ValueError("Too many or not enough arguments for cropping")
        else:
            if x2 is not None:
                self.crop_x2 = x2
            else:
                self.crop_x2 = x1 + width
            if y2 is not None:
                self.crop_y2 = y2
            else:
                self.crop_y2 = y1 + height

    def disable_crop(self):
        """Disable cropping."""
        self.crop_x1 = None
        self.crop_y1 = None
        self.crop_x2 = None
        self.crop_y2 = None

    def post_process_image(self, img):
        """Apply, in order: fisheye undistortion, perspective warp, crop."""
        if self.undistort and self.undistorter is not None:
            img = self.undistorter.undistort_image(img)
        if self.warp:
            if self.warper is None:
                # Lazily build the warper from the actual frame size.
                self.warper = PerspectiveUndistorter(
                    img.shape[1],
                    img.shape[0],
                    horizon=self.warp_horizon,
                    angle=self.warp_angle,
                    vstretch=self.warp_vstretch)
            img = self.warper.undistort_image(img)
        if (self.crop_x1 is not None and self.crop_y1 is not None
                and self.crop_x2 is not None and self.crop_y2 is not None):
            img = img[self.crop_y1:self.crop_y2, self.crop_x1:self.crop_x2]
        return img