Example #1
class VisionManager:
    def __init__(self, vertex_callback=None, direction_callback=None):
        self.camera = None

        if USE_PI:
            self.camera = PiVideoStream(resolution=(640, 480))
        else:
            self.camera = WebcamVideoStream(src=0)

            # Set width/height
            self.camera.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.camera.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        self.frames = []
        self.on_vertex = False

        self.vertex_callback = vertex_callback
        self.direction_callback = direction_callback

        for i in range(N_SLICES):
            self.frames.append(image.Image())

    def start(self):
        self.camera.start()

        return self

    # Used by the Tk GUI: returns the current frame converted to RGB
    def read_rgb(self, detect_lanes=True):
        frame = self.camera.read()
        if frame is None:
            return None

        # OpenCV captures BGR; swap to RGB for display
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if detect_lanes:
            tmp_frame = utils.RemoveBackground(frame)

            directions, contours = utils.SlicePart(tmp_frame, self.frames,
                                                   N_SLICES)

            # time.time() % 2 < 1 during even-numbered epoch seconds, so
            # the vertex check runs roughly every other second
            if int(time.time() % 2) == 0:
                vertex = statistics.stdev(contours[0:5]) >= 30
                if vertex != self.on_vertex and self.vertex_callback:
                    self.vertex_callback(vertex)

                self.on_vertex = vertex

            if not self.on_vertex and self.direction_callback:
                m = statistics.mean(directions[0:6])
                self.direction_callback(m / 250)

            return tmp_frame

        return frame

    def stop(self):
        self.camera.stop()
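
A minimal usage sketch for the class above (hedged: USE_PI, N_SLICES, and the image/utils helpers come from the example's surrounding module; the callback bodies here are illustrative):

def on_vertex(vertex):
    print('vertex:', vertex)

def on_direction(direction):
    print('direction:', direction)  # mean slice offset scaled by 1/250

vm = VisionManager(vertex_callback=on_vertex,
                   direction_callback=on_direction).start()
try:
    frame = vm.read_rgb()  # RGB frame for a Tk canvas, or None
finally:
    vm.stop()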
Example #2
class PersonDetector(object):
    def __init__(self, flip=True):
        self.last_upload = time.time()
        self.vs = WebcamVideoStream().start()
        self.flip = flip
        time.sleep(2.0)
        
    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        # MobileNet-SSD preprocessing: scale factor 0.007843 ~= 1/127.5,
        # 300x300 input, mean subtraction of 127.5
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        count = 0
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            if confidence < 0.2:
                continue

            # class index 15 is "person" in the MobileNet-SSD label map
            idx = int(detections[0, 0, i, 1])
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = 'person: {:.2f}%'.format(confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            count += 1
        
        if count > 0:
            print('Count_person: {}'.format(count))
            elapsed = time.time() - self.last_upload
            # Rate-limit uploads to one every 5 seconds (request() is
            # defined elsewhere in the source)
            if elapsed > 5:
                request(count)
                self.last_upload = time.time()
                
        return frame
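
get_frame() above returns one JPEG-encoded frame, which matches the MJPEG streaming pattern; a hypothetical Flask consumer (Flask and the route are assumptions, not part of the example):

from flask import Flask, Response

app = Flask(__name__)

def gen(detector):
    # Yield a multipart stream of JPEG frames
    while True:
        frame = detector.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(PersonDetector()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')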
Example #3
class VideoStream:
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 useFlirCamera=False,
                 resolution=(320, 240),
                 framerate=32,
                 **kwargs):
        # check to see if the picamera module should be used

        if usePiCamera:
            # only import the picamera packages if we are explicitly
            # told to do so -- this avoids requiring `picamera[array]`
            # on desktops or laptops that still want to use the
            # `imutils` package
            from imutils.video.pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate,
                                        **kwargs)

        elif useFlirCamera:
            from .thermalcamvideostream import ThermalcamVideoStream
            self.stream = ThermalcamVideoStream(src=src)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            from imutils.video.webcamvideostream import WebcamVideoStream
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
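
A usage sketch for the facade above (a sketch, assuming a webcam at index 0 and the warm-up pause the imutils examples typically use):

import time

vs = VideoStream(src=0).start()
time.sleep(2.0)  # let the camera sensor warm up

frame = vs.read()
if frame is not None:
    print(frame.shape)  # e.g. (480, 640, 3) BGR from a typical webcam

vs.stop()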
Example #4
class VideoStream:
    WEBCAM = 0
    PICAMERA = 1
    JETSONCAMERA = 2

    def __init__(self,
                 src=0,
                 camera=JETSONCAMERA,
                 resolution=(1280, 720),
                 framerate=21):
        # check to see if the picamera module should be used
        if camera == self.PICAMERA:
            # only import the picamera packages if we are explicitly
            # told to do so -- this avoids requiring `picamera[array]`
            # on desktops or laptops that still want to use the
            # `imutils` package
            from imutils.video.pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)
        elif camera == self.JETSONCAMERA:
            from .jetsonvideostream import JetsonVideoStream
            self.stream = JetsonVideoStream(resolution=resolution,
                                            framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            from imutils.video.webcamvideostream import WebcamVideoStream
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
Example #5
class CameraStream:
    def __init__(self, src=0, use_pi_camera=False, resolution=(800, 600),
                 framerate=30):
        if use_pi_camera:
            from imutils.video.pivideostream import PiVideoStream
            self.stream = PiVideoStream(resolution=resolution, framerate=framerate)

        else:
            from imutils.video.webcamvideostream import WebcamVideoStream
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()
Example #6
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = WebcamVideoStream(settings.facevideosource).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects)
Example #7
# NOTE: the opening of this example is missing from the source; the lines
# below reconstruct a standard frame-differencing motion-detector loop so
# the fragment runs (hedged: `vs` and `args` are assumed to come from the
# missing setup code).
first_frame = None

while True:
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # Use the first frame as the static background reference
    if first_frame is None:
        first_frame = gray
        continue

    frame_delta = cv2.absdiff(first_frame, gray)
    text = "Unoccupied"

    # Threshold the delta image, then dilate to fill in holes
    thresh = cv2.threshold(frame_delta, 30, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=5)

    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue

        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text = "Occupied"

    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame,
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (0, 0, 255), 1)

    cv2.imshow("Security Feed", frame)

    key = cv2.waitKey(1) & 0xFF

    if key == ord("q"):
        break

vs.stop()
Example #8
class Camera:

    cap = None
    output = None
    enabled = True
    pause = False
    recording = False
    frame_counter = 0
    current_frame = 0
    window_name = 'Main'
    multiple_monitors = False
    monitor_counter = 0
    full_screen = True
    # Assumed values: MAX_MONITORS and MONITOR_LIMIT are referenced in
    # start() and move_window() below but are missing from the source class
    MAX_MONITORS = 1
    MONITOR_LIMIT = 1920  # x offset (px) where a second monitor would begin

    def __init__(self, config_file='config.yml') -> None:
        self.config_file = config_file
        self.load_config()

    def load_config(self) -> None:
        with open(self.config_file) as file:
            config = yaml.safe_load(file)['camera']
        self.debug = config['debug']
        self.number = config['number']
        self.width = config['resolution'][0]
        self.height = config['resolution'][1]
        self.display = tuple(config['display'])
        self.center_x_offset = config['center_x_offset']
        self.center_y_offset = config['center_y_offset']
        self.roi = config['roi']
        self.rpi_camera = config['rpi_camera']
        self.fps = config['fps']
        self.src = config['src']
        self.full_screen = config['full_screen']
        self.brightness = config['brightness']
        self.contrast = config['contrast']
        self.saturation = config['saturation']
        self.sharpness = config['sharpness']
        self.exposure = config['exposure']
        self.exposure_comp = config['exposure_comp']
        self.white_balance = config['white_balance']
        self.awb_mode = config['awb_mode']
        self.threaded = config['threaded']
        self.monitor_display = tuple(config['display'])
        self.hue = 0

    def start(self) -> None:
        logger.info('Starting Camera')
        if self.rpi_camera:
            self.cap = VideoStream(src=self.src,
                                   usePiCamera=True,
                                   resolution=(self.width, self.height),
                                   framerate=self.fps)
            self.cap.start()
            self.set_hardware_rpi()
        elif self.threaded:
            self.cap = WebcamVideoStream(self.src, name='cam')
            self.cap.start()
            self.set_hardware_threaded()
        else:
            self.cap = cv.VideoCapture(self.src)

        # Create the window before resizing or moving it
        cv.namedWindow(self.window_name, cv.WINDOW_NORMAL)
        cv.resizeWindow(self.window_name, *self.monitor_display)

        if self.multiple_monitors:
            self.move_window(self.MONITOR_LIMIT + 1, 0)

        if self.full_screen:
            cv.setWindowProperty(self.window_name, cv.WND_PROP_FULLSCREEN,
                                 cv.WINDOW_FULLSCREEN)

    def stop(self) -> None:
        if self.threaded or self.rpi_camera:
            self.cap.stop()
            self.cap.stream.release()
        else:
            self.cap.release()

    def set_hardware_threaded(self, **kwargs) -> None:
        # Each property below ranges 0-255 in increments of 1
        self.cap.stream.set(cv.CAP_PROP_BRIGHTNESS, self.brightness)
        self.cap.stream.set(cv.CAP_PROP_CONTRAST, self.contrast)
        self.cap.stream.set(cv.CAP_PROP_SATURATION, self.saturation)

    def set_hardware_rpi(self) -> None:
        # valid ISO values: 100, 200, 320, 400, 500, 640, 800
        self.cap.stream.camera.iso = 640
        self.cap.stream.camera.awb_mode = self.awb_mode
        # other exposure modes include 'snow', 'beach', 'spotlight'
        self.cap.stream.camera.exposure_mode = 'off'
        self.cap.stream.camera.exposure_compensation = self.__scale(
            self.exposure_comp, -25, 25)
        self.cap.stream.camera.brightness = self.__scale(
            self.brightness, 0, 100)
        self.cap.stream.camera.contrast = self.__scale(self.contrast, -100,
                                                       100)
        self.cap.stream.camera.saturation = self.__scale(
            self.saturation, -100, 100)
        self.cap.stream.camera.sharpness = self.__scale(
            self.sharpness, -100, 100)

    def __scale(self, x, y0, y1) -> int:
        # Linearly map x from the 0-255 config scale onto [y0, y1]
        x0, x1 = 0, 255
        return int(y0 + ((y1 - y0) / (x1 - x0) * (x - x0)))

    def show(self, frame: np.ndarray = np.ones((400, 400, 1))):
        frame = cv.resize(frame, self.display)
        cv.imshow(self.window_name, frame)
        self.__update_frame_counter()

    def move_window(self, x, y) -> None:
        if self.MAX_MONITORS == 2:
            logging.info(f'Moving window to {x}, {y}')
            cv.moveWindow(self.window_name, x, y)

    def __update_frame_counter(self) -> None:
        if self.frame_counter % 200 == 0:
            logging.info("Keep Alive Camera Message")
        if not self.pause:
            self.frame_counter += 1
        if self.frame_counter >= 1000:
            self.frame_counter = 0

    def read(self):
        if self.threaded:
            return True, self.cap.read()
        return self.cap.read()

    def get_tune_point(self, frame, x, y):
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        return gray[y, x]

    def auto_tune(self, pixel):
        # set contrast, brightness and threshold; check the brightest band
        # first (`pixel >= 220` was unreachable after `pixel >= 180` in the
        # original ordering)
        if pixel <= 120:
            pass
        elif pixel >= 220:
            pass
        elif pixel >= 180:
            pass

    def set_alpha(self, image):
        # Blend the image at 50% opacity over a flat yellow background
        alpha = 0.5
        beta = 1 - alpha
        gamma = 0.0
        background = np.zeros((self.height, self.width, 3), np.uint8)
        background[:, :, 1] = 200  # green channel
        background[:, :, 2] = 200  # red channel
        added_image = cv.addWeighted(background, alpha, image, beta, gamma)
        return added_image
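
load_config() above expects a `camera` section shaped like this (a sketch; every value is an illustrative assumption, and the structure of `roi` depends on the rest of the application):

camera:
  debug: false
  number: 0
  resolution: [1280, 720]
  display: [1280, 720]
  center_x_offset: 0
  center_y_offset: 0
  roi: [0, 0, 1280, 720]
  rpi_camera: false
  fps: 30
  src: 0
  full_screen: true
  brightness: 128
  contrast: 128
  saturation: 128
  sharpness: 128
  exposure: 128
  exposure_comp: 128
  white_balance: auto
  awb_mode: auto
  threaded: true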
Example #9
while True:

    # Process a frame
    frame = stream.read()

    # Increment the frame counter; stop() refreshes the end timestamp so
    # fps.fps() below reports a running average
    fps.update()
    fps.stop()

    cv2.putText(frame, "FPS: {}".format(fps.fps()), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0))

    # TODO: Convert to headless
    # cv2.imshow("Video", frame)
    _, buffer = cv2.imencode('.jpg', frame)
    # TODO: change timezone handling?
    event_timestamp = datetime.now(
        tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    future = client.publish(topic_name,
                            buffer.tobytes(),
                            event_timestamp=event_timestamp)
    message_id = future.result()

    # Check to see if 'q' is pressed to quit
    # NOTE: with imshow disabled above, waitKey never receives key events,
    # so this check is a no-op until the display is re-enabled
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cv2.destroyAllWindows()
stream.stop()
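
The loop assumes setup along these lines (a sketch: only `stream`, `fps`, `client`, and `topic_name` are actually required by the loop; the project and topic names are hypothetical):

from datetime import datetime, timezone

import cv2
from google.cloud import pubsub_v1
from imutils.video import FPS, VideoStream

stream = VideoStream(src=0).start()
fps = FPS().start()
client = pubsub_v1.PublisherClient()
topic_name = client.topic_path('my-project', 'camera-frames')  # hypothetical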
Example #10
class PersonDetector(object):
    def __init__(self, flip=True):
        self.vs = WebcamVideoStream().start()
        #self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(1.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        data_list = []
        data = {'device': os.environ['DEVICE'], 'data': {}}
        data['data']['timestamp'] = str(datetime.datetime.now())
        data['data']['person_id'] = 0
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            if confidence < 0.2:
                continue

            # class index 15 is "person" in the MobileNet-SSD label map
            idx = int(detections[0, 0, i, 1])
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = '{}: {:.2f}%'.format('Person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)

            # refresh the per-detection timestamp inside the schema set up
            # above (the original wrote to a top-level 'timestamp' key)
            data['data']['timestamp'] = str(datetime.datetime.now())
            data['data']['x'] = str((endX + startX) / 2)
            data['data']['y'] = str((endY + startY) / 2)
            print(data)
            data_list.append(copy.deepcopy(data))
            data['data']['person_id'] += 1

        if data_list:
            # Persist the detections off the camera thread; insert() is
            # defined elsewhere in the source
            q = threading.Thread(target=insert, args=(data_list, ))
            q.start()

        return frame
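
For reference, one record queued by process_image() has this shape (illustrative values):

# {'device': 'cam-01',
#  'data': {'timestamp': '2024-01-01 12:00:00.000000',
#           'person_id': 0,
#           'x': '150.0',
#           'y': '120.0'}}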