Example #1
class VideoStream:
    def __init__(self,
                 src=0,
                 isPiCamera=False,
                 resolution=(320, 240),
                 framerate=32):

        if isPiCamera:
            from pivideostream import PiVideoStream

            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        else:
            from usbvideostream import usbVideoStream

            self.stream = usbVideoStream(src, resolution=resolution)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()
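A minimal driver loop for this facade (a sketch only; it assumes the example's `pivideostream`/`usbvideostream` modules are importable and uses OpenCV for display):

import time
import cv2

# grab frames from a USB camera through the VideoStream facade
vs = VideoStream(src=0, isPiCamera=False, resolution=(320, 240))
vs.start()
time.sleep(2.0)  # give the camera sensor time to warm up

while True:
    frame = vs.read()
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vs.stop()
cv2.destroyAllWindows()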
Example #2
class VideoStream:
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(370, 290),
                 framerate=32):
        if usePiCamera:
            from pivideostream import PiVideoStream

            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        else:
            from webcamvideostream import WebcamVideoStream

            self.stream = WebcamVideoStream(src=src)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()
Example #3
def main():
    vs = PiVideoStream()
    vs.start()
    time.sleep(2.0)
    vs.consistent()  # not a stock PiVideoStream method; presumably a custom extension

    setup_trackbars(range_filter)
    cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
    cv2.namedWindow("Thresh", cv2.WINDOW_NORMAL)

    while True:
        image = vs.read()
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(range_filter)
        thresh = cv2.inRange(frame_to_thresh,(v1_min, v2_min, v3_min),(v1_max, v2_max, v3_max))

        cv2.imshow("Original", image)
        cv2.imshow("Thresh", thresh)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
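Neither `setup_trackbars` nor `get_trackbar_values` (nor `range_filter`) appears in this excerpt. A minimal sketch of the two helpers, assuming `range_filter` is a channel string such as "HSV" and building on OpenCV's trackbar API:

import cv2

def callback(value):
    # trackbar callbacks are required by OpenCV but unused here
    pass

def setup_trackbars(range_filter):
    # one window holding a min and a max slider per channel
    cv2.namedWindow("Trackbars", cv2.WINDOW_NORMAL)
    for bound in ["MIN", "MAX"]:
        start = 0 if bound == "MIN" else 255
        for channel in range_filter:
            cv2.createTrackbar("%s_%s" % (channel, bound), "Trackbars",
                               start, 255, callback)

def get_trackbar_values(range_filter):
    # returns (v1_min, v2_min, v3_min, v1_max, v2_max, v3_max)
    values = []
    for bound in ["MIN", "MAX"]:
        for channel in range_filter:
            values.append(
                cv2.getTrackbarPos("%s_%s" % (channel, bound), "Trackbars"))
    return values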
Example #4
class VideoStream:
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32,
                 **kwargs):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages if we are
            # explicitly told to do so -- this helps remove the
            # requirement of `picamera[array]` from desktops or
            # laptops that still want to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate,
                                        **kwargs)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            from webcamvideostream import WebcamVideoStream

            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
Example #5
"""
    Read and display the video
"""
from pivideostream import PiVideoStream
from imutils.video import FPS  # For framerate measurements
import cv2
import time
import numpy as np

# Name of the display window
window_name = 'preview'

# Create the reader thread + setup
vs = PiVideoStream()
vs.camera.video_stabilization = True
# Start the video stream + camera warmup
vs.start()
time.sleep(2.0)

# Create the display window
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

fps = FPS().start()

while True:

    frame = vs.read()
    fps.update()

    cv2.imshow(window_name, frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
Example #6
from pivideostream import PiVideoStream
import datetime
import imutils
import time
import cv2
import numpy as np

death = 0.0
temp = 0
counter = 0

cond = False

# initialize the video stream and allow the camera sensor to warmup
vs = PiVideoStream(resolution=(320, 240), framerate=32)
vs.start()
time.sleep(2.0)

start_time = time.time()

while True:
    elapsed_time = time.time() - start_time
    elap = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))

    onset = time.time()

    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
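The excerpt ends after the resize. A plausible continuation (illustrative only; the original loop body is not shown) overlays the elapsed time computed above and exits on `q`:

    # overlay the elapsed time and show the frame
    cv2.putText(frame, elap, (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Frame", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
vs.stop()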
Example #7
class VideoCamera(object):
    def __init__(self, resolution=(320, 240), framerate=32):
        self.conf = json.load(open("conf.json"))
        self.lt = LocalTime("Baltimore")
        self.avg = None
        self.avg_count = 0
        self.motionCounter = 0
        self.motion_frames = []
        self.x = 0
        self.y = 0
        self.w = 0
        self.h = 0
        self.contour_area = 0
        self.vs = PiVideoStream(resolution, framerate).start()
        time.sleep(self.conf["camera_warmup_time"])

    def hflip(self, hflip=True):
        self.vs.hflip(hflip)

    def vflip(self, vflip=True):
        self.vs.vflip(vflip)

    def rotation(self, angle=0):
        self.vs.rotation(angle)

    def exposure_mode(self, exposure_mode="auto"):
        self.vs.exposure_mode(exposure_mode)

    def iso(self, iso=0):
        self.vs.iso(iso)

    def shutter_speed(self, speed):
        self.vs.shutter_speed(speed)

    def change_framerate(self, framerate=32):
        self.vs.stop(stop_camera=False)
        time.sleep(self.conf["camera_cooldown_time"])
        self.vs.camera.framerate = framerate
        self.vs.shutter_speed(0)
        self.vs.start()
        time.sleep(self.conf["camera_warmup_time"])
        self.avg_count = 0

    def __del__(self):
        self.vs.stop(stop_camera=True)

    def get_frame(self):
        frame = self.vs.read().copy()
        framerate = self.vs.camera.framerate
        # draw the text and timestamp on the frame
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.putText(
            frame, "Motion on: {}; FPS: {}; Contour area: {}".format(
                self.avg_count == self.conf["camera_adjustment_frames"],
                framerate, self.contour_area), (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if self.w > 0:
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self):
        frame = self.vs.read().copy()
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        found_obj = False

        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None or self.avg_count < self.conf[
                "camera_adjustment_frames"]:
            self.avg = gray.copy().astype("float")
            self.avg_count += 1
            if self.avg_count == self.conf["camera_adjustment_frames"]:
                print("[INFO] motion detector live...")
            return (None, False)

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, self.conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            ca = cv2.contourArea(c)
            self.contour_area = ca
            if ca < self.conf["min_area"]:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update found_obj
            (self.x, self.y, self.w, self.h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
            found_obj = True

        # check to see if the room is occupied
        if found_obj:
            print("[INFO] found object!")
            # increment the motion counter
            self.motionCounter += 1
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
            self.motion_frames.append(frame)

            # check to see if the number of frames with consistent motion is
            # high enough
            if self.motionCounter >= self.conf["min_motion_frames"]:
                print("[INFO] occupied!")
                self.motionCounter = 0
                vis = np.concatenate(self.motion_frames, axis=0)
                return (vis, found_obj)

            return (None, False)

        # otherwise, the room is not occupied
        else:
            (self.x, self.y, self.w, self.h) = (0, 0, 0, 0)
            self.contour_area = 0
            self.motionCounter = 0
            self.motion_frames = []
            return (None, False)
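One caveat: the `cnts[0] if imutils.is_cv2() else cnts[1]` idiom used in `get_object` breaks on OpenCV 4, where `findContours` again returns two values. Recent imutils releases ship `grab_contours`, which handles all return layouts; a drop-in sketch:

import cv2
import imutils

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # works on OpenCV 2.4, 3.x and 4.x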
Example #8
class VisionSystem:
    """docstring for visionSystem"""
    def __init__(self, q1, q2):
        self.queue_MAIN_2_VS = q1
        self.queue_VS_2_STM = q2
        self.resolution = (320, 240)
        self.video_stream = PiVideoStream(self.resolution, 60)

        self.settings = {
            'disp': False,
            'dispThresh': False,
            'dispContours': False,
            'dispApproxContours': False,
            'dispVertices': False,
            'dispNames': False,
            'dispCenters': False,
            'dispTHEcenter': False,
            'erodeValue': 0,
            'lowerThresh': 40,
            'working': True,
            'autoMode': False,
            'dispGoal': True
        }

        self.prevStateDisp = self.settings['disp']
        self.prevStateDispThresh = self.settings['dispThresh']

        self.objs = []

        self.classLogger = logging.getLogger('droneNav.VisionSys')

        self.working = True
        self.t = Thread(target=self.update, args=())
        self.t.daemon = True

    def start(self):
        self.classLogger.debug('Starting vision system.')
        self.video_stream.start()
        time.sleep(2)
        self.working = True
        self.t.start()
        return

    def stop(self):
        self.working = False
        self.t.join()
        return

    def update(self):
        while True:
            if self.working is False:
                break

            if not self.queue_MAIN_2_VS.empty():
                self.settings = self.queue_MAIN_2_VS.get()
                self.queue_MAIN_2_VS.task_done()

            frame = self.video_stream.read()
            frame_processed = self.process_frame(frame, self.settings)
            self.detect_shapes(frame, frame_processed)

            if self.settings['disp'] is False and self.prevStateDisp is False:
                pass
            if self.settings['disp'] is True and self.prevStateDisp is False:
                cv2.namedWindow('Frame')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings['disp'] is True and self.prevStateDisp is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Frame', frame)
            elif self.settings['disp'] is False and self.prevStateDisp is True:
                cv2.destroyWindow('Frame')

            if self.settings[
                    'dispThresh'] is False and self.prevStateDispThresh is False:
                pass
            if self.settings[
                    'dispThresh'] is True and self.prevStateDispThresh is False:
                cv2.namedWindow('Processed')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings[
                    'dispThresh'] is True and self.prevStateDispThresh is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Processed', frame_processed)
            elif self.settings[
                    'dispThresh'] is False and self.prevStateDispThresh is True:
                cv2.destroyWindow('Processed')

            if self.settings['dispThresh'] or self.settings['disp']:
                if key == 27:
                    self.video_stream.stop()

            self.prevStateDisp = self.settings['disp']
            self.prevStateDispThresh = self.settings['dispThresh']

            # send objects to state machine
            self.queue_VS_2_STM.put(self.objs)

        cv2.destroyAllWindows()
        self.video_stream.stop()
        self.classLogger.debug('Ending vision system.')

    def process_frame(self, fr, setts):
        """ Takes frame and processes it based on settings. """
        # frame = imutils.resize(frame, width=600)
        # fr = cv2.flip(fr, 0)
        # frame = cv2.copyMakeBorder(frame, 3, 3, 3, 3,
        #                            cv2.BORDER_CONSTANT,
        #                            value=(255, 255, 255))
        frameGray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        frameBlurred = cv2.GaussianBlur(frameGray, (7, 7), 0)
        frameThresh = cv2.threshold(frameBlurred, setts['lowerThresh'], 255,
                                    cv2.THRESH_BINARY_INV)[1]
        frameThresh = cv2.erode(frameThresh,
                                None,
                                iterations=setts['erodeValue'])
        frameThresh = cv2.dilate(frameThresh,
                                 None,
                                 iterations=setts['erodeValue'])
        frameThresh = cv2.copyMakeBorder(frameThresh,
                                         3,
                                         3,
                                         3,
                                         3,
                                         cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0))
        frameFinal = frameThresh

        return frameFinal

    def draw_cntrs_features(self, fr, setts, obj):
        """
        Takes frame, settings, objects list and draws features (contours,
        names, vertices, centers) on frame.
        """
        if setts['dispContours']:
            cv2.drawContours(fr, [obj['contour']], -1, (255, 255, 0), 1)
        if setts['dispApproxContours']:
            cv2.drawContours(fr, [obj['approx_cnt']], -1, (0, 255, 0), 1)
        if setts['dispNames']:
            cv2.putText(fr, obj['shape'] + str(obj['approx_cnt_area']),
                        obj['center'], cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1)
        if setts['dispVertices']:
            for i in range(0, len(obj['verts'])):
                cv2.circle(fr, tuple(obj['verts'][i]), 4, (255, 100, 100), 1)
        if setts['dispCenters']:
            cv2.circle(fr, (obj['center']), 2, (50, 255, 50), 1)

    def detect_shapes(self, frameOriginal, frameProcessed):
        """
        This functiion simplifies the contour, identifies shape by name,
        unpacks vertices, computes area. Then it returns a dictionary with
        all of this data.

        :param c: Contour to be approximated.
        :type c: OpenCV2 contour.
        :returns: dictionary -- shape name, vertices, approximated contour,
        approximated area.
        :rtype: dictionary.
        """

        # #####################################################################
        # FIND CONTOURS
        # #####################################################################
        cnts = cv2.findContours(frameProcessed.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # #####################################################################
        # ANALYZE CONTOURS
        # #####################################################################

        # clear list
        self.objs = []

        for index, c in enumerate(cnts):
            verts = []
            vrt = []

            # #################################################################
            # SIMPLIFY CONTOUR
            # #################################################################
            perimeter = cv2.arcLength(c, True)
            approx_cnt = cv2.approxPolyDP(c, 0.04 * perimeter, True)

            # #################################################################
            # GET CONTOUR AREA
            # #################################################################
            approx_cnt_area = cv2.contourArea(approx_cnt)

            # #################################################################
            # GETTING THE VERTICES COORDINATES
            # #################################################################
            for i in range(0, len(approx_cnt)):
                # iterate over vertices (needs the additional [0] index)
                vrt = []
                for j in range(0, 2):
                    vrt.append(int(approx_cnt[i][0][j]))
                verts.append(vrt)

            # #################################################################
            # NAMING THE OBJECT
            # #################################################################
            # if the shape is a triangle, it will have 3 vertices
            if len(approx_cnt) == 3:
                shape = "triangle"

            # if the shape has 4 vertices, it is either a square or
            # a rectangle
            elif len(approx_cnt) == 4:
                # compute the bounding box of the contour and use the
                # bounding box to compute the aspect ratio
                (x, y, w, h) = cv2.boundingRect(approx_cnt)
                ar = w / float(h)

                # a square will have an aspect ratio that is approximately
                # equal to one, otherwise, the shape is a rectangle
                shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"

            # if the shape is a pentagon, it will have 5 vertices
            elif len(approx_cnt) == 5:
                shape = "pentagon"

            # otherwise, we assume the shape is a circle
            else:
                shape = "circle"

            # #################################################################
            # COMPUTING CENTER
            # #################################################################
            M = cv2.moments(approx_cnt)
            try:
                approx_cnt_X = int((M['m10'] / M['m00']))
                approx_cnt_Y = int((M['m01'] / M['m00']))
            except ZeroDivisionError:
                approx_cnt_X = 0
                approx_cnt_Y = 0

            obj = {
                'shape': shape,
                'verts': verts,
                'approx_cnt': approx_cnt,
                'approx_cnt_area': approx_cnt_area,
                'contour': c,
                'center': (approx_cnt_X, approx_cnt_Y)
            }

            self.objs.append(obj)

            c = c.astype('float')
            c = c.astype('int')

            self.draw_cntrs_features(frameOriginal, self.settings,
                                     self.objs[index])

        if self.settings['dispTHEcenter']:
            cv2.circle(frameOriginal,
                       (self.resolution[0] // 2, self.resolution[1] // 2), 2,
                       (50, 50, 255), 1)

        if self.settings['dispGoal'] and bool(self.objs):
            cv2.line(frameOriginal,
                     (self.resolution[0] // 2, self.resolution[1] // 2),
                     self.objs[0]['center'], (255, 0, 0), 2)
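`VisionSystem` expects two queues it shares with the rest of the application. A minimal wiring sketch, assuming the module-level imports the class relies on (`cv2`, `imutils`, `logging`, `time`, `Thread`, `PiVideoStream`) are in place:

import logging
from queue import Queue

logging.basicConfig(level=logging.DEBUG)

queue_main_to_vs = Queue()  # settings updates from the main loop
queue_vs_to_stm = Queue()   # detected objects towards the state machine

vision = VisionSystem(queue_main_to_vs, queue_vs_to_stm)
vision.start()

try:
    while True:
        objs = queue_vs_to_stm.get()  # blocks until a detection batch arrives
        print("%d object(s) in view" % len(objs))
finally:
    vision.stop()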
Example #9
    """
    camera = PiCamera()
    #camera.resolution = (640, 480)
    #camera.framerate = 32
    rawCapture = PiRGBArray(camera)
    #rawCapture = io.BytesIO()

    time.sleep(0.1)

    """

    # initialize the picamera stream and allow the camera
    # sensor to warmup
    stream = PiVideoStream(resolution=(640, 480), framerate=32)

    vs = stream.start()
    time.sleep(2.0)

    img1 = resize(img1, preferred_dimensions=(640, 480))

    kp = detector.detect(img1, None)

    kp, desc = compute.compute(img1, kp)

    count = 0
    start_time = timer()

    frame = vs.read()

    #for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    #rawCapture.truncate(0)
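`detector`, `compute`, `resize` and the source of `img1` are never defined in this fragment. A sketch of the likely setup using ORB (the choice of ORB, the file name, and the use of `imutils.resize` in place of the custom `resize` are assumptions):

import cv2
import imutils

# one ORB object can serve as both the detector and the descriptor
# extractor, which matches the fragment's detect/compute split
detector = cv2.ORB_create()
compute = detector

img1 = cv2.imread("reference.jpg")      # hypothetical reference image
img1 = imutils.resize(img1, width=640)  # stand-in for the custom resize()

kp = detector.detect(img1, None)
kp, desc = compute.compute(img1, kp)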