Example No. 1
def videoStream():
	## PiVideoStream captures frames continuously in a separate thread
	# Keeping the I/O in a separate thread keeps our main loop uninterrupted and achieves higher FPS
	encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
	vs = PiVideoStream().start()
	time.sleep(1.0)
	print("Capturing video...")
	#keep reading frames
	try:
		while True:
			frame = vs.read()
			#compress image - allows for faster live stream
			ret, frame = cv2.imencode('.jpg',frame, encode_param)

			#View stream on RPI HDMI
			#cv2.imshow("frame", frame)
			#cv2.waitKey(1) & 0xFF

			#Now serialize the data with pickle
			data = pickle.dumps(frame)
			#send the message length first, then the data (note: "L" packs to the platform's native size; "=L" would pin it to 4 bytes)
			message_size = struct.pack("L", len(data))
			#SEND msg length + DATA
			videoSocket.sendall(message_size + data)
	except:
		vs.stop()
		videoSocket.close()
		commSocket.close()
		exit()
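Example No. 1 streams each JPEG as a length-prefixed pickle over a TCP socket. For reference, a minimal receiver sketch for the other end of that protocol, assuming both ends run on the same platform (so the native "L" struct size agrees) and that conn is an already-accepted socket; the function and window names are illustrative, not part of the original:

import pickle
import struct

import cv2

def receive_frames(conn):
    # read length-prefixed pickled JPEG frames from a connected socket
    payload_size = struct.calcsize("L")  # must match the sender's pack format
    data = b""
    while True:
        # read the fixed-size length header
        while len(data) < payload_size:
            data += conn.recv(4096)
        msg_size = struct.unpack("L", data[:payload_size])[0]
        data = data[payload_size:]
        # read the pickled JPEG body
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data, data = data[:msg_size], data[msg_size:]
        # undo pickle, then JPEG-decode back into a BGR image
        jpeg = pickle.loads(frame_data)
        frame = cv2.imdecode(jpeg, cv2.IMREAD_COLOR)
        cv2.imshow("remote", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break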
Example No. 2
class Camera(object):
    def __init__(self):
        if cv2.__version__.startswith('2'):
            PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        else:  # OpenCV 3.x and 4.x share these constant names
            PROP_FRAME_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT

#        self.video = cv2.VideoCapture(0)
        self.video = PiVideoStream().start()

        #self.video = cv2.VideoCapture(1)
        #self.video.set(PROP_FRAME_WIDTH, 640)
        #self.video.set(PROP_FRAME_HEIGHT, 480)
#        self.video.set(PROP_FRAME_WIDTH, 320)
#        self.video.set(PROP_FRAME_HEIGHT, 240)
    
    def __del__(self):
        self.video.stop()
    
    def get_frame(self):
        while True:
            try:
                image = self.video.read()
                ret, jpeg = cv2.imencode('.jpg', image)
                return jpeg.tobytes()
            except Exception:
                # read() can return None while the camera warms up; retry
                continue
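Camera classes like this one, whose get_frame() returns JPEG bytes, are typically consumed by a Flask MJPEG route. A minimal consumer sketch, assuming Flask is installed and the Camera class above is importable; the route name and generator are illustrative:

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # multipart/x-mixed-replace: emit one JPEG part per frame, forever
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')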
Example No. 3
class QRDetector(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(400, 304), framerate=3).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        decoded_objs = self.decode(frame)
        # Draw the positions of the recognized QR codes
        frame = self.draw_positions(frame, decoded_objs)

        num = len(decoded_objs)

        cv2.putText(frame, 'Num: {}'.format(num), (15, 30),
                    cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 255, 255), 1)

        return frame

    def decode(self, frame):
        decoded_objs = pyzbar.decode(frame, scan_locations=True)
        for obj in decoded_objs:
            print(datetime.now().strftime('%H:%M:%S.%f'))
            print('Type: ', obj.type)
            print('Data: ', obj.data)

        return decoded_objs

    def draw_positions(self, frame, decoded_objs):
        for obj in decoded_objs:
            left, top, width, height = obj.rect
            frame = cv2.rectangle(frame, (left, top),
                                  (left + width, height + top), (0, 255, 0), 2)
            data = obj.data.decode('utf-8')

        return frame
Example No. 4
class PersonDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(640, 480)).start()
        self.flip = flip
        self.last_uploaded = datetime.now()
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        # PersonDetector does not flip the frame
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        persons = 0
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            # confidence of the detected object
            if confidence < 0.5:
                continue

            idx = int(detections[0, 0, i, 1])
            # class index 15 is "person"
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = '{}: {:.2f}%'.format('Person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)
            persons += 1

        if persons > 0:
            timestamp = datetime.now()
            if (timestamp - self.last_uploaded).seconds >= 30:
                cv2.imwrite("image.jpg", frame)
                print('Uploading...')
                upload(persons)
                self.last_uploaded = timestamp
                print('Finished.')

        return frame
Example No. 5
class Camera(object):
    def __init__(self, calib_img, resolution, framerate):
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
        calib = undistort(cv2.imread(calib_img))
        self.transform = birdseye_transform(calib)

    def start(self):
        self.stream.start()

    def stop(self):
        self.stream.stop()

    def get_line(self):
        frame = self.stream.read()
        test = undistort(frame)
        bird = birdseye(test, transform=self.transform)

        u = u_channel(bird)

        filtered = sobel(u)

        thresh = threshold(filtered)

        lines, line_coeff = detect_lines(thresh)
        # print("line:", p)
        # print("curvature: {}, slope: {}, offset: {}".format(*line_coeff))
        return line_coeff
Example No. 6
def main():
    targetFramerate = 20

    sleepLength = 1/targetFramerate
    vs = PiVideoStream(resolution=(640,480), vflip=True).start()
    time.sleep(2.0)
    fps = FPS().start()
    # loop over some frames...this time using the threaded stream
    capture = True
    try:
        while capture:
            frame = vs.read()
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            fps.update()
            if key == ord('q'):
                capture = False
            # time.sleep(sleepLength)

        # on break
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    finally:
        # do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
Example No. 7
def detect_face():
    #camera = PiCamera()
    #camera.resolution = (640, 480)
    #camera.framerate = 32
    #rawCapture = PiRGBArray(camera, size = (640, 480))
    #stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    #cap = cv2.VideoCapture(0)
    cap = PiVideoStream().start()
    time.sleep(2.0)
    face_detected = False
    while not face_detected:
        # Read the frame
        #_, img = cap.read()
        #        print('looking for faces')
        frame = cap.read()
        frame = imutils.resize(frame, width=400)
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
            #            print("face detected")
            face_detected = True

    # When everything done, release the capture
    cap.stop()
    cv2.destroyAllWindows()
    return face_detected
Example No. 8
class QRDetector(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # stub: return the frame unchanged so get_frame() can encode it
        return frame

    def decode(self, frame):
        pass

    def draw(self, frame, decoded_objs):
        pass
Example No. 9
class VideoCamera(object):
    def __init__(self):
        self.vs = PiVideoStream().start()
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def get_frame(self):
        frame = self.vs.read()
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        found_objects = False
        frame = self.vs.read().copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects)
Example No. 10
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # Instantiate OpenCV's face classifier (CascadeClassifier) here

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # Convert frame (a color image) to grayscale with OpenCV

        # Run the grayscale image through the instantiated face classifier's
        # detectMultiScale method to get the coordinates of detected faces

        # Using the returned coordinates, draw the face positions
        # onto frame with cv2.rectangle

        # Return frame
        return frame
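process_image above is deliberately left as a skeleton. A minimal completion sketch, assuming the opencv-python wheel, which exposes its bundled cascades via cv2.data.haarcascades; the full FaceDetector examples further down do the same with an explicit file path:

import cv2

# assumed: a Haar cascade bundled with the opencv-python wheel
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def process_image(self, frame):
    # grayscale conversion, detection, then one rectangle per detected face
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return frame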
Example No. 11
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = None
        self.flip = flip

    def __del__(self):
        if self.vs is not None:
            self.vs.stop()

    def start(self, flip=False):
        if self.vs is None:
            self.vs = PiVideoStream().start()
            self.flip = flip
            time.sleep(2.0)

    def stop(self):
        if self.vs is not None:
            self.vs.stop()
            self.vs = None

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        if self.vs is None:
            return
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
Example No. 12
class ThreadVideoStream:
    def __init__(self, resolution=(320, 240), framerate=32):

        from imutils.video.pivideostream import PiVideoStream
        # initialize the picamera stream and allow the camera
        # sensor to warmup
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)

    def start(self):
        from threading import Thread

        # run the stream's capture loop in a daemon thread and return
        # self so the usual chained-call pattern works
        # (Thread.start() itself returns None, so returning it would break callers)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # run the underlying stream's frame-grabbing loop (blocks until stopped)
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
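A minimal driver sketch for this wrapper, assuming a Raspberry Pi with picamera and imutils installed, and start() returning self so the chained call works:

import time
import cv2

vs = ThreadVideoStream(resolution=(320, 240), framerate=32).start()
time.sleep(2.0)  # let the camera sensor warm up
try:
    while True:
        frame = vs.read()
        if frame is None:
            continue  # no frame captured yet
        cv2.imshow("preview", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    vs.stop()
    cv2.destroyAllWindows()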
Example No. 13
class VideoCamera(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream((640, 480)).start()
        self.vs.camera.vflip = True
        #        self.vs.camera.framerate = 30
        #self.vs.camera.exposure_mode = 'night'
        #self.vs.camera.vflip = True
        self.flip = flip
        self.vs.camera.start_preview()

        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        #	return frame
        if not self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifiers):
        found_objects = False
        xLocation = 0
        yLocation = 0
        frame = self.flip_if_needed(self.vs.read()).copy()
        #	print(frame[1].size
        height, width = frame.shape[:2]
        #print width
        #print height
        #	small = cv2.resize(frame, (width/2, height/2), interpolation = cv2.INTER_AREA)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        classifier_used = -1
        objects = []  # ensure defined even if no classifier matches
        for i in range(len(classifiers)):
            objects = classifiers[i].detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(64, 64),
                flags=cv2.CASCADE_SCALE_IMAGE)
            if len(objects) > 0:
                found_objects = True
                print('found with %s' % i)
                classifier_used = i
                break

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.circle(frame, (x + w // 2, y + h // 2), 10, (0, 255, 0), 2)  # integer centre for OpenCV
            xLocation = x
            yLocation = y

        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects, xLocation, yLocation,
                classifier_used, width, height)
Example No. 14
def Phase_2_QR_Match():
    print("PHASE 2: QR MATCH")
    vs = PiVideoStream().start()                 
    time.sleep(2.0)
    while True:
        global MATCH_QR_CODE
        GPIO.output(4, True)

        frame = vs.read()
        qr_display(frame, decode(frame, qr_data_match_len)[0])
        if len(qr_data_match_len) > 0:
            qr_data_match = str(qr_data_match_len[len(qr_data_match_len) - 1])
            qr_data_match_split = qr_data_match.replace("'", '').replace('b', "").split(",")
            qr_data_replace = str(qr_data).replace("'", '').replace('b', "")
            for i in range(len(qr_data_match_split)):
                if qr_data_replace in qr_data_match_split[i]:
                    print("{0} is in {1}".format(qr_data_match_split[i], qr_data_replace))  # data match
                    MATCH_QR_CODE = True
                    break
                else:
                    print("{0} no match to {1} line follower".format(qr_data_match_split[i], qr_data_replace))  # no match
                    print("forward")
        if MATCH_QR_CODE:
            break
            
        cv2.imshow('frame', frame)
        cv2.waitKey(10)
    vs.stop()
    GPIO.output(4, False)
Example No. 15
class VideoCamera(object):
    def __init__(self, flip=False):
        self.mydb = mysql.connector.connect(host="#################",
                                            user="******",
                                            passwd="#################",
                                            database="################")
        self.carsarray = []
        self.vs = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(15, 15),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        numofcars = 0
        for (x, y, w, h) in objects:
            numofcars = numofcars + 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #print (numofcars)
        print("Analyzing...")
        self.carsarray.append(numofcars)
        cv2.imshow('video', frame)
        cv2.waitKey(50)
        if len(self.carsarray) > 9:
            avginimage = math.ceil(sum(self.carsarray) / 10)
            print(avginimage)
            mycursor = self.mydb.cursor()
            # parameterized query avoids string interpolation into SQL
            mycursor.execute("UPDATE cars SET Cars = %s", (avginimage,))
            self.mydb.commit()
            self.carsarray = []
        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects)
Example No. 16
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame
Example No. 17
class PersonDetector(object):
    def __init__(self, flip=True):
        self.last_upload = time.time()
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 0)
        net.setInput(blob)
        detections = net.forward()

        count = 0
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            if confidence < 0.2:
                continue

            idx = int(detections[0, 0, i, 1])
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = '{}: {:.2f}%'.format('Person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)
            count += 1

        if count > 0:
            print('Count: {}'.format(count))
            elapsed = time.time() - self.last_upload
            if elapsed > 60:
                cv2.imwrite('hello.jpg', frame)
                upload()
                self.last_upload = time.time()

        return frame
Example No. 18
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # Instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )
        self.eye_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_eye.xml')

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # Convert frame (a color image) to grayscale with OpenCV
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the grayscale image through the face classifier's
        # detectMultiScale method to get the coordinates of detected faces
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)

        # Build the text to display
        iSize = len(faces)
        # iRow = frame.shape[0]
        # iCol = frame.shape[1]
        font = cv2.FONT_HERSHEY_SIMPLEX

        # Using the returned coordinates, draw the face positions
        # onto frame with cv2.rectangle
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (128, 128, 0), 2)
            face = frame[y:y + h, x:x + w]
            face_gray = gray[y:y + h, x:x + w]
            eyes = self.eye_cascade.detectMultiScale(face_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0),
                              2)

        # Write the text
        cv2.putText(frame, str(iSize), (10, 100), font, 4, (128, 0, 255), 3,
                    cv2.LINE_AA)
        # Return frame
        return frame
Example No. 19
class VideoCamera(object):
    def __init__(self, flip = False):
        self.vs = PiVideoStream(resolution=(480 , 320),framerate=32).start()  #resolution and framerate
        self.flip = flip
        time.sleep(0.1)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self, classifier):
        frame = self.flip_if_needed(self.vs.read()).copy() 
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(
            gray,
            scaleFactor=1.5,
            minNeighbors=4,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):        
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy() 
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(
            gray,
            scaleFactor=1.5,
            minNeighbors=4,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        #print('called = ', found_objects )
        return (jpeg.tobytes(), found_objects,frame)
Example No. 20
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.vs.camera.rotation = 270
        self.flip = flip
        self.conf = json.load(open("conf.json"))
        # note: set after start(), so these only add attributes on the stream
        # object; the camera itself was configured in PiVideoStream.__init__
        self.vs.framerate = self.conf["fps"]
        self.vs.resolution = self.conf["resolution"]
        self.client = boto3.resource('s3')
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        timestamp = datetime.datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S.%f%p")
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.05,
                                              minNeighbors=5,
                                              minSize=(60, 60),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #t = TempImage()
            #cv2.imwrite(t.path, frame[y:y+h,x:x+w])
            #self.client.meta.client.upload_file(t.path, self.conf["s3bucket_faces"], t.key )
            #t.cleanup()

        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        #cv2.imshow("feed",frame)
        #cv2.waitKey(1000)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects, frame)
Example No. 21
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # Instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # Convert frame (a color image) to grayscale with OpenCV
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the grayscale image through the face classifier's
        # detectMultiScale method to get the coordinates of detected faces
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)

        # Using the returned coordinates, draw the face positions
        # onto frame with cv2.rectangle
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
            cv2.putText(frame,
                        'Happy!', (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 255, 255),
                        thickness=2)

        # cv2.putText(frame, '顔の数:' + str(len(faces)), (100,100), cv2.FONT_HERSHEY_SIMPLEX, 10, (255,255,255),3,cv2.CV_AA )
        # cv2.putText(frame, "SAMPLE_TEXT", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 10,  7, (0, 0, 255), 3, cv2.CV_AA)
        cv2.putText(frame,
                    str(len(faces)) + ' Happy people', (20, 40),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (255, 255, 255),
                    thickness=2)

        # Return frame
        return frame
Example No. 22
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        self.argvs = sys.argv
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )
        if len(self.argvs) >= 2 and "eyes" in self.argvs:
            self.eye_cascade = cv2.CascadeClassifier(
                'camera/processor/model/haarcascades/haarcascade_eye.xml')

        # Instantiate OpenCV's face classifier (CascadeClassifier)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            if len(self.argvs) >= 2 and "eyes" in self.argvs:
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]
                eyes = self.eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                  (0, 255, 0), 2)
        return frame
Example No. 23
class VideoStreamer(object):
    def __init__(self, flip=False, resolution=(640, 480)):
        self.video_stream = PiVideoStream(resolution=resolution,
                                          framerate=8).start()
        time.sleep(2.0)

    def __del__(self):
        self.video_stream.stop()

    def get_frame(self):
        frame = self.video_stream.read()
        #ret, jpg = cv2.imencode('.jpg', frame)  # compress
        return frame
Example No. 24
def mainLoop():
    vs = PiVideoStream().start()
    time.sleep(2)
    baseFrame = None
    frameTimer = time.time()
    while True:
        frame = vs.read()
        HSV = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
        white_mask = cv2.inRange(HSV, np.array([0, 0, 40]), np.array([360, 111, 255]))  # OpenCV hue tops out at 179, so 360 leaves hue unconstrained
        whiteSmooth = cv2.GaussianBlur(white_mask, (3, 3), 0)
        if baseFrame is None:
            baseFrame = whiteSmooth
        frameDelta = cv2.absdiff(whiteSmooth, baseFrame)
        thresh = cv2.threshold(frameDelta, 150, 255, cv2.THRESH_BINARY)[1]
        cv2.imshow('white_mask', whiteSmooth)
        cv2.imshow('thresh', thresh)
        cv2.imshow('frameDelta',frameDelta)
        cv2.imshow('baseFrame', baseFrame)
        #cv2.imshow('frame',frame)
        cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # handles OpenCV 2/3/4 return shapes
        im = np.copy(frame)
        cv2.drawContours(im,cnts, -1, (0,255,0), 1)
        
        
        key = cv2.waitKey(1) & 0xFF
        
        if (len(cnts) == 0 and time.time()- frameTimer > 3) or time.time() - frameTimer > 8:
            baseFrame = whiteSmooth
            print("New reference frame")
            frameTimer = time.time()

        
        for c in cnts:
            if cv2.contourArea(c) < 50:
                 continue
            print(cv2.contourArea(c))
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            cv2.circle(im, (cX, cY), 7, (255, 255, 255), -1)
        
        cv2.imshow('im',im)
        
        if key == ord("q"):
            break
    
    vs.stop()
    cv2.destroyAllWindows()
Example No. 25
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # Instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # Convert frame (a color image) to grayscale with OpenCV
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the grayscale image through the face classifier's
        # detectMultiScale method to get the coordinates of detected faces
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)

        font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
        if len(faces) == 0:
            text = 'No face'
        elif len(faces) == 1:
            text = 'A face'
        else:
            text = '{} faces :)'.format(len(faces))

        cv2.putText(frame, text, (10, 100), font, 4, (80, 129, 225), 2)

        # Using the returned coordinates, draw the face positions
        # onto frame with cv2.rectangle

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)

        # Return frame
        return frame
Example No. 26
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # Instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )

    def __del__(self):
        self.vs.stop()

    def get_frame(self):
        # start time (time.clock was removed in Python 3.8)
        start = time.perf_counter()

        # build up drawing layers on the frame
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)

        # measure processing time and draw how long this frame took
        # (before encoding, so the text actually appears in the output)
        get_image_time = int((time.perf_counter() - start) * 1000)
        cv2.putText(frame,
                    str(get_image_time) + "ms", (10, 10), 1, 1, (0, 255, 0))

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def process_image(self, frame):
        # Convert frame (a color image) to grayscale with OpenCV
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the grayscale image through the face classifier's
        # detectMultiScale method to get the coordinates of detected faces
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)

        # Using the returned coordinates, draw the face positions
        # onto frame with cv2.rectangle
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Return frame
        return frame
Example No. 27
class MotionDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(400, 304), framerate=16).start()
        self.flip = flip
        time.sleep(2.0)

        self.avg = None

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        frame = imutils.resize(frame, width=min(500, frame.shape[1]))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print('Starting background model...')
            self.avg = gray.copy().astype('float')
            return frame

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # handles OpenCV 2/3/4 return shapes

        for c in cnts:
            if cv2.contourArea(c) < 5000:
                continue

            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        return frame
Example No. 28
class RPiCamera(object):
    def __init__(self):
        self.stream = PiVideoStream().start()
        sleep(2.0)

    def __del__(self):
        self.stream.stop()

    def get_frame(self):

        frame = self.stream.read()
        result, jpeg = cv2.imencode('.jpg', frame)

        return jpeg.tobytes()
Example No. 29
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.vs.camera.rotation = 270
        self.flip = flip
        # note: set after start(), so these only add attributes on the stream
        # object; the camera itself was configured in PiVideoStream.__init__
        # (and resolution expects a (width, height) tuple, not 90)
        self.vs.framerate = 10
        self.vs.resolution = 90
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    #Grab an image and rotate to ensure it is in the proper orientation
    def get_object(self, classifier):
        timestamp = datetime.datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S.%f%p")
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        newframe = imutils.rotate(frame, -90)
        #img = cv2.imread('photo-1507003211169-0a1dd7228f2d.jpg')
        gray = cv2.cvtColor(newframe, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30))

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            frame2 = cv2.rectangle(newframe, (x, y), (x + w, y + h),
                                   (255, 0, 0), 2)
            #t = TempImage()
            #t.cleanup()

        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        #cv2.imshow("feed",frame)
        #cv2.waitKey(1000)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects, newframe)
Example No. 30
def run_main():
	# created a "threaded" video stream, allow the camera sensor to warmup
	vs = PiVideoStream((640,480)).start()
	time.sleep(2.0)
	
	# read the first frame of the video
	frame = vs.read()
	
	# Set the ROI
	c, r, w, h = 200, 100, 70, 70
	track_window = (c, r, w, h)
	
	# Create mask and normalized histogram
	roi = frame[r:r+h, c:c+w]
	hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
	mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180.,255.,255.)))
	roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
	cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
	
	# Setup the termination criteria: either 80 iterations or move by at least 1 pt
	term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
	
	while True:
		frame = vs.read()
	
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
		
		# apply meanshift to get the new location
		ret, track_window = cv2.meanShift(dst, track_window, term_crit)
		
		# apply camshift to get the new location
		#ret, track_window = cv2.CamShift(dst, track_window, term_crit)

		# draw it on image
		x, y, w, h = track_window
		cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
		cv2.putText(frame, 'Tracked M', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
		
		cv2.imshow('Tracking', frame)
		
		# if the 'q' key is pressed, break from the loop
		key = cv2.waitKey(1) & 0xFF
		if key == ord("q"):
			break
			
	vs.stop()
	cv2.destroyAllWindows()
Example No. 31
def startCameraSequence():

    resolution = (1200, 1008)  # Max resolution (3280, 2464), min resolution (64, 64)
    framerate = 10

    vs = PiVideoStream(resolution, framerate).start()
    time.sleep(2.0)
    fps = FPS().start()
    start_time = time.time()
    wait_again = True

    #Loop over frames using threaded stream
    while wait_again:

        #Gets timestamp
        now = datetime.datetime.now()
        now_print = now.strftime("%H:%M:%S.%f")

        #Reads the current frame at capture thread
        frame = vs.read()

        #Writes the frame into the folder with a timestamped filename
        #Replace "/home/pi/Desktop/camera_test/" with the desired saving folder
        cv2.imwrite("/home/pi/Desktop/camera_test/" + now_print + ".jpg",
                    frame)

        #Update FPS counter
        fps.update()

        pressed_again = GPIO.input(18)
        if (pressed_again):
            wait_again = False

    #Stop the timer and display FPS information
    fps.stop()
    vs.stop()

    #Updates time taking pictures
    end_time = time.time()
    elapsed = end_time - start_time

    #Writes the elapsed time of capture
    #Replace "/home/pi/Desktop/camera_test/" with the desired saving folder
    f = open("/home/pi/Desktop/camera_test/sequenceResults.txt", "w+")
    f.write("Done. Elapsed time (s): " + str(elapsed))
    f.close()
Example No. 32
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW, config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma, config.YELLOW_CHROMA_LOW, config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)
            
            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES, config.HOUGH_ROT_RES, config.HOUGH_VOTES, config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1,y1,x2,y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2-y1, x2-x1))
                    if config.MIN_LINE_ANGLE < abs(angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1,y1), (x2,y2), (0,0,255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20), 10, (0,0,255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX, config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)    
        Y = 255.0 / (B + G + R)
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b,g,r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
Example No. 33
# created a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()
 
# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
 
    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
 
    # update the FPS counter
    fps.update()
 
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
 
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()