Example #1
def main():
    print('Initializing HAT')
    hat.servo_enable(1, False)
    hat.servo_enable(2, False)
    hat.light_mode(hat.WS2812)
    hat.light_type(hat.GRBW)
    print('Initializing camera')
    with picamera.PiCamera() as camera:
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image right side up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        sleep(1)  # camera warm-up time
        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '',
            WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)
        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)
        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        print('Starting recording')
        camera.start_recording(output, 'yuv')
        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Disabling servos')
            hat.servo_enable(1, False)
            hat.servo_enable(2, False)
            hat.clear()
            hat.show()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()
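The main() above is only the entry point: StreamingHttpServer, StreamingWebSocket, BroadcastOutput and BroadcastThread are defined elsewhere in the same file, and the snippet leans on module-level imports and constants. A minimal sketch of that setup, inferred from the picamera/ws4py calls it makes; the constant values are illustrative assumptions, not taken from the original.

from time import sleep
from threading import Thread
from wsgiref.simple_server import make_server

import picamera
import pantilthat as hat
from ws4py.server.wsgirefserver import (
    WSGIServer, WebSocketWSGIHandler, WebSocketWSGIRequestHandler)
from ws4py.server.wsgiutils import WebSocketWSGIApplication

# configuration constants referenced by main(); values are assumptions
WIDTH, HEIGHT = 640, 480
FRAMERATE = 24
VFLIP = HFLIP = False
WS_PORT = 8084
HTTP_PORT = 8082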
Example #2
def test_lights():
    pantilthat.light_mode(pantilthat.WS2812)
    pantilthat.light_type(pantilthat.GRBW)

    # cycle: off, primaries, secondaries, white (0.5 s per step)
    colours = [
        (0, 0, 0, 0),        # off
        (100, 0, 0, 0),      # red
        (0, 100, 0, 0),      # green
        (0, 0, 100, 0),      # blue
        (100, 100, 0, 0),    # yellow
        (0, 100, 100, 0),    # cyan
        (100, 0, 100, 0),    # magenta
        (100, 100, 100, 0),  # white
    ]

    try:
        while True:
            for r, g, b, w in colours:
                pantilthat.set_all(r, g, b, w)
                pantilthat.show()
                time.sleep(0.5)
    except KeyboardInterrupt:
        pantilthat.clear()
        pantilthat.show()
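Run standalone, the test needs only its two imports; a minimal harness, assuming the script is executed directly:

# harness for test_lights(); imports inferred from the calls above
import time
import pantilthat

if __name__ == '__main__':
    test_lights()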
Example #3
    def update(self, next_frame, frameCenter):
        initial_h, initial_w, depth = next_frame.shape
        in_frame = cv2.resize(next_frame, (self.w, self.h))
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
        self.exec_net.start_async(request_id=self.next_request_id,
                                  inputs={self.input_blob: in_frame})

        rects = []
        if self.exec_net.requests[self.cur_request_id].wait(-1) == 0:
            # Parse detection results of the current request
            res = self.exec_net.requests[self.cur_request_id].outputs[
                self.out_blob]
            for obj in res[0][0]:
                # keep only detections above the 0.5 confidence threshold
                if obj[2] > 0.5:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    rects.append([xmin, ymin, xmax - xmin, ymax - ymin])

        self.cur_request_id, self.next_request_id = self.next_request_id, self.cur_request_id
        # check to see if a face was found
        if len(rects) > 0:
            # extract the bounding box coordinates of the face and
            # use the coordinates to determine the center of the
            # face
            (x, y, w, h) = rects[0]
            faceX = int(x + (w / 2))
            faceY = int(y + (h / 2))

            # show the horizontal error on the LEDs: red base colour,
            # white pixels mark the direction and size of the offset
            pth.set_all(255, 0, 0)
            if (faceX - frameCenter[0]) > 10:
                pth.set_pixel(0, 255, 255, 255)
            if (faceX - frameCenter[0]) > 30:
                pth.set_pixel(1, 255, 255, 255)
            if (faceX - frameCenter[0]) > 50:
                pth.set_pixel(2, 255, 255, 255)
            if (faceX - frameCenter[0]) < -10:
                pth.set_pixel(7, 255, 255, 255)
            if (faceX - frameCenter[0]) < -30:
                pth.set_pixel(6, 255, 255, 255)
            if (faceX - frameCenter[0]) < -50:
                pth.set_pixel(5, 255, 255, 255)

            pth.show()

            # print("face detected centroid", faceX, faceY)
            # return the center (x, y)-coordinates of the face
            return ((faceX, faceY), rects[0])

        # otherwise no faces were found, so return the center of the
        # frame
        pth.clear()
        pth.show()
        return (frameCenter, None)
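update() ping-pongs between two inference requests: it submits the new frame as next_request_id while collecting results from cur_request_id, then swaps the IDs, so preprocessing overlaps inference. A sketch of the constructor state this implies, using the legacy OpenVINO IECore API; the class name, model path and device are assumptions.

from openvino.inference_engine import IECore

class FaceDetector:  # hypothetical owner of update() above
    def __init__(self, model_xml='face-detection-adas-0001.xml',
                 device='MYRIAD'):
        ie = IECore()
        net = ie.read_network(model=model_xml,
                              weights=model_xml.replace('.xml', '.bin'))
        self.input_blob = next(iter(net.input_info))
        self.out_blob = next(iter(net.outputs))
        # input layout is NCHW, matching the reshape in update()
        self.n, self.c, self.h, self.w = \
            net.input_info[self.input_blob].input_data.shape
        # two requests enable the cur/next ping-pong
        self.exec_net = ie.load_network(network=net, device_name=device,
                                        num_requests=2)
        self.cur_request_id, self.next_request_id = 0, 1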
def signal_handler(sig, frame):
    # print a status message
    print("[INFO] You pressed `ctrl + c`! Exiting...")

    # clear lights
    pth.clear()
    pth.show()

    # exit
    sys.exit()
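The handler only takes effect once it is registered; the tracking example further down does exactly this at startup:

# register the handler for SIGINT (ctrl + c)
import signal
signal.signal(signal.SIGINT, signal_handler)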
def set_light():
    # read the ?set=... query parameter (Bottle-style request object)
    state = request.query.set
    if state == "on":
        pantilthat.set_all(0, 0, 0, 255)  # white channel only
        pantilthat.show()
        return "Light On"
    else:
        pantilthat.clear()
        pantilthat.show()
        return "Light Off"
Example #6
def clear():
    pantilthat.clear()

    def start_camera(self, face_position_X, face_position_Y):
        """
		1. Begin video stream
		2. Extract faces from frames
		3. Display frames with bounding boxes
		4. Update global variables with:
			-> pixel coordinates of the center of the frame
			-> pixel coordinates of the center of the faces
		"""
        # signal trap to handle keyboard interrupt
        signal.signal(signal.SIGINT, signal_handler)

        # start the video stream and wait for the camera to warm up
        # vs = VideoStream(usePiCamera=self.rpi, resolution=self.resolution).start()
        print('Starting Camera')
        if self.rpi:
            vs = VideoStream(usePiCamera=self.rpi,
                             resolution=self.resolution).start()
        else:
            vs = VideoStream(src=1, resolution=self.resolution).start()
        time.sleep(2.0)

        # initialize the object center finder
        face_detector = HaarFaceDetector(
            os.path.join(MODELS_DIRECTORY,
                         'haarcascade_frontalface_default.xml'))

        # initialise the recogniser
        # fr = PiFaceRecognition()

        # start recording
        filename = os.path.join(DATA_DIRECTORY, 'recordings',
                                '{}.avi'.format(time.time()))
        print(filename)
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter(filename, fourcc, 20.0, self.resolution)

        while True:
            # grab the frame from the threaded video stream and flip it
            # vertically (since our camera was upside down)
            frame = vs.read()
            frame = cv2.flip(frame, 0)

            # (H, W) = frame.shape[:2]
            # print('H', H)

            # find the object's location
            object_locations = face_detector.extract_faces(frame)
            people = []  # for setting colour

            # get first face for now
            if object_locations:
                # print('{} faces found.'.format(len(object_locations)))
                (face_position_X.value,
                 face_position_Y.value) = object_locations[0][0]
                # print(object_locations[0][0])
                # extract the bounding box and draw it
                for pos, rect, neighbour, weight in object_locations:
                    (x, y, w, h) = rect
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                    # recogniser part
                    gray_frame = cv2.cvtColor(
                        frame, cv2.COLOR_BGR2GRAY)  # convert to gray

                    person, confidence = PiFaceRecognition.infer_lbph_face_recogniser(
                        gray_frame[y:y + h, x:x + w])
                    people.append(person)
                    cv2.putText(frame, person, (x + 5, y - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2)
                    cv2.putText(frame, str(confidence), (x + 5, y + h - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)

            else:
                print('No faces found.')
                face_position_X.value, face_position_Y.value = (
                    self.frame_center_X, self.frame_center_Y)

            if len(people) > 1:
                # set to orange
                pth.set_all(255, 127, 80, 50)
                pth.show()
            elif 'jai' in people:
                # set to green
                pth.set_all(173, 255, 47, 50)
                pth.show()
            elif 'alleeya' in people:
                # set to purple
                pth.set_all(221, 160, 221, 50)
                pth.show()
            else:
                pth.clear()
                pth.show()

            # display the frame to the screen
            cv2.imshow("Pan-Tilt Face Tracking", frame)
            out.write(frame)
            # press "q" to stop streaming so the writer can be released
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        out.release()
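start_camera() writes to face_position_X.value and face_position_Y.value, the multiprocessing.Value interface: the detector loop publishes the face centre so another process (for example a servo loop) can consume it. A sketch of how a caller might create those shared values; the process wiring is an assumption.

from multiprocessing import Process, Value

face_position_X = Value('i', 0)  # shared face-centre x, updated per frame
face_position_Y = Value('i', 0)  # shared face-centre y

# e.g. run the detector loop in its own process ("tracker" stands in for
# whatever object provides start_camera() and is hypothetical here):
# Process(target=tracker.start_camera,
#         args=(face_position_X, face_position_Y)).start()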
Example #8
    def update(self, frame, frameCenter):
        # convert the frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect all faces in the input frame
        rects = self.detector.detectMultiScale(gray,
                                               scaleFactor=1.05,
                                               minNeighbors=9,
                                               minSize=(30, 30),
                                               flags=cv2.CASCADE_SCALE_IMAGE)

        # check to see if a face was found
        if len(rects) > 0:
            # extract the bounding box coordinates of the face and
            # use the coordinates to determine the center of the
            # face
            (x, y, w, h) = rects[0]
            faceX = int(x + (w / 2))
            faceY = int(y + (h / 2))

            # show the horizontal error on the LEDs: red base colour,
            # white pixels mark the direction and size of the offset
            pth.set_all(255, 0, 0)
            if (faceX - frameCenter[0]) > 10:
                pth.set_pixel(0, 255, 255, 255)
            if (faceX - frameCenter[0]) > 30:
                pth.set_pixel(1, 255, 255, 255)
            if (faceX - frameCenter[0]) > 50:
                pth.set_pixel(2, 255, 255, 255)
            if (faceX - frameCenter[0]) < -10:
                pth.set_pixel(7, 255, 255, 255)
            if (faceX - frameCenter[0]) < -30:
                pth.set_pixel(6, 255, 255, 255)
            if (faceX - frameCenter[0]) < -50:
                pth.set_pixel(5, 255, 255, 255)

            pth.show()

            # print("face detected centroid", faceX, faceY)
            # return the center (x, y)-coordinates of the face
            return ((faceX, faceY), rects[0])

        # otherwise no faces were found, so return the center of the
        # frame
        pth.clear()
        pth.show()
        return (frameCenter, None)
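This variant drives the same LED error display as Example #3 but detects faces with a Haar cascade instead of a neural network. A sketch of the constructor it implies; the class name and cascade path are assumptions.

import cv2
import pantilthat as pth

class ObjCenter:  # hypothetical owner of update() above
    def __init__(self, haar_path='haarcascade_frontalface_default.xml'):
        # pre-trained cascade consumed by detectMultiScale() in update()
        self.detector = cv2.CascadeClassifier(haar_path)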
Example #9
    def do_LIGHT(self, value=0):
        # clamp the brightness to the valid 0-255 channel range
        if value < 0:
            value = 0
        if value > 255:
            value = 255
        pantilthat.clear()
        pantilthat.set_all(value, value, value)
        pantilthat.show()
Example #10
    def do_LIGHT(self, value=0):
        # clamp the brightness to the valid 0-255 channel range
        if value < 0:
            value = 0
        if value > 255:
            value = 255
        pantilthat.clear()
        pantilthat.set_all(value, value, value)
        pantilthat.show()


# http://docs.pimoroni.com/pantilthat/

if __name__ == '__main__':
    pantilthat.light_mode(pantilthat.WS2812)
    pantilthat.light_type(pantilthat.GRBW)
    pantilthat.pan(0)
    pantilthat.tilt(0)
    pantilthat.clear()
    pantilthat.show()


if __name__ == '__main__':
    time.sleep(13)
    # resolution = (1920, 1080)
    resolution = (1024, 768)
    with picamera.PiCamera(resolution=resolution, framerate=25) as camera:
        camera.rotation = 180
        camera.led = True
        output = StreamingOutput()
        camera.start_recording(output, format='mjpeg')
        try:
            StreamingServer(('', 80), WebcamHandler).serve_forever()
        except Exception:
            print(sys.exc_info()[1], file=sys.stderr)
        finally:
            camera.stop_recording()
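StreamingOutput and the StreamingServer/WebcamHandler pair are not shown; the pattern matches the MJPEG streaming recipe in the picamera documentation, so a sketch along those lines (the original classes may differ):

import io
import socketserver
from http import server
from threading import Condition

class StreamingOutput(object):
    def __init__(self):
        self.frame = None
        self.buffer = io.BytesIO()
        self.condition = Condition()

    def write(self, buf):
        if buf.startswith(b'\xff\xd8'):
            # start of a new JPEG frame: publish the previous one
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)

class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    allow_reuse_address = True
    daemon_threads = True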