def main():
    """Entry point: stream the Pi camera over websockets and serve a control UI.

    Brings up, in order: the pan-tilt HAT (servos disabled, NeoPixels
    configured), the camera, a websocket server (video), an HTTP server
    (UI/control), and a broadcast thread that fans camera frames out to
    websocket clients.  Runs until Ctrl-C, then tears everything down in
    the reverse-ish order below.
    """
    print('Initializing HAT')
    # Servos stay disabled until a client requests movement elsewhere;
    # configure the NeoPixel strip type up front.
    hat.servo_enable(1, False)
    hat.servo_enable(2, False)
    hat.light_mode(hat.WS2812)
    hat.light_type(hat.GRBW)
    print('Initializing camera')
    with picamera.PiCamera() as camera:
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image rightside up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        sleep(1)  # camera warm-up time
        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '', WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)
        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)
        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        print('Starting recording')
        # Raw YUV frames go to `output`, whose converter feeds the broadcaster.
        camera.start_recording(output, 'yuv')
        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            # Main thread just watches the recording for errors.
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            # Shutdown order matters: stop the frame source first, then the
            # consumers, then the servers, and only then join their threads.
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Disabling servos')
            hat.servo_enable(1, False)
            hat.servo_enable(2, False)
            hat.clear()
            hat.show()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()
def switch_lights(light_on):
    """Turn the pan-tilt HAT NeoPixels fully on (white) or off.

    light_on -- truthy to light every pixel at full RGBW, falsy to blank them.
    """
    # Was `if light_on == True:` — compare by truthiness, not against True,
    # so any truthy value (1, non-empty string, ...) also switches the lights on.
    if light_on:
        pantilthat.set_all(255, 255, 255, 255)
    else:
        pantilthat.set_all(0, 0, 0, 0)
    # Push the buffered pixel values to the LEDs.
    pantilthat.show()
def update(self, next_frame, frameCenter):
    """Run one step of async OpenVINO face detection and report a face centre.

    next_frame  -- BGR frame of shape (H, W, C) from the camera.
    frameCenter -- (x, y) pixel centre of the frame; returned when no face is
                   found so the tracking error collapses to zero.

    Returns ((faceX, faceY), rect) for the first detection above the 0.5
    confidence threshold, or (frameCenter, None) when nothing was found.
    Also paints the HAT NeoPixels as a crude horizontal-error indicator.
    """
    initial_h, initial_w, depth = next_frame.shape
    # Resize to the network's input size...
    in_frame = cv2.resize(next_frame, (self.w, self.h))
    in_frame = in_frame.transpose( (2, 0, 1)) # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
    # Kick off inference on THIS frame while we read results of the PREVIOUS
    # one below (double-buffered async requests).
    self.exec_net.start_async(request_id=self.next_request_id, inputs={self.input_blob: in_frame})
    rects = []
    if self.exec_net.requests[self.cur_request_id].wait(-1) == 0:
        # Parse detection results of the current request
        res = self.exec_net.requests[self.cur_request_id].outputs[self.out_blob]
        for obj in res[0][0]:
            # Draw only objects when probability more than specified threshold
            if obj[2] > 0.5:
                # obj[3:7] are normalised (xmin, ymin, xmax, ymax); scale back
                # to the ORIGINAL frame size, not the resized network input.
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                rects.append([xmin, ymin, xmax - xmin, ymax - ymin])
    # Swap the two request ids so next call polls the request started above.
    self.cur_request_id, self.next_request_id = self.next_request_id, self.cur_request_id
    # check to see if a face was found
    if len(rects) > 0:
        # extract the bounding box coordinates of the face and
        # use the coordinates to determine the center of the
        # face
        (x, y, w, h) = rects[0]
        faceX = int(x + (w / 2))
        faceY = int(y + (h / 2))
        # Colour the error: red background; pixels 0-2 light up for increasing
        # rightward error, pixels 7-5 for increasing leftward error.
        pth.set_all(255, 0, 0)
        if (faceX - frameCenter[0]) > 10:
            pth.set_pixel(0, 255, 255, 255)
        if (faceX - frameCenter[0]) > 30:
            pth.set_pixel(1, 255, 255, 255)
        if (faceX - frameCenter[0]) > 50:
            pth.set_pixel(2, 255, 255, 255)
        if (faceX - frameCenter[0]) < -10:
            pth.set_pixel(7, 255, 255, 255)
        if (faceX - frameCenter[0]) < -30:
            pth.set_pixel(6, 255, 255, 255)
        if (faceX - frameCenter[0]) < -50:
            pth.set_pixel(5, 255, 255, 255)
        pth.show()
        # return the center (x, y)-coordinates of the face
        return ((faceX, faceY), rects[0])
    # otherwise no faces were found, so return the center of the
    # frame
    pth.clear()
    pth.show()
    return (frameCenter, None)
def set_light_color(color, indices=None):
    """Paint the pan-tilt HAT NeoPixels with the given colour.

    color   -- any value understood by color_to_rgb().
    indices -- iterable of pixel indices to paint; None paints the whole strip.
    """
    r, g, b = color_to_rgb(color)
    if indices is None:
        # No selection given: set every pixel at once.
        pantilthat.set_all(r, g, b)
    else:
        for pixel in indices:
            pantilthat.set_pixel(pixel, r, g, b)
    # Flush the buffered values out to the LEDs.
    pantilthat.show()
def signal_handler(sig, frame):
    """SIGINT handler: announce the interrupt, blank the HAT LEDs, and exit."""
    print("[INFO] You pressed `ctrl + c`! Exiting...")
    # Leave the NeoPixels dark rather than frozen on their last colour.
    pth.clear()
    pth.show()
    # Terminate the process.
    sys.exit()
def flash_lights():
    """Light the four centre NeoPixels (indices 2-5) solid white.

    NOTE(review): the original computed a time-cycling HSV rainbow colour
    here and then immediately overwrote it with plain white, so the rainbow
    code was dead; it has been removed.  Reinstate a
    ``colorsys.hsv_to_rgb(((time.time() * 100) % 360) / 360.0, 1.0, 1.0)``
    computation if the cycling-colour effect is wanted again.
    """
    r, g, b = (255, 255, 255)
    for i in range(2, 6):
        pantilthat.set_pixel(i, r, g, b)
    pantilthat.show()
def set_light():
    """HTTP route handler: switch the HAT light on or off.

    Reads the ``set`` query parameter; ``"on"`` drives every pixel's white
    channel to full, anything else clears the strip.  Returns a short
    status string for the HTTP response body.
    """
    # The original bound the parameter to a local named `set`, shadowing the
    # builtin, and computed an unused `getvar_dict` — both fixed here.
    mode = request.query.set
    if (mode == "on"):
        pantilthat.set_all(0, 0, 0, 255)
        pantilthat.show()
        return ("Light On")
    else:
        pantilthat.clear()
        pantilthat.show()
        return ("Light Off")
def color_set():
    """Read the four colour sliders and apply them to every NeoPixel.

    Side effect: updates the module-level r, g, b and w values so the rest
    of the program sees the currently displayed colour.
    """
    global r, g, b, w
    # Pull the current slider positions into the shared colour state.
    r = red_select.value
    g = green_select.value
    b = blue_select.value
    w = white_select.value
    # Apply and display the new colour.
    pantilthat.set_all(r, g, b, w)
    pantilthat.show()
# NOTE(review): fragment — the first `if` below is the tail of a servo-driving
# function whose `def` line is outside this excerpt; indentation is reconstructed.
    # Only tilt when the requested angle (with a 20-degree offset) stays
    # inside the servo's safe range.
    if in_range(tiltAngle, servoRange[0]+20, servoRange[1] ):
        pth.tilt(tiltAngle-20)
        time.sleep(0.1)

# check to see if this is the main body of execution
if __name__ == "__main__":
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", type=str, required=True,
        help="path to openvino model for face detection")
    args = vars(ap.parse_args())
    # set up the NeoPixel strip: solid blue while initialising
    pth.light_mode(pth.WS2812)
    pth.light_type(pth.RGB)
    pth.set_all(0,0,255)
    pth.show()
    # start a manager for managing process-safe variables
    with Manager() as manager:
        # enable the servos
        pth.servo_enable(1, True)
        pth.servo_enable(2, True)
        # set integer values for the object center (x, y)-coordinates
        centerX = manager.Value("i", 0)
        centerY = manager.Value("i", 0)
        # set integer values for the object's (x, y)-coordinates
        objX = manager.Value("i", 0)
        objY = manager.Value("i", 0)
        # pan and tilt values will be managed by independent PIDs
def exit_program():
    """Blank the NeoPixels, park the pan-tilt head, then terminate."""
    # Lights out first.
    off = (0, 0, 0, 0)
    pantilthat.set_all(*off)
    pantilthat.show()
    # Return the head to its home position before quitting.
    home_pan, home_tilt = 0, -20
    pantilthat.pan(home_pan)
    pantilthat.tilt(home_tilt)
    exit()
from time import sleep

# Initialise PanTilt Hat
a = 0     # initial pan position (center)
b = -20   # initial tilt position (center)
r = 0     # neopixel red off
g = 0     # neopixel green off
# BUG FIX(review): this was `b = 0`, which clobbered the tilt home position
# above, so `pantilthat.tilt(b)` below tilted to 0 instead of -20.  The blue
# channel now has its own name.  If later (unseen) code reads `b` expecting
# the blue value, update it to read `blue` instead.
blue = 0  # neopixel blue off
w = 0     # neopixel white off
pantilthat.light_mode(pantilthat.WS2812)
pantilthat.light_type(pantilthat.GRBW)
pantilthat.pan(a)
pantilthat.tilt(b)
pantilthat.set_all(r, g, blue, w)
pantilthat.show()

# Initialise Camera
camera = picamera.PiCamera()
win_size = (500, 40, 640, 480)  # preview window geometry (x, y, w, h)
x = 0  # set zoom to zero
y = 0  # set zoom to zero
video = (1920, 1088)   # video capture resolution
photo = (2592, 1944)   # still capture resolution
framerate = 30
rotate = 180
effect_value = "none"
camera.exposure_mode = "auto"
camera.awb_mode = "auto"
camera.rotation = rotate
camera.resolution = photo
def do_GET(self):
    """Serve the control UI and handle /do_orient and /do_light commands.

    Routes:
      /            -> 301 redirect to /index.html
      /do_orient   -> pan/tilt the servos (query: pan=deg, tilt=deg)
      /do_light    -> set NeoPixels (query: index=r,g,b,w; index -1 = all)
      /jsmpg.js, /index.html, /styles.css -> static/templated content
      anything else -> 404
    """
    url = urlparse(self.path)
    if url.path == '/':
        self.send_response(301)
        self.send_header('Location', '/index.html')
        self.end_headers()
        return
    elif url.path == '/do_orient':
        try:
            # Every query value must parse as an int; only the first value
            # of each parameter is used.
            data = {k: int(v[0]) for k, v in parse_qs(url.query).items()}
        except (IndexError, ValueError) as e:
            self.send_error(400, str(e))
        else:
            # hat_lock serialises hardware access across request threads.
            with self.server.hat_lock:
                hat.servo_enable(1, True)
                hat.servo_enable(2, True)
                try:
                    delay = 0.1
                    if 'pan' in data:
                        # Sign is flipped: positive UI pan maps to negative servo pan.
                        pan = -data['pan']
                        # Scale the wait with the sweep distance (up to 0.5s for 180 deg).
                        delay = max(delay, 0.5 * (abs(pan - self.server.last_pan) / 180))
                        hat.pan(pan)
                        self.server.last_pan = pan
                    if 'tilt' in data:
                        tilt = -data['tilt']
                        delay = max(delay, 0.5 * (abs(tilt - self.server.last_tilt) / 180))
                        hat.tilt(tilt)
                        self.server.last_tilt = tilt
                    # Wait for the servo to complete its sweep
                    sleep(delay)
                finally:
                    # Always de-energise the servos, even on error.
                    hat.servo_enable(1, False)
                    hat.servo_enable(2, False)
            self.send_response(200)
            self.end_headers()
        return
    elif url.path == '/do_light':
        try:
            # Each query param is "index=r,g,b,w"; split exactly into four ints.
            data = {
                int(k): (int(r), int(g), int(b), int(w))
                for k, v in parse_qs(url.query).items()
                for r, g, b, w in (v[0].split(',', 3), )
            }
        except (IndexError, ValueError) as e:
            self.send_error(400, str(e))
        else:
            # Index -1 is shorthand for "all eight pixels".
            if -1 in data:
                r, g, b, w = data.pop(-1)
                for i in range(8):
                    data[i] = r, g, b, w
            with self.server.hat_lock:
                for index, (r, g, b, w) in data.items():
                    # Silently ignore out-of-range pixel indices.
                    if 0 <= index < 8:
                        hat.set_pixel_rgbw(index, r, g, b, w)
                hat.show()
            self.send_response(200)
            self.end_headers()
        return
    elif url.path == '/jsmpg.js':
        content_type = 'application/javascript'
        content = self.server.jsmpg_content
    elif url.path == '/index.html':
        content_type = 'text/html; charset=utf-8'
        tpl = Template(self.server.index_template)
        content = tpl.safe_substitute(
            dict(WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT,
                 COLOR=COLOR, BGCOLOR=BGCOLOR))
    elif url.path == '/styles.css':
        content_type = 'text/css; charset=utf-8'
        tpl = Template(self.server.styles_template)
        content = tpl.safe_substitute(
            dict(WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT,
                 COLOR=COLOR, BGCOLOR=BGCOLOR))
    else:
        self.send_error(404, 'File not found')
        return
    # Fall-through: serve whichever static/templated content was selected.
    content = content.encode('utf-8')
    self.send_response(200)
    self.send_header('Content-Type', content_type)
    self.send_header('Content-Length', len(content))
    self.send_header('Last-Modified', self.date_time_string(time()))
    self.end_headers()
    # HEAD requests get headers only.
    if self.command == 'GET':
        self.wfile.write(content)
# NOTE(review): fragment of a text-command dispatcher (light/servo control).
# The enclosing `try:` and the start of the if/elif chain over `command` lie
# before this excerpt; indentation below is reconstructed.  The local named
# `list` shadows the builtin, statements carry stray semicolons, and the
# final bare `except:` swallows every error — worth cleaning up once the
# whole function is in view.
            pantilthat.light_type(pantilthat.GRBW);
        else:
            # None of the recognised strip types matched list[1].
            print('error: light_type requires parameter RBB, GRB, RGBW or GRBW');
    elif( command == 'light_mode' and len(list)>=2):
        if( list[1] == 'PWM' ):
            pantilthat.light_mode(pantilthat.PWM);
        elif( list[1] == 'WS2812' ):
            pantilthat.light_mode(pantilthat.WS2812);
        else:
            print('error: light_mode requires parameter PWM or WS2812');
    elif (command == 'brightness' and len(list)>=2):
        pantilthat.brightness(float(list[1]));
    elif( command == 'set_all' and len(list)==4):
        # Three arguments: RGB only.
        pantilthat.set_all(int(list[1]), int(list[2]), int(list[3]));
    elif( command == 'set_all' and len(list)==5):
        # Four arguments: RGB plus white channel.
        print("setting three colours and white");
        pantilthat.set_all(int(list[1]), int(list[2]), int(list[3]), int(list[4]));
    elif( command == 'set_pixel' and len(list)==5):
        # index, r, g, b
        pantilthat.set_pixel(int(list[1]), int(list[2]), int(list[3]), int(list[4]));
    elif( command == 'set_pixel' and len(list)==6):
        # index, r, g, b, w
        pantilthat.set_pixel(int(list[1]), int(list[2]), int(list[3]), int(list[4]), int(list[5]));
    elif( command == 'show'):
        pantilthat.show();
    else:
        print('error processing command');
except:
    # NOTE(review): bare except — also catches SystemExit/KeyboardInterrupt.
    print('error processing command');
def do_LIGHT( self, value = 0 ) :
    """Set every NeoPixel to the same white level.

    value -- brightness 0-255; out-of-range values are clamped.
    """
    # Clamp into the legal 8-bit range in one expression.
    level = max(0, min(255, value))
    pantilthat.clear()
    pantilthat.set_all( level, level, level )
    pantilthat.show()
def show():
    """Thin wrapper: flush the buffered NeoPixel state out to the hardware."""
    pantilthat.show()
def update(self, frame, frameCenter):
    """Detect the first face in ``frame`` with the Haar cascade and report it.

    frame       -- BGR image from the camera.
    frameCenter -- (x, y) pixel centre of the frame; returned when no face is
                   found so the tracking error collapses to zero.

    Returns ((faceX, faceY), rect) for the first detection, or
    (frameCenter, None) when no face was found.  Also paints the HAT
    NeoPixels as a crude horizontal-error indicator.

    NOTE(review): a large block of commented-out test/debug code (synthetic
    centroid sweeps) was removed from this method.
    """
    # convert the frame to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect all faces in the input frame
    rects = self.detector.detectMultiScale(gray, scaleFactor=1.05,
                                           minNeighbors=9, minSize=(30, 30),
                                           flags=cv2.CASCADE_SCALE_IMAGE)
    # check to see if a face was found
    if len(rects) > 0:
        # extract the bounding box coordinates of the face and use the
        # coordinates to determine the center of the face
        (x, y, w, h) = rects[0]
        faceX = int(x + (w / 2))
        faceY = int(y + (h / 2))
        # Colour the error: red background; pixels 0-2 light up for
        # increasing rightward error, pixels 7-5 for leftward error.
        pth.set_all(255, 0, 0)
        if (faceX - frameCenter[0]) > 10:
            pth.set_pixel(0, 255, 255, 255)
        if (faceX - frameCenter[0]) > 30:
            pth.set_pixel(1, 255, 255, 255)
        if (faceX - frameCenter[0]) > 50:
            pth.set_pixel(2, 255, 255, 255)
        if (faceX - frameCenter[0]) < -10:
            pth.set_pixel(7, 255, 255, 255)
        if (faceX - frameCenter[0]) < -30:
            pth.set_pixel(6, 255, 255, 255)
        if (faceX - frameCenter[0]) < -50:
            pth.set_pixel(5, 255, 255, 255)
        pth.show()
        # return the center (x, y)-coordinates of the face
        return ((faceX, faceY), rects[0])
    # otherwise no faces were found, so return the center of the frame
    pth.clear()
    pth.show()
    return (frameCenter, None)
def start_camera(self, face_position_X, face_position_Y):
    """
    1. Begin video stream
    2. Extract faces from frames
    3. Display frames with bounding boxes
    4. Update global variables with:
        -> pixel coordinates of the center of the frame
        -> pixel coordinates of the center of the faces

    face_position_X / face_position_Y are process-shared Values updated
    in place so the servo PID loops (running elsewhere) can read them.
    Runs forever; only exits via the SIGINT handler installed below.
    """
    # signal trap to handle keyboard interrupt
    signal.signal(signal.SIGINT, signal_handler)
    # start the video stream and wait for the camera to warm up
    print('Starting Camera')
    if self.rpi:
        vs = VideoStream(usePiCamera=self.rpi,
                         resolution=self.resolution).start()
    else:
        # Non-Pi fallback: second webcam device.
        vs = VideoStream(src=1, resolution=self.resolution).start()
    time.sleep(2.0)
    # initialize the object center finder
    face_detector = HaarFaceDetector(
        os.path.join(MODELS_DIRECTORY,
                     'haarcascade_frontalface_default.xml'))
    # start recording to a timestamped file
    filename = os.path.join(DATA_DIRECTORY, 'recordings',
                            '{}.avi'.format(time.time()))
    # NOTE(review): result discarded — redundant with the `fourcc = ...`
    # call two lines below.
    cv2.VideoWriter_fourcc(*'MJPG')
    print(filename)
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(filename, fourcc, 20.0, self.resolution)
    while True:
        # grab the frame from the threaded video stream and flip it
        # vertically (since our camera was upside down)
        frame = vs.read()
        frame = cv2.flip(frame, 0)
        # find the object's location
        object_locations = face_detector.extract_faces(frame)
        people = []  # for setting colour
        # get first face for now
        if object_locations:
            # First face's centre becomes the shared tracking target.
            (face_position_X.value,
             face_position_Y.value) = object_locations[0][0]
            # extract the bounding box and draw it
            for pos, rect, neighbour, weight in object_locations:
                (x, y, w, h) = rect
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # recogniser part
                gray_frame = cv2.cvtColor(
                    frame, cv2.COLOR_BGR2GRAY)  # convert to gray
                person, confidence = PiFaceRecognition.infer_lbph_face_recogniser(
                    gray_frame[y:y + h, x:x + w])
                people.append(person)
                # Label the box with the recognised name and confidence.
                cv2.putText(frame, person, (x + 5, y - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                cv2.putText(frame, str(confidence), (x + 5, y + h - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)
        else:
            print('No faces found.')
            # Fall back to the frame centre so the servos hold still.
            face_position_X.value, face_position_Y.value = (
                self.frame_center_X, self.frame_center_Y)
        # Colour the HAT pixels by who is in frame:
        # orange = multiple people, green = 'jai', purple = 'alleeya'.
        if len(people) > 1:
            # set to orange
            pth.set_all(255, 127, 80, 50)
            pth.show()
        elif 'jai' in people:
            # set to green
            pth.set_all(173, 255, 47, 50)
            pth.show()
        elif 'alleeya' in people:
            # set to purple
            pth.set_all(221, 160, 221, 50)
            pth.show()
        else:
            pth.clear()
            pth.show()
        # display the frame to the screen
        cv2.imshow("Pan-Tilt Face Tracking", frame)
        out.write(frame)
        cv2.waitKey(1)
    # NOTE(review): unreachable — the loop above never exits normally, so the
    # writer is only released when the process dies.
    out.release()
def test_lights():
    """Cycle the NeoPixels through every on/off combination of R, G and B
    at brightness 100, holding each colour for half a second, until Ctrl-C.

    Fixes from the original: the final (white) step slept BEFORE calling
    show(), so white was displayed only momentarily; the unused
    ``r, g, b, w = 0, 0, 0, 0`` initialisation is gone; and the eight
    copy-pasted set/show/sleep stanzas are collapsed into a data-driven loop.
    """
    pantilthat.light_mode(pantilthat.WS2812)
    pantilthat.light_type(pantilthat.GRBW)
    # The eight test colours, in the original display order.
    colours = [
        (0, 0, 0, 0),        # off
        (100, 0, 0, 0),      # red
        (0, 100, 0, 0),      # green
        (0, 0, 100, 0),      # blue
        (100, 100, 0, 0),    # yellow
        (0, 100, 100, 0),    # cyan
        (100, 0, 100, 0),    # magenta
        (100, 100, 100, 0),  # white (RGB channels)
    ]
    try:
        while True:
            for r, g, b, w in colours:
                pantilthat.set_all(r, g, b, w)
                pantilthat.show()
                time.sleep(0.5)
    except KeyboardInterrupt:
        # Leave the strip dark on exit.
        pantilthat.clear()
        pantilthat.show()