# Example #1
# 0
def main():
    """Run the capture/inference pipeline until the user presses 'q'.

    Initializes the camera, the asynchronous write queue, and an OpenVINO
    detector or classifier (chosen by ``config.Inference.mode``), then loops:
    grab a frame, run inference, and either annotate/upload the frame (when
    there are detections) or enqueue it for background writing (when there
    are none). Cleanup now runs even if the loop raises.
    """
    args = parse_args()
    config = Config()

    # Start camera async class
    cam = Camera(args)
    cam.start()

    # Start io writing queue
    write_queue = WriteQueue()
    write_queue.start()

    if config.Inference.mode == 'detect':
        print('Running detection')
        detector = OpenVinoDetectorAsync(config.Inference)
    else:
        detector = OpenVinoClassifierAsync(config.Inference)

    try:
        while True:
            _, frame = cam.read()
            start_time = time.time()
            # Inference may mutate its input; keep the original frame for
            # display/upload.
            infer_frame = deepcopy(frame)
            detections = detector.run(infer_frame)
            timestamp = datetime.now(tz=timezone.utc).strftime('%Y-%m-%d-%H-%M-%S-%f')
            path = 'tmp/' + timestamp + '.jpg'
            if detections:
                # NOTE(review): per-detection bounding-box drawing is disabled
                # until the box coordinate mapping is fixed; only a fixed
                # "HUMAN" label is drawn for now.
                cv2.putText(frame, 'HUMAN', (10, 400), cv2.FONT_HERSHEY_SIMPLEX,
                            4, (25, 25, 255), 2, cv2.LINE_AA)
                cv2.imshow('frame', frame)
                cv2.imwrite(path, frame)
                # This has to RTT — synchronous upload blocks the loop.
                upload_frame(path, config)
            else:
                # No detection: hand the frame to the background write queue.
                write_queue.enqueue(path, frame)
            end_time = time.time()
            print("[LOGS] ---PIPELINE TIME--- **{}**".format(end_time - start_time))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release the camera, stop the queue, and tear down windows,
        # even when the loop exits via an exception.
        cam.stop()
        write_queue.stop()
        cv2.destroyAllWindows()
class Main:
    """Webcam face-detection demo: draw MTCNN face boxes until the UI stops it."""

    def __init__(self):
        self.camera = Camera()
        # Plain strings here — the originals were f-strings with nothing to
        # interpolate.
        with timer.time('load'):
            self.mtcnn = MTCNN()
            # MTCNN is primed for 480x640 frames.
            self.mtcnn.init(480, 640)

    def main(self, manual_brake=None):
        """Read frames, draw detected face boxes, and show each frame.

        Loops until ``show_frame`` returns a truthy value (or a truthy
        ``manual_brake`` is passed in, which skips the loop entirely),
        then releases the camera and prints timing stats.
        """
        while not manual_brake:
            frame = self.camera.read()
            try:
                with timer.time('inference'):
                    bboxes = self.mtcnn.findFace(frame)  # [3,h,w]
                    for x1, y1, x2, y2 in bboxes:
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
            except Exception as e:  # face not found
                print(e)
                # NOTE(review): this `continue` skips show_frame, so the stop
                # signal is never polled while no face is visible — confirm
                # that is intended.
                continue
            manual_brake = self.camera.show_frame(frame)
        self.camera.clean()
        timer.print_stats()
class Main:
    """Match the face on an ID-card chip against a live camera face.

    Two cooperative asyncio loops run concurrently: ``camera_loop`` keeps
    the latest camera-face embedding fresh, ``reader_loop`` waits for a chip
    photo and embeds it; once the reader side has an embedding the two are
    compared and the process exits.
    """

    def __init__(self, mrz_port="/dev/ttyACM0"):
        # Plain strings for timer labels — the originals were f-strings with
        # nothing to interpolate.
        with timer.time('load-total'):
            self.card_reader = CardReader()
            self.card_reader.set_mrz_port(mrz_port)
            with timer.time('load-mtcnn'):
                self.mtcnn = MTCNN()
                self.mtcnn.init(480, 640)
            with timer.time('load-facenet'):
                self.face_embedder = FaceEmbedder()
            self.camera = Camera()
            # Latest face crop / embedding from each source; None until seen.
            self.camera_face = None
            self.camera_emb = None
            self.reader_face = None
            self.reader_emb = None

    def prep_im(self, im):
        """Resize a face crop to 160x160 and move it to CUDA as a tensor.

        NOTE(review): ``permute(2, 1, 0)`` swaps H and W as well as moving
        channels first (the usual HWC->CHW is ``permute(2, 0, 1)``) — confirm
        the embedder really expects this layout.
        """
        im = cv2.resize(im, (160, 160))
        return torch.tensor(im).permute(2, 1, 0).to('cuda')

    async def camera_loop(self):
        """Continuously embed the most recent camera face; never returns."""
        while True:
            with timer.time('camera-total'):
                frame = self.camera.read()
                print('[camera] frame:', frame.shape)
                with timer.time('camera-mtcnn'):
                    bboxes = self.mtcnn.findFace(frame)
                print('[camera] bboxes:', bboxes)
                for x1, y1, x2, y2 in bboxes:
                    self.camera_face = frame[y1:y2, x1:x2]
                    print('[camera] face:', self.camera_face.shape)
                    with timer.time('camera-facenet'):
                        im = self.prep_im(self.camera_face)
                        self.camera_emb = self.face_embedder.face2embedding(im)
                # Yield to the event loop so reader_loop can make progress.
                await asyncio.sleep(0)

    async def reader_loop(self):
        """Poll the card reader; embed the chip photo once one arrives."""
        while True:
            print('[reader] starting read...')
            chip_img_future = self.card_reader.read_image_bytes_async()
            t0 = time()
            # Cooperative busy-wait until the reader future resolves.
            while not chip_img_future.isDone():
                await asyncio.sleep(0)
            byte_array = chip_img_future.get()
            if len(byte_array) > 0:
                print('[reader] byte arr len:', len(byte_array))
                chip_img = self.card_reader.bytes2np(byte_array)  # [h,w]
                timer.D['reader-read'].append(time() - t0)
                print('[reader] chip img:', chip_img.shape)
                # Grayscale -> 3 channels by stacking the plane three times.
                chip_img = np.concatenate(
                    [chip_img[:, :, None] for _ in range(3)],
                    axis=2)  # [h,w,3]
                with timer.time('reader-mtcnn'):
                    bboxes = self.mtcnn.findFace(chip_img)
                print('[reader] bboxes:', bboxes)
                # NOTE(review): unpack order here is (y1, x1, y2, x2) while
                # camera_loop uses (x1, y1, x2, y2) — confirm which one
                # matches findFace's output.
                for y1, x1, y2, x2 in bboxes:
                    self.reader_face = chip_img[y1:y2, x1:x2]
                    print('[reader] chip face:', self.reader_face.shape)
                    with timer.time('reader-facenet'):
                        im = self.prep_im(self.reader_face)
                        self.reader_emb = self.face_embedder.face2embedding(im)
                    timer.D['reader-total'].append(time() - t0)
                    self.compare_embeddings_and_exit()

    def compare_embeddings_and_exit(self):
        """Save both faces, print the embedding distance, and exit the process.

        If either face is still missing, log the error and return without
        comparing so the calling loop can retry — the original fell through
        and crashed in ``cv2.imwrite`` with a ``None`` image.
        """
        missing = False
        if self.reader_face is None:
            print("[ERROR] Couldn't read face from reader...")
            missing = True
        if self.camera_face is None:
            print("[ERROR] Couldn't read face from camera...")
            missing = True
        if missing:
            return
        cv2.imwrite('reader_face.png', self.reader_face)
        cv2.imwrite('camera_face.png', self.camera_face)
        # L2 distance between the two embeddings; lower means more similar.
        score = float((self.reader_emb - self.camera_emb).norm())
        print('score:', score)
        timer.print_stats()
        sys.exit()

    def main(self):
        """Schedule both loops on the default event loop and run forever.

        ``gather`` registers the coroutines; ``run_forever`` drives them
        until ``compare_embeddings_and_exit`` terminates the process.
        """
        asyncio.gather(self.camera_loop(), self.reader_loop())
        asyncio.get_event_loop().run_forever()