Example #1
def infer():
    # Image source
    cam = Camera()
    stream = cam.get_stream()
    floorNet = FloorNet()
    # Prediction
    while True:
        start = time.time()
        print("======================================")
        # Infer
        raw_img = stream.get()
        img, mask = floorNet.predict(raw_img)
        # Visualize
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)
        cv.addWeighted(mask, 0.5, img, 0.5, 0, img)
        cv.imshow('Camera', img)

        # Calculate frames per second (FPS)
        end = time.time()
        print('Total estimated time: {:.4f}'.format(end-start))
        fps = 1/(end-start)
        print("FPS: {:.1f}".format(fps))

        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    # Clear resources
    cv.destroyAllWindows()
    cam.terminate()
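
This snippet depends on project-specific Camera and FloorNet classes and on time/OpenCV imports that are not shown. Below is a minimal sketch of the assumed interface; the class bodies are placeholders, not the project's actual implementation.

import time

import cv2 as cv
import numpy as np


class Camera:
    """Assumed interface: wraps a capture device and exposes a frame stream."""

    def __init__(self, device=0):
        self._cap = cv.VideoCapture(device)

    def get_stream(self):
        # The real project likely returns a dedicated stream object;
        # for this sketch the camera itself acts as the stream.
        return self

    def get(self):
        ok, frame = self._cap.read()
        if not ok:
            raise RuntimeError('Failed to read a frame from the camera')
        return frame

    def terminate(self):
        self._cap.release()


class FloorNet:
    """Assumed interface: predict() returns the resized input image and a
    single-channel floor-segmentation mask of the same spatial size."""

    def predict(self, raw_img):
        img = cv.resize(raw_img, (224, 224))
        mask = np.zeros(img.shape[:2], dtype=img.dtype)  # placeholder mask
        return img, mask

With these stand-ins in place, infer() runs end to end: it overlays the mask on the camera frame, shows the result, and prints the per-frame latency and FPS until the q key is pressed.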
Example #2
def infer():
    # Image source
    cam = Camera()
    stream = cam.get_stream()
    # Load edge model
    EDGE_MODEL = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              '../models/tpu/ohmnilabs_floornet_224_quant_postprocess_edgetpu.tflite')
    inference = Inference(EDGE_MODEL)
    # Prediction
    while True:
        start = time.time()
        print("======================================")
        # Infer
        raw_img = stream.get()
        img, mask = inference.predict(raw_img)
        # Visualize
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)
        cv.addWeighted(mask, 0.5, img, 0.5, 0, img)
        cv.imshow('Camera', img)

        # Calculate frames per second (FPS)
        end = time.time()
        print('Total estimated time: {:.4f}'.format(end-start))
        fps = 1/(end-start)
        print("FPS: {:.1f}".format(fps))

        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    # Clear resources
    cv.destroyAllWindows()
    cam.terminate()
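
Example #2 is the same loop as Example #1, but the model is an Edge TPU compiled TFLite file wrapped by an Inference class. A rough sketch of how such a wrapper could look with tflite_runtime and the Edge TPU delegate follows; the class name and model path come from the snippet, while the pre- and post-processing details are assumptions.

import cv2 as cv
import numpy as np
from tflite_runtime.interpreter import Interpreter, load_delegate


class Inference:
    """Assumed wrapper around a TFLite interpreter running on the Edge TPU."""

    def __init__(self, model_path):
        # The delegate library name is platform-dependent
        # (libedgetpu.so.1 on Linux, edgetpu.dll on Windows).
        self.interpreter = Interpreter(
            model_path=model_path,
            experimental_delegates=[load_delegate('libedgetpu.so.1')])
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        _, self.height, self.width, _ = self.input_details[0]['shape']

    def predict(self, raw_img):
        # Resize to the model's input resolution and add a batch dimension.
        img = cv.resize(raw_img, (self.width, self.height))
        input_data = np.expand_dims(img, axis=0).astype(
            self.input_details[0]['dtype'])
        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()
        mask = self.interpreter.get_tensor(self.output_details[0]['index'])
        # The "_postprocess_" suffix in the model name suggests the graph
        # already emits a single-channel mask; drop the batch dimension.
        return img, np.squeeze(mask).astype(np.uint8)

Note that EDGE_MODEL in the snippet is resolved relative to the script's own directory, so the example can be launched from any working directory.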
Example #3
def predict():
    # Config
    image_shape = (224, 224)
    output_shape = (640, 480)
    alpha = 0.5
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    out = cv.VideoWriter(
        'dist/floorNet-%s.avi' % current_time, cv.VideoWriter_fourcc(*'DIVX'), 10, output_shape)
    # Model
    detector = Detector(image_shape, 'models')
    # Image source
    cam = Camera()
    stream = cam.get_stream()
    # Prediction
    while True:
        start = time.time()
        print("======================================")

        img = stream.get()
        img = detector.normalize(img)
        mask = detector.predict(img)
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)
        cv.addWeighted(mask, alpha, img, 1-alpha, 0, img)
        img = cv.resize(img, output_shape)
        cv.imshow('Camera', img)

        # Save video
        frame = (img*255).astype(np.uint8)
        out.write(frame)

        # Calculate frames per second (FPS)
        end = time.time()
        print('Total estimated time: {:.4f}'.format(end-start))
        fps = 1/(end-start)
        print("FPS: {:.1f}".format(fps))

        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    # Clear resources
    out.release()
    cv.destroyAllWindows()
    cam.terminate()
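
Example #3 additionally records the visualization with cv.VideoWriter. The conversion frame = (img*255).astype(np.uint8) implies that detector.normalize() returns a floating-point image scaled to [0, 1]; a plausible sketch of that pre-processing step is shown below (only the name and call site come from the snippet, the body is an assumption).

import cv2 as cv
import numpy as np


def normalize(img, image_shape=(224, 224)):
    """Assumed pre-processing: resize to the model input size and scale to [0, 1]."""
    img = cv.resize(img, image_shape)
    return img.astype(np.float32) / 255.0

VideoWriter.write() expects 8-bit BGR frames whose size matches the (width, height) passed at construction, which is why the snippet resizes to output_shape (640x480) and converts back to uint8 before writing; frames with a mismatched size or dtype are typically dropped without an error.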
Example #4
    def test(self):
        cam = Camera()
        stream = cam.get_stream()

        while True:
            timer = cv.getTickCount()

            print("===========================")
            img = stream.get()
            cv_img = cv.resize(img, self.pose.input_shape)
            pil_img = image.convert_cv_to_pil(cv_img)
            activated, obj_img, bbox, objects, inference_time = self.pose.predict(
                cv_img)

            print('Inference time: {:.4f}'.format(inference_time / 1000))
            drawed_img = ImageDraw.Draw(pil_img)
            for marks in objects:
                for mark in marks:
                    (label, score, x, y) = mark
                    if label in LABEL_FILTER:
                        self.draw_pose(drawed_img, x, y, label, score)

            # Calculate frames per second (FPS)
            print('Total Estimated Time: {:.4f}'.format(
                (cv.getTickCount() - timer) / cv.getTickFrequency()))
            fps = cv.getTickFrequency() / (cv.getTickCount() - timer)
            print('FPS: {:.1f}'.format(fps))
            print('\n')

            if activated:
                self.draw_text(drawed_img, 'Activated')
                cv.imshow('Activation', obj_img)
                cv.moveWindow('Activation', 90, 650)
            else:
                self.draw_text(drawed_img, 'Idle')
            cv.imshow('Video', image.convert_pil_to_cv(pil_img))
            if cv.waitKey(10) & 0xFF == ord('q'):
                break

        cv.destroyWindow('Activation')
        cv.destroyWindow('Video')
        cam.terminate()
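
Example #4 draws pose keypoints with PIL instead of OpenCV, converting between the two image representations via image.convert_cv_to_pil and image.convert_pil_to_cv. Those helpers are not shown; here is a minimal sketch under the assumption that they only swap the BGR (OpenCV) and RGB (PIL) channel orders.

import cv2 as cv
import numpy as np
from PIL import Image


def convert_cv_to_pil(cv_img):
    """OpenCV BGR ndarray -> PIL RGB image."""
    return Image.fromarray(cv.cvtColor(cv_img, cv.COLOR_BGR2RGB))


def convert_pil_to_cv(pil_img):
    """PIL RGB image -> OpenCV BGR ndarray."""
    return cv.cvtColor(np.array(pil_img), cv.COLOR_RGB2BGR)

draw_pose() and draw_text() are then expected to render onto the returned PIL image through ImageDraw primitives such as ellipse() and text().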