    met_value_converter = calorie_estimation.METValueMLPConverter()
    checkpoint = engine.load_weights('resources/calorie_estimation/mobilenet_features_met_converter.ckpt')
    met_value_converter.load_state_dict(checkpoint)
    met_value_converter.eval()

    # Concatenate feature extractor and MET value converter
    net = Pipe(feature_extractor, met_value_converter)

    # Create the inference engine, video stream and display objects
    inference_engine = engine.InferenceEngine(net, use_gpu=use_gpu)

    video_source = camera.VideoSource(camera_id=camera_id,
                                      size=inference_engine.expected_frame_size,
                                      filename=path_in)

    framegrabber = camera.VideoStream(video_source,
                                      inference_engine.fps)

    post_processors = [
        calorie_estimation.CalorieAccumulator(weight=weight,
                                              height=height,
                                              age=age,
                                              gender=gender,
                                              smoothing=12)
    ]

    display_ops = [
        realtimenet.display.DisplayDetailedMETandCalories(),
    ]
    display_results = realtimenet.display.DisplayResults(title=title, display_ops=display_ops)

    # Run live inference
    engine.run_inference_engine(inference_engine,
                                framegrabber,
                                post_processors,
                                display_results,
                                path_out)
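
The snippet above references engine, camera, calorie_estimation, Pipe and realtimenet.display without showing their imports, and assumes a feature_extractor backbone (a MobileNet variant, judging by the checkpoint name) loaded earlier in the script. A plausible import block is sketched below; the exact module paths are a guess at the realtimenet package layout rather than something shown in the example.

# Assumed imports for the snippet above; module paths are a guess at the
# realtimenet package layout, not taken from the example itself.
import realtimenet.display
from realtimenet import camera, engine
from realtimenet.downstream_tasks import calorie_estimation
from realtimenet.downstream_tasks.nn_utils import Pipe
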
Example #2
    # Gesture classification head on top of the feature extractor; the
    # LogisticRegression class and its num_in argument are an assumption here,
    # matching the logistic-regression checkpoint loaded below
    gesture_classifier = LogisticRegression(num_in=feature_extractor.feature_dim,
                                            num_out=30)
    checkpoint = engine.load_weights('resources/gesture_detection/efficientnet_logistic_regression.ckpt')
    gesture_classifier.load_state_dict(checkpoint)
    gesture_classifier.eval()

    # Concatenate feature extractor and gesture classifier
    net = Pipe(feature_extractor, gesture_classifier)

    # Create the inference engine, video stream and display instances
    inference_engine = engine.InferenceEngine(net, use_gpu=use_gpu)

    video_source = camera.VideoSource(camera_id=camera_id,
                                      size=inference_engine.expected_frame_size,
                                      filename=path_in)

    video_stream = camera.VideoStream(video_source,
                                      inference_engine.fps)

    postprocessor = [
        PostprocessClassificationOutput(INT2LAB, smoothing=4)
    ]

    display_ops = [
        realtimenet.display.DisplayTopKClassificationOutputs(top_k=1, threshold=0.5),
    ]
    display_results = realtimenet.display.DisplayResults(title=title, display_ops=display_ops)

    engine.run_inference_engine(inference_engine,
                                video_stream,
                                postprocessor,
                                display_results,
                                path_out)
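
The gesture-detection snippet above likewise relies on names defined earlier in its script: the PostprocessClassificationOutput helper and an INT2LAB mapping from classifier output index to label string. A purely illustrative sketch follows; the label names are placeholders, not the actual gesture classes shipped with the checkpoint.

# Assumed import and label mapping for the gesture-detection snippet above.
from realtimenet.downstream_tasks.postprocess import PostprocessClassificationOutput

# Placeholder labels only; the real INT2LAB covers all 30 classifier outputs.
INT2LAB = {
    0: 'background',
    1: 'gesture_one',
    2: 'gesture_two',
    # ... one entry per remaining class index, up to 29
}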
Example #3
    def _setup_inference_engine(self):
        # Build the inference engine around the assembled network, then wire it to a
        # camera-backed video stream running at the engine's expected frame rate
        self.inference_engine = engine.InferenceEngine(self.net, use_gpu=True)
        video_source = camera.VideoSource(
            camera_id=0, size=self.inference_engine.expected_frame_size)
        self.frame_grabber = camera.VideoStream(video_source,
                                                self.inference_engine.fps)
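
A minimal sketch of how this helper might be used, assuming it is a method of a class that assembles self.net beforehand (the class name and constructor below are illustrative, not part of the example):

# Illustrative usage only: class name and constructor are assumptions showing
# where _setup_inference_engine fits once self.net has been assembled.
class GestureDemo:

    def __init__(self, net):
        self.net = net                  # e.g. Pipe(feature_extractor, gesture_classifier)
        self._setup_inference_engine()  # sets self.inference_engine and self.frame_grabber

    # ... _setup_inference_engine (shown above) would be defined here ...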