def test_load_tf_openvino(self):
     local_path = self.create_temp_dir()
     url = data_url + "/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz"
     file_abs_path = maybe_download(
         "faster_rcnn_resnet101_coco_2018_01_28.tar.gz", local_path, url)
     extracted_to = os.path.join(local_path,
                                 "faster_rcnn_resnet101_coco_2018_01_28")
     if not os.path.exists(extracted_to):
         print("Extracting %s to %s" % (file_abs_path, extracted_to))
         with tarfile.open(file_abs_path, "r:gz") as tar:
             tar.extractall(local_path)
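     # InferenceModel(3) presumably sets the number of concurrent predict
     # calls the model can serve (supported_concurrent_num).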
     model = InferenceModel(3)
     model.load_tf(model_path=extracted_to + "/frozen_inference_graph.pb",
                   backend="openvino",
                   model_type="faster_rcnn_resnet101_coco",
                   ov_pipeline_config_path=extracted_to +
                   "/pipeline.config",
                   ov_extensions_config_path=None)
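     # Dummy input: a batch of 4 samples, each shaped [1, 3, 600, 600],
     # presumably one 600x600 image per sample in channels-first layout.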
     input_data = np.random.random([4, 1, 3, 600, 600])
     output_data = model.predict(input_data)
     model2 = InferenceModel(3)
     model2.load_tf_object_detection_as_openvino(
         model_path=extracted_to + "/frozen_inference_graph.pb",
         object_detection_model_type="faster_rcnn_resnet101_coco",
         pipeline_config_path=extracted_to + "/pipeline.config",
         extensions_config_path=None)
     model2.predict(input_data)
Example #2
def test_load_tf_openvino(self):
     local_path = self.create_temp_dir()
     url = data_url + "/TF_faster_rcnn_resnet101_coco_2018_01_28"
     maybe_download("frozen_inference_graph.pb", local_path,
                    url + "/frozen_inference_graph.pb")
     maybe_download("pipeline.config", local_path, url + "/pipeline.config")
     maybe_download("faster_rcnn_support.json", local_path,
                    url + "/faster_rcnn_support.json")
     model = InferenceModel(3)
     model.load_tf(local_path + "/frozen_inference_graph.pb",
                   backend="openvino",
                   ov_pipeline_config_path=local_path + "/pipeline.config",
                   ov_extensions_config_path=local_path +
                   "/faster_rcnn_support.json")
     input_data = np.random.random([4, 1, 3, 600, 600])
     output_data = model.predict(input_data)
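     # Alternative loading path: supply only model_type and let load_tf
     # presumably resolve the matching pipeline/extensions configs itself.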
     model2 = InferenceModel(5)
     model2.load_tf(local_path + "/frozen_inference_graph.pb",
                    backend="openvino",
                    model_type="faster_rcnn_resnet101_coco")
     output_data2 = model2.predict(input_data)
Example #3
                      help="The path where the images are stored, "
                      "can be either a folder or an image path")
    parser.add_option("--model",
                      type=str,
                      dest="model_path",
                      help="Path to the TensorFlow model file")
    parser.add_option("--model_type",
                      type=str,
                      dest="model_type",
                      help="The type of the TensorFlow model",
                      default="faster_rcnn_resnet101_coco")

    (options, args) = parser.parse_args(sys.argv)

    sc = init_nncontext("OpenVINO Object Detection Inference Example")
    images = ImageSet.read(options.img_path,
                           sc,
                           resize_height=600,
                           resize_width=600).get_image().collect()
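    # Build one batch: reshape each image to (1, 1) + image.shape so the
    # batch axis and a per-sample list axis come first, then concatenate
    # all images along the batch axis.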
    input_data = np.concatenate(
        [image.reshape((1, 1) + image.shape) for image in images], axis=0)
    model = InferenceModel()
    model.load_tf(join(options.model_path, "frozen_inference_graph.pb"),
                  backend="openvino",
                  model_type=options.model_type,
                  ov_pipeline_config_path=join(options.model_path,
                                               "pipeline.config"))
    predictions = model.predict(input_data)
    # Print the detection result of the first image.
    print(predictions[0])
Example #4
import sys

import numpy as np
from optparse import OptionParser

from zoo.common.nncontext import init_nncontext
from zoo.feature.image import ImageSet
from zoo.pipeline.inference import InferenceModel


if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("--image", type=str, dest="img_path",
                      help="The path where the images are stored, "
                           "can be either a folder or an image path")
    parser.add_option("--model", type=str, dest="model_path",
                      help="Path to the TensorFlow model file")
    parser.add_option("--model_type", type=str, dest="model_type",
                      help="The type of the TensorFlow model",
                      default="faster_rcnn_resnet101_coco")

    (options, args) = parser.parse_args(sys.argv)

    sc = init_nncontext("OpenVINO Object Detection Inference Example")
    images = ImageSet.read(options.img_path, sc,
                           resize_height=600, resize_width=600).get_image().collect()
    input_data = np.concatenate([image.reshape((1, 1) + image.shape) for image in images], axis=0)
    model = InferenceModel()
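    # Here the model path is passed as-is with only model_type set; the
    # OpenVINO backend presumably derives the needed configs from the type.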
    model.load_tf(options.model_path, backend="openvino", model_type=options.model_type)
    predictions = model.predict(input_data)
    # Print the detection result of the first image.
    print(predictions[0])