def test_load_tf_openvino(self):
     local_path = self.create_temp_dir()
     url = data_url + "/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz"
     file_abs_path = maybe_download(
         "faster_rcnn_resnet101_coco_2018_01_28.tar.gz", local_path, url)
     extracted_to = os.path.join(local_path,
                                 "faster_rcnn_resnet101_coco_2018_01_28")
     if not os.path.exists(extracted_to):
         print("Extracting %s to %s" % (file_abs_path, extracted_to))
         # open the archive inside the guard so it is always closed,
         # even when extraction is skipped
         with tarfile.open(file_abs_path, "r:gz") as tar:
             tar.extractall(local_path)
     model = InferenceModel(3)
     model.load_tf(model_path=extracted_to + "/frozen_inference_graph.pb",
                   backend="openvino",
                   model_type="faster_rcnn_resnet101_coco",
                   ov_pipeline_config_path=extracted_to +
                   "/pipeline.config",
                   ov_extensions_config_path=None)
     input_data = np.random.random([4, 1, 3, 600, 600])
     output_data = model.predict(input_data)
     model2 = InferenceModel(3)
     model2.load_tf_object_detection_as_openvino(
         model_path=extracted_to + "/frozen_inference_graph.pb",
         object_detection_model_type="faster_rcnn_resnet101_coco",
         pipeline_config_path=extracted_to + "/pipeline.config",
         extensions_config_path=None)
     model2.predict(input_data)
 def test_load_tf_openvino_ic(self):
     local_path = self.create_temp_dir()
     print(local_path)
     url = data_url + "/models/resnet_v1_50_2016_08_28.tar.gz"
     file_abs_path = maybe_download("resnet_v1_50_2016_08_28.tar.gz",
                                    local_path, url)
     print("Extracting %s to %s" % (file_abs_path, local_path))
     with tarfile.open(file_abs_path, "r:gz") as tar:
         tar.extractall(local_path)
     model = InferenceModel(3)
     model.load_tf_image_classification_as_openvino(
         model_path=None,
         image_classification_model_type="resnet_v1_50",
         checkpoint_path=local_path + "/resnet_v1_50.ckpt",
         input_shape=[4, 224, 224, 3],
         if_reverse_input_channels=True,
         mean_values=[123.68, 116.78, 103.94],
         scale=1)
     print(model)
     input_data = np.random.random([4, 1, 224, 224, 3])
     s3url = "https://s3-ap-southeast-1.amazonaws.com/"
     var_url = s3url + "analytics-zoo-models/openvino/val_bmp_32.tar"
     lib_url = s3url + "analytics-zoo-models/openvino/opencv_4.0.0_ubuntu_lib.tar"
     var_file_abs_path = maybe_download("val_bmp_32.tar", local_path,
                                        var_url)
     lib_file_abs_path = maybe_download("opencv_4.0.0_ubuntu_lib.tar",
                                        local_path, lib_url)
     print("Extracting %s to %s" % (var_file_abs_path, local_path))
     with tarfile.open(var_file_abs_path, "r") as var_tar:
         var_tar.extractall(local_path)
     print("Extracting %s to %s" % (lib_file_abs_path, local_path))
     with tarfile.open(lib_file_abs_path, "r") as lib_tar:
         lib_tar.extractall(local_path)
     validation_file_path = local_path + "/val_bmp_32/val.txt"
     opencv_lib_path = local_path + "/lib"
     model2 = InferenceModel(3)
     model2.load_tf_as_calibrated_openvino(
         model_path=None,
         model_type="resnet_v1_50",
         checkpoint_path=local_path + "/resnet_v1_50.ckpt",
         input_shape=[4, 224, 224, 3],
         if_reverse_input_channels=True,
         mean_values=[123.68, 116.78, 103.94],
         scale=1,
         network_type='C',
         validation_file_path=validation_file_path,
         subset=32,
         opencv_lib_path=opencv_lib_path)
     print(model2)
     model2.predict(input_data)
Example #3
    def load(self, model_path, batch_size=0):
        """
        Load an OpenVINO model.

        :param model_path: String. The file path to the OpenVINO IR xml file.
        :param batch_size: Int. The batch size; default is 0, which uses the batch size read from the model's IR xml.
        :return:
        """
        self.node_num, self.core_num = get_node_and_core_number()
        self.path = model_path
        if batch_size != 0:
            self.batch_size = batch_size
        else:
            import xml.etree.ElementTree as ET
            tree = ET.parse(model_path)
            root = tree.getroot()
            shape_item = root.find('./layers/layer/output/port/dim[1]')
            if shape_item is None:
                raise ValueError(
                    "Invalid openVINO IR xml file, please check your model_path"
                )
            self.batch_size = int(shape_item.text)
        self.model = InferenceModel(supported_concurrent_num=self.core_num)
        self.model.load_openvino(
            model_path=model_path,
            weight_path=model_path[:model_path.rindex(".")] + ".bin",
            batch_size=batch_size)
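
The snippet above infers the batch size from the first <dim> of the first layer's output port in the IR xml. A minimal standalone sketch of that lookup; the file name model.xml is a placeholder for any local OpenVINO IR file:

import xml.etree.ElementTree as ET

# Parse an OpenVINO IR xml and read the batch dimension, mirroring the
# XPath used in the load() method above. "model.xml" is hypothetical.
tree = ET.parse("model.xml")
root = tree.getroot()
dim = root.find('./layers/layer/output/port/dim[1]')
if dim is None:
    raise ValueError("Invalid OpenVINO IR xml file")
print("batch size:", int(dim.text))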
Example #4
 def test_load_caffe(self):
     model = InferenceModel(10)
     model.load_caffe(
         os.path.join(resource_path, "models/caffe/test_persist.prototxt"),
         os.path.join(resource_path,
                      "models/caffe/test_persist.caffemodel"))
     input_data = np.random.random([4, 3, 8, 8])
     output_data = model.predict(input_data)
 def test_load_torch(self):
     torch_model = torchvision.models.resnet18()
     tmp_path = create_tmp_path() + ".pt"
     torch.save(torch_model, tmp_path, pickle_module=zoo_pickle_module)
     model = InferenceModel(10)
     model.load_torch(tmp_path)
     input_data = np.random.random([4, 3, 224, 224])
     output_data = model.predict(input_data)
     os.remove(tmp_path)
Example #6
    def load_model_bigdl(self):
        with open("bigdl_model.model", "wb") as fp:
            fp.write(self.model)

        from zoo.pipeline.inference import InferenceModel
        model = InferenceModel()
        model.load_bigdl("bigdl_model.model")
        return model
Example #7
 def test_load_openvino(self):
     local_path = self.create_temp_dir()
     model = InferenceModel(1)
     model_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"
     weight_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"
     model_path = maybe_download("resnet_v1_50.xml", local_path, model_url)
     weight_path = maybe_download("resnet_v1_50.bin", local_path,
                                  weight_url)
     model.load_openvino(model_path, weight_path)
     input_data = np.random.random([4, 1, 224, 224, 3])
     model.predict(input_data)
Example #8
 def test_load_tf_openvino(self):
     local_path = self.create_temp_dir()
     url = data_url + "/TF_faster_rcnn_resnet101_coco_2018_01_28"
     maybe_download("frozen_inference_graph.pb", local_path,
                    url + "/frozen_inference_graph.pb")
     maybe_download("pipeline.config", local_path, url + "/pipeline.config")
     maybe_download("faster_rcnn_support.json", local_path,
                    url + "/faster_rcnn_support.json")
     model = InferenceModel(3)
     model.load_tf(local_path + "/frozen_inference_graph.pb",
                   backend="openvino",
                   ov_pipeline_config_path=local_path + "/pipeline.config",
                   ov_extensions_config_path=local_path +
                   "/faster_rcnn_support.json")
     input_data = np.random.random([4, 1, 3, 600, 600])
     output_data = model.predict(input_data)
     model2 = InferenceModel(5)
     model2.load_tf(local_path + "/frozen_inference_graph.pb",
                    backend="openvino",
                    model_type="faster_rcnn_resnet101_coco")
     output_data2 = model2.predict(input_data)
Example #9
 def test_load_openvino(self):
     local_path = self.create_temp_dir()
     url = data_url + "/IR_faster_rcnn_resnet101_coco_2018_01_28"
     maybe_download("frozen_inference_graph.xml", local_path,
                    url + "/frozen_inference_graph.xml")
     maybe_download("frozen_inference_graph.bin", local_path,
                    url + "/frozen_inference_graph.bin")
     model = InferenceModel()
     model.load_openvino(local_path + "/frozen_inference_graph.xml",
                         local_path + "/frozen_inference_graph.bin")
     input_data = np.random.random([1, 1, 3, 600, 600])
     output_data = model.predict(input_data)
Example #10
 def __init__(self, *, model_path, batch_size=0):
     self.node_num, self.core_num = get_node_and_core_number()
     self.path = model_path
     if batch_size != 0:
         self.batch_size = batch_size
     else:
         import xml.etree.ElementTree as ET
         tree = ET.parse(model_path)
         root = tree.getroot()
         shape_item = root.find('./layers/layer/output/port/dim[1]')
         if shape_item is None:
             raise ValueError(
                 "Invalid openVINO IR xml file, please check your model_path"
             )
         self.batch_size = int(shape_item.text)
     self.model = InferenceModel(supported_concurrent_num=self.core_num)
     self.model.load_openvino(
         model_path=model_path,
         weight_path=model_path[:model_path.rindex(".")] + ".bin",
         batch_size=batch_size)
Example #11
def predict(model_path, img_path):
    model = InferenceModel()
    model.load_openvino(model_path,
                        weight_path=model_path[:model_path.rindex(".")] +
                        ".bin",
                        batch_size=BATCH_SIZE)
    sc = init_nncontext("OpenVINO Python resnet_v1_50 Inference Example")
    # pre-processing
    infer_transformer = ChainedPreprocessing([
        ImageBytesToMat(),
        ImageResize(256, 256),
        ImageCenterCrop(224, 224),
        ImageMatToTensor(format="NHWC", to_RGB=True)
    ])
    image_set = ImageSet.read(img_path, sc).\
        transform(infer_transformer).get_image().collect()
    image_set = np.expand_dims(image_set, axis=1)

    for i in range(len(image_set) // BATCH_SIZE + 1):
        index = i * BATCH_SIZE
        # check whether out of index
        if index >= len(image_set):
            break
        batch = image_set[index]
        # stack up to BATCH_SIZE images into one batch
        for j in range(index + 1, min(index + BATCH_SIZE, len(image_set))):
            batch = np.vstack((batch, image_set[j]))
        batch = np.expand_dims(batch, axis=0)
        # predict batch
        predictions = model.predict(batch)
        result = predictions[0]

        # post-processing for Top-1
        print("batch_" + str(i))
        for r in result:
            output = {}
            max_index = np.argmax(r)
            output["Top-1"] = str(max_index)
            print("* Predict result " + str(output))
    print("finished...")
    sc.stop()
Example #12
 def test_load_bigdl(self):
     model = InferenceModel(3)
     model.load_bigdl(
         os.path.join(resource_path, "models/bigdl/bigdl_lenet.model"))
     input_data = np.random.random([4, 28, 28, 1])
     output_data = model.predict(input_data)
Example #13
def classify_process(model_path):
    # load the pre-trained OpenVINO model
    print("* Loading model...")
    model = InferenceModel()
    model.load_openvino(model_path,
                        weight_path=model_path[:model_path.rindex(".")] +
                        ".bin")
    print("* Model loaded")

    # continually poll for new images to classify
    while True:
        # attempt to grab a batch of images from the database, then
        # initialize the image IDs and batch of images themselves
        queue = DB.lrange(settings.IMAGE_QUEUE, 0, settings.BATCH_SIZE - 1)
        image_ids = []
        batch = None

        # loop over the queue
        start_time = time.time()
        for q in queue:
            # deserialize the object and obtain the input image
            q = json.loads(q.decode("utf-8"))
            image = helpers.base64_decode_image(q["image"],
                                                settings.IMAGE_DTYPE)
            # check to see if the batch list is None
            if batch is None:
                batch = image
            # otherwise, stack the data
            else:
                batch = np.vstack([batch, image])

            # update the list of image IDs
            image_ids.append(q["id"])
        print("* Pop from redis %d ms" %
              int(round((time.time() - start_time) * 1000)))
        # check to see if we need to process the batch
        if len(image_ids) > 0:
            # classify the batch
            batch = np.expand_dims(batch, axis=0)
            print("* Batch size: {}".format(batch.shape))
            # Output is [1, 4, 1000]
            results = model.predict(batch)[0]
            print("* Predict a batch %d ms" %
                  int(round((time.time() - start_time) * 1000)))
            # loop over the image IDs and their corresponding set of
            # results from our model
            for (imageID, resultSet) in zip(image_ids, results):
                # initialize the list of output predictions
                output = {}
                # loop over the results and add them to the list of
                # output predictions
                # Top 1
                max_index = np.argmax(resultSet)
                output["Top-1"] = str(max_index)
                output["id"] = imageID
                print("* Predict result " + str(output))
                # store the output predictions in the database, using
                # the image ID as the key so we can fetch the results
                DB.lpush(settings.PREDICT_QUEUE, json.dumps(output))

            # remove the set of images from our queue
            print("* Total time used is %d ms" %
                  int(round((time.time() - start_time) * 1000)))
            DB.ltrim(settings.IMAGE_QUEUE, len(image_ids), -1)

        # sleep for a small amount
        time.sleep(settings.SERVER_SLEEP)
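
For context, the queue consumed above would be filled by a producer that base64-encodes each image and pushes a JSON record onto settings.IMAGE_QUEUE. A minimal producer sketch using plain redis-py; the queue name, host, and byte layout are assumptions, and this encoding is the inverse of helpers.base64_decode_image only under those assumptions:

import base64
import json
import uuid

import numpy as np
import redis

DB = redis.StrictRedis(host="localhost", port=6379, db=0)
IMAGE_QUEUE = "image_queue"  # assumed to match settings.IMAGE_QUEUE

def enqueue_image(image):
    # Serialize a float32 array to base64 text so the consumer can
    # rebuild it with np.frombuffer (dtype/shape are assumptions here).
    payload = base64.b64encode(image.astype(np.float32).tobytes()).decode("utf-8")
    record = {"id": str(uuid.uuid4()), "image": payload}
    DB.rpush(IMAGE_QUEUE, json.dumps(record))

enqueue_image(np.random.random([1, 224, 224, 3]))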
Example #14
if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("--image",
                      type=str,
                      dest="img_path",
                      help="The path where the images are stored, "
                      "can be either a folder or an image path")
    parser.add_option("--model",
                      type=str,
                      dest="model_path",
                      help="Path to the TensorFlow model file")

    (options, args) = parser.parse_args(sys.argv)

    sc = init_nncontext("OpenVINO Object Detection Inference Example")
    images = ImageSet.read(options.img_path,
                           sc,
                           resize_height=600,
                           resize_width=600).get_image().collect()
    input_data = np.concatenate(
        [image.reshape((1, 1) + image.shape) for image in images], axis=0)
    model_path = options.model_path
    model = InferenceModel()
    model.load_openvino(model_path,
                        weight_path=model_path[:model_path.rindex(".")] +
                        ".bin")
    predictions = model.predict(input_data)
    # Print the detection result of the first image.
    print(predictions[0])
Example #15
def classify_process(model_path):
    print("* Loading model...")
    model = InferenceModel()
    model.load_openvino(model_path=model_path,
                        weight_path=model_path[:model_path.rindex(".")] +
                        ".bin")
    print("* Model loaded")
    # continually poll for new images to classify
    while True:
        image_ids = []
        batch = None
        # loop over the queue
        start_time = time.time()
        count = 0
        while count < 4:
            # get the next pub/sub message; may be None if nothing arrived yet
            record = pub.get_message()
            if record and record['type'] == 'message':
                data = json.loads(record['data'].decode("utf-8"))
                image = helpers.base64_decode_image(data["image"])
                image = helpers.byte_to_mat(image, dtype=settings.IMAGE_DTYPE)
                image = helpers.image_preprocess(image, settings.IMAGE_WIDTH,
                                                 settings.IMAGE_HEIGHT)
                # check to see if the batch list is None
                if batch is None:
                    batch = image
                # otherwise, stack the data
                else:
                    batch = np.vstack([batch, image])
                # update the list of image IDs
                image_ids.append(data["id"])
                count += 1
        print("* Pop from redis %d ms" %
              int(round((time.time() - start_time) * 1000)))
        # check to see if we need to process the batch
        if len(image_ids) > 0:
            # classify the batch
            batch = np.expand_dims(batch, axis=0)
            print("* Batch size: {}".format(batch.shape))
            # Output is [1, 4, 1000]
            results = model.predict(batch)[0]
            print("* Predict a batch %d ms" %
                  int(round((time.time() - start_time) * 1000)))
            # loop over the image IDs and their corresponding set of
            # results from our model
            for (imageID, resultSet) in zip(image_ids, results):
                # initialize the list of output predictions
                output = {}
                # loop over the results and add them to the list of
                # output predictions
                # Top 1
                max_index = np.argmax(resultSet)
                output["Top-1"] = str(max_index)
                output["id"] = imageID
                print("* Predict result " + str(output))
                # store the output predictions in the database, using
                # the image ID as the key so we can fetch the results
                DB.lpush(settings.PREDICT_QUEUE, json.dumps(output))

            # remove the set of images from our queue
            print("* Total time used is %d ms" %
                  int(round((time.time() - start_time) * 1000)))

        # sleep for a small amount
        time.sleep(settings.SERVER_SLEEP)
Example #16
def load_model(model_path, weight_path, batch_size):
    model = InferenceModel(supported_concurrent_num=1)
    model.load_openvino(model_path=model_path,
                        weight_path=weight_path,
                        batch_size=batch_size)
    return model
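
A minimal usage sketch for the helper above; the IR paths are placeholders, and the input shape mirrors the resnet_v1_50 tests earlier in this listing:

import numpy as np

# Hypothetical IR file pair; any matching OpenVINO xml/bin pair works here.
model = load_model("/tmp/resnet_v1_50.xml", "/tmp/resnet_v1_50.bin", batch_size=4)
input_data = np.random.random([4, 1, 224, 224, 3])
predictions = model.predict(input_data)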