Example #1
    def predict(self, x, batch_per_thread=1, distributed=True, mini_batch=False):
        """
        Use a model to do prediction.
        """
        if isinstance(x, ImageSet):
            results = callZooFunc(self.bigdl_type, "zooPredict",
                                  self.value,
                                  x,
                                  batch_per_thread)
            return ImageSet(results)

        if isinstance(x, TFImageDataset):
            results = callZooFunc(self.bigdl_type, "zooPredict",
                                  self.value,
                                  x.get_prediction_data(),
                                  x.batch_per_thread)
            return ImageSet(results)

        if isinstance(x, TFDataset):
            results = callZooFunc(self.bigdl_type, "zooPredict",
                                  self.value,
                                  x.get_prediction_data())
            return results.map(lambda result: Layer.convert_output(result))

        if mini_batch:
            results = callZooFunc(self.bigdl_type, "zooPredict",
                                  self.value,
                                  x)
            return results.map(lambda result: Layer.convert_output(result))

        if distributed:
            if isinstance(x, np.ndarray):
                data_rdd = to_sample_rdd(x, np.zeros([x.shape[0]]), getOrCreateSparkContext())
            elif isinstance(x, RDD):
                data_rdd = x
            else:
                raise TypeError("Unsupported prediction data type: %s" % type(x))
            results = callZooFunc(self.bigdl_type, "zooPredict",
                                  self.value,
                                  data_rdd,
                                  batch_per_thread)
            return results.map(lambda result: Layer.convert_output(result))
        else:
            start_idx = 0
            results = []
            while start_idx < len(x):
                end_idx = min(start_idx + batch_per_thread, len(x))
                results.append(self.forward(x[start_idx:end_idx]))
                start_idx += batch_per_thread

            return np.concatenate(results, axis=0)
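A minimal usage sketch of this method, assuming `model` is an already-built instance exposing this `predict` and `x_test` is a NumPy array of the shape the model expects (both names are hypothetical):

    import numpy as np

    x_test = np.random.rand(16, 28, 28, 1).astype("float32")  # hypothetical input batch

    # Local mode: x is sliced into chunks of batch_per_thread samples,
    # each chunk is run through forward(), and the results are concatenated.
    local_preds = model.predict(x_test, batch_per_thread=4, distributed=False)

    # Distributed mode: the array is converted to an RDD of Sample and
    # prediction returns an RDD of per-sample outputs.
    pred_rdd = model.predict(x_test, batch_per_thread=4, distributed=True)
    print(pred_rdd.first())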
Example #2
    def get_raw_image_set(self, with_label):
        """
        Read the cat_dog test images under the resources folder as an ImageSet,
        with or without labels.
        """
        resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
        if with_label:
            image_folder = os.path.join(resource_path, "cat_dog")
        else:
            image_folder = os.path.join(resource_path, "cat_dog/*")
        image_set = ImageSet.read(image_folder, with_label=with_label, sc=get_spark_context(),
                                  one_based_label=False)
        return image_set
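A quick way to exercise this helper, assuming it sits on a test class with a Spark context available (the names below are hypothetical):

    raw_set = self.get_raw_image_set(with_label=True)
    # get_image() returns the underlying collection of images, so collecting it
    # confirms that the resource folder was actually read.
    images = raw_set.get_image().collect()
    print(len(images))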
Example #3
    def __call__(self, input):
        """
        Transform an ImageSet or a TextSet.
        """
        # Import here rather than at module level: the deferred import is what
        # breaks the circular dependency between this module and the feature packages.
        from bigdl.dllib.feature.image import ImageSet
        from bigdl.dllib.feature.text import TextSet
        if isinstance(input, ImageSet):
            jset = callZooFunc(self.bigdl_type, "transformImageSet",
                               self.value, input)
            return ImageSet(jvalue=jset)
        elif isinstance(input, TextSet):
            jset = callZooFunc(self.bigdl_type, "transformTextSet",
                               self.value, input)
            return TextSet(jvalue=jset)
        else:
            raise TypeError("Unsupported input type: %s" % type(input))
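Because `__call__` dispatches on the input type, a single preprocessing step can be applied directly to an ImageSet. A minimal sketch, assuming `image_set` was obtained from `ImageSet.read` as in the other examples and that `ImageResize` is exposed from the same feature.image package (the 256x256 target size is arbitrary):

    from bigdl.dllib.feature.image import ImageResize

    # Calling the preprocessing object transforms the whole ImageSet on the JVM
    # side and wraps the result in a new ImageSet.
    resized_set = ImageResize(256, 256)(image_set)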
Example #4
    def predict(self, x, batch_per_thread=4, distributed=True):
        """
        Use a model to do prediction.

        # Arguments
        x: Prediction data. A Numpy array or RDD of Sample or ImageSet.
        batch_per_thread:
          The default value is 4.
          When distributed is True,the total batch size is batch_per_thread * rdd.getNumPartitions.
          When distributed is False the total batch size is batch_per_thread * numOfCores.
        distributed: Boolean. Whether to do prediction in distributed mode or local mode.
                     Default is True. In local mode, x must be a Numpy array.
        """
        if isinstance(x, ImageSet) or isinstance(x, TextSet):
            results = callZooFunc(self.bigdl_type, "zooPredict", self.value, x,
                                  batch_per_thread)
            return ImageSet(results) if isinstance(
                x, ImageSet) else TextSet(results)
        if distributed:
            if isinstance(x, np.ndarray):
                data_rdd = to_sample_rdd(x, np.zeros([x.shape[0]]))
            elif isinstance(x, RDD):
                data_rdd = x
            else:
                raise TypeError("Unsupported prediction data type: %s" %
                                type(x))
            results = callZooFunc(self.bigdl_type, "zooPredict", self.value,
                                  data_rdd, batch_per_thread)
            return results.map(lambda result: Layer.convert_output(result))
        else:
            if isinstance(x, np.ndarray) or isinstance(x, list):
                results = callZooFunc(self.bigdl_type,
                                      "zooPredict", self.value,
                                      self._to_jtensors(x), batch_per_thread)
                return [Layer.convert_output(result) for result in results]
            else:
                raise TypeError("Unsupported prediction data type: %s" %
                                type(x))
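For the ImageSet branch, prediction stays distributed and comes back as a new ImageSet. A minimal sketch, assuming `model` is a built model using this API, `image_set` is an already preprocessed ImageSet, and that the result exposes the per-image outputs via `get_predict()` (all three are assumptions, not shown above):

    result_set = model.predict(image_set, batch_per_thread=4)
    # Pull the per-image prediction results back to the driver.
    predictions = result_set.get_predict().collect()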
Example #5
        def input_fn(mode):
            import os
            resource_path = os.path.join(
                os.path.split(__file__)[0], "../resources")
            if mode == tf.estimator.ModeKeys.TRAIN:
                image_folder = os.path.join(resource_path, "cat_dog")
                image_set = ImageSet.read(image_folder,
                                          with_label=True,
                                          sc=self.sc,
                                          one_based_label=False)
                transformer = ChainedPreprocessing([
                    ImageResize(256, 256),
                    ImageRandomCrop(224, 224, True),
                    ImageMatToTensor(format="NHWC"),
                    ImageSetToSample(input_keys=["imageTensor"],
                                     target_keys=["label"])
                ])
                image_set = image_set.transform(transformer)
                dataset = TFDataset.from_image_set(image_set,
                                                   image=(tf.float32,
                                                          [224, 224, 3]),
                                                   label=(tf.int32, [1]),
                                                   batch_size=8)
            elif mode == tf.estimator.ModeKeys.EVAL:
                image_folder = os.path.join(resource_path, "cat_dog")
                image_set = ImageSet.read(image_folder,
                                          with_label=True,
                                          sc=self.sc,
                                          one_based_label=False)
                transformer = ChainedPreprocessing([
                    ImageResize(256, 256),
                    ImageRandomCrop(224, 224, True),
                    ImageMatToTensor(format="NHWC"),
                    ImageSetToSample(input_keys=["imageTensor"],
                                     target_keys=["label"])
                ])
                image_set = image_set.transform(transformer)
                dataset = TFDataset.from_image_set(image_set,
                                                   image=(tf.float32,
                                                          [224, 224, 3]),
                                                   label=(tf.int32, [1]),
                                                   batch_per_thread=8)
            else:
                image_folder = os.path.join(resource_path, "cat_dog/*/*")
                image_set = ImageSet.read(image_folder,
                                          with_label=False,
                                          sc=self.sc,
                                          one_based_label=False)
                transformer = ChainedPreprocessing([
                    ImageResize(256, 256),
                    ImageRandomCrop(224, 224, True),
                    ImageMatToTensor(format="NHWC"),
                    ImageSetToSample(input_keys=["imageTensor"])
                ])
                image_set = image_set.transform(transformer)
                dataset = TFDataset.from_image_set(image_set,
                                                   image=(tf.float32,
                                                          [224, 224, 3]),
                                                   batch_per_thread=8)

            return dataset
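This `input_fn` follows the tf.estimator convention of switching on the mode key, so the same function can drive training, evaluation and prediction. A minimal sketch, assuming `estimator` is a TFEstimator-style object whose train/evaluate/predict methods accept such an input function (names, metrics and step counts are assumptions):

    # TRAIN consumes the batch_size branch; EVAL and PREDICT consume the
    # batch_per_thread branches defined above.
    estimator.train(input_fn, steps=10)
    metrics = estimator.evaluate(input_fn, ["acc"])
    predictions = estimator.predict(input_fn)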
Example #6
    parser = OptionParser()
    parser.add_option("--image",
                      type=str,
                      dest="img_path",
                      help="The path where the images are stored, "
                      "can be either a folder or an image path")
    parser.add_option("--model",
                      type=str,
                      dest="model_path",
                      help="Path to the TensorFlow model file")

    (options, args) = parser.parse_args(sys.argv)

    sc = init_nncontext("OpenVINO Object Detection Inference Example")
    images = ImageSet.read(options.img_path,
                           sc,
                           resize_height=600,
                           resize_width=600).get_image().collect()
    input_data = np.concatenate(
        [image.reshape((1, 1) + image.shape) for image in images], axis=0)
    model_path = options.model_path
    model = InferenceModel()
    model.load_openvino(model_path,
                        weight_path=model_path[:model_path.rindex(".")] +
                        ".bin")
    predictions = model.predict(input_data)
    # Print the detection result of the first image.
    print(predictions[0])
    print("finished...")
    sc.stop()