Example #1
    def normal_predict(self):
        image_list = get_image_list(self.args.image_file)
        batch_input_list = []
        img_name_list = []
        cnt = 0
        for idx, img_path in enumerate(image_list):
            img = cv2.imread(img_path)
            if img is None:
                logger.warning(
                    "Image file failed to read and has been skipped. The path: {}".
                    format(img_path))
                continue
            else:
                img = img[:, :, ::-1]
                img = preprocess(img, self.args)
                batch_input_list.append(img)
                img_name = img_path.split("/")[-1]
                img_name_list.append(img_name)
                cnt += 1

            if cnt % self.args.batch_size == 0 or (idx + 1) == len(image_list):
                batch_outputs = self.predict(np.array(batch_input_list))
                batch_result_list = postprocess(batch_outputs, self.args.top_k)

                for number, result_dict in enumerate(batch_result_list):
                    filename = img_name_list[number]
                    clas_ids = result_dict["clas_ids"]
                    scores_str = "[{}]".format(", ".join("{:.2f}".format(
                        r) for r in result_dict["scores"]))
                    print(
                        "File:{}, Top-{} result: class id(s): {}, score(s): {}".
                        format(filename, self.args.top_k, clas_ids,
                               scores_str))
                batch_input_list = []
                img_name_list = []
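
The flush condition above fires on every full batch and once more on the last image, so a trailing partial batch is not dropped. A minimal, self-contained sketch of that pattern (the item list and batch size are stand-ins):

items = list(range(10))  # stand-in for the image list
batch_size = 4
batch = []
for idx, item in enumerate(items):
    batch.append(item)
    if len(batch) == batch_size or (idx + 1) == len(items):
        print("flush", batch)  # the real code calls self.predict() and postprocess() here
        batch = []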
Example #2
def predict(args, predictor):
    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_handle(input_names[0])

    output_names = predictor.get_output_names()
    output_tensor = predictor.get_output_handle(output_names[0])

    test_num = 500
    test_time = 0.0
    if not args.enable_benchmark:
        # for PaddleHubServing
        if args.hubserving:
            img_list = [args.image_file]
        # for predict only
        else:
            img_list = get_image_list(args.image_file)

        for idx, img_name in enumerate(img_list):
            if not args.hubserving:
                img = cv2.imread(img_name)
                assert img is not None, "Error in loading image: {}".format(
                    img_name)
                img = img[:, :, ::-1]
            else:
                img = img_name
            inputs = utils.preprocess(img, args)
            inputs = np.expand_dims(
                inputs, axis=0).repeat(
                    args.batch_size, axis=0).copy()
            input_tensor.copy_from_cpu(inputs)

            predictor.run()

            output = output_tensor.copy_to_cpu()
            classes, scores = utils.postprocess(output, args)
            if args.hubserving:
                return classes, scores
            print("Current image file: {}".format(img_name))
            print("\ttop-1 class: {0}".format(classes[0]))
            print("\ttop-1 score: {0}".format(scores[0]))
    else:
        for i in range(0, test_num + 10):
            inputs = np.random.rand(args.batch_size, 3, 224,
                                    224).astype(np.float32)
            start_time = time.time()
            input_tensor.copy_from_cpu(inputs)

            predictor.run()

            output = output_tensor.copy_to_cpu()
            output = output.flatten()
            if i >= 10:
                test_time += time.time() - start_time
            time.sleep(0.01)  # sleep for T4 GPU

        fp_message = "FP16" if args.use_fp16 else "FP32"
        trt_msg = "using tensorrt" if args.use_tensorrt else "not using tensorrt"
        print("{0}\t{1}\t{2}\tbatch size: {3}\ttime(ms): {4}".format(
            args.model, trt_msg, fp_message, args.batch_size, 1000 * test_time
            / test_num))
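
The predictor handed to this function is not constructed here. A hedged sketch of how such a predictor is typically created with the Paddle Inference 2.x API (the model and params file names below are placeholders):

from paddle.inference import Config, create_predictor

config = Config("inference.pdmodel", "inference.pdiparams")  # placeholder paths
config.disable_gpu()  # or config.enable_use_gpu(8000, 0) to run on GPU 0
predictor = create_predictor(config)
# predict(args, predictor) can then be called as defined above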
Example #3
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../../../')))
import argparse
import numpy as np
import cv2
import paddlehub as hub
from tools.infer.utils import preprocess

args = argparse.Namespace(resize_short=256, resize=224, normalize=True)

img_path_list = [
    "./deploy/hubserving/ILSVRC2012_val_00006666.JPEG",
]

module = hub.Module(name="clas_system")
for i, img_path in enumerate(img_path_list):
    img = cv2.imread(img_path)[:, :, ::-1]
    img = preprocess(img, args)
    batch_input_data = np.expand_dims(img, axis=0)
    res = module.predict(batch_input_data)
    print("The returned result of {}: {}".format(img_path, res))
Example #4
def main(args):
    image_path_list = get_image_list(args.image_file)
    headers = {"Content-type": "application/json"}

    cnt = 0
    predict_time = 0
    all_score = 0.0
    start_time = time.time()

    batch_input_list = []
    img_name_list = []
    for idx, img_path in enumerate(image_path_list):
        img = cv2.imread(img_path)
        if img is None:
            logger.warning(
                "Image file failed to read and has been skipped. The path: {}".
                format(img_path))
            continue
        else:
            img = img[:, :, ::-1]
            data = preprocess(img, args)
            batch_input_list.append(data)
            img_name = img_path.split('/')[-1]
            img_name_list.append(img_name)
            cnt += 1
        if cnt % args.batch_size == 0 or (idx + 1) == len(image_path_list):
            batch_input = np.array(batch_input_list)
            b64str, revert_shape = np_to_b64(batch_input)
            data = {
                "images": b64str,
                "revert_params": {
                    "shape": revert_shape,
                    "dtype": str(batch_input.dtype)
                },
                "top_k": args.top_k
            }
            try:
                r = requests.post(url=args.server_url,
                                  headers=headers,
                                  data=json.dumps(data))
                r.raise_for_status()
                if r.json()["status"] != "000":
                    msg = r.json()["msg"]
                    raise Exception(msg)
            except Exception as e:
                logger.error("{}, in file(s): {} etc.".format(
                    e, img_name_list[0]))
                continue
            else:
                results = r.json()["results"]
                batch_result_list = results["prediction"]
                elapse = results["elapse"]

                predict_time += elapse

                for number, result_list in enumerate(batch_result_list):
                    all_score += result_list["scores"][0]
                    result_str = ""
                    for i in range(len(result_list["clas_ids"])):
                        result_str += "{}: {:.2f}\t".format(
                            result_list["clas_ids"][i],
                            result_list["scores"][i])
                    logger.info("File:{}, The top-{} result(s): {}".format(
                        img_name_list[number], args.top_k, result_str))

            finally:
                batch_input_list = []
                img_name_list = []

    total_time = time.time() - start_time
    logger.info("The average time of prediction cost: {:.3f} s/image".format(
        predict_time / cnt))
    logger.info("The average time cost: {:.3f} s/image".format(total_time /
                                                               cnt))
    logger.info("The average top-1 score: {:.3f}".format(all_score / cnt))
Example #5
    def predict(self, input_data):
        """
        predict label of img with paddleclas
        Args:
            input_data(string, NumPy.ndarray): image to be classified, support:
                string: local path of image file, internet URL, directory containing series of images;
                NumPy.ndarray: preprocessed image data that has 3 channels and accords with [C, H, W], or raw image data that has 3 channels and accords with [H, W, C]
        Returns:
            dict: {image_name: "", class_id: [], scores: [], label_names: []},if label name path == None,label_names will be empty.
        """
        if isinstance(input_data, np.ndarray):
            if not self.args.is_preprocessed:
                input_data = input_data[:, :, ::-1]
                input_data = preprocess(input_data, self.args)
            input_data = np.expand_dims(input_data, axis=0)
            batch_outputs = self.predictor.predict(input_data)
            result = {"filename": "image"}
            result.update(self.postprocess(batch_outputs[0]))
            return result
        elif isinstance(input_data, str):
            input_path = input_data
            # download internet image
            if input_path.startswith('http'):
                if not os.path.exists(BASE_IMAGES_DIR):
                    os.makedirs(BASE_IMAGES_DIR)
                file_path = os.path.join(BASE_IMAGES_DIR, 'tmp.jpg')
                download_with_progressbar(input_path, file_path)
                print("Current using image from Internet:{}, renamed as: {}".
                      format(input_path, file_path))
                input_path = file_path
            image_list = get_image_list(input_path)

            total_result = []
            batch_input_list = []
            img_path_list = []
            cnt = 0
            for idx, img_path in enumerate(image_list):
                img = cv2.imread(img_path)
                if img is None:
                    print(
                        "Warning: Image file failed to read and has been skipped. The path: {}"
                        .format(img_path))
                    continue
                else:
                    img = img[:, :, ::-1]
                    data = preprocess(img, self.args)
                    batch_input_list.append(data)
                    img_path_list.append(img_path)
                    cnt += 1

                if cnt % self.args.batch_size == 0 or (idx +
                                                       1) == len(image_list):
                    batch_outputs = self.predictor.predict(
                        np.array(batch_input_list))
                    for number, output in enumerate(batch_outputs):
                        result = {"filename": img_path_list[number]}
                        result.update(self.postprocess(output))

                        result_str = "top-{} result: {}".format(
                            self.args.top_k, result)
                        print(result_str)

                        total_result.append(result)
                        if self.args.pre_label_image:
                            save_prelabel_results(result["class_ids"][0],
                                                  img_path_list[number],
                                                  self.args.pre_label_out_idr)
                    batch_input_list = []
                    img_path_list = []
            return total_result
        else:
            print(
                "Error: Please input a valid image! The input types supported by PaddleClas are numpy.ndarray and str (a local path or an internet URL)."
            )
            return []
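
As the docstring notes, an ndarray input can be raw [H, W, C] data (channel-reversed and preprocessed when is_preprocessed is false) or already preprocessed [C, H, W] data. A small illustration of the shapes involved, with synthetic placeholder arrays:

import numpy as np

raw_img = np.zeros((256, 256, 3), dtype=np.uint8)    # H, W, C, as returned by cv2.imread
pre_img = np.zeros((3, 224, 224), dtype=np.float32)  # C, H, W, already preprocessed

batch = np.expand_dims(pre_img, axis=0)  # predict() adds the batch dimension
print(batch.shape)                       # (1, 3, 224, 224)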
Example #6
    def predict(self, img):
        """
        predict label of img with paddleclas
        Args:
            img: input image for clas, support single image , internet url, folder path containing series of images
        Returns:
            dict:{image_name: "", class_id: [], scores: [], label_names: []},if label name path == None,label_names will be empty.
        """
        assert isinstance(img, (str, np.ndarray))

        input_names = self.predictor.get_input_names()
        input_tensor = self.predictor.get_input_handle(input_names[0])

        output_names = self.predictor.get_output_names()
        output_tensor = self.predictor.get_output_handle(output_names[0])
        if isinstance(img, str):
            # download internet image
            if img.startswith('http'):
                if not os.path.exists(BASE_IMAGES_DIR):
                    os.makedirs(BASE_IMAGES_DIR)
                image_path = os.path.join(BASE_IMAGES_DIR, 'tmp.jpg')
                download_with_progressbar(img, image_path)
                print("Current using image from Internet:{}, renamed as: {}".
                      format(img, image_path))
                img = image_path
            image_list = utils.get_image_list(img)
        else:
            # the assert above guarantees img is a numpy.ndarray here
            image_list = [img]

        total_result = []
        for filename in image_list:
            if isinstance(filename, str):
                image = cv2.imread(filename)
                assert image is not None, "Error in loading image: {}".format(
                    filename)
                image = image[:, :, ::-1]
                inputs = utils.preprocess(image, self.args)
                inputs = np.expand_dims(inputs, axis=0).repeat(1,
                                                               axis=0).copy()
            else:
                inputs = filename

            input_tensor.copy_from_cpu(inputs)

            self.predictor.run()

            outputs = output_tensor.copy_to_cpu()
            classes, scores = utils.postprocess(outputs, self.args)
            label_names = []
            if len(self.label_name_dict) != 0:
                label_names = [self.label_name_dict[c] for c in classes]
            result = {
                "filename": filename if isinstance(filename, str) else 'image',
                "class_ids": classes.tolist(),
                "scores": scores.tolist(),
                "label_names": label_names,
            }
            total_result.append(result)
            if self.args.pre_label_image:
                save_prelabel_results(classes[0], filename,
                                      self.args.pre_label_out_idr)
                print("\tSaving prelabel results in {}".format(
                    os.path.join(self.args.pre_label_out_idr,
                                 str(classes[0]))))
        return total_result
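
The method returns a list of result dicts with the keys built above. A minimal sketch of consuming such a list; the entry below is a hand-made stand-in, not real output:

fake_results = [{
    "filename": "demo.jpg",  # hypothetical file name
    "class_ids": [8, 7],
    "scores": [0.91, 0.07],
    "label_names": [],
}]
for r in fake_results:
    top1 = r["label_names"][0] if r["label_names"] else str(r["class_ids"][0])
    print("{}: top-1 {} ({:.2f})".format(r["filename"], top1, r["scores"][0]))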