Example #1
    def load_model(self,
                   model_dir,
                   use_gpu=False,
                   enable_mkldnn=False,
                   cpu_threads=1):
        model = os.path.join(model_dir, '__model__')
        params = os.path.join(model_dir, '__params__')
        config = Config(model, params)

        # Configure the predictor
        if use_gpu:
            config.enable_use_gpu(100, 0)
        else:
            config.disable_gpu()
            config.set_cpu_math_library_num_threads(cpu_threads)
            if enable_mkldnn:
                config.enable_mkldnn()
                config.set_mkldnn_cache_capacity(10)

        config.disable_glog_info()
        config.switch_ir_optim(True)
        config.enable_memory_optim()
        config.switch_use_feed_fetch_ops(False)
        config.switch_specify_input_names(True)

        # Build the predictor from the config
        predictor = create_predictor(config)

        # Grab the model's input and output handles
        input_names = predictor.get_input_names()
        output_names = predictor.get_output_names()
        input_handle = predictor.get_input_handle(input_names[0])
        output_handle = predictor.get_output_handle(output_names[0])

        return predictor, input_handle, output_handle
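
A minimal usage sketch for the loader above. It assumes the method sits on an instance named module, that import os and from paddle.inference import Config, create_predictor are in scope, and that the model takes a single 1x3x224x224 float32 input; none of these come from the example itself.

import numpy as np

predictor, input_handle, output_handle = module.load_model(
    model_dir="serving_server",  # assumed path, borrowed from Example #2
    use_gpu=False,
    enable_mkldnn=True,
    cpu_threads=4)

# Assumed input shape; substitute the model's real one.
dummy = np.random.randn(1, 3, 224, 224).astype("float32")
input_handle.reshape(dummy.shape)
input_handle.copy_from_cpu(dummy)
predictor.run()
output = output_handle.copy_to_cpu()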

Example #2
def preprocess():
    #for i in range(1000):
    #    _preprocess()
    #return 1
    #feed = _preprocess()

    import numpy as np
    from paddle.inference import Config
    from paddle.inference import create_predictor
    config = Config("serving_server/__model__", "serving_server/__params__")
    config.disable_gpu()
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)
    predictor = create_predictor(config)

    for i in range(10):
        feed = _preprocess()
        input_names = predictor.get_input_names()

        # input 0: im_shape
        input_handle = predictor.get_input_handle(input_names[0])
        input_t = feed["im_shape"]
        input_handle.reshape(input_t.shape)
        input_handle.copy_from_cpu(input_t)

        # input 1: image
        input_handle = predictor.get_input_handle(input_names[1])
        input_t = feed["image"]
        input_handle.reshape(input_t.shape)
        input_handle.copy_from_cpu(input_t)

        # input 2: scale_factor
        input_handle = predictor.get_input_handle(input_names[2])
        input_t = feed["scale_factor"]
        input_handle.reshape(input_t.shape)
        input_handle.copy_from_cpu(input_t)

        predictor.run()
        output_names = predictor.get_output_names()
        output_handle = predictor.get_output_handle(output_names[0])
        output_data = output_handle.copy_to_cpu()
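
The helper _preprocess() is not shown in this example. A hypothetical sketch for a PaddleDetection-style model, which expects the image, im_shape, and scale_factor inputs fed above; the file path, target size, and normalization are all assumptions:

import cv2
import numpy as np

def _preprocess(img_path="demo.jpg", target_size=608):
    # Hypothetical helper: the path, size, and normalization are assumptions.
    img = cv2.imread(img_path)
    h, w = img.shape[:2]
    img = cv2.resize(img, (target_size, target_size))
    img = img[:, :, ::-1].astype("float32") / 255.0    # BGR -> RGB, scale to [0, 1]
    img = np.ascontiguousarray(img.transpose((2, 0, 1))[np.newaxis, :])  # HWC -> NCHW
    return {
        "image": img,
        "im_shape": np.array([[target_size, target_size]], dtype="float32"),
        "scale_factor": np.array([[target_size / h, target_size / w]],
                                 dtype="float32"),
    }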
Example #3
import numpy as np
import paddle
import paddle.vision.transforms as transforms
from paddle.inference import Config, create_predictor


def main():
    args = parse_args()

    # Configure inference
    config = Config(args.model_file, args.params_file)
    config.disable_gpu()
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)

    # Create the Paddle predictor
    predictor = create_predictor(config)

    # Prepare the input: pick a random image from the MNIST test set
    val_dataset = paddle.vision.datasets.MNIST(mode='test',
                                               transform=transforms.ToTensor())
    (image, label) = val_dataset[np.random.randint(10000)]
    # ToTensor yields a 1x28x28 tensor; reshape to the NCHW batch the model expects
    image = image.numpy().reshape([1, 1, 28, 28])
    input_names = predictor.get_input_names()
    input_handle = predictor.get_input_handle(input_names[0])
    input_handle.reshape([1, 1, 28, 28])
    input_handle.copy_from_cpu(image)

    # Run the predictor
    predictor.run()

    # Fetch the output
    output_names = predictor.get_output_names()
    output_handle = predictor.get_output_handle(output_names[0])
    output = output_handle.copy_to_cpu()

    print("True label: ", label.item())
    print("Prediction: ", np.argmax(output))
Example #4
    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
        # Configure the execution device
        if use_gpu:
            try:
                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
            except Exception:
                print('Error! Unable to use the GPU. Set the environment '
                      'variable CUDA_VISIBLE_DEVICES to a GPU id first.')
                use_gpu = False

        # Load the model files
        if combined:
            model = os.path.join(modelpath, "__model__")
            params = os.path.join(modelpath, "__params__")
            config = Config(model, params)
        else:
            config = Config(modelpath)

        # Configure the predictor
        if use_gpu:
            config.enable_use_gpu(100, 0)
        else:
            config.disable_gpu()
            if use_mkldnn:
                config.enable_mkldnn()
        config.disable_glog_info()
        config.switch_ir_optim(True)
        config.enable_memory_optim()
        config.switch_use_feed_fetch_ops(False)
        config.switch_specify_input_names(True)

        # Build the predictor from the config
        predictor = create_predictor(config)

        # Return the predictor
        return predictor
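
A hedged usage sketch for this loader; the instance name module and the model directory are assumptions, not part of the example:

# combined=True expects modelpath to contain a __model__/__params__ pair;
# combined=False passes the directory straight to Config.
predictor = module.load_model(modelpath="serving_server",
                              use_gpu=False,
                              use_mkldnn=True,
                              combined=True)
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])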