Example #1
    def _set_config(self):
        """
        predictor config setting
        """
        self.model_file_path = os.path.join(self.default_pretrained_model_path,
                                            '__model__')
        self.params_file_path = os.path.join(
            self.default_pretrained_model_path, '__params__')
        cpu_config = AnalysisConfig(self.model_file_path,
                                    self.params_file_path)
        cpu_config.disable_glog_info()
        cpu_config.disable_gpu()
        self.cpu_predictor = create_paddle_predictor(cpu_config)
        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False
        if use_gpu:
            gpu_config = AnalysisConfig(self.model_file_path,
                                        self.params_file_path)
            gpu_config.disable_glog_info()
            gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000,
                                      device_id=0)
            self.gpu_predictor = create_paddle_predictor(gpu_config)
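
The predictors built above are driven elsewhere in the module; as a point of reference, here is a minimal usage sketch in the PaddleTensor style that Examples #13 and #21 below use. The `run_inference` helper and the assumption that the batch is already preprocessed are illustrative, not part of the original snippet.

import numpy as np
from paddle.fluid.core import PaddleTensor

def run_inference(module, batch, use_gpu=False):
    # Use the GPU predictor only when _set_config() managed to create one.
    predictor = module.gpu_predictor if use_gpu else module.cpu_predictor
    inputs = PaddleTensor(batch.copy())  # wrap the preprocessed numpy batch
    outputs = predictor.run([inputs])    # run with feed/fetch ops enabled
    return outputs[0].as_ndarray()       # first output back as a numpy array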
Example #2
    def _set_config(self):
        """
        predictor config setting
        """
        # encoder
        cpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
        cpu_config_enc.disable_glog_info()
        cpu_config_enc.disable_gpu()
        self.cpu_predictor_enc = create_paddle_predictor(cpu_config_enc)
        # decoder
        cpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
        cpu_config_dec.disable_glog_info()
        cpu_config_dec.disable_gpu()
        self.cpu_predictor_dec = create_paddle_predictor(cpu_config_dec)

        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False
        if use_gpu:
            # encoder
            gpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
            gpu_config_enc.disable_glog_info()
            gpu_config_enc.enable_use_gpu(memory_pool_init_size_mb=1000,
                                          device_id=0)
            self.gpu_predictor_enc = create_paddle_predictor(gpu_config_enc)
            # decoder
            gpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
            gpu_config_dec.disable_glog_info()
            gpu_config_dec.enable_use_gpu(memory_pool_init_size_mb=1000,
                                          device_id=0)
            self.gpu_predictor_dec = create_paddle_predictor(gpu_config_dec)
Example #3
    def _set_config(self):
        # predictor config setting.
        cpu_config = AnalysisConfig(self.default_pretrained_model_path)
        cpu_config.disable_glog_info()
        cpu_config.disable_gpu()
        cpu_config.switch_ir_optim(False)
        self.cpu_predictor = create_paddle_predictor(cpu_config)

        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False
        if use_gpu:
            gpu_config = AnalysisConfig(self.default_pretrained_model_path)
            gpu_config.disable_glog_info()
            gpu_config.enable_use_gpu(memory_pool_init_size_mb=500,
                                      device_id=0)
            self.gpu_predictor = create_paddle_predictor(gpu_config)

        # model config setting.
        if not self.model_config:
            with open(os.path.join(self.directory, 'config.yml')) as fp:
                self.model_config = yaml.load(fp.read(),
                                              Loader=yaml.FullLoader)

        self.multi_box_head_config = self.model_config['MultiBoxHead']
        self.output_decoder_config = self.model_config['SSDOutputDecoder']
        self.mobilenet_config = self.model_config['MobileNet']
Example #4
    def _set_config(self):
        """
        predictor config setting
        """
        self.model_file_path = self.default_pretrained_model_path
        cpu_config = AnalysisConfig(self.model_file_path)
        cpu_config.disable_glog_info()
        cpu_config.switch_ir_optim(True)
        cpu_config.enable_memory_optim()
        cpu_config.switch_use_feed_fetch_ops(False)
        cpu_config.switch_specify_input_names(True)
        cpu_config.disable_gpu()
        self.cpu_predictor = create_paddle_predictor(cpu_config)

        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False
        if use_gpu:
            gpu_config = AnalysisConfig(self.model_file_path)
            gpu_config.disable_glog_info()
            gpu_config.switch_ir_optim(True)
            gpu_config.enable_memory_optim()
            gpu_config.switch_use_feed_fetch_ops(False)
            gpu_config.switch_specify_input_names(True)
            gpu_config.enable_use_gpu(100, 0)
            self.gpu_predictor = create_paddle_predictor(gpu_config)
Example #5
def create_predictor(mode):

    if mode == "detect":
        model_file_path = config.det_model_dir
        params_file_path = config.det_param_dir
    else:
        model_file_path = config.rec_model_dir
        params_file_path = config.rec_param_dir

    an_config = AnalysisConfig(model_file_path, params_file_path)

    if config.use_gpu:
        an_config.enable_use_gpu(config.gpu_mem, 0)
    else:
        an_config.disable_gpu()

    an_config.disable_glog_info()

    # use zero copy
    an_config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
    an_config.switch_use_feed_fetch_ops(False)
    predictor = create_paddle_predictor(an_config)
    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_tensor(input_names[0])
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
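
Since this config disables the feed/fetch ops, the returned tensors have to be driven through the zero-copy interface. A minimal sketch follows; the input shape is an assumption, while the tensor-level calls mirror Examples #11 and #16 below.

import numpy as np

predictor, input_tensor, output_tensors = create_predictor("detect")
data = np.random.rand(1, 3, 640, 640).astype("float32")  # assumed input shape
input_tensor.copy_from_cpu(data)  # write the batch into the input tensor
predictor.zero_copy_run()         # run without feed/fetch ops
results = [t.copy_to_cpu() for t in output_tensors]  # outputs as numpy arrays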
Example #6
def create_predictor(mode):
    """
    create predictor for inference
    :param args: params for prediction engine
    :param mode: mode
    :return: predictor
    """
    if mode == "det":
        model_dir = "./src/ai/ocr_paddle/inference/ch_ppocr_mobile_v1.1_det_infer"
    elif mode == 'cls':
        model_dir = "./src/ai/ocr_paddle/inference/ch_ppocr_mobile_v1.1_cls_infer"
    elif mode == 'rec':
        model_dir = "./src/ai/ocr_paddle/inference/ch_ppocr_mobile_v1.1_rec_infer"
    else:
        raise ValueError(
            "'mode' of create_predictor() can only be one of ['det', 'cls', 'rec']"
        )

    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)

    model_file_path = model_dir + "/model"
    params_file_path = model_dir + "/params"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)

    config = AnalysisConfig(model_file_path, params_file_path)

    config.disable_gpu()
    config.set_cpu_math_library_num_threads(6)
    enable_mkldnn = False
    if enable_mkldnn:
        # cache 10 different shapes for mkldnn to avoid memory leak
        config.set_mkldnn_cache_capacity(10)
        config.enable_mkldnn()

    # config.enable_memory_optim()
    config.disable_glog_info()
    use_zero_copy_run = False
    if use_zero_copy_run:
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
    else:
        config.switch_use_feed_fetch_ops(True)

    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
Example #7
def create_predictor_rec(mode):
    """
    create predictor for inference
    :param args: params for prediction engine
    :param mode: mode
    :return: predictor
    """

    model_dir = 'D:/CV/code/PaddleOCR/PaddleOCR-develop/inference_large/rec/'
    model_file_path = model_dir + "/model"
    params_file_path = model_dir + "/params"

    config = AnalysisConfig(model_file_path, params_file_path)

    config.enable_use_gpu(8000, 0)

    # config.enable_memory_optim()
    config.disable_glog_info()

    config.switch_use_feed_fetch_ops(True)

    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
Example #8
    def load_model(self, modelpath, use_gpu):
        # Configure the execution device
        if use_gpu:
            try:
                places = os.environ["CUDA_VISIBLE_DEVICES"]
                places = int(places[0])
            except Exception as e:
                print(
                    'Error: %s. Please set the environment variable "CUDA_VISIBLE_DEVICES".'
                    % e)
                use_gpu = False

        # Load the model parameters
        config = AnalysisConfig(modelpath)

        # Set the config options
        if use_gpu:
            config.enable_use_gpu(100, places)
        else:
            config.disable_gpu()
            config.enable_mkldnn()
        config.disable_glog_info()
        config.switch_ir_optim(True)
        config.enable_memory_optim()
        config.switch_use_feed_fetch_ops(False)
        config.switch_specify_input_names(True)

        # Build the predictor from the config
        predictor = create_paddle_predictor(config)

        # Return the predictor
        return predictor
Example #9
    def train(self, model_save_dir, train_data, test_data=None):
        """执行模型增量训练

        Args:
            model_save_dir: 训练结束后模型保存的路径
            train_data: 训练数据路径
            test_data: 测试数据路径,若为None则不进行测试
        """
        self.args.train_data = train_data
        if test_data:
            self.args.test_data = test_data
        test_program, fetch_list = nets.do_train(self.args)

        fluid.io.save_inference_model(
            os.path.join(model_save_dir, 'model'),
            ['words'],
            fetch_list,
            self.exe,
            main_program=test_program,
        )
        # Copy the config files
        if os.path.exists(os.path.join(model_save_dir, 'conf')):
            shutil.rmtree(os.path.join(model_save_dir, 'conf'))
        shutil.copytree(os.path.join(self.model_path, 'conf'),
                        os.path.join(model_save_dir, 'conf'))
        self.model_path = model_save_dir

        # Load the new model after training finishes
        config = AnalysisConfig(os.path.join(model_save_dir, 'model'))
        if self.args.use_cuda:
            config.enable_use_gpu(
                memory_pool_init_size_mb=500,
                device_id=int(os.getenv('FLAGS_selected_gpus', '0')),
            )
        self.predictor = create_paddle_predictor(config)
Example #10
    def __init__(self, model_path=None, mode='lac', use_cuda=False):
        super(LAC, self).__init__()
        utils.check_cuda(use_cuda)
        if model_path is None:
            model_path = DEFAULT_SEG if mode == 'seg' else DEFAULT_LAC

        self.args = utils.DefaultArgs(model_path)
        self.args.use_cuda = use_cuda
        self.model_path = model_path
        config = AnalysisConfig(self.args.init_checkpoint)

        if use_cuda:
            self.place = fluid.CUDAPlace(
                int(os.getenv('FLAGS_selected_gpus', '0')))
            config.enable_use_gpu(
                memory_pool_init_size_mb=500,
                device_id=int(os.getenv('FLAGS_selected_gpus', '0')),
            )
        else:
            self.place = fluid.CPUPlace()

        # init executor
        self.exe = fluid.Executor(self.place)

        self.dataset = reader.Dataset(self.args)

        self.predictor = create_paddle_predictor(config)

        self.custom = None
        self.batch = False
        self.return_tag = mode != 'seg'
Example #11
    def _get_analysis_outputs(self, config):
        '''
        Return outputs of paddle inference
        Args:
            config (AnalysisConfig): predictor configs
        Returns:
            outs (numpy array): forward network prediction outputs
        '''
        predictor = create_paddle_predictor(config)
        tensor_shapes = predictor.get_input_tensor_shape()
        names = predictor.get_input_names()
        for i, name in enumerate(names):
            #assert name in self.feeds_var, '{} not in feeded dict'.format(name)
            shape = tensor_shapes[name]
            tensor = predictor.get_input_tensor(name)
            feed_data = self.feeds_var[i]
            tensor.copy_from_cpu(np.array(feed_data))
            if type(feed_data) == fluid.LoDTensor:
                tensor.set_lod(feed_data.lod())

        # ensure no diff in multiple repeat times
        repeat_time = 2
        for i in range(repeat_time):
            predictor.zero_copy_run()

        output_names = predictor.get_output_names()
        outs = [
            predictor.get_output_tensor(out_name).copy_to_cpu()
            for out_name in output_names
        ]

        return outs
Example #12
def create_predictor(args, mode, model_path):
    model_dir = model_path
    model_file_path = model_dir + "/model"
    params_file_path = model_dir + "/params"
    assert os.path.exists(model_file_path)
    assert os.path.exists(params_file_path)
    config = AnalysisConfig(model_file_path, params_file_path)

    # use CPU
    config.disable_gpu()
    config.set_cpu_math_library_num_threads(6)
    if args['enable_mkldnn']:
        config.enable_mkldnn()

    # config.enable_memory_optim()
    config.disable_glog_info()

    if args['use_zero_copy_run']:
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
    else:
        config.switch_use_feed_fetch_ops(True)

    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
Example #13
def predict(args):
    # config AnalysisConfig
    config = AnalysisConfig(args.model_file, args.params_file)
    if args.gpu_id < 0:
        config.disable_gpu()
    else:
        config.enable_use_gpu(args.gpu_mem, args.gpu_id)

    # you can enable tensorrt engine if paddle is installed with tensorrt
    # config.enable_tensorrt_engine()

    predictor = create_paddle_predictor(config)

    # input
    inputs = preprocess_image(args.image_path)
    inputs = PaddleTensor(inputs)

    # predict
    outputs = predictor.run([inputs])

    # get output
    output = outputs[0]
    output = output.as_ndarray().flatten()

    cls = np.argmax(output)
    score = output[cls]
    logger.info("class: {0}".format(cls))
    logger.info("score: {0}".format(score))
    return
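
The commented-out `config.enable_tensorrt_engine()` above takes precision and batch-size options; Example #20 below shows them in context. A hedged sketch of what enabling it in this function could look like, with illustrative values that are not part of the original:

# Only meaningful on a TensorRT-enabled Paddle build and when a GPU is used.
if args.gpu_id >= 0:
    config.enable_tensorrt_engine(
        precision_mode=AnalysisConfig.Precision.Float32,  # or .Half for FP16
        max_batch_size=1)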
Example #14
    def _set_config(self):
        """
        predictor config setting
        """
        model_file_path = os.path.join(self.pretrained_model_path, 'model')
        params_file_path = os.path.join(self.pretrained_model_path, 'params')

        config = AnalysisConfig(model_file_path, params_file_path)
        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False

        if use_gpu:
            config.enable_use_gpu(8000, 0)
        else:
            config.disable_gpu()

        config.disable_glog_info()

        # use zero copy
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
        self.predictor = create_paddle_predictor(config)
        input_names = self.predictor.get_input_names()
        self.input_tensor = self.predictor.get_input_tensor(input_names[0])
        output_names = self.predictor.get_output_names()
        self.output_tensors = []
        for output_name in output_names:
            output_tensor = self.predictor.get_output_tensor(output_name)
            self.output_tensors.append(output_tensor)
Example #15
    def __init__(self, model_dir, label_id_path, vocab_path,
            gpu_id=None, gpu_mem=8000, zero_copy=True):
        self.tokenizer = ErnieTokenizer.from_pretrained(vocab_path)
        self.id_2_token = {v: k for k, v in self.tokenizer.vocab.items()}

        label_encoder = LabelEncoder(label_id_info=label_id_path, isFile=True)
        self.id_label_dict = label_encoder.id_label_dict

        # Set up the AnalysisConfig
        config = AnalysisConfig(model_dir)
        if gpu_id is None:
            config.disable_gpu()
        else:
            config.enable_use_gpu(gpu_mem, gpu_id)
            logging.info("gpu id: {}".format(config.gpu_device_id()))

        self.zero_copy = zero_copy
        if self.zero_copy:
            config.switch_use_feed_fetch_ops(False)

        # Create the PaddlePredictor
        self.predictor = create_paddle_predictor(config)

        if self.zero_copy:
            input_names = self.predictor.get_input_names()
            #logging.info(input_names)
            self.input_tensor = self.predictor.get_input_tensor(input_names[0])

            output_names = self.predictor.get_output_names()
            #logging.info(output_names)
            self.output_tensor = self.predictor.get_output_tensor(output_names[0])
Example #16
    def _get_inference_outs(self, config):
        '''
        Return AnalysisPredictor outputs. 
        '''
        predictor = create_paddle_predictor(config)
        tensor_shapes = predictor.get_input_tensor_shape()
        names = predictor.get_input_names()
        for i, name in enumerate(names):
            shape = tensor_shapes[name]
            shape[0] = 1
            tensor = predictor.get_input_tensor(name)
            feed_data = list(self.feeds.values())[i]
            tensor.copy_from_cpu(np.array(feed_data))
            if type(feed_data) == fluid.LoDTensor:
                tensor.set_lod(feed_data.lod())

        predictor.zero_copy_run()

        output_names = predictor.get_output_names()
        outs = [
            predictor.get_output_tensor(out_name).copy_to_cpu()
            for out_name in output_names
        ]

        return outs
Example #17
    def __init__(self, model_path, mode, use_cuda):
        super(Model, self).__init__()

        self.mode = mode
        self.model_path = model_path

        self.args = utils.DefaultArgs(self.model_path)
        self.args.use_cuda = use_cuda

        utils.check_cuda(self.args.use_cuda)

        config = AnalysisConfig(self.args.init_checkpoint)
        config.disable_glog_info()

        if self.args.use_cuda:
            self.place = fluid.CUDAPlace(
                int(os.getenv('FLAGS_selected_gpus', '0')))
            config.enable_use_gpu(memory_pool_init_size_mb=500,
                                  device_id=int(
                                      os.getenv('FLAGS_selected_gpus', '0')),
                                  )
        else:
            self.place = fluid.CPUPlace()

        # init executor
        self.exe = fluid.Executor(self.place)
        self.dataset = reader.Dataset(self.args)
        self.predictor = create_paddle_predictor(config)
        self.segment_tool = None
        self.custom = None
        self.batch = False
Example #18
def main():
    args = parse_args()
    model_file = args.model_dir + "/__model__"
    params_file = args.model_dir + "/params"
    config = AnalysisConfig(model_file, params_file)
    config.disable_gpu()
    predictor = create_paddle_predictor(config)
    test_image(predictor, args.image_path)
Example #19
    def __init__(self):
        """
        create predictor manager
        """
        self.get_predictor_timeout = float(
            config.get('get.predictor.timeout', default_value=0.5))
        predictor_count = 0
        enable_mkl = False
        gpu_memory = 200
        gpu_device_ids = []

        model_dir = config.get('model.dir')
        device_type = config.get('device.type')
        if device_type == PredictorManager.CPU_DEVICE:
            cpu_predictor_count = int(
                config.getint('cpu.predictor.count', default_value=0))
            predictor_count = cpu_predictor_count
            enable_mkl = config.getboolean('cpu.enable_mkl',
                                           default_value=False)
        elif device_type == PredictorManager.GPU_DEVICE:
            gpu_predictor_count = int(
                config.getint('gpu.predictor.count', default_value=0))
            predictor_count = gpu_predictor_count
            gpu_memory = config.getint('gpu.predictor.memory',
                                       default_value=200)
            gpu_device_ids = config.get('gpu.predictor.device.id').split(',')
            gpu_device_ids = map(int, gpu_device_ids)
            if PYTHON_VERSION == 3:
                gpu_device_ids = list(gpu_device_ids)
            assert len(
                gpu_device_ids
            ) == gpu_predictor_count, "gpu predictor count doesn't match device count"
        else:
            raise Exception("no device to run predictor!")
        assert predictor_count > 0, "no device to predict"
        logger.info(
            "device type:{} predictor count:{} model dir:{} get predictor timeout:{}s"
            .format(device_type, predictor_count, model_dir,
                    self.get_predictor_timeout))
        self.predictor_queue = Queue(maxsize=predictor_count)

        for i in range(predictor_count):
            # Set config
            predictor_config = AnalysisConfig(model_dir)
            # predictor_config.specify_input_name()
            if device_type == PredictorManager.CPU_DEVICE:
                predictor_config.disable_gpu()
                if enable_mkl:
                    predictor_config.enable_mkldnn()
            else:
                device_id = gpu_device_ids[i]
                predictor_config.enable_use_gpu(gpu_memory, device_id)

            # Create PaddlePredictor
            predictor = create_paddle_predictor(predictor_config)
            self.predictor_queue.put(predictor)
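
The constructor only fills the queue; `self.get_predictor_timeout` implies predictors are checked out per request and returned afterwards. A minimal sketch of that cycle, with hypothetical `get_predictor`/`return_predictor` methods that are not in the original snippet:

    def get_predictor(self):
        # Block up to the configured timeout; raises queue.Empty on timeout.
        return self.predictor_queue.get(block=True,
                                        timeout=self.get_predictor_timeout)

    def return_predictor(self, predictor):
        # Hand the predictor back for reuse by other requests.
        self.predictor_queue.put(predictor)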
Example #20
def create_predictor(args, mode, logger):
    if mode == "det":
        model_dir = args.det_model_dir
    elif mode == 'cls':
        model_dir = args.cls_model_dir
    else:
        model_dir = args.rec_model_dir

    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)
    model_file_path = model_dir + "/inference.pdmodel"
    params_file_path = model_dir + "/inference.pdiparams"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)

    config = AnalysisConfig(model_file_path, params_file_path)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
        if args.use_tensorrt:
            config.enable_tensorrt_engine(
                precision_mode=AnalysisConfig.Precision.Half
                if args.use_fp16 else AnalysisConfig.Precision.Float32,
                max_batch_size=args.max_batch_size)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(6)
        if args.enable_mkldnn:
            # cache 10 different shapes for mkldnn to avoid memory leak
            config.set_mkldnn_cache_capacity(10)
            config.enable_mkldnn()

    # config.enable_memory_optim()
    config.disable_glog_info()

    if args.use_zero_copy_run:
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
    else:
        config.switch_use_feed_fetch_ops(True)

    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
Example #21
def stgraph_output():
    module = hub.Module(name='resnet50_vd_imagenet_ssld')
    gpu_config = AnalysisConfig(module.default_pretrained_model_path)
    gpu_config.disable_glog_info()
    gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
    gpu_predictor = create_paddle_predictor(gpu_config)
    img = cv2.imread('pandas.jpg')
    data = process_image(img)[np.newaxis, :, :, :]
    data = PaddleTensor(data.copy())
    result = gpu_predictor.run([data])
    return np.sum(result[0].as_ndarray())
Example #22
    def _set_config(self):
        """
        predictor config setting
        """
        model_file_path = os.path.join(self.infer_model_path, 'model')
        params_file_path = os.path.join(self.infer_model_path, 'params')

        config = AnalysisConfig(model_file_path, params_file_path)
        config.enable_use_gpu(8000, 0)
        config.disable_glog_info()

        self.predictor = create_paddle_predictor(config)
Example #23
    def _set_config(self):
        """
        predictor config setting.
        """
        cpu_config = AnalysisConfig(self.default_pretrained_model_path)
        cpu_config.disable_glog_info()
        cpu_config.disable_gpu()
        cpu_config.switch_ir_optim(False)
        self.cpu_predictor = create_paddle_predictor(cpu_config)

        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except Exception:
            use_gpu = False
        if use_gpu:
            gpu_config = AnalysisConfig(self.default_pretrained_model_path)
            gpu_config.disable_glog_info()
            gpu_config.enable_use_gpu(memory_pool_init_size_mb=500, device_id=0)
            self.gpu_predictor = create_paddle_predictor(gpu_config)
Example #24
    def load_model(self, model_dir, roll_back=False):
        print("load_model==>", model_dir)
        config = AnalysisConfig(model_dir)
        # run on the CPU (the GPU is not enabled)
        config.disable_gpu()
        # keep the old model so it can be rolled back to
        if self.predictor and roll_back:
            self.histroy.push(self.predictor)
        # create the predictor
        self.predictor = create_paddle_predictor(config)

        return self.predictor
Example #25
    def __init__(self, model_file, params_file, use_mkldnn=True,
                 use_gpu=False, device_id=0):
        config = AnalysisConfig(model_file, params_file)
        config.switch_use_feed_fetch_ops(False)
        config.switch_specify_input_names(True)
        config.enable_memory_optim()

        if use_gpu:
            print("ENABLE_GPU")
            config.enable_use_gpu(100, device_id)

        if use_mkldnn:
            config.enable_mkldnn()
        self.predictor = create_paddle_predictor(config)
Example #26
def run_program(model_path, data_path):
    place = fluid.CPUPlace()
    inputs = []
    labels = []
    config = None
    if test_args.use_ptq:
        warmup_data, inputs, labels = get_data_with_ptq_warmup(
            data_path, place)
        config = set_config_ptq(model_path, warmup_data)
    else:
        inputs, labels = get_data(data_path, place)
        config = set_config(model_path)

    predictor = create_paddle_predictor(config)
    all_hz_num = 0
    ok_hz_num = 0
    all_ctc_num = 0
    ok_ctc_num = 0
    dataset_size = len(inputs)
    start = time.time()
    for i in range(dataset_size):
        if i == test_args.warmup_iter:
            start = time.time()
        hz_out, ctc_out = predictor.run([inputs[i]])
        np_hz_out = np.array(hz_out.data.float_data()).reshape(-1)
        np_ctc_out = np.array(ctc_out.data.int64_data()).reshape(-1)
        out_hz_label = np.argmax(np_hz_out)
        this_label = labels[i]
        this_label_data = np.array(this_label.data.int32_data()).reshape(-1)
        if this_label.shape[0] == 1:
            all_hz_num += 1
            best = this_label_data[0]
            if out_hz_label == best:
                ok_hz_num += 1
            if this_label_data[0] <= 6350:
                all_ctc_num += 1
                if np_ctc_out.shape[0] == 1 and np.array_equal(
                        np_ctc_out, this_label_data):
                    ok_ctc_num += 1
        else:
            all_ctc_num += 1
            if np_ctc_out.shape[0] == this_label.shape[0] and np.array_equal(
                    np_ctc_out, this_label_data):
                ok_ctc_num += 1
        if all_ctc_num > 1000 or all_hz_num > 1000:
            break
    end = time.time()
    fps = (dataset_size - test_args.warmup_iter) / (end - start)
    hx_acc = ok_hz_num / all_hz_num
    ctc_acc = ok_ctc_num / all_ctc_num
    return hx_acc, ctc_acc, fps
Example #27
    def __load_inference_model(self, model_path, use_gpu):
        """
        :param model_path: directory containing the `model` and `params` files
        :return: a paddle predictor for inference
        """
        check_cuda(use_gpu)
        config = AnalysisConfig(model_path + "/" + "model",
                                model_path + "/" + "params")
        if use_gpu:
            config.enable_use_gpu(1024)
        else:
            config.disable_gpu()
            config.enable_mkldnn()
        inference = create_paddle_predictor(config.to_native_config())
        return inference
Example #28
def create_predictor(args, mode):
    if mode == "det":
        model_dir = args.det_model_dir
    else:
        model_dir = args.rec_model_dir

    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)
    model_file_path = model_dir + "/model"
    params_file_path = model_dir + "/params"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)

    config = AnalysisConfig(model_file_path, params_file_path)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(6)
        if args.enable_mkldnn:
            config.enable_mkldnn()

    #config.enable_memory_optim()
    config.disable_glog_info()

    if args.use_zero_copy_run:
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
    else:
        config.switch_use_feed_fetch_ops(True)

    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
Example #29
    def load_model(self, model_dir):
        """装载预训练的模型"""
        use_cuda = self.args.use_cuda
        self.args = utils.DefaultArgs(model_dir)
        self.args.use_cuda = use_cuda
        self.dataset = reader.Dataset(self.args)
        self.model = self.args.model

        self.model_path = model_dir
        config = AnalysisConfig(os.path.join(model_dir, 'model'))
        config.disable_glog_info()
        if self.args.use_cuda:
            config.enable_use_gpu(memory_pool_init_size_mb=500,
                                  device_id=int(
                                      os.getenv('FLAGS_selected_gpus', '0')),
                                  )
        self.predictor = create_paddle_predictor(config)
Example #30
def create_predictor(args):
    if args.model_dir != "":
        config = AnalysisConfig(args.model_dir)
    else:
        config = AnalysisConfig(args.model_file, args.params_file)

    config.switch_use_feed_fetch_ops(False)
    config.enable_memory_optim()
    if args.use_gpu:
        config.enable_use_gpu(1000, 0)
    else:
        # If MKL-DNN is not enabled, you can set the BLAS thread count instead.
        # The thread num should not be greater than the number of CPU cores.
        config.set_cpu_math_library_num_threads(4)

    predictor = create_paddle_predictor(config)
    return predictor
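
For completeness, a sketch of the `args` object this last example expects; the attribute names match the fields read above, while the values are placeholders:

import argparse

# Placeholder values; model_dir takes priority when it is non-empty.
args = argparse.Namespace(
    model_dir="",
    model_file="inference/model",
    params_file="inference/params",
    use_gpu=False)
predictor = create_predictor(args)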