Code example #1
    def run_test(self, quant=False, prog_configs=None):
        status = True

        # predictor configs, the expected op list, and the tolerances are
        # sampled once here and reused for every program config below
        paddlelite_configs, op_list_, (
            atol_, rtol_) = self.sample_predictor_configs()
        for prog_config in prog_configs:

            predictor_idx = -1
            for paddlelite_config in paddlelite_configs:
                predictor_idx += 1
                # judge validity of program
                if not self.is_program_valid(prog_config, paddlelite_config):
                    self.num_invalid_programs_list[predictor_idx] += 1
                    continue
                self.num_ran_programs_list[predictor_idx] += 1

                # create the model and prepare the feed data
                model, params = create_fake_model(prog_config)
                if quant:
                    model, params = create_quant_model(model, params)

                feed_data = {}
                for name, tensor_config in prog_config.inputs.items():
                    feed_data[name] = {
                        'data': tensor_config.data,
                        'lod': tensor_config.lod
                    }
                results: List[Dict[str, np.ndarray]] = []
                # ignore info
                accuracy_error_flag = False
                paddle_not_support_flag = False
                paddle_lite_not_support_flag = False
                op_fusion_error_flag = False
                pred_config = paddlelite_config.value()
                for ignore_info in self.ignore_cases:
                    if ignore_info[0](prog_config, paddlelite_config):
                        self.num_ignore_tests_list[predictor_idx] += 1
                        if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR:
                            accuracy_error_flag = True
                            self.ignore_log(
                                "[ACCURACY_ERROR] " + ignore_info[2] + ' ' +
                                ' vs ' +
                                self.paddlelite_config_str(pred_config))
                            gl.set_out_diff_ops(self.get_target(), sys.argv[0])
                        elif (ignore_info[1] ==
                              IgnoreReasonsBase.PADDLELITE_NOT_SUPPORT):
                            paddle_lite_not_support_flag = True
                            self.ignore_log(
                                "[PADDLELITE_NOT_SUPPORT ERROR] " +
                                ignore_info[2] + ' ' + ' vs ' +
                                self.paddlelite_config_str(pred_config))
                        elif (ignore_info[1] ==
                              IgnoreReasonsBase.PADDLE_NOT_SUPPORT):
                            paddle_not_support_flag = True
                            self.ignore_log(
                                "[PADDLE_NOT_SUPPORT ERROR] " +
                                ignore_info[2] + ' ' + ' vs ' +
                                self.paddlelite_config_str(pred_config))
                        elif (ignore_info[1] ==
                              IgnoreReasonsBase.OP_FUSION_ERROR):
                            op_fusion_error_flag = True
                            self.ignore_log(
                                "[OP_FUSION ERROR] " + ignore_info[2] + ' ' +
                                ' vs ' +
                                self.paddlelite_config_str(pred_config))
                        else:
                            raise NotImplementedError

                if paddle_not_support_flag:
                    gl.set_paddle_not_supported_ops(self.get_target(),
                                                    sys.argv[0])
                    continue

                # baseline: cpu no ir_optim run
                base_config = self.create_inference_config(ir_optim=False)
                logging.info('[ProgramConfig]: ' + str(prog_config))
                results.append(
                    self.run_test_config(model, params, prog_config,
                                         base_config, feed_data))
                if paddle_lite_not_support_flag:
                    gl.set_lite_not_supported_ops(self.get_target(),
                                                  sys.argv[0])
                    continue

                if os.path.exists(self.cache_dir):
                    shutil.rmtree(self.cache_dir)
                if not os.path.exists(self.cache_dir):
                    os.mkdir(self.cache_dir)
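                # run the optimized model through Paddle-Lite; note that a
                # failure below breaks out of the config loop and fails the test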
                try:
                    result, opt_model_bytes = self.run_lite_config(
                        model, params, feed_data, pred_config)
                    results.append(result)
                    # the checks below honor the ignore flags collected above
                    if self.passes is not None:  # pass check
                        if not accuracy_error_flag:
                            self.assert_tensors_near(atol_, rtol_, results[-1],
                                                     results[0])
                        if not op_fusion_error_flag:
                            self.assert_op_list(opt_model_bytes, op_list_)
                    else:  # op check
                        self.assert_kernel_type(opt_model_bytes, op_list_,
                                                paddlelite_config)
                        if not accuracy_error_flag:
                            self.assert_tensors_near(atol_, rtol_, results[-1],
                                                     results[0])
                except Exception as e:
                    self.fail_log(
                        self.paddlelite_config_str(pred_config) +
                        '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
                    status = False
                    break
                self.success_log('PredictorConfig: ' +
                                 self.paddlelite_config_str(pred_config))
        self.assertTrue(status)
        gl.set_success_ops(self.get_target(), sys.argv[0])
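
The comparisons above route through an assert_tensors_near helper supplied by the test base class; its real implementation is not shown in this excerpt, but a minimal sketch under that assumption, built on numpy.testing, could look like this:

    import numpy as np

    # Minimal sketch of an assert_tensors_near-style check (assumed behavior):
    # every named output must match the baseline within atol/rtol.
    def assert_tensors_near(atol, rtol, actual, expected):
        for name, baseline in expected.items():
            np.testing.assert_allclose(
                actual[name], baseline, rtol=rtol, atol=atol,
                err_msg="output '{}' diverges from the baseline".format(name))
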
Code example #2
File: auto_scan_test.py  Project: sandyhouse/Paddle
    def run_test(self, quant=False, *args, **kwargs):
        status = True
        run_flags = []
        for prog_config in self.sample_program_configs(*args, **kwargs):
            # In CI, run only a random fraction (self.num_percent_cases) of cases
            run_flags.append(np.random.rand() < self.num_percent_cases)

        for prog_config, run_flag in zip(
                self.sample_program_configs(*args, **kwargs), run_flags):
            if not run_flag:
                continue

            # if the program is invalid, skip this case.
            if not self.is_program_valid(prog_config):
                continue

            model, params = create_fake_model(prog_config)
            if quant:
                model, params = create_quant_model(model, params)

            feed_data = {}
            for name, tensor_config in prog_config.inputs.items():
                feed_data[name] = {
                    'data': tensor_config.data,
                    'lod': tensor_config.lod
                }

            results: List[Dict[str, np.ndarray]] = []

            # baseline: gpu run
            logging.info('RUN program_config: ' + str(prog_config))
            gpu_config = self.create_inference_config(use_trt=False)
            results.append(
                self.run_test_config(model, params, prog_config, gpu_config,
                                     feed_data))
            self.success_log('RUN_GPU_BASELINE done')

            for pred_config, nodes_num, threshold in self.sample_predictor_configs(
                    prog_config):

                if os.path.exists(self.cache_dir):
                    shutil.rmtree(self.cache_dir)

                if isinstance(threshold, float):
                    atol = threshold
                    rtol = 1e-8
                elif isinstance(threshold, (list, tuple)):
                    atol = threshold[0]
                    rtol = threshold[1]
                else:
                    raise NotImplementedError

                # pair quantized models only with Int8 TensorRT precision,
                # and Int8 precision only with quantized models
                precision_mode = pred_config.tensorrt_precision_mode()
                if quant and precision_mode != paddle_infer.PrecisionType.Int8:
                    continue
                if precision_mode == paddle_infer.PrecisionType.Int8 and not quant:
                    continue

                ignore_flag = False
                for ignore_info in self.ignore_cases:
                    if ignore_info[0](prog_config, pred_config):
                        ignore_flag = True
                        if ignore_info[1] == IgnoreReasons.TRT_NOT_IMPLEMENTED:
                            self.ignore_log(
                                "[TRT_NOT_IMPLEMENTED] " + ignore_info[2] +
                                ' ' + ' vs ' +
                                self.inference_config_str(pred_config))
                        elif ignore_info[1] == IgnoreReasons.TRT_NOT_SUPPORT:
                            self.ignore_log(
                                "[TRT_NOT_SUPPORT] " + ignore_info[2] + ' ' +
                                ' vs ' +
                                self.inference_config_str(pred_config))
                        else:
                            raise NotImplementedError
                        break

                try:
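                    # a copy of the config, reused below for the deserialize test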
                    pred_config_deserialize = paddle_infer.Config(pred_config)
                    results.append(
                        self.run_test_config(model, params, prog_config,
                                             pred_config, feed_data))
                    self.assert_tensors_near(atol, rtol, results[-1],
                                             results[0])
                    if not ignore_flag:
                        self.assert_op_size(nodes_num[0], nodes_num[1])
                    # deserialize test
                    if nodes_num[0] > 0:
                        self.run_test_config(model, params, prog_config,
                                             pred_config_deserialize,
                                             feed_data)
                except Exception as e:
                    self.fail_log(
                        self.inference_config_str(pred_config) +
                        '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
                    if not ignore_flag:
                        status = False
                    continue
                self.success_log('RUN predictor_config ' +
                                 self.inference_config_str(pred_config) +
                                 ' done')

        self.assertTrue(status)
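
The threshold handling above accepts either a bare float (atol, with a default rtol of 1e-8) or an (atol, rtol) pair; as a self-contained restatement, the normalization reduces to a small helper like this sketch (the function name is illustrative):

    def normalize_threshold(threshold):
        # float -> (atol, default rtol); list/tuple -> (atol, rtol)
        if isinstance(threshold, float):
            return threshold, 1e-8
        if isinstance(threshold, (list, tuple)):
            return threshold[0], threshold[1]
        raise NotImplementedError(str(type(threshold)))
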
Code example #3
File: auto_scan_test.py  Project: sandyhouse/Paddle
    def run_test(self, quant=False, *args, **kwargs):
        status = True

        for prog_config in self.sample_program_configs(*args, **kwargs):
            # if the program is invalid, skip this case.
            if not self.is_program_valid(prog_config):
                continue

            model, params = create_fake_model(prog_config)
            if quant:
                model, params = create_quant_model(model, params)

            feed_data = {}
            for name, tensor_config in prog_config.inputs.items():
                feed_data[name] = {
                    'data': tensor_config.data,
                    'lod': tensor_config.lod
                }
            results: List[Dict[str, np.ndarray]] = []

            # baseline: cpu no ir_optim run
            base_config = self.create_inference_config(ir_optim=False)
            logging.info('RUN program_config: ' + str(prog_config))
            results.append(
                self.run_test_config(model, params, prog_config, base_config,
                                     feed_data))
            self.success_log('RUN_CPU_BASELINE done')

            for pred_config, (
                    atol, rtol) in self.sample_predictor_configs(prog_config):
                # skip info
                ignore_flag = False
                for ignore_info in self.ignore_cases:
                    if ignore_info[0](prog_config, pred_config):
                        ignore_flag = True
                        if (ignore_info[1] ==
                                IgnoreReasons.MKLDNN_ACCURACY_ERROR):
                            self.ignore_log(
                                "[MKLDNN_ACCURACY_ERROR] " + ignore_info[2] +
                                ' ' + ' vs ' +
                                self.inference_config_str(pred_config))
                        else:
                            raise NotImplementedError
                        break

                if os.path.exists(self.cache_dir):
                    shutil.rmtree(self.cache_dir)
                if not os.path.exists(self.cache_dir):
                    os.mkdir(self.cache_dir)

                try:
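                    # run the optimized predictor and compare its outputs
                    # against the CPU baseline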
                    results.append(
                        self.run_test_config(model, params, prog_config,
                                             pred_config, feed_data))
                    self.assert_tensors_near(atol, rtol, results[-1],
                                             results[0])
                except Exception as e:
                    self.fail_log(
                        self.inference_config_str(pred_config) +
                        '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
                    if not ignore_flag:
                        status = False
                    continue
                self.success_log('RUN predictor_config ' +
                                 self.inference_config_str(pred_config) +
                                 ' done')

        self.assertTrue(status)
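
Each entry of self.ignore_cases is indexed as ignore_info[0] (a predicate over the program and predictor configs), ignore_info[1] (a reason enum), and ignore_info[2] (a note). A self-contained sketch of one such entry, with an illustrative predicate and a stand-in string in place of the IgnoreReasons enum:

    # Hypothetical ignore-case entry matching the (predicate, reason, note)
    # layout that the loops above index into.
    def mkldnn_accuracy_teller(prog_config, pred_config):
        # illustrative condition only
        return len(prog_config.ops) == 1

    ignore_cases = [
        (mkldnn_accuracy_teller,
         "MKLDNN_ACCURACY_ERROR",  # stands in for IgnoreReasons.MKLDNN_ACCURACY_ERROR
         "output is expected to diverge slightly under MKLDNN"),
    ]
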
Code example #4
File: auto_scan_test.py  Project: sandyhouse/Paddle
    def run_test(self, quant=False, prog_configs=None):
        status = True

        for prog_config in prog_configs:
            # if the program is invalid, skip this case.
            if not self.is_program_valid(prog_config):
                self.num_invalid_programs += 1
                continue
            self.num_ran_programs += 1
            model, params = create_fake_model(prog_config)
            if quant:
                model, params = create_quant_model(model, params)

            feed_data = {}
            for name, tensor_config in prog_config.inputs.items():
                feed_data[name] = {
                    'data': tensor_config.data,
                    'lod': tensor_config.lod
                }

            logging.info('RUN program_config: ' + str(prog_config))
            self.num_predictor_kinds = 0
            for pred_config, op_list, (
                    atol, rtol) in self.sample_predictor_configs(prog_config):
                self.num_predictor_kinds += 1

                # skip info
                ignore_flag = False
                for ignore_info in self.ignore_cases:
                    if ignore_info[0](prog_config, pred_config):
                        ignore_flag = True
                        self.num_ignore_tests += 1
                        if ignore_info[1] == IgnoreReasons.PASS_ACCURACY_ERROR:
                            self.ignore_log(
                                "[PASS_ACCURACY_ERROR] " + ignore_info[2] +
                                ' ' + ' vs ' +
                                self.inference_config_str(pred_config))
                        else:
                            raise NotImplementedError
                        break

                if os.path.exists(self.cache_dir):
                    shutil.rmtree(self.cache_dir)
                if not os.path.exists(self.cache_dir):
                    os.mkdir(self.cache_dir)

                # baseline: no ir_optim run, on the same device (use_gpu) as the predictor
                base_config = self.create_inference_config(
                    ir_optim=False, use_gpu=pred_config.use_gpu())
                try:
                    # baseline
                    base_result = self.run_test_config(model, params,
                                                       prog_config,
                                                       base_config, feed_data)
                    self.success_log('RUN_BASELINE ' +
                                     self.inference_config_str(base_config) +
                                     ' done')

                    if os.path.exists(self.cache_dir):
                        shutil.rmtree(self.cache_dir)

                    pred_result = self.run_test_config(model, params,
                                                       prog_config,
                                                       pred_config, feed_data)
                    self.assert_tensors_near(atol, rtol, pred_result,
                                             base_result)
                    if not ignore_flag:
                        self.assert_op_list(op_list)

                except Exception as e:
                    self.fail_log(
                        self.inference_config_str(pred_config) +
                        '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
                    if not ignore_flag:
                        status = False
                    continue
                self.success_log('RUN predictor_config ' +
                                 self.inference_config_str(pred_config) +
                                 ' done')

        status = self.check_op_version() and status
        self.assertTrue(status)
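
All five variants build feed_data the same way: each input name maps to a dict holding the tensor's data and its LoD. A runnable sketch with a minimal stand-in for the TensorConfig objects (only the two fields the loop reads are modeled):

    import numpy as np

    class TensorConfig:
        # stand-in exposing just the fields read by the feed_data loop
        def __init__(self, data, lod=None):
            self.data = data
            self.lod = lod

    inputs = {
        'input_data': TensorConfig(
            np.random.random([1, 3, 32, 32]).astype(np.float32))
    }
    feed_data = {
        name: {'data': t.data, 'lod': t.lod}
        for name, t in inputs.items()
    }
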
Code example #5
    def run_test(self, quant=False, prog_configs=None):
        status = True

        paddlelite_configs, op_list_, (atol_,
                                       rtol_) = self.sample_predictor_configs()
        for prog_config in prog_configs:
            # skip programs that are invalid for every predictor config
            program_valid_ = False
            for paddlelite_config in paddlelite_configs:
                # judge validity of program
                if self.is_program_valid(prog_config, paddlelite_config):
                    program_valid_ = True
            if not program_valid_:
                self.num_invalid_programs += 1
                continue

            self.num_ran_programs += 1
            model, params = create_fake_model(prog_config)
            if quant:
                model, params = create_quant_model(model, params)

            feed_data = {}
            for name, tensor_config in prog_config.inputs.items():
                feed_data[name] = {
                    'data': tensor_config.data,
                    'lod': tensor_config.lod
                }
            results: List[Dict[str, np.ndarray]] = []

            # baseline: cpu no ir_optim run
            base_config = self.create_inference_config(ir_optim=False)
            logging.info('[ProgramConfig]: ' + str(prog_config))
            results.append(
                self.run_test_config(model, params, prog_config, base_config,
                                     feed_data))

            for paddlelite_config in paddlelite_configs:
                self.num_predictor_kinds += 1
                # ignore info
                ignore_flag = False
                paddle_lite_not_support_flag = False
                pred_config = paddlelite_config.value()
                for ignore_info in self.ignore_cases:
                    if ignore_info[0](prog_config, paddlelite_config):
                        ignore_flag = True
                        self.num_ignore_tests += 1
                        if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR:
                            self.ignore_log(
                                "[ACCURACY_ERROR] " + ignore_info[2] + ' ' +
                                ' vs ' +
                                self.paddlelite_config_str(pred_config))
                        elif (ignore_info[1] ==
                              IgnoreReasonsBase.PADDLELITE_NOT_SUPPORT):
                            paddle_lite_not_support_flag = True
                            self.ignore_log(
                                "[PADDLELITE_NOT_SUPPORT ERROR] " +
                                ignore_info[2] + ' ' + ' vs ' +
                                self.paddlelite_config_str(pred_config))
                        else:
                            raise NotImplementedError
                        break
                if paddle_lite_not_support_flag:
                    continue

                if os.path.exists(self.cache_dir):
                    shutil.rmtree(self.cache_dir)
                if not os.path.exists(self.cache_dir):
                    os.mkdir(self.cache_dir)
                try:
                    result, opt_model_bytes = self.run_lite_config(
                        model, params, feed_data, pred_config)
                    results.append(result)
                    self.assert_tensors_near(atol_, rtol_, results[-1],
                                             results[0])
                    # pass unit test: check the fused op list, skipping the
                    # check for ignored cases (precision was already checked)
                    if self.passes is not None:
                        if not ignore_flag:
                            self.assert_op_list(opt_model_bytes, op_list_)
                    else:
                        self.assert_kernel_type(opt_model_bytes, op_list_,
                                                paddlelite_config)
                except Exception as e:
                    self.fail_log(
                        self.paddlelite_config_str(pred_config) +
                        '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
                    if not ignore_flag:
                        status = False
                    continue
                self.success_log('PredictorConfig: ' +
                                 self.paddlelite_config_str(pred_config))
        self.assertTrue(status)
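
The validity pre-check at the top of this variant treats a program as runnable if at least one predictor config accepts it; behaviourally it reduces to an any() over the configs, as in this self-contained restatement (function names are illustrative):

    def program_is_valid(prog_config, paddlelite_configs, is_program_valid):
        # equivalent to the flag-based loop above
        return any(
            is_program_valid(prog_config, cfg)
            for cfg in paddlelite_configs)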