Example #1
 def sample_predictor_configs(self):
     config = CxxConfig()
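     # Restrict kernel selection to Host INT64/INT32 kernels with NCHW layout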
     config.set_valid_places({
         Place(TargetType.Host, PrecisionType.INT64, DataLayoutType.NCHW),
         Place(TargetType.Host, PrecisionType.INT32, DataLayoutType.NCHW)
     })
     yield config, ["ctc_align"], (1e-5, 1e-5)
Example #2
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        return False  # Skipped: this case currently fails in the arm_opencl CI run
        # get input&output shape, get op attributes
        x_shape = list(program_config.inputs["input_data_x"].shape)
        y_shape = list(program_config.weights["input_data_y"].shape)
        x_dtype = program_config.inputs["input_data_x"].dtype
        x_num_col_dims = program_config.ops[0].attrs["x_num_col_dims"]
        y_num_col_dims = program_config.ops[0].attrs["y_num_col_dims"]

        # {TargetType.Host, TargetType.X86, TargetType.ARM, TargetType.OpenCL}
        if predictor_config.target() == TargetType.ARM:
            # get input and output shape of current op
            if x_shape[1] != y_shape[0]:
                return False
        # {PrecisionType.FP16, PrecisionType.FP32, PrecisionType.FP64, PrecisionType.UINT8, PrecisionType.INT8, PrecisionType.INT16, PrecisionType.INT32, PrecisionType.INT64, PrecisionType.BOOL}
        target_type = predictor_config.target()
        if target_type not in [TargetType.OpenCL, TargetType.Metal]:
            if predictor_config.precision(
            ) == PrecisionType.FP16 and x_dtype != np.float16:
                return False
            elif predictor_config.precision(
            ) == PrecisionType.FP32 and x_dtype != np.float32:
                return False

        # {DataLayoutType.NCHW, DataLayoutType.NHWC, DataLayoutType.ImageDefault, DataLayoutType.ImageFolder, DataLayoutType.ImageNW, DataLayoutType.Any}
        elif predictor_config.layout() != DataLayoutType.NCHW:
            if y_num_col_dims > 20:
                return False
        return True
Example #3
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        target_type = predictor_config.target()
        in_x_shape = list(program_config.inputs["input_data_x"].shape)
        in_y_shape = list(program_config.inputs["input_data_y"].shape)
        input_data_type = program_config.inputs["input_data_x"].dtype
        # Check config
        if target_type in [TargetType.ARM]:
            if predictor_config.precision(
            ) == PrecisionType.INT64 and input_data_type != np.int64:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP32 and input_data_type != np.float32:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP16 and input_data_type != np.float16:
                return False
            if predictor_config.precision(
            ) == PrecisionType.INT32 and input_data_type != np.int32:
                return False
        if target_type == TargetType.Metal:
            if input_data_type != np.float32 \
                or in_x_shape != in_y_shape \
                or len(in_x_shape) == 3 \
                or in_x_shape[0] != 1:
                return False

        return True
Example #4
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:

        return False  # Skipped: works around an arm_opencl CI failure

        target_type = predictor_config.target()
        in_shape = list(program_config.inputs["input_data"].shape)
        in_data_type = program_config.inputs["input_data"].dtype
        if "int8" == in_data_type:
            print("int8 as Input data type is not supported.")
            return False

        if target_type not in [TargetType.OpenCL, TargetType.Metal]:
            if predictor_config.precision(
            ) == PrecisionType.FP16 and in_data_type != np.float16:
                return False
            elif predictor_config.precision(
            ) == PrecisionType.FP32 and in_data_type != np.float32:
                return False
        if target_type == TargetType.Metal and in_data_type not in [
                np.float16, np.float32
        ]:
            return False

        if "ScaleTensor" in program_config.inputs:
            print("ScaleTensor as Input is not supported on Paddle Lite.")
            return False
        if predictor_config.target() == TargetType.Host:
            return False
        if predictor_config.target() == TargetType.OpenCL:
            if len(in_shape) != 4 or in_data_type != "float32":
                return False
        return True
Example #5
 def sample_predictor_configs(self):
     config = CxxConfig()
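     # Restrict kernel selection to X86 FP32/INT64 kernels with NCHW layout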
     config.set_valid_places({
         Place(TargetType.X86, PrecisionType.FP32, DataLayoutType.NCHW),
         Place(TargetType.X86, PrecisionType.INT64, DataLayoutType.NCHW)
     })
     yield config, ["sequence_concat"], (1e-5, 1e-5)
Example #6
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        target_type = predictor_config.target()
        in_x_shape = list(program_config.inputs["input_data_x"].shape)
        in_y_shape = list(program_config.inputs["input_data_y"].shape)
        input_data_type = program_config.inputs["input_data_x"].dtype
        # Check config
        if target_type in [TargetType.ARM]:
            if predictor_config.precision(
            ) == PrecisionType.INT64 and input_data_type != np.int64:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP32 and input_data_type != np.float32:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP16 and input_data_type != np.float16:
                return False
            if predictor_config.precision(
            ) == PrecisionType.INT32 and input_data_type != np.int32:
                return False

        if target_type == TargetType.ARM:
            if input_data_type == np.int64:
                # elementwise_add on this backend crashes with int64 inputs; skip until it is fixed.
                return False
        if target_type == TargetType.Metal:
            if input_data_type != np.float32 \
                or in_x_shape != in_y_shape \
                or len(in_x_shape) == 3 \
                or in_x_shape[0] != 1:
                return False

        return True
Example #7
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     result = True
     if predictor_config.target() == TargetType.ARM:
         result = result and predictor_config.precision(
         ) != PrecisionType.FP16 and predictor_config.precision(
         ) != PrecisionType.INT8
     return result
Example #8
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
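     # Skip int32/int64 inputs on ARM, OpenCL, and Metal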
     if predictor_config.target() == TargetType.ARM \
     or predictor_config.target() == TargetType.OpenCL \
     or predictor_config.target() == TargetType.Metal :
         if x_dtype == np.int32 or x_dtype == np.int64:
             return False
     return True
Example #9
 def nnadapter_config_set(self, config: CxxConfig):
     config.set_nnadapter_device_names(
         self.args.nnadapter_device_names.split(","))
     config.set_nnadapter_context_properties(
         self.args.nnadapter_context_properties)
     config.set_nnadapter_model_cache_dir(
         self.args.nnadapter_model_cache_dir)
     config.set_nnadapter_subgraph_partition_config_path(
         self.args.nnadapter_subgraph_partition_config_path)
     config.set_nnadapter_mixed_precision_quantization_config_path(
         self.args.nnadapter_mixed_precision_quantization_config_path)
Example #10
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_shape = list(program_config.inputs["input_data"].shape)
     axis = program_config.ops[0].attrs["axis"]
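     # OpenCL: input must be at least 2-D, and axis 0 on a 4-D input is rejected;
     # Metal: only 4-D input with axis == 1 and batch size 1 is accepted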
     if predictor_config.target() == TargetType.OpenCL:
         if len(x_shape) < 2 or (len(x_shape) == 4 and axis == 0):
             return False
     if predictor_config.target() == TargetType.Metal:
         if len(x_shape) != 4 or axis != 1 or x_shape[0] != 1:
             return False
     return True
Example #11
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     input_data_type = program_config.inputs["input_data"].dtype
     # Check config
     if predictor_config.precision(
     ) == PrecisionType.INT64 and input_data_type != np.int64:
         return False
     if predictor_config.precision(
     ) == PrecisionType.INT32 and input_data_type != np.int32:
         return False
     return True
Example #12
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_shape = list(program_config.inputs["input_data"].shape)
     axis = program_config.ops[0].attrs["axis"]
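     # Metal: only 4-D input with axis == 1 and batch 1; nvidia_tensorrt via NNAdapter: input must be at least 2-D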
     if predictor_config.target() == TargetType.Metal:
         if len(x_shape) != 4 or axis != 1 or x_shape[0] != 1:
             return False
     if predictor_config.target() == TargetType.NNAdapter:
         if "nvidia_tensorrt" in self.get_nnadapter_device_name():
             if len(x_shape) < 2:
                 return False
     return True
Example #13
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
     target_type = predictor_config.target()
     if target_type in [TargetType.ARM]:
         if predictor_config.precision(
         ) == PrecisionType.FP16 and x_dtype != np.float32:
             return False
     if target_type == TargetType.NNAdapter:
         if program_config.inputs["input_data"].dtype != np.float32:
             return False
     return True
Example #14
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     result = True
     if predictor_config.target() == TargetType.OpenCL:
         result = result and (
             program_config.ops[0].attrs["groups"] == 1 and
             program_config.ops[0].type != "conv2d_transpose")
     if predictor_config.target() == TargetType.ARM:
         result = result and predictor_config.precision(
         ) != PrecisionType.FP16 and predictor_config.precision(
         ) != PrecisionType.INT8
     return result
Example #15
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     in_shape = list(program_config.inputs["input_data"].shape)
     target_type = predictor_config.target()
     axis = program_config.ops[0].attrs["axis"]
     keep_dims = program_config.ops[0].attrs["keepdims"]
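     # OpenCL only covers the 4-D, keepdims == True case; Metal additionally requires batch 1 and axis == 1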
     if target_type == TargetType.OpenCL:
         if len(in_shape) != 4 or keep_dims == False:
             return False
     if predictor_config.target() == TargetType.Metal:
         if len(in_shape) != 4 or in_shape[
                 0] != 1 or axis != 1 or keep_dims == False:
             return False
     return True
Example #16
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        result = True
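        # OpenCL rejects groups != 1, conv2d_transpose, dilation 2, and 3x3 filters;
        # the remaining checks filter the fused activation op (ops[1]) per backend.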
        if predictor_config.target() == TargetType.OpenCL:
            filter_shape = list(program_config.weights["filter_data"].shape)
            dilations = program_config.ops[0].attrs["dilations"]
            if program_config.ops[0].attrs["groups"] != 1 or program_config.ops[
                    0].type == "conv2d_transpose" or dilations[0] == 2 or (
                        filter_shape[2] == 3 and filter_shape[3] == 3):
                result = False
        if program_config.ops[0].type == "conv2d_transpose":  #TODO
            result = result and program_config.ops[
                1].type != "hard_swish" and program_config.ops[
                    1].type != "hard_sigmoid" and program_config.ops[
                        1].type != "prelu"
        if predictor_config.target(
        ) == TargetType.ARM or predictor_config.target() == TargetType.X86:
            result = result and program_config.ops[
                1].type != 'prelu' and program_config.ops[
                    1].type != 'hard_sigmoid'
        if predictor_config.target(
        ) == TargetType.ARM and self.depthwise == True:
            result = result and program_config.ops[1].type != 'hard_swish'
        if predictor_config.target() == TargetType.ARM:
            result = result and predictor_config.precision(
            ) != PrecisionType.FP16 and predictor_config.precision(
            ) != PrecisionType.INT8
        if predictor_config.target() == TargetType.Metal:
            result = result and program_config.ops[1].type != 'prelu'

        return result
Example #17
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_shape = list(program_config.inputs["input_data"].shape)
     if predictor_config.target() == TargetType.Metal:
         if len(x_shape) != 4:
             return False
     return True
Example #18
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["stack_input1"].dtype
     if predictor_config.target() == TargetType.X86:
         if x_dtype != np.float32:
             return False
     return True
Example #19
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
     if predictor_config.precision() == PrecisionType.INT64:
         if x_dtype != np.int64:
             return False
     return True
Example #20
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     padding_weights = program_config.ops[1].attrs["padding_weights"]
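     # Cases with padding_weights enabled are skipped on ARM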
     if predictor_config.target() == TargetType.ARM:
         if padding_weights:
             return False
     return True
Example #21
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     if predictor_config.target() == TargetType.ARM:
         print("Output has diff on ARM. Skip.")
         return False
     return True
Example #22
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     in_shape = list(program_config.inputs["input_data"].shape)
     if predictor_config.target() == TargetType.OpenCL:
         if program_config.ops[0].attrs["in_num_col_dims"] != 1 or len(
                 in_shape) != 2:
             return False
     return True
Example #23
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     if predictor_config.target() == TargetType.OpenCL:
         fill_constant_shape = program_config.ops[1].attrs["shape"]
         input_shape = list(program_config.inputs["input_data"].shape)
         if len(fill_constant_shape) > 4 or len(input_shape) > 4:
             return False
     return True
Example #24
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     in_shape = program_config.ops[0].attrs["shape"]
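     # Metal: only shapes with a leading dimension of 1 are exercised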
     if target_type in [TargetType.Metal]:
         if in_shape[0] != 1:
             return False
     return True
Example #25
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     in_shape = list(program_config.inputs["input_data"].shape)
     target = predictor_config.target()
     # opencl has error
     # if target in [TargetType.OpenCL]:
     #     return False
     return True
Example #26
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     in_shape = list(program_config.inputs["squeeze2_input_x"].shape)
     if target_type in [TargetType.Metal]:
         if in_shape[1] != 1:
             return False
     return True
Example #27
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_shape = list(program_config.inputs["X_data"].shape)
     axis = program_config.ops[0].attrs["axis"]
     if predictor_config.target() == TargetType.Metal:
         if x_shape[0] != 1:
             return False
     return True
Example #28
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     # get input&output shape, get op attributes
     x_shape = list(program_config.inputs["transpose1_input_x"].shape)
     if predictor_config.target() == TargetType.OpenCL:
         if len(x_shape) == 2 or len(x_shape) == 4:
             return False
     return True
Example #29
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     if predictor_config.target() == TargetType.OpenCL:
         input_shape_x = list(program_config.inputs["input_data_x"].shape)
         input_shape_y = list(program_config.inputs["input_data_y"].shape)
         if len(input_shape_x) > 4 or len(input_shape_y) > 4:
             return False
     return True
Example #30
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     in_shape = list(program_config.inputs["input_data_x"].shape)
     if target_type in [TargetType.Metal]:
         if len(in_shape) != 4:
             return False
     return True
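All of the examples above follow the same pattern: is_program_valid reads shapes, dtypes, and op attributes from the ProgramConfig, reads target, precision, and layout from the CxxConfig, and returns False for combinations the test should skip, while sample_predictor_configs yields a CxxConfig, the ops under test, and the accuracy thresholds. Below is a minimal sketch of such a filter, assuming the usual imports of these auto-scan tests (numpy as np plus the TargetType/PrecisionType enums) and a hypothetical input named "input_data"; the concrete constraints are placeholders, not a definitive implementation.

def is_program_valid(self, program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    # Sketch only: "input_data" and the constraints below are illustrative placeholders.
    target_type = predictor_config.target()
    precision = predictor_config.precision()
    in_shape = list(program_config.inputs["input_data"].shape)
    in_dtype = program_config.inputs["input_data"].dtype

    # Skip precision/dtype mismatches on backends that select kernels by precision.
    if target_type not in [TargetType.OpenCL, TargetType.Metal]:
        if precision == PrecisionType.FP16 and in_dtype != np.float16:
            return False
        if precision == PrecisionType.FP32 and in_dtype != np.float32:
            return False

    # Metal cases in these tests usually expect a 4-D input with batch size 1.
    if target_type == TargetType.Metal:
        if len(in_shape) != 4 or in_shape[0] != 1:
            return False
    return True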