Example #1
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:

        return False  # Skipped to work around an arm_opencl CI failure; the checks below are unreachable.

        target_type = predictor_config.target()
        in_shape = list(program_config.inputs["input_data"].shape)
        in_data_type = program_config.inputs["input_data"].dtype
        if "int8" == in_data_type:
            print("int8 as Input data type is not supported.")
            return False

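        # For targets other than OpenCL/Metal, the input dtype must match the configured precision.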
        if target_type not in [TargetType.OpenCL, TargetType.Metal]:
            if predictor_config.precision(
            ) == PrecisionType.FP16 and in_data_type != np.float16:
                return False
            elif predictor_config.precision(
            ) == PrecisionType.FP32 and in_data_type != np.float32:
                return False
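        # Metal kernels only take fp16/fp32 inputs.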
        if target_type == TargetType.Metal and in_data_type not in [
                np.float16, np.float32
        ]:
            return False

        if "ScaleTensor" in program_config.inputs:
            print("ScaleTensor as Input is not supported on Paddle Lite.")
            return False
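        # The Host target is skipped entirely; OpenCL only takes 4-D float32 inputs.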
        if predictor_config.target() == TargetType.Host:
            return False
        if predictor_config.target() == TargetType.OpenCL:
            if len(in_shape) != 4 or in_data_type != "float32":
                return False
        return True
Example #2
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        target_type = predictor_config.target()
        in_x_shape = list(program_config.inputs["input_data_x"].shape)
        in_y_shape = list(program_config.inputs["input_data_y"].shape)
        input_data_type = program_config.inputs["input_data_x"].dtype
        # Check config
        if target_type in [TargetType.ARM]:
            if predictor_config.precision(
            ) == PrecisionType.INT64 and input_data_type != np.int64:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP32 and input_data_type != np.float32:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP16 and input_data_type != np.float16:
                return False
            if predictor_config.precision(
            ) == PrecisionType.INT32 and input_data_type != np.int32:
                return False
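        # Metal: require float32 inputs, identical x/y shapes, rank other than 3, and batch size 1.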
        if target_type == TargetType.Metal:
            if input_data_type != np.float32 \
                or in_x_shape != in_y_shape \
                or len(in_x_shape) == 3 \
                or in_x_shape[0] != 1:
                return False

        return True
Example #3
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        target_type = predictor_config.target()
        in_x_shape = list(program_config.inputs["input_data_x"].shape)
        in_y_shape = list(program_config.inputs["input_data_y"].shape)
        input_data_type = program_config.inputs["input_data_x"].dtype
        # Check config
        if target_type in [TargetType.ARM]:
            if predictor_config.precision(
            ) == PrecisionType.INT64 and input_data_type != np.int64:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP32 and input_data_type != np.float32:
                return False
            if predictor_config.precision(
            ) == PrecisionType.FP16 and input_data_type != np.float16:
                return False
            if predictor_config.precision(
            ) == PrecisionType.INT32 and input_data_type != np.int32:
                return False

        if target_type == TargetType.ARM:
            if input_data_type == np.int64:
                err_msg = "Elementwise_add op on this backend will crash with int64 dtype, we should fix it as soon as possible!"
                print(err_msg)
                return False
        if target_type == TargetType.Metal:
            if input_data_type != np.float32 \
                or in_x_shape != in_y_shape \
                or len(in_x_shape) == 3 \
                or in_x_shape[0] != 1:
                return False

        return True
Example #4
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        return False  # Skipped: this case fails in the arm_opencl CI job, so the checks below are unreachable.
        # Get input/output shapes and op attributes.
        x_shape = list(program_config.inputs["input_data_x"].shape)
        y_shape = list(program_config.weights["input_data_y"].shape)
        x_precision = program_config.inputs["input_data_x"].dtype
        x_num_col_dims = program_config.ops[0].attrs["x_num_col_dims"]
        y_num_col_dims = program_config.ops[0].attrs["y_num_col_dims"]

        # {TargetType.Host, TargetType.X86, TargetType.ARM, TargetType.OpenCL}
        if predictor_config.target() == TargetType.ARM:
            # The second dim of x must match the first dim of y.
            if x_shape[1] != y_shape[0]:
                return False
        # {PrecisionType.FP16, PrecisionType.FP32, PrecisionType.FP64, PrecisionType.UINT8, PrecisionType.INT8, PrecisionType.INT16, PrecisionType.INT32, PrecisionType.INT64, PrecisionType.BOOL}
        target_type = predictor_config.target()
        if target_type not in [TargetType.OpenCL, TargetType.Metal]:
            if predictor_config.precision(
            ) == PrecisionType.FP16 and x_precision != np.float16:
                return False
            elif predictor_config.precision(
            ) == PrecisionType.FP32 and x_precision != np.float32:
                return False

        # {DataLayoutType.NCHW, DataLayoutType.NHWC, DataLayoutType.ImageDefault, DataLayoutType.ImageFolder, DataLayoutType.ImageNW, DataLayoutType.Any}
        elif predictor_config.layout() != DataLayoutType.NCHW:
            if y_num_col_dims > 20:
                return False
        return True
Example #5
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        result = True
        if predictor_config.target() == TargetType.OpenCL:
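            # OpenCL: skip grouped conv, conv2d_transpose, dilation 2, and 3x3 filters.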
            filter_shape = list(program_config.weights["filter_data"].shape)
            dilations = program_config.ops[0].attrs["dilations"]
            if program_config.ops[0].attrs["groups"] != 1 or program_config.ops[
                    0].type == "conv2d_transpose" or dilations[0] == 2 or (
                        filter_shape[2] == 3 and filter_shape[3] == 3):
                result = False
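        # conv2d_transpose does not fuse with hard_swish, hard_sigmoid, or prelu.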
        if program_config.ops[0].type == "conv2d_transpose":  #TODO
            result = result and program_config.ops[
                1].type != "hard_swish" and program_config.ops[
                    1].type != "hard_sigmoid" and program_config.ops[
                        1].type != "prelu"
        if predictor_config.target(
        ) == TargetType.ARM or predictor_config.target() == TargetType.X86:
            result = result and program_config.ops[
                1].type != 'prelu' and program_config.ops[
                    1].type != 'hard_sigmoid'
        if predictor_config.target(
        ) == TargetType.ARM and self.depthwise == True:
            result = result and program_config.ops[1].type != 'hard_swish'
        if predictor_config.target() == TargetType.ARM:
            result = result and predictor_config.precision(
            ) != PrecisionType.FP16 and predictor_config.precision(
            ) != PrecisionType.INT8
        if predictor_config.target() == TargetType.Metal:
            result = result and program_config.ops[1].type != 'prelu'

        return result
Example #6
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     result = True
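     # ARM: FP16 and INT8 precisions are excluded for this case.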
     if predictor_config.target() == TargetType.ARM:
         result = result and predictor_config.precision(
         ) != PrecisionType.FP16 and predictor_config.precision(
         ) != PrecisionType.INT8
     return result
Example #7
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     input_data_type = program_config.inputs["input_data"].dtype
     # Check config
     if predictor_config.precision(
     ) == PrecisionType.INT64 and input_data_type != np.int64:
         return False
     if predictor_config.precision(
     ) == PrecisionType.INT32 and input_data_type != np.int32:
         return False
     return True
Example #8
 def is_program_valid(self,
                      program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     result = True
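     # OpenCL: only ungrouped, non-transposed conv is covered.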
     if predictor_config.target() == TargetType.OpenCL:
         result = result and (
             program_config.ops[0].attrs["groups"] == 1 and
             program_config.ops[0].type != "conv2d_transpose")
     if predictor_config.target() == TargetType.ARM:
         result = result and predictor_config.precision(
         ) != PrecisionType.FP16 and predictor_config.precision(
         ) != PrecisionType.INT8
     return result
Example #9
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
     if predictor_config.precision() == PrecisionType.INT64:
         if x_dtype != np.int64:
             return False
     return True
Example #10
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     in_shape = list(program_config.inputs["input_data_x"].shape)
     scale_data = program_config.inputs["Scale"].data
     SizeTensor = list(program_config.inputs["SizeTensor"].shape)
     # Paddle does not support fp16.
     if predictor_config.precision() == PrecisionType.FP16:
         return False
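     # The scaled output H and W must be at least 1.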
     if in_shape[2] * scale_data[0] < 1 or in_shape[3] * scale_data[0] < 1:
         return False
     return True
Example #11
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
     target_type = predictor_config.target()
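     # Under ARM FP16 precision the feed data must still be fp32; NNAdapter only accepts fp32.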
     if target_type in [TargetType.ARM]:
         if predictor_config.precision(
         ) == PrecisionType.FP16 and x_dtype != np.float32:
             return False
     if target_type == TargetType.NNAdapter:
         if program_config.inputs["input_data"].dtype != np.float32:
             return False
     return True
Example #12
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     input_data_type = program_config.inputs["input_data_x"].dtype
     # Check config
     if target_type in [TargetType.ARM]:
         if predictor_config.precision(
         ) == PrecisionType.INT64 and input_data_type != np.int64:
             return False
         if predictor_config.precision(
         ) == PrecisionType.FP32 and input_data_type != np.float32:
             return False
         if predictor_config.precision(
         ) == PrecisionType.FP16 and input_data_type != np.float16:
             return False
         if predictor_config.precision(
         ) == PrecisionType.INT32 and input_data_type != np.int32:
             return False
     if input_data_type == np.float32:
         err_msg = "Paddle's elementwise_floordiv op doesn't support float32 datatype!"
         print(err_msg)
         return False
     return True
Example #13
    def is_program_valid(self, program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        result = True
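        # sigmoid/tanh/swish/exp/abs fused activations require the OpenCL target.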
        if program_config.ops[1].type == "sigmoid" and predictor_config.target(
        ) != TargetType.OpenCL:
            result = False
        if program_config.ops[1].type == "tanh" and predictor_config.target(
        ) != TargetType.OpenCL:
            result = False
        if program_config.ops[1].type == "swish" and predictor_config.target(
        ) != TargetType.OpenCL:
            result = False
        if program_config.ops[1].type == "exp" and predictor_config.target(
        ) != TargetType.OpenCL:
            result = False
        if program_config.ops[1].type == "abs" and predictor_config.target(
        ) != TargetType.OpenCL:
            result = False
        if program_config.ops[0].type == "conv2d_transpose":  #TODO
            result = result and program_config.ops[
                1].type != "hard_swish" and program_config.ops[
                    1].type != "hard_sigmoid" and program_config.ops[
                        1].type != "prelu"
        if predictor_config.target(
        ) == TargetType.ARM or predictor_config.target() == TargetType.X86:
            result = result and program_config.ops[
                1].type != 'prelu' and program_config.ops[
                    1].type != 'hard_sigmoid'
        if predictor_config.target(
        ) == TargetType.ARM and self.depthwise == True:
            result = result and program_config.ops[1].type != 'hard_swish'
        if predictor_config.target() == TargetType.ARM:
            result = result and predictor_config.precision(
            ) != PrecisionType.FP16 and predictor_config.precision(
            ) != PrecisionType.INT8
        if predictor_config.target() == TargetType.Metal:
            result = result and program_config.ops[1].type != 'prelu'

        return result
Example #14
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     target_type = predictor_config.target()
     input_shape = program_config.inputs["input_data"].shape
     filter_data = program_config.weights["filter_data"].shape
     groups = program_config.ops[0].attrs["groups"]
     if target_type == TargetType.OpenCL:
         # OpenCL doesn't support grouped convolution here.
         if groups != 1:
             return False
         else:
             return True
     elif target_type == TargetType.ARM and (
             predictor_config.precision() == PrecisionType.FP16
             or predictor_config.precision() == PrecisionType.INT8):
         # FP16 results differ from the reference and INT8 is not supported.
         return False
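     # Metal: require groups == 1, batch size 1, input channels >= 3, and filter_data[0] >= 3.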
     if target_type == TargetType.Metal:
         if groups != 1:
             return False
         if input_shape[0] != 1 or input_shape[1] < 3 or filter_data[0] < 3:
             return False
     return True
Example #15
 def is_program_valid(self, program_config: ProgramConfig,
                      predictor_config: CxxConfig) -> bool:
     x_dtype = program_config.inputs["input_data"].dtype
     x_shape = list(program_config.inputs["input_data"].shape)
     out_shape = list(program_config.outputs)
     axis = program_config.ops[0].attrs["axis"]
     if predictor_config.precision() == PrecisionType.INT64:
         if x_dtype != np.int64:
             return False
     if predictor_config.target() == TargetType.OpenCL:
         if len(x_shape) != 4 \
             or len(out_shape) != 2 \
             or x_dtype != np.float32:
             return False
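     # Metal: skip rank-2 inputs, axis 0 or 1, and non-fp32 dtypes.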
     if predictor_config.target() == TargetType.Metal:
         if len(x_shape) == 2 or axis == 0 or axis == 1:
             return False
         if x_dtype != np.float32:
             return False
     return True