def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Reject every generated program: this op currently fails on arm_opencl CI.

    Fix: the (currently unreachable) precision check below referenced an
    undefined name `in_data_type`; the input dtype is bound to
    `x_precision`, so the check now uses that name and will work once the
    CI skip is lifted.
    """
    # Deliberate skip — the arm_opencl CI run errors for this op.
    return False  # ci run on arm_opencl error
    # ----- unreachable original validation, kept for when the skip is lifted -----
    # get input&output shape, get op attributes
    x_shape = list(program_config.inputs["input_data_x"].shape)
    y_shape = list(program_config.weights["input_data_y"].shape)
    x_precision = program_config.inputs["input_data_x"].dtype
    x_num_col_dims = program_config.ops[0].attrs["x_num_col_dims"]
    y_num_col_dims = program_config.ops[0].attrs["y_num_col_dims"]
    # {TargetType.Host, TargetType.X86, TargetType.ARM, TargetType.OpenCL}
    if predictor_config.target() == TargetType.ARM:
        # mul requires the reduction dimensions to agree.
        if x_shape[1] != y_shape[0]:
            return False
    target_type = predictor_config.target()
    if target_type not in [TargetType.OpenCL, TargetType.Metal]:
        # Input dtype must match the configured predictor precision.
        if predictor_config.precision(
        ) == PrecisionType.FP16 and x_precision != np.float16:
            return False
        elif predictor_config.precision(
        ) == PrecisionType.FP32 and x_precision != np.float32:
            return False
    # {DataLayoutType.NCHW, NHWC, ImageDefault, ImageFolder, ImageNW, Any}
    elif predictor_config.layout() != DataLayoutType.NCHW:
        if y_num_col_dims > 20:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Reject every generated program (arm_opencl CI workaround)."""
    return False  # fix arm_opencl ci error
    # ----- unreachable original validation, kept for when the skip is lifted -----
    # NOTE(review): below, dtype is compared both against strings ("int8",
    # "float32") and numpy types (np.float16) — confirm which form
    # program_config.inputs[...].dtype actually carries before re-enabling.
    target_type = predictor_config.target()
    in_shape = list(program_config.inputs["input_data"].shape)
    in_data_type = program_config.inputs["input_data"].dtype
    if in_data_type == "int8":
        print("int8 as Input data type is not supported.")
        return False
    if target_type not in [TargetType.OpenCL, TargetType.Metal]:
        # dtype must match the configured precision.
        if predictor_config.precision(
        ) == PrecisionType.FP16 and in_data_type != np.float16:
            return False
        elif predictor_config.precision(
        ) == PrecisionType.FP32 and in_data_type != np.float32:
            return False
    if target_type == TargetType.Metal \
            and in_data_type not in [np.float16, np.float32]:
        return False
    if "ScaleTensor" in program_config.inputs:
        print("ScaleTensor as Input is not supported on Paddle Lite.")
        return False
    if target_type == TargetType.Host:
        return False
    if target_type == TargetType.OpenCL:
        # OpenCL needs a 4-D float32 input.
        if len(in_shape) != 4 or in_data_type != "float32":
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Skip integer inputs on the ARM / OpenCL / Metal backends."""
    dtype = program_config.inputs["input_data"].dtype
    target = predictor_config.target()
    if target in (TargetType.ARM, TargetType.OpenCL, TargetType.Metal):
        # Integer dtypes are not handled by these kernels.
        if dtype in (np.int32, np.int64):
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Enforce per-backend rank/axis restrictions for this op."""
    shape = list(program_config.inputs["input_data"].shape)
    axis = program_config.ops[0].attrs["axis"]
    target = predictor_config.target()
    if target == TargetType.OpenCL:
        # OpenCL needs rank >= 2 and cannot take axis 0 on 4-D input.
        if len(shape) < 2 or (len(shape) == 4 and axis == 0):
            return False
    if target == TargetType.Metal:
        # Metal: 4-D input, batch 1, axis must be 1.
        if len(shape) != 4 or axis != 1 or shape[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Enforce Metal and NNAdapter(TensorRT) shape restrictions."""
    shape = list(program_config.inputs["input_data"].shape)
    axis = program_config.ops[0].attrs["axis"]
    target = predictor_config.target()
    if target == TargetType.Metal:
        # Metal: 4-D input, batch 1, axis must be 1.
        if len(shape) != 4 or axis != 1 or shape[0] != 1:
            return False
    if target == TargetType.NNAdapter:
        # TensorRT via NNAdapter requires at least 2-D input.
        if "nvidia_tensorrt" in self.get_nnadapter_device_name():
            if len(shape) < 2:
                return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Filter conv configurations the OpenCL / ARM kernels cannot run."""
    target = predictor_config.target()
    ok = True
    if target == TargetType.OpenCL:
        # OpenCL path: only ungrouped, non-transposed convolution.
        op = program_config.ops[0]
        ok = ok and (op.attrs["groups"] == 1
                     and op.type != "conv2d_transpose")
    if target == TargetType.ARM:
        # ARM path: FP16 and INT8 precisions are excluded.
        precision = predictor_config.precision()
        ok = ok and precision != PrecisionType.FP16 \
            and precision != PrecisionType.INT8
    return ok
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Reject reduce configurations unsupported on OpenCL / Metal."""
    shape = list(program_config.inputs["input_data"].shape)
    target = predictor_config.target()
    axis = program_config.ops[0].attrs["axis"]
    keep_dims = program_config.ops[0].attrs["keepdims"]
    if target == TargetType.OpenCL:
        # OpenCL needs 4-D input with keepdims enabled.
        if len(shape) != 4 or keep_dims == False:
            return False
    if target == TargetType.Metal:
        # Metal additionally needs batch 1 and reduction along axis 1.
        if len(shape) != 4 or shape[0] != 1 or axis != 1 \
                or keep_dims == False:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """X86 backend only accepts float32 stack inputs."""
    dtype = program_config.inputs["stack_input1"].dtype
    if predictor_config.target() == TargetType.X86 and dtype != np.float32:
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Require the input dtype to match the predictor precision on ARM,
    and enforce Metal's shape restrictions."""
    target = predictor_config.target()
    x_shape = list(program_config.inputs["input_data_x"].shape)
    y_shape = list(program_config.inputs["input_data_y"].shape)
    dtype = program_config.inputs["input_data_x"].dtype
    if target in [TargetType.ARM]:
        # Each precision admits exactly one numpy dtype.
        expected = {
            PrecisionType.INT64: np.int64,
            PrecisionType.FP32: np.float32,
            PrecisionType.FP16: np.float16,
            PrecisionType.INT32: np.int32,
        }
        precision = predictor_config.precision()
        if precision in expected and dtype != expected[precision]:
            return False
    if target == TargetType.Metal:
        # Metal: float32 only, matching shapes, no 3-D input, batch 1.
        if dtype != np.float32 or x_shape != y_shape \
                or len(x_shape) == 3 or x_shape[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """ARM fc kernel does not support padded weights."""
    uses_padded_weights = program_config.ops[1].attrs["padding_weights"]
    if predictor_config.target() == TargetType.ARM and uses_padded_weights:
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Validate dtype/precision agreement on ARM and Metal's shape limits.

    Fixes: the int64 branch assigned `err_msg` but never used it — the
    message is now printed before skipping, matching the reporting style
    of the other validity checks in this suite.  The two separate
    `TargetType.ARM` blocks are merged into one.
    """
    target_type = predictor_config.target()
    in_x_shape = list(program_config.inputs["input_data_x"].shape)
    in_y_shape = list(program_config.inputs["input_data_y"].shape)
    input_data_type = program_config.inputs["input_data_x"].dtype
    # ARM: the input dtype must agree with the configured precision.
    if target_type in [TargetType.ARM]:
        if predictor_config.precision(
        ) == PrecisionType.INT64 and input_data_type != np.int64:
            return False
        if predictor_config.precision(
        ) == PrecisionType.FP32 and input_data_type != np.float32:
            return False
        if predictor_config.precision(
        ) == PrecisionType.FP16 and input_data_type != np.float16:
            return False
        if predictor_config.precision(
        ) == PrecisionType.INT32 and input_data_type != np.int32:
            return False
        # Known crash with int64 on ARM; report and skip.
        if input_data_type == np.int64:
            print(
                "Elementwise_add op on this backend will crash with int64 dtype, we should fix it as soon as possible!"
            )
            return False
    if target_type == TargetType.Metal:
        # Metal: float32 only, matching shapes, no 3-D input, batch 1.
        if input_data_type != np.float32 \
                or in_x_shape != in_y_shape \
                or len(in_x_shape) == 3 \
                or in_x_shape[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Skip on ARM: output differs from the reference implementation."""
    if predictor_config.target() == TargetType.ARM:
        print("Output has diff on ARM. Skip.")
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal backend handles only 4-D input for this op."""
    rank = len(list(program_config.inputs["input_data"].shape))
    if predictor_config.target() == TargetType.Metal and rank != 4:
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal backend requires 4-D input for this op."""
    shape = list(program_config.inputs["input_data_x"].shape)
    if predictor_config.target() in [TargetType.Metal] and len(shape) != 4:
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Reject every generated program (arm_opencl CI workaround)."""
    return False  # fix arm_opencl ci error
    # ----- unreachable original check, kept for when the skip is lifted -----
    in_shape = list(program_config.inputs["input_data"].shape)
    if predictor_config.target() == TargetType.OpenCL:
        # OpenCL needs 4-D input.
        if len(in_shape) != 4:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL cannot handle tensors with more than four dimensions."""
    if predictor_config.target() == TargetType.OpenCL:
        const_shape = program_config.ops[1].attrs["shape"]
        in_shape = list(program_config.inputs["input_data"].shape)
        if len(const_shape) > 4 or len(in_shape) > 4:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL fc kernel: 2-D input with in_num_col_dims == 1 only."""
    shape = list(program_config.inputs["input_data"].shape)
    if predictor_config.target() == TargetType.OpenCL:
        col_dims = program_config.ops[0].attrs["in_num_col_dims"]
        if col_dims != 1 or len(shape) != 2:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal requires batch size 1 (taken from the op's `shape` attr)."""
    shape_attr = program_config.ops[0].attrs["shape"]
    if predictor_config.target() in [TargetType.Metal]:
        if shape_attr[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal squeeze2 kernel requires dim 1 of the input to be 1."""
    shape = list(program_config.inputs["squeeze2_input_x"].shape)
    if predictor_config.target() in [TargetType.Metal]:
        # NOTE(review): assumes rank >= 2 — confirm the generator never
        # emits 1-D inputs for this test.
        if shape[1] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal backend requires batch size 1.

    Fix: dropped the unused `axis` local that was read from the op attrs
    but never checked.
    """
    x_shape = list(program_config.inputs["X_data"].shape)
    if predictor_config.target() == TargetType.Metal:
        if x_shape[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL transpose test skips 2-D and 4-D inputs."""
    rank = len(list(program_config.inputs["transpose1_input_x"].shape))
    if predictor_config.target() == TargetType.OpenCL and rank in (2, 4):
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL supports at most 4-D operands for this op."""
    if predictor_config.target() == TargetType.OpenCL:
        rank_x = len(list(program_config.inputs["input_data_x"].shape))
        rank_y = len(list(program_config.inputs["input_data_y"].shape))
        if rank_x > 4 or rank_y > 4:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Accept every generated program.

    Fix: dropped the unused `in_shape`/`target` locals; the OpenCL skip
    they fed was already commented out.
    """
    # opencl has error
    # if predictor_config.target() in [TargetType.OpenCL]:
    #     return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """ARM backend: FP16 and INT8 precisions are not supported here."""
    if predictor_config.target() != TargetType.ARM:
        return True
    precision = predictor_config.precision()
    return precision != PrecisionType.FP16 and precision != PrecisionType.INT8
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Filter conv+activation fusion combinations that are not implemented."""
    act_type = program_config.ops[1].type
    target = predictor_config.target()
    ok = True
    if program_config.ops[0].type == "conv2d_transpose":
        # TODO: transpose-conv fusion with these activations is missing.
        ok = ok and act_type != "hard_swish" \
            and act_type != "hard_sigmoid" and act_type != "prelu"
    if target == TargetType.ARM or target == TargetType.X86:
        ok = ok and act_type != 'prelu' and act_type != 'hard_sigmoid'
    if target == TargetType.ARM and self.depthwise == True:
        # Depthwise conv on ARM additionally excludes hard_swish.
        ok = ok and act_type != 'hard_swish'
    return ok
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL backend handles only 4-D input for this op."""
    rank = len(list(program_config.inputs["input_data"].shape))
    if predictor_config.target() == TargetType.OpenCL and rank != 4:
        return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL box_coder: decode mode, axis 0, normalized boxes,
    and a PriorBoxVar input are all required."""
    if predictor_config.target() == TargetType.OpenCL:
        op = program_config.ops[0]
        if op.attrs["code_type"] == "encode_center_size" \
                or op.attrs["axis"] != 0 \
                or op.attrs["box_normalized"] == False \
                or "PriorBoxVar" not in op.inputs:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """OpenCL grid_sampler: only aligned-corner bilinear zero-padding."""
    if predictor_config.target() == TargetType.OpenCL:
        attrs = program_config.ops[0].attrs
        if attrs["align_corners"] != True \
                or attrs["padding_mode"] != "zeros" \
                or attrs["mode"] != "bilinear":
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """Metal pooling: no adaptive/ceil modes, and batch must be 1."""
    shape = list(program_config.inputs["input_data"].shape)
    if predictor_config.target() == TargetType.Metal:
        attrs = program_config.ops[0].attrs
        if attrs["adaptive"] == True or attrs["ceil_mode"] == True:
            return False
        if shape[0] != 1:
            return False
    return True
def is_program_valid(self,
                     program_config: ProgramConfig,
                     predictor_config: CxxConfig) -> bool:
    """cast op: reject dtype code 0; Metal needs 4-D code-5 to code-5 casts."""
    shape = list(program_config.inputs["input_data"].shape)
    src_dtype = program_config.ops[0].attrs["in_dtype"]
    dst_dtype = program_config.ops[0].attrs["out_dtype"]
    # dtype code 0 is rejected on every backend
    # (presumably BOOL — confirm against Paddle's dtype enum).
    if src_dtype == 0 or dst_dtype == 0:
        return False
    if predictor_config.target() == TargetType.Metal:
        # Metal: only 4-D input with both dtype codes equal to 5
        # (presumably FP32 — confirm against Paddle's dtype enum).
        if len(shape) != 4 or src_dtype != 5 or dst_dtype != 5:
            return False
    return True