def is_empty_grad_op(op_type):
    """Return True when no XPU kernel is registered for ``op_type``'s grad op.

    Consults the op list reported for the XPU version of device 0.
    """
    grad_op_name = op_type + '_grad'
    registered_ops = core.get_xpu_device_op_list(core.get_xpu_device_version(0))
    return grad_op_name not in registered_ops
def check_grad_with_place(self,
                          place,
                          inputs_to_check,
                          output_names,
                          no_grad_set=None,
                          numeric_grad_delta=0.005,
                          in_place=False,
                          max_relative_error=0.005,
                          user_defined_grads=None,
                          user_defined_grad_outputs=None,
                          check_dygraph=True,
                          numeric_place=None,
                          check_eager=False):
    """Gradient check specialized for XPU.

    Skips the check when the op has no XPU grad kernel or when the dtype
    is unsupported (float64 always; float16 only if the device supports it).
    For float16 it delegates to the base-class numeric check with a relaxed
    tolerance; otherwise it compares gradients computed twice on XPU (for
    determinism) and once on CPU (for correctness).
    """
    if hasattr(self, 'op_type_need_check_grad'):
        xpu_version = core.get_xpu_device_version(0)
        if is_empty_grad_op_type(xpu_version, self.op_type, self.in_type_str):
            # No grad kernel registered on this XPU version: only run the
            # bookkeeping helper, there is nothing to check numerically.
            self._check_grad_helper()
            return
    # float64 gradient checking is not performed on XPU.
    if self.dtype == np.float64:
        return
    if self.dtype == np.float16:
        # Fixed: `core.is_float16_supported(place) == False` replaced with
        # the idiomatic `not ...`; the two duplicate fp16 tests are merged.
        if not core.is_float16_supported(place):
            return
        # fp16 is noisy: relax the tolerance and fall back to the base class.
        max_relative_error = 1.0
        return super().check_grad_with_place(
            place, inputs_to_check, output_names, no_grad_set,
            numeric_grad_delta, in_place, max_relative_error,
            user_defined_grads, user_defined_grad_outputs, check_dygraph)
    a1 = self.get_grad_with_place(
        place,
        inputs_to_check,
        output_names,
        no_grad_set=no_grad_set,
        user_defined_grad_outputs=user_defined_grad_outputs)
    a2 = self.get_grad_with_place(
        place,
        inputs_to_check,
        output_names,
        no_grad_set=no_grad_set,
        user_defined_grad_outputs=user_defined_grad_outputs)
    a3 = self.get_grad_with_place(
        paddle.CPUPlace(),
        inputs_to_check,
        output_names,
        no_grad_set=no_grad_set,
        user_defined_grad_outputs=user_defined_grad_outputs)
    # Two identical XPU runs must agree almost exactly (determinism check),
    # while XPU vs CPU only needs to agree within max_relative_error.
    self._assert_is_close(a1, a2, inputs_to_check, 0.00000001,
                          "Gradient Check On two xpu")
    self._assert_is_close(a1, a3, inputs_to_check, max_relative_error,
                          "Gradient Check On cpu & xpu")
def get_xpu_op_support_types(op_name, dev_id=0):
    """Return the dtype-name strings supported by ``op_name`` on device
    ``dev_id``'s XPU version, excluding white-listed types."""
    xpu_version = core.get_xpu_device_version(dev_id)
    supported = core.get_xpu_device_op_support_types(op_name, xpu_version)
    names = []
    for dtype in supported:
        # NOTE(review): the original special-cased bfloat16, presumably to
        # normalize an equal-but-distinct key object before the dict lookup —
        # that normalization is preserved here; verify whether it is needed.
        key = paddle.bfloat16 if dtype == paddle.bfloat16 else dtype
        names.append(type_dict_paddle_to_str[key])
    white_list = get_type_white_list()
    return [name for name in names if name not in white_list]
def create_test_class(func_globals,
                      test_class,
                      test_type,
                      test_grad=True,
                      ignore_deivce_version=None,
                      test_deivce_version=None):
    """Register dtype-specialized XPU test classes into ``func_globals``.

    For every class member of ``test_class`` (and, when the class opts into
    ``use_dynamic_create_class``, every dynamically-created class) a subclass
    named ``<name>_<test_type>`` is created carrying ``in_type``,
    ``in_type_str`` and ``op_type_need_check_grad`` attributes, then the op
    (and its grad op, when one exists) is recorded as tested.

    The misspelled parameter names ``ignore_deivce_version`` /
    ``test_deivce_version`` are kept for caller compatibility.
    NOTE(review): ``test_grad`` is accepted but never consulted — grad
    recording is driven by ``is_empty_grad_op_type`` instead; it was unused
    in the original as well.
    """
    # Fixed: mutable default arguments ([]) replaced with None sentinels.
    if ignore_deivce_version is None:
        ignore_deivce_version = []
    if test_deivce_version is None:
        test_deivce_version = []

    xpu_version = core.get_xpu_device_version(0)
    if xpu_version in ignore_deivce_version:
        return
    if len(test_deivce_version) != 0 and xpu_version not in test_deivce_version:
        return

    test_class_obj = test_class()
    register_classes = inspect.getmembers(test_class_obj, inspect.isclass)
    op_name = test_class_obj.op_name
    no_grad = is_empty_grad_op_type(xpu_version, op_name, test_type)

    # Fixed: the loop variable no longer shadows the `test_class` parameter.
    for member_name, member_cls in register_classes:
        if member_name == '__class__':
            continue
        cls_name = "{0}_{1}".format(member_name, str(test_type))
        func_globals[cls_name] = type(
            cls_name, (member_cls, ), {
                'in_type': type_dict_str_to_numpy[test_type],
                'in_type_str': test_type,
                'op_type_need_check_grad': True
            })

    if hasattr(test_class_obj, 'use_dynamic_create_class'
               ) and test_class_obj.use_dynamic_create_class:
        base_class, dynamic_classes = test_class_obj.dynamic_create_class()
        for dy_name, attr_dict in dynamic_classes:
            cls_name = "{0}_{1}".format(dy_name, str(test_type))
            attr_dict['in_type'] = type_dict_str_to_numpy[test_type]
            attr_dict['in_type_str'] = test_type
            attr_dict['op_type_need_check_grad'] = True
            func_globals[cls_name] = type(cls_name, (base_class, ), attr_dict)

    record_op_test(op_name, test_type)
    if not no_grad:
        record_op_test(op_name + '_grad', test_type)
def init_test_case(self):
    """Set up the input tensor and pooling parameters for this test case."""
    self.batch_size = 3
    self.channels = 3
    self.height = 8
    self.width = 6
    self.xpu_version = core.get_xpu_device_version(0)
    # Input layout is NCHW.
    self.x_dim = (self.batch_size, self.channels, self.height, self.width)
    self.spatial_scale = 0.5
    self.pooled_height = 2
    self.pooled_width = 2
    self.sampling_ratio = -1
    # XPU1 cannot use continuous coordinates; on newer versions pick randomly.
    on_xpu1 = self.xpu_version == core.XPUVersion.XPU1
    self.continuous_coordinate = False if on_xpu1 else bool(
        np.random.randint(2))
    self.x = np.random.random(self.x_dim).astype('float32')
def get_test_cover_info():
    """Print XPU op-test coverage: how many ops in the device's op list have
    a recorded test, and which ones are still missing.

    The record file is ``xpu_op_test`` (optionally under the directory named
    by the ``XPU_OP_LIST_DIR`` environment variable), one op name per line.
    """
    xpu_version = core.get_xpu_device_version(0)
    version_str = "xpu2" if xpu_version == core.XPUVersion.XPU2 else "xpu1"
    xpu_op_list = make_xpu_op_list(xpu_version)
    # Fixed: deduplicate once up front; the original did an O(n) list
    # membership test per file line and rebuilt set(...) for each statistic.
    all_ops = set(xpu_op_list)

    xpu_op_covered = []
    dirname = os.getenv('XPU_OP_LIST_DIR')
    filename = 'xpu_op_test'
    if dirname is not None:
        filename = os.path.join(dirname, filename)
    if os.path.exists(filename) and os.path.isfile(filename):
        with open(filename) as f:
            for line in f:
                test_op_name = line.strip()
                if test_op_name in all_ops:
                    xpu_op_covered.append(test_op_name)

    covered_ops = set(xpu_op_covered)
    diff_list = list(all_ops.difference(covered_ops))
    print('{} test: {}/{}'.format(version_str, len(covered_ops),
                                  len(all_ops)))
    # Fixed: `if (len(diff_list) != 0):` replaced with truthiness test.
    if diff_list:
        print("These ops need to be tested on {0}! ops:{1}".format(
            version_str, ','.join(diff_list)))
def test_sparse_adam(self):
    """Run the sparse-adam check, but only on XPU2 devices."""
    # Compare the version enum directly instead of going through the
    # intermediate "xpu1"/"xpu2" string the original built.
    if core.get_xpu_device_version(0) == core.XPUVersion.XPU2:
        self.check_with_place(paddle.XPUPlace(0), False)
def setUp(self):
    # Bind the test to XPU device 0 and record the device's hardware
    # version, then let the subclass hooks choose the dtype and build the
    # concrete test case (order matters: set_case may depend on the dtype).
    self.place = paddle.XPUPlace(0)
    self.xpu_version = core.get_xpu_device_version(0)
    self.init_dtype()
    self.set_case()