def __init__(self, kernel_size, stride, pad_mode, data_format="NCHW"):
    super(_PoolNd, self).__init__()
    self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
    self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
    if context.get_context("device_target") != "GPU" and self.format == "NHWC":
        raise ValueError("NHWC format is only supported on the GPU target.")

    def _check_int_or_tuple(arg_name, arg_value):
        # kernel_size/stride must be a positive int or a tuple of two positive ints.
        validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
        error_msg = f"For '{self.cls_name}', the {arg_name} should be a positive int " \
                    f"or a tuple of two positive ints, but got {arg_value}"
        if isinstance(arg_value, int):
            if arg_value <= 0:
                raise ValueError(error_msg)
        elif len(arg_value) == 2:
            for item in arg_value:
                if isinstance(item, int) and item > 0:
                    continue
                raise ValueError(error_msg)
        else:
            raise ValueError(error_msg)
        return arg_value

    self.kernel_size = _check_int_or_tuple('kernel_size', kernel_size)
    self.stride = _check_int_or_tuple('stride', stride)
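# A minimal usage sketch for the _PoolNd validation above, assuming MindSpore's
# public nn.MaxPool2d subclass (the input shape is illustrative):
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")  # passes _check_int_or_tuple
x = Tensor(np.random.rand(1, 3, 32, 32).astype(np.float32))     # NCHW input
y = pool(x)                                                     # -> (1, 3, 15, 15)
# nn.MaxPool2d(kernel_size=0) would raise the ValueError built from error_msg.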
def __init__(self, similarity='cosine', reduction='none', zero_diagonal=True):
    super().__init__()
    similarity_list = ['dot', 'cosine']
    reduction_list = ['none', 'sum', 'mean']
    similarity = validator.check_value_type("similarity", similarity, [str])
    self.similarity = validator.check_string(similarity, similarity_list, "similarity")
    reduction = validator.check_value_type("reduction", reduction, [str])
    self.reduction = validator.check_string(reduction, reduction_list, "reduction")
    self.zero_diagonal = validator.check_value_type("zero_diagonal", zero_diagonal, [bool])
    self.clear()
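# Hedged usage sketch for the similarity metric above; the class name
# CosineSimilarity is assumed from the parameters, and update()/eval() follow
# MindSpore's Metric protocol (update takes one array of row vectors):
import numpy as np

metric = CosineSimilarity(similarity='cosine', reduction='none', zero_diagonal=True)
metric.clear()
metric.update(np.array([[1., 3., 4., 7.], [2., 4., 2., 5.], [3., 1., 5., 8.]]))
square_matrix = metric.eval()  # pairwise similarities; diagonal zeroed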
def __init__(self, kernel_size, stride, pad_mode):
    super(_PoolNd, self).__init__()
    self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)

    def _check_int_or_tuple(arg_name, arg_value):
        validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
        error_msg = f"For '{self.cls_name}', the {arg_name} should be a positive int " \
                    f"or a tuple of two positive ints, but got {arg_value}"
        if isinstance(arg_value, int):
            if arg_value <= 0:
                raise ValueError(error_msg)
        elif len(arg_value) == 2:
            for item in arg_value:
                if isinstance(item, int) and item > 0:
                    continue
                raise ValueError(error_msg)
        else:
            raise ValueError(error_msg)
        return arg_value

    self.kernel_size = _check_int_or_tuple('kernel_size', kernel_size)
    self.stride = _check_int_or_tuple('stride', stride)
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True, gamma_init='ones',
             beta_init='zeros', moving_mean_init='zeros', moving_var_init='ones',
             use_batch_statistics=None, data_format='NCDHW'):
    super(BatchNorm3d, self).__init__()
    self.format = validator.check_string(data_format, ['NCDHW'], 'format', self.cls_name)
    self.bn2d = BatchNorm2d(num_features=num_features,
                            eps=eps,
                            momentum=momentum,
                            affine=affine,
                            gamma_init=gamma_init,
                            beta_init=beta_init,
                            moving_mean_init=moving_mean_init,
                            moving_var_init=moving_var_init,
                            use_batch_statistics=use_batch_statistics,
                            data_format="NCHW")
    self.shape = P.Shape()
    self.reshape = P.Reshape()
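# Usage sketch: BatchNorm3d above validates NCDHW, then reshapes the 5D input
# so the delegated BatchNorm2d sees NCHW (shapes illustrative):
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

bn = nn.BatchNorm3d(num_features=3)
x = Tensor(np.ones((16, 3, 10, 32, 32)).astype(np.float32))  # N, C, D, H, W
y = bn(x)  # same shape; statistics are computed per channel C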
def __init__(self, skip_channel=True, metric_name="sensitivity",
             calculation_method=False, decrease="mean"):
    super(ConfusionMatrixMetric, self).__init__()
    self.confusion_matrix = _ConfusionMatrix(skip_channel=skip_channel,
                                             metric_name=metric_name,
                                             calculation_method=calculation_method,
                                             decrease=decrease)
    self.skip_channel = validator.check_value_type("skip_channel", skip_channel, [bool])
    self.calculation_method = validator.check_value_type("calculation_method", calculation_method, [bool])
    self.metric_name = validator.check_value_type("metric_name", metric_name, [str])
    decrease_list = ["none", "mean", "sum", "mean_batch", "sum_batch", "mean_channel", "sum_channel"]
    decrease = validator.check_value_type("decrease", decrease, [str])
    self.decrease = validator.check_string(decrease, decrease_list, "decrease")
    self.clear()
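# Hedged usage sketch for ConfusionMatrixMetric above (MindSpore Metric
# protocol assumed: update(y_pred, y) with binary/one-hot inputs, then eval()):
import numpy as np

metric = ConfusionMatrixMetric(skip_channel=True, metric_name="sensitivity",
                               calculation_method=False, decrease="mean")
metric.clear()
y_pred = np.array([[[0], [1]], [[1], [0]]])  # batch x channel x ...
y = np.array([[[0], [1]], [[0], [1]]])
metric.update(y_pred, y)
print(metric.eval())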
def __init__(self, symmetric=False, distance_metric="euclidean"):
    super(RootMeanSquareDistance, self).__init__()
    self.distance_metric_list = ["euclidean", "chessboard", "taxicab"]
    distance_metric = validator.check_value_type("distance_metric", distance_metric, [str])
    self.distance_metric = validator.check_string(distance_metric, self.distance_metric_list,
                                                  "distance_metric")
    self.symmetric = validator.check_value_type("symmetric", symmetric, [bool])
    self.clear()
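# Usage sketch for RootMeanSquareDistance above: update(y_pred, y, label_idx)
# compares the surfaces of two label masks, then eval() returns the distance:
import numpy as np

metric = RootMeanSquareDistance(symmetric=False, distance_metric="euclidean")
metric.clear()
x = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])
metric.update(x, y, 0)  # 0 selects the label to compare
print(metric.eval())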
def __init__(self, paddings, mode="CONSTANT"):
    super(Pad, self).__init__()
    self.mode = mode
    self.paddings = paddings
    Validator.check_string(self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
    if not isinstance(paddings, tuple):
        raise TypeError("paddings must be a tuple.")
    for item in paddings:
        if len(item) != 2:
            raise ValueError("The shape of paddings must be (n, 2).")
    if len(paddings) > 4:
        raise ValueError("Only padding up to 4 dims is supported.")
    if mode == "CONSTANT":
        self.pad = P.Pad(self.paddings)
    else:
        self.paddings = Tensor(np.array(self.paddings))
        self.pad = P.MirrorPad(mode=mode)
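# Usage sketch for Pad above: paddings is one (before, after) pair per input
# dimension; CONSTANT mode uses P.Pad, the mirror modes use P.MirrorPad:
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

pad = nn.Pad(paddings=((1, 1), (2, 2)), mode="CONSTANT")
x = Tensor(np.ones((2, 3)).astype(np.float32))
y = pad(x)  # -> (4, 7): one row added top/bottom, two columns left/right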
def __init__(self, distance_metric="euclidean", percentile=None, directed=False, crop=True):
    super(HausdorffDistance, self).__init__()
    string_list = ["euclidean", "chessboard", "taxicab"]
    distance_metric = validator.check_value_type("distance_metric", distance_metric, [str])
    self.distance_metric = validator.check_string(distance_metric, string_list, "distance_metric")
    self.percentile = percentile if percentile is None else \
        validator.check_value_type("percentile", percentile, [float])
    self.directed = directed if directed is None else \
        validator.check_value_type("directed", directed, [bool])
    self.crop = crop if crop is None else validator.check_value_type("crop", crop, [bool])
    self.clear()
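# Usage sketch for HausdorffDistance above, following the same Metric pattern
# as the surface-distance metrics (update(y_pred, y, label_idx), then eval()):
import numpy as np

metric = HausdorffDistance(distance_metric="euclidean", percentile=None,
                           directed=False, crop=True)
metric.clear()
x = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])
metric.update(x, y, 0)
print(metric.eval())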
def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
    super(MaxPool1d, self).__init__(kernel_size, stride, pad_mode)
    validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
    validator.check_value_type('stride', stride, [int], self.cls_name)
    self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
    validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
    validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
    # 1D pooling is implemented by expanding the input to 4D, running the 2D
    # MaxPool over the last axis, then squeezing the inserted dimension back out.
    self.kernel_size = (1, kernel_size)
    self.stride = (1, stride)
    self.max_pool = P.MaxPool(ksize=self.kernel_size,
                              strides=self.stride,
                              padding=self.pad_mode)
    self.shape = F.shape
    self.reduce_mean = P.ReduceMean(keep_dims=True)
    self.expand = P.ExpandDims()
    self.squeeze = P.Squeeze(2)
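# Usage sketch for MaxPool1d above: the (N, C, L) input is expanded to 4D,
# pooled by P.MaxPool, then squeezed back to 3D (shape illustrative):
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

pool = nn.MaxPool1d(kernel_size=3, stride=2, pad_mode="valid")
x = Tensor(np.random.rand(1, 4, 16).astype(np.float32))
y = pool(x)  # -> (1, 4, 7) with VALID padding: floor((16 - 3) / 2) + 1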
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True, gamma_init="ones", beta_init="zeros", moving_mean_init="zeros", moving_var_init="ones", input_dims="2d", data_format="NCHW"): super().__init__() validator.check_value_type('num_features', num_features, [int], self.cls_name) if num_features < 1: raise ValueError("num_features must be at least 1") self.num_features = num_features if momentum < 0 or momentum > 1: error_msg = "momentum should be a number in range [0, 1], but got {}".format(momentum) raise ValueError(error_msg) self.momentum = 1.0 - momentum self.input_dims = input_dims self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name) if ms.context.get_context("device_target") != "GPU" and self.format == "NHWC": raise ValueError("NHWC format only support in GPU target.") self.eps = eps self.moving_mean = ms.Parameter(initializer( moving_mean_init, num_features), name="mean", requires_grad=False) self.moving_variance = ms.Parameter(initializer( moving_var_init, num_features), name="variance", requires_grad=False) self.gamma = ms.Parameter(initializer( gamma_init, num_features), name="gamma", requires_grad=affine) self.beta = ms.Parameter(initializer( beta_init, num_features), name="beta", requires_grad=affine) # self._cluster_size_op = kfops.KungFuClusterSize() self._all_reduce_op = kfops.KungFuAllReduce() self._square_op = ms.ops.Square() self._sqrt_op = ms.ops.Sqrt() # HACK self._cluster_size_op = kfops.KungFuClusterSizeInput() self._cluster_size_input = ms.Tensor(np.ones((1,), dtype=np.int32))
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True, gamma_init='ones',
             beta_init='zeros', moving_mean_init='zeros', moving_var_init='ones',
             use_batch_statistics=None, device_num_each_group=1, input_dims='2d',
             data_format='NCHW'):
    super(_BatchNorm, self).__init__()
    if num_features < 1:
        raise ValueError("num_features must be at least 1")
    if momentum < 0 or momentum > 1:
        raise ValueError("momentum should be a number in range [0, 1], but got {}".format(momentum))
    self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
    if context.get_context("device_target") != "GPU" and self.format == "NHWC":
        raise ValueError("NHWC format is only supported on the GPU target.")
    self.use_batch_statistics = use_batch_statistics
    self.num_features = num_features
    self.eps = eps
    self.input_dims = input_dims
    self.moving_mean = Parameter(initializer(moving_mean_init, num_features),
                                 name="mean", requires_grad=False)
    self.moving_variance = Parameter(initializer(moving_var_init, num_features),
                                     name="variance", requires_grad=False)
    self.gamma = Parameter(initializer(gamma_init, num_features),
                           name="gamma", requires_grad=affine)
    self.beta = Parameter(initializer(beta_init, num_features),
                          name="beta", requires_grad=affine)
    self.group = validator.check_positive_int(device_num_each_group)
    self.is_global = False
    if self.group != 1:
        # Global (cross-device) sync BN: split devices into groups and
        # all-reduce the statistics inside the group this rank belongs to.
        self.rank_id = get_rank()
        self.rank_size = get_group_size()
        self.device_list = [i for i in range(0, self.rank_size)]
        self.rank_list = self.list_group(self.device_list, self.group)
        self.rank_list_idx = len(self.rank_list)
        for i in range(self.rank_list_idx):
            if self.rank_id in self.rank_list[i] and self.group != 1:
                self.is_global = True
                management.create_group('group' + str(i), self.rank_list[i])
                self.all_reduce = P.AllReduce(P.ReduceOp.SUM, 'group' + str(i)).add_prim_attr('fusion', 1)
    self.shape = P.Shape()
    self.reduce_mean = P.ReduceMean(keep_dims=True)
    self.square = P.Square()
    self.sqrt = P.Sqrt()
    self.cast = P.Cast()
    self.dtype = P.DType()
    self.reshape = P.Reshape()
    self.is_ascend = context.get_context("device_target") == "Ascend"
    self.is_gpu = context.get_context("device_target") == "GPU"
    self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE
    self.momentum = 1.0 - momentum
    self.is_ge_backend = bool(context.get_context("enable_ge"))
    # Pick the training kernel by backend: plain BatchNorm on GE/Ascend graph
    # mode, the fused GPU kernel on GPU, and the generic fused op otherwise.
    if self.is_graph_mode and (self.is_ge_backend or self.is_ascend):
        self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps)
    elif self.is_gpu:
        self.bn_train = P.FusedBatchNormEx(mode=1,
                                           epsilon=self.eps,
                                           momentum=self.momentum,
                                           data_format=self.format)
    else:
        self.bn_train = P.FusedBatchNorm(mode=1, epsilon=self.eps, momentum=self.momentum)
    self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format)
    self.enable_global_sync = self.is_global and (self.is_ge_backend or
                                                  (self.is_graph_mode and self.is_ascend))
    self.enable_default_train = self.is_graph_mode and not self.is_global and \
        (self.is_ge_backend or self.is_ascend)
    data_parallel_strategy = ((1,), (1,))
    data_parallel_strategy_one = ((1,), ())
    self.sub_mean = P.Sub().shard(data_parallel_strategy)
    self.sub_var = P.Sub().shard(data_parallel_strategy)
    self.mul_mean = P.Mul().shard(data_parallel_strategy_one)
    self.mul_var = P.Mul().shard(data_parallel_strategy_one)
    self.assign_sub_mean = P.AssignSub().shard(data_parallel_strategy)
    self.assign_sub_var = P.AssignSub().shard(data_parallel_strategy)
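# _BatchNorm above is the shared base; concrete layers such as nn.BatchNorm1d
# and nn.BatchNorm2d supply input_dims. A minimal sketch with the public 2D
# subclass (shapes illustrative):
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

bn = nn.BatchNorm2d(num_features=3, eps=1e-5, momentum=0.9)
x = Tensor(np.ones((1, 3, 4, 4)).astype(np.float32))
y = bn(x)  # per-channel normalization; moving stats updated in training mode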
def __init__(self, in_channels, out_channels, kernel_size, stride, pad_mode, padding,
             dilation, group, has_bias, weight_init, bias_init, data_format='NCHW',
             transposed=False):
    super(_Conv, self).__init__()
    self.in_channels = Validator.check_positive_int(in_channels)
    self.out_channels = Validator.check_positive_int(out_channels)
    self.kernel_size = kernel_size
    self.stride = stride
    self.pad_mode = pad_mode
    self.weight_init = weight_init
    self.bias_init = bias_init
    self.format = Validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
    if context.get_context("device_target") != "GPU" and self.format == "NHWC":
        raise ValueError("NHWC format is only supported on the GPU target.")
    if isinstance(padding, int):
        Validator.check_non_negative_int(padding, 'padding', self.cls_name)
        self.padding = padding
    elif isinstance(padding, tuple):
        for pad in padding:
            Validator.check_non_negative_int(pad, 'padding item', self.cls_name)
        self.padding = padding
    else:
        raise TypeError("padding must be an int or a tuple of ints, but got {}!".format(type(padding)))
    self.dilation = dilation
    self.group = Validator.check_positive_int(group)
    self.has_bias = has_bias
    # kernel_size/stride/dilation arrive here as 2-tuples; each item must be a
    # positive int (bool is excluded explicitly since bool subclasses int).
    if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
            isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
            kernel_size[0] < 1 or kernel_size[1] < 1:
        raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed " + str(self.kernel_size) +
                         ", should be an int or a tuple of ints equal to or greater than 1.")
    if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
            isinstance(stride[0], bool) or isinstance(stride[1], bool) or \
            stride[0] < 1 or stride[1] < 1:
        raise ValueError("Attr 'stride' of 'Conv2D' Op passed " + str(self.stride) +
                         ", should be an int or a tuple of ints equal to or greater than 1.")
    if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
            isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or \
            dilation[0] < 1 or dilation[1] < 1:
        raise ValueError("Attr 'dilation' of 'Conv2D' Op passed " + str(self.dilation) +
                         ", should be an int or a tuple of ints equal to or greater than 1.")
    if in_channels % group != 0:
        raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
                         "attr 'group' of 'Conv2D' Op.")
    if out_channels % group != 0:
        raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
                         "attr 'group' of 'Conv2D' Op.")
    if transposed:
        shape = [in_channels, out_channels // group, *kernel_size]
    else:
        shape = [out_channels, in_channels // group, *kernel_size] if self.format == "NCHW" else \
            [out_channels, *kernel_size, in_channels // group]
    self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
    if Validator.check_bool(has_bias):
        self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
    else:
        if self.bias_init != 'zeros':
            logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
        self.bias = None
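# _Conv above is the shared base for convolution layers; nn.Conv2d fills in the
# op-specific pieces. A minimal sketch (shapes illustrative):
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
                 stride=1, pad_mode="same", has_bias=True)
x = Tensor(np.ones((1, 3, 16, 16)).astype(np.float32))
y = conv(x)  # -> (1, 8, 16, 16) with 'same' padding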