def test_broadcast_diff_dims():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    x1_np = np.random.rand(2).astype(np.float32)
    x2_np = np.random.rand(2, 1).astype(np.float32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
def __init__(self, num_classes, num_boxes, neg_pre_positive, batch_size):
    super(MultiBoxLoss, self).__init__()
    self.num_classes = num_classes
    self.num_boxes = num_boxes
    self.neg_pre_positive = neg_pre_positive
    self.notequal = P.NotEqual()
    self.less = P.Less()
    self.tile = P.Tile()
    self.reduce_sum = P.ReduceSum()
    self.reduce_mean = P.ReduceMean()
    self.expand_dims = P.ExpandDims()
    self.smooth_l1_loss = P.SmoothL1Loss()
    self.cross_entropy = SoftmaxCrossEntropyWithLogits()
    self.maximum = P.Maximum()
    self.minimum = P.Minimum()
    self.sort_descend = P.TopK(True)
    self.sort = P.TopK(True)
    self.gather = P.GatherNd()
    self.max = P.ReduceMax()
    self.log = P.Log()
    self.exp = P.Exp()
    self.concat = P.Concat(axis=1)
    self.reduce_sum2 = P.ReduceSum(keep_dims=True)
    self.idx = Tensor(np.reshape(np.arange(batch_size * num_boxes), (-1, 1)), ms.int32)
def __init__(self, axis, mean=False):
    super().__init__()
    self.average = mean
    self.axis = axis
    # ~ self.keepdim = keepdim
    self.reduce_sum = P.ReduceSum()
    self.maximum = P.Maximum()
def construct(self, x):
    alpha_array = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
    if self.alpha <= 1:
        out = P.Maximum()(alpha_array * x, x)
    else:
        out = P.Minimum()(alpha_array * x, x)
    return out
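# A minimal NumPy sketch (an addition, not part of the original source) of why
# the branch above works: for alpha <= 1, max(alpha * x, x) keeps positive
# inputs unchanged and scales negative ones by alpha, a LeakyReLU-style
# activation; for alpha > 1 the inequality flips, so min(alpha * x, x) is
# needed instead.
import numpy as np

def alpha_activation(x, alpha):
    if alpha <= 1:
        return np.maximum(alpha * x, x)
    return np.minimum(alpha * x, x)

print(alpha_activation(np.array([-2.0, 0.0, 3.0]), 0.2))  # [-0.4  0.   3. ]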
def __init__(self):
    super(FirstNet, self).__init__()
    self.max = P.Maximum()
    self.min = P.Minimum()
    self.net = SecondNet()
    self.x = Tensor(np.ones((2, 3, 4), np.float32))
    self.y = Tensor(np.ones((2, 3, 4), np.float32))
def __init__(self, margin=0.0, reduction="mean"):
    super(CosineEmbeddingLoss, self).__init__(reduction)
    self.reduce_sum = P.ReduceSum()
    self.maximum = P.Maximum()
    validator.check_value_type("margin", margin, [float], self.cls_name)
    self.margin = validator.check_number_range("margin", margin, -1.0, 1.0,
                                               Rel.INC_BOTH, self.cls_name)
def __init__(self):
    super(FirstNet, self).__init__()
    self.max = P.Maximum()
    self.min = P.Minimum()
    self.net = SecondNet()
    self.x = Tensor(np.ones((3, 4), np.float32))
    self.y = Tensor(np.ones((3, 4), np.float32))
    self.weight = Parameter(Tensor(np.ones((2, 3, 4)).astype(np.float32)), "w1", requires_grad=True)
def __init__(self):
    super(SecondNet, self).__init__()
    self.addN = P.AddN()
    self.max = P.Maximum()
    self.add = P.TensorAdd()
    self.weight = Parameter(Tensor(np.ones((2, 3, 4), np.float32)), "w2", requires_grad=True)
def __init__(self):
    super(DictNet, self).__init__()
    self.max = P.Maximum()
    self.min = P.Minimum()
    self.dictionary = {"x": Tensor(np.ones([3, 2, 3], np.float32)),
                       "y": Tensor(np.ones([1, 2, 3], np.float32))}
def __init__(self):
    super(Giou, self).__init__()
    self.cast = P.Cast()
    self.reshape = P.Reshape()
    self.min = P.Minimum()
    self.max = P.Maximum()
    self.concat = P.Concat(axis=1)
    self.mean = P.ReduceMean()
    self.div = P.RealDiv()
    self.eps = 0.000001
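# A hedged NumPy sketch of the GIoU these operators typically assemble (the
# cell's construct is not shown above; the corner layout [x1, y1, x2, y2] and
# the helper name are assumptions, not part of the original source):
import numpy as np

def giou_ref(box1, box2, eps=1e-6):
    # intersection box and area
    inter_tl = np.maximum(box1[..., :2], box2[..., :2])
    inter_br = np.minimum(box1[..., 2:], box2[..., 2:])
    inter = np.prod(np.clip(inter_br - inter_tl, 0, None), axis=-1)
    area1 = np.prod(box1[..., 2:] - box1[..., :2], axis=-1)
    area2 = np.prod(box2[..., 2:] - box2[..., :2], axis=-1)
    union = area1 + area2 - inter
    iou = inter / (union + eps)
    # smallest enclosing (hull) box
    hull_tl = np.minimum(box1[..., :2], box2[..., :2])
    hull_br = np.maximum(box1[..., 2:], box2[..., 2:])
    hull = np.prod(hull_br - hull_tl, axis=-1)
    return iou - (hull - union) / (hull + eps)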
def test_nobroadcast_fp16():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(10, 20).astype(np.float16)
    x2_np = np.random.rand(10, 20).astype(np.float16)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    x2_np_zero = np.zeros_like(x2_np)
    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np_zero))
    assert np.allclose(output_ms.asnumpy(), x2_np_zero)

    output_ms = P.Mod()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.fmod(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.FloorMod()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.mod(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
def __init__(self, src_type=mstype.float32, dst_type=mstype.float32):
    super(SaturateCast, self).__init__()
    np_type = mstype.dtype_to_nptype(dst_type)

    self.tensor_min_type = float(np.finfo(np_type).min)
    self.tensor_max_type = float(np.finfo(np_type).max)

    self.min_op = P.Minimum()
    self.max_op = P.Maximum()
    self.cast = P.Cast()
    self.dst_type = dst_type
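# Hedged usage sketch (only __init__ is shown above; the call below assumes the
# cell's construct clamps with min_op/max_op before casting). The point of
# SaturateCast is that narrowing casts such as float32 -> float16 saturate at
# the destination dtype's limits (about +/-65504 for float16, per np.finfo)
# instead of overflowing to inf:
# saturate = SaturateCast(src_type=mstype.float32, dst_type=mstype.float16)
# out = saturate(Tensor(np.array([1e5, -1e5, 1.0]), mstype.float32))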
def test_broadcast_diff_dims():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(2).astype(np.float32)
    x2_np = np.random.rand(2, 1).astype(np.float32)
    x1_np_int32 = np.random.randint(0, 100, (2)).astype(np.int32)
    x2_np_int32 = np.random.randint(0, 100, (2, 1)).astype(np.int32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 > x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 < x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    x2_np_zero = np.zeros_like(x2_np)
    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np_zero))
    assert np.allclose(output_ms.asnumpy(), x2_np_zero)
def __init__(self, weight_angle=10):
    super(LossFunc, self).__init__()
    self.split = P.Split(1, 5)
    self.min = P.Minimum()
    self.log = P.Log()
    self.cos = P.Cos()
    self.mean = P.ReduceMean()
    # self.flatten = P.Flatten()
    self.sum = P.ReduceSum()
    self.weight_angle = weight_angle
    self.max = P.Maximum()
    self.print = P.Print()
def __init__(self):
    super(ClipByNorm, self).__init__()
    self.reduce_sum = P.ReduceSum(keep_dims=True)
    self.select_ = P.Select()
    self.greater_ = P.Greater()
    self.cast = P.Cast()
    self.sqrt = P.Sqrt()
    self.max_op = P.Maximum()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.fill = P.Fill()
    self.expand_dims = P.ExpandDims()
    self.dtype = P.DType()
def construct(self, x):
    pred_loc, pred_label = self.network(x)

    default_bbox_xy = self.default_boxes[..., :2]
    default_bbox_wh = self.default_boxes[..., 2:]
    pred_xy = pred_loc[..., :2] * self.prior_scaling_xy * default_bbox_wh + default_bbox_xy
    pred_wh = P.Exp()(pred_loc[..., 2:] * self.prior_scaling_wh) * default_bbox_wh

    pred_xy_0 = pred_xy - pred_wh / 2.0
    pred_xy_1 = pred_xy + pred_wh / 2.0
    pred_xy = P.Concat(-1)((pred_xy_0, pred_xy_1))
    pred_xy = P.Maximum()(pred_xy, 0)
    pred_xy = P.Minimum()(pred_xy, 1)
    return pred_xy, pred_label
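# An equivalent NumPy sketch (an addition; the 0.1/0.2 prior scalings are
# assumptions matching common SSD defaults, not values from the source) of the
# decode above: regression offsets become center/size boxes, which are then
# converted to corner coordinates and clipped into the unit square.
import numpy as np

def decode_boxes(pred_loc, default_boxes, scaling_xy=0.1, scaling_wh=0.2):
    xy = pred_loc[..., :2] * scaling_xy * default_boxes[..., 2:] + default_boxes[..., :2]
    wh = np.exp(pred_loc[..., 2:] * scaling_wh) * default_boxes[..., 2:]
    corners = np.concatenate([xy - wh / 2.0, xy + wh / 2.0], axis=-1)
    return np.clip(corners, 0.0, 1.0)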
def __init__(self):
    super(CrossEntropyWithIgnoreIndex, self).__init__()
    self.onehot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.cast = P.Cast()
    self.ce = nn.SoftmaxCrossEntropyWithLogits()
    self.greater = P.Greater()
    self.maximum = P.Maximum()
    self.fill = P.Fill()
    self.sum = P.ReduceSum(keep_dims=False)
    self.dtype = P.DType()
    self.relu = P.ReLU()
    self.reshape = P.Reshape()
def clip_by_value(x, clip_value_min, clip_value_max):
    r"""
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    .. math::

        out_i= \left\{
        \begin{array}{align}
            clip\_value_{max} & \text{ if } x_i\ge clip\_value_{max} \\
            x_i & \text{ if } clip\_value_{min} \lt x_i \lt clip\_value_{max} \\
            clip\_value_{min} & \text{ if } x_i \le clip\_value_{min} \\
        \end{array}\right.

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
        x (Tensor): Input data.
        clip_value_min (Tensor): The minimum value.
        clip_value_max (Tensor): The maximum value.

    Returns:
        Tensor, a clipped Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import composite as C
        >>> import mindspore.common.dtype as mstype
        >>> min_value = Tensor(5, mstype.float32)
        >>> max_value = Tensor(20, mstype.float32)
        >>> x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mstype.float32)
        >>> output = C.clip_by_value(x, min_value, max_value)
        >>> print(output)
        [[ 5. 20.  5.  7.]
         [ 5. 11.  6. 20.]]
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    _check_shape(F.shape(x), F.shape(x_max))
    return x_max
def __init__(self):
    super(ClipByNorm, self).__init__()
    self.reduce_sum = P.ReduceSum(keep_dims=True)
    self.select_ = P.Select()
    self.greater_ = P.Greater()
    self.axis = ()
    self.cast = P.Cast()
    self.zero = Tensor(np.array([0.0]).astype(np.float32))
    self.sqrt = P.Sqrt()
    self.max_op = P.Maximum()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.fill = P.Fill()
    self.expand_dims = P.ExpandDims()
    self.dtype = P.DType()
def __init__(self):
    super(CEWithIgnoreIndex3D, self).__init__()
    self.exp = P.Exp()
    self.sum = P.ReduceSum()
    self.reshape = P.Reshape()
    self.log = P.Log()
    self.cast = P.Cast()
    self.eps_const = Tensor(eps, dtype=mstype.float32)
    self.ones = P.OnesLike()
    self.onehot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.relu = P.ReLU()
    self.maximum = P.Maximum()
    self.resum = P.ReduceSum(keep_dims=False)
def test_nobroadcast():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    x1_np = np.random.rand(10, 20).astype(np.float32)
    x2_np = np.random.rand(10, 20).astype(np.float32)
    x1_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)
    x2_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 > x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 < x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
def __init__(self,
             d_min=1e-3,
             d_max=1.0,
             num_rbf=32,
             sigma=None,
             trainable=False,
             min_cutoff=False,
             max_cutoff=False,
             ):
    super().__init__()
    if d_max <= d_min:
        raise ValueError('The argument "d_max" must be larger '
                         'than the argument "d_min" in LogGaussianDistribution!')
    if d_min <= 0:
        raise ValueError('The argument "d_min" must be '
                         'larger than 0 in LogGaussianDistribution!')
    self.d_max = d_max
    self.d_min = d_min / d_max
    self.min_cutoff = min_cutoff
    self.max_cutoff = max_cutoff

    self.log = P.Log()
    self.exp = P.Exp()
    self.max = P.Maximum()
    self.min = P.Minimum()
    self.zeroslike = P.ZerosLike()
    self.oneslike = P.OnesLike()

    # linspace = nn.LinSpace(log_dmin, 0, n_gaussians)
    log_dmin = math.log(self.d_min)
    # self.centers = linspace()
    # self.ones = self.oneslike(self.centers)
    centers = np.linspace(log_dmin, 0, num_rbf)
    self.centers = Tensor(centers, ms.float32)
    ones = np.ones_like(centers)
    self.ones = Tensor(ones, ms.float32)

    if sigma is None:
        sigma = -log_dmin / (num_rbf - 1)
    self.rescale = -0.5 / (sigma * sigma)
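# A plain-NumPy sketch (an addition; only __init__ is shown above) of the
# log-Gaussian radial basis expansion these buffers set up: distances are
# rescaled by d_max, log-transformed, and expanded over num_rbf Gaussians
# evenly spaced in log space between log(d_min / d_max) and 0.
import numpy as np

def log_gaussian_rbf(d, d_min=1e-3, d_max=1.0, num_rbf=32):
    log_dmin = np.log(d_min / d_max)
    centers = np.linspace(log_dmin, 0.0, num_rbf)
    sigma = -log_dmin / (num_rbf - 1)
    rescale = -0.5 / (sigma * sigma)
    log_d = np.log(d / d_max)[..., None]  # broadcast against the centers
    return np.exp(rescale * (log_d - centers) ** 2)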
def __init__(self, axis=None):
    super(ClipByNorm, self).__init__()
    if axis is None:
        axis = ()
    if isinstance(axis, tuple):
        for idx, item in enumerate(axis):
            Validator.check_value_type("axis[%d]" % idx, item, [int], self.cls_name)
    self.axis = Validator.check_value_type('axis', axis, [int, tuple], self.cls_name)
    self.reduce_sum = P.ReduceSum(keep_dims=True)
    self.select_ = P.Select()
    self.greater_ = P.Greater()
    self.cast = P.Cast()
    self.sqrt = P.Sqrt()
    self.max_op = P.Maximum()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.fill = P.Fill()
    self.expand_dims = P.ExpandDims()
    self.dtype = P.DType()
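# The standard clip-by-norm formula these operators assemble, sketched in NumPy
# (an assumption based on the usual definition; the construct is not shown
# above): output = x * clip_norm / max(||x||_2, clip_norm), so tensors whose
# L2 norm is already below clip_norm pass through unchanged.
import numpy as np

def clip_by_norm_ref(x, clip_norm, axis=None):
    l2_norm = np.sqrt(np.sum(x * x, axis=axis, keepdims=True))
    return x * clip_norm / np.maximum(l2_norm, clip_norm)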
def clip_by_value(x, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
        x (Tensor): Input data.
        clip_value_min (Tensor): The minimum value.
        clip_value_max (Tensor): The maximum value.

    Returns:
        Tensor, a clipped Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import composite as C
        >>> import mindspore.common.dtype as mstype
        >>> min_value = Tensor(5, mstype.float32)
        >>> max_value = Tensor(20, mstype.float32)
        >>> x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mstype.float32)
        >>> output = C.clip_by_value(x, min_value, max_value)
        >>> print(output)
        [[ 5. 20.  5.  7.]
         [ 5. 11.  6. 20.]]
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    return x_max
def test_broadcast_fp16():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
    x2_np = np.random.rand(1, 4, 1, 6).astype(np.float16)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
def clip_by_value(x, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
        x (Tensor): Input data.
        clip_value_min (Tensor): The minimum value.
        clip_value_max (Tensor): The maximum value.

    Returns:
        Tensor, a clipped Tensor.
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    return x_max
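# Usage, mirroring the Examples block of the fuller variants of this function
# above:
# min_value = Tensor(5, mstype.float32)
# max_value = Tensor(20, mstype.float32)
# x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mstype.float32)
# output = clip_by_value(x, min_value, max_value)
# # [[ 5. 20.  5.  7.]
# #  [ 5. 11.  6. 20.]]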
def __init__(self):
    super(DictNet, self).__init__()
    self.max = P.Maximum()
    self.min = P.Minimum()
def __init__(self):
    super(Iou, self).__init__()
    self.min = P.Minimum()
    self.max = P.Maximum()
def __init__(self):
    super(MathBinaryNet1, self).__init__()
    self.add = P.TensorAdd()
    self.mul = P.Mul()
    self.max = P.Maximum()
    self.number = 3
def __init__(self):
    super(Net, self).__init__()
    self.max = P.Maximum()
    self.min = P.Minimum()
    self._list = [22, 66, 88, 111]