Example #1
def test_broadcast_diff_dims():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    x1_np = np.random.rand(2).astype(np.float32)
    x2_np = np.random.rand(2, 1).astype(np.float32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
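The test exercises NumPy-style broadcasting between ranks: a `(2,)` operand against a `(2, 1)` operand yields a `(2, 2)` result. A MindSpore-free sketch of the rule being checked:

import numpy as np

a = np.arange(2, dtype=np.float32)           # shape (2,)
b = np.arange(2, dtype=np.float32)[:, None]  # shape (2, 1)

# Broadcasting aligns trailing dimensions: (2,) vs (2, 1) -> (2, 2).
print(np.minimum(a, b).shape)  # (2, 2)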
Example #2
    def __init__(self, num_classes, num_boxes, neg_pre_positive, batch_size):
        super(MultiBoxLoss, self).__init__()
        self.num_classes = num_classes
        self.num_boxes = num_boxes
        self.neg_pre_positive = neg_pre_positive
        self.notequal = P.NotEqual()
        self.less = P.Less()
        self.tile = P.Tile()
        self.reduce_sum = P.ReduceSum()
        self.reduce_mean = P.ReduceMean()
        self.expand_dims = P.ExpandDims()
        self.smooth_l1_loss = P.SmoothL1Loss()
        self.cross_entropy = SoftmaxCrossEntropyWithLogits()
        self.maximum = P.Maximum()
        self.minimum = P.Minimum()
        self.sort_descend = P.TopK(True)
        self.sort = P.TopK(True)
        self.gather = P.GatherNd()
        self.max = P.ReduceMax()
        self.log = P.Log()
        self.exp = P.Exp()
        self.concat = P.Concat(axis=1)
        self.reduce_sum2 = P.ReduceSum(keep_dims=True)
        self.idx = Tensor(
            np.reshape(np.arange(batch_size * num_boxes), (-1, 1)), ms.int32)
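The TopK, GatherNd and `neg_pre_positive` ratio set up here are the usual machinery for hard negative mining in SSD-style losses. As a rough single-sample NumPy sketch of that selection rule (a hypothetical reference, not this class's construct):

import numpy as np

def hard_negative_mask(conf_loss, is_positive, neg_pre_positive):
    # For one sample: keep all positives plus the highest-loss negatives,
    # capped at neg_pre_positive negatives per positive (hypothetical).
    num_pos = int(is_positive.sum())
    num_neg = min(neg_pre_positive * num_pos, int((~is_positive).sum()))
    neg_loss = np.where(is_positive, -np.inf, conf_loss)
    keep = np.argsort(-neg_loss)[:num_neg]
    mask = is_positive.copy()
    mask[keep] = True
    return mask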
Example #3
    def __init__(self, batch_size=4):
        super(DiceLoss, self).__init__()

        self.threshold0 = Tensor(0.5, mstype.float32)
        self.zero_float32 = Tensor(0.0, mstype.float32)
        self.k = int(640 * 640)
        self.negative_one_int32 = Tensor(-1, mstype.int32)
        self.batch_size = batch_size
        self.concat = P.Concat()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
        self.reduce_mean = P.ReduceMean()
        self.reduce_min = P.ReduceMin()
        self.cast = P.Cast()
        self.minimum = P.Minimum()
        self.expand_dims = P.ExpandDims()
        self.select = P.Select()
        self.fill = P.Fill()
        self.topk = P.TopK(sorted=True)
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.slice = P.Slice()
        self.logical_and = P.LogicalAnd()
        self.logical_or = P.LogicalOr()
        self.equal = P.Equal()
        self.zeros_like = P.ZerosLike()
        self.add = P.TensorAdd()
        self.gather = P.Gather()
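The Sigmoid/TopK/ReduceSum ops assembled here point at an OHEM-style dice loss. As a rough NumPy reference for the dice term alone (one common formulation; the hard-example mining is omitted and the function name is ours):

import numpy as np

def dice_loss_reference(pred_logits, target, eps=1e-5):
    # 1 - 2*|P∩G| / (|P| + |G|), with P = sigmoid(logits), G = binary mask.
    pred = 1.0 / (1.0 + np.exp(-pred_logits))
    inter = (pred * target).sum()
    return 1.0 - 2.0 * inter / (pred.sum() + target.sum() + eps)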
Example #4
    def construct(self, x):
        alpha_array = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
        if self.alpha <= 1:
            out = P.Maximum()(alpha_array * x, x)
        else:
            out = P.Minimum()(alpha_array * x, x)
        return out
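This construct computes a PReLU/LeakyReLU-style activation: for `alpha <= 1` it takes `max(alpha * x, x)`, for `alpha > 1` it takes `min(alpha * x, x)`, so the identity branch is chosen for `x >= 0` either way. A NumPy reference sketch (function name is ours):

import numpy as np

def prelu_reference(x, alpha):
    # Mirrors the construct above: the identity branch always wins for x >= 0,
    # and negative inputs are scaled by alpha.
    return np.maximum(alpha * x, x) if alpha <= 1 else np.minimum(alpha * x, x)

print(prelu_reference(np.array([-2.0, 3.0]), 0.25))  # [-0.5  3. ]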
Example #5
    def __init__(self):
        super(FirstNet, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self.net = SecondNet()
        self.x = Tensor(np.ones((2, 3, 4), np.float32))
        self.y = Tensor(np.ones((2, 3, 4), np.float32))
Example #6
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 use_sigmoid_cls,
                 target_means=(.0, .0, .0, .0),
                 target_stds=(1.0, 1.0, 1.0, 1.0)
                 ):
        super(Proposal, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = config.use_sigmoid_cls

        if self.use_sigmoid_cls:
            self.cls_out_channels = 1
            self.activation = P.Sigmoid()
            self.reshape_shape = (-1, 1)
        else:
            self.cls_out_channels = num_classes
            self.activation = P.Softmax(axis=1)
            self.reshape_shape = (-1, 2)

        if self.cls_out_channels <= 0:
            raise ValueError('num_classes={} is too small'.format(num_classes))

        self.num_pre = cfg.rpn_proposal_nms_pre
        self.min_box_size = cfg.rpn_proposal_min_bbox_size
        self.nms_thr = cfg.rpn_proposal_nms_thr
        self.nms_post = cfg.rpn_proposal_nms_post
        self.nms_across_levels = cfg.rpn_proposal_nms_across_levels
        self.max_num = cfg.rpn_proposal_max_num

        # Op Define
        self.squeeze = P.Squeeze()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.feature_shapes = cfg.feature_shapes

        self.transpose_shape = (1, 2, 0)

        self.decode = BoundingBoxDecode()

        self.nms = P.NMSWithMask(self.nms_thr)
        self.concat_axis0 = P.Concat(axis=0)
        self.concat_axis1 = P.Concat(axis=1)
        self.split = P.Split(axis=1, output_num=5)
        self.min = P.Minimum()
        self.gatherND = P.GatherNd()
        self.slice = P.Slice()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()
        self.tile = P.Tile()
        self.set_train_local(config, training=True)

        self.multi_10 = Tensor(10.0, mstype.float16)
Example #7
    def __init__(self,
                 params,
                 decay_steps,
                 learning_rate=0.001,
                 end_learning_rate=0.0001,
                 power=10.0,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-6,
                 weight_decay=0.0):
        super(AdamWeightDecayDynamicLR, self).__init__(learning_rate, params)
        _check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)

        # turn these into scalars once ME supports scalar/tensor mixed operations
        self.global_step = Parameter(initializer(0, [1]), name="global_step")
        self.decay_steps = Tensor(np.array([decay_steps]).astype(np.float32))
        self.end_learning_rate = Tensor(
            np.array([end_learning_rate]).astype(np.float32))
        self.diff_learning_rate = Tensor(
            np.array([learning_rate - end_learning_rate]).astype(np.float32))
        self.power = power
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.weight_decay_tensor = Tensor(
            np.array([weight_decay]).astype(np.float32))
        self.params = self.parameters
        self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
        self.moments2 = self.params.clone(prefix="adam_v", init='zeros')

        self.hyper_map = C.HyperMap()
        self.min = P.Minimum()
        self.pow = P.Pow()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
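The cached `decay_steps`, `power`, `self.min` and `self.pow` suggest a polynomial-decay schedule of the form `end_lr + (lr - end_lr) * (1 - min(step / decay_steps, 1)) ** power`. A plain-Python sketch of that schedule (our reading, not the optimizer's construct):

def poly_decay_lr(step, decay_steps, lr, end_lr, power):
    # Hypothetical reference for the decay the cached tensors imply.
    p = min(step / decay_steps, 1.0)
    return end_lr + (lr - end_lr) * (1.0 - p) ** power

print(poly_decay_lr(500, 1000, 0.001, 0.0001, 10.0))  # ~0.000101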
Example #8
    def __init__(self):
        super(DictNet, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self.dictionary = {
            "x": Tensor(np.ones([3, 2, 3], np.float32)),
            "y": Tensor(np.ones([1, 2, 3], np.float32))
        }
Example #9
    def __init__(self):
        super(FirstNet, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self.net = SecondNet()
        self.x = Tensor(np.ones((3, 4), np.float32))
        self.y = Tensor(np.ones((3, 4), np.float32))
        self.weight = Parameter(
            Tensor(np.ones((2, 3, 4)).astype(np.float32)), "w1",
            requires_grad=True)
Example #10
    def __init__(self):
        super(Giou, self).__init__()
        self.cast = P.Cast()
        self.reshape = P.Reshape()
        self.min = P.Minimum()
        self.max = P.Maximum()
        self.concat = P.Concat(axis=1)
        self.mean = P.ReduceMean()
        self.div = P.RealDiv()
        self.eps = 1e-6
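The ops collected here (min/max for corners, RealDiv guarded by an eps) are the usual ingredients of GIoU. A hypothetical NumPy reference for boxes in (x1, y1, x2, y2) form (the box layout is an assumption):

import numpy as np

def giou_reference(a, b, eps=1e-6):
    # Hypothetical GIoU for two boxes given as (x1, y1, x2, y2) arrays.
    ix1, iy1 = np.maximum(a[0], b[0]), np.maximum(a[1], b[1])
    ix2, iy2 = np.minimum(a[2], b[2]), np.minimum(a[3], b[3])
    inter = max(ix2 - ix1, 0) * max(iy2 - iy1, 0)
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    iou = inter / (union + eps)
    # Smallest enclosing box.
    ex1, ey1 = np.minimum(a[0], b[0]), np.minimum(a[1], b[1])
    ex2, ey2 = np.maximum(a[2], b[2]), np.maximum(a[3], b[3])
    enclose = (ex2 - ex1) * (ey2 - ey1)
    return iou - (enclose - union) / (enclose + eps)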
Example #11
    def __init__(self,
                 params,
                 decay_steps,
                 warmup_steps=0,
                 start_learning_rate=0.1,
                 end_learning_rate=0.0001,
                 power=1.0,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-6,
                 weight_decay=0.0,
                 decay_filter=lambda x: 'LayerNorm' not in x.name
                 and 'bias' not in x.name):

        super(Lamb, self).__init__(start_learning_rate, params)
        if self.is_group:
            raise RuntimeError(
                f"The {self.cls_name} optimizer cannot support group setting.")
        _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                           end_learning_rate, power, beta1, beta2, eps,
                           weight_decay, self.cls_name)

        # turn these into scalars once ME supports scalar/tensor mixed operations
        self.global_step = Parameter(initializer(0, [1]), name="global_step")

        self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
        self.warmup_flag = False
        if warmup_steps > 0:
            self.warmup_flag = True
        self.decay_steps = Tensor(np.array([decay_steps]).astype(np.float32))
        self.start_learning_rate = Tensor(
            np.array([start_learning_rate]).astype(np.float32))
        self.end_learning_rate = Tensor(
            np.array([end_learning_rate]).astype(np.float32))
        self.diff_learning_rate = Tensor(
            np.array([start_learning_rate - end_learning_rate
                      ]).astype(np.float32))
        self.power = power
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.weight_decay_tensor = Tensor(
            np.array([weight_decay]).astype(np.float32))
        self.params = self.parameters
        self.moments1 = self.params.clone(prefix="lamb_m", init='zeros')
        self.moments2 = self.params.clone(prefix="lamb_v", init='zeros')
        self.decay_flag = tuple(decay_filter(x) for x in self.params)

        self.hyper_map = C.HyperMap()
        self.min = P.Minimum()
        self.pow = P.Pow()
        self.greater = P.Greater()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.cast = P.Cast()
Example #12
def test_nobroadcast_fp16():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(10, 20).astype(np.float16)
    x2_np = np.random.rand(10, 20).astype(np.float16)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    x2_np_zero = np.zeros_like(x2_np)
    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np_zero))
    assert np.allclose(output_ms.asnumpy(), x2_np_zero)

    output_ms = P.Mod()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.fmod(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.FloorMod()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.mod(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
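DivNoNan differs from RealDiv only where the divisor is zero: there the quotient is forced to 0, which is why the zero-divisor case above is checked against an all-zero array. A NumPy sketch of that semantics (function name is ours):

import numpy as np

def div_no_nan_reference(x, y):
    # Ordinary elementwise division, but 0 wherever y == 0.
    out = np.zeros_like(x)
    np.divide(x, y, out=out, where=(y != 0))
    return out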
Example #13
    def __init__(self, src_type=mstype.float32, dst_type=mstype.float32):
        super(SaturateCast, self).__init__()
        np_type = mstype.dtype_to_nptype(dst_type)

        self.tensor_min_type = float(np.finfo(np_type).min)
        self.tensor_max_type = float(np.finfo(np_type).max)

        self.min_op = P.Minimum()
        self.max_op = P.Maximum()
        self.cast = P.Cast()
        self.dst_type = dst_type
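The construct is not shown here; given the cached ops and bounds, it presumably clamps into the destination dtype's finite range before casting, so out-of-range values saturate instead of overflowing. A sketch under that assumption:

    def construct(self, x):
        # Clamp into the representable range of dst_type, then cast.
        out = self.max_op(x, self.tensor_min_type)
        out = self.min_op(out, self.tensor_max_type)
        return self.cast(out, self.dst_type)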
Example #14
def test_broadcast_diff_dims():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(2).astype(np.float32)
    x2_np = np.random.rand(2, 1).astype(np.float32)
    x1_np_int32 = np.random.randint(0, 100, (2)).astype(np.int32)
    x2_np_int32 = np.random.randint(0, 100, (2, 1)).astype(np.int32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
    output_ms = P.Greater()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 > x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
    output_ms = P.Less()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 < x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    x2_np_zero = np.zeros_like(x2_np)
    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np_zero))
    assert np.allclose(output_ms.asnumpy(), x2_np_zero)
Example #15
    def __init__(self, weight_angle=10):
        super(LossFunc, self).__init__()
        self.split = P.Split(1, 5)
        self.min = P.Minimum()
        self.log = P.Log()
        self.cos = P.Cos()
        self.mean = P.ReduceMean()
        # self.flatten = P.Flatten()
        self.sum = P.ReduceSum()
        self.weight_angle = weight_angle
        self.max = P.Maximum()
        self.print = P.Print()
Example #16
    def __init__(self, config):
        super(ClassificationLoss, self).__init__()
        self.num_classes = config.NUM_CLASSES
        self.num_boxes = config.NUM_SSD_BOXES
        self.neg_pre_positive = config.NEG_PRE_POSITIVE
        self.minimum = P.Minimum()
        self.less = P.Less()
        self.sort = P.TopK()
        self.tile = P.Tile()
        self.reduce_sum = P.ReduceSum()
        self.reduce_mean = P.ReduceMean()
        self.expand_dims = P.ExpandDims()
        self.sort_descend = P.TopK(True)
        self.cross_entropy = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
Example #17
    def construct(self, x):
        pred_loc, pred_label = self.network(x)

        default_bbox_xy = self.default_boxes[..., :2]
        default_bbox_wh = self.default_boxes[..., 2:]
        pred_xy = pred_loc[..., :2] * self.prior_scaling_xy * default_bbox_wh + default_bbox_xy
        pred_wh = P.Exp()(pred_loc[..., 2:] * self.prior_scaling_wh) * default_bbox_wh

        pred_xy_0 = pred_xy - pred_wh / 2.0
        pred_xy_1 = pred_xy + pred_wh / 2.0
        pred_xy = P.Concat(-1)((pred_xy_0, pred_xy_1))
        pred_xy = P.Maximum()(pred_xy, 0)
        pred_xy = P.Minimum()(pred_xy, 1)
        return pred_xy, pred_label
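For offline sanity checks, the same decode can be mirrored in NumPy (a sketch under the assumption that boxes are normalized (cx, cy, w, h); the names are ours):

import numpy as np

def decode_reference(pred_loc, default_boxes, scaling_xy, scaling_wh):
    # default_boxes and pred_loc are (..., 4) in (cx, cy, w, h) offset form.
    d_xy, d_wh = default_boxes[..., :2], default_boxes[..., 2:]
    xy = pred_loc[..., :2] * scaling_xy * d_wh + d_xy
    wh = np.exp(pred_loc[..., 2:] * scaling_wh) * d_wh
    boxes = np.concatenate([xy - wh / 2.0, xy + wh / 2.0], axis=-1)
    return np.clip(boxes, 0.0, 1.0)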
Example #18
def clip_by_value(x, clip_value_min, clip_value_max):
    r"""
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    .. math::

        out_i = \begin{cases}
            clip\_value_{max} & \text{if } x_i \ge clip\_value_{max} \\
            x_i & \text{if } clip\_value_{min} < x_i < clip\_value_{max} \\
            clip\_value_{min} & \text{if } x_i \le clip\_value_{min}
        \end{cases}

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
          x (Tensor): Input data.
          clip_value_min (Tensor): The minimum value.
          clip_value_max (Tensor): The maximum value.

    Returns:
          Tensor, a clipped Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import composite as C
        >>> import mindspore.common.dtype as mstype
        >>> min_value = Tensor(5, mstype.float32)
        >>> max_value = Tensor(20, mstype.float32)
        >>> x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mstype.float32)
        >>> output = C.clip_by_value(x, min_value, max_value)
        >>> print(output)
        [[ 5. 20.  5.  7.]
         [ 5. 11.  6. 20.]]
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    _check_shape(F.shape(x), F.shape(x_max))
    return x_max
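When the input shapes already agree, the result matches `np.clip`; a quick cross-check of the docstring example:

import numpy as np

x = np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]], np.float32)
print(np.clip(x, 5., 20.))  # matches the printed output above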
Example #19
def test_nobroadcast():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    x1_np = np.random.rand(10, 20).astype(np.float32)
    x2_np = np.random.rand(10, 20).astype(np.float32)
    x1_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)
    x2_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
    output_ms = P.Greater()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 > x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
    output_ms = P.Less()(Tensor(x1_np_int32), Tensor(x2_np_int32))
    output_np = x1_np_int32 < x2_np_int32
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
Example #20
    def __init__(
        self,
        d_min=1e-3,
        d_max=1.0,
        num_rbf=32,
        sigma=None,
        trainable=False,
        min_cutoff=False,
        max_cutoff=False,
    ):
        super().__init__()
        if d_max <= d_min:
            raise ValueError(
                'The argument "d_max" must be larger '
                'than the argument "d_min" in LogGaussianDistribution!')

        if d_min <= 0:
            raise ValueError('The argument "d_min" must be '
                             'larger than 0 in LogGaussianDistribution!')

        self.d_max = d_max
        self.d_min = d_min / d_max
        self.min_cutoff = min_cutoff
        self.max_cutoff = max_cutoff

        self.log = P.Log()
        self.exp = P.Exp()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self.zeroslike = P.ZerosLike()
        self.oneslike = P.OnesLike()

        # linspace = nn.LinSpace(log_dmin,0,n_gaussians)

        log_dmin = math.log(self.d_min)
        # self.centers = linspace()
        # self.ones = self.oneslike(self.centers)
        centers = np.linspace(log_dmin, 0, num_rbf)
        self.centers = Tensor(centers, ms.float32)
        ones = np.ones_like(centers)
        self.ones = Tensor(ones, ms.float32)

        if sigma is None:
            sigma = -log_dmin / (num_rbf - 1)
        self.rescale = -0.5 / (sigma * sigma)
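The construct is likewise not shown. Given centers spaced in log-space over [log(d_min), 0] and `rescale = -0.5 / sigma^2`, a plausible NumPy reading of the basis expansion is (an assumption, not the class's code):

import numpy as np

def log_gaussian_rbf(d, d_max, centers, rescale):
    # Hypothetical expansion: distances normalized to (0, 1], moved to
    # log-space, then scored against each log-spaced center.
    log_d = np.log(np.asarray(d) / d_max)[..., None]   # (..., 1)
    return np.exp(rescale * (log_d - centers) ** 2)    # (..., num_rbf)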
Example #21
def clip_by_value(x, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
          x (Tensor): Input data.
          clip_value_min (Tensor): The minimum value.
          clip_value_max (Tensor): The maximum value.

    Returns:
          Tensor, a clipped Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import composite as C
        >>> import mindspore.common.dtype as mstype
        >>> min_value = Tensor(5, mstype.float32)
        >>> max_value = Tensor(20, mstype.float32)
        >>> x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mstype.float32)
        >>> output = C.clip_by_value(x, min_value, max_value)
        >>> print(output)
        [[ 5. 20.  5.  7.]
         [ 5. 11.  6. 20.]]
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    return x_max
Example #22
def test_broadcast_fp16():
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    np.random.seed(42)
    x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
    x2_np = np.random.rand(1, 4, 1, 6).astype(np.float16)

    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.minimum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.maximum(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np > x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np < x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
    output_np = np.power(x1_np, x2_np)
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np / x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np * x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)

    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
    output_np = x1_np - x2_np
    assert np.allclose(output_ms.asnumpy(), output_np)
Example #23
def clip_by_value(x, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min'
    and upper limit is 'clip_value_max'.

    Note:
        'clip_value_min' needs to be less than or equal to 'clip_value_max'.

    Args:
          x (Tensor): Input data.
          clip_value_min (Tensor): The minimum value.
          clip_value_max (Tensor): The maximum value.

    Returns:
          Tensor, a clipped Tensor.
    """
    min_op = P.Minimum()
    max_op = P.Maximum()
    x_min = min_op(x, clip_value_max)
    x_max = max_op(x_min, clip_value_min)
    return x_max
Example #24
        'block': P.TensorAdd(),
        'desc_inputs': [[2, 3, 3, 5], [3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add3', {
        'block': P.TensorAdd(),
        'desc_inputs': [[2, 3, 1, 1], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add4', {
        'block': P.TensorAdd(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 1, 1]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Minimum', {
        'block': P.Minimum(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Pow_0', {
        'block': P.Pow(),
        'desc_const': [2.0],
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Pow_1', {
        'block': P.Pow(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Exp', {
        'block': P.Exp(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
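Each entry names an op (`'block'`), the input shapes to synthesize (`'desc_inputs'`), the gradient shapes (`'desc_bprop'`), and optional phases to skip. A `Maximum` entry in the same style would plausibly read (illustrative, not from the original table):

    ('Maximum', {
        'block': P.Maximum(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),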
Example #25
    def __init__(self):
        super(DictNet, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
Example #26
    def __init__(self):
        super(Iou, self).__init__()
        self.min = P.Minimum()
        self.max = P.Maximum()
Example #27
    def __init__(self):
        super(Net, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self._list = [22, 66, 88, 111]
Example #28
    def __init__(self):
        super(Net, self).__init__()
        self.max = P.Maximum()
        self.min = P.Minimum()
        self._list = [1, 2, 3]
Example #29
    def __init__(
        self,
        dim_atom_embed,
        num_rbf,
        n_heads=8,
        activation=Swish(),
        max_cycles=10,
        time_embedding=0,
        use_pondering=True,
        fixed_cycles=False,
        use_filter=True,
        inside_filter=None,
        act_threshold=0.9,
        fixed_neigh=False,
    ):
        super().__init__(gather_dim=dim_atom_embed, fixed_neigh=fixed_neigh)
        if dim_atom_embed % n_heads != 0:
            raise ValueError('The term "dim_atom_embed" must be divisible '
                             'by the term "n_heads" in AirNetInteraction!')

        self.n_heads = n_heads
        self.max_cycles = max_cycles
        self.dim_atom_embed = dim_atom_embed
        self.num_rbf = num_rbf
        self.time_embedding = time_embedding

        self.flexible_cycles = not fixed_cycles

        self.use_filter = use_filter
        if self.use_filter:
            # self.filter = Filter(num_rbf,dim_atom_embed,activation)
            self.filter = Dense(num_rbf,
                                dim_atom_embed,
                                has_bias=True,
                                activation=None)

        self.positional_embedding = PositionalEmbedding(dim_atom_embed)
        self.multi_head_attention = MultiheadAttention(dim_atom_embed, n_heads)

        self.act_threshold = act_threshold
        self.act_epsilon = 1.0 - act_threshold

        self.use_pondering = use_pondering
        self.pondering = None
        self.act_weight = None
        if self.max_cycles > 1:
            if self.use_pondering:
                self.pondering = Pondering(dim_atom_embed * 3, bias_const=3)
                self.act_weight = ACTWeight(self.act_threshold)
            else:
                if self.flexible_cycles:
                    raise ValueError(
                        'The term "fixed_cycles" must be True '
                        'when the pondering network is None in AirNetInteraction!')
        self.fixed_weight = Tensor(1.0 / max_cycles, ms.float32)

        self.max = P.Maximum()
        self.min = P.Minimum()
        self.concat = P.Concat(-1)
        self.pack = P.Pack()
        self.reducesum = P.ReduceSum()
        self.squeeze = P.Squeeze(-1)
        self.ones_like = P.OnesLike()
        self.zeros_like = P.ZerosLike()
        self.zeros = P.Zeros()
Example #30
from mindspore.ops import operations as P
from mindspore.ops import Primitive

maximum = P.Maximum()
minimum = P.Minimum()
clip_by_value = Primitive('ClipByValue')
make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')


class FnDict:
    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        self.fnDict[fn.__name__] = fn

    def __getitem__(self, name):
        return self.fnDict[name]
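FnDict is a registering decorator: `__call__` stores each function under its `__name__` (and returns `None`, so the decorated name is only reachable through the dict). A usage sketch with the ops defined above (the pattern body is illustrative):

fns = FnDict()

@fns
def clip_pattern(x):
    # Illustrative graph: clamp x into [0, 1] with the ops above.
    return maximum(minimum(x, 1.0), 0.0)

graph_fn = fns["clip_pattern"]  # retrieve the registered function by name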