Example #1
 def __init__(self):
     super(LogSigmoid, self).__init__()
     self.mul = P.Mul()
     self.exp = P.Exp()
     self.add = P.TensorAdd()
     self.rec = P.Reciprocal()
     self.log = P.Log()
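Only the operator declarations are shown above. A minimal sketch of the matching construct, assuming the standard LogSigmoid formula log(1 / (1 + exp(-x))), could combine them as follows (the construct body is an assumption, not part of the original example):

import mindspore.nn as nn
from mindspore.ops import operations as P


class LogSigmoid(nn.Cell):
    """Sketch: computes log(sigmoid(x)) from the primitives declared above."""

    def __init__(self):
        super(LogSigmoid, self).__init__()
        self.mul = P.Mul()
        self.exp = P.Exp()
        self.add = P.TensorAdd()
        self.rec = P.Reciprocal()
        self.log = P.Log()

    def construct(self, x):
        neg_x = self.mul(x, -1.0)          # -x
        exp_neg_x = self.exp(neg_x)        # exp(-x)
        denom = self.add(exp_neg_x, 1.0)   # 1 + exp(-x)
        sigmoid = self.rec(denom)          # sigmoid(x) = 1 / (1 + exp(-x))
        return self.log(sigmoid)           # log(sigmoid(x))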
Example #2
 def __init__(self, network):
     super(NetWithLossClass, self).__init__(auto_prefix=False)
     self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     self.network = network
     self.reducesum = P.ReduceSum(keep_dims=False)
     self.mul = P.Mul()
     self.squeeze = P.Squeeze(axis=1)
     self.zeroslike = P.ZerosLike()
     self.concat = P.Concat(axis=1)
     self.reciprocal = P.Reciprocal()
Example #3
                                                  input_mask,
                                                 token_type_id,
                                                 next_sentence_labels,
                                                 masked_lm_positions,
                                                 masked_lm_ids,
                                                 masked_lm_weights,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # Clip each gradient, reduce the gradients across devices when training
        # is distributed, then apply the update; F.depend ties the returned loss
        # to completion of the optimizer step.
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        grads = self.grad_reducer(grads)
        succ = self.optimizer(grads)
        return F.depend(loss, succ)


grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()


@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    return grad * reciprocal(scale)


_grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
grad_overflow = P.FloatStatus()


@_grad_overflow.register("Tensor")
def _tensor_grad_overflow(grad):
    return grad_overflow(grad)
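The grad_scale map above is normally driven by C.HyperMap inside a training cell so that tensor_grad_scale runs on every gradient in the tuple. A minimal sketch of that usage, assuming the grad_scale defined above is in scope and using a hypothetical GradUnscale wrapper cell:

import mindspore.nn as nn
from mindspore.ops import composite as C
from mindspore.ops import functional as F


class GradUnscale(nn.Cell):
    """Sketch: multiplies every gradient in a tuple by reciprocal(scale)."""

    def __init__(self):
        super(GradUnscale, self).__init__()
        self.hyper_map = C.HyperMap()

    def construct(self, scale, grads):
        # F.partial binds the loss-scale tensor; HyperMap applies
        # tensor_grad_scale (registered on grad_scale) to each gradient.
        return self.hyper_map(F.partial(grad_scale, scale), grads)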
Example #4
     'desc_bprop': [[2, 512, 56, 56]],
     'skip': ['backward']}),
 ('ACos', {
     'block': P.ACos(),
     'desc_inputs': [[2, 3]],
     'desc_bprop': [[2, 3]]}),
 ('Acosh', {
     'block': P.Acosh(),
     'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16))],
     'skip': ['backward']}),
 ('Sin', {
     'block': P.Sin(),
     'desc_inputs': [[2, 3]],
     'desc_bprop': [[2, 3]]}),
 ('Reciprocal', {
     'block': P.Reciprocal(),
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Minimum_0', {
     'block': P.Minimum(),
     'desc_inputs': [[2, 3, 3, 5], [3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Maximum', {
     'block': P.Maximum(),
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Maximum_0', {
     'block': P.Maximum(),
     'desc_inputs': [[3, 5], [2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('MaximumGrad', {
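For reference, the 'Reciprocal' case above pushes a [2, 3, 3, 5] tensor through P.Reciprocal() in both the forward and backward descriptors. A standalone PyNative-mode check of the forward behaviour might look like this sketch (it is not part of the test file):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

x = Tensor(np.random.rand(2, 3, 3, 5).astype(np.float32) + 1.0)  # keep values away from zero
y = P.Reciprocal()(x)                                            # element-wise 1 / x
np.testing.assert_allclose(y.asnumpy(), 1.0 / x.asnumpy(), rtol=1e-5)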
Example #5
 def __init__(self):
     super(Net, self).__init__()
     self.reciprocal = P.Reciprocal()
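Only the __init__ is shown; the matching construct is presumably a direct application of the operator. A self-contained sketch with the construct body assumed:

import mindspore.nn as nn
from mindspore.ops import operations as P


class Net(nn.Cell):
    """Sketch: element-wise reciprocal of the input."""

    def __init__(self):
        super(Net, self).__init__()
        self.reciprocal = P.Reciprocal()

    def construct(self, x):
        return self.reciprocal(x)  # 1 / x, element-wise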
Example #6
 def __init__(self, strategy1, strategy2):
     super().__init__()
     self.matmul = P.MatMul().set_strategy(strategy1)
     self.reciprocal = P.Reciprocal().set_strategy(strategy2)
     self.matmul2 = P.MatMul().set_strategy(strategy1)
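The declared operators suggest a parallel test network that chains the two MatMuls around the element-wise Reciprocal. A plausible sketch, with the construct body assumed rather than taken from the original:

import mindspore.nn as nn
from mindspore.ops import operations as P


class Net(nn.Cell):
    """Sketch: MatMul -> Reciprocal -> MatMul with per-op parallel strategies."""

    def __init__(self, strategy1, strategy2):
        super().__init__()
        self.matmul = P.MatMul().set_strategy(strategy1)
        self.reciprocal = P.Reciprocal().set_strategy(strategy2)
        self.matmul2 = P.MatMul().set_strategy(strategy1)

    def construct(self, x, y, b):
        out = self.matmul(x, y)      # first MatMul, sharded by strategy1
        out = self.reciprocal(out)   # element-wise reciprocal, sharded by strategy2
        out = self.matmul2(out, b)   # second MatMul, sharded by strategy1
        return out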
Example #7
    ('Rsqrt1', {
        'block': (P.Rsqrt(),
        {'exception': TypeError, 'error_keywords': ['Rsqrt']}),
        'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
        'skip': ['backward']}),

    # input is Tensor(bool)
    ('Sqrt1', {
        'block': (P.Sqrt(),
        {'exception': TypeError, 'error_keywords': ['Sqrt']}),
        'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
        'skip': ['backward']}),

    # input is not Tensor
    ('Reciprocal1', {
        'block': (P.Reciprocal(),
        {'exception': TypeError, 'error_keywords': ['Reciprocal']}),
        'desc_inputs': [5.0],
        'skip': ['backward']}),

    # input x is Tensor(bool)
    ('Pow1', {
        'block': (P.Pow(),
        {'exception': TypeError, 'error_keywords': ['Pow']}),
        'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_)), 2.0],
        'skip': ['backward']}),

    # input is not Tensor
    ('Exp1', {
        'block': (P.Exp(),
        {'exception': TypeError, 'error_keywords': ['Exp']}),
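The 'Reciprocal1' case above asserts that a non-Tensor input (a plain 5.0) is rejected with a TypeError whose message mentions 'Reciprocal'. The intent can be sketched outside the test harness roughly as follows; the prints and the fallback branches are illustrative only, since the exact behaviour depends on the execution mode and version:

from mindspore.ops import operations as P

try:
    P.Reciprocal()(5.0)       # a plain Python float, not a Tensor
except TypeError as err:      # the error type the test case above asserts
    print('rejected:', err)
except Exception as err:      # some versions may surface a different error type
    print('rejected with', type(err).__name__, ':', err)
else:
    print('accepted: this execution mode converted the scalar implicitly')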
Example #8
def tensor_grad_scale(scale, grad):
    return grad * P.Reciprocal()(scale)
Example #9
 def __init__(self, loop_count=1):
     super().__init__()
     self.loop_count = loop_count
     self.op_seq = (P.Sqrt(), P.Reciprocal(), P.Square())
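Only the setup is visible; loop_count and the op_seq tuple suggest a construct that applies Sqrt, Reciprocal and Square in order for loop_count iterations. A sketch with an assumed class name and loop body:

import mindspore.nn as nn
from mindspore.ops import operations as P


class OpSeqNet(nn.Cell):
    """Sketch: repeatedly applies Sqrt, Reciprocal and Square to the input."""

    def __init__(self, loop_count=1):
        super().__init__()
        self.loop_count = loop_count
        self.op_seq = (P.Sqrt(), P.Reciprocal(), P.Square())

    def construct(self, x):
        out = x
        for _ in range(self.loop_count):
            for op in self.op_seq:   # Sqrt -> Reciprocal -> Square
                out = op(out)
        return out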