Example #1
0
def test_gather_pynative_fp16_14():
    """Check P.L2Loss forward in PyNative mode on GPU with float16 input.

    L2Loss(x) = sum(x ** 2) / 2, so for [1, 2, 3, 4] the expected value is
    (1 + 4 + 9 + 16) / 2 = 15.

    NOTE(review): despite the "gather" in its name, this test exercises
    P.L2Loss; the name is kept so external test collection is unaffected.
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    error = 1e-4
    x = Tensor(np.array([1., 2., 3., 4.]), ms.float16)
    expect = np.array(15, np.float16)
    output = P.L2Loss()(x)
    # Compare the absolute deviation: the original `diff < error` check
    # passed trivially for any large negative difference.
    diff = np.abs(output.asnumpy() - expect)
    assert np.all(diff < error)
Example #2
0
 def __init__(self):
     """Instantiate the MindSpore primitive ops used by this loss cell.

     Ops are created once here so that ``construct`` can reuse them.
     """
     super(openpose_loss, self).__init__()
     self.expand_dims = P.ExpandDims()
     self.tile = P.Tile()
     self.mul = P.Mul()
     # L2Loss computes sum(x ** 2) / 2 over the whole tensor.
     self.l2_loss = P.L2Loss()
     self.square = P.Square()
     self.reduceMean = P.ReduceMean()
     self.reduceSum = P.ReduceSum()
     self.print = P.Print()
     self.shape = P.Shape()
     # Arg-max over axis -1 (last axis); returns (indices, values).
     self.maxoftensor = P.ArgMaxWithValue(-1)
Example #3
0
 def __init__(self, label, mask, weight_decay, param):
     """Store targets and ops for a masked, L2-regularized softmax loss.

     Args:
         label: ground-truth labels; wrapped into a Tensor here.
         mask: per-sample mask; wrapped into a Tensor here.
         weight_decay: scale of the L2 regularization term.
         param: parameters to regularize (presumably network weights —
             confirm against the caller).
     """
     super(Loss, self).__init__(auto_prefix=False)
     self.label = Tensor(label)
     self.mask = Tensor(mask)
     self.loss = P.SoftmaxCrossEntropyWithLogits()
     self.one = Tensor(1.0, mstype.float32)
     self.zero = Tensor(0.0, mstype.float32)
     self.mean = P.ReduceMean()
     self.cast = P.Cast()
     # L2Loss computes sum(x ** 2) / 2 over the whole tensor.
     self.l2_loss = P.L2Loss()
     self.reduce_sum = P.ReduceSum()
     self.weight_decay = weight_decay
     self.param = param
Example #4
0
    def construct(self, logits):
        """Compute the masked softmax cross-entropy plus an L2 penalty.

        Args:
            logits: raw network outputs; flattened to (-1, num_class).

        Returns:
            Scalar float32 loss: mean masked cross-entropy plus
            l2_coeff-weighted L2 norms of all registered parameters.
        """
        reshape = P.Reshape()

        # Accumulate the weight penalty over every registered parameter.
        penalty = 0
        for idx in range(self.num_params):
            penalty += self.l2_coeff * P.L2Loss()(self.params[idx])

        flat_logits = self.cast(reshape(logits, (-1, self.num_class)), mstype.float32)
        flat_label = reshape(self.label, (-1, self.num_class))
        flat_mask = reshape(self.mask, (-1,))

        ce = self.softmax(flat_logits, flat_label)[0]
        # Normalize the mask so it averages to 1, then weight each sample.
        flat_mask = flat_mask / self.reduce_mean(flat_mask)
        ce = self.reduce_mean(ce * flat_mask)

        return ce + P.Cast()(penalty, mstype.float32)
Example #5
0
    def __init__(self, neg_item_num, l2_embed, dist_reg):
        """Set up BGCF loss hyper-parameters and primitive operators.

        Args:
            neg_item_num: count of negative items (from the name —
                confirm exact semantics against the caller).
            l2_embed: coefficient for the L2 embedding regularization.
            dist_reg: coefficient for the distance regularization term.
        """
        super(BGCFLoss, self).__init__()

        self.neg_item_num = neg_item_num
        self.l2_embed = l2_embed
        self.dist_reg = dist_reg

        self.log = P.Log()
        self.pow = P.Pow()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        # Concatenation along axis 1 and axis 2 respectively.
        self.concat = P.Concat(1)
        self.concat2 = P.Concat(2)
        # Split along axis 0 into 2 outputs.
        self.split = P.Split(0, 2)
        self.reduce_sum = P.ReduceSum()
        self.expand_dims = P.ExpandDims()
        self.multiply = P.Mul()
        self.matmul = P.BatchMatMul()
        # Squeeze removes axis 1.
        self.squeeze = P.Squeeze(1)
        self.transpose = P.Transpose()
        # L2Loss computes sum(x ** 2) / 2 over the whole tensor.
        self.l2_loss = P.L2Loss()
        self.sigmoid = P.Sigmoid()
Example #6
0
        'desc_bprop': [3, 3],
        'skip': ['backward']}),
    ('ApplyRMSProp', {
        'block': P.ApplyRMSProp(),
        'desc_const': [0.9, 0.0, 1e-10, 0.001],
        'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3]],
        'desc_bprop': [3, 3],
        'skip': ['backward']}),
    ('ApplyCenteredRMSProp', {
        'block': P.ApplyCenteredRMSProp(),
        'desc_const': [0.9, 0.0, 1e-10, 0.001],
        'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3]],
        'desc_bprop': [3, 3],
        'skip': ['backward']}),
    ('L2Loss_1', {
        'block': P.L2Loss(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 4]), mstype.float16)],
        'desc_bprop': []}),
    ('L2Loss_2', {
        'block': P.L2Loss(),
        'desc_inputs': [Tensor(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]), mstype.float16)],
        'desc_bprop': []}),
]

test_case_array_ops = [
    ('SpaceToDepth', {
        'block': P.SpaceToDepth(2),
        'desc_inputs': [[1, 3, 2, 2]],
        'desc_bprop': [[1, 12, 1, 1]]}),
    ('DepthToSpace', {
        'block': P.DepthToSpace(2),
Example #7
0
 def __init__(self):
     """Create the cell's single op: P.L2Loss (sum(x ** 2) / 2)."""
     super(L2LossNet, self).__init__()
     self.l2_loss = P.L2Loss()