Example #1
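These excerpts are shown without their surrounding files, so the imports are implied. A minimal set that the snippets below appear to assume (standard MindSpore package layout; adjust to the version actually in use):

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops as ops
    import mindspore.common.dtype as mstype
    from mindspore import Tensor
    from mindspore.ops import operations as P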
def __init__(self):
    super(LocalizationLoss, self).__init__()
    self.reduce_sum = P.ReduceSum()
    self.reduce_mean = P.ReduceMean()
    self.loss = nn.SmoothL1Loss()
    self.expand_dims = P.ExpandDims()
    self.less = P.Less()
def smoothl1loss(beta):
    np.random.seed(42)
    prediction = np.random.randn(20).astype(np.float32)
    target = np.random.randn(20).astype(np.float32)

    net = nn.SmoothL1Loss(beta)
    return net(Tensor(prediction), Tensor(target))
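One way to run this helper end to end (the execution mode and device_target below are only illustrative assumptions, not part of the original test):

    from mindspore import context

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    loss = smoothl1loss(1.0)   # elementwise loss, same shape as prediction/target
    print(loss)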
Example #3
def __init__(self):
    super(SmoothL1LossNew, self).__init__()
    self.transpose = P.Transpose()
    self.smooth_l1_loss = nn.SmoothL1Loss()
    self.shape = P.Shape()
    self.expand_dims = P.ExpandDims()
    self.sum = P.ReduceSum()
    self.cast = P.Cast()
def smoothl1loss_grad(beta):
    np.random.seed(42)
    prediction = np.random.randn(20).astype(np.float32)
    target = np.random.randn(20).astype(np.float32)
    sens = np.random.randn(20).astype(np.float32)

    net = nn.SmoothL1Loss(beta)
    grad = Grad(net)
    return grad(Tensor(prediction), Tensor(target), Tensor(sens))
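The Grad wrapper is not defined in this excerpt. In MindSpore test code it is typically a small cell built on ops.GradOperation; a minimal sketch, assuming gradients are wanted for all inputs and an explicit sensitivity is passed in:

    class Grad(nn.Cell):
        # Hypothetical helper: returns the gradients of `network` with respect to
        # all of its inputs, using `sens` as the incoming sensitivity (seed gradient).
        def __init__(self, network):
            super(Grad, self).__init__()
            self.grad = ops.GradOperation(get_all=True, sens_param=True)
            self.network = network

        def construct(self, x1, x2, sens):
            return self.grad(self.network)(x1, x2, sens)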
Example #5
File: ssd.py  Project: yrpang/mindspore
def __init__(self, network, config):
    super(SSDWithLossCell, self).__init__()
    self.network = network
    self.less = P.Less()
    self.tile = P.Tile()
    self.reduce_sum = P.ReduceSum()
    self.expand_dims = P.ExpandDims()
    # Focal loss for the classification branch, Smooth L1 for box regression.
    self.class_loss = SigmoidFocalClassificationLoss(
        config.gamma, config.alpha)
    self.loc_loss = nn.SmoothL1Loss()
Example #6
def __init__(self, mode='l1'):
    super(RegLoss, self).__init__()
    self.reduce_sum = ops.ReduceSum()
    self.cast = ops.Cast()
    self.expand_dims = ops.ExpandDims()
    self.reshape = ops.Reshape()
    self.gather_feature = TransposeGatherFeature()
    if mode == 'l1':
        self.loss = nn.L1Loss(reduction='sum')
    elif mode == 'sl1':
        self.loss = nn.SmoothL1Loss()
    else:
        self.loss = None
def test_smoothl1loss():
    np.random.seed(42)
    prediction = np.random.randn(20).astype(np.float32)
    target = np.random.randn(20).astype(np.float32)
    sigma = 1.0

    net = nn.SmoothL1Loss(sigma)
    loss = net(Tensor(prediction), Tensor(target))
    expect = [0.46941718, 0.00382918, 0.16829303, 2.447778, 0.04812113, 0.05953304,
              2.2302065, 0.07672881, 0.00860204, 0.34798968, 0.00956192, 1.818008,
              0.03262977, 0.36599946, 2.047463, 0.2168481, 0.7216947, 1.7739174,
              0.08826803, 1.109165]
    assert np.allclose(loss.asnumpy(), expect)
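For reference, the expected values above follow the standard elementwise Smooth L1 definition: quadratic while |x - y| < beta, linear beyond that. A hypothetical NumPy re-implementation (the helper name is ours, not MindSpore's):

    def smooth_l1_reference(prediction, target, beta=1.0):
        # Quadratic inside the |x - y| < beta zone, linear outside it.
        diff = np.abs(prediction - target)
        return np.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)

    # np.allclose(smooth_l1_reference(prediction, target), loss.asnumpy()) should hold.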
Example #8
def __init__(self, network, config):
    super(retinanetWithLossCell, self).__init__()
    self.network = network
    self.less = P.Less()
    self.tile = P.Tile()
    self.reduce_sum = P.ReduceSum()
    self.reduce_mean = P.ReduceMean()
    self.expand_dims = P.ExpandDims()
    self.class_loss = SigmoidFocalClassificationLoss(
        config.gamma, config.alpha)
    self.loc_loss = nn.SmoothL1Loss()
    self.cast = P.Cast()

    # Run the wrapped network in float16 for mixed-precision training.
    self.network.to_float(mstype.float16)
def test_smoothl1loss_grad():
    np.random.seed(42)
    prediction = np.random.randn(20).astype(np.float32)
    target = np.random.randn(20).astype(np.float32)
    sens = np.random.randn(20).astype(np.float32)
    sigma = 1.0

    net = nn.SmoothL1Loss(sigma)
    grad = Grad(net)
    dx = grad(Tensor(prediction), Tensor(target), Tensor(sens))

    dx1_expect = [-0.71552587, 0.01499678, -0.06709455, -0.30110368, -0.45868093,
                  0.24838912, -0.46063876, 0.41411355, 0.04507046, -1.4708229,
                  0.04481723, 0.38508227, -0.17292616, -0.52333146, -1.0309995,
                  0.61330026, 0.83921754, -0.3092124, 0.1391843, -0.9755451]

    dx2_expect = [0.71552587, -0.01499678, 0.06709455, 0.30110368, 0.45868093,
                  -0.24838912, 0.46063876, -0.41411355, -0.04507046, 1.4708229,
                  -0.04481723, -0.38508227, 0.17292616, 0.52333146, 1.0309995,
                  -0.61330026, -0.83921754, 0.3092124, -0.1391843, 0.9755451]

    assert np.allclose(dx[0].asnumpy(), dx1_expect)
    assert np.allclose(dx[1].asnumpy(), dx2_expect)
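Note that the two gradient tensors are exact negatives of each other, as expected from the symmetric form of the loss. A hypothetical NumPy reference for the backward pass (again, the helper name is ours):

    def smooth_l1_grad_reference(prediction, target, sens, beta=1.0):
        # The derivative w.r.t. prediction is (x - y) / beta inside the quadratic
        # zone and sign(x - y) outside it, scaled by the incoming sensitivity `sens`;
        # the gradient w.r.t. target is simply the negation.
        diff = prediction - target
        dx1 = np.where(np.abs(diff) < beta, diff / beta, np.sign(diff)) * sens
        return dx1, -dx1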