Example #1
    def forward(self, *inputs):
        downsample = 32
        losses = []

        # YOLOv3 output fields differ between 'train' and 'eval' modes
        if len(inputs) == 6:
            output1, output2, output3, gt_box, gt_label, gt_score = inputs
        elif len(inputs) == 8:
            output1, output2, output3, img_id, bbox, gt_box, gt_label, gt_score = inputs

        outputs = [output1, output2, output3]
        for idx, out in enumerate(outputs):
            if idx == 3: break  # debug
            anchor_mask = self.anchor_masks[idx]
            loss = F.yolov3_loss(x=out,
                                 gt_box=gt_box,
                                 gt_label=gt_label,
                                 gt_score=gt_score,
                                 anchor_mask=anchor_mask,
                                 downsample_ratio=downsample,
                                 anchors=self.anchors,
                                 class_num=self.num_classes,
                                 ignore_thresh=self.ignore_thresh,
                                 use_label_smooth=False)
            loss = paddle.reduce_mean(loss)
            losses.append(loss)
            downsample //= 2  # next detection scale uses half the stride: 32 -> 16 -> 8
        return losses
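A hedged aside (not from the source): the list returned by forward() holds one loss tensor per detection scale, and training code typically sums it into a single scalar before calling backward(). The toy tensors below merely stand in for the real per-scale losses.

import paddle

# hypothetical stand-ins for the three per-scale losses returned by forward()
losses = [paddle.to_tensor(0.7), paddle.to_tensor(0.4), paddle.to_tensor(0.2)]
total_loss = losses[0] + losses[1] + losses[2]  # single scalar objective for backward()
print(float(total_loss))  # ~1.3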
Example #2
def cal_gradient_penalty(netD,
                         real_data,
                         fake_data,
                         edge_data=None,
                         type='mixed',
                         constant=1.0,
                         lambda_gp=10.0):
    if lambda_gp > 0.0:
        if type == 'real':  # use real images, fake images, or a linear interpolation of the two
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            alpha = paddle.rand((real_data.shape[0], 1))
            alpha = paddle.expand(
                alpha,
                [real_data.shape[0],
                 np.prod(real_data.shape) // real_data.shape[0]])
            alpha = paddle.reshape(alpha, real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        # interpolatesv.requires_grad_(True)
        interpolatesv.stop_gradient = False
        real_data.stop_gradient = True
        fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
        disc_interpolates = netD(fake_AB)

        # FIXME: use paddle.ones
        outs = paddle.fill_constant(disc_interpolates.shape,
                                    disc_interpolates.dtype, 1.0)
        gradients = paddle.imperative.grad(
            outputs=disc_interpolates,
            inputs=fake_AB,
            grad_outputs=outs,  # paddle.ones(list(disc_interpolates.shape)),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
            # no_grad_vars=set(netD.parameters())
        )

        gradients = paddle.reshape(gradients[0],
                                   [real_data.shape[0], -1])  # flatten the data

        gradient_penalty = paddle.reduce_mean(
            (paddle.norm(gradients + 1e-16, 2, 1) - constant) ** 2) * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
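A hedged usage sketch (not from the source) of how cal_gradient_penalty might be wired into a discriminator update; the toy netD, tensor shapes, and values are hypothetical, and the same (older) Paddle API as the example above is assumed.

import numpy as np
import paddle

# hypothetical stand-in discriminator; its input width is doubled because the
# function concatenates real_data with the interpolated samples along axis 1
netD = paddle.nn.Linear(8, 1)

real = paddle.to_tensor(np.random.rand(4, 4).astype("float32"))
fake = paddle.to_tensor(np.random.rand(4, 4).astype("float32"))

gp, grads = cal_gradient_penalty(netD, real, fake, type='mixed', lambda_gp=10.0)
# gp is added to the usual discriminator loss before backward()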
Example #3
    def cal_gradient_penalty(self,
                             netD,
                             real_data,
                             fake_data,
                             edge_data=None,
                             type='mixed',
                             constant=1.0,
                             lambda_gp=10.0):
        if lambda_gp > 0.0:
            if type == 'real':
                interpolatesv = real_data
            elif type == 'fake':
                interpolatesv = fake_data
            elif type == 'mixed':
                alpha = paddle.rand((real_data.shape[0], 1))
                alpha = paddle.expand(alpha, [
                    real_data.shape[0],
                    np.prod(real_data.shape) // real_data.shape[0]
                ])
                alpha = paddle.reshape(alpha, real_data.shape)
                interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
            else:
                raise NotImplementedError('{} not implemented'.format(type))
            interpolatesv.stop_gradient = False
            real_data.stop_gradient = True
            fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
            disc_interpolates = netD(fake_AB)

            outs = paddle.fill_constant(disc_interpolates.shape,
                                        disc_interpolates.dtype, 1.0)
            gradients = paddle.grad(
                outputs=disc_interpolates,
                inputs=fake_AB,
                grad_outputs=outs,
                create_graph=True,
                retain_graph=True,
                only_inputs=True)

            gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])

            gradient_penalty = paddle.reduce_mean(
                (paddle.norm(gradients + 1e-16, 2, 1) - constant) ** 2) * lambda_gp  # added eps
            return gradient_penalty, gradients
        else:
            return 0.0, None
Example #4
    def _test_dygraph(self, python_func, paddle_api, kwarg, place):
        paddle.disable_static(place)
        x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
        linear = paddle.nn.Linear(10, 10)
        scheduler = paddle_api(**kwarg)
        adam = paddle.optimizer.Adam(learning_rate=scheduler,
                                     parameters=linear.parameters())
        for epoch in range(20):
            for batch_id in range(2):
                x = paddle.to_tensor(x)
                out = linear(x)
                loss = paddle.reduce_mean(out)
                loss.backward()
                adam.step()
                adam.clear_grad()
            # compare the scheduler's learning rate against the pure-Python reference
            current_lr = adam.get_lr()
            expected_lr = python_func(epoch, **kwarg)
            if paddle_api.__name__ != "CosineAnnealingLR":
                self.assertEqual(current_lr, expected_lr)
                scheduler.step()
            else:
                self.assertAlmostEqual(current_lr, expected_lr)
                scheduler.step(epoch + 1)
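For context, a hedged sketch (not from the source) of the kind of pure-Python reference function and kwargs this helper pairs with a Paddle learning-rate scheduler class; step_lr and the values below are hypothetical, and the matching scheduler class (whose import path varies across Paddle versions) would be passed in as paddle_api.

# hypothetical step-decay reference: the learning rate is multiplied by `gamma`
# once every `step_size` epochs
def step_lr(epoch_num, learning_rate, step_size, gamma, verbose=False):
    return learning_rate * gamma ** (epoch_num // step_size)

kwarg = {"learning_rate": 0.5, "step_size": 3, "gamma": 0.1}
# self._test_dygraph(step_lr, <StepDecay-style scheduler class>, kwarg, paddle.CPUPlace())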
Example #5
    def forward(self, real, fake):
        # least-squares (LSGAN-style) discriminator loss: push real scores toward 1, fake scores toward 0
        loss = paddle.square(fake) + paddle.square(real - 1.)
        loss = paddle.reduce_mean(loss / 2.0)
        return loss
Example #6
def mae(a, b):  # L1Loss
    return paddle.reduce_mean(paddle.abs(a - b))
Example #7
def mse(a, b):  # L2Loss (mean squared error)
    return paddle.reduce_mean(paddle.square(a - b))
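A hedged usage sketch (not from the source) of the mae/mse helpers above on small tensors; it assumes the same (older) Paddle API in which paddle.reduce_mean is available, and the numbers are purely illustrative.

import numpy as np
import paddle

a = paddle.to_tensor(np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32"))
b = paddle.to_tensor(np.array([[1.5, 2.0], [2.0, 4.0]], dtype="float32"))

print(float(mae(a, b)))  # mean(|a - b|)   = mean([0.5, 0, 1, 0])  = 0.375
print(float(mse(a, b)))  # mean((a - b)^2) = mean([0.25, 0, 1, 0]) = 0.3125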