Example #1
0
    def compute(content: Tensor, target_hm: Tensor):
        """Heatmap loss: weighted BCE on raw maps plus a small MSE on
        barycenter coordinates; logs the scalar when `name` is set.
        """
        coords_pred, _ = heatmap_to_measure(content)
        coords_true, _ = heatmap_to_measure(target_hm)

        bce_term = nn.BCELoss()(content, target_hm) * weight
        coord_term = nn.MSELoss()(coords_pred, coords_true) * weight * 0.001
        total = Loss(bce_term + coord_term)

        if name:
            writer.add_scalar(name, total.item(), counter.get_iter(name))

        return total
def hm_svoego_roda_loss(pred, target):
    """BCE + coordinate-MSE heatmap loss with nan guards.

    NOTE: thresholds `target` in place (values < 1e-7 -> 0, > 1-1e-7 -> 1),
    so the caller's tensor is mutated.
    """
    # Coordinates are extracted BEFORE the in-place thresholding below.
    coord_pred = heatmap_to_measure(pred)[0]
    coord_true = heatmap_to_measure(target)[0]

    pred = pred.relu() + 1e-15
    target[target < 1e-7] = 0
    target[target > 1 - 1e-7] = 1

    if torch.isnan(pred).any() or torch.isnan(target).any():
        return Loss.ZERO()

    bce_term = nn.BCELoss()(pred, target)
    if torch.isnan(bce_term).any():
        return Loss.ZERO()

    coord_term = nn.MSELoss()(coord_pred, coord_true) * 0.0005
    return Loss(bce_term + coord_term)
def hm_svoego_roda_loss(pred, target):
    """Heatmap loss: BCE + coordinate MSE + mean absolute pixel difference."""
    xy_pred, _ = heatmap_to_measure(pred)
    xy_true, _ = heatmap_to_measure(target)

    bce_term = nn.BCELoss()(pred, target)
    coord_term = nn.MSELoss()(xy_pred, xy_true) * 0.0005
    abs_term = (pred - target).abs().mean() * 0.3
    return Loss(bce_term + coord_term + abs_term)
def hm_svoego_roda_loss(pred, target):
    """BCE + coordinate-MSE heatmap loss; target coordinates carry no gradient."""
    xy_pred, _ = heatmap_to_measure(pred)
    with torch.no_grad():
        xy_true, _ = heatmap_to_measure(target)

    bce_term = nn.BCELoss()(pred, target)
    coord_term = nn.MSELoss()(xy_pred, xy_true) * 0.001
    return Loss(bce_term + coord_term)
def stariy_hm_loss(pred, target, coef=1.0):
    """Weighted heatmap loss: BCE plus MSE and L1 on measure coordinates."""
    mes_pred = UniformMeasure2DFactory.from_heatmap(pred)
    mes_true = UniformMeasure2DFactory.from_heatmap(target)

    coord_w = 0.001 * coef
    bce_term = nn.BCELoss()(pred, target) * coef
    mse_term = nn.MSELoss()(mes_pred.coord, mes_true.coord) * coord_w
    l1_term = nn.L1Loss()(mes_pred.coord, mes_true.coord) * coord_w
    return Loss(bce_term + mse_term + l1_term)
def hm_svoego_roda_loss(pred, target, coef=1.0, l1_coef=0.0):
    """Heatmap loss: weighted BCE plus MSE/L1 penalties on measure coordinates.

    Args:
        pred: predicted heatmap tensor; values expected in [0, 1] for BCELoss.
        target: ground-truth heatmap tensor of the same shape.
        coef: weight of the BCE term (the MSE term is scaled by 0.0005 * coef).
        l1_coef: weight of the L1 coordinate term (0.0 disables it).

    Returns:
        Loss wrapping the combined scalar, or Loss.ZERO() when nans are found.
    """
    # Guard first: the original extracted the measures before this check,
    # doing the expensive heatmap->measure conversion even on the nan path.
    if torch.isnan(pred).any() or torch.isnan(target).any():
        print("nan in hm")
        return Loss.ZERO()

    pred_mes = UniformMeasure2DFactory.from_heatmap(pred)
    target_mes = UniformMeasure2DFactory.from_heatmap(target)

    bce = nn.BCELoss()(pred, target)
    # BCE can still go nan (e.g. values outside (0, 1)); guard again.
    if torch.isnan(bce).any():
        print("nan in bce")
        return Loss.ZERO()

    return Loss(bce * coef +
                nn.MSELoss()(pred_mes.coord, target_mes.coord) * (0.0005 * coef) +
                nn.L1Loss()(pred_mes.coord, target_mes.coord) * l1_coef)
Example #7
0
    def _generator_loss(self, dgz: Tensor, real: List[Tensor],
                        fake: List[Tensor]) -> Loss:
        """Generator loss: criterion between sigmoid(D(G(z))) and all-ones labels.

        `real` and `fake` are unused here; the signature mirrors the
        discriminator-side API.
        """
        batch_size = dgz.size(0)
        nc = dgz.size(1)

        # Float fill value: torch.full with an integer fill and no dtype
        # produces an integer tensor on modern PyTorch, which BCE-style
        # criteria reject as a target.
        real_labels = torch.full((
            batch_size,
            nc,
        ), 1.0, device=dgz.device)
        errG = self.__criterion(
            dgz.view(batch_size, nc).sigmoid(), real_labels)
        return Loss(errG)
Example #8
0
    def _discriminator_loss(self, dx: Tensor, dy: Tensor) -> Loss:
        """Discriminator loss: criterion on real scores vs ones and fake scores
        vs zeros; returns the negated sum (maximize-as-Loss convention, as in
        the matching generator loss).
        """
        batch_size = dx.size(0)
        nc = dx.size(1)

        # Float fill values: torch.full with an integer fill and no dtype
        # produces an integer tensor on modern PyTorch, which BCE-style
        # criteria reject as a target.
        real_labels = torch.full((
            batch_size,
            nc,
        ), 1.0, device=dx.device)
        err_real = self.__criterion(
            dx.view(batch_size, nc).sigmoid(), real_labels)

        fake_labels = torch.full((
            batch_size,
            nc,
        ), 0.0, device=dx.device)
        err_fake = self.__criterion(
            dy.view(batch_size, nc).sigmoid(), fake_labels)

        return Loss(-(err_fake + err_real))
Example #9
0
    def discriminator_loss(self, x: Tensor, y: Tensor):
        """Transport-style discriminator objective with a cycle penalty.

        NOTE(review): depends on self.Tyx / self.Dx / self.product /
        self.d_grad / self.gradient_point, none of which are visible in this
        file chunk — the comments below are inferred from call shapes and
        should be confirmed against their definitions.
        """
        L1 = nn.L1Loss()

        # Discriminator step only: block gradients into both inputs.
        x = x.detach()
        y = y.detach()

        tyx: Tensor = self.Tyx(y).detach()  # detach ?
        loss: Tensor = self.Dx(x).mean() + self.product(
            tyx, y) - self.Dx(tyx).mean()

        # Cycle penalty, direction 1: map tyx forward then back and compare
        # against the gradient point between tyx and x.
        x0 = self.gradient_point([tyx], [x])[0]
        tx0y = self.d_grad(x0)
        x0_pred = self.Tyx(tx0y)

        # Cycle penalty, direction 2: same round trip starting from y.
        y0 = self.gradient_point([y], [self.d_grad(x, False)])[0]
        ty0x = self.Tyx(y0)
        y0_pred = self.d_grad(ty0x)

        pen = L1(y0_pred, y0) + L1(x0_pred, x0)

        return Loss(loss + self.pen_weight * pen)
    def _compute(self, delta: Tensor) -> Loss:
        """Weighted one-sided penalty: mean squared L2 norm of relu(delta)."""
        penalty = delta.relu().norm(2, dim=1).pow(2).mean()
        return Loss(self.weight * penalty)
 def add_generator_loss(self, loss: nn.Module, weight=1.0):
     """Attach a weighted reconstruction-style module as an extra generator loss."""
     def gen_term(dgz, real, fake):
         return Loss(loss(fake[0], real[0].detach()) * weight)

     return self.__add__(
         GANLossObject(lambda dx, dy: Loss.ZERO(), gen_term,
                       self.discriminator))
 def _compute(self, gradients: List[Tensor]) -> Loss:
     """WGAN-GP penalty: mean squared deviation of the joint gradient norm from 1."""
     flat = torch.cat([g.view(g.size(0), -1) for g in gradients], dim=1)
     penalty = ((flat.norm(2, dim=1) - 1) ** 2).mean()
     return Loss(self.weight * penalty)
def otdelnaya_function(content: Tensor, measure: ProbabilityMeasure):
    """Mean absolute deviation between `content` and the measure's coordinates."""
    return Loss((content - measure.coord).abs().mean())
Example #14
0
    def generator_loss(self, x: Tensor):
        """L1 distance between x and its gradient-blocked d_grad image."""
        target = self.d_grad(x, False).detach()
        return Loss(nn.L1Loss()(x, target))
Example #15
0
 def _discriminator_loss(self, d_real: Tensor, d_fake: Tensor) -> Loss:
     """Hinge discriminator objective, negated per this project's Loss convention."""
     real_term = (1 - d_real).relu().mean()
     fake_term = (1 + d_fake).relu().mean()
     return Loss(-(real_term + fake_term))
 def compute(t1: Tensor, t2: Tensor):
     """L1 loss between two tensors; logs every 10th iteration when `name` is set."""
     value = l1_loss(t1, t2)
     if name and counter.get_iter(name) % 10 == 0:
         writer.add_scalar(name, value, counter.get_iter(name))
     return Loss(value)
                             lr=5e-4,
                             betas=(0.5, 0.97))

# Batch-level augmentation: elastic deformation plus shift/scale/rotate,
# round-tripped through numpy because albumentations operates on numpy arrays.
g_transforms: albumentations.DualTransform = albumentations.Compose([
    ToNumpy(),
    NumpyBatch(
        albumentations.ElasticTransform(p=0.8,
                                        alpha=150,
                                        alpha_affine=1,
                                        sigma=10)),
    NumpyBatch(albumentations.ShiftScaleRotate(p=0.5, rotate_limit=10)),
    ToTensor(device)
])

# Regularizer: L1 between the given latent and the style encoding of the
# augmented image — presumably enforcing augmentation-invariant style codes.
R_s = UnoTransformRegularizer.__call__(
    g_transforms, lambda trans_dict, img, ltnt: Loss(nn.L1Loss()(
        ltnt, style_encoder(trans_dict['image']))))

# Batch sizes for the 300-W landmark dataset and CelebA loaders.
W300DatasetLoader.batch_size = 24
W300DatasetLoader.test_batch_size = 64
Celeba.batch_size = 24

# Renders keypoint coordinates into 64x64 heatmaps.
heatmaper = ToHeatMap(64)

# tuner = GoldTuner([1.0, 1.0], device=device, rule_eps=0.02, radius=0.5, active=True)

# Fixed batch of 8 test images from 300-W, used for evaluation.
w300_test = next(iter(LazyLoader.w300().test_loader))
w300_test_image = w300_test['data'].to(device)[:8]


def hm_svoego_roda_loss(pred, target, coef=1.0, l1_coef=0.0):
    # NOTE(review): this redefinition looks truncated — it extracts the measure
    # and then falls through, implicitly returning None ('target', 'coef' and
    # 'l1_coef' are never used). Confirm against the original source.
    pred_mes = UniformMeasure2DFactory.from_heatmap(pred)
def hm_loss_bes_xy(pred, target):
    """Plain BCE between predicted and target heatmaps (no coordinate term)."""
    bce = nn.BCELoss()(pred, target)
    return Loss(bce)
Example #19
0
    def transport_loss(self, y: Tensor):
        """Transport objective on a detached sample: product(T(y), y) minus
        the mean critic score of T(y).
        """
        y = y.detach()
        mapped = self.Tyx(y)
        return Loss(self.product(mapped, y) - self.Dx(mapped).mean())
def entropy(hm: Tensor):
    """Negative entropy of a batch of heatmaps, normalized per batch and pixel.

    Args:
        hm: heatmap tensor of shape (B, N, H, W). Values must be strictly
            positive: hm.log() is -inf at exact zeros and 0 * -inf is nan.

    Returns:
        Loss wrapping -(sum of hm * log hm) / (B * H * W).
    """
    # The original unpacked `B, N, D, D`, silently rebinding D to the last
    # dimension; name each dimension instead (identical for the square
    # heatmaps used here, and correct for non-square ones).
    B, _, H, W = hm.shape
    return Loss(-(hm * hm.log()).sum() / (B * H * W))
Example #21
0
 def _generator_loss(self, dgz: Tensor, real: List[Tensor], fake: List[Tensor]) -> Loss:
     """Wasserstein-style generator objective: maximize the critic's mean score."""
     mean_score = dgz.mean()
     return Loss(-mean_score)