def forward(self, m1: ProbabilityMeasure, m2: ProbabilityMeasure):
    batch_loss = self.loss(m1.probability, m1.coord, m2.probability, m2.coord)
    if self.border:
        # keep only samples whose loss exceeds the border threshold
        batch_loss = batch_loss[batch_loss > self.border]
        if batch_loss.shape[0] == 0:
            return Loss.ZERO()
    return Loss(batch_loss.mean())
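# The snippets in this section all funnel their scalars through a shared Loss
# wrapper defined elsewhere in the repo. The sketch below is an assumption
# reconstructed from usage (ZERO, to_tensor, item, minimize_step, and the
# arithmetic operators seen in this file), not the actual source.
import torch
from torch import Tensor, nn


class Loss:
    def __init__(self, tensor: Tensor):
        self.tensor = tensor

    @staticmethod
    def ZERO() -> "Loss":
        return Loss(torch.tensor(0.0))

    def to_tensor(self) -> Tensor:
        return self.tensor

    def item(self) -> float:
        return self.tensor.item()

    def __add__(self, other: "Loss") -> "Loss":
        return Loss(self.tensor + other.tensor)

    def __mul__(self, weight: float) -> "Loss":
        return Loss(self.tensor * weight)

    def minimize_step(self, optimizer: torch.optim.Optimizer) -> "Loss":
        optimizer.zero_grad()
        self.tensor.backward()
        optimizer.step()
        return self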
def add_generator_loss(self, loss: nn.Module, weight=1.0):
    # wrap a plain reconstruction loss as the generator term of a GANLossObject;
    # the discriminator term is left at zero
    return self.__add__(GANLossObject(
        lambda dx, dy: Loss.ZERO(),
        lambda dgz, real, fake: Loss(loss(fake[0], real[0].detach()) * weight),
        self.discriminator
    ))
def forward(self, x, y, y_hat, latent):
    # weighted sum of identity, L2, LPIPS and W-norm terms; each active term
    # is also logged separately in loss_dict
    loss_dict = {}
    loss = 0.0
    id_logs = None
    eps = 1e-8
    if self.id_lambda > eps:
        loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
        loss_dict['loss_id'] = float(loss_id)
        loss_dict['id_improve'] = float(sim_improvement)
        loss = loss_id * self.id_lambda
    if self.l2_lambda > eps:
        loss_l2 = F.mse_loss(y_hat, y)
        loss_dict['loss_l2'] = float(loss_l2)
        loss += loss_l2 * self.l2_lambda
    if self.lpips_lambda > eps:
        loss_lpips = self.lpips_loss(y_hat, y)
        loss_dict['loss_lpips'] = float(loss_lpips)
        loss += loss_lpips * self.lpips_lambda
    if self.w_norm_lambda > eps:
        loss_w_norm = self.w_norm_loss(latent, self.latent_avg)
        loss_dict['loss_w_norm'] = float(loss_w_norm)
        loss += loss_w_norm * self.w_norm_lambda
    loss_dict['brule_loss'] = float(loss)
    return Loss(loss)
def forward(self, x1: Tensor, x2: Tensor) -> Loss:
    # optimal-transport loss: the transport plan P is computed off-graph,
    # while the pairwise Euclidean cost matrix M stays differentiable
    with torch.no_grad():
        P = compute_ot_matrix_par(x1.cpu().numpy(), x2.cpu().numpy())
        P = torch.from_numpy(P).type_as(x1).cuda()
    M = PairwiseCost(lambda t1, t2: (t1 - t2).pow(2).sum(dim=-1).sqrt())(x1, x2)
    return Loss((M * P).sum(dim=[1, 2]).mean())
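# The PairwiseCost helper used above is repo-internal; the sketch below is
# an assumption about its behavior: broadcast two point clouds of shapes
# (B, N, D) and (B, M, D) and apply the cost to every pair, giving (B, N, M).
class PairwiseCost:
    def __init__(self, cost):
        self.cost = cost

    def __call__(self, x1: Tensor, x2: Tensor) -> Tensor:
        # (B, N, 1, D) against (B, 1, M, D) broadcasts to (B, N, M)
        return self.cost(x1[:, :, None, :], x2[:, None, :, :])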
def sum_losses(self, losses: List[Loss]) -> Loss:
    # weighted sum of losses; the coefficients are detached so this backward
    # pass does not update the tuner's own weights
    res = Loss.ZERO()
    coef = self.get_coef()
    for i, l in enumerate(losses):
        res = res + l * coef[i].detach()
    return res
def hm_svoego_roda_loss(pred, target):
    # BCE on the raw heatmaps, MSE on the extracted coordinates, and an L1
    # term on the heatmap difference
    pred_xy, _ = heatmap_to_measure(pred)
    t_xy, _ = heatmap_to_measure(target)
    return Loss(
        nn.BCELoss()(pred, target) * 10 +
        nn.MSELoss()(pred_xy, t_xy) * 0.005 +
        (pred - target).abs().mean() * 3
    )
def __call__(self, *args, **kwargs):
    # apply the penalty only on iterations selected by self.cond;
    # self.CLLLeT4uK is the call counter
    self.CLLLeT4uK += 1
    if self.cond(self.CLLLeT4uK):
        if self.preproc:
            return self.penalty(*self.preproc(*args, **kwargs))
        return self.penalty(*args, **kwargs)
    return Loss.ZERO()
def stariy_hm_loss(pred, target, coef=1.0):
    pred_mes = UniformMeasure2DFactory.from_heatmap(pred)
    target_mes = UniformMeasure2DFactory.from_heatmap(target)
    return Loss(
        nn.BCELoss()(pred, target) * coef +
        nn.MSELoss()(pred_mes.coord, target_mes.coord) * (0.001 * coef) +
        nn.L1Loss()(pred_mes.coord, target_mes.coord) * (0.001 * coef)
    )
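# Hypothetical smoke test for stariy_hm_loss, assuming the repo's
# UniformMeasure2DFactory is importable. Batch size, keypoint count, and
# resolution are made up; BCELoss only needs both heatmaps to lie in [0, 1],
# which the per-channel normalization below guarantees.
pred = torch.rand(2, 68, 64, 64)
pred = pred / pred.sum(dim=[2, 3], keepdim=True)
target = torch.rand(2, 68, 64, 64)
target = target / target.sum(dim=[2, 3], keepdim=True)
print(stariy_hm_loss(pred, target, coef=2.0).to_tensor())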
def hm_svoego_roda_loss(pred, target, coef=1.0, l1_coef=0.0):
    pred_mes = UniformMeasure2DFactory.from_heatmap(pred)
    target_mes = UniformMeasure2DFactory.from_heatmap(target)
    if torch.isnan(pred).any() or torch.isnan(target).any():
        print("nan in hm")
        return Loss.ZERO()
    bce = nn.BCELoss()(pred, target)
    if torch.isnan(bce).any():
        print("nan in bce")
        return Loss.ZERO()
    return Loss(
        bce * coef +
        nn.MSELoss()(pred_mes.coord, target_mes.coord) * (0.0005 * coef) +
        nn.L1Loss()(pred_mes.coord, target_mes.coord) * l1_coef
    )
def compute(content: Tensor, target_hm: Tensor):
    content_xy, _ = heatmap_to_measure(content)
    target_xy, _ = heatmap_to_measure(target_hm)
    lossyash = Loss(
        nn.BCELoss()(content, target_hm) * weight +
        nn.MSELoss()(content_xy, target_xy) * weight * 0.0005
    )
    # if name:
    #     writer.add_scalar(name, lossyash.item(), counter.get_iter(name))
    return lossyash
def _generator_loss(self, dgz: Tensor, real: List[Tensor], fake: List[Tensor]) -> Loss:
    batch_size = dgz.size(0)
    nc = dgz.size(1)
    # BCE against all-ones labels: the generator wants D(G(z)) classified as
    # real; the fill value must be a float for BCELoss
    real_labels = torch.full((batch_size, nc), 1.0, device=dgz.device)
    errG = self.__criterion(dgz.view(batch_size, nc).sigmoid(), real_labels)
    return Loss(errG)
def forward(self, image: Tensor, segm: Tensor) -> Loss:
    # superpixel consistency: pool the segmentation over superpixels of the
    # image and penalize disagreement with the pooled argmax
    sp = torch_sp(image)
    nc = segm.shape[1]
    sp = torch.cat([sp] * nc, dim=1).detach()
    sp_argmax = self.pooling.forward(segm.detach(), sp).detach().max(dim=1)[1]
    return Loss(self.loss(segm, sp_argmax)) * self.weight
def sup_loss(pred_mes, target_mes):
    heatmapper = ToGaussHeatMap(256, 1)
    pred_hm = heatmapper.forward(pred_mes.coord)
    pred_hm = pred_hm / (pred_hm.sum(dim=[2, 3], keepdim=True).detach() + 1e-8)
    target_hm = heatmapper.forward(target_mes.coord).detach()
    target_hm = target_hm / target_hm.sum(dim=[2, 3], keepdim=True).detach()
    return Loss(
        nn.BCELoss()(pred_hm, target_hm) * 100 +
        nn.MSELoss()(pred_mes.coord, target_mes.coord) * 0.5
    )
def forward(self, outputs, targets):
    targets = targets.squeeze().float()
    outputs = outputs.squeeze().float()
    loss = self.nll_loss(outputs, targets)
    if self.dice_weight:
        # soft Dice term, 1 - 2|P∩T| / (|P| + |T|), smoothed to avoid 0/0
        smooth = torch.tensor(1e-15).float()
        target = (targets > 1e-10).float()
        prediction = outputs
        dice_part = 1 - (2 * torch.sum(prediction * target, dim=(1, 2)) + smooth) / \
            (torch.sum(prediction, dim=(1, 2)) + torch.sum(target, dim=(1, 2)) + smooth)
        loss += self.dice_weight * dice_part.mean()
    return Loss(loss)
def forward(self, pred_mes, target_mes) -> Loss:
    pred_hm = self.heatmapper.forward(pred_mes.coord).sum(dim=[1], keepdim=True)
    target_hm = self.heatmapper.forward(target_mes.coord).sum(dim=[1], keepdim=True)
    pred_hm = pred_hm / (pred_hm.sum(dim=[1, 2, 3], keepdim=True).detach() + 1e-8)
    target_hm = target_hm / target_hm.sum(dim=[1, 2, 3], keepdim=True).detach()
    return Loss(
        nn.BCELoss()(pred_hm, target_hm) * self.bce_coef +
        OTWasLoss()(pred_mes.coord, target_mes.coord).to_tensor() * self.was_coef
    )
def forward(self, pred_mes, target_mes) -> Loss:
    pred_hm = self.heatmapper.forward(pred_mes.coord)
    target_hm = self.heatmapper.forward(target_mes.coord)
    pred_hm: Tensor = pred_hm / (pred_hm.sum(dim=[1, 2, 3], keepdim=True).detach() + 1e-8)
    target_hm: Tensor = target_hm / target_hm.sum(dim=[1, 2, 3], keepdim=True).detach()
    return Loss(
        nn.BCELoss()(pred_hm, target_hm) * self.bce_coef * pred_hm.shape[1] +
        (pred_mes.coord - target_mes.coord).pow(2).sum(dim=2).sqrt().mean() * self.l2_coef
    )
def _compute(self, grads: List[Tensor]) -> Loss:
    # StyleGAN2-style path length regularization: penalize deviation of the
    # per-sample gradient norm from its exponential running mean
    path_lengths = 0
    for g in grads:
        B = g.shape[0]
        path_lengths = path_lengths + g.pow(2).reshape(B, -1).mean(1)
    path_lengths = path_lengths.sqrt()
    mean_path_length = self.mean_path_length + self.decay * (path_lengths.mean() - self.mean_path_length)
    self.mean_path_length = mean_path_length.detach()
    return Loss((path_lengths - self.mean_path_length).pow(2).mean() * self.weight)
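# Hypothetical call site for the penalty above, mirroring how StyleGAN2
# obtains the gradients: differentiate a noise-weighted generator output
# w.r.t. the latents. fake_img, latents, and the ppl instance are
# assumptions, not names from this repo.
import math

noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3])
grad, = torch.autograd.grad(
    outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)
penalty = ppl._compute([grad])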
def _discriminator_loss(self, dx: Tensor, dy: Tensor) -> Loss:
    batch_size = dx.size(0)
    nc = dx.size(1)
    # real samples are labeled 1, fakes 0; fill values must be floats for BCE
    real_labels = torch.full((batch_size, nc), 1.0, device=dx.device)
    err_real = self.__criterion(dx.view(batch_size, nc).sigmoid(), real_labels)
    fake_labels = torch.full((batch_size, nc), 0.0, device=dx.device)
    err_fake = self.__criterion(dy.view(batch_size, nc).sigmoid(), fake_labels)
    return Loss(-(err_fake + err_real))
def coord_hm_loss(pred_coord: Tensor, target_hm: Tensor, coef=1.0):
    target_hm = target_hm / target_hm.sum(dim=[2, 3], keepdim=True)
    target_hm = target_hm.detach()
    heatmapper = ToGaussHeatMap(256, 4)
    target_coord = UniformMeasure2DFactory.from_heatmap(target_hm).coord.detach()
    # re-render both coordinate sets as Gaussian heatmaps so the BCE term
    # compares like with like
    pred_hm = heatmapper.forward(pred_coord).sum(dim=1, keepdim=True)
    pred_hm = pred_hm / pred_hm.sum(dim=[2, 3], keepdim=True).detach()
    target_hm = heatmapper.forward(target_coord).sum(dim=1, keepdim=True).detach()
    target_hm = target_hm / target_hm.sum(dim=[2, 3], keepdim=True).detach()
    return Loss(
        nn.BCELoss()(pred_hm, target_hm) * coef * 1.5 +
        nn.MSELoss()(pred_coord, target_coord) * (0.001 * coef) +
        nn.L1Loss()(pred_coord, target_coord) * (0.001 * coef)
    )
def l2_loss(pred, target):
    return Loss(nn.MSELoss()(pred, target))
class Y(nn.Module):
    def __init__(self):
        super().__init__()
        self.y = nn.Parameter(torch.tensor([10.0, 15.0])[None, ...])

    def forward(self, *input):
        return self.y


# x, y, z are data tensors defined elsewhere in the original script
y_module = Y()
y_opt = torch.optim.Adam(y_module.parameters(), lr=1e-3)

for i in range(30000):
    # 100 optimization steps on the tuner-weighted sum of the two losses
    for j in range(100):
        pred = y_module(None)
        Loss1 = Loss(torch.sum((z - pred) ** 2))
        Loss2 = Loss(torch.sum((x - pred) ** 2))
        Tuner.sum_losses([Loss1, Loss2]).minimize_step(y_opt)
    # 100 feedback steps: report the true objective so the tuner can
    # re-weight its coefficients
    for j in range(100):
        pred = y_module(None)
        igor = (pred - y).pow(2).sum().item()
        Tuner.update(igor)

print("coefficients: ", Tuner.coefs.detach().numpy())
print("pred: ", y_module(None).detach().numpy())
def forward(self, m1: ProbabilityMeasure, m2: ProbabilityMeasure):
    batch_loss = self.loss(m1.probability, m1.coord, m2.probability, m2.coord)
    return Loss(batch_loss.mean())
def l1_loss(pred, target):
    return Loss(nn.L1Loss()(pred, target))
def _discriminator_loss(self, dx: Tensor, dy: Tensor) -> Loss:
    return Loss(-d_logistic_loss(dx, dy))
def _generator_loss(self, dgz: Tensor, real: List[Tensor], fake: List[Tensor]) -> Loss:
    return Loss(g_nonsaturating_loss(dgz))
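# d_logistic_loss and g_nonsaturating_loss are not defined in this section;
# the usual StyleGAN2 (rosinality) softplus formulations are sketched below
# as an assumption about what the two wrappers above call.
import torch.nn.functional as F


def d_logistic_loss(real_pred: Tensor, fake_pred: Tensor) -> Tensor:
    return F.softplus(-real_pred).mean() + F.softplus(fake_pred).mean()


def g_nonsaturating_loss(fake_pred: Tensor) -> Tensor:
    return F.softplus(-fake_pred).mean()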
def hm_loss_bes_xy(pred, target):
    return Loss(nn.BCELoss()(pred, target))
def noviy_hm_loss(pred, target, coef=1.0):
    # normalize both heatmaps to unit mass per channel before comparing
    pred = pred / pred.sum(dim=[2, 3], keepdim=True).detach()
    target = target / target.sum(dim=[2, 3], keepdim=True).detach()
    return Loss(nn.BCELoss()(pred, target) * coef)
def forward(self, m1: ProbabilityMeasure, m2: ProbabilityMeasure):
    batch_loss = self.loss(m1.probability, m1.coord, m2.probability, m2.coord)
    return Loss((batch_loss * self.weights).sum())
def compute(t1: Tensor, t2: Tensor):
    loss = l1_loss(t1, t2)  # l1_loss already returns a Loss
    if name:
        if counter.get_iter(name) % 10 == 0:
            writer.add_scalar(name, loss.item(), counter.get_iter(name))
    return loss
def entropy(hm: Tensor):
    # entropy of the heatmaps, averaged over batch and spatial size; the
    # small eps guards against log(0) on empty cells
    B, N, D, _ = hm.shape
    return Loss(-(hm * (hm + 1e-12).log()).sum() / (B * D * D))
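# Quick sanity check with hypothetical shapes: a uniform D x D heatmap with
# N channels has entropy N * log(D*D) per sample, so with N = 68 and D = 64
# the value below should be 68 * log(4096) / 4096 ≈ 0.138.
hm_uniform = torch.full((2, 68, 64, 64), 1.0 / (64 * 64))
print(entropy(hm_uniform).to_tensor())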