# NOTE(review): this chunk starts mid-expression — the lines below close an
# albumentations composition (g_transforms-style) whose opening statement is
# outside this view; do not treat this as a complete assignment.
        alpha_affine=1, sigma=10),
        albumentations.ShiftScaleRotate(p=0.7, rotate_limit=10),
        ResizeMask(h=64, w=64),          # resize masks to the 64x64 heatmap grid
        NormalizeMask(dim=(0, 1, 2))
    ])),
    ToTensor(device),
])

# Equivariance regularizer: apply the geometric transforms to the input image,
# run the encoder on the transformed image, and penalize the mismatch against
# the equally-transformed heatmap via hm_svoego_roda_loss.
# NOTE(review): hm_svoego_roda_loss is called here with two extra positional
# arguments (1, 0.1) — presumably loss weights; its signature is not visible
# in this chunk, so confirm against the definition.
R_t = DualTransformRegularizer.__call__(
    g_transforms,
    lambda trans_dict, img: hm_svoego_roda_loss(
        encoder_HG(trans_dict['image']), trans_dict['mask'], 1, 0.1))

# Supervised training loop on the 300-W landmark dataset (infinite loader,
# fixed iteration budget).
for i in range(100000):
    counter.update(i)
    w300_batch = next(LazyLoader.w300().loader_train_inf)
    w300_image = w300_batch['data'].to(device)
    # Build a probability measure from the batch's normalized keypoints.
    w300_mes = ProbabilityMeasureFabric(256).from_coord_tensor(
        w300_batch["meta"]["keypts_normalized"]).cuda()
    # Target heatmaps: coordinates scaled to the 64x64 grid (indices 0..63);
    # detached so gradients flow only through the encoder's prediction.
    w300_target_hm = heatmaper.forward(
        w300_mes.probability, w300_mes.coord * 63).detach()
    content300 = encoder_HG(w300_image)
    # Supervised heatmap loss plus the (weighted) equivariance regularizer.
    # NOTE(review): the result is only bound here — the minimize/backward step
    # presumably follows outside this view; confirm downstream.
    loss_or_none = (
        writable("real_content loss", hm_svoego_roda_loss)(content300, w300_target_hm) +
        writable("R_t", R_t.__call__)(w300_image, content300) * 0.05)
pred_xy, _ = heatmap_to_measure(pred) t_xy, _ = heatmap_to_measure(target) return Loss(nn.BCELoss()(pred, target) + nn.MSELoss()(pred_xy, t_xy) * 0.0005 + (pred - target).abs().mean() * 0.3) R_t = DualTransformRegularizer.__call__( g_transforms, lambda trans_dict, img: hm_svoego_roda_loss( encoder_HG(trans_dict['image']), trans_dict['mask'])) for epoch in range(130): for i, batch in enumerate(LazyLoader.w300().loader_train): # print(i) counter.update(i + epoch * len(LazyLoader.w300().loader_train)) data = batch['data'].to(device) mes = ProbabilityMeasureFabric(256).from_coord_tensor( batch["meta"]["keypts_normalized"]).cuda() target_hm = heatmaper.forward(mes.probability, mes.coord * 63) content = encoder_HG(data) hm_svoego_roda_loss(content, target_hm).minimize_step(cont_opt) if i % 5 == 0: real_img = next(LazyLoader.celeba().loader).to(device) content = encoder_HG(data) coefs = json.load(open("../parameters/content_loss_sup.json")) R_t(real_img, content).__mul__(coefs["R_t"]).minimize_step(cont_opt)