from math import log10

import torch
import torch.nn.functional as F
import pytorch_msssim
from pytorch_msssim import msssim

def loss_function(recon_x, x):
    # MS-SSIM similarity lies in [0, 1]; map it to a loss in [0, 0.5].
    msssim_loss = (1 - pytorch_msssim.msssim(x, recon_x)) / 2
    f1 = F.l1_loss(recon_x, x)  # mean absolute per-pixel error
    # PSNR-style score; 65025 = 255 ** 2. Note this compares the *sums* of the
    # pixel values, not the per-pixel MSE used by standard PSNR.
    psnr_error = 10 * log10(65025 / torch.abs(torch.sum(x) - torch.sum(recon_x)))

    return msssim_loss, f1, psnr_error
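# A quick smoke test with hypothetical shapes and data, assuming the
# jorge-pessoa pytorch_msssim package: MS-SSIM downsamples four times, so with
# the default 11-pixel window each side should be at least ~176 px.
x = torch.rand(2, 3, 256, 256)
recon = (x + 0.05 * torch.randn_like(x)).clamp(0, 1)
ms, l1, psnr = loss_function(recon, x)
print(float(ms), float(l1), psnr)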
def avg_msssim(real_images, fake_images):
    # msssim already averages over the batch, so no per-image loop is needed.
    # torch.autograd.Variable is deprecated; plain tensors work directly.
    return pytorch_msssim.msssim(real_images.cuda(), fake_images.cuda())
def mix_loss(recon_x, x, mu, logsigma, alpha):
    # Total element count, used to put KLD on the same scale as the
    # mean-reduced reconstruction terms.
    NORM = recon_x.numel()
    # KL(N(mu, sigma^2) || N(0, 1)); logsigma = log(sigma), so
    # log(sigma^2) = 2 * logsigma and sigma^2 = exp(2 * logsigma).
    KLD = -0.5 * torch.sum(1 + 2 * logsigma - mu.pow(2) - (2 * logsigma).exp())
    # Despite the name, this is a mean-squared-error reconstruction term.
    BCE = F.mse_loss(recon_x, x, reduction='mean')
    msssim_loss = 1.0 - msssim(recon_x, x)

    loss = (KLD / NORM + BCE) * alpha + (1 - alpha) * msssim_loss
    return loss, (KLD / NORM + BCE).item() * alpha, (1 - alpha) * msssim_loss.item()
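# The closed-form KLD above can be cross-checked against torch.distributions;
# the shapes here are illustrative assumptions.
def _check_kld():
    from torch.distributions import Normal, kl_divergence
    mu, logsigma = torch.randn(4, 8), 0.1 * torch.randn(4, 8)
    manual = -0.5 * torch.sum(1 + 2 * logsigma - mu.pow(2) - (2 * logsigma).exp())
    # KL(N(mu, sigma) || N(0, 1)), summed over all dimensions.
    lib = kl_divergence(Normal(mu, logsigma.exp()), Normal(0.0, 1.0)).sum()
    torch.testing.assert_close(manual, lib)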
def recons_loss(recon_x, x):
    # MS-SSIM: a multi-scale structural-similarity loss. SSIM is computed on a
    # pyramid of progressively downscaled images, so resolution is taken into
    # account. It is an optimized variant of plain SSIM.
    msssim_loss = (1 - pytorch_msssim.msssim(x, recon_x)) / 2
    # ssim_loss = (1 - pytorch_msssim.ssim(x, recon_x)) / 2
    # Drawing on neuroscience research, the SSIM authors argue that humans
    # judge the distance between two images mainly by structural similarity
    # rather than per-pixel differences, and claim the metric reflects the
    # human visual system's judgment of similarity better than MSE.
    f1 = F.l1_loss(recon_x, x)  # L1: mean absolute per-pixel difference
    # (L2 squares the per-pixel difference instead, which widens the gap
    # between the largest and smallest errors, e.g. 2*2 vs 0.1*0.1, and makes
    # the loss sensitive to outliers.)
    # The paper finds MS-SSIM + L1 to be the best combination: MS-SSIM tends
    # to alter brightness and shift colors but preserves high-frequency
    # content (edges and detail), while L1 keeps brightness and color stable.
    # Its form is Lmix = alpha * Lmsssim + (1 - alpha) * G * L1, where
    # alpha = 0.84 was found empirically and G denotes the Gaussian filter
    # parameters also used inside MS-SSIM.
    return f1 + msssim_loss
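# Note that recons_loss returns the unweighted sum f1 + msssim, not the Lmix
# the comments describe. A rough sketch of that mix, taking alpha = 0.84 from
# the comments and approximating the G * L1 term by smoothing the per-pixel L1
# map with a Gaussian kernel; the kernel size and sigma are illustrative, and
# gaussian_kernel / mix_loss_l1_msssim are hypothetical helpers.
def gaussian_kernel(size=11, sigma=1.5):
    coords = torch.arange(size, dtype=torch.float32) - size // 2
    g = torch.exp(-coords ** 2 / (2 * sigma ** 2))
    k = torch.outer(g, g)
    return k / k.sum()  # normalized 2-D Gaussian

def mix_loss_l1_msssim(recon_x, x, alpha=0.84):
    msssim_loss = 1 - pytorch_msssim.msssim(recon_x, x)
    c = x.shape[1]
    # Depthwise-smooth the per-pixel L1 map with the Gaussian ("G * L1").
    kernel = gaussian_kernel().to(x)[None, None].repeat(c, 1, 1, 1)
    g_l1 = F.conv2d((recon_x - x).abs(), kernel, padding=5, groups=c).mean()
    return alpha * msssim_loss + (1 - alpha) * g_l1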
def ms_ssim_loss(recon_x, x, _, __):
    # Shares mix_loss's (loss, scalar, scalar) return signature; the two
    # unused arguments stand in for mu and logsigma so the criteria are
    # interchangeable. normalize=True rescales the per-scale scores, avoiding
    # NaNs early in training when intermediate values can go negative.
    loss = 1 - msssim(recon_x, x, normalize=True)
    return loss, 0.0, loss.item()
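# Hypothetical glue code showing how either criterion plugs into a training
# step; `vae` returning (recon, mu, logsigma) and the optimizer are assumed
# interfaces, not part of the snippets above.
from functools import partial

def train_step(vae, optimizer, x, criterion):
    recon_x, mu, logsigma = vae(x)
    loss, vae_part, msssim_part = criterion(recon_x, x, mu, logsigma)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item(), vae_part, msssim_part

# train_step(vae, opt, batch, ms_ssim_loss)
# train_step(vae, opt, batch, partial(mix_loss, alpha=0.84))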