Example #1
def summaries(writer, result, fbp, true, loss, it, do_print=False):
    """Save and print training and validation data to tensorboard"""
    residual = result - true
    squared_error = residual**2
    mse = torch.mean(squared_error)
    maxval = torch.max(true) - torch.min(true)
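    # PSNR in dB: equivalent to 10 * log10(maxval**2 / mse)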
    psnr = 20 * torch.log10(maxval) - 10 * torch.log10(mse)
    # ratio of mse to fbp mse
    relative = torch.mean((result - true)**2) / torch.mean((fbp - true)**2)
    ssi = ssim(result, true)
    ssi_fbp = ssim(fbp, true)
    # relative ssim
    relative_ssim = ssi / ssi_fbp
    if do_print:
        print(it, mse.item(), psnr.item(), relative.item(), ssi.item(),
              relative_ssim.item())

    writer.add_scalar('loss', loss, it)
    writer.add_scalar('psnr', psnr, it)
    writer.add_scalar('relative', relative, it)
    writer.add_scalar('ssim', ssi, it)
    writer.add_scalar('relative ssim', relative_ssim, it)

    util.summary_image(writer, 'result', result, it)
    util.summary_image(writer, 'true', true, it)
Example #2
    def forward(self, x, y, o, v, p, r, iteration, additional=None):
        Y = self.eval(x, o, v, p, r)
        losses = {}

        # Different loss functions, we used logl1 + logssim
        if "logl1" in self.loss:
            logl1 = 2 * torch.abs(Y - torch.log1p(y))
            losses["generator" + str(self.index) + "_logl1"] = logl1
        if "l1" in self.loss:
            l1 = 0.5 * torch.abs(Y - y)
            losses["generator" + str(self.index) + "_l1"] = l1
        if "logssim" in self.loss:
            logssim = 1.0 - pytorch_ssim.ssim(torch.log1p(y), Y)
            losses["generator" + str(self.index) + "_logssim"] = logssim
        if "ssim" in self.loss:
            ssim = 1.0 - pytorch_ssim.ssim(y, Y)
            losses["generator" + str(self.index) + "_ssim"] = ssim
        if "nll" in self.loss:
            sigma = max(0.1 + (2.0 - 0.1) * (1 - iteration / 2e4), 0.1)
            ll = -D.Normal(Y, sigma).log_prob(y)
            losses["generator" + str(self.index) + "_nll"] = ll

        if "logl1" in self.loss or "logssim" in self.loss:
            Y = torch.exp(Y) - 1

        output = {}
        output["generated"] = Y
        output["losses"] = losses

        return output
Example #3
    def forward(self, x, y, o, v, p, r, iteration, additional=None):
        Y = self.eval(x, o, v, p, r)
        losses = {}

        # Different loss functions, we used logl1 + logssim
        if "logl1" in self.loss:
            logl1 = 2 * torch.abs(Y - torch.log1p(y))
            losses["generator" + str(self.index) + "_logl1"] = logl1
        if "l1" in self.loss:
            l1 = 2 * torch.abs(Y - y)
            losses["generator" + str(self.index) + "_l1"] = l1
        if "logl2" in self.loss:
            logl2 = 2 * torch.abs(Y -
                                  torch.log1p(y)) * torch.abs(Y -
                                                              torch.log1p(y))
            losses["generator" + str(self.index) + "_l2"] = 250 * logl2
        if "logssim" in self.loss:
            logssim = 1.0 - pytorch_ssim.ssim(torch.log1p(y), Y)
            losses["generator" + str(self.index) + "_logssim"] = logssim
        if "ssim" in self.loss:
            ssim = 1.0 - pytorch_ssim.ssim(y, Y)
            losses["generator" + str(self.index) + "_ssim"] = ssim
        if "nll" in self.loss:
            sigma = max(0.1 + (2.0 - 0.1) * (1 - iteration / 2e5), 0.1)
            ll = -D.Normal(Y, sigma).log_prob(y)
            losses["generator" + str(self.index) + "_nll"] = 0.2 * ll
        if "bce" in self.loss:
            losses["generator" + str(self.index) +
                   "_bce"] = 10 * self.bce_loss(Y, y)
        if "vgg" in self.loss:
            y_norm = torch.log1p(y).clamp(0, 1)
            Y_norm = Y.clamp(0, 1)
            target_features = self.vgg(y_norm)
            output_features = self.vgg(Y_norm)
            vgg_loss = 0
            for i in range(len(target_features)):
                vgg_loss += torch.mean(
                    (target_features[i] - output_features[i]) *
                    (target_features[i] - output_features[i]),
                    dim=[1, 2, 3],
                    keepdim=True)
            losses["generator" + str(self.index) + "_vgg"] = 0.2 * vgg_loss

        if "logl1" in self.loss or "logssim" in self.loss or "logl2" in self.loss:
            Y = torch.exp(Y) - 1

        output = {}
        output["generated"] = Y
        output["losses"] = losses

        return output
Example #4
 def forward(self, input, target):
     base_loss = self.base_loss
     y1 = input[:, 0:1, :, :]
     y2 = input[:, 1:2, :, :]
     base_1 = base_loss(y1, target)
     base_2 = base_loss(y2, target)
     stable_err = F.mse_loss(y1, y2)
     loss = (base_1 * self.base_loss_wt + base_2 * self.base_loss_wt +
             stable_err * self.stable_wt)
     self.metrics = {
         'pixel': (base_1 + base_2) / 2,
         'stable': stable_err,
         'ssim': (ssim.ssim(y1, target) + ssim.ssim(y2, target)) / 2,
         'psnr': (psnr(y1, target) + psnr(y2, target)) / 2
     }
     return loss
Example #5
def ssim(x, y):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = x.to(device) if x.device.type != device.type else x
    y = y.to(device) if y.device.type != device.type else y

    with torch.no_grad():
        return pytorch_ssim.ssim(x, y).item()
Example #6
def ra(input_sr_var, target_sr_var, target_cls_var, model_sr, model_transformer, model, 
    optimizer_sr, optimizer_transformer, criterion_sr, criterion, train=True):
    if train:
        optimizer_sr.zero_grad()

    # pdb.set_trace()
    output_sr = model_sr(input_sr_var)
    # pdb.set_trace()
    loss_sr = criterion_sr(output_sr, target_sr_var)

    loss_cls = 0

    input_cls = process_to_input_cls(output_sr)
    output_cls = model(input_cls)
    loss_cls = criterion(output_cls, target_cls_var)


    # compute ssim for every image 
    ssim = 0
    # not compute during training to save time
    if not train:
        for i in range(output_sr.size(0)):
            sr_image = output_sr[i].unsqueeze(0)
            hr_image = target_sr_var[i].unsqueeze(0)
            ssim += pytorch_ssim.ssim(sr_image, hr_image).item()
        ssim = ssim / output_sr.size(0)

    loss = loss_sr + args.l * loss_cls

    if train:
        loss.backward()
        optimizer_sr.step()

    return loss_sr, loss_cls, output_cls, ssim
Example #7
 def forward(self, x, y):
     # diff = x - y
     # loss_c = torch.mean(torch.sqrt(diff * diff + self.eps))
     # loss_s = 1 - pytorch_ssim.ssim(x, y)
     # loss = loss_c + self.lambda_* loss_s
     loss = 1 - pytorch_ssim.ssim(x, y)
     return loss, None
Example #8
 def forward(self, out_labels, out_images, target_images):
     # Adversarial Loss
     adversarial_loss = torch.mean(1 - out_labels)
     # Perception Loss
     perception_loss = self.mse_loss(self.loss_network(out_images),
                                     self.loss_network(target_images))
     # Image Loss
     image_loss = self.mse_loss(out_images, target_images)
     # TV Loss
     tv_loss = self.tv_loss(out_images)
     #ssim Loss
     #print("cal ssim loss !!!")
     ssim_loss = pytorch_ssim.ssim(Variable(target_images),
                                   Variable(out_images))
     #         out_batch = out_images.cpu().detach().numpy()
     #         label_batch = target_images.cpu().detach().numpy()
     #         #print("to cpu !!!")
     #         N, _, _, _ = out_batch.shape
     #         #print(type(label_batch[0]))
     #         ssim_loss = 0
     #         for i in range(N):
     #             ssim_loss += compare_ssim(label_batch[i],out_batch[i], win_size=3, multichannel=True)
     #         ssim_loss /= N
     #         #print("get ssim_loss  ------")
     return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss - 1e-6 * ssim_loss
Example #9
def ssim(img1, img2):
    img1 = torch.from_numpy(np.rollaxis(img1, 2)).float().unsqueeze(0) / 255.0
    img2 = torch.from_numpy(np.rollaxis(img2, 2)).float().unsqueeze(0) / 255.0
    img1 = Variable(img1, requires_grad=False)  # now shaped (1, C, H, W)
    img2 = Variable(img2, requires_grad=False)
    ssim_value = pytorch_ssim.ssim(img1, img2).item()
    return ssim_value
Example #10
    def maxSsim(cls):

        npImg1 = cv2.imread("einstein.png")

        img1 = torch.from_numpy(np.rollaxis(npImg1,
                                            2)).float().unsqueeze(0) / 255.0
        img2 = torch.rand(img1.size())

        if torch.cuda.is_available():
            img1 = img1.cuda()
            img2 = img2.cuda()

        img1 = Variable(img1, requires_grad=False)
        img2 = Variable(img2, requires_grad=True)

        # Functional: pytorch_ssim.ssim(img1, img2, window_size = 11, size_average = True)
        ssim_value = pytorch_ssim.ssim(img1, img2).data[0]
        print("Initial ssim:", ssim_value)

        # Module: pytorch_ssim.SSIM(window_size = 11, size_average = True)
        ssim_loss = pytorch_ssim.SSIM()

        optimizer = optim.Adam([img2], lr=0.01)

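        # gradient ascent on img2: minimizing -SSIM drives img2 toward img1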
        while ssim_value < 0.95:
            optimizer.zero_grad()
            ssim_out = -ssim_loss(img1, img2)
            ssim_value = -ssim_out.data[0]
            print(ssim_value)
            ssim_out.backward()
            optimizer.step()
Example #11
def infer(data_path, model):
    psnr = utils.AvgrageMeter()
    ssim = utils.AvgrageMeter()

    model.eval()
    transforms = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor()])

    with torch.no_grad():
        for step, pt in enumerate(glob.glob(data_path)):
            image = np.array(Image.open(pt))

            clear_image = utils.crop_img(image[:, :image.shape[1] // 2, :],
                                         base=args.patch_size)
            rain_image = utils.crop_img(image[:, image.shape[1] // 2:, :],
                                        base=args.patch_size)

            # # Test on whole image
            # input = transforms(rain_image).unsqueeze(dim=0).cuda()
            # target = transforms(clear_image).unsqueeze(dim=0).cuda(async=True)
            # logits = model(input)
            # n = input.size(0)

            # Test on whole image with data augmentation
            target = transforms(clear_image).unsqueeze(dim=0).cuda()
            for i in range(8):
                im = utils.data_augmentation(rain_image, i)
                input = transforms(im.copy()).unsqueeze(dim=0).cuda()
                begin_time = time.time()
                if i == 0:
                    logits = utils.inverse_augmentation(
                        model(input).cpu().numpy().transpose(0, 2, 3, 1)[0], i)
                else:
                    logits = logits + utils.inverse_augmentation(
                        model(input).cpu().numpy().transpose(0, 2, 3, 1)[0], i)
                end_time = time.time()
            n = input.size(0)
            logits = transforms(logits / 8).unsqueeze(dim=0).cuda()

            # # Test on patches2patches
            # noise_patches = utils.slice_image2patches(rain_image, patch_size=args.patch_size)
            # image_patches = utils.slice_image2patches(clear_image, patch_size=args.patch_size)
            # input = torch.tensor(noise_patches.transpose(0,3,1,2)/255.0, dtype=torch.float32).cuda()
            # target = torch.tensor(image_patches.transpose(0,3,1,2)/255.0, dtype=torch.float32).cuda()
            # logits = model(input)
            # n = input.size(0)

            s = pytorch_ssim.ssim(torch.clamp(logits, 0, 1), target)
            p = utils.compute_psnr(
                np.clip(logits.detach().cpu().numpy(), 0, 1),
                target.detach().cpu().numpy())
            psnr.update(p, n)
            ssim.update(s, n)
            print('psnr:%6f ssim:%6f' % (p, s))

            # Image.fromarray(rain_image).save(args.save+'/'+str(step)+'_noise.png')
            # Image.fromarray(np.clip(logits[0].cpu().numpy().transpose(1,2,0)*255, 0, 255).astype(np.uint8)).save(args.save+'/'+str(step)+'_denoised.png')

    return psnr.avg, ssim.avg
Example #12
 def forward(self, x, y):
     diff = x - y
     loss_c = torch.mean(torch.sqrt(diff * diff + self.eps))
     loss_s = 1 - pytorch_ssim.ssim(x, y)
     x_input = torch.cat((x, y), 1)
     loss_v = 1 - self.vmaf_model(x_input)
     loss = loss_c + self.lambda_ * loss_s + self.lambda_vmaf * loss_v
     return loss, [loss_c, loss_s, loss_v]
Example #13
def ssim(outputs, labels):
    if torch.cuda.is_available():
        outputs = Variable(torch.from_numpy(outputs)).cuda()
        labels = Variable(torch.from_numpy(labels)).cuda()

    ssim = ps.ssim(outputs, labels)

    return ssim
Example #14
def ssim(outputs, labels):
    if torch.cuda.is_available():
        outputs = Variable(torch.from_numpy(outputs)).cuda()
        labels = Variable(torch.from_numpy(labels)).cuda()
    #print(outputs.size)
    ssim = ps.ssim(outputs, labels)
    #print('ssim')
    #print(ssim)
    return ssim
Example #15
def similarityLoss(outImage, inImage, alpha, windowSize):
    L1LossTerm = F.l1_loss(outImage, inImage, reduction='mean')
    SSIMLossTerm = pytorch_ssim.ssim(outImage,
                                     inImage,
                                     window_size=windowSize,
                                     size_average=True)

    loss = alpha * ((1 - SSIMLossTerm) / 2) + (1 - alpha) * L1LossTerm
    #print (loss)
    return loss
Example #16
def ssim(imgs, re_imgs):
    ssim_list = torch.zeros(imgs.size(0)).to(main.device)
    for i, (img, re_img) in enumerate(zip(imgs, re_imgs), 0):
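        # two unsqueezes add batch and channel dims: (H, W) -> (1, 1, H, W)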
        img = img.unsqueeze(0)
        re_img = re_img.unsqueeze(0)
        img = img.unsqueeze(0)
        re_img = re_img.unsqueeze(0)
        # img, re_img = img*255, re_img*255
        ssim_list[i] = pytorch_ssim.ssim(img, re_img)
    return ssim_list
Example #17
def write_error(estimated, reference, resultPath):
    curPSNR = compute_psnr(estimated, reference)
    estimated = Variable(estimated.unsqueeze(3).permute(3, 2, 0, 1))
    reference = Variable(reference.unsqueeze(3).permute(3, 2, 0, 1))
    curSSIM = pytorch_ssim.ssim(estimated, reference).data[0]

    fid = open(resultPath + '/ObjectiveQuality_GAN.txt', 'w')
    fid.write('PSNR: %3.2f\n' % curPSNR)
    fid.write('SSIM: %1.3f\n' % curSSIM)
    fid.close()
Example #18
    def basicUsage(cls):
        img1 = Variable(torch.rand(1, 1, 256, 256))
        img2 = Variable(torch.rand(1, 1, 256, 256))
        if torch.cuda.is_available():
            img1 = img1.cuda()
            img2 = img2.cuda()

        print(pytorch_ssim.ssim(img1, img2))
        ssim_loss = pytorch_ssim.SSIM(window_size=11)
        print(ssim_loss(img1, img2))
Example #19
def G_loss(g1, g2, pixel_label, dl, dh, trade_off, batch_size):
    content_loss = ((mse(g1, pixel_label) + mse(g2, pixel_label)) /
                    (10 * batch_size))

    adv_cls_loss = (torch.sum(bce(dl, dh) * trade_off) / batch_size) * 15000

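    # SSIM loss term, equivalent to 2000 * (1 - ssim)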
    ssim_loss = pytorch_ssim.ssim(g2, pixel_label)
    ssim_loss = 2 * (1000 - (ssim_loss * 1000))

    return content_loss, ssim_loss, adv_cls_loss
Example #20
def calc_ssim(sr, hr, scale, rgb_range, dataset=None):
    if hr.nelement() == 1: return 0
    shave = scale
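    # BT.601 RGB-to-luma conversion; SSIM is computed on the Y channel only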
    gray_coeffs = [65.738, 129.057, 25.064]
    convert_s = sr.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
    sr = sr.mul(convert_s).sum(dim=1).unsqueeze(0) + 16 * torch.ones_like(hr)
    convert_h = hr.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
    hr = hr.mul(convert_h).sum(dim=1).unsqueeze(0) + 16 * torch.ones_like(hr)
    return pytorch_ssim.ssim(sr[..., shave:-shave, shave:-shave], hr[..., shave:-shave, shave:-shave],
                             data_range=rgb_range, size_average=True)
Example #21
    def train_just_vae(self, s_batch, next_obs_batch):
        s_batch = torch.FloatTensor(s_batch).to(self.device)
        next_obs_batch = torch.FloatTensor(next_obs_batch).to(self.device)

        sample_range = np.arange(len(s_batch))
        reconstruction_loss = nn.MSELoss(reduction='none')

        recon_losses = np.array([])
        kld_losses = np.array([])

        for i in range(self.epoch):
            np.random.shuffle(sample_range)
            for j in range(int(len(s_batch) / self.batch_size)):
                sample_idx = sample_range[self.batch_size * j:self.batch_size *
                                          (j + 1)]

                # --------------------------------------------------------------------------------
                # for generative curiosity (VAE loss)
                gen_next_state, mu, logvar = self.vae(
                    next_obs_batch[sample_idx])

                d = len(gen_next_state.shape)
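                # negative SSIM as the reconstruction loss: higher similarity -> lower loss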
                recon_loss = -1 * pytorch_ssim.ssim(gen_next_state,
                                                    next_obs_batch[sample_idx],
                                                    size_average=False)
                # recon_loss = reconstruction_loss(gen_next_state, next_obs_batch[sample_idx]).mean(axis=list(range(1, d)))

                kld_loss = -0.5 * (1 + logvar - mu.pow(2) -
                                   logvar.exp()).sum(axis=1)

                # TODO: keep this proportion of experience used for VAE update?
                # Proportion of experience used for VAE update
                mask = torch.rand(len(recon_loss)).to(self.device)
                mask = (mask < self.update_proportion).type(
                    torch.FloatTensor).to(self.device)
                recon_loss = (recon_loss * mask).sum() / torch.max(
                    mask.sum(),
                    torch.Tensor([1]).to(self.device))
                kld_loss = (kld_loss * mask).sum() / torch.max(
                    mask.sum(),
                    torch.Tensor([1]).to(self.device))

                recon_losses = np.append(recon_losses,
                                         recon_loss.detach().cpu().numpy())
                kld_losses = np.append(kld_losses,
                                       kld_loss.detach().cpu().numpy())
                # ---------------------------------------------------------------------------------

                self.optimizer.zero_grad()
                loss = recon_loss + kld_loss
                loss.backward()
                global_grad_norm_(list(self.vae.parameters()))
                self.optimizer.step()

        return recon_losses, kld_losses
Example #22
def process(args, verbose=True):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Model(args).to(device)
    model.load_state_dict(torch.load(args.weights, map_location=device))
    model.eval()

    dataset = SpectogramDataset(args.source_dir, extension=args.extension)

    files = dataset.files
    if verbose:
        print("Processing {} files.".format(len(files)))

    filenames = []
    results = []
    denoised_filenames = []

    target_dir = Path(args.target_dir)
    Path.mkdir(target_dir, exist_ok=True, parents=True)

    for file_idx in tqdm(range(len(dataset)), unit="files"):
        filename = files[file_idx]
        filenames.append(filename)

        img = dataset[file_idx].unsqueeze(0)
        img = img.to(device, dtype=torch.float)
        noise = model(img).to("cpu")
        img = img.to("cpu")

        if ssim(img - noise, img).data.item() >= args.threshold:
            results.append("clean")
            denoised_filenames.append("")
        else:
            results.append("noisy")

            denoised_filename = target_dir / (
                args.denoised_subdir +
                filename.split(str(Path(args.source_dir)))[1])
            Path.mkdir(denoised_filename.parent, exist_ok=True, parents=True)
            denoised_filenames.append(str(denoised_filename))

            clean_img = img - noise

            np.save(denoised_filename, clean_img.to("cpu").detach().numpy())

    results_df = pd.DataFrame({
        "file_name": filenames,
        "result": results,
        "denoised_file": denoised_filenames
    })

    results_df.to_csv(target_dir / "results.csv", index=False)

    return results_df
Example #23
def main():
    args = get_args()
    img_size = args.eval_image_size
    src_dir = args.src_dir
    num_samples = args.num_samples

    l1_loss_fn = nn.L1Loss()
    loss_fn_vgg = lpips.LPIPS(net='vgg')

    l1_results = np.empty(num_samples)
    ssim_results = np.empty(num_samples)
    lpips_results = np.empty(num_samples)

    transforms = trafo.Compose(
        [trafo.ToTensor(),
         trafo.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    for idx in range(num_samples):
        idx_str = "{:05d}".format(idx)

        # reads images as (height,width,channel)
        im_pred = imageio.imread("{}/{}_pred.png".format(src_dir, idx_str))
        im_true = imageio.imread("{}/{}_tgt.png".format(src_dir, idx_str))

        # normalize the values to be [-1,1]
        im_pred_torch = transforms(im_pred).unsqueeze(0)
        im_true_torch = transforms(im_true).unsqueeze(0)

        # resize images to be at the desired resolution if not already
        if not args.no_resize and (im_pred_torch.shape[-1] != img_size
                                   or im_true_torch.shape[-1] != img_size):
            im_pred_torch = F.interpolate(im_pred_torch, (img_size, img_size))
            im_true_torch = F.interpolate(im_true_torch, (img_size, img_size))

        # compute l1 error
        l1_loss = l1_loss_fn(im_pred_torch, im_true_torch)
        l1_results[idx] = l1_loss

        # compute ssim error
        ssim_loss = ssim(im_pred_torch, im_true_torch)
        ssim_results[idx] = ssim_loss

        # Compute lpips score
        lp = loss_fn_vgg(im_pred_torch, im_true_torch)
        lpips_results[idx] = lp

        if idx % 1000 == 0:
            print(
                f"{idx}, im dim: {im_pred_torch.shape[-1]} -- l1, ssim loss, lpips: {l1_loss, ssim_loss, lp}"
            )

    print(f"L1 loss mean: {l1_results.mean()}, std: {l1_results.std()}")
    print(f"SSIM loss mean: {ssim_results.mean()}, std: {ssim_results.std()}")
    print(f"LPIPS score: {lpips_results.mean()}, std: {lpips_results.std()}")
Example #24
    def testing():
        path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net = torch.load(path)
        net.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in testing_data_loader:
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda == True:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                dehaze = net(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

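                # PSNR from MSE as 10 * log10(1 / mse), i.e. the peak value is assumed to be 1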
                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()

            print("===> Testing")
            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(testing_data_loader)))
            print("===> Mse seg: {:.4f} ".format(total_mse / len(testing_data_loader)))
            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(testing_data_loader)))
            print("===> PSNR dehaze: {:.4f} ".format(avg_psnr_dehaze / len(testing_data_loader)))
            print("===> SSIM dehaze: {:.4f} ".format(avg_ssim_dehaze / len(testing_data_loader)))
Example #25
    def _get_SSIM_score(self, pred_image: torch.Tensor,
                        gt_image: torch.Tensor):
        """
        pred_image.shape == (1xCxWxH)
        gt_image.shape == (1xCxWxH)

        """
        gt_image = gt_image / 255
        if self.device != 'cpu':
            pred_image = pred_image.to(self.device)
            gt_image = gt_image.to(self.device)
        score = ssim(pred_image, gt_image)
        self.SSIM_scores.append(score.cpu().numpy())
Example #26
def get_acc(masks, preds, batch_size):
    mssim = 0
    if masks.size(0) < batch_size:
        batch_size = masks.size(0)

    for index in range(batch_size):

        mask = Variable(masks[index].unsqueeze(0))
        pred = Variable(preds[index].unsqueeze(0))
        mssim += pytorch_ssim.ssim(mask, pred)
        #mssim = ssim(mask,pred,multichannel=True,gaussian_weights=True)

    return mssim / batch_size
Example #27
def test(test_gen, model, criterion, SR_dir):
    avg_psnr = 0
    avg_ssim = 0
    med_time = []

    with torch.no_grad():
        for iteration, batch in enumerate(test_gen, 1):
            # print(iteration)
            Blur = batch[0]
            HR = batch[1]
            Blur = Blur.to(device)
            HR = HR.to(device)

            name = batch[2][0][:-4]

            start_time = time.perf_counter()  # start timing this image

            sr = model(Blur)

            # clamp the output to [0, 1]; if the model returns a tuple, use its first element
            try:
                sr = torch.clamp(sr, min=0, max=1)
            except:
                sr = sr[0]
                sr = torch.clamp(sr, min=0, max=1)
            torch.cuda.synchronize()  # wait for queued GPU work before stopping the timer
            evaluation_time = time.perf_counter() - start_time  # time spent on this image
            med_time.append(evaluation_time)

            ssim = pytorch_ssim.ssim(sr, HR)
            #print(ssim)
            avg_ssim += ssim

            mse = criterion(sr, HR)
            psnr = 10 * log10(1 / mse)
            #
            resultSRDeblur = transforms.ToPILImage()(sr.cpu()[0])
            resultSRDeblur.save(
                join(SR_dir, '{0}_{1}.png'.format(name, opt.name)))

            print("Processing {}:  PSNR:{} TIME:{}".format(
                iteration, psnr, evalation_time))
            avg_psnr += psnr

        print("===> Avg. SR SSIM: {:.4f} ".format(avg_ssim / iteration))
        print("Avg. SR PSNR:{:4f} dB".format(avg_psnr / iteration))
        median_time = statistics.median(med_time)
        print(median_time)
        return avg_psnr / iteration
Example #28
def ssim(img1, img2):
    # resizing the images to same dimensions
    if img1.shape != img2.shape:
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]),
                          interpolation=cv2.INTER_LANCZOS4)

    # convert the HxWxC arrays to (1, C, H, W) float tensors in [0, 1] for PyTorch
    img1 = torch.from_numpy(np.rollaxis(img1, 2)).float().unsqueeze(0) / 255.0
    img2 = torch.from_numpy(np.rollaxis(img2, 2)).float().unsqueeze(0) / 255.0

    # ssim calculation (return a tensor)
    ssim = pytorch_ssim.ssim(img1, img2)

    return ssim.item()  # to get the value from the tensor
Example #29
def summaries(writer, result, fbp, true, loss, it, do_print=False):
    residual = result - true
    squared_error = residual**2
    mse = torch.mean(squared_error)
    maxval = torch.max(true) - torch.min(true)
    psnr = 20 * torch.log10(maxval) - 10 * torch.log10(mse)

    relative = torch.mean((result - true)**2) / torch.mean((fbp - true)**2)
    ssi = ssim(result, true)
    ssi_fbp = ssim(result, fbp)
    relative_ssim = ssi / ssi_fbp
    if do_print:
        print(it, mse.item(), psnr.item(), relative.item(), ssi.item(),
              relative_ssim.item())

    writer.add_scalar('loss', loss, it)
    writer.add_scalar('psnr', psnr, it)
    writer.add_scalar('relative', relative, it)
    writer.add_scalar('ssim', ssi, it)
    writer.add_scalar('relative ssim', relative_ssim, it)

    util.summary_image(writer, 'result', result, it)
    util.summary_image(writer, 'true', true, it)
Example #30
def train(opt, data_loader, model, visualizer):
	logger = Logger('./checkpoints/log/')
	dataset = data_loader.load_data()
	dataset_size = len(data_loader)
	print('#training images = %d' % dataset_size)
	total_steps = 0
	for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
		epoch_start_time = time.time()
		epoch_iter = 0
		for i, data in enumerate(dataset):
			iter_start_time = time.time()
			total_steps += opt.batchSize
			epoch_iter += opt.batchSize
			model.set_input(data)
			model.optimize_parameters()

			if total_steps % opt.display_freq == 0:
				results = model.get_current_visuals()
				ssim = pytorch_ssim.ssim(results['fake_B'], results['real_B']).item()
				psnrMetric = PSNR(results['Restored_Train'],results['Sharp_Train'])
				print('PSNR = %f, SSIM = %.4f' % (psnrMetric, ssim))
				results.pop('fake_B') # drop the extra keys once SSIM has been computed
				results.pop('real_B')
				visualizer.display_current_results(results,epoch)

			if total_steps % opt.print_freq == 0:
				errors = model.get_current_errors()
				for tag, value in errors.items():
					logger.scalar_summary(tag, value, epoch)
				t = (time.time() - iter_start_time) / opt.batchSize
				visualizer.print_current_errors(epoch, epoch_iter, errors, t)
				if opt.display_id > 0:
					visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)

			if total_steps % opt.save_latest_freq == 0:
				print('saving the latest model (epoch %d, total_steps %d)' %
					  (epoch, total_steps))
				model.save('latest')

		if epoch % opt.save_epoch_freq == 0:
			print('saving the model at the end of epoch %d, iters %d' %
				  (epoch, total_steps))
			model.save('latest')
			model.save(epoch)

		print('End of epoch %d / %d \t Time Taken: %d sec' %
			  (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))

		if epoch > opt.niter:
			model.update_learning_rate()
Example #31
npImg1 = cv2.imread("einstein.png")

img1 = torch.from_numpy(np.rollaxis(npImg1, 2)).float().unsqueeze(0)/255.0
img2 = torch.rand(img1.size())

if torch.cuda.is_available():
    img1 = img1.cuda()
    img2 = img2.cuda()


img1 = Variable(img1, requires_grad=False)
img2 = Variable(img2, requires_grad=True)


# Functional: pytorch_ssim.ssim(img1, img2, window_size = 11, size_average = True)
ssim_value = pytorch_ssim.ssim(img1, img2).data[0]
print("Initial ssim:", ssim_value)

# Module: pytorch_ssim.SSIM(window_size = 11, size_average = True)
ssim_loss = pytorch_ssim.SSIM()

optimizer = optim.Adam([img2], lr=0.01)

while ssim_value < 0.95:
    optimizer.zero_grad()
    ssim_out = -ssim_loss(img1, img2)
    ssim_value = - ssim_out.data[0]
    print(ssim_value)
    ssim_out.backward()
    optimizer.step()