    def __init__(self,
                 device=None,
                 input_size=[1, 3, 100, 100],
                 batch_size=1,
                 learning_rate=0.001,
                 loss_type='ssim'):

        super(CNN_Encoder, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels=input_size[1],
                      out_channels=32,
                      kernel_size=(3, 3),
                      stride=(2, 2),
                      padding=(1, 1),
                      bias=False),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=32,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(2, 2),
                      padding=(1, 1),
                      bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(2, 2),
                      padding=(1, 1),
                      bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(2, 2),
                      padding=(1, 1),
                      bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.1),
        )
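        # With the default input_size, four stride-2 convs reduce a 3x100x100 image to a 256x7x7 feature map.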

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

        if loss_type == 'ssim':
            self.positive_loss = pytorch_ssim.SSIM(window_size=11)
            self.negative_loss = pytorch_ssim.SSIM(window_size=11)

        elif loss_type == 'mse':
            self.positive_loss = nn.MSELoss()
            self.negative_loss = nn.MSELoss()

        else:
            raise ValueError('Unsupported loss_type: {}'.format(loss_type))

        self.batch_size = batch_size

        self.to(device)
Example #2
 def train_step(self, model, data, device):
     model.unc_model.train()
     model.r_model.eval()
     input, target, mean, std, norm, _, data_range = data
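     # The frozen reconstruction model produces a target loss under no_grad;
     # the uncertainty model is then trained to predict it.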
     with torch.no_grad():
         output, target = self.module.inference(model.r_model, data[:-1],
                                                device)
         if self.loss == 'l1':
             target_loss = F.l1_loss(
                 output, target,
                 reduction='none').sum(dim=2).sum(dim=1).unsqueeze(1)
         elif self.loss == 'ssim':
             SSIMLoss = pytorch_ssim.SSIM(window_size=7, size_average=False)
             target_loss = SSIMLoss(output.unsqueeze(1),
                                    target.unsqueeze(1),
                                    data_range.to(device)).unsqueeze(1)
             # Normalize target loss
             target_loss = (target_loss - 0.9) / 0.1
         else:
             raise ValueError('Invalid loss')
     mean = mean.unsqueeze(1).unsqueeze(2).to(device)
     std = std.unsqueeze(1).unsqueeze(2).to(device)
     output = transforms.normalize(output, mean, std, eps=1e-11)
     loss_prediction = model.unc_model(output.unsqueeze(1))
     return F.mse_loss(loss_prediction, target_loss)
Example #3
def init_loss(opt, tensor):
    disc_loss = None
    content_loss = None
    L1_loss = None
    ssim_loss = None  # stays None for models that do not use it

    if opt.model in ('Seminetmodel', 'DAnetmodel', 'DAnetmodel2',
                     'Syn2realmodel', 'CycleGanmodel'):
        content_loss = PerceptualLoss()
        content_loss.initialize(nn.MSELoss())
        L1_loss = ContentLoss()
        L1_loss.initialize(nn.MSELoss())
        ssim_loss = ContentLoss()
        ssim_loss.initialize(pytorch_ssim.SSIM())

    elif opt.model == 'pix2pix':
        content_loss = ContentLoss()
        content_loss.initialize(nn.L1Loss())
    else:
        raise ValueError("Model [%s] not recognized." % opt.model)

    if opt.gan_type == 'wgan-gp':
        disc_loss = DiscLossWGANGP()
    elif opt.gan_type == 'lsgan':
        disc_loss = DiscLossLS()
    elif opt.gan_type == 'gan':
        disc_loss = DiscLoss()
    else:
        raise ValueError("GAN [%s] not recognized." % opt.gan_type)
    disc_loss.initialize(opt, tensor)
    return disc_loss, content_loss, L1_loss, ssim_loss
Example #4
def reconstruction_loss(args, x, y):
    L1_loss = torch.nn.L1Loss(reduction='mean')

    ssim_loss = pytorch_ssim.SSIM(window_size=args.window_size)  # size-averaged
    loss = (1 - args.beta) * L1_loss(x, y) + \
        args.beta * torch.clamp((1 - ssim_loss(x, y)) / 2, 0, 1)

    return loss
Example #5
    def maxSsim(cls):

        npImg1 = cv2.imread("einstein.png")

        img1 = torch.from_numpy(np.rollaxis(npImg1,
                                            2)).float().unsqueeze(0) / 255.0
        img2 = torch.rand(img1.size())

        if torch.cuda.is_available():
            img1 = img1.cuda()
            img2 = img2.cuda()

        img1 = Variable(img1, requires_grad=False)
        img2 = Variable(img2, requires_grad=True)

        # Functional: pytorch_ssim.ssim(img1, img2, window_size = 11, size_average = True)
        ssim_value = pytorch_ssim.ssim(img1, img2).item()
        print("Initial ssim:", ssim_value)

        # Module: pytorch_ssim.SSIM(window_size = 11, size_average = True)
        ssim_loss = pytorch_ssim.SSIM()

        optimizer = optim.Adam([img2], lr=0.01)

        while ssim_value < 0.95:
            optimizer.zero_grad()
            ssim_out = -ssim_loss(img1, img2)
            ssim_value = -ssim_out.item()
            print(ssim_value)
            ssim_out.backward()
            optimizer.step()
Example #6
    def __init__(self, argx, device):
        super(Losses, self).__init__()
        self.args = argx

        if self.args.loss_type == 'l1bl2':
            self.outputLoss, self.attLoss, self.wrloss = nn.L1Loss(
            ), nn.BCELoss(), nn.MSELoss()
        elif self.args.loss_type == 'l1wbl2':
            self.outputLoss, self.attLoss, self.wrloss = nn.L1Loss(
            ), WeightedBCE(), nn.MSELoss()
        elif self.args.loss_type == 'l2wbl2':
            self.outputLoss, self.attLoss, self.wrloss = nn.MSELoss(
            ), WeightedBCE(), nn.MSELoss()
        elif self.args.loss_type == 'l2xbl2':
            self.outputLoss, self.attLoss, self.wrloss = nn.MSELoss(
            ), nn.BCEWithLogitsLoss(), nn.MSELoss()
        else:  # l2bl2
            self.outputLoss, self.attLoss, self.wrloss = nn.MSELoss(
            ), nn.BCELoss(), nn.MSELoss()

        if self.args.style_loss > 0:
            self.vggloss = VGGLoss(self.args.sltype).to(device)

        if self.args.ssim_loss > 0:
            self.ssimloss = pytorch_ssim.SSIM().to(device)

        self.outputLoss = self.outputLoss.to(device)
        self.attLoss = self.attLoss.to(device)
        self.wrloss = self.wrloss.to(device)
Example #7
def main(args):
    encoder = Encoder(out_channels=30)
    decoder = Decoder(out_channels=30)
    predictor = PD(30)
    criterion = pytorch_ssim.SSIM(window_size=11)

    opt = torch.optim.Adam([{
        'params': encoder.parameters()
    }, {
        'params': decoder.parameters()
    }],
                           lr=args.lr)
    pre_opt = torch.optim.Adam([{
        'params': predictor.parameters()
    }],
                               lr=args.lr)
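    # Halve the encoder/decoder learning rate at each milestone epoch.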
    sch = torch.optim.lr_scheduler.MultiStepLR(opt,
                                               args.lr_milestion,
                                               gamma=0.5)

    transform = transforms.Compose([transforms.ToTensor()])
    train_set = Dataset(train=True, transform=transform)
    test_set = Dataset(train=False, transform=transform)
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers,
                             pin_memory=True)
    train(encoder, decoder, predictor, train_loader, test_loader, opt, pre_opt,
          sch, criterion, args)
Example #8
def test(test_data_loader, epoch):
    psnrs = []
    mses = []
    ssims = []
    for iteration, batch in enumerate(test_data_loader, 1):
        model.eval()
        data, label = \
            Variable(batch[0]), \
            Variable(batch[1])

        if opt.cuda:
            data = data.cuda()
            label = label.cuda()
        else:
            data = data.cpu()
            label = label.cpu()

        with torch.no_grad():
            output = model(data)
        output = torch.clamp(output, 0., 1.)
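        # MSE/PSNR assume pixel values in [0, 1] (output is clamped above).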
        mse = nn.MSELoss(reduction='mean')(output, label).cpu()
        mses.append(mse.data)
        psnr = 10 * np.log10(1.0 / mse.data)
        psnrs.append(psnr)

        ssim_loss = pytorch_ssim.SSIM()
        ssim_out = (-1) * ssim_loss(output, label).cpu()
        ssims.append(ssim_out.data)

    psnr_mean = np.mean(psnrs)

    mse_mean = np.mean(mses)
    ssims_mean = np.mean(ssims)
    score = psnr_mean * ssims_mean

    print("Vaild  epoch %d psnr: %f ssim: %f score: %f" %
          (epoch, psnr_mean, ssims_mean, score))
    with open('logs/' + name + 'log.csv', 'a') as file:
        file.write(
            str(epoch) + ',' + str(psnr_mean) + ',' + str(ssims_mean) + ',' +
            str(score) + '\n')

    logger.add_scalar('psnr', psnr_mean, epoch)
    logger.add_scalar('mse', mse_mean, epoch)
    logger.add_scalar('score', score, epoch)

    data = make_grid(data.data)
    label = make_grid(label.data)
    output = make_grid(output.data)

    logger.add_image('data', data, epoch)
    logger.add_image('label', label, epoch)
    logger.add_image('output', output, epoch)

    if opt.report:
        urllib.request.urlopen(
            "https://sc.ftqq.com/SCU21303T3ae6f3b60b71841d0def9295e4a500905a7524916a85c.send?text=epoch_{}_loss_{}"
            .format(epoch, psnr_mean))
    return score
Example #9
def myssim_loss(inp, target):
    inp = inp.cuda()
    target = target.cuda()
    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    ssim_loss.cuda()
    ssim_total = 1 - ssim_loss(inp, target)

    return ssim_total
Example #10
def compute_ssim(target, prediction):
    # because of the normalize
    target = (target * 0.5 + 0.5) * 255.0
    prediction = (prediction * 0.5 + 0.5) * 255.0
    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    ssim = ssim_loss(target, prediction)
    # print(pytorch_ssim.ssim(target, prediction))
    return ssim
Example #11
    def validation(self):  # input as YCbCr; to be completed
        def is_image_file(filename):
            return any(
                filename.endswith(extension)
                for extension in ['.png', '.jpg', '.jpeg', '.bmp'])

        print('Validation is started.')
        # os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
        #torch.cuda.set_device(4)
        # test_data_loader = self.load_dataset(dataset='test')
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.eval()

        ###Set SSIM and PSNR
        ssim = pytorch_ssim.SSIM(
            window_size=11)  ###not ssim loss but ssim value
        total_ssim = 0
        total_psnr = 0

        image_HR_dir = self.image_test_HR_dir
        image_LR_dir = self.image_test_LR_dir

        image_HR_filenames = [
            join(image_HR_dir, x) for x in sorted(listdir(image_HR_dir))
            if is_image_file(x)
        ]
        image_LR_filenames = [
            join(image_LR_dir, x) for x in sorted(listdir(image_LR_dir))
            if is_image_file(x)
        ]
        file_num = len(image_HR_filenames)
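        # Upscale each LR image with the model and accumulate PSNR/SSIM statistics against its HR counterpart.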
        for idx in range(file_num):
            # print(idx)
            image = pil_image.open(image_LR_filenames[idx]).convert('RGB')
            hr = pil_image.open(image_HR_filenames[idx]).convert('RGB')
            lr = image
            bicubic = lr.resize(
                (lr.width * self.scale_factor, lr.height * self.scale_factor),
                resample=pil_image.BICUBIC)
            lr, _ = preprocess(lr, device)
            hr, _ = preprocess(hr, device)
            _, ycbcr = preprocess(bicubic, device)

            with torch.no_grad():
                preds = self.model(lr).clamp(0.0, 1.0)

                ### To calculate psnr and ssim
                psnr = calc_psnr(hr, preds)
                # print('psnr', psnr)
                ssim_value = 1 - ssim(hr, preds)
                # print('ssim_value', ssim_value)
                total_ssim += ssim_value
                total_psnr += psnr

        print('eval psnr: {:.2f}'.format(total_psnr / file_num),
              'eval ssim: {:.2f}'.format(total_ssim / file_num))
Example #12
def train(config):
    cuda = torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    dehaze_net = net.AODNet().to(device)
    dehaze_net.apply(weights_init)

    train_dataset = dataloader.DataLoader(config.path_clearimg,
                                          config.path_hazyimg)
    val_dataset = dataloader.DataLoader(config.path_clearimg,
                                        config.path_hazyimg,
                                        mode="val")
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.train_batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=config.val_batch_size,
                                             shuffle=True,
                                             num_workers=config.num_workers,
                                             pin_memory=True)
    # criterion = nn.MSELoss().to(device)
    criterion = pytorch_ssim.SSIM(window_size=11)
    optimizer = torch.optim.Adam(dehaze_net.parameters(),
                                 lr=config.lr,
                                 weight_decay=config.weight_decay)
    # optimizer = torch.optim.SGD(dehaze_net.parameters(
    # ), lr=config.lr, weight_decay=config.weight_decay)
    dehaze_net.train()
    for epoch in range(config.num_epochs):
        for iteration, (clear_img, hazy_img) in enumerate(train_loader):
            clear_img = clear_img.to(device)
            hazy_img = hazy_img.to(device)
            clean_image = dehaze_net(hazy_img)
            loss = criterion(clean_image, clear_img)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(dehaze_net.parameters(),
                                           config.grad_clip_norm)
            optimizer.step()

            if ((iteration + 1) % config.display_iter) == 0:
                print("Epoch", epoch + 1, ": Loss at iteration", iteration + 1,
                      ":", loss.item())

        # Validation Stage
        for iter_val, (clear_img, hazy_img) in enumerate(val_loader):
            clear_img = clear_img.to(device)
            hazy_img = hazy_img.to(device)
            clean_image = dehaze_net(hazy_img)
            torchvision.utils.save_image(
                torch.cat((hazy_img, clean_image, clear_img), 0),
                config.sample_output_folder + str(iter_val + 1) + ".jpg")
        torch.save(dehaze_net.state_dict(),
                   config.snapshots_folder + "Epoch" + str(epoch + 1) + '.pt')
Example #13
    def basicUsage(cls):
        img1 = Variable(torch.rand(1, 1, 256, 256))
        img2 = Variable(torch.rand(1, 1, 256, 256))
        if torch.cuda.is_available():
            img1 = img1.cuda()
            img2 = img2.cuda()

        print(pytorch_ssim.ssim(img1, img2))
        ssim_loss = pytorch_ssim.SSIM(window_size=11)
        print(ssim_loss(img1, img2))
Example #14
def ssim(img, ref, model_style=None, style_losses=None):

    img.requires_grad_()
    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    ssim_value = ssim_loss(ref, img)
    ssim_out = -weight_ssim * ssim_value
    ssim_out.backward()
    grad_return = img.grad.flatten()

    return ssim_value, grad_return
Example #15
    def __init__(self):
        super(GI_Model, self).__init__()
        self.act = nn.LeakyReLU(0.01, inplace = True)
        self.pool = nn.AvgPool2d(2, stride=2)
        self.ssim_loss = pytorch_ssim.SSIM(window_size = 8)

        self.down_0_conv = nn.Conv2d(in_channels=7,out_channels=16,kernel_size=3,stride=1,padding=1)
        self.down_1_conv = nn.Conv2d(in_channels=16,out_channels=32,kernel_size=3,stride=1,padding=1,groups=2)
        self.down_2_conv = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1,groups=4)
        self.down_3_conv = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1,groups=8)
        self.down_4_conv = nn.Conv2d(in_channels=128,out_channels=256,kernel_size=3,stride=1,padding=1,groups=16)
        
        self.up_4_to_3 = nn.ConvTranspose2d(in_channels=256,out_channels=256,kernel_size=4,stride=2,padding=1,bias=False,groups=256)
        self.up_3_conv = nn.Conv2d(in_channels=384,out_channels=128,kernel_size=3,stride=1,padding=1,groups=8)

        self.up_3_to_2 = nn.ConvTranspose2d(in_channels=128,out_channels=128,kernel_size=4,stride=2,padding=1,bias=False,groups=128)
        self.up_2_conv = nn.Conv2d(in_channels=192,out_channels=64,kernel_size=3,stride=1,padding=1,groups=4)

        self.up_2_to_1 = nn.ConvTranspose2d(in_channels=64,out_channels=64,kernel_size=4,stride=2,padding=1,bias=False,groups=64)
        self.up_1_conv = nn.Conv2d(in_channels=96,out_channels=32,kernel_size=3,stride=1,padding=1,groups=2)

        self.up_1_to_0 = nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1,bias=False,groups=32)
        self.up_0_conv = nn.Conv2d(in_channels=48,out_channels=1,kernel_size=3,stride=1,padding=1)

        nn.init.normal_(self.down_0_conv.weight, std=0.01)
        nn.init.normal_(self.down_1_conv.weight, std=0.01)
        nn.init.normal_(self.down_2_conv.weight, std=0.01)
        nn.init.normal_(self.down_3_conv.weight, std=0.01)
        nn.init.normal_(self.down_4_conv.weight, std=0.01)

        nn.init.normal_(self.up_3_conv.weight, std=0.01)
        nn.init.normal_(self.up_2_conv.weight, std=0.01)
        nn.init.normal_(self.up_1_conv.weight, std=0.01)
        nn.init.normal_(self.up_0_conv.weight, std=0.01)

        nn.init.normal_(self.down_0_conv.bias, std=0.01)
        nn.init.normal_(self.down_1_conv.bias, std=0.01)
        nn.init.normal_(self.down_2_conv.bias, std=0.01)
        nn.init.normal_(self.down_3_conv.bias, std=0.01)
        nn.init.normal_(self.down_4_conv.bias, std=0.01)

        nn.init.normal_(self.up_3_conv.bias, std=0.01)
        nn.init.normal_(self.up_2_conv.bias, std=0.01)
        nn.init.normal_(self.up_1_conv.bias, std=0.01)
        nn.init.normal_(self.up_0_conv.bias, std=0.01)

        nn.init.constant_(self.up_4_to_3.weight, 0.25)
        nn.init.constant_(self.up_3_to_2.weight, 0.25)
        nn.init.constant_(self.up_2_to_1.weight, 0.25)
        nn.init.constant_(self.up_1_to_0.weight, 0.25)

        freeze_layer(self.up_4_to_3)
        freeze_layer(self.up_3_to_2)
        freeze_layer(self.up_2_to_1)
        freeze_layer(self.up_1_to_0)
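        # The transposed convs use constant 0.25 weights (fixed 2x upsampling) and are excluded from training via freeze_layer.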
Example #16
    def forward(self):
        R, T = look_at_view_transform(dist=self.dist, elev=self.elev, azim=self.azim, at=self.p, up=self.u,
                                      device=self.device)

        predict_image = self.renderer(meshes_world=self.mesh.clone(), R=R, T=T)

        ssim_loss = pytorch_ssim.SSIM(window_size=11)
        predict_image = predict_image[..., :3]
        loss = 1 - ssim_loss(self.gray_image_ref, predict_image)

        return loss
Example #17
def train(net, training_DATA_LEFT, training_DATA_RIGHT, depthMaps, EPOCHS,
          BATCH_SIZE):
    optimizer = optim.Adam(net.parameters(), lr=0.005)
    loss_function = pytorch_ssim.SSIM(window_size=11)
    dataset = utils.TensorDataset(training_DATA_LEFT, training_DATA_RIGHT,
                                  depthMaps)
    train_dataloader = DataLoader(dataset,
                                  shuffle=True,
                                  num_workers=0,
                                  batch_size=1)
    net.zero_grad()
    COUNTER = 1
    avg_loss = []
    print("train function was executed")
    for epoch in range(EPOCHS):
        for i, data in enumerate(train_dataloader):

            img1, img2, depthmap = data
            optimizer.zero_grad()  # reset gradient
            outputs = net(img1, img2)
            loss = loss_function(depthmap, outputs)
            print("Loss:", loss)
            avg_loss.append(loss.detach())

            loss.backward()
            optimizer.step()
        #Print out images and epoch numbers
        print("Epoch number: ", COUNTER)
        COUNTER += 1
        avg_loss = np.array(avg_loss)
        print("Average Loss:", np.mean(avg_loss))
        avg_loss = []
        plt.figure()
        plt.imshow((outputs.view(height, width)).detach().numpy())
        # plt.show()
        plt.figure()
        plt.imshow((depthmap.view(height, width)).detach().numpy())
        # plt.show
        image = img1.view(3, height, width)
        plt.figure()
        plt.imshow(np.swapaxes(np.swapaxes(image.detach().numpy(), 0, 2), 0,
                               1))
        plt.show()
        outputs = net(img1, img2)
        img1 = img1.view(3, height, width)
        plt.figure()
        plt.imshow((outputs.view(height, width)).detach().numpy())
        plt.figure()
        plt.imshow((depthmap.view(height, width)).detach().numpy())
        plt.figure()
        plt.imshow(np.swapaxes(np.swapaxes(img1.detach().numpy(), 0, 2), 0, 1))
        plt.show()
    return net
Example #18
def ssim_opt(m0, temp, ref, model_style=None, style_losses=None):
    temp = temp.reshape(1, nc, imsize, imsize)

    # _, nc, imsize, imsize = temp.shape
    temp.requires_grad_()

    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    ssim_out = -weight_ssim * ssim_loss(ref, temp)
    comp = ((-weight_ssim * m0) - ssim_out) ** 2
    comp.backward()

    return comp, temp.grad
Example #19
def criterion(y_true, y_pred, theta=0.1, max_depth_val=1000.0 / 10.0):
    l_depth = torch.mean(torch.abs(y_true - y_pred))

    dx_pred, dy_pred = gradient(y_pred)
    dx_true, dy_true = gradient(y_true)

    l_edges = torch.mean(
        torch.abs(dy_pred - dy_true) + torch.abs(dx_true - dx_pred))

    ssim_loss = pytorch_ssim.SSIM(device=torch.device('cpu'))
    l_ssim = torch.clamp(1 - ssim_loss(y_true, y_pred), 0, 1)

    return theta * l_depth + l_edges + l_ssim
Example #20
def reconstruction_loss(args, x, y):
    #Define Reconstruction Loss

    #L1 difference
    L1_loss = torch.nn.L1Loss(reduction='mean')

    #SSIM index
    ssim_loss = pytorch_ssim.SSIM(window_size=args.window_size)

    #Final loss is convex combination of the above
    loss = (1-args.alpha) * L1_loss(x,y) + \
            args.alpha * torch.clamp( (1-ssim_loss(x,y))/2,0,1)

    return loss
Example #21
def test_for_ssim(file_a, file_b, amount):
    print("generate image via trjsr: processing {}".format(
        file_a.split('_', 1)[1][:-5]))
    f_a = open(file_a, 'rb')
    traj_set_A = pickle.load(f_a)
    f_b = open(file_b, 'rb')
    traj_set_B = pickle.load(f_b)
    f_a.close()
    f_b.close()

    data = []
    data.extend(traj_set_A)
    data.extend(traj_set_B)
    traj_img = []
    with torch.no_grad():
        for traj in tqdm(data):
            test_traj = traj2cell_test_lr(traj)
            test_img = ToTensor()(draw_lr(test_traj))
            input = torch.unsqueeze(test_img, 0).cuda()
            sr_img = netG(input)
            traj_img.append(sr_img.to(torch.device("cpu")))
    name = "vec/image/" + file_a.split('_', 1)[1][:-5] + '_' + path.split(
        "_", 1)[1][:-3]
    torch.save(torch.stack(traj_img, 0), name)
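    # Load the precomputed trajectory images and rank each query against the database by 1 - SSIM.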
    start_time_1 = time.time()
    traj_set = torch.load("vec/image/downsample_0.0_MyG_3_ssim")
    querynum = 1000
    dbnum = traj_set.shape[0]
    print("ssim: processing {} of dbsize {}".format(
        file_a.split('_', 1)[1][:-5], dbnum - querynum - amount))

    results = {'MSE': [], 'SSIM': []}
    rank = {'MSE': [], 'SSIM': []}
    ssim_loss = pytorch_ssim.SSIM(window_size=11).cuda()
    print("--- %s seconds ---" % (time.time() - start_time_1))
    with torch.no_grad():
        bar = tqdm(range(querynum))
        for i in bar:
            bar.set_description(desc="dbsize {}".format(dbnum - querynum -
                                                        amount))
            dist = {'MSE': [], 'SSIM': []}
            for j in range(querynum, dbnum - amount):
                dist['SSIM'].append(1 - ssim_loss(traj_set[i].cuda(),
                                                  traj_set[j].cuda()).item())
            rank['SSIM'].append(get_rank(dist['SSIM'], i))
        results['SSIM'] = sum(rank['SSIM']) / len(rank['SSIM'])
        print("the SSIM result of DBsize {} is {}".format(dbnum-querynum-amount,results['SSIM']))\
Example #22
 def __init__(self,
              temperature,
              h_hsize,
              l1_const,
              l2ratio,
              ssim_size,
              asymmetricSlope=1.25,
              downsampleScale=0.5):
     self.temperature = temperature
     self.h_hsize = h_hsize
     self.l1_const = l1_const
     self.l2_scale = l2ratio
     self.ssim_loss = pytorch_ssim.SSIM(window_size=ssim_size)
     self.asymmetricSlope = asymmetricSlope
     self.downsampleScale = downsampleScale
Example #23
def ssim(img, ref, weight_ssim):
    img = img.reshape(1, nc, imsize, imsize)

    img.requires_grad_()

    ssim_value = pytorch_ssim.ssim(ref, img)
    ssim_loss = pytorch_ssim.SSIM()
    ssim_out = -weight_ssim * ssim_loss(ref, img)
    ssim_out.backward()

    #    del ref
    #    torch.cuda.empty_cache()
    return ssim_value, img.grad.flatten()
Example #24
def ssim_opt(m0, temp, ref, weight_ssim):
    temp = temp.reshape(1, nc, imsize, imsize)

    # _, nc, imsize, imsize = temp.shape
    temp.requires_grad_()

    ssim_value = pytorch_ssim.ssim(ref, temp)
    ssim_loss = pytorch_ssim.SSIM()
    ssim_out = -weight_ssim * ssim_loss(ref, temp)
    comp = ((-weight_ssim * m0) - ssim_out) ** 2
    comp.backward()

    return comp, temp.grad
Example #25
def main(img_path='../../2.png', base=32):
    global s1, s2, num, pic, encoder, decoder, predictor
    num += 1
    encoder = Encoder(out_channels=30)
    decoder = Decoder(out_channels=30)
    predictor = Predictor(30)
    load(encoder, decoder, predictor)
    encoder.eval()
    decoder.eval()
    img = cv2.imread(img_path)
    img = np.array(img)
    x = transforms.ToTensor()(img)
    # x = x[:, 0:128 * 10, 0:128 * 10]
    x = x.unsqueeze(0)
    # x = x[:, :, 0:128, 0:128]
    b, c, h, w = x.shape
    if x.shape[2] % base != 0 or x.shape[3] % base != 0:
        x = pad(x, base)
    b, c, H, W = x.shape
    x.requires_grad = False
    if torch.cuda.is_available():
        encoder = encoder.cuda()
        decoder = decoder.cuda()
        x = x.cuda()
    y = x.clone()
    x = run(x)
    l1 = Lp_Loss.Loss(p=1)
    ss = pytorch_ssim.SSIM(window_size=11)
    psnr = PSNR()
    psnr.eval()
    ss.eval()
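    # Accumulate SSIM and PSNR between reconstruction and original (global running sums across calls).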
    x = torch.clamp(x, 0, 1)
    s1 += ss(x, y)
    s2 += psnr(x, y)
    x = x.detach()
    y = y.detach()
    s1 = s1.detach()
    s2 = s2.detach()
    print(s1 / num, s2 / num)
    x = torch.round(x * 255).int()
    x = torch.abs(x)
    x = x.detach().cpu().numpy()
    x = np.array(x, dtype=np.uint8)
    x = x.squeeze(0)
    x = np.swapaxes(x, 0, 2)
    x = np.swapaxes(x, 0, 1)
    x = x[0:h, 0:w, :]
    cv2.imwrite('./1.png', x)
Example #26
    def build_model(self):
        # Define encoder-decoder (generator) and a discriminator
        self.G = Generator(self.g_first_dim, self.enc_repeat_num)
        self.D = Discriminator(self.img_crop_size, self.d_first_dim,
                               self.d_repeat_num)
        self.ssim_loss = pytorch_ssim.SSIM(window_size=11)

        # Optimizers
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr,
                                            [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr,
                                            [self.beta1, self.beta2])

        if torch.cuda.is_available():
            self.G.cuda()
            self.D.cuda()
Example #27
    def InitializeTraining(self):
        self.loss_funcs = {}
        self.loss_funcs['style_gram'] = loss.StyleLoss()
        self.loss_funcs['cmp_src_tgt'] = loss.MSELossMask()
        if self.opt.smoothness_type.lower() == 'l1':
            self.loss_funcs['flow_smooth'] = loss.TVLoss_L1()
        else:
            self.loss_funcs['flow_smooth'] = loss.TVLoss_sq()
        self.loss_funcs['cmp_self'] = loss.MSELossMask()
        self.loss_funcs['ssim'] = pytorch_ssim.SSIM()

        model_params = []
        for k in self.models.keys():
            model_params += self.models[k].parameters()
        self.optimizer = torch.optim.Adam(model_params,
                                          lr=self.opt.learing_rate)
Example #28
    def init_fn(self):
        if self.options.model == 'flow':
            num_input_channels = self.options.n_time_bins * 2
            num_output_channels = 2
        elif self.options.model == 'recons':
            # For the reconstruction model, we sum the event volume across the time dimension, so
            # that the network only sees a single channel event input, plus the prev image.
            num_input_channels = 1 + self.options.n_image_channels
            num_output_channels = self.options.n_image_channels
        else:
            raise ValueError(
                "Class was initialized with an invalid model {}"
                ", only {EventGAN, flow, recons} are supported.".format(
                    self.options.model))

        self.cycle_unet = UNet(num_input_channels=num_input_channels,
                               num_output_channels=num_output_channels,
                               skip_type='concat',
                               activation='tanh',
                               num_encoders=4,
                               base_num_channels=32,
                               num_residual_blocks=2,
                               norm='BN',
                               use_upsample_conv=True,
                               multi=True)

        self.models_dict = {"model": self.cycle_unet}
        model_params = self.cycle_unet.parameters()

        optimizer = radam.RAdam(list(model_params),
                                lr=self.options.lrc,
                                weight_decay=self.options.wd,
                                betas=(self.options.lr_decay, 0.999))

        self.ssim = pytorch_ssim.SSIM()
        self.l1 = nn.L1Loss(reduction="mean")
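        # Combined image loss: L1 distance minus SSIM (higher SSIM lowers the loss).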
        self.image_loss = lambda x, y: self.l1(x, y) - self.ssim(x, y)

        self.optimizers_dict = {"optimizer": optimizer}

        self.train_ds, self.train_sampler = event_loader.get_and_concat_datasets(
            self.options.train_file, self.options, train=True)
        self.validation_ds, self.validation_sampler = event_loader.get_and_concat_datasets(
            self.options.validation_file, self.options, train=False)

        self.cdl_kwargs["collate_fn"] = event_utils.none_safe_collate
        self.cdl_kwargs["sampler"] = self.train_sampler
Example #29
    def validation(self):  # input as YCbCr; to be completed
        self.Choose_Model(self.Model_index)
        val_data_loader = self.load_dataset(dataset='test')
        print('Validation is started.')

        if self.loss_func == 'mse':
            self.loss = nn.MSELoss()
        elif self.loss_func == 'ssim':
            self.loss = pytorch_ssim.SSIM(window_size=11)

        # test_data_loader = self.load_dataset(dataset='test')
        #self.model.eval()
        img_num = 0
        total_loss = 0
        for iter, data in enumerate(val_data_loader):
            LR = data['img_LR']
            HR = data['img_HR']
            #only use Y channel
            input_Y = LR[:, 0:1, :, :]
            target_Y = HR[:, 0:1, :, :]

            target_Y = utils.shave(target_Y, border_size=2 * self.scale_factor)

            if self.gpu_mode:
                input = Variable(input_Y.cuda())
            else:
                input = Variable(input_Y)

            # prediction
            recon_imgs = self.model(input).detach()

            savein_target_Y = (
                target_Y.numpy()[0, :, :, :].transpose(1, 2, 0) * 255).astype(
                    numpy.uint8)
            saveinY = (recon_imgs.numpy()[0, :, :, :].transpose(1, 2, 0) *
                       255).astype(numpy.uint8)
            #imageio.imsave('1118_validation_image/predicted/'+ str(iter) + 'predicted.png', saveinY[:, :, 0])
            #imageio.imsave('1118_validation_image/Target_Y/'+ str(iter) + 'target_Y.png', savein_target_Y[:, :, 0])
            loss = self.loss(recon_imgs, target_Y)
            total_loss += loss.data
            #print('validation_loss', loss.data)

            #scipy.misc.imsave(self.img_save_dir + '/img' + str(img_num) + '_' + str(self.scale_factor) + 'x_' + str(epoch)+'LR_'+str(self.lr)+'.png', recon_img)
        print('total_loss, ', total_loss)
        print('the average validation dataset loss is',
              total_loss / len(val_data_loader))
Example #30
def evaluateError(output, target, idx, batches):

    errors = {'MSE': 0, 'RMSE': 0, 'MAE': 0, 'SSIM': 0}

    _output, _target, nanMask, nValidElement = setNanToZero(output, target)
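    # setNanToZero masks invalid (NaN) pixels; metrics are computed only when valid pixels remain.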
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if (nValidElement.data.cpu().numpy() > 0):

        output_0_1 = _output.cpu().detach().numpy()
        target_0_1 = _target.cpu().detach().numpy()

        #x = np.reshape(output_0_1,[500,500])
        #x = x *100000
        #x = x.astype('uint16');

        #cv2.imwrite(str(idx)+'_out.png',x)

        output_0_1 = output_0_1 * 100
        target_0_1 = target_0_1 * 100

        idx_zero = np.where(target_0_1 <= 1)
        output_0_1[idx_zero] = 0
        #target_0_1[idx_zero] = 0
        output_0_1[np.where(output_0_1 >= 30)] = 0

        output_0_1 = torch.from_numpy(output_0_1).float().to(device)
        target_0_1 = torch.from_numpy(target_0_1).float().to(device)

        diffMatrix = torch.abs((output_0_1) - (target_0_1))

        IMsize = target_0_1.shape[2] * target_0_1.shape[3]

        errors['MSE'] = torch.sum(torch.pow(diffMatrix, 2)) / IMsize / batches
        errors['MAE'] = torch.sum(diffMatrix) / IMsize / batches
        ssim_loss = pytorch_ssim.SSIM(window_size=15)
        errors['SSIM'] = ssim_loss(_output, _target)

        errors['MSE'] = float(errors['MSE'].data.cpu().numpy())
        errors['SSIM'] = float(errors['SSIM'].data.cpu().numpy())
        errors['MAE'] = float(errors['MAE'].data.cpu().numpy())
        #errors['SSIM'] = float(errors['SSIM'])

    return errors