Example #1
    def build_model(self):
        # SRDenseNet model
        self.output = self.SRDenseNet(
            x=self.x_lr,
            f=self.n_filters,
            kernel_size=self.kernel_size,
            reduction=self.reduction,
            use_bn=self.use_bn,
            scale=self.img_scale,
        )
        self.output = tf.clip_by_value(self.output * 255., 0., 255.)

        # l1 loss
        # self.loss = tf.reduce_mean(tf.abs(self.output-self.x_hr))
        # l2 loss
        self.loss = tf.losses.mean_squared_error(self.x_hr, self.output)

        self.train_op = self.opt.minimize(self.loss,
                                          global_step=self.global_step)

        # metrics
        self.psnr = tf.reduce_mean(
            metric.psnr(self.x_hr, self.output, m_val=255))
        self.ssim = tf.reduce_mean(
            metric.ssim(self.x_hr, self.output, m_val=255))

        # summaries
        tf.summary.image('lr', self.x_lr, max_outputs=self.batch_size)
        tf.summary.image('hr', self.x_hr, max_outputs=self.batch_size)
        tf.summary.image('generated-hr',
                         self.output,
                         max_outputs=self.batch_size)

        tf.summary.scalar("loss/l2_loss", self.loss)
        tf.summary.scalar("metric/psnr", self.psnr)
        tf.summary.scalar("metric/ssim", self.ssim)
        tf.summary.scalar("misc/lr", self.lr)

        # merge summary
        self.merged = tf.summary.merge_all()

        # model saver
        self.saver = tf.train.Saver(max_to_keep=2)
        self.best_saver = tf.train.Saver(max_to_keep=1)
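
The `metric.psnr` / `metric.ssim` helpers are not shown on this page. A minimal sketch, assuming they simply wrap TensorFlow's built-in image metrics and that `m_val` is the dynamic range of the pixel values (the actual helpers may differ):

import tensorflow as tf

# Hypothetical reconstruction of the `metric` module used above; assumes `m_val`
# is the maximum pixel value (255 for [0, 255] images, 1 for [0, 1] images).
def psnr(a, b, m_val):
    return tf.image.psnr(a, b, max_val=m_val)

def ssim(a, b, m_val):
    return tf.image.ssim(a, b, max_val=m_val)

Both built-ins return one value per image in the batch, which is why the examples wrap them in tf.reduce_mean.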
Example #2
    def build_model(self):
        # RCAN model
        self.output = self.residual_channel_attention_network(
            x=self.x_lr,
            f=self.n_filters,
            kernel_size=self.kernel_size,
            reduction=self.reduction,
            use_bn=self.use_bn,
            scale=self.img_scale,
        )
        self.output = tf.clip_by_value(self.output * 255., 0., 255.)

        # l1 loss
        self.loss = tf.reduce_mean(tf.abs(self.output - self.x_hr))

        self.train_op = self.opt.minimize(self.loss,
                                          global_step=self.global_step)

        # metrics
        self.psnr = tf.reduce_mean(metric.psnr(self.output, self.x_hr,
                                               m_val=1))
        self.ssim = tf.reduce_mean(metric.ssim(self.output, self.x_hr,
                                               m_val=1))

        # summaries
        tf.summary.image('lr', self.x_lr, max_outputs=self.batch_size)
        tf.summary.image('hr', self.x_hr, max_outputs=self.batch_size)
        tf.summary.image('generated-hr',
                         self.output,
                         max_outputs=self.batch_size)

        tf.summary.scalar("loss/l1_loss", self.loss)
        tf.summary.scalar("metric/psnr", self.psnr)
        tf.summary.scalar("metric/ssim", self.ssim)
        tf.summary.scalar("misc/lr", self.lr)

        # merge summary
        self.merged = tf.summary.merge_all()

        # model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.best_saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter(self.tf_log, self.sess.graph)
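
Neither example shows the training loop that drives this graph. A hypothetical TF1-style step for it is sketched below; `train_step`, `feed`, and `self.checkpoint_path` are illustrative names, not taken from the source:

    def train_step(self, feed):
        # `feed` maps the LR/HR placeholders to one batch (hypothetical helper)
        _, loss, summary, step = self.sess.run(
            [self.train_op, self.loss, self.merged, self.global_step],
            feed_dict=feed)
        self.writer.add_summary(summary, global_step=step)
        self.saver.save(self.sess, self.checkpoint_path, global_step=step)  # hypothetical path attribute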
Example #3
    # select the GPU if one is available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
        torch.backends.cudnn.benchmark = True

    MAX_EPOCHS = 10
    resize = Resize((128, 128))
    dataset = Dataset("datasets/test/test12", resize)
    loader = DataLoader(dataset, batch_size=4)

    model = PReNet_r(recurrent_iter=6).to(device)
    optimizer = Adam(model.parameters(), lr=0.001)
    scheduler = MultiStepLR(optimizer, milestones=[30, 50, 80], gamma=0.2)

    for epoch in range(MAX_EPOCHS):
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)

            optimizer.zero_grad()
            y_pred = model(x)
            # negative SSIM as the training loss (maximising SSIM)
            loss = -ssim(y, y_pred)
            loss.backward()
            optimizer.step()

            with torch.no_grad():
                mse = torch.mean((y - y_pred) ** 2)
                p = psnr(mse)
                s = -loss  # SSIM value for logging
            print(loss.item(), p.item(), s.item())

        # step the LR scheduler once per epoch, after the optimizer updates
        scheduler.step()
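
The `ssim` and `psnr` helpers used in this loop are imported elsewhere. A minimal sketch of the `psnr` helper, assuming it converts an MSE value to PSNR in decibels for images scaled to [0, 1] (the `max_val` default is an assumption):

import torch

def psnr(mse, max_val=1.0):
    # PSNR in dB from a mean-squared-error value
    return 10.0 * torch.log10(max_val ** 2 / mse)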
Example #4
            body = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-body")
            body += head  # tf.math.add(body, head)

            # 3. tail
            x = self.up_scaling(body, f, scale, name='up-scaling')
            tail = tfu.conv2d(x, f=self.n_channel, k=kernel_size, name="conv2d-tail")  # (-1, 384, 384, 3)

            x = self.image_processing(tail, sign=1, name='post-processing')
            return x

    def build_model(self):
        self.train_op = self.opt.minimize(self.loss, global_step=self.global_step)

        # metrics
        self.psnr = tf.reduce_mean(metric.psnr(self.output, self.x_hr, m_val=1))
        self.ssim = tf.reduce_mean(metric.ssim(self.output, self.x_hr, m_val=1))

        # summaries
        tf.summary.image('lr', self.x_lr, max_outputs=self.batch_size)
        tf.summary.image('hr', self.x_hr, max_outputs=self.batch_size)
        tf.summary.image('generated-hr', self.output, max_outputs=self.batch_size)
        tf.summary.scalar("loss/l1_loss", self.loss)
        tf.summary.scalar("metric/psnr", self.psnr)
        tf.summary.scalar("metric/ssim", self.ssim)
        tf.summary.scalar("misc/lr", self.lr)
        # merge summary
        self.merged = tf.summary.merge_all()
        # model saver
        self.saver = tf.train.Saver(max_to_keep=1)
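
The `tfu.conv2d` helper is defined outside this snippet. A plausible sketch, assuming it is a thin wrapper over `tf.layers.conv2d` with `f` as the filter count and `k` as the kernel size (the real helper may add initializers or weight decay):

import tensorflow as tf

def conv2d(x, f, k=3, s=1, pad='same', name='conv2d'):
    # hypothetical wrapper; mirrors the call sites conv2d(x, f=..., k=..., name=...)
    return tf.layers.conv2d(x, filters=f, kernel_size=k, strides=s,
                            padding=pad, name=name)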
Example #5
    net.load_state_dict(torch.load('weights/unet_final.pt', map_location=torch.device('cpu')))

    # net = IM2HI()
    # net.load_state_dict(torch.load('weights/im2height_augment.pt', map_location=torch.device('cpu')))

    net.eval()

    data = testloader()
    data = iter(data)
    # advance to the third batch of the test loader
    for i in range(3):
        img, mask = next(data)

    with torch.set_grad_enabled(False):
        output = net(img)
        
        print(ssim(output, mask))
        print('l1', torch.nn.L1Loss()(output, mask))
        print('l2', torch.nn.MSELoss()(output, mask))
        # print('none l1', torch.nn.L1Loss(reduction='none')(output, mask))
        # print('none l2', torch.nn.MSELoss(reduction='none')(output, mask))
        # replicate the single-channel prediction to 3 channels for display
        output = torch.squeeze(output, 0)
        output = torch.cat((output, output, output))

    img, mask = torch.squeeze(img, 0), torch.squeeze(mask, 0)
    mask = torch.cat((mask, mask, mask))

    fig, (a1, a2, a3) = plt.subplots(1, 3, figsize=(15, 5))
    a1.imshow(img.permute(1, 2, 0))
    a2.imshow(mask.permute(1, 2, 0))
    a3.imshow(output.permute(1, 2, 0))
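
The snippet stops after drawing onto the axes; in a plain script (outside a notebook) the figure still has to be rendered or saved, for example:

    plt.show()
    # or, to keep the comparison on disk (hypothetical filename):
    # fig.savefig('unet_vs_mask.png', bbox_inches='tight')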