Example #1
    def validation_step(self, batch, batch_idx):
        self.global_valid_step += 1
        input_batch, gt_batch, fname = batch[INPUT], batch[GT], batch[FPATH][0]

        low_res_batch = self.down_sampler(input_batch)
        output_batch = self.net(low_res_batch, input_batch)

        # log metrics
        # if self.global_valid_step % 100 == 0:
        psnr = util.ImageProcessing.compute_psnr(
            util.cuda_tensor_to_ndarray(output_batch),
            util.cuda_tensor_to_ndarray(gt_batch), 1.0
        )
        self.log(PSNR, psnr)

        # log images
        self.log_images_dict(
            VALID,
            osp.basename(fname),
            {
                INPUT: input_batch,
                OUTPUT: output_batch,
                GT: gt_batch,
            }
        )
        return output_batch
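For reference, a stand-in for util.ImageProcessing.compute_psnr as it is called above; this sketch assumes float arrays in [0, 1] and that the third argument is the peak value (an assumption, not the repository's implementation):

import numpy as np

def psnr(pred: np.ndarray, gt: np.ndarray, peak: float = 1.0) -> float:
    # PSNR = 10 * log10(peak^2 / MSE), in dB
    mse = np.mean((pred.astype(np.float64) - gt.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else float(10.0 * np.log10(peak ** 2 / mse))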
Example #2
    def validation_step(self, batch, batch_idx):

        # get output
        input_batch, gt_batch = Variable(batch[INPUT], requires_grad=False), \
                                Variable(batch[GT], requires_grad=False)
        output_batch, self.valid_metrics[WEIGHTS_NORM] = \
            self.eval_forward_one_img(input_batch)

        # log images:
        self.global_valid_step += 1
        self.ia3dlut_log(VALID, self.global_valid_step, batch[FPATH][0],
                         input_batch, output_batch, gt_batch)

        # get psnr
        self.valid_metrics[PSNR] = util.ImageProcessing.compute_psnr(
            util.cuda_tensor_to_ndarray(output_batch),
            util.cuda_tensor_to_ndarray(gt_batch), 1.0)

        # log metrics to pl and loggers
        valid_metrics = {
            f'{VALID}.{x}': y
            for x, y in self.valid_metrics.items()
        }
        for x, y in valid_metrics.items():
            # Tip: calling self.log with on_step=True here renames the metric to "valid.psnr/epoch_xxx",
            # so call it without extra step/epoch arguments.
            self.log(x, y, prog_bar=True)

        return output_batch
Example #3
    def test_step(self, batch, batch_ix):
        # test without GT image:
        input_batch, fname = batch[INPUT], batch[FPATH][0]
        assert input_batch.shape[0] == 1
        low_res_batch = self.down_sampler(input_batch)
        output_batch = self.net(low_res_batch, input_batch)
        self.save_one_img_of_batch(output_batch, self.opt[IMG_DIRPATH],
                                   osp.basename(fname))

        # save illu map:
        if self.opt[RUNTIME][PREDICT_ILLUMINATION]:
            illu_dirpath = Path(self.opt[IMG_DIRPATH]) / 'illu-map'
            self.save_one_img_of_batch(self.net.illu_map, illu_dirpath,
                                       osp.basename(fname))

        # test with GT:
        if GT in batch:
            # calculate metrics:
            output_ = util.cuda_tensor_to_ndarray(output_batch)
            y_ = util.cuda_tensor_to_ndarray(batch[GT])
            psnr = util.ImageProcessing.compute_psnr(output_, y_, 1.0)
            ssim = util.ImageProcessing.compute_ssim(output_, y_)
            self.log_dict(
                {PSNR: psnr, SSIM: ssim},
                prog_bar=True,
                on_step=True,
                on_epoch=True)
Example #4
    def test_step(self, batch, batch_ix):
        # test without GT image:
        input_batch, fpaths = batch[INPUT], batch[FPATH]
        output, _ = self.eval_forward_one_img(input_batch)
        util.saveTensorAsImg(
            output, osp.join(self.opt[IMG_DIRPATH], osp.basename(fpaths[0])))

        # test with GT:
        if GT in batch:
            # calculate metrics:
            psnr = util.ImageProcessing.compute_psnr(
                util.cuda_tensor_to_ndarray(output),
                util.cuda_tensor_to_ndarray(batch[GT]), 1.0)
            self.log(PSNR, psnr, prog_bar=True)
        return output
Example #5
    def training_step(self, batch, batch_idx):
        # if not self.MODEL_WATCHED:
        #     self.logger.watch(self.luts)
        #     self.logger.watch(self.cnn)
        #     self.MODEL_WATCHED = True

        # self.show_flops_and_param_num([batch[INPUT]])
        # ipdb.set_trace()

        # get output
        input_batch, gt_batch = Variable(batch[INPUT], requires_grad=False), \
                                Variable(batch[GT], requires_grad=False)
        output_batch, self.train_metrics[WEIGHTS_NORM] = \
            self.train_forward_one_batch(input_batch)

        # calculate loss:
        mse = self.criterion(output_batch, gt_batch)
        tv_mn_pairs = [self.tv3(x) for x in self.luts]
        self.train_metrics[TV_CONS] = sum([x[0] for x in tv_mn_pairs])
        self.train_metrics[MN_CONS] = sum([x[1] for x in tv_mn_pairs])

        # TODO: multi-GPU training also has a problem here:
        # total loss = MSE + lambda_smooth * (weights_norm + TV) + lambda_monotonicity * MN
        loss = (
            mse + self.opt[RUNTIME][LAMBDA_SMOOTH] *
            (self.train_metrics[WEIGHTS_NORM] + self.train_metrics[TV_CONS]) +
            self.opt[RUNTIME][LAMBDA_MONOTONICITY] *
            self.train_metrics[MN_CONS])

        self.train_metrics[MSE] = mse
        self.train_metrics[LOSS] = loss

        # get psnr
        self.train_metrics[PSNR] = util.ImageProcessing.compute_psnr(
            util.cuda_tensor_to_ndarray(output_batch),
            util.cuda_tensor_to_ndarray(gt_batch), 1.0)

        # log to logger
        for x, y in self.train_metrics.items():
            self.log(x, y, prog_bar=True)

        self.ia3dlut_log(TRAIN, self.global_step, batch[FPATH][0], input_batch,
                         output_batch, gt_batch)

        return loss
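For context, a minimal 1D sketch of what a smoothness (TV) and monotonicity (MN) regularizer over a lookup table typically computes; self.tv3 above presumably applies the 3D analogue to each LUT (an assumption, not the repository's implementation):

import torch

def tv_mn_1d(lut: torch.Tensor):
    # lut: (C, N) lookup-table values along one axis
    diff = lut[:, 1:] - lut[:, :-1]        # finite differences between neighbouring entries
    tv = torch.mean(diff ** 2)             # smoothness term: penalise large jumps
    mn = torch.mean(torch.relu(-diff))     # monotonicity term: penalise decreasing entries
    return tv, mn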
Example #6
    def test_step(self, batch, batch_ix):
        # test without GT image:
        input_batch, fname = batch[INPUT], batch[FPATH][0]
        output_dict = self.net(input_batch)
        output = torch.clamp(output_dict[OUTPUT], 0.0, 1.0)

        util.saveTensorAsImg(
            output, os.path.join(self.opt[IMG_DIRPATH], osp.basename(fname)))
        if PREDICT_ILLUMINATION in output_dict:
            util.saveTensorAsImg(
                output_dict[PREDICT_ILLUMINATION],
                os.path.join(self.illumination_dirpath, osp.basename(fname)))

        # test with GT:
        if GT in batch:
            # calculate metrics:
            output_ = util.cuda_tensor_to_ndarray(output)
            y_ = util.cuda_tensor_to_ndarray(batch[GT])
            psnr = util.ImageProcessing.compute_psnr(output_, y_, 1.0)
            ssim = util.ImageProcessing.compute_ssim(output_, y_)
            self.log_dict({PSNR: psnr, SSIM: ssim}, prog_bar=True)
Example #7
    def validation_step(self, batch, batch_idx):
        self.global_valid_step += 1
        input_batch, gt_batch, fname = batch[INPUT], batch[GT], batch[FPATH][0]
        low_res_batch = self.down_sampler(input_batch)
        output_batch = self.net(low_res_batch, input_batch)

        # log metrics
        # if self.global_valid_step % 100 == 0:
        psnr = util.ImageProcessing.compute_psnr(
            util.cuda_tensor_to_ndarray(output_batch),
            util.cuda_tensor_to_ndarray(gt_batch), 1.0)
        self.log(PSNR, psnr)

        # log images
        self.log_images_dict(
            VALID, osp.basename(fname), {
                INPUT: input_batch,
                OUTPUT: output_batch,
                GT: gt_batch,
                PREDICT_ILLUMINATION: self.net.illu_map,
                GUIDEMAP: self.net.guidemap
            })
        return output_batch
Example #8
def debug_tensor(tensor, name):
    # Dump a (CUDA) tensor to an .npy file for offline inspection.
    np.save(name, util.cuda_tensor_to_ndarray(tensor))
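A quick self-check for debug_tensor, assuming util.cuda_tensor_to_ndarray accepts a CPU or CUDA tensor and returns a NumPy array (the file name is illustrative):

import numpy as np
import torch

t = torch.rand(1, 3, 8, 8)
debug_tensor(t, 'debug_sample.npy')                       # writes debug_sample.npy
assert np.load('debug_sample.npy').shape == (1, 3, 8, 8)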