Example #1
0
    def test(self):
        """Inference pass: dehaze real_A and rebuild it via the estimated depth map."""
        # Wrap both inputs as volatile Variables so no autograd history is recorded.
        self.real_A = Variable(self.input_A, volatile=True)
        self.real_B = Variable(self.input_B, volatile=True)

        # Generator predicts the haze-free image.
        self.fake_B = self.netG.forward(self.real_A)
        # Depth network returns a pre-filter map together with the depth/transmission map.
        self.pre_filter, self.depth = self.netDepth.forward(self.real_A)

        # Recover a haze-free image (B) directly from A according to depth.
        self.fake_B2 = util.reverse_matting(self.real_A, self.depth)
        # Reconstruct the hazy image (A) from fake_B based on the optical model.
        self.fake_A = util.synthesize_matting(self.fake_B, self.depth)
    def forward(self):
        """Forward pass: dehaze real_A, estimate depth (clipped at 0.9), reconstruct."""
        # Wrap inputs; gradients are tracked here (training-time pass).
        self.real_A = Variable(self.input_A)
        self.real_B = Variable(self.input_B)

        # Generator's haze-free prediction.
        self.fake_B = self.netG.forward(self.real_A)

        # Depth/transmission estimate, clipped from above at 0.9
        # (equivalent to torch.clamp(depth, max=0.9)).
        self.depth = self.netDepth.forward(self.real_A).clamp(max=0.9)

        # Recover a second haze-free estimate (B) from A according to depth.
        self.fake_B2 = util.reverse_matting(self.real_A, self.depth)
        # Reconstruct the hazy image (A) from fake_B based on the optical model.
        self.fake_A = util.synthesize_matting(self.fake_B, self.depth)
    def forward(self):
        """Forward pass: run the generator and the depth network on the hazy
        input, clip the depth/transmission map at 0.9, then apply the optical
        (matting) model in both directions to get a recovered haze-free image
        and a re-synthesized hazy image.
        """
        self.real_A = Variable(self.input_A)
        self.fake_B = self.netG.forward(
            self.real_A)  # GAN generates the haze-free image fake_B
        self.real_B = Variable(self.input_B)
        self.depth = self.netDepth.forward(self.real_A)  # netDepth outputs the transmission map

        # clip with 0.9
        self.depth = torch.clamp(self.depth, max=0.9)

        # recover B according to depth
        self.fake_B2 = util.reverse_matting(
            self.real_A,
            self.depth)  # recover a haze-free image from the hazy image using the
                         # transmission map `depth` and a default atmospheric light of 1.0

        # reconstruct A based on optical model
        self.fake_A = util.synthesize_matting(
            self.fake_B, self.depth)  # synthesize a hazy image from the haze-free
                                      # image and the transmission map `depth`
Example #4
0
    def test(self):
        """Inference pass that additionally feeds the haze-free image through both nets.

        Produces:
          fake_B      - generator's dehazed output for real_A
          depth       - depth/transmission map for real_A, rescaled to [0, 1]
                        unless the depth model is 'aod' (presumably 'aod' already
                        outputs that range — TODO confirm)
          fake_B2     - haze-free image recovered from real_A via depth
          fake_A      - hazy image re-synthesized from fake_B via depth
          extra_B / extra_depth / extra_A - the same pipeline applied to real_B
        """
        # volatile=True: no autograd graph is retained (pre-0.4 PyTorch idiom).
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        self.real_B = Variable(self.input_B, volatile=True)
        self.depth = self.netDepth.forward(self.real_A)

        # Fix: `if not x == y` replaced with the idiomatic `if x != y` (twice below).
        if self.opt.which_model_depth != 'aod':
            self.depth = (self.depth + 1) / 2.  # map [-1, 1] output to [0, 1]

        # recover B according to depth
        self.fake_B2 = util.reverse_matting(self.real_A, self.depth)

        # reconstruct A based on optical model
        self.fake_A = util.synthesize_matting(self.fake_B, self.depth)

        # Feed the haze-free image through the same pipeline.
        self.extra_B = self.netG.forward(self.real_B)
        self.extra_depth = self.netDepth.forward(self.real_B)
        if self.opt.which_model_depth != 'aod':
            self.extra_depth = (self.extra_depth + 1) / 2.  # scale it to [0, 1]
        self.extra_A = util.synthesize_matting(self.extra_B, self.extra_depth)
    def test(self):
        """Inference pass enforcing a per-pixel lower bound on the depth map.

        Same as the plain test pass, but the depth/transmission map is clamped
        from below by a bound derived from the input image before being used
        for recovery and reconstruction.
        """
        # volatile=True: no autograd graph is retained (pre-0.4 PyTorch idiom).
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        self.real_B = Variable(self.input_B, volatile=True)
        self.depth = self.netDepth.forward(self.real_A)

        # Fix: `if not x == y` replaced with the idiomatic `if x != y`.
        if self.opt.which_model_depth != 'aod':
            self.depth = (self.depth + 1) / 2.  # map [-1, 1] output to [0, 1]

        # Regularize depth by a lower bound: t(x) >= 1 - min over channels of
        # I(x) / A (channel minimum taken along dim=1, i.e. presumably NCHW —
        # TODO confirm input layout).
        real_A = (self.real_A + 1) / 2  # scale image to [0, 1]
        A = 1.  # atmospheric light; assumed constant 1 at this moment
        LB = 1 - torch.min(real_A / A, dim=1,
                           keepdim=True)[0]  # per-pixel lower bound of depth
        self.depth_LB = torch.max(self.depth, LB)

        # recover B according to the bounded depth
        self.fake_B2 = util.reverse_matting(self.real_A, self.depth_LB)

        # reconstruct A based on optical model
        self.fake_A = util.synthesize_matting(self.fake_B, self.depth_LB)