Example #1
    def _backward(self, batch, out, p_buffers):
        # `out` may be empty when only the manifold (p-buffer) losses are
        # trained; every non-manifold branch below needs the regression outputs.
        assert not out or 'radiance' in out
        assert not out or 'diffuse' in out
        assert not out or 'specular' in out

        if out:
            total = out['radiance']
            diffuse = out['diffuse']
            specular = out['specular']
            tgt_total = crop_like(batch['target_total'], total)
        loss_dict = {}

        if self.manif_learn:
            # Note: no crop_like here; the p-buffers and their targets are
            # compared at full resolution.
            tgt_diffuse = batch['target_diffuse']
            tgt_specular = batch['target_specular']

            p_buffer_diffuse = p_buffers['diffuse']
            L_manif_diffuse = self.loss_funcs['l_manif'](
                p_buffer_diffuse, tgt_diffuse) * self.w_manif

            p_buffer_specular = p_buffers['specular']
            L_manif_specular = self.loss_funcs['l_manif'](
                p_buffer_specular, tgt_specular) * self.w_manif

            loss_dict['l_manif_diffuse'] = L_manif_diffuse.detach() / self.w_manif
            loss_dict['l_manif_specular'] = L_manif_specular.detach() / self.w_manif

            L_manif_diffuse.backward()
            L_manif_specular.backward()
        elif self.train_branches:
            tgt_diffuse = crop_like(batch['target_diffuse'], diffuse)
            L_diffuse = self.loss_funcs['l_diffuse'](diffuse, tgt_diffuse)

            tgt_specular = crop_like(batch['target_specular'], specular)
            L_specular = self.loss_funcs['l_specular'](specular, tgt_specular)

            loss_dict['l_diffuse'] = L_diffuse.detach()
            loss_dict['l_specular'] = L_specular.detach()

            L_diffuse.backward()
            L_specular.backward()

            with torch.no_grad():
                L_total = self.loss_funcs['l_recon'](total, tgt_total)
                loss_dict['l_total'] = L_total.detach()
        else:
            L_total = self.loss_funcs['l_recon'](total, tgt_total)
            loss_dict['l_total'] = L_total.detach()
            L_total.backward()

        return loss_dict
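
Every example in this section leans on a crop_like helper that is not shown here. A minimal sketch, assuming it center-crops the spatial dimensions of the first tensor to match the second (the usual convention when a network trims border pixels):

def crop_like(src, tgt):
    # Center-crop the trailing (H, W) dims of `src` to match `tgt`.
    src_h, src_w = src.shape[-2:]
    tgt_h, tgt_w = tgt.shape[-2:]
    dh, dw = (src_h - tgt_h) // 2, (src_w - tgt_w) // 2
    if dh == 0 and dw == 0:
        return src
    return src[..., dh:dh + tgt_h, dw:dw + tgt_w]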
Example #2
    def _backward(self, batch, out, p_buffers):
        assert 'radiance' in out
        assert 'diffuse' in out
        assert 'specular' in out

        total = out['radiance']
        diffuse = out['diffuse']
        specular = out['specular']
        loss_dict = {}
        tgt_total = crop_like(batch['target_total'], total)

        if self.train_branches:  # training diffuse and specular branches
            tgt_diffuse = crop_like(batch['target_diffuse'], diffuse)
            L_diffuse = self.loss_funcs['l_diffuse'](diffuse, tgt_diffuse)

            tgt_specular = crop_like(batch['target_specular'], specular)
            L_specular = self.loss_funcs['l_specular'](specular, tgt_specular)

            loss_dict['l_diffuse'] = L_diffuse.detach()
            loss_dict['l_specular'] = L_specular.detach()

            if self.manif_learn:
                p_buffer_diffuse = crop_like(p_buffers['diffuse'], diffuse)
                L_manif_diffuse = self.loss_funcs['l_manif'](
                    p_buffer_diffuse, tgt_diffuse)
                L_diffuse += L_manif_diffuse * self.w_manif

                p_buffer_specular = crop_like(p_buffers['specular'], specular)
                L_manif_specular = self.loss_funcs['l_manif'](
                    p_buffer_specular, tgt_specular)
                L_specular += L_manif_specular * self.w_manif

                loss_dict['l_manif_diffuse'] = L_manif_diffuse.detach()
                loss_dict['l_manif_specular'] = L_manif_specular.detach()

            L_diffuse.backward()
            L_specular.backward()

            with torch.no_grad():
                L_total = self.loss_funcs['l_recon'](total, tgt_total)
                loss_dict['l_total'] = L_total.detach()
        else:  # post-training the entire system
            L_total = self.loss_funcs['l_recon'](total, tgt_total)
            loss_dict['l_total'] = L_total.detach()
            L_total.backward()

        with torch.no_grad():
            loss_dict['rmse'] = self.loss_funcs['l_test'](total, tgt_total).detach()

        return loss_dict
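
Neither _backward variant shows the code that drives it. A hypothetical training step, assuming an optimizer stored as self.optim and the _regress_forward/_manifold_forward methods that appear in the validation examples below:

    def train_batch(self, batch):
        # Hypothetical driver: zero grads, forward, per-branch backward, step.
        self.optim.zero_grad()
        p_buffers = self._manifold_forward(batch) if self.use_llpm_buf else None
        out = self._regress_forward(batch)
        loss_dict = self._backward(batch, out, p_buffers)
        self.optim.step()
        return loss_dict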
Example #3
    def validate_batch(self, batch):
        p_buffers = None

        batch = {
            'target_total': batch['target_total'],
            'target_diffuse': batch['target_diffuse'],
            'target_specular': batch['target_specular'],
            'kpcn_diffuse_in': torch.cat(
                [batch['kpcn_diffuse_in'], batch['target_diffuse']], 1),
            'kpcn_specular_in': torch.cat(
                [batch['kpcn_specular_in'], batch['target_specular']], 1),
            'kpcn_diffuse_buffer': batch['kpcn_diffuse_buffer'],
            'kpcn_specular_buffer': batch['kpcn_specular_buffer'],
            'kpcn_albedo': batch['kpcn_albedo'],
        }

        out = self._regress_forward(batch)

        tgt_total = crop_like(batch['target_total'], out['radiance'])
        L_total = self.loss_funcs['l_test'](out['radiance'], tgt_total)
        # Move the zero-initialized accumulator to the loss device on first use.
        if (self.m_losses['m_val'] == 0.0
                and self.m_losses['m_val'].device != L_total.device):
            self.m_losses['m_val'] = torch.tensor(0.0, device=L_total.device)
        self.m_losses['m_val'] += L_total.detach()

        return out['radiance'], p_buffers
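
validate_batch only accumulates into self.m_losses['m_val']; resetting the accumulator and averaging over batches has to happen in an outer loop. A hedged sketch of such a loop (the method name and reduction are assumptions, not repo code):

    def validate(self, loader):
        self.m_losses['m_val'] = torch.tensor(0.0)
        with torch.no_grad():
            for batch in loader:
                self.validate_batch(batch)
        # Average the accumulated l_test loss over the validation batches.
        return (self.m_losses['m_val'] / max(len(loader), 1)).item()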
Example #4
    def validate_batch(self, batch):
        p_buffers = None

        if self.use_llpm_buf:
            p_buffers = self._manifold_forward(batch)
            """ Feature disentanglement """
            _, _, c, _, _ = p_buffers['diffuse'].shape
            assert c >= 2
            if self.disentanglement_option in ('m10r01', 'm11r01'):
                p_buffers = {
                    'diffuse': p_buffers['diffuse'][:, :, :c // 2, ...],
                    'specular': p_buffers['specular'][:, :, :c // 2, ...],
                }

            # Channel-averaged variance of each p-buffer across samples,
            # divided by spp (i.e., the variance of the per-pixel sample mean).
            p_var_diffuse = p_buffers['diffuse'].var(1).mean(1, keepdim=True).detach()
            p_var_diffuse /= p_buffers['diffuse'].shape[1]  # spp
            p_var_specular = p_buffers['specular'].var(1).mean(1, keepdim=True).detach()
            p_var_specular /= p_buffers['specular'].shape[1]

            # Make a new batch with the p-buffer mean and variance appended
            # to the KPCN inputs.
            batch = {
                'target_total': batch['target_total'],
                'target_diffuse': batch['target_diffuse'],
                'target_specular': batch['target_specular'],
                'kpcn_diffuse_in': torch.cat([
                    batch['kpcn_diffuse_in'],
                    p_buffers['diffuse'].mean(1),
                    p_var_diffuse,
                ], 1),
                'kpcn_specular_in': torch.cat([
                    batch['kpcn_specular_in'],
                    p_buffers['specular'].mean(1),
                    p_var_specular,
                ], 1),
                'kpcn_diffuse_buffer': batch['kpcn_diffuse_buffer'],
                'kpcn_specular_buffer': batch['kpcn_specular_buffer'],
                'kpcn_albedo': batch['kpcn_albedo'],
            }

        out = self._regress_forward(batch)

        tgt_total = crop_like(batch['target_total'], out['radiance'])
        L_total = self.loss_funcs['l_test'](out['radiance'], tgt_total)
        # Move the zero-initialized accumulator to the loss device on first use.
        if (self.m_losses['m_val'] == 0.0
                and self.m_losses['m_val'].device != L_total.device):
            self.m_losses['m_val'] = torch.tensor(0.0, device=L_total.device)
        self.m_losses['m_val'] += L_total.detach()

        return out['radiance'], p_buffers
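
The p-buffer statistics above assume a five-dimensional layout. An illustrative walk-through with dummy data; the concrete sizes are assumptions, only the (batch, spp, channels, height, width) order is implied by the indexing in the example:

import torch

p = torch.randn(2, 8, 12, 64, 64)        # stand-in for p_buffers['diffuse']
p_mean = p.mean(1)                        # (2, 12, 64, 64), appended to the net input
p_var = p.var(1).mean(1, keepdim=True)    # (2, 1, 64, 64), channel-averaged variance
p_var = p_var / p.shape[1]                # divide by spp: variance of the sample mean
assert p_mean.shape == (2, 12, 64, 64) and p_var.shape == (2, 1, 64, 64)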
Example #5
    def _backward(self, batch, out, p_buffer):
        loss_dict = {}
        tgt_total = crop_like(batch['target_image'], out)

        L_total = self.loss_funcs['l_recon'](out, tgt_total)

        if self.manif_learn:
            p_buffer = crop_like(p_buffer, out)
            L_manif = self.loss_funcs['l_manif'](p_buffer, tgt_total)

            loss_dict['l_manif'] = L_manif.detach()
            loss_dict['l_recon'] = L_total.detach()

            L_total += L_manif * self.w_manif

        loss_dict['l_total'] = L_total.detach()

        L_total.backward()

        with torch.no_grad():
            loss_dict['rmse'] = self.loss_funcs['l_test'](out, tgt_total).detach()

        return loss_dict
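
The metric behind loss_funcs['l_test'] is logged as 'rmse' but never defined in this section. A plausible stand-in is the relative MSE commonly used to evaluate Monte Carlo denoisers; the class name and epsilon below are assumptions, not repo code:

import torch

class RelativeMSE(torch.nn.Module):
    # Hypothetical stand-in for loss_funcs['l_test'].
    def __init__(self, eps=1e-2):
        super().__init__()
        self.eps = eps

    def forward(self, pred, tgt):
        # Squared error normalized by the squared reference intensity.
        return ((pred - tgt) ** 2 / (tgt ** 2 + self.eps)).mean()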
Example #6
    def validate_batch(self, batch):
        p_buffer = None

        if self.use_llpm_buf:
            p_buffer = self._manifold_forward(batch)
            """ Feature disentanglement """
            _, s, c, _, _ = p_buffer.shape
            assert c >= 2
            # Both disentanglement options keep the first half of the channels.
            if self.disentangle in ('m10r01', 'm11r01'):
                p_buffer = p_buffer[:, :, :c // 2, ...]

            # Channel-averaged variance across samples, divided by spp and
            # replicated along the sample dimension.
            p_var = p_buffer.var(1).mean(1, keepdim=True)
            p_var /= s  # spp
            p_var = torch.stack([p_var] * s, dim=1).detach()

            # make a new batch
            batch = {
                'target_image': batch['target_image'],
                'radiance': batch['radiance'],
                'features': torch.cat([batch['features'], p_buffer, p_var], 2),
            }

        out = self._regress_forward(batch)

        tgt_total = crop_like(batch['target_image'], out)
        L_total = self.loss_funcs['l_test'](out, tgt_total)
        # Move the zero-initialized accumulator to the loss device on first use.
        if (self.m_losses['m_val'] == 0.0
                and self.m_losses['m_val'].device != L_total.device):
            self.m_losses['m_val'] = torch.tensor(0.0, device=L_total.device)
        self.m_losses['m_val'] += L_total.detach()

        return out, p_buffer
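
The disentanglement step keeps the first half of the p-buffer channels, and the variance is tiled across the sample dimension so it can be concatenated per sample. A toy walk-through with assumed sizes:

import torch

p_buffer = torch.randn(1, 4, 6, 32, 32)              # (batch, spp, channels, H, W)
_, s, c, _, _ = p_buffer.shape
p_buffer = p_buffer[:, :, :c // 2, ...]              # manifold half: (1, 4, 3, 32, 32)
p_var = p_buffer.var(1).mean(1, keepdim=True) / s    # (1, 1, 32, 32)
p_var = torch.stack([p_var] * s, dim=1)              # (1, 4, 1, 32, 32), one copy per spp
assert p_var.shape == (1, 4, 1, 32, 32)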