# Example 1
 def g_backward(self):
     """Backpropagate the total generator loss.

     The discriminator is frozen for the duration of the backward pass so
     its parameters accumulate no gradient from the generator objective.
     """
     with utils.temporary_freeze(self.disc):
         total_g_loss = sum(self.g_losses.values())
         if not self.cfg.use_half:
             total_g_loss.backward()
         else:
             # fp16 path: apex/amp scales the loss before backprop to
             # avoid gradient underflow.
             with amp.scale_loss(total_g_loss, self.g_optim,
                                 loss_id=1) as scaled:
                 scaled.backward()
# Example 2
 def d_backward(self):
     """Backpropagate the total discriminator loss.

     The generator is frozen during the backward pass so that only the
     discriminator receives gradient from this objective.
     """
     with utils.temporary_freeze(self.gen):
         total_d_loss = sum(self.d_losses.values())
         if not self.cfg.use_half:
             total_d_loss.backward()
         else:
             # fp16 path: amp scales the loss to avoid gradient underflow.
             with amp.scale_loss(total_d_loss, self.d_optim,
                                 loss_id=0) as scaled:
                 scaled.backward()
# Example 3
    def ac_backward(self):
        """Backpropagate auxiliary-classifier losses.

        No-op when no auxiliary classifier is configured. The graph is
        retained on both passes so later backward calls can reuse it.
        """
        if self.aux_clf is None:
            return

        # Losses that update the classifier itself.
        sum(self.ac_losses.values()).backward(retain_graph=True)

        # Losses computed with the classifier frozen, so they only shape
        # the rest of the model.
        with utils.temporary_freeze(self.aux_clf):
            sum(self.frozen_ac_losses.values()).backward(retain_graph=True)
# Example 4
    def ac_backward(self, retain_graph):
        """Backpropagate auxiliary-classifier losses while the generator's
        persistent memory is frozen.

        Args:
            retain_graph: forwarded to ``Tensor.backward`` so the autograd
                graph can be reused by later backward passes.

        No-op when no auxiliary classifier is configured.
        """
        if self.aux_clf is None:
            return

        # Freeze the persistent memory; `org_grads` records the prior
        # requires_grad state so it can be restored afterwards.
        org_grads = utils.freeze(self.gen.memory.persistent_memory)
        try:
            if 'ac' in self.ac_losses:
                self.ac_losses['ac'].backward(retain_graph=retain_graph)

            if 'ac_gen' in self.ac_losses:
                # Freeze the classifier so the ac_gen loss only updates the
                # generator, not the classifier.
                with utils.temporary_freeze(self.aux_clf):
                    self.ac_losses['ac_gen'].backward(
                        retain_graph=retain_graph)
        finally:
            # Bug fix: restore the persistent memory's grad state even if a
            # backward pass raises; previously an exception left it frozen.
            utils.unfreeze(self.gen.memory.persistent_memory, org_grads)
# Example 5
    def add_ac_losses_and_update_stats(self, comp_feats, style_comp_ids,
                                       generated, trg_comp_ids, stats):
        """Compute aux-classifier losses on real and generated features and
        record their accuracies in ``stats``.
        """
        ac_weight = self.cfg['ac_w']

        # 1. ac(enc(x)): classifier loss on the real component features.
        real_loss, real_acc = self.infer_ac(comp_feats, style_comp_ids)
        self.ac_losses['ac'] = real_loss * ac_weight
        stats.ac_acc.update(real_acc, style_comp_ids.numel())

        # 2. ac(enc(fake)): re-encode the generated output with the encoder
        # frozen, to prevent the encoder from cheating to satisfy the
        # classifier.
        with utils.temporary_freeze(self.gen.component_encoder):
            gen_comp_feats = self.gen.component_encoder(generated)[-1]

        gen_loss, gen_acc = self.infer_ac(gen_comp_feats, trg_comp_ids)
        self.ac_losses['ac_gen'] = gen_loss * ac_weight
        stats.ac_gen_acc.update(gen_acc, trg_comp_ids.numel())
# Example 6
    def ac_backward(self):
        """Backpropagate auxiliary-classifier losses, fp16-aware.

        No-op when no auxiliary classifier is configured. Both passes retain
        the graph so subsequent backward calls can reuse it.
        """
        if self.aux_clf is None:
            return

        # 'ac' loss updates both the classifier and the generator encoder.
        if 'ac' in self.ac_losses:
            if self.cfg.use_half:
                with amp.scale_loss(self.ac_losses['ac'],
                                    [self.ac_optim, self.g_optim],
                                    loss_id=2) as scaled_ac_loss:
                    scaled_ac_loss.backward(retain_graph=True)
            else:
                self.ac_losses['ac'].backward(retain_graph=True)

        # 'ac_gen' loss only shapes the generator: the classifier is frozen.
        if 'ac_gen' in self.ac_losses:
            with utils.temporary_freeze(self.aux_clf):
                # Bug fix: index directly instead of .get('ac_gen', 0.) —
                # presence is already guaranteed by the guard above, and the
                # 0. fallback would crash anyway (a float has no .backward()).
                loss = self.ac_losses['ac_gen']
                if self.cfg.use_half:
                    with amp.scale_loss(loss, [self.ac_optim, self.g_optim],
                                        loss_id=3) as scaled_ac_g_loss:
                        scaled_ac_g_loss.backward(retain_graph=True)
                else:
                    loss.backward(retain_graph=True)
# Example 7
 def g_backward(self):
     """Backprop the summed generator losses with the discriminator frozen
     so its parameters accumulate no gradient from this objective."""
     with utils.temporary_freeze(self.disc):
         sum(self.g_losses.values()).backward()
# Example 8
 def d_backward(self):
     """Backprop the summed discriminator losses with the generator frozen
     so its parameters accumulate no gradient from this objective."""
     with utils.temporary_freeze(self.gen):
         sum(self.d_losses.values()).backward()