def get_current_errors(self):
    """Return an OrderedDict of the current scalar losses for logging.

    Always contains the discriminator real/fake, generator, cycle and
    autoencoder losses; depending on ``self.opt`` flags it also appends
    identity, gradient-penalty, color and per-network gradient terms.

    Returns:
        OrderedDict[str, float]: loss name -> scalar value, in a stable
        logging order.
    """
    ret = OrderedDict([
        ('D_A_real', self.loss_D_A_real.item()),
        ('D_A_fake', self.loss_D_A_fake.item()),
        ('G_A', self.loss_G_A.item()),
        ('Cyc_A', self.loss_cycle_A.item()),
        ('D_B_real', self.loss_D_B_real.item()),
        ('D_B_fake', self.loss_D_B_fake.item()),
        ('G_B', self.loss_G_B.item()),
        ('Cyc_B', self.loss_cycle_B.item()),
        ('AE', self.loss_AE.item()),
    ])
    # Assigning new keys appends them in order, so this is equivalent to the
    # previous OrderedDict(list(ret.items()) + [...]) rebuilds, without
    # copying the dict each time.
    if self.opt.identity > 0.0:
        ret['idt_A'] = self.loss_idt_A.item()
        ret['idt_B'] = self.loss_idt_B.item()
    if self.opt.lambda_gp > 0.0:
        ret['D_A_gp'] = self.loss_D_A_gp.item()
        ret['D_B_gp'] = self.loss_D_B_gp.item()
    if self.opt.lambda_color_mean > 0 or self.opt.lambda_color_sig_mean > 0:
        # Use .item() instead of the legacy .data[0] for consistency with
        # every other scalar extracted in this method.
        ret['G_A_color'] = self.loss_color_A.item()
        ret['G_B_color'] = self.loss_color_B.item()
    if self.opt.log_grad:
        # BUG FIX: the original list was missing a comma after
        # ('D_grad', g_D), which made Python treat the next tuple as a call
        # (TypeError), and it read g_G_C although the variable had been
        # assigned as g_G_c (NameError).
        ret['D_A_grad'] = util.get_grads(self.netD_A, ret_type='sum').item()
        ret['D_B_grad'] = util.get_grads(self.netD_B, ret_type='sum').item()
        ret['E_A_grad'] = util.get_grads(self.netE_A, ret_type='sum').item()
        ret['D_grad'] = util.get_grads(self.net_D, ret_type='sum').item()
        ret['G_A_grad'] = util.get_grads(self.netG_A, ret_type='sum').item()
        ret['E_C_grad'] = util.get_grads(self.netE_C, ret_type='sum').item()
        ret['G_C_grad'] = util.get_grads(self.netG_C, ret_type='sum').item()
        ret['G_B_grad'] = util.get_grads(self.netG_B, ret_type='sum').item()
    return ret
def get_network_grads(self):
    """Return a list of (label, gradients) pairs, one per sub-network.

    The gradients are obtained from ``util.get_grads``; the order of the
    pairs is fixed so downstream logging stays stable.
    """
    labelled_nets = (
        ('E_A', self.netE_A),
        ('D', self.net_D),
        ('G_A', self.netG_A),
        ('E_C', self.netE_C),
        ('G_C', self.netG_C),
        ('G_B', self.netG_B),
        ('D_A', self.netD_A),
        ('D_B', self.netD_B),
    )
    return [(label, util.get_grads(net)) for label, net in labelled_nets]
def get_current_errors(self):
    """Return an OrderedDict of current scalar losses for logging.

    Multi-scale variant: the discriminator/generator losses are stored as
    lists (one entry per scale), so keys are suffixed with the scale index
    (e.g. ``D_A_real_0``).  Optional terms are appended depending on
    ``self.opt`` flags.

    NOTE(review): this method reads scalars with the legacy ``.data[0]``
    API (pre-0.4 PyTorch); the sibling variant above uses ``.item()``.
    """
    ret = OrderedDict([])
    # One group of GAN losses per scale.
    for i in range(len(self.loss_D_A_real)):
        D_A_real = self.loss_D_A_real[i]
        D_A_fake = self.loss_D_A_fake[i]
        G_A = self.loss_G_A_list[i]
        D_B_real = self.loss_D_B_real[i]
        D_B_fake = self.loss_D_B_fake[i]
        G_B = self.loss_G_B_list[i]
        ret = OrderedDict(
            list(ret.items()) + [('D_A_real_%d' % i, D_A_real),
                                 ('D_A_fake_%d' % i, D_A_fake),
                                 ('G_A_%d' % i, G_A),
                                 ('D_B_real_%d' % i, D_B_real),
                                 ('D_B_fake_%d' % i, D_B_fake),
                                 ('G_B_%d' % i, G_B)])
        if self.opt.lambda_gp > 0.0:
            # WGAN-style gradient penalty, also per scale.
            gp_A = self.loss_D_A_gp[i]
            gp_B = self.loss_D_B_gp[i]
            ret = OrderedDict(
                list(ret.items()) + [('D_A_gp_%d' % i, gp_A),
                                     ('D_B_gp_%d' % i, gp_B)])
    # Cycle-consistency losses, per scale of the cycle list.
    for i in range(len(self.loss_cycle_A_list)):
        Cyc_A = self.loss_cycle_A_list[i]
        Cyc_B = self.loss_cycle_B_list[i]
        ret = OrderedDict(
            list(ret.items()) + [('Cyc_A_%d' % i, Cyc_A),
                                 ('Cyc_B_%d' % i, Cyc_B)])
        if self.opt.identity > 0.0:
            # NOTE(review): these keys are not indexed by i, so repeated
            # iterations just overwrite the same 'idt_A'/'idt_B' entries.
            idt_A = self.loss_idt_A.data[0]
            idt_B = self.loss_idt_B.data[0]
            ret = OrderedDict(
                list(ret.items()) + [('idt_A', idt_A), ('idt_B', idt_B)])
        # Gate masks are only defined for all but the last scale.
        if self.opt.alpha_gate != '' and i < len(self.loss_D_A_real) - 1:
            if self.opt.alpha_gate == 'simp':
                # Single shared gate on each generator.
                mask_A = self.netG_A.gate.mask
                mask_B = self.netG_B.gate.mask
                ret = OrderedDict(
                    list(ret.items()) + [('G_A_mask', mask_A),
                                         ('G_B_mask', mask_B)])
            elif 'trans' not in self.opt.alpha_gate:
                # HACK: per-scale gate attributes (gate0, gate1, ...) are
                # resolved dynamically via eval on a trusted format string.
                mask_A = eval('self.netG_A.gate%d.mask.mean()' % i)
                mask_B = eval('self.netG_B.gate%d.mask.mean()' % i)
                ret = OrderedDict(
                    list(ret.items()) + [('G_A_mask_%d' % i, mask_A.data[0]),
                                         ('G_B_mask_%d' % i, mask_B.data[0])])
    if self.opt.lambda_color_mean > 0 or self.opt.lambda_color_sig_mean > 0:
        ret = OrderedDict(
            list(ret.items()) + [('G_A_color', self.loss_color_A.data[0]),
                                 ('G_B_color', self.loss_color_B.data[0])])
    if self.opt.log_grad:
        # Summed gradient magnitudes of the four main networks.
        g_D_A = util.get_grads(self.netD_A, ret_type='sum').data[0]
        g_D_B = util.get_grads(self.netD_B, ret_type='sum').data[0]
        g_G_A = util.get_grads(self.netG_A, ret_type='sum').data[0]
        g_G_B = util.get_grads(self.netG_B,
                               ret_type='sum').data[0]
        ret = OrderedDict(
            list(ret.items()) + [('D_A_grad', g_D_A), ('D_B_grad', g_D_B),
                                 ('G_A_grad', g_G_A), ('G_B_grad', g_G_B)])
    # NOTE(review): duplicate of the color block above; the rebuild keeps
    # only one entry per key, so this is redundant but harmless.
    if self.opt.lambda_color_mean > 0 or self.opt.lambda_color_sig_mean > 0:
        ret = OrderedDict(
            list(ret.items()) + [('G_A_color', self.loss_color_A.data[0]),
                                 ('G_B_color', self.loss_color_B.data[0])])
    if self.opt.lambda_style > 0:
        ret = OrderedDict(
            list(ret.items()) + [('G_A_style', self.style_loss_A.data[0]),
                                 ('G_B_style', self.style_loss_B.data[0])])
    if self.opt.lambda_content > 0 or self.opt.lambda_content_l1 > 0:
        ret = OrderedDict(
            list(ret.items()) + [('G_A_cont', self.content_loss_A.data[0]),
                                 ('G_B_cont', self.content_loss_B.data[0])])
    return ret