def backward_G(self):
    """Backward pass for the generators.

    Scores every generated sample with netD, picks the best generator
    according to ``opt.find_version``, then backpropagates a real-label
    GAN loss for the best sample and a fake-label GAN loss for every
    other sample.  Stores the chosen index in ``self.best_netG_index``.
    """
    D_fake = []
    for sample in self.fake:
        D_fake.append(self.netD(sample))
    # Best generator index.  The strategies are mutually exclusive: the
    # chain must use elif, otherwise the trailing else overwrites the
    # index chosen for 'v1.2'.
    if self.opt.find_version == 'v1.2':
        # NOTE(review): this reads self.D_real/self.D_fake left over from
        # backward_D rather than the freshly computed local D_fake --
        # confirm that the stale values are intended here.
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    self.fake_like_prob = D_fake[index]
    self.fake_like_sample.data.copy_(self.fake[index].data)
    # The best sample should look real to the discriminator.
    self.label.data.fill_(1)
    self.loss_G_fake_like = self.criterionGAN(self.fake_like_prob, self.label)
    self.loss_G_fake_like.backward(retain_variables=True)
    # Every other sample is pushed towards the fake label.
    preb_G_gap = []
    self.label.data.fill_(0)
    for i in range(self.nums):
        if i != index:
            gap = self.criterionGAN(D_fake[i], self.label)
            gap.backward(retain_variables=True)
            preb_G_gap.append(gap)
    self.loss_G_lambda = sum(preb_G_gap)
    self.loss_G = self.loss_G_fake_like + self.loss_G_lambda
    # NOTE(review): the component losses were already backpropagated
    # individually above, so this second backward accumulates their
    # gradients twice -- confirm this doubling is intended.
    self.loss_G.backward(retain_variables=True)
    self.best_netG_index = index
def backward_G(self):
    """Backward pass for the generators (L1-gap variant).

    The best sample receives a real-label GAN loss; every other sample
    is pulled towards the best one with an L1 penalty scaled by
    ``self.Lambda``.  Logs the chosen index and the generator loss.
    """
    D_fake = []
    for sample in self.fake:
        D_fake.append(self.netD(sample))
    # Best generator index; elif keeps the branches mutually exclusive so
    # the trailing else cannot clobber the 'v1.2' result.
    if self.opt.find_version == 'v1.2':
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # Periodically replace the choice with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    self.index_exp.add_scalar_value('index', index, step=self.cnt)
    # Use the freshly computed probability for the chosen sample; the
    # original read stale self.D_fake from backward_D, leaving the local
    # D_fake list computed above entirely unused.
    self.fake_like_prob = D_fake[index]
    self.fake_like_sample.data.copy_(self.fake[index].data)
    self.label.data.fill_(1)
    self.loss_G_fake_like = self.criterionGAN(self.fake_like_prob, self.label)
    self.loss_G_fake_like.backward(retain_variables=True)
    # L1 gap between each non-best sample and the best one.
    preb_G_gap = []
    for i in range(self.nums):
        if i != index:
            gap = self.criterionL1(self.fake[i], self.fake_like_sample) * self.Lambda
            gap.backward(retain_variables=True)
            preb_G_gap.append(gap)
    # Guard against nums == 1, where there are no gap terms to average
    # (the original divided by zero).
    if preb_G_gap:
        self.loss_G_lambda = sum(preb_G_gap) / len(preb_G_gap)
    else:
        self.loss_G_lambda = 0
    self.loss_G = self.loss_G_fake_like + self.loss_G_lambda
    self.best_netG_index = index
    self.G_exp.add_scalar_value('G_loss', self.loss_G.data[0], step=self.cnt)
def backward_D(self):
    """Backward pass for the discriminator and the auxiliary classifier.

    Generates fresh samples, scores them with netD/netC, selects the
    best generator, then backpropagates the real / best-fake GAN losses
    and the classification loss.
    """
    self.fake = self.generate_samples()
    # Discriminator / classifier outputs for every generated sample.
    self.D_fake, self.C_fake = [], []
    for i in range(self.nums):
        self.D_fake.append(self.netD(self.fake[i]))
        self.C_fake.append(self.netC(self.fake[i]))
    # Best generator index; elif keeps the four strategies mutually
    # exclusive (the bare else used to overwrite the 'v1.2'/'v1.1' result).
    if self.opt.find_version == 'v1.2':
        # NOTE(review): self.D_real here is the value from the previous
        # iteration -- it is recomputed only below.  Confirm intended.
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    elif self.opt.find_version == 'v1.3':
        index = find_best_netG_v1dot3(self.C_fake, self.target)
    else:
        index = find_best_netG(self.D_fake)
    # in v1.4.4, compute prob of real X; a conditional D sees X linked
    # with its target label.
    if self.condition_D:
        self.condition_data.data.copy_(link2condition_data(self.X.data, self.target))
        self.D_real = self.netD(self.condition_data)
        self.C_real = self.netC(self.condition_data)
    else:
        self.D_real = self.netD(self.X)
        self.C_real = self.netC(self.X)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    # Keep a copy of the best sample and its probability.
    sample = self.fake[index]
    self.real_like_prob = self.D_fake[index]
    self.real_like_sample.data.copy_(sample.data)
    # Real batch towards the real label.
    self.label.data.fill_(1)
    self.loss_D_real = self.criterionGAN(self.D_real, self.label)
    self.loss_D_real.backward(retain_variables=True)
    # Best fake towards the fake label.
    self.label.data.fill_(0)
    self.loss_D_real_like = self.criterionGAN(self.real_like_prob, self.label)
    self.loss_D_real_like.backward(retain_variables=True)
    # Classifier loss on both the real batch and the best fake.
    self.loss_C = self.criterionEntropy(self.C_real, self.target) \
        + self.criterionEntropy(self.C_fake[index], self.target)
    self.loss_C.backward(retain_variables=True)
    self.class_exp.add_scalar_value('C_loss', self.loss_C.data[0], step=self.cnt)
    self.loss_D = self.loss_D_real + self.loss_D_real_like
    self.cnt = self.cnt + 1
    self.D_exp.add_scalar_value('D_loss', self.loss_D.data[0], step=self.cnt)
def backward_D(self):
    """Backward pass for the discriminator (v1.4.4, all fakes penalised).

    Scores every generated sample (conditioned on the best index when
    ``condition_D``), then backpropagates the real-label loss plus the
    average fake-label loss over all samples.
    """
    self.fake = self.generate_samples()
    # Best generator index; elif prevents the trailing else from
    # clobbering the 'v1.2' result.
    if self.opt.find_version == 'v1.2':
        # NOTE(review): self.D_real / self.D_fake are stale values from
        # the previous call -- they are recomputed only below.  Confirm
        # that selecting on the stale scores is intended.
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # in v1.4.4, compute prob of each sample; take index as the fake label.
    self.D_fake = []
    for i in range(self.nums):
        if self.condition_D:
            self.condition_data.data.copy_(
                link2condition_data(self.fake[i].data, index))
            D_fake = self.netD(self.condition_data)
        else:
            D_fake = self.netD(self.fake[i])
        self.D_fake.append(D_fake)
    # in v1.4.4, compute prob of real X; link self.X and its target.
    if self.condition_D:
        self.condition_data.data.copy_(
            link2condition_data(self.X.data, self.target))
        self.D_real = self.netD(self.condition_data)
    else:
        self.D_real = self.netD(self.X)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    # Keep the best sample and its probability.
    self.real_like_prob = self.D_fake[index]
    self.real_like_sample.data.copy_(self.fake[index].data)
    # Real batch towards the real label.
    self.label.data.fill_(1)
    self.loss_D_real = self.criterionGAN(self.D_real, self.label)
    # Every generated sample towards the fake label, averaged.
    self.label.data.fill_(0)
    self.loss_D_fake = 0
    for i in range(self.nums):
        self.loss_D_fake += self.criterionGAN(self.D_fake[i], self.label)
    self.loss_D_fake = self.loss_D_fake / self.nums
    self.loss_D = self.loss_D_real + self.loss_D_fake
    self.loss_D.backward(retain_variables=True)
    self.cnt = self.cnt + 1
def backward_D(self):
    """Backward pass for the discriminator (best sample only).

    Only the best generated sample is pushed towards the fake label; an
    extra L1 term ties that sample to the real batch.  Removed a leftover
    debug print and dead commented-out per-term backward calls.
    """
    self.fake = self.generate_samples()
    # Best generator index; elif prevents the bare else from overwriting
    # the 'v1.2' choice.
    if self.opt.find_version == 'v1.2':
        # NOTE(review): self.D_real / self.D_fake are values left over
        # from the previous iteration -- confirm intended.
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # in v1.4.4, compute prob of the best sample; take index as fake label.
    sample = self.fake[index]
    if self.condition_D:
        self.condition_data.data.copy_(link2condition_data(sample.data, index))
        self.D_fake = self.netD(self.condition_data)
    else:
        self.D_fake = self.netD(sample)
    # in v1.4.4, compute prob of real X; link self.X and its target.
    if self.condition_D:
        self.condition_data.data.copy_(link2condition_data(self.X.data, self.target))
        self.D_real = self.netD(self.condition_data)
    else:
        self.D_real = self.netD(self.X)
    # Periodically replace the best index with a random generator.
    # NOTE(review): D_fake above was computed with the pre-random index,
    # while the L1 term below uses the possibly-randomised one -- confirm.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    # Keep the best sample and its probability.
    self.real_like_prob = self.D_fake
    self.real_like_sample.data.copy_(sample.data)
    # Real batch towards the real label.
    self.label.data.fill_(1)
    self.loss_D_real = self.criterionGAN(self.D_real, self.label)
    # Best fake towards the fake label.
    self.label.data.fill_(0)
    self.loss_D_real_like = self.criterionGAN(self.D_fake, self.label)
    # L1 regulariser between the chosen fake and the real batch.
    self.loss_D_lambda = self.criterionL1(self.fake[index], self.X)
    self.loss_D = self.loss_D_real + self.loss_D_real_like + self.loss_D_lambda
    self.loss_D.backward(retain_variables=True)
    self.cnt = self.cnt + 1
    self.D_exp.add_scalar_value('D_loss', self.loss_D.data[0], step=self.cnt)
def backward_D(self):
    """Backward pass for the discriminator (non-best fakes averaged).

    Real batch and the best fake get standard GAN losses; every other
    fake contributes a fake-label loss scaled by 1/(nums - 1).
    """
    self.D_real = self.netD(self.X)
    self.D_fake = []
    self.fake = self.generate_samples()
    for sample in self.fake:
        self.D_fake.append(self.netD(sample))
    # Best generator index; elif prevents the trailing else from
    # clobbering the 'v1.2' result.
    if self.opt.find_version == 'v1.2':
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    self.real_like_prob = self.D_fake[index]
    self.real_like_sample.data.copy_(self.fake[index].data)
    # Real batch towards the real label.
    self.label.data.fill_(1)
    self.loss_D_real = self.criterionGAN(self.D_real, self.label)
    self.loss_D_real.backward(retain_variables=True)
    # Best fake towards the fake label.
    self.label.data.fill_(0)
    self.loss_D_real_like = self.criterionGAN(self.real_like_prob, self.label)
    self.loss_D_real_like.backward(retain_variables=True)
    # Remaining fakes, each scaled by 1/(nums - 1) before its backward.
    preb_D_gap = []
    self.label.data.fill_(0)
    for i in range(self.nums):
        if i != index:
            gap = self.criterionGAN(self.D_fake[i], self.label) / (self.nums - 1)
            gap.backward(retain_variables=True)
            preb_D_gap.append(gap)
    # Guard nums == 1: no gap terms to average (the original divided by
    # zero here).
    # NOTE(review): each gap is already divided by (nums - 1), so this
    # average divides again -- the logged loss_D_fake is normalised twice
    # relative to the gradients.  Confirm intended.
    if preb_D_gap:
        self.loss_D_fake = sum(preb_D_gap) / len(preb_D_gap)
    else:
        self.loss_D_fake = 0
    self.loss_D = self.loss_D_real + self.loss_D_real_like + self.loss_D_fake
    self.cnt = self.cnt + 1
    self.D_exp.add_scalar_value('D_loss', self.loss_D.data[0], step=self.cnt)
def backward_G(self):
    """Backward pass for the generators (conditional-D variant).

    The best sample gets a real-label GAN loss plus a Lambda-scaled L1
    term towards the real batch; non-best entries of D_fakes receive a
    fake-label loss.  Removed a leftover debug print and dead
    commented-out backward calls.
    """
    # Best generator index; elif stops the trailing else from clobbering
    # the 'v1.2' result.
    if self.opt.find_version == 'v1.2':
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # in v1.4.4, compute prob of the best sample; take index as fake label.
    sample = self.fake[index]
    self.D_fakes = []
    for i in range(self.nums):
        # NOTE(review): every iteration scores the same best `sample`
        # (not self.fake[i]), so all entries of D_fakes are identical --
        # confirm this is intended.
        if self.condition_D:
            self.condition_data.data.copy_(link2condition_data(sample.data, index))
            D_fake = self.netD(self.condition_data)
        else:
            D_fake = self.netD(sample)
        self.D_fakes.append(D_fake)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    # Log the index; keep the best sample and its probability.
    self.index_exp.add_scalar_value('index', index, step=self.cnt)
    self.fake_like_prob = D_fake
    self.fake_like_sample.data.copy_(sample.data)
    # Best sample should look real to the discriminator.
    self.label.data.fill_(1)
    self.loss_G_fake_like = self.criterionGAN(self.fake_like_prob, self.label)
    # L1 regulariser towards the real batch, scaled by Lambda.
    self.loss_G_lambda = self.criterionL1(sample, self.X) * self.Lambda
    # Non-best generators are pushed towards the fake label.
    self.label.data.fill_(0)
    for i in range(self.nums):
        if i != index:
            gap = self.criterionGAN(self.D_fakes[i], self.label)
            gap.backward(retain_variables=True)
    self.loss_G = self.loss_G_fake_like + self.loss_G_lambda
    self.loss_G.backward(retain_variables=True)
    self.best_netG_index = index
    self.G_exp.add_scalar_value('G_loss', self.loss_G.data[0], step=self.cnt)
def backward_G(self):
    """Backward pass for the generators (classifier-aided variant).

    Combines the best sample's real-label GAN loss with the classifier
    loss; non-best samples receive a fake-label loss.  Removed dead
    commented-out backward calls.
    """
    # Discriminator / classifier outputs for every generated sample.
    D_fake, C_fake = [], []
    for i in range(self.nums):
        D_fake.append(self.netD(self.fake[i]))
        C_fake.append(self.netC(self.fake[i]))
    # Best generator index; elif keeps the four strategies mutually
    # exclusive (the bare else used to overwrite the 'v1.2'/'v1.1' result).
    if self.opt.find_version == 'v1.2':
        index = find_best_netG_v1dot2(self.D_real, D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    elif self.opt.find_version == 'v1.3':
        index = find_best_netG_v1dot3(C_fake, self.target)
    else:
        index = find_best_netG(D_fake)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    # Log the index; keep the best sample and its probability.
    self.index_exp.add_scalar_value('index', index, step=self.cnt)
    sample = self.fake[index]
    self.fake_like_prob = D_fake[index]
    self.fake_like_sample.data.copy_(sample.data)
    # Best sample should look real to the discriminator.
    self.label.data.fill_(1)
    self.loss_G_fake_like = self.criterionGAN(self.fake_like_prob, self.label)
    # Classifier loss on the real batch and the best fake.
    self.loss_C = self.criterionEntropy(self.C_real, self.target) \
        + self.criterionEntropy(C_fake[index], self.target)
    # Non-best samples are pushed towards the fake label.
    self.label.data.fill_(0)
    for i in range(self.nums):
        if i != index:
            gap = self.criterionGAN(D_fake[i], self.label)
            gap.backward(retain_variables=True)
    self.loss_G = self.loss_G_fake_like + self.loss_C
    self.loss_G.backward(retain_variables=True)
    self.best_netG_index = index
    self.G_exp.add_scalar_value('G_loss', self.loss_G.data[0], step=self.cnt)
def backward_G(self):
    """Backward pass updating each generator with its own optimiser.

    Every generator is zeroed, trained towards the real label, and
    stepped via its solver; the best one additionally receives the
    Lambda-scaled L1 term towards the real batch.
    """
    # Best generator index; elif prevents the trailing else from
    # clobbering the 'v1.2' result.
    if self.opt.find_version == 'v1.2':
        index = find_best_netG_v1dot2(self.D_real, self.D_fake)
    elif self.opt.find_version == 'v1.1':
        index = find_best_netG_v1dot1(self.criterionL1, self.fake, self.X)
    else:
        index = find_best_netG(self.D_fake)
    # in v1.4.4, compute prob of the best sample; take index as fake label.
    sample = self.fake[index]
    D_fakes = []
    for i in range(self.nums):
        # NOTE(review): every iteration scores the same best `sample`
        # rather than self.fake[i], so all D_fakes entries are identical
        # -- confirm this is intended.
        if self.condition_D:
            self.condition_data.data.copy_(
                link2condition_data(sample.data, index))
            D_fake = self.netD(self.condition_data)
        else:
            D_fake = self.netD(sample)
        D_fakes.append(D_fake)
    # Periodically replace the best index with a random generator.
    if self.opt.random and self.cnt % self.opt.display_it == 0:
        index = random.randint(0, self.nums - 1)
    self.fake_like_sample.data.copy_(self.fake[index].data)
    self.label.data.fill_(1)
    for netG in self.netGs:
        netG.zero_grad()
    for i in range(self.nums):
        self.loss_G = self.criterionGAN(D_fakes[i], self.label)
        if i == index:
            # Best generator also pulls its sample towards the real batch.
            self.loss_G_lambda = self.criterionL1(self.fake[index], self.X) * self.Lambda
            self.loss_G += self.loss_G_lambda
        # Both original branches performed the same backward; hoisted out
        # of the redundant if/else.
        self.loss_G.backward(retain_variables=True)
        self.G_solvers[i].step()
    self.best_netG_index = index