def _train_cls(self, args, epoch, disc, train_loader, optimizer_d):
    """Train only the discriminator's classification head for one epoch.

    Runs a plain supervised pass: for each batch, the discriminator `disc`
    consumes `featmaps`, a cross-entropy loss is computed against `targets`,
    and `optimizer_d` is stepped. No generator is involved, so the
    generator-side accumulators stay at zero.

    Args:
        args: namespace with `device`, `visualize_freq` (others unused here).
        epoch: current epoch index, used for periodic logging.
        disc: discriminator returning (features, class logits, adv logits).
        train_loader: yields (inputs, featmaps, targets) batches.
        optimizer_d: optimizer over the discriminator's parameters.

    Returns:
        Whatever `return_statement(...)` packages from the epoch statistics.
    """
    cls_criterion = nn.CrossEntropyLoss()
    _loss_cls_gen, _loss_adv_gen = 0., 0.
    _loss_cls, _loss_adv = 0., 0.
    ypred, ypred_gen = [], []
    ytrue, ytrue_gen = [], []
    for i, (inputs, featmaps, targets) in enumerate(train_loader):
        inputs, featmaps, targets = inputs.to(args.device), featmaps.to(args.device), targets.to(args.device)
        # Optimize discriminator (classification loss only in this routine).
        optimizer_d.zero_grad()
        feats, logits_cls, logits_adv = disc(featmaps)
        loss_cls = cls_criterion(logits_cls, targets.long())
        _loss_cls += loss_cls.item()
        loss = loss_cls.clone()
        preds = F.softmax(logits_cls, dim=1).argmax(dim=1).cpu().numpy().tolist()
        ypred.extend(preds)
        # BUG FIX: accumulate plain ints, not device tensors — np.array() on a
        # list of CUDA tensors raises; mirrors the `preds` conversion above.
        ytrue.extend(targets.cpu().numpy().tolist())
        loss.backward()
        optimizer_d.step()
    acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)
    # BUG FIX: no generated samples exist in this routine, so ytrue_gen is
    # always empty and the original division by len(ytrue_gen) raised
    # ZeroDivisionError. Report 0. when there is nothing to score.
    acc_gen = (round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() / len(ytrue_gen), 4)
               if ytrue_gen else 0.)
    if epoch % args.visualize_freq == 0 or epoch == 1:
        print_statement(epoch, i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
    return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
def _train_joint(self, args, epoch, disc, gen, train_loader, optimizer_d, optimizer_g, train_output_dir):
    """Jointly train discriminator and generator for one epoch.

    Per batch, two alternating steps:
      1. Discriminator step: classification loss on real feature maps, plus
         (if `args.adv`) a real/fake adversarial loss against generator output.
      2. Generator step: with `disc` in eval mode, the generator reconstructs
         an image from the (detached) discriminator features; it is trained to
         make `disc` classify the reconstruction correctly and (if `args.adv`)
         to fool the adversarial head.

    Args:
        args: namespace with `device`, `adv`, `adv_type`, `adv_w`, `cls_w`,
            `visualize_freq`.
        epoch: current epoch index, used for logging/visualization cadence.
        disc: discriminator returning (features, class logits, adv logits).
        gen: generator mapping a (B, C, 1, 1) feature vector to an image.
        train_loader: yields (inputs, featmaps, targets) batches.
        optimizer_d / optimizer_g: optimizers for disc / gen respectively.
        train_output_dir: directory prefix for visualization snapshots.

    Returns:
        Whatever `return_statement(...)` packages from the epoch statistics.
    """
    cls_criterion = nn.CrossEntropyLoss()
    _loss_cls_gen, _loss_adv_gen = 0., 0.
    _loss_cls, _loss_adv = 0., 0.
    ypred, ypred_gen = [], []
    ytrue, ytrue_gen = [], []
    for i, (inputs, featmaps, targets) in enumerate(train_loader):
        inputs, featmaps, targets = inputs.to(args.device), featmaps.to(args.device), targets.to(args.device)

        # ---- Discriminator step ----
        optimizer_d.zero_grad()
        feats, logits_cls, logits_adv = disc(featmaps)
        loss_cls = cls_criterion(logits_cls, targets.long())
        _loss_cls += loss_cls.item()
        loss = loss_cls.clone()
        if args.adv:
            # Detach the features so this step only trains the discriminator.
            gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())
            feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)
            loss_adv = (adversarial_loss(logits_adv, is_real=True, is_disc=True, type_=args.adv_type)
                        + adversarial_loss(logits_adv_gen, is_real=False, is_disc=True, type_=args.adv_type))
            _loss_adv += loss_adv.item()
            loss += args.adv_w * loss_adv.clone() / 2.
        preds = F.softmax(logits_cls, dim=1).argmax(dim=1).cpu().numpy().tolist()
        ypred.extend(preds)
        # BUG FIX: accumulate plain ints, not device tensors — np.array() on a
        # list of CUDA tensors raises; mirrors the `preds` conversion above.
        ytrue.extend(targets.cpu().numpy().tolist())
        loss.backward()
        optimizer_d.step()

        # ---- Generator step (discriminator frozen in eval mode) ----
        disc.eval()
        optimizer_g.zero_grad()
        feats, logits_cls, logits_adv = disc(featmaps)
        gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())
        feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)
        loss_cls_gen = cls_criterion(logits_cls_gen, targets.long())
        _loss_cls_gen += loss_cls_gen.item()
        loss = args.cls_w * loss_cls_gen.clone()
        if args.adv:
            loss_adv_gen = adversarial_loss(logits_adv_gen, is_real=True, is_disc=False, type_=args.adv_type)
            _loss_adv_gen += loss_adv_gen.item()
            loss += args.adv_w * loss_adv_gen.clone()
        preds_gen = F.softmax(logits_cls_gen, dim=1).argmax(dim=1).cpu().numpy().tolist()
        ypred_gen.extend(preds_gen)
        # BUG FIX: same device-tensor accumulation issue as `ytrue` above.
        ytrue_gen.extend(targets.cpu().numpy().tolist())
        loss.backward()
        optimizer_g.step()
        disc.train()

    acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)
    acc_gen = round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() / len(ytrue_gen), 4)
    if epoch % args.visualize_freq == 0 or epoch == 1:
        print_statement(epoch, i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
        # Snapshot the last batch's real feature map vs. generated image.
        visualize(featmaps[0], gen_image[0], out_dir=train_output_dir + str(epoch) + "_" + str(i) + ".jpg")
    return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)