Example #1
    def _train_cls(self, args, epoch, disc, train_loader, optimizer_d):
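        # Trains only the classification head of the discriminator for one epoch
        # (no generator is involved, so the *_gen statistics stay at zero).
        # Assumed to be available elsewhere in the module (not shown in this snippet):
        # numpy as np, torch.nn as nn, torch.nn.functional as F, and the
        # print_statement/return_statement helpers.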
        cls_criterion = nn.CrossEntropyLoss()
        _loss_g, _loss_cls_gen, _loss_adv_gen = 0., 0., 0.
        _loss_d, _loss_cls, _loss_adv = 0., 0., 0.
        ypred, ypred_gen = [], []
        ytrue, ytrue_gen = [], []
    #     gen_loss_lst = []
        for i, (inputs, featmaps, targets) in enumerate(train_loader):
            inputs, featmaps, targets = inputs.to(args.device), featmaps.to(args.device), targets.to(args.device)
            # Optimize Discriminator
            loss = 0
            optimizer_d.zero_grad()
            feats, logits_cls, logits_adv = disc(featmaps)
            loss_cls = cls_criterion(logits_cls, targets.long())
            _loss_cls += loss_cls.item()
            loss = loss_cls.clone()

            preds = F.softmax(logits_cls, dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred.extend(preds)
            ytrue.extend(targets.cpu().numpy().tolist())  # move labels to the CPU so np.array() below works

            loss.backward()
            optimizer_d.step()
  
        acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)
        # no generated samples are classified in this method, so avoid a divide-by-zero / NaN accuracy
        acc_gen = round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() / len(ytrue_gen), 4) if ytrue_gen else 0.
        if epoch % args.visualize_freq == 0 or epoch == 1:
            print_statement(epoch, i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
        return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
Example #2
    def _test(self, args, epoch, disc, gen, test_loader, test_output_dir):
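        # Evaluation pass: the discriminator scores the real feature maps and the
        # generator's reconstructions of them; classification and (optionally)
        # adversarial losses are accumulated and every 10th batch is visualized.
        # Note: the loop runs without torch.no_grad(), so autograd state is kept.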
        mse_criterion = nn.MSELoss()
        cls_criterion = nn.CrossEntropyLoss()
        _loss_g, _loss_cls_gen, _loss_adv_gen = 0., 0., 0.
        _loss_d, _loss_cls, _loss_adv = 0., 0., 0.
        _loss = 0.
        ypred, ypred_gen = [], []
        ytrue, ytrue_gen = [], []
        for i, (inputs, featmaps, targets) in enumerate(test_loader):
            loss = 0
            inputs, featmaps, targets = inputs.to(args.device), featmaps.to(args.device), targets.to(args.device)

            feats, logits_cls, logits_adv = disc(featmaps)
            loss_cls = cls_criterion(logits_cls, targets.long())
            loss = loss_cls
            _loss_cls += loss_cls.item()

            preds = F.softmax(logits_cls, dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred.extend(preds)
            ytrue.extend(targets.cpu().numpy().tolist())
            
#             feats, gen_targets = self._sample_vecs(inputs.shape[0])
#             feats = feats.to(args.device)
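            # reconstruct a feature map from the (detached) discriminator features
            # and score the reconstruction with the discriminator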
            gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())
            feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)
            loss_cls_gen = cls_criterion(logits_cls_gen, targets.long())
            loss += args.cls_w*loss_cls_gen
            _loss_cls_gen += loss_cls_gen.item()   

            if args.adv:
                loss_adv = (adversarial_loss(logits_adv, is_real=True, is_disc=True,
                                             type_=args.adv_type) +
                            adversarial_loss(logits_adv_gen, is_real=False,
                                             is_disc=True, type_=args.adv_type))
                _loss_adv += loss_adv.item()
                loss += args.adv_w*loss_adv.clone()/2.


                loss_adv_gen = adversarial_loss(logits_adv_gen, is_real=True, is_disc=False, type_=args.adv_type)
                _loss_adv_gen += loss_adv_gen.item()
                loss += args.adv_w*loss_adv_gen.clone()
            preds_gen = F.softmax(logits_cls_gen, dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred_gen.extend(preds_gen)
            ytrue_gen.extend(targets.cpu().numpy().tolist())
            _loss += loss.item()

            if i%10 == 0:
                visualize(featmaps[0], gen_image[0],
                          out_dir = test_output_dir + str(epoch) + "_" + str(i) + ".jpg")

        acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)  
        acc_gen = round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() / len(ytrue_gen), 4)  

        print("Test Set Epoch {}, Training Iteration {}".format(epoch, i))
        print("Accuracy: {}, Accuracy gen: {}".format(acc, acc_gen))
        print("Loss: {}, Loss_cls: {}, Loss_cls_gen: {}"
              .format(_loss/(i+1),_loss_cls/(i+1),_loss_cls_gen/(i+1)))
        if args.adv:
            print("Loss_adv: {}, Loss_adv_gen: {}"
                  .format(_loss_adv/(i+1),_loss_adv_gen/(i+1)))
        return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
Example #3
    def _train_joint(self, args, epoch, disc, gen, train_loader, optimizer_d, optimizer_g, train_output_dir):
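        # Joint GAN-style training step, run once per batch:
        #   1) discriminator update: classification loss on real feature maps plus,
        #      if args.adv, an adversarial real-vs-generated loss;
        #   2) generator update: classification and (optional) adversarial loss on
        #      feature maps generated from the detached discriminator features.
        # Helpers such as adversarial_loss, visualize, print_statement and
        # return_statement are assumed to be defined elsewhere in the module.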
        cls_criterion = nn.CrossEntropyLoss()
        _loss_g, _loss_cls_gen, _loss_adv_gen = 0., 0., 0.
        _loss_d, _loss_cls, _loss_adv = 0., 0., 0.
        ypred, ypred_gen = [], []
        ytrue, ytrue_gen = [], []
    #     gen_loss_lst = []
        for i, (inputs, featmaps, targets) in enumerate(train_loader):
            inputs, featmaps, targets = inputs.to(args.device), featmaps.to(args.device), targets.to(args.device)
            # Optimize Discriminator
            loss = 0
            optimizer_d.zero_grad()
            feats, logits_cls, logits_adv = disc(featmaps)
            loss_cls = cls_criterion(logits_cls, targets.long())

            _loss_cls += loss_cls.item()
            loss = loss_cls.clone()
            if args.adv:
#                 feats, gen_targets = self._sample_vecs(inputs.shape[0])
#                 feats = feats.to(args.device)
                gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())

                feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)

                loss_adv = (adversarial_loss(logits_adv, is_real=True, is_disc=True,
                                             type_=args.adv_type) +
                            adversarial_loss(logits_adv_gen, is_real=False,
                                             is_disc=True, type_=args.adv_type))
                _loss_adv += loss_adv.item()
                loss += args.adv_w*loss_adv.clone()/2.

            preds = F.softmax(logits_cls, dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred.extend(preds)
            ytrue.extend(targets.cpu().numpy().tolist())

            loss.backward()
            optimizer_d.step()

            disc.eval()
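            # eval mode only switches BatchNorm/Dropout behaviour; the discriminator
            # weights stay fixed below because only optimizer_g.step() is called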

            loss = 0
            optimizer_g.zero_grad()

            # Optimize Generator
            feats, logits_cls, logits_adv = disc(featmaps)
#             feats, gen_targets = self._sample_vecs(inputs.shape[0])
#             feats, gen_targets = feats.to(args.device), gen_targets.to(args.device)
            gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())

            feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)

            loss_cls_gen = cls_criterion(logits_cls_gen, targets.long())
#             loss_cls_gen = cls_criterion(logits_cls_gen, gen_targets.long())
            _loss_cls_gen += loss_cls_gen.item() 
            loss = args.cls_w*loss_cls_gen.clone()

            if args.adv:
                loss_adv_gen = adversarial_loss(logits_adv_gen, is_real=True, is_disc=False, type_=args.adv_type)
                _loss_adv_gen += loss_adv_gen.item()
                loss += args.adv_w*loss_adv_gen.clone()

            preds_gen = F.softmax(logits_cls_gen, dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred_gen.extend(preds_gen)
            ytrue_gen.extend(targets.cpu().numpy().tolist())

            loss.backward()
            optimizer_g.step()

            disc.train()    

        acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)  
        acc_gen = round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() / len(ytrue_gen), 4)  
        
        if epoch % args.visualize_freq == 0 or epoch == 1:
            print_statement(epoch, i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
            visualize(featmaps[0], gen_image[0],
                      out_dir = train_output_dir + str(epoch) + "_" + str(i) + ".jpg")
        return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen, _loss_adv, _loss_adv_gen)
Example #4
    def _test(self, args, epoch, disc, gen, test_loader, test_output_dir):
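        # Extended evaluation: a first pass over the test set caches flattened real
        # and generated feature maps per class and saves pairwise-distance heatmaps;
        # a second pass accumulates the classification, adversarial, reconstruction
        # and MSE losses. Assumes matplotlib.pyplot as plt, a pairwise_distances
        # helper (presumably sklearn.metrics.pairwise_distances) and the other
        # project helpers are available elsewhere in the module.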
        mse_criterion = nn.MSELoss()
        cls_criterion = nn.CrossEntropyLoss()
        _loss_g, _loss_cls_gen, _loss_adv_gen = 0., 0., 0.
        _loss_d, _loss_cls, _loss_adv = 0., 0., 0.
        _loss_recon, _loss_mse = 0., 0.
        _loss = 0.
        ypred, ypred_gen = [], []
        ytrue, ytrue_gen = [], []
        cls_count = [0] * 10

        class_featmaps = np.zeros((10, 1000, 256 * 14 * 14))
        class_featmaps_gen = np.zeros((10, 1000, 256 * 14 * 14))
        class_idx = [0] * 10
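
        # First pass: cache up to 1000 flattened (256 x 14 x 14) real and generated
        # feature maps per class for the distance plots below.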

        for i, (inputs, featmaps, targets, indexes) in enumerate(test_loader):
            inputs, featmaps, targets = inputs.to(args.device), featmaps.to(
                args.device), targets.to(args.device)

            feats, gen_targets = self._sample_vecs_index(inputs.shape[0])
            feats, gen_targets = feats.to(args.device), gen_targets.to(
                args.device)
            gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())

            for j, target in enumerate(
                    targets.detach().cpu().numpy().astype(int)):
                class_featmaps[target, class_idx[target]] = featmaps[j].view(
                    256 * 14 * 14).detach().cpu().numpy()
                class_featmaps_gen[target,
                                   class_idx[target]] = gen_image[j].view(
                                       256 * 14 * 14).detach().cpu().numpy()
                class_idx[target] += 1

        print(class_idx)
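        # Pairwise L2 and cosine distance heatmaps: real vs. real, generated vs.
        # generated, and real vs. generated feature maps.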
        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps[0:10]), metric='l2'))
        plt.colorbar()
        plt.savefig("self.png")
        plt.close()

        print(class_idx)
        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps_gen[0:10]),
                               metric='l2'))
        plt.colorbar()
        plt.savefig("self_gen.png")
        plt.close()

        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps[0:10]),
                               np.vstack(class_featmaps_gen[0:10]),
                               metric='l2'))
        plt.colorbar()
        plt.savefig("pair.png")
        plt.close()

        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps[0:10]),
                               metric='cosine'))
        plt.colorbar()
        plt.savefig("self_cosine.png")
        plt.close()

        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps_gen[0:10]),
                               metric='cosine'))
        plt.colorbar()
        plt.savefig("self_gen_cosine.png")
        plt.close()

        plt.figure(figsize=(12, 12))
        plt.imshow(
            pairwise_distances(np.vstack(class_featmaps[0:10]),
                               np.vstack(class_featmaps_gen[0:10]),
                               metric='cosine'))
        plt.colorbar()
        plt.savefig("pair_cosine.png")
        plt.close()
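
        # Second pass: accumulate classification, adversarial, reconstruction
        # (cosine) and MSE losses on real inputs and generated samples.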

        for i, (images, featmaps, targets, indexes) in enumerate(test_loader):
            loss = 0
            images, featmaps, targets = images.to(args.device), featmaps.to(
                args.device), targets.to(args.device)
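            # raw images are rescaled (presumably from [0, 1] to [-1, 1]); otherwise
            # the precomputed feature maps are used as discriminator input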
            if args.data == "image":
                inputs = (images * 2) - 1
            else:
                inputs = featmaps
            feats, logits_cls, logits_adv = disc(inputs)
            loss_cls = cls_criterion(logits_cls, targets.long())
            loss = loss_cls
            _loss_cls += loss_cls.item()

            preds = F.softmax(logits_cls,
                              dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred.extend(preds)
            ytrue.extend(targets.cpu().numpy().tolist())

            feats, gen_targets = self._sample_vecs_index(inputs.shape[0])
            feats, gen_targets = feats.to(args.device), gen_targets.to(
                args.device)
            gen_image = gen(feats.unsqueeze(2).unsqueeze(3).detach())
            feats_gen, logits_cls_gen, logits_adv_gen = disc(gen_image)
            loss_cls_gen = cls_criterion(logits_cls_gen, gen_targets.long())
            loss += args.cls_w * loss_cls_gen
            _loss_cls_gen += loss_cls_gen.item()

            if args.adv:
                loss_adv = (adversarial_loss(logits_adv,
                                             is_real=True,
                                             is_disc=True,
                                             type_=args.adv_type) +
                            adversarial_loss(logits_adv_gen,
                                             is_real=False,
                                             is_disc=True,
                                             type_=args.adv_type))
                _loss_adv += loss_adv.item()
                loss += args.adv_w * loss_adv.clone() / 2.

                loss_adv_gen = adversarial_loss(logits_adv_gen,
                                                is_real=True,
                                                is_disc=False,
                                                type_=args.adv_type)
                _loss_adv_gen += loss_adv_gen.item()
                loss += args.adv_w * loss_adv_gen.clone()
            if args.recon:
                loss_recon = (1 - nn.CosineSimilarity(dim=1, eps=1e-6)(
                    feats_gen, feats).mean())
                loss += args.adv_r * loss_recon.clone()
                _loss_recon += loss_recon.item()
            if args.mse:
                loss_mse = mse_criterion(gen_image, inputs)
                loss += args.mse_w * loss_mse.clone()
                _loss_mse += args.mse_w * loss_mse.item()

            preds_gen = F.softmax(logits_cls_gen,
                                  dim=1).argmax(dim=1).cpu().numpy().tolist()
            ypred_gen.extend(preds_gen)
            ytrue_gen.extend(gen_targets.cpu().numpy().tolist())
            _loss += loss.item()

            if i % 10 == 0:
                visualize(inputs[0],
                          gen_image[0],
                          out_dir=test_output_dir + str(epoch) + "_" + str(i) +
                          ".jpg",
                          featmap=(args.data == "featmap"))

            if sum(cls_count) < 50:
                cls_count = visualize_classes(inputs, gen_image, gen_targets,
                                              cls_count, test_output_dir,
                                              args.data == "featmap")

        acc = round((np.array(ypred) == np.array(ytrue)).sum() / len(ytrue), 4)
        acc_gen = round((np.array(ypred_gen) == np.array(ytrue_gen)).sum() /
                        len(ytrue_gen), 4)

        print("Test Set Epoch {}, Training Iteration {}".format(epoch, i))
        print("Accuracy: {}, Accuracy gen: {}".format(acc, acc_gen))
        print("Loss: {}, Loss_cls: {}, Loss_cls_gen: {}".format(
            _loss / (i + 1), _loss_cls / (i + 1), _loss_cls_gen / (i + 1)))
        if args.adv:
            print("Loss_adv: {}, Loss_adv_gen: {}".format(
                _loss_adv / (i + 1), _loss_adv_gen / (i + 1)))
        if args.mse:
            print("Loss_mse: {}".format(_loss_mse / (i + 1)))
        return return_statement(i, acc, acc_gen, _loss_cls, _loss_cls_gen,
                                _loss_adv, _loss_adv_gen, _loss_recon,
                                _loss_mse)