class wgan:
    def __init__(self, args):
        kwargs = {'num_workers': 4, 'pin_memory': True}
        self.source_loader, self.target_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)
        self.tbar = tqdm(self.test_loader, desc='\r')
        self.trainer = wgan_trainer(args, 2)
        self.evaluator = Evaluator(2)
        self.best_IoU = {'disc': 0.77, 'cup': 0.65}
        self.attempt = 9.5
        self.validation(args, self.trainer.target_model, self.tbar)
        self.trainer_wgan(args)

    def loop_iterable(self, iterable):
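        """Cycle over a finite iterable (e.g. a DataLoader) indefinitely.

        >>> g = wgan.loop_iterable(None, [1, 2])
        >>> [next(g) for _ in range(5)]
        [1, 2, 1, 2, 1]
        """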
        while True:
            yield from iterable

    def save_model(self, epoch):
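        """Compute accuracy and IoU metrics from the evaluator, print them, and
        checkpoint the target model whenever cup mIoU beats the best so far."""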
        print('Validation:')
        Acc = self.evaluator.Pixel_Accuracy([
            self.evaluator.confusion_matrix_disc,
            self.evaluator.confusion_matrix_cup
        ])
        Acc_class = self.evaluator.Pixel_Accuracy_Class([
            self.evaluator.confusion_matrix_disc,
            self.evaluator.confusion_matrix_cup
        ])
        mIoU = self.evaluator.Mean_Intersection_over_Union([
            self.evaluator.confusion_matrix_disc,
            self.evaluator.confusion_matrix_cup
        ])
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union([
            self.evaluator.confusion_matrix_disc,
            self.evaluator.confusion_matrix_cup
        ])
        print("epoch:{}, Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            epoch, Acc, Acc_class, mIoU, FWIoU))
        if mIoU['cup'] > self.best_IoU['cup']:
            # Checkpoint whenever cup IoU improves on the best so far.
            self.best_IoU = mIoU
            print('---- MODEL SAVE ----')
            torch.save(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.trainer.target_model.state_dict(),
                    'best_auc': str(mIoU['cup']),
                    'optimizer': self.trainer.dda_optim.state_dict()
                }, 'best_models_save/groupwise/m-adda_wgan_clip_0.03' + "v_" +
                str(self.attempt) + '.pth.tar')
        return mIoU

    def trainer_wgan(self, args):
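        """Adversarial training loop: per batch, run `args.k_disc` discriminator
        updates and `args.k_src` generator updates, then evaluate on the test
        loader and checkpoint via `save_model` at the end of each epoch."""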
        self.trainer.target_model.train()
        self.trainer.disc_model.train()
        self.evaluator.reset()
        max_epochs = args.epochs
        for epoch in range(1, max_epochs + 1):
            self.trainer.target_model.train()
            batch_iterator = zip(self.loop_iterable(self.source_loader),
                                 self.loop_iterable(self.target_loader))
            total_loss = 0
            total_loss_tgt = 0
            loss_critic = 0
            loss_tgt = 0
            total_accuracy = 0
            regu_val_disc = 0
            total_regu_disc = 0
            len_dataloader = max(len(self.source_loader),
                                 len(self.target_loader))
            torch.manual_seed(1 + epoch)
            for step in trange(len_dataloader, leave=True):
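                # DANN-style ramp-up: p grows over training and alpha rises
                # smoothly from 0 towards 1 (alpha is computed but unused below).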
                p = float(step + epoch * len_dataloader) / (args.epochs * len_dataloader)
                alpha = 2. / (1. + np.exp(-10 * p)) - 1
                try:
                    data = next(batch_iterator)
                except StopIteration:
                    batch_iterator = zip(
                        self.loop_iterable(self.source_loader),
                        self.loop_iterable(self.target_loader))
                    data = next(batch_iterator)
                if epoch < 0:  # never true (epoch starts at 1); generator-only warm-up path left disabled
                    source_x, src_labels = data[0][0].cuda(), data[0][1].cuda()
                    target_x, target_lab = data[1][0].cuda(), data[1][1].cuda()
                    dda_loss, tgt_loss = self.trainer.update_weights(
                        source_x, src_labels, target_x, target_lab, 0, 0,
                        'train_gen')
                    continue
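                # Critic phase: several discriminator updates per batch, WGAN-style.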
                for i in range(args.k_disc):
                    source_x, src_labels = data[0][0].cuda(), data[0][1].cuda()
                    target_x, target_lab = data[1][0].cuda(), data[1][1].cuda()
                    # Optional (disabled) debug/augmentation hooks: dump
                    # target_x to disk with cv2, or perturb it adversarially:
                    # target_x = self.trainer.adv_aug.perturb(target_x, target_lab, self.trainer.target_criterion, random_start=False)
                    dda_loss, tgt_loss, regu_val_disc = self.trainer.update_weights(
                        source_x, src_labels, target_x, target_lab, 0.1, 0.1,
                        0.8, 1.2, 0.01, 'train_disc')  #0.2, 0.01

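                # Generator phase: update the target segmentation network against the critic.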
                for i in range(args.k_src):
                    source_x, src_labels = data[0][0].cuda(), data[0][1].cuda()
                    target_x, target_lab = data[1][0].cuda(), data[1][1].cuda()
                    # target_x = self.trainer.adv_aug.perturb(target_x, target_lab, self.trainer.target_criterion, random_start=False)
                    dda_loss, tgt_loss = self.trainer.update_weights(
                        source_x, src_labels, target_x, target_lab, 0.1, 0.1,
                        0.8, 1.2, 0.01, 'train_gen')  #0.2 , 0.01
                total_loss += dda_loss
                total_loss_tgt += tgt_loss
                total_regu_disc += regu_val_disc  # accumulate the critic regulariser across steps
                if step % 50 == 0:
                    print(tgt_loss, dda_loss, regu_val_disc)
                #    print("Target_loss:{}, disc_loss:{}".format(total_loss_tgt/(step+1), total_loss/(step+1)))
                self.trainer.scheduler(self.trainer.dda_optim, step, epoch,
                                       self.best_IoU['cup'])
            self.trainer.target_model.eval()
            self.trainer.disc_model.eval()
            test_loss = 0.0
            for st, data in enumerate(self.tbar):
                image, target = data[0], data[1]
                #target = target.transpose(3,1)
                image, target = image.cuda(), target.cuda()
                with torch.no_grad():
                    output, _ = self.trainer.target_model(image)
                test_loss += self.trainer.target_criterion(output, target).item()
                pred = output.data.cpu().numpy()
                target = target.cpu().numpy()
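                # NOTE: the 0.5 threshold assumes `output` is a probability map
                # (post-sigmoid); with raw logits the cut-off would be 0.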
                pred[pred >= 0.5] = 1
                pred[pred < 0.5] = 0
                # Add batch sample into evaluator
                self.evaluator.add_batch(target, pred)
                self.evaluator.add_test_loss(test_loss / (st + 1))
            mIoU = self.evaluator.Mean_Intersection_over_Union([
                self.evaluator.confusion_matrix_disc,
                self.evaluator.confusion_matrix_cup
            ])
            print("mIoU:{}".format(mIoU))
            if (epoch + 1) % 1 == 0:  # log every epoch
                print("Epoch [{}/{}] Step [{}/{}]: "
                      "d_loss={:.5f} g_loss={:.5f} acc={:.5f}".format(
                          epoch + 1, max_epochs, step + 1, len_dataloader,
                          total_loss / (step + 1),
                          total_loss_tgt / (step + 1), total_accuracy))
            mIoU = self.save_model(epoch)

    def validation(self, args, model, tbar):
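        """Run `model` over `tbar`, accumulate confusion matrices in the
        evaluator, and report mIoU and the mean test loss."""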
        best_pred = {'cup': 0, 'disc': 0}
        model.eval()
        self.evaluator.reset()
        test_loss = 0.0
        for i, data in enumerate(tbar):
            image, target = data[0], data[1]
            #target = target.transpose(3,1)
            image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output, _ = model(image)
            test_loss += self.trainer.target_criterion(output, target).item()
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy()
            pred[pred >= 0.5] = 1
            pred[pred < 0.5] = 0
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)
            self.evaluator.add_test_loss(test_loss / (i + 1))

        mIoU = self.evaluator.Mean_Intersection_over_Union([
            self.evaluator.confusion_matrix_disc,
            self.evaluator.confusion_matrix_cup
        ])
        #evaluator.Plot_Loss(1)
        print('Validation:')
        #print('[Epoch: %d, numImages: %5d]' % (epoch, i * args.batch_size + image.data.shape[0]))
        print("mIoU:{}".format(mIoU))
        print('Loss: %.3f' % (test_loss / (i + 1)))
        new_pred = mIoU
        if new_pred['cup'] > best_pred['cup']:
            is_best = True
            best_pred = new_pred
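
# Usage sketch (assumptions: `parse_args` is a hypothetical helper, not defined
# in this example; `args` must supply the fields consumed above, e.g. epochs,
# k_disc, k_src, workers):
#
#     args = parse_args()
#     wgan(args)  # __init__ runs validation and then training directly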
Example #4
class Adda():
    def __init__(self, args):

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.source_loader, self.target_loader, _, self.nclass = make_data_loader(
            args, **kwargs)

        # Define Target Model
        self.target_model = DeepLab(num_classes=self.nclass,
                                    backbone=args.backbone,
                                    output_stride=args.out_stride,
                                    sync_bn=args.sync_bn,
                                    freeze_bn=args.freeze_bn)

        # Using cuda
        self.best_pred = {'disc': 0.0, 'cup': 0.0}

        self.target_model = torch.nn.DataParallel(self.target_model)
        patch_replication_callback(self.target_model)
        self.target_model = self.target_model.cuda()
        model_dict = self.target_model.module.state_dict()
        pretrained_dict = {
            k: v
            for k, v in checkpoint['state_dict'].items()
            if 'last_conv' not in k
        }
        model_dict.update(pretrained_dict)
        self.target_model.module.load_state_dict(model_dict)
        self.target_model.train()
        self.set_requires_grad('target', True)

        # Define learning rate and optimizer params
        target_params = [{
            'params': self.target_model.module.get_1x_lr_params(),
            'lr': args.lr
        }, {
            'params': self.target_model.module.get_10x_lr_params(),
            'lr': args.lr * 10
        }]

        target_optim = torch.optim.SGD(target_params,
                                       momentum=args.momentum,
                                       weight_decay=args.weight_decay,
                                       nesterov=args.nesterov)
        target_optim.zero_grad()

        self.target_criterion = torch.nn.BCEWithLogitsLoss()
        self.target_optim = target_optim

        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.target_loader))
        self.evaluator = Evaluator(3)

    def set_requires_grad(self, mode, requires_grad=False):
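        """Enable or disable gradients on the model named by `mode`
        (e.g. 'target' resolves to self.target_model)."""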

        for param in getattr(self, mode + '_model').parameters():
            param.requires_grad = requires_grad

    def loop_iterable(self, iterable):
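        """Cycle over a finite iterable indefinitely (same helper as in `wgan`)."""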
        while True:
            yield from iterable

    def trainer(self, num_epochs, iterations):
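        """Adversarial adaptation loop: pair source and target batches each
        step and compute the DANN-style ramp-up weight."""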
        best_IoU = 0.68
        attempt = 1
        batch_iterator = zip(self.loop_iterable(self.source_loader),
                             self.loop_iterable(self.target_loader))
        for epoch in range(1, num_epochs + 1):
            total_loss_tgt = 0
            total_accuracy = 0
            self.evaluator.reset()
            total_loss = 0
            len_dataloader = len(self.source_loader)
            torch.manual_seed(1 + epoch)
            for step in trange(len_dataloader, leave=True):
                p = float(step + epoch * len_dataloader) / (num_epochs * len_dataloader)
                alpha = 2. / (1. + np.exp(-10 * p)) - 1
                try:
                    data = next(batch_iterator)
                except StopIteration:
                    batch_iterator = zip(
                        self.loop_iterable(self.source_loader),
                        self.loop_iterable(self.target_loader))
                    data = next(batch_iterator)