Example #1
        def get_metrics(tbar, if_source=False):
            self.evaluator.reset()
            test_loss = 0.0
            feat_mean, low_feat_mean, feat_var, low_feat_var = 0, 0, 0, 0
            adv_loss = 0.0
            for i, sample in enumerate(tbar):
                image, target = sample['image'], sample['label']

                if self.args.cuda:
                    image, target = image.cuda(), target.cuda()

                with torch.no_grad():
                    output, low_feat, feat = self.model(image)

                low_feat = low_feat.cpu().numpy()
                feat = feat.cpu().numpy()
                # Accumulate per-channel statistics of the high- and low-level
                # features; the accumulators start at 0, so += also works on
                # the first batch.
                feat_mean += feat.mean(axis=0).mean(axis=1).mean(axis=1)
                low_feat_mean += low_feat.mean(axis=0).mean(axis=1).mean(axis=1)
                feat_var += feat.var(axis=0).var(axis=1).var(axis=1)
                low_feat_var += low_feat.var(axis=0).var(axis=1).var(axis=1)

                d_output = self.D(F.softmax(output, dim=1))
                adv_loss += bce_loss(d_output, self.source_label).item()
                loss = self.criterion(output, target)
                test_loss += loss.item()
                tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
                pred = output.data.cpu().numpy()

                target = target.cpu().numpy()
                pred = np.argmax(pred, axis=1)

                # Add batch sample into evaluator
                self.evaluator.add_batch(target, pred)

            feat_mean /= (i + 1)
            low_feat_mean /= (i + 1)
            feat_var /= (i + 1)
            low_feat_var /= (i + 1)
            adv_loss /= (i + 1)
            # Fast test during the training
            Acc = self.evaluator.Building_Acc()
            IoU = self.evaluator.Building_IoU()
            mIoU = self.evaluator.Mean_Intersection_over_Union()

            if if_source:
                print('Validation on source:')
            else:
                print('Validation on target:')
            print('[Epoch: %d, numImages: %5d]' %
                  (epoch, i * self.config.batch_size + image.data.shape[0]))
            print("Acc:{}, IoU:{}, mIoU:{}".format(Acc, IoU, mIoU))
            print('Loss: %.3f' % test_loss)
            print('Adv Loss: %.3f' % adv_loss)

            # Draw Visdom
            if if_source:
                names = ['source', 'source_acc', 'source_IoU', 'source_mIoU']
            else:
                names = ['target', 'target_acc', 'target_IoU', 'target_mIoU']

            if self.visdom:
                self.vis.line(X=torch.tensor([epoch]),
                              Y=torch.tensor([test_loss]),
                              win='val_loss',
                              name=names[0],
                              update='append')
                self.vis.line(X=torch.tensor([epoch]),
                              Y=torch.tensor([adv_loss]),
                              win='val_loss',
                              name='adv_loss',
                              update='append')
                self.vis.line(X=torch.tensor([epoch]),
                              Y=torch.tensor([Acc]),
                              win='metrics',
                              name=names[1],
                              opts=dict(title='metrics',
                                        xlabel='epoch',
                                        ylabel='performance'),
                              update='append' if epoch > 0 else None)
                self.vis.line(X=torch.tensor([epoch]),
                              Y=torch.tensor([IoU]),
                              win='metrics',
                              name=names[2],
                              update='append')
                self.vis.line(X=torch.tensor([epoch]),
                              Y=torch.tensor([mIoU]),
                              win='metrics',
                              name=names[3],
                              update='append')

            return Acc, IoU, mIoU, feat_mean, low_feat_mean, feat_var, low_feat_var, adv_loss
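Note that get_metrics above is a nested helper: epoch, self.model, self.evaluator, and the visdom handle all come from the enclosing validation method, which is not shown. A minimal sketch of such a caller, with hypothetical loader names (val_loader, target_val_loader) that are assumptions rather than part of the original snippet:

    def validation(self, epoch):
        # Hypothetical enclosing method; loader attribute names are assumptions.
        self.model.eval()
        source_tbar = tqdm(self.val_loader, desc='\r')
        target_tbar = tqdm(self.target_val_loader, desc='\r')

        def get_metrics(tbar, if_source=False):
            ...  # body as in Example #1; it closes over epoch and self

        source_metrics = get_metrics(source_tbar, if_source=True)
        target_metrics = get_metrics(target_tbar, if_source=False)
        return source_metrics, target_metrics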
Example #2
    def training(self, epoch):
        train_loss, seg_loss_sum, bn_loss_sum, entropy_loss_sum, adv_loss_sum, d_loss_sum, ins_loss_sum = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
        self.model.train()
        if self.config.freeze_bn:
            self.model.module.freeze_bn()
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        target_train_iterator = iter(self.target_train_loader)
        for i, sample in enumerate(tbar):
            itr = epoch * len(self.train_loader) + i
            #if self.visdom:
            #    self.vis.line(X=torch.tensor([itr]), Y=torch.tensor([self.optimizer.param_groups[0]['lr']]),
            #              win='lr', opts=dict(title='lr', xlabel='iter', ylabel='lr'),
            #              update='append' if itr>0 else None)
            self.summary.writer.add_scalar(
                'Train/lr', self.optimizer.param_groups[0]['lr'], itr)
            A_image, A_target = sample['image'], sample['label']

            # Get one batch from target domain
            try:
                target_sample = next(target_train_iterator)
            except StopIteration:
                target_train_iterator = iter(self.target_train_loader)
                target_sample = next(target_train_iterator)

            B_image = target_sample['image']
            B_target = target_sample['label']
            B_image_pair = target_sample['image_pair']

            if self.args.cuda:
                A_image, A_target = A_image.cuda(), A_target.cuda()
                B_image = B_image.cuda()
                B_target = B_target.cuda()
                B_image_pair = B_image_pair.cuda()

            self.scheduler(self.optimizer, i, epoch, self.best_pred_source,
                           self.best_pred_target, self.config.lr_ratio)
            self.scheduler(self.D_optimizer, i, epoch, self.best_pred_source,
                           self.best_pred_target, self.config.lr_ratio)

            A_output, A_feat, A_low_feat = self.model(A_image)
            B_output, B_feat, B_low_feat = self.model(B_image)
            #B_output_pair, B_feat_pair, B_low_feat_pair = self.model(B_image_pair)
            #B_output_pair, B_feat_pair, B_low_feat_pair = flip(B_output_pair, dim=-1), flip(B_feat_pair, dim=-1), flip(B_low_feat_pair, dim=-1)

            self.optimizer.zero_grad()
            self.D_optimizer.zero_grad()

            # Train seg network
            for param in self.D.parameters():
                param.requires_grad = False

            # Supervised loss
            seg_loss = self.criterion(A_output, A_target)
            main_loss = seg_loss

            # Unsupervised loss
            #ins_loss = 0.01 * self.instance_loss(B_output, B_output_pair)
            #main_loss += ins_loss

            # Train adversarial loss
            D_out = self.D(prob_2_entropy(F.softmax(B_output, dim=1)))
            adv_loss = bce_loss(D_out, self.source_label)

            main_loss += self.config.lambda_adv * adv_loss
            main_loss.backward()

            # Train discriminator
            for param in self.D.parameters():
                param.requires_grad = True
            A_output_detach = A_output.detach()
            B_output_detach = B_output.detach()
            # source
            D_source = self.D(prob_2_entropy(F.softmax(A_output_detach, dim=1)))
            source_loss = bce_loss(D_source, self.source_label)
            source_loss = source_loss / 2
            # target
            D_target = self.D(prob_2_entropy(F.softmax(B_output_detach, dim=1)))
            target_loss = bce_loss(D_target, self.target_label)
            target_loss = target_loss / 2
            d_loss = source_loss + target_loss
            d_loss.backward()

            self.optimizer.step()
            self.D_optimizer.step()

            seg_loss_sum += seg_loss.item()
            #ins_loss_sum += ins_loss.item()
            adv_loss_sum += self.config.lambda_adv * adv_loss.item()
            d_loss_sum += d_loss.item()

            #train_loss += seg_loss.item() + self.config.lambda_adv * adv_loss.item()
            train_loss += seg_loss.item()
            self.summary.writer.add_scalar('Train/SegLoss', seg_loss.item(),
                                           itr)
            #self.summary.writer.add_scalar('Train/InsLoss', ins_loss.item(), itr)
            self.summary.writer.add_scalar('Train/AdvLoss', adv_loss.item(),
                                           itr)
            self.summary.writer.add_scalar('Train/DiscriminatorLoss',
                                           d_loss.item(), itr)
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))

            # Show the results of the last iteration
            #if i == len(self.train_loader)-1:
        print("Add Train images at epoch" + str(epoch))
        self.summary.visualize_image('Train-Source', self.config.dataset,
                                     A_image, A_target, A_output, epoch, 5)
        self.summary.visualize_image('Train-Target', self.config.target,
                                     B_image, B_target, B_output, epoch, 5)
        print('[Epoch: %d, numImages: %5d]' %
              (epoch, i * self.config.batch_size + A_image.data.shape[0]))
        print('Loss: %.3f' % train_loss)
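Example #2 relies on two helpers that are not defined in the snippet: prob_2_entropy, which turns softmax maps into entropy maps before they reach the discriminator, and bce_loss, which is called with a scalar domain label. A minimal sketch of both, assuming the weighted self-information formulation used in entropy-based adaptation (ADVENT) and a BCE-with-logits objective; the project's actual implementations may differ:

import numpy as np
import torch
import torch.nn.functional as F

def prob_2_entropy(prob):
    # Turn softmax probabilities (N, C, H, W) into weighted self-information
    # maps, normalized by log2(C) so values stay in [0, 1].
    n, c, h, w = prob.size()
    return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)

def bce_loss(y_pred, y_label):
    # Expand the scalar domain label (e.g. 0 for source, 1 for target) to the
    # discriminator output shape and apply BCE on the raw logits.
    y_truth = torch.full_like(y_pred, y_label)
    return F.binary_cross_entropy_with_logits(y_pred, y_truth)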
Example #3
    def training(self, epoch):
        train_loss, seg_loss_sum, bn_loss_sum, entropy_loss_sum, adv_loss_sum, d_loss_sum = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
        self.model.train()
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        target_train_iterator = iter(self.target_train_loader)
        for i, sample in enumerate(tbar):
            itr = epoch * len(self.train_loader) + i
            if self.visdom:
                self.vis.line(
                    X=torch.tensor([itr]),
                    Y=torch.tensor([self.optimizer.param_groups[0]['lr']]),
                    win='lr',
                    opts=dict(title='lr', xlabel='iter', ylabel='lr'),
                    update='append' if itr > 0 else None)
            A_image, A_target = sample['image'], sample['label']

            # Get one batch from target domain
            try:
                target_sample = next(target_train_iterator)
            except StopIteration:
                target_train_iterator = iter(self.target_train_loader)
                target_sample = next(target_train_iterator)

            B_image, B_target = target_sample['image'], target_sample['label']

            if self.args.cuda:
                A_image, A_target = A_image.cuda(), A_target.cuda()
                B_image, B_target = B_image.cuda(), B_target.cuda()

            self.scheduler(self.optimizer, i, epoch, self.best_pred_source,
                           self.best_pred_target)
            self.scheduler(self.D_optimizer, i, epoch, self.best_pred_source,
                           self.best_pred_target)

            A_output, A_feat, A_low_feat = self.model(A_image)
            B_output, B_feat, B_low_feat = self.model(B_image)

            self.optimizer.zero_grad()
            self.D_optimizer.zero_grad()

            # Train seg network
            for param in self.D.parameters():
                param.requires_grad = False

            # Supervised loss
            seg_loss = self.criterion(A_output, A_target)
            # Unsupervised bn loss
            bottleneck_loss = (self.bottleneck_loss.loss(A_feat, B_feat) +
                               self.bottleneck_loss.loss(A_low_feat, B_low_feat))
            # Unsupervised entropy minimization loss
            #entropy_mini_loss = self.entropy_mini_loss.loss(B_output)
            #main_loss = seg_loss + bottleneck_loss*100
            main_loss = seg_loss

            # Train adversarial loss
            D_out = self.D(F.softmax(B_output, dim=1))
            adv_loss = bce_loss(D_out, self.source_label)
            #adv_loss.backward()
            main_loss += self.config.lambda_adv * adv_loss
            main_loss.backward()

            # Train discriminator
            for param in self.D.parameters():
                param.requires_grad = True
            A_output_detach = A_output.detach()
            B_output_detach = B_output.detach()
            # source
            D_source = self.D(F.softmax(A_output_detach, dim=1))
            source_loss = bce_loss(D_source, self.source_label)
            source_loss = source_loss / 2
            #source_loss.backward()
            # target
            D_target = self.D(F.softmax(B_output_detach, dim=1))
            target_loss = bce_loss(D_target, self.target_label)
            target_loss = target_loss / 2
            #target_loss.backward()
            d_loss = source_loss + target_loss
            d_loss.backward()

            self.optimizer.step()
            self.D_optimizer.step()

            seg_loss_sum += seg_loss.item()
            bn_loss_sum += bottleneck_loss.item()
            adv_loss_sum += self.config.lambda_adv * adv_loss.item()
            d_loss_sum += d_loss.item()

            train_loss += seg_loss.item() + self.config.lambda_adv * adv_loss.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))

        print('[Epoch: %d, numImages: %5d]' %
              (epoch, i * self.config.batch_size + A_image.data.shape[0]))
        #print('Loss: %.3f' % train_loss)
        print('Seg Loss: %.3f' % seg_loss_sum)
        print('BN Loss: %.3f' % bn_loss_sum)
        print('Adv Loss: %.3f' % adv_loss_sum)
        print('Discriminator Loss: %.3f' % d_loss_sum)

        if self.visdom:
            self.vis.line(X=torch.tensor([epoch]),
                          Y=torch.tensor([seg_loss_sum]),
                          win='train_loss',
                          name='Seg_loss',
                          opts=dict(title='loss',
                                    xlabel='epoch',
                                    ylabel='loss'),
                          update='append' if epoch > 0 else None)
            self.vis.line(X=torch.tensor([epoch]),
                          Y=torch.tensor([bn_loss_sum]),
                          win='train_loss',
                          name='BN_loss',
                          opts=dict(title='loss',
                                    xlabel='epoch',
                                    ylabel='loss'),
                          update='append' if epoch > 0 else None)
            self.vis.line(X=torch.tensor([epoch]),
                          Y=torch.tensor([adv_loss_sum]),
                          win='train_loss',
                          name='Adv_loss',
                          opts=dict(title='loss',
                                    xlabel='epoch',
                                    ylabel='loss'),
                          update='append' if epoch > 0 else None)
            self.vis.line(X=torch.tensor([epoch]),
                          Y=torch.tensor([d_loss_sum]),
                          win='train_loss',
                          name='Dis_loss',
                          opts=dict(title='loss',
                                    xlabel='epoch',
                                    ylabel='loss'),
                          update='append' if epoch > 0 else None)
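All three snippets also assume a fully convolutional discriminator self.D and scalar source_label/target_label constants. A minimal sketch in the AdaptSegNet style, purely illustrative of how such a discriminator could be wired up; the real architecture and label values in the project may differ:

import torch.nn as nn

class FCDiscriminator(nn.Module):
    # Fully convolutional discriminator: a C-channel probability (or entropy)
    # map goes in, a 1-channel domain logit map comes out.
    def __init__(self, num_classes, ndf=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=2, padding=1),
        )

    def forward(self, x):
        return self.net(x)

# Domain labels used to fill the BCE targets; here 0 = source, 1 = target.
source_label, target_label = 0, 1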