Example #1
    def train_classifier(self, train_loader, model, criterion, optimizer,
                         last_best_epochs, epoch):
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        losses_per_class = LossPerClassMeter(
            len(train_loader.dataset.dataset.classes))

        end = time.time()

        model.train()

        for i, (data_x, data_y) in enumerate(train_loader):
            data_x = data_x.cuda(non_blocking=True)
            data_y = data_y.cuda(non_blocking=True)

            if self.train_feat:
                # Fine-tune the encoder together with the classifier head.
                output = model.forward_encoder_classifier(data_x)
            else:
                # Encoder frozen: extract features without gradients, then
                # train only the classifier head on top of them.
                model.eval()
                with torch.no_grad():
                    h = model.forward_encoder(data_x)
                model.train()
                output = model.forward_classifier(h)

            # criterion is expected to return per-sample losses
            # (reduction='none'): log them per class, then reduce to the mean.
            loss = criterion(output, data_y)

            losses_per_class.update(loss.cpu().detach().numpy(),
                                    data_y.cpu().numpy())
            loss = torch.sum(loss) / loss.size(0)

            acc = accuracy(output.data, data_y, topk=(1, ))[0]
            losses.update(loss.data.item(), data_x.size(0))
            top1.update(acc.item(), data_x.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.args.print_freq == 0:
                print('Epoch Classifier: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Last best epoch {last_best_epoch}'.format(
                          epoch,
                          i,
                          len(train_loader),
                          batch_time=batch_time,
                          loss=losses,
                          last_best_epoch=last_best_epochs))

        return pd.DataFrame.from_dict(
            {
                f'{k}-train-loss': losses_per_class.avg[i]
                for i, k in enumerate(train_loader.dataset.dataset.classes)
            },
            orient='index').T
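
Example #1, like the rest of the listing, leans on a few metric helpers that are not shown. AverageMeter and accuracy follow the well-known PyTorch ImageNet reference example; LossPerClassMeter is project-specific, so the version below is only a plausible reconstruction inferred from its call sites (it receives per-sample losses plus class indices and exposes a per-class avg array):

import numpy as np


class AverageMeter:
    """Running value/sum/count/average of a scalar metric."""

    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent, as in the PyTorch ImageNet example."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
            for k in topk]


class LossPerClassMeter:
    """Plausible sketch: running mean of per-sample losses, bucketed by class."""

    def __init__(self, num_classes):
        self.sum = np.zeros(num_classes)
        self.count = np.zeros(num_classes)

    def update(self, losses, targets):
        for loss, cls in zip(losses, targets):
            self.sum[cls] += loss
            self.count[cls] += 1

    @property
    def avg(self):
        return self.sum / np.maximum(self.count, 1)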
Example #2
    def train(self, train_loader, models, optimizers, criterions, epoch, last_best_epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        losses_per_class = LossPerClassMeter(len(train_loader.dataset.dataset.classes))

        models['backbone'].train()
        models['module'].train()

        end = time.time()

        for i, (data_x, data_y) in enumerate(train_loader):
            data_y = data_y.cuda(non_blocking=True)
            data_x = data_x.cuda(non_blocking=True)

            optimizers['backbone'].zero_grad()
            optimizers['module'].zero_grad()

            # The backbone returns both logits and intermediate features; a
            # separate module estimates the per-sample loss from the features
            # (the learning-loss-for-active-learning pattern).
            output, features = models['backbone'].forward_features(data_x)
            target_loss = criterions['backbone'](output, data_y)

            pred_loss = models['module'](features)
            pred_loss = pred_loss.view(pred_loss.size(0))

            losses_per_class.update(target_loss.cpu().detach().numpy(), data_y.cpu().numpy())
            m_backbone_loss = torch.sum(target_loss) / target_loss.size(0)

            # Train the module to regress the target loss and combine both
            # objectives with a weighting factor.
            m_module_loss = criterions['module'](pred_loss, target_loss)
            loss = m_backbone_loss + self.args.learning_loss_weight * m_module_loss

            loss.backward()
            optimizers['backbone'].step()
            optimizers['module'].step()

            acc = accuracy(output.data, data_y, topk=(1,))[0]
            losses.update(loss.data.item(), data_x.size(0))
            top1.update(acc.item(), data_x.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.args.print_freq == 0:
                print('Epoch Classifier: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Last best epoch {last_best_epoch}'
                      .format(epoch, i, len(train_loader), batch_time=batch_time, loss=losses,
                              last_best_epoch=last_best_epochs))

        return pd.DataFrame.from_dict({f'{k}-train-loss': losses_per_class.avg[i]
                                       for i, k in enumerate(train_loader.dataset.dataset.classes)}, orient='index').T
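
Example #2 follows the learning-loss-for-active-learning pattern: a small 'module' network is trained to predict the backbone's per-sample loss from intermediate features, so the most uncertain samples can later be selected for labeling. The wiring below is a hypothetical sketch: TinyBackbone is a stand-in for the real model, and nn.MSELoss is substituted for the paper's margin-based ranking loss; only the reduction='none' choice for the backbone criterion is actually required by the loop above.

import torch
import torch.nn as nn
import torch.optim as optim


class TinyBackbone(nn.Module):
    """Stand-in: must expose forward_features() -> (logits, features)."""

    def __init__(self, num_classes=10):
        super().__init__()
        self.encoder = nn.Linear(32, 16)
        self.head = nn.Linear(16, num_classes)

    def forward_features(self, x):
        features = torch.relu(self.encoder(x))
        return self.head(features), features


models = {'backbone': TinyBackbone(),
          'module': nn.Linear(16, 1)}  # one scalar loss prediction per sample
optimizers = {
    'backbone': optim.SGD(models['backbone'].parameters(), lr=0.1, momentum=0.9),
    'module': optim.SGD(models['module'].parameters(), lr=0.1, momentum=0.9),
}
criterions = {
    # reduction='none' keeps per-sample losses, as both LossPerClassMeter
    # and the loss-prediction target require.
    'backbone': nn.CrossEntropyLoss(reduction='none'),
    # Placeholder; Yoo & Kweon (2019) use a margin ranking loss here.
    'module': nn.MSELoss(),
}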
Example #3
    def validate(self, val_loader, models, criterions, last_best_epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top2 = AverageMeter()
        metrics = Metrics()
        losses_per_class = LossPerClassMeter(len(val_loader.dataset.dataset.classes))

        models['backbone'].eval()
        models['module'].eval()

        end = time.time()

        with torch.no_grad():
            for i, (data_x, data_y) in enumerate(val_loader):
                data_y = data_y.cuda(non_blocking=True)
                data_x = data_x.cuda(non_blocking=True)

                output = models['backbone'](data_x)
                loss = criterions['backbone'](output, data_y)

                losses_per_class.update(loss.cpu().detach().numpy(), data_y.cpu().numpy())
                loss = torch.sum(loss) / loss.size(0)

                acc = accuracy(output.data, data_y, topk=(1, 2))
                losses.update(loss.data.item(), data_x.size(0))
                top1.update(acc[0].item(), data_x.size(0))
                top2.update(acc[1].item(), data_x.size(0))
                metrics.add_mini_batch(data_y, output)

                batch_time.update(time.time() - end)
                end = time.time()

                if i % self.args.print_freq == 0:
                    print('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Last best epoch {last_best_epoch}'
                          .format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1,
                                  last_best_epoch=last_best_epochs))

        report = metrics.get_report(target_names=val_loader.dataset.dataset.classes)
        print(' * Acc@1 {top1.avg:.3f}\t * Prec {0:.3f}\t * Recall {1:.3f}\t * Acc@2 {top2.avg:.3f}'
              .format(report['macro avg']['precision'], report['macro avg']['recall'], top1=top1, top2=top2))

        return pd.DataFrame.from_dict({f'{k}-val-loss': losses_per_class.avg[i]
                                       for i, k in enumerate(val_loader.dataset.dataset.classes)}, orient='index').T, \
            pd.DataFrame.from_dict(report)
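
Metrics in Example #3 is also project-specific. Judging from add_mini_batch(data_y, output) and get_report(target_names=...), it accumulates targets and argmax predictions and wraps scikit-learn's classification_report; the class below is a reconstruction under that assumption, not the original.

import numpy as np
from sklearn.metrics import classification_report


class Metrics:
    """Plausible sketch: collect targets and predictions batch by batch."""

    def __init__(self):
        self.targets, self.preds = [], []

    def add_mini_batch(self, targets, logits):
        self.targets.append(targets.cpu().numpy())
        self.preds.append(logits.argmax(dim=1).cpu().numpy())

    def get_report(self, target_names):
        # output_dict=True yields the nested dict accessed as
        # report['macro avg']['precision'] above.
        return classification_report(np.concatenate(self.targets),
                                     np.concatenate(self.preds),
                                     target_names=target_names,
                                     output_dict=True)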
Example #4
    def train(self, train_loader, model, optimizer, epoch, loaders_len,
              criterions, classes, last_best_epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        losses_per_class = LossPerClassMeter(len(classes))

        end = time.time()

        model.train()

        for i, (data_labeled, data_unlabeled) in enumerate(train_loader):
            data_x, data_y = data_labeled
            data_x, data_y = data_x.cuda(non_blocking=True), data_y.cuda(
                non_blocking=True)

            (data_w, data_s), _ = data_unlabeled
            data_w, data_s = data_w.cuda(non_blocking=True), data_s.cuda(
                non_blocking=True)

            # Single forward pass over the concatenated labeled batch plus the
            # weakly/strongly augmented unlabeled views, then split back apart.
            inputs = torch.cat((data_x, data_w, data_s))
            logits = model.forward_encoder_classifier(inputs)
            logits_labeled = logits[:self.args.batch_size]
            logits_unlabeled_w, logits_unlabeled_s = logits[
                self.args.batch_size:].chunk(2)
            del logits

            loss_labeled = criterions['labeled'](logits_labeled, data_y)

            losses_per_class.update(loss_labeled.cpu().detach().numpy(),
                                    data_y.cpu().numpy())
            loss_labeled = torch.sum(loss_labeled) / loss_labeled.size(0)

            # Pseudo-labels come from the weak view, detached so no gradient
            # flows through them (in-place detach_() fails on chunk() views).
            pseudo_label = torch.softmax(logits_unlabeled_w.detach(), dim=-1)
            max_probs, data_y_unlabeled = torch.max(pseudo_label, dim=-1)
            mask = max_probs.ge(self.args.fixmatch_threshold).float()

            # The unlabeled criterion must be per-sample (reduction='none')
            # so the confidence mask can zero out uncertain samples.
            loss_unlabeled = (
                criterions['unlabeled'](logits_unlabeled_s, data_y_unlabeled) *
                mask).mean()

            loss = loss_labeled + self.args.fixmatch_lambda_u * loss_unlabeled

            acc = accuracy(logits_labeled.data, data_y, topk=(1, ))[0]
            losses.update(loss.data.item(), data_x.size(0))
            top1.update(acc.item(), data_x.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # sch.step(epoch*loaders_len + i)

            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.args.print_freq == 0:
                print('Epoch Classifier: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Last best epoch {last_best_epoch}\t'
                      'Current LR: {curr_lr}'.format(
                          epoch,
                          i,
                          loaders_len,
                          batch_time=batch_time,
                          loss=losses,
                          last_best_epoch=last_best_epochs,
                          curr_lr=optimizer.param_groups[0]['lr']))

        return pd.DataFrame.from_dict(
            {
                f'{k}-train-loss': losses_per_class.avg[i]
                for i, k in enumerate(classes)
            },
            orient='index').T
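
Example #4 is a FixMatch update: one forward pass covers the labeled batch plus weak and strong views of an unlabeled batch, pseudo-labels are taken from the weak view, and only predictions above fixmatch_threshold contribute to the unlabeled loss. The loop therefore expects train_loader to yield ((data_x, data_y), ((data_w, data_s), _)) pairs. One hypothetical way to build such a loader; TwoViewDataset, the random tensors, and the identity/noise "augmentations" are all illustrative stand-ins.

from itertools import cycle

import torch
from torch.utils.data import DataLoader, Dataset, TensorDataset


class TwoViewDataset(Dataset):
    """FixMatch-style wrapper: returns ((weak_view, strong_view), label)."""

    def __init__(self, base, weak, strong):
        self.base, self.weak, self.strong = base, weak, strong

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        x, y = self.base[idx]
        return (self.weak(x), self.strong(x)), y


labeled_ds = TensorDataset(torch.randn(256, 3, 32, 32),
                           torch.randint(0, 10, (256,)))
unlabeled_ds = TwoViewDataset(
    TensorDataset(torch.randn(1024, 3, 32, 32),
                  torch.zeros(1024, dtype=torch.long)),
    weak=lambda x: x,                                # stand-in for weak aug.
    strong=lambda x: x + 0.1 * torch.randn_like(x))  # stand-in for strong aug.

batch_size = 64
# drop_last=True matters: the loop slices logits[:batch_size] and assumes
# every labeled batch is full.
labeled_loader = DataLoader(labeled_ds, batch_size=batch_size,
                            shuffle=True, drop_last=True)
unlabeled_loader = DataLoader(unlabeled_ds, batch_size=batch_size,
                              shuffle=True, drop_last=True)

# cycle() replays the labeled batches so the (usually larger) unlabeled
# loader drives the epoch length; production code would re-shuffle instead.
train_loader = zip(cycle(labeled_loader), unlabeled_loader)
loaders_len = len(unlabeled_loader)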
Example #5
    def train(self, labeled_loader, model, criterion_cl, optimizer,
              last_best_epochs, epoch, criterions_reconstruction, base_loader):
        model.train()
        batch_time = AverageMeter()
        losses_reconstruction = AverageMeter()
        losses_sum_reconstruction = np.zeros(
            len(criterions_reconstruction.keys()))

        end = time.time()
        # Phase 1: unsupervised reconstruction pass over the base loader
        # (the labels are not used here).
        for i, (data_x, _) in enumerate(base_loader):
            data_x = data_x.cuda(non_blocking=True)

            output = model(data_x)

            # Track every reconstruction criterion separately; SSIM is a
            # similarity in [0, 1], so its entry is flipped into a loss.
            losses_alt = np.array([
                v(output, data_x).detach().item()
                for v in criterions_reconstruction.values()
            ])
            losses_alt[-1] = 1 - losses_alt[-1]
            losses_sum_reconstruction = losses_sum_reconstruction + losses_alt
            loss = criterions_reconstruction['l2'](output, data_x) + \
                (1 - criterions_reconstruction['ssim'](output, data_x))

            losses_reconstruction.update(loss.data.item(), data_x.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          epoch,
                          i,
                          len(base_loader),
                          batch_time=batch_time,
                          loss=losses_reconstruction))

        losses_avg_reconstruction = losses_sum_reconstruction / len(
            base_loader)

        batch_time = AverageMeter()
        losses_cl = AverageMeter()
        top1 = AverageMeter()
        losses_per_class_cl = LossPerClassMeter(
            len(labeled_loader.dataset.dataset.classes))

        end = time.time()

        # Phase 2: supervised fine-tuning of the classifier on labeled data.
        model.train()

        for i, (data_x, data_y) in enumerate(labeled_loader):
            data_x = data_x.cuda(non_blocking=True)
            data_y = data_y.cuda(non_blocking=True)

            output = model.forward_encoder_classifier(data_x)

            loss = criterion_cl(output, data_y)

            losses_per_class_cl.update(loss.cpu().detach().numpy(),
                                       data_y.cpu().numpy())
            # Mean over the batch, scaled by a project-specific factor of 100.
            loss = (torch.sum(loss) / loss.size(0)) * 100

            acc = accuracy(output.data, data_y, topk=(1, ))[0]
            losses_cl.update(loss.data.item(), data_x.size(0))
            top1.update(acc.item(), data_x.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if i % self.args.print_freq == 0:
                print('Epoch Classifier: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Last best epoch {last_best_epoch}'.format(
                          epoch,
                          i,
                          len(labeled_loader),
                          batch_time=batch_time,
                          loss=losses_cl,
                          last_best_epoch=last_best_epochs))

        return pd.DataFrame.from_dict(
            {
                f'{k}-train-loss': losses_per_class_cl.avg[i]
                for i, k in enumerate(labeled_loader.dataset.dataset.classes)
            },
            orient='index').T, losses_avg_reconstruction, losses_reconstruction
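
Example #5 trains in two phases per epoch: an unsupervised reconstruction pass over base_loader with an L2 + (1 - SSIM) objective, followed by supervised fine-tuning of the classifier head on labeled_loader. criterions_reconstruction is a name-keyed dict whose last entry must be the SSIM criterion, because the loop flips only losses_alt[-1] from similarity to loss. A plausible construction, assuming the third-party pytorch_msssim package for SSIM:

import torch.nn as nn
from pytorch_msssim import SSIM  # pip install pytorch-msssim

criterions_reconstruction = {
    'l2': nn.MSELoss(),
    # SSIM returns a similarity in [0, 1]; the training loop converts it
    # to a loss as (1 - ssim). It must stay the last key in the dict,
    # since losses_alt[-1] is the only entry that gets flipped.
    'ssim': SSIM(data_range=1.0, channel=3),
}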