Example #1
0
def train(epoch, model, model2, criterion, criterion_ml, optimizer1, optimizer2, trainloader, use_gpu):
    """Train two peer models for one epoch with a mutual-learning objective.

    Each model is supervised by `criterion` on the ground-truth `pids` and
    additionally by `criterion_ml` against the other model's outputs
    (model 1's mutual term is weighted by 10, model 2's by 1, matching the
    original loss definitions).

    Args:
        epoch: current epoch index (0-based; printed 1-based).
        model, model2: the two peer networks being co-trained.
        criterion: classification loss taking (outputs, pids).
        criterion_ml: mutual-learning loss taking (outputs_a, outputs_b).
        optimizer1, optimizer2: optimizers for `model` and `model2`.
        trainloader: iterable yielding (imgs, pids, _) batches.
        use_gpu: move each batch to CUDA when True.
    """
    losses1 = AverageMeter()
    losses2 = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    model2.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        outputs1 = model(imgs)
        outputs2 = model2(imgs)

        # Loss for model 1: classification + weighted mutual term vs model 2.
        if isinstance(outputs1, tuple):
            loss1 = DeepSupervision(criterion, outputs1, pids) + 10 * DeepSupervision(criterion_ml, outputs1, outputs2)
        else:
            loss1 = criterion(outputs1, pids) + 10 * criterion_ml(outputs1, outputs2)

        # Loss for model 2. Bug fix: the original tested isinstance(outputs1,
        # tuple) here, but this branch consumes outputs2 — test outputs2.
        if isinstance(outputs2, tuple):
            loss2 = DeepSupervision(criterion, outputs2, pids) + DeepSupervision(criterion_ml, outputs2, outputs1)
        else:
            loss2 = criterion(outputs2, pids) + criterion_ml(outputs2, outputs1)

        optimizer1.zero_grad()
        optimizer2.zero_grad()
        # Bug fix: the two losses share both models' autograd graphs (each
        # uses the other model's outputs), so calling loss1.backward() and
        # then loss2.backward() fails — the first call frees the shared
        # graph. Backpropagating the sum in one pass accumulates exactly
        # grad(loss1) + grad(loss2) into both parameter sets.
        (loss1 + loss2).backward()
        optimizer1.step()
        optimizer2.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        losses1.update(loss1.item(), pids.size(0))
        losses2.update(loss2.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss1 {loss1.val:.4f} ({loss1.avg:.4f})\t'
                  'Loss2 {loss2.val:.4f} ({loss2.avg:.4f})\t'.format(
                epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
                data_time=data_time, loss1=losses1, loss2=losses2))
Example #2
0
def train(epoch, model, criterion_class, criterion_metric, optimizer,
          trainloader, use_gpu):
    """Train `model` for one epoch with classification + metric losses.

    The model returns (outputs, features); `criterion_class` (e.g. cross
    entropy) is applied to outputs and `criterion_metric` (e.g. triplet) to
    features. When `args.htri_only` is set, only the metric loss is used.

    Args:
        epoch: current epoch index (0-based; printed 1-based).
        model: network returning (outputs, features) per batch.
        criterion_class: classification loss on (outputs, pids).
        criterion_metric: metric loss on (features, pids).
        optimizer: optimizer for model parameters.
        trainloader: iterable yielding (imgs, pids, _) batches.
        use_gpu: move each batch to CUDA when True.
    """
    model.train()
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        outputs, features = model(imgs)

        # The metric (triplet) loss is needed in both modes, so compute it
        # unconditionally.
        if isinstance(features, tuple):
            htri_loss = DeepSupervision(criterion_metric, features, pids)
        else:
            htri_loss = criterion_metric(features, pids)

        if args.htri_only:
            loss = htri_loss
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_class, outputs, pids)
            else:
                xent_loss = criterion_class(outputs, pids)
            loss = xent_loss + htri_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        # Bug fix: the original unconditionally read xent_loss here, which
        # raised NameError on the first batch when args.htri_only was set
        # (the htri-only branch never defined it).
        if not args.htri_only:
            xent_losses.update(xent_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'MLoss {htri_loss.val:.4f} ({htri_loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      xent_loss=xent_losses,
                      htri_loss=htri_losses))
Example #3
0
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    """Run one training epoch over `trainloader`.

    Applies `criterion` to the model outputs (via DeepSupervision when the
    model returns a tuple of outputs), steps `optimizer`, and periodically
    prints timing and loss averages every `args.print_freq` batches.

    Args:
        epoch: current epoch index (0-based; printed 1-based).
        model: the network to train.
        criterion: loss taking (outputs, pids).
        optimizer: optimizer for model parameters.
        trainloader: iterable yielding (imgs, pids, _) batches.
        use_gpu: move each batch to CUDA when True.
    """
    model.train()

    loss_meter = AverageMeter()
    timer_batch = AverageMeter()
    timer_data = AverageMeter()

    tic = time.time()
    for step, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        # time spent waiting on the data loader
        timer_data.update(time.time() - tic)

        outputs = model(imgs)
        loss = (DeepSupervision(criterion, outputs, pids)
                if isinstance(outputs, tuple)
                else criterion(outputs, pids))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # wall-clock time for the whole step
        timer_batch.update(time.time() - tic)
        tic = time.time()

        loss_meter.update(loss.item(), pids.size(0))

        if (step + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      step + 1,
                      len(trainloader),
                      batch_time=timer_batch,
                      data_time=timer_data,
                      loss=loss_meter))