Example #1
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    print('len of trainloader', len(trainloader))
    for batch_idx, (imgs, pids) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        pids = pids.float()  # cast labels to float without assuming CUDA is available
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % config.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t Total Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                .format(epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        loss=losses))
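
All of these snippets lean on two helpers, AverageMeter and DeepSupervision, whose definitions are not shown. A minimal sketch consistent with how they are called here (behaviour inferred from the call sites, so treat it as an assumption rather than the original implementation):

class AverageMeter:
    """Tracks the latest value and a running average (inferred from
    losses.update(loss.item(), pids.size(0)) and losses.val / losses.avg)."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def DeepSupervision(criterion, xs, y):
    """Applies criterion to every intermediate output in the tuple xs and
    averages the losses -- one common deep-supervision scheme."""
    loss = 0.0
    for x in xs:
        loss += criterion(x, y)
    return loss / len(xs)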
Example #2
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    model.train()
    losses = AverageMeter()

    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            
            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
            ))
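
criterion_htri above is called as criterion_htri(features, pids), i.e. a triplet loss with in-batch mining. Its implementation is not part of this example; a minimal batch-hard sketch matching that signature (the margin value and mining strategy are assumptions):

import torch
import torch.nn as nn

class HardTripletLoss(nn.Module):
    """Batch-hard triplet loss: for each anchor, pick the hardest positive
    and hardest negative in the batch. Assumes every batch contains at
    least two distinct identities."""
    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, features, pids):
        n = features.size(0)
        dist = torch.cdist(features, features, p=2)  # pairwise distance matrix
        same_id = pids.unsqueeze(0) == pids.unsqueeze(1)
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][same_id[i]].max().unsqueeze(0))   # hardest positive
            dist_an.append(dist[i][~same_id[i]].min().unsqueeze(0))  # hardest negative
        dist_ap, dist_an = torch.cat(dist_ap), torch.cat(dist_an)
        y = torch.ones_like(dist_an)
        # hinge: push dist_an above dist_ap by at least the margin
        return self.ranking_loss(dist_an, dist_ap, y)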
Example #3
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)

            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
Example #4
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()  # elapsed time (data loading + forward/backward) per batch
    data_time = AverageMeter()  # time spent reading data for each batch

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):  # iterate through trainloader
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()  # put input data into GPU

        # measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()  # clear the gradients of all optimized variables at each iteration
        outputs = model(imgs)  # forward pass: compute predicted outputs for the batch
        # calculate the batch loss
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)

        loss.backward()  # backward pass: compute gradients of the loss w.r.t. model parameters
        optimizer.step()  # perform a single optimization step (parameter update)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # losses.update() computes and stores the current and average values
        losses.update(loss.item(), pids.size(0))  # loss.item() already returns a mean value

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
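
A train function like this is meant to be driven by an outer epoch loop. A hypothetical wiring (args.max_epoch and the optional scheduler are assumptions, not part of the snippet):

for epoch in range(args.max_epoch):
    train(epoch, model, criterion, optimizer, trainloader, use_gpu)
    if scheduler is not None:  # optional learning-rate scheduler
        scheduler.step()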
Example #5
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)

        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
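
The freeze_bn path calls model.apply(set_bn_to_eval), whose definition is not shown. A minimal sketch of what such a helper usually looks like (inferred from the name, so an assumption):

def set_bn_to_eval(m):
    # put BatchNorm layers in eval mode so their running statistics
    # are frozen while the rest of the model keeps training
    if m.__class__.__name__.find('BatchNorm') != -1:
        m.eval()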
Example #6
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        
        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            
            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
        
        end = time.time()
Example #7
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        pids = pids.float()  # cast labels to float without assuming CUDA is available
        # measure data loading time
        data_time.update(time.time() - end)
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Total Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch + 1,
                    batch_idx + 1,
                    len(trainloader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses))
Example #8
def train(epoch, model, criterion, optimizer, trainloader, use_gpu,
          use_salience, use_parsing):
    model.train()
    losses = AverageMeter()

    for batch_idx, tuple_i in enumerate(trainloader):
        if use_salience and not use_parsing:
            imgs, pids, _, salience_imgs, _ = tuple_i
        elif not use_salience and use_parsing:
            imgs, pids, _, parsing_imgs, _ = tuple_i
        elif use_salience and use_parsing:
            imgs, pids, _, salience_imgs, parsing_imgs, _ = tuple_i
        else:
            imgs, pids, _, _ = tuple_i

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        if use_salience and not use_parsing:
            outputs = model(imgs, salience_masks=salience_imgs)
        elif not use_salience and use_parsing:
            outputs = model(imgs, parsing_masks=parsing_imgs)
        elif use_salience and use_parsing:
            outputs = model(imgs,
                            salience_masks=salience_imgs,
                            parsing_masks=parsing_imgs)
        else:
            outputs = model(imgs)

        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
                epoch + 1, args.max_epoch, batch_idx + 1, len(trainloader),
                losses.val, losses.avg))
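
Since the loader's tuple layout changes with use_salience/use_parsing, the call site has to match. A hypothetical invocation for the salience-only case (the loader contract is inferred from the unpacking above):

# loader assumed to yield (imgs, pids, camids, salience_imgs, img_paths)
train(epoch, model, criterion, optimizer, trainloader,
      use_gpu=True, use_salience=True, use_parsing=False)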
Example #9
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
        
        end = time.time()
Example #10
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu=True, summary=None, length=0):
    losses = AverageMeter()
    xentlosses = AverageMeter()
    htrilosses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        
        if args.loss == 'xent,htri':
            outputs, features = model(imgs)
        elif args.loss == 'xent':
            outputs = model(imgs)
        # use l2-softmax
        if args.l2_reg:
            # L2-normalize, then rescale the logits
            outputs = outputs / torch.norm(outputs, dim=1, keepdim=True)
            if args.loss == 'xent,htri':  # features only exist in this branch
                features = features / torch.norm(features, dim=1, keepdim=True)
            # scale
            outputs = outputs * 32
            # features = features * 64
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        elif args.loss == 'xent,htri':
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
        if args.loss == 'xent,htri' and not args.htri_only:  # htri_only already set loss above
            loss = xent_loss + htri_loss
        elif args.loss == 'xent':
            loss = xent_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.loss == 'xent':
            xentlosses.update(xent_loss.item(), pids.size(0))
        elif args.loss == 'xent,htri':
            xentlosses.update(xent_loss.item(), pids.size(0))
            htrilosses.update(htri_loss.item(), pids.size(0))
        losses.update(loss.item(), pids.size(0))

        # add the losses to the summary writer
        if summary is not None:
            step = length * epoch + batch_idx
            scalars = {'Total loss': loss.item()}
            if args.loss == 'xent,htri' and not args.htri_only:
                # component losses only exist in the combined-loss branch
                scalars.update({'xentloss': xent_loss.item(), 'htriloss': htri_loss.item()})
            summary.add_scalars('loss', scalars, step)
            summary.add_scalar('lr', optimizer.param_groups[0]['lr'], step)

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Lr: {3:.2e}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), optimizer.param_groups[0]['lr'], batch_time=batch_time,data_time=data_time, loss=losses))
            if args.loss == 'xent,htri':
                print('XentLoss: {xentlosses.val:.4f} ({xentlosses.avg:.4f})\t'
                      'HtriLoss: {htrilosses.val:.4f} ({htrilosses.avg:.4f})\t'.format(xentlosses=xentlosses, htrilosses=htrilosses))
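
The summary argument is used with TensorBoard-style add_scalars/add_scalar calls, so a torch.utils.tensorboard.SummaryWriter fits the interface. A hypothetical wiring (the log directory and args.max_epoch are assumptions):

from torch.utils.tensorboard import SummaryWriter

summary = SummaryWriter(log_dir='runs/reid')  # hypothetical log directory
for epoch in range(args.max_epoch):
    train(epoch, model, criterion_xent, criterion_htri, optimizer,
          trainloader, use_gpu=True, summary=summary, length=len(trainloader))
summary.close()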