Example #1
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    for p in model.parameters():
        p.requires_grad = True  # open all layers

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)
        if isinstance(outputs, (tuple, list)):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)

        if isinstance(features, (tuple, list)):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)

        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0])

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}/{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                      epoch + 1,
                      args.max_epoch,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent=xent_losses,
                      htri=htri_losses,
                      acc=accs))

        end = time.time()
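Every example on this page leans on a handful of helpers that are never shown: `AverageMeter`, `DeepSupervision`, and (in Examples #1 and #11) `accuracy`, plus `time`, `torch`, and a module-level `args`/`config` namespace. A minimal sketch of those helpers, reconstructed from how they are called in the snippets; the upstream implementations (e.g. torchreid's) may differ in detail:

class AverageMeter:
    """Computes and stores the average and current value."""

    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def DeepSupervision(criterion, xs, y):
    """Apply `criterion` to each intermediate output in `xs` and average."""
    loss = 0.
    for x in xs:
        loss += criterion(x, y)
    return loss / len(xs)


def accuracy(output, target, topk=(1,)):
    """Top-k classification accuracy, in percent, for each k in `topk`."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1)         # (batch, maxk) predicted labels
    correct = pred.t().eq(target.view(1, -1))  # (maxk, batch) hit mask
    return [correct[:k].flatten().float().mean().item() * 100.0 for k in topk]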
Example #2
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    model.train()
    losses = AverageMeter()

    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            
            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
            ))
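Example #2 above calls `criterion_htri(features, pids)`, i.e. a triplet loss that mines its own triplets from the identity labels within the batch. A minimal sketch of such a batch-hard triplet loss, assuming that calling convention; the class name and margin value are illustrative, not the originals:

import torch
import torch.nn as nn


class BatchHardTripletLoss(nn.Module):
    """Triplet loss with batch-hard mining: for each anchor, take the
    hardest (farthest) positive and hardest (closest) negative in the batch."""

    def __init__(self, margin=0.3):
        super().__init__()
        self.margin = margin

    def forward(self, features, pids):
        dist = torch.cdist(features, features)         # pairwise Euclidean distances
        same = pids.unsqueeze(0) == pids.unsqueeze(1)  # same-identity mask
        d_ap = dist.masked_fill(~same, float('-inf')).max(dim=1).values  # hardest positive
        d_an = dist.masked_fill(same, float('inf')).min(dim=1).values    # hardest negative
        return torch.relu(d_ap - d_an + self.margin).mean()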
Example #3
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    print('len of trainloader', len(trainloader))
    for batch_idx, (imgs, pids) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        pids = pids.float()  # criterion expects float targets; keeps pids on its current device (the original torch.cuda.FloatTensor cast crashes when use_gpu is False)
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % config.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t Total Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                .format(epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        loss=losses))
Example #4
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)

            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
Example #5
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()  # elapsed time (reading data + "forward+backward") for each batch
    data_time = AverageMeter()  # time spent reading data for each batch

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):  # iterate through trainloader
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()  # put input data into GPU

        # measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()  # clear the gradients of all optimized variables before each batch
        outputs = model(imgs)  # forward pass: compute predicted outputs by passing inputs to the model
        # calculate the batch loss
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)

        loss.backward()  # backward pass: compute the gradient of the loss with respect to model parameters
        optimizer.step()  # perform a single optimization step (parameter update)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # losses.update() computes and stores the average and current value;
        # loss.item() already returns a mean over the batch
        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
Example #6
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)

        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
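Examples #6 and #10 apply a `set_bn_to_eval` helper via `model.apply(...)` that is not defined anywhere on this page. A plausible minimal version, assuming the usual class-name check; the original may differ:

def set_bn_to_eval(m):
    # Put BatchNorm layers in eval mode so their running statistics are
    # frozen while the rest of the model keeps training.
    if m.__class__.__name__.find('BatchNorm') != -1:
        m.eval()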
Example #7
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        
        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            
            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
        
        end = time.time()
Example #8
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # print('imgs.shape {}  pids.shape {}'.format(imgs.shape, pids.shape))
        # measure data loading time
        pids = pids.float()  # criterion expects float targets; keeps pids on its current device
        data_time.update(time.time() - end)
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                #'Identity Loss {lossI.val:.4f} ({lossI.avg:.4f})\t'
                #'Attribute Loss {lossA.val:.4f} ({lossA.avg:.4f})\t'
                'Total Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch + 1,
                    batch_idx + 1,
                    len(trainloader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses))
Example #9
def train(epoch, model, criterion, optimizer, trainloader, use_gpu,
          use_salience, use_parsing):
    model.train()
    losses = AverageMeter()

    for batch_idx, tuple_i in enumerate(trainloader):
        if use_salience and not use_parsing:
            imgs, pids, _, salience_imgs, _ = tuple_i
        elif not use_salience and use_parsing:
            imgs, pids, _, parsing_imgs, _ = tuple_i
        elif use_salience and use_parsing:
            imgs, pids, _, salience_imgs, parsing_imgs, _ = tuple_i
        else:
            imgs, pids, _, _ = tuple_i

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        if use_salience and not use_parsing:
            outputs = model(imgs, salience_masks=salience_imgs)
        elif not use_salience and use_parsing:
            outputs = model(imgs, parsing_masks=parsing_imgs)
        elif use_salience and use_parsing:
            outputs = model(imgs,
                            salience_masks=salience_imgs,
                            parsing_masks=parsing_imgs)
        else:
            outputs = model(imgs)

        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
                epoch + 1, args.max_epoch, batch_idx + 1, len(trainloader),
                losses.val, losses.avg))
Example #10
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        
        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
        
        end = time.time()
Example #11
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    xent_losses = AverageMeter()  # AverageMeter() computes and stores the average and current value
    htri_losses = AverageMeter()  # updated via .update(curr_value)
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()  # switch to training mode
    for p in model.parameters():
        p.requires_grad = True  # open all layers (enable gradients everywhere)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):  # load one batch of training images
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # forward pass
        outputs, features = model(imgs)  # imgs -> y, v (https://github.com/KevinQian97/ELECTRICITY-MTMC/blob/ce5f173aabdc9ae6733ca36d1fdcfc53fa3d3d6e/identifier/models/resnet.py#L221)
        # compute the two losses
        if isinstance(outputs, (tuple, list)):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)
        if isinstance(features, (tuple, list)):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)
        # total loss = weighted sum of the two losses
        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()  # zero the gradients (dloss/dweight) each batch to clear residual updates from the previous step
        loss.backward()  # backpropagate
        optimizer.step()  # update the parameters

        batch_time.update(time.time() - end)

        xent_losses.update(xent_loss.item(), pids.size(0))  # record the batch statistics
        htri_losses.update(htri_loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0])

        # print training status periodically
        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent=xent_losses,
                      htri=htri_losses,
                      acc=accs))

        end = time.time()
Example #12
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu=True, summary=None, length=0):
    losses = AverageMeter()
    xentlosses = AverageMeter()
    htrilosses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        
        if args.loss == 'xent,htri':
            outputs, features = model(imgs)
        elif args.loss == 'xent':
            outputs = model(imgs)
        # use l2-softmax: L2-normalize the logits (and features, when present), then rescale
        if args.l2_reg:
            # L2 norm
            outputs = outputs / torch.norm(outputs, dim=1, keepdim=True)
            if args.loss == 'xent,htri':  # features only exist for this loss setting
                features = features / torch.norm(features, dim=1, keepdim=True)
            # scale
            outputs = outputs * 32
            # features = features * 64
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        elif args.loss == 'xent,htri':
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
        if args.loss == 'xent,htri' and not args.htri_only:  # htri_only already set loss above
            loss = xent_loss + htri_loss
        elif args.loss == 'xent':
            loss = xent_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.loss == 'xent':
            xentlosses.update(xent_loss.item(), pids.size(0))
        elif args.loss == 'xent,htri' and not args.htri_only:
            xentlosses.update(xent_loss.item(), pids.size(0))
            htrilosses.update(htri_loss.item(), pids.size(0))
        losses.update(loss.item(), pids.size(0))

        # add the losses to the summary writer
        if summary is not None:
            step = length * epoch + batch_idx
            scalars = {'Total loss': loss.item()}
            if args.loss == 'xent,htri' and not args.htri_only:
                scalars.update({'xentloss': xent_loss.item(), 'htriloss': htri_loss.item()})
            summary.add_scalars('loss', scalars, step)
            summary.add_scalar('lr', optimizer.param_groups[0]['lr'], step)

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Lr: {3:.2e}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      optimizer.param_groups[0]['lr'],
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
            if args.loss == 'xent,htri':
                print('XentLoss: {xentlosses.val:.4f} ({xentlosses.avg:.4f})\t'
                      'HtriLoss: {htrilosses.val:.4f} ({htrilosses.avg:.4f})'.format(
                          xentlosses=xentlosses, htrilosses=htrilosses))
Example #13
def train(epoch, model, criterion_xent, criterion_htri, criterion_cent,
          optimizer_model, optimizer_cent, trainloader, use_gpu):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    cent_losses = AverageMeter()
    htri_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        outputs, features = model(imgs)

        if isinstance(outputs, tuple):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)

        if isinstance(features, tuple):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)
        #xentloss = criterion_xent(outputs, pids)

        cent_loss = criterion_cent(features, pids) * args.weight_cent

        loss = xent_loss + 0.5 * htri_loss + cent_loss

        optimizer_model.zero_grad()
        optimizer_cent.zero_grad()
        loss.backward()
        optimizer_model.step()
        # remove the impact of weight_cent in learning centers
        for param in criterion_cent.parameters():
            param.grad.data *= (1. / args.weight_cent)
        optimizer_cent.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        cent_losses.update(cent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'TotalLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'ClassLoss {xcent_loss.val:.4f} ({xcent_loss.avg:.4f})\t'
                  'MetricLoss {htri_loss.val:.4f} ({htri_loss.avg:.4f})\t'
                  'CenterLoss {cent_loss.val:.4f} ({cent_loss.avg:.4f})\t'.
                  format(epoch + 1,
                         batch_idx + 1,
                         len(trainloader),
                         batch_time=batch_time,
                         data_time=data_time,
                         loss=losses,
                         xcent_loss=xent_losses,
                         htri_loss=htri_losses,
                         cent_loss=cent_losses))
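Example #13 takes a separate `optimizer_cent` for the loss's learnable centers, and the `param.grad.data *= (1. / args.weight_cent)` step cancels the `weight_cent` scaling so the centers themselves update at full rate. A minimal sketch of how that might be wired up; this `CenterLoss` is a simplified stand-in for center loss (Wen et al., 2016), and every hyperparameter value below is a placeholder:

import torch
import torch.nn as nn


class CenterLoss(nn.Module):
    """Pull each feature toward a learnable center for its class."""

    def __init__(self, num_classes, feat_dim):
        super().__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, features, pids):
        # mean squared distance between each feature and its class center
        return ((features - self.centers[pids]) ** 2).sum(dim=1).mean()


criterion_cent = CenterLoss(num_classes=751, feat_dim=2048)
# The centers get their own optimizer, typically with a much larger
# learning rate than the model's:
optimizer_cent = torch.optim.SGD(criterion_cent.parameters(), lr=0.5)
# optimizer_model would wrap model.parameters() as usual, e.g.
# optimizer_model = torch.optim.Adam(model.parameters(), lr=3e-4)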