def train_rotTester(epoch,
                    model,
                    criterion_rot,
                    optimizer,
                    trainloader,
                    use_gpu,
                    writer,
                    args,
                    freeze_bn=True):

    #pdb.set_trace()
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    rot_loss_meter = AverageMeter()
    printed = False
    model.train()
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)
        #model.base_model.eval()
    end = time.time()
    for batch_idx, (imgs, pids, rotation_labels) in enumerate(trainloader):
        data_time.update(time.time() - end)
        if use_gpu:
            imgs, pids, rotation_labels = imgs.cuda(), pids.cuda(), rotation_labels.cuda()

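        # Self-supervised pretext task: the model predicts which rotation class was applied to each image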
        rotation_logits = model(imgs)
        rot_loss = criterion_rot(rotation_logits, rotation_labels)

        loss = rot_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        rot_loss_meter.update(rot_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Rot Loss {rot_loss.val:.4f} ({rot_loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      rot_loss=rot_loss_meter))

        end = time.time()
    writer.add_scalars('loss',
                       dict(angle_loss=rot_loss_meter.avg, loss=losses.avg),
                       epoch + 1)
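# All of these snippets rely on an AverageMeter helper for running statistics; its
# implementation is not included here, but a minimal sketch consistent with how it is
# used above (update(val, n), .val, .avg) could look like this:
class AverageMeter(object):
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count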
Example 2
def test(model, queryloader, galleryloader, pool, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b*s, c, h, w)
            
            end = time.time()
            outputs, features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b*s, c, h, w)
            
            end = time.time()
            torch.cuda.empty_cache()
            outputs, features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch*args.seq_len))

    sys.stdout.flush()
    print("Computing CMC and mAP")
    cmc, mAP = gpu_evaluate(qf.cuda(), gf.cuda(), q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")
    sys.stdout.flush()

    if return_distmat:
        # distmat is not computed in this gpu_evaluate path; build the squared-Euclidean matrix on demand
        distmat = torch.cdist(qf, gf).pow(2).numpy()
        return distmat
    return cmc[0]
Example 3
def train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=False):
    losses = AverageMeter()
    precisions = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    epoch_iterations = len(trainloader)

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, ((img1, img2), pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            img1, img2, pids = img1.cuda(), img2.cuda(), pids.cuda()

        y_large, y_small, y_joint = model(img1, img2)

        loss_batch = args.train_loss_batch_size
        how_many_mini = args.train_batch_size // loss_batch
        for mini_idx in range(how_many_mini):

            start_index = mini_idx * loss_batch
            end_index = start_index + loss_batch

            mini_y_large = y_large[start_index:end_index, :]
            mini_y_small = y_small[start_index:end_index, :]
            mini_y_joint = y_joint[start_index:end_index, :]
            mini_pids = pids[start_index:end_index]

            loss_large = criterion(mini_y_large, mini_pids)
            loss_small = criterion(mini_y_small, mini_pids)
            loss_joint = criterion(mini_y_joint, mini_pids)

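            # Use the joint branch's softmax output as a soft target to guide the large and small branches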
            joint_prob = F.softmax(mini_y_joint, dim=1)
            loss_joint_large = criterion(mini_y_large, joint_prob, one_hot=True)
            loss_joint_small = criterion(mini_y_small, joint_prob, one_hot=True)

            total_loss_large = loss_large + loss_joint_large #+
            total_loss_small = loss_small + loss_joint_small #+
            total_loss_joint = loss_joint #+

            prec, = accuracy(mini_y_joint.data, mini_pids.data)
            prec1 = prec[0]  # get top 1

            optimizer.zero_grad()

            # total_loss_large.backward(retain_graph=True)
            # total_loss_small.backward(retain_graph=True)
            # total_loss_joint.backward()
            # sum losses
            loss = total_loss_joint + total_loss_small + total_loss_large
            loss.backward(retain_graph=True)

            optimizer.step()

            loss_iter = epoch*epoch_iterations+batch_idx*how_many_mini+mini_idx
            writer.add_scalar('iter/loss_small', loss_small, loss_iter)
            writer.add_scalar('iter/loss_large', loss_large, loss_iter)
            writer.add_scalar('iter/loss_joint', loss_joint, loss_iter)
            writer.add_scalar('iter/loss_joint_small', loss_joint_small, loss_iter)
            writer.add_scalar('iter/loss_joint_large', loss_joint_large, loss_iter)
            writer.add_scalar('iter/total_loss_small', total_loss_small, loss_iter)
            writer.add_scalar('iter/total_loss_large', total_loss_large, loss_iter)
            writer.add_scalar('iter/total_loss_joint', total_loss_joint, loss_iter)
            writer.add_scalar('iter/loss', loss, loss_iter)


            losses.update(loss.item(), pids.size(0))
            precisions.update(prec1, pids.size(0))

            if (batch_idx*how_many_mini+mini_idx + 1) % args.print_freq == 0:
                print('Epoch: [{0:02d}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec {prec.val:.2%} ({prec.avg:.2%})\t'.format(
                       epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
                       data_time=data_time, loss=losses, prec=precisions))

        batch_time.update(time.time() - end)
        end = time.time()

    return losses.avg, precisions.avg
Example 4
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size * args.seq_len))

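    # Pairwise squared Euclidean distances: ||q_i - g_j||^2 = ||q_i||^2 + ||g_j||^2 - 2 * q_i . g_j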
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    if return_distmat:
        return distmat
    return cmc[0]
Example 5
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         name=None):

    flip_eval = args.flip_eval

    if flip_eval:
        print('# Using Flip Eval')

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []

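        # Flip eval: each batch is paired with its counterpart from a second (flipped) loader and the two feature sets are averaged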
        if flip_eval:
            enumerator = enumerate(zip(queryloader[0], queryloader[1]))
        else:
            enumerator = enumerate(queryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, g_paths = [], [], [], []
        if flip_eval:
            enumerator = enumerate(zip(galleryloader[0], galleryloader[1]))
        else:
            enumerator = enumerate(galleryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, _) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

        # if os.environ.get('save_feat'):
        if True:
            import scipy.io as io
            # io.savemat(os.environ.get('save_feat'), {'q': qf.data.numpy(), 'g': gf.data.numpy(), 'qt': q_pids, 'gt': g_pids})
            io.savemat(
                'save/abd_feat_' + name + '.mat', {
                    'q': qf.data.numpy(),
                    'g': gf.data.numpy(),
                    'qt': q_pids,
                    'gt': g_pids
                })
            # return

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # distmat.addmm_(1, -2, qf, gf.t())
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    # if os.environ.get('distmat'):
    if True:
        import scipy.io as io
        # io.savemat(os.environ.get('distmat'), {'distmat': distmat, 'qp': q_paths, 'gp': g_paths})
        io.savemat('save/abd_distmat_' + name + '.mat', {
            'distmat': distmat,
            'qp': q_paths,
            'gp': g_paths
        })

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
def test(model,
         testloader,
         use_gpu,
         args,
         writer,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         use_cosine=False,
         draw_tsne=False,
         tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        qf_std = []
        q_imgPath = []
        for batch_idx, (input) in enumerate(testloader):
            if not args.draw_tsne:
                imgs, pids, _ = input
            else:
                imgs, pids, _, img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features, std = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            std = std.data.cpu()
            qf.append(features)
            qf_std.append(std)
            q_pids.extend(pids)
        qf = torch.cat(qf, 0)
        qf_std = torch.cat(qf_std, 0)
        q_pids = np.asarray(q_pids)
        q_imgPath = np.asarray(q_imgPath)

        print(
            "Extracted features for test set, obtained {}-by-{} matrix".format(
                qf.size(0), qf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))
    m = qf.size(0)

    if args.use_ecn:
        distmat = (ECN(qf.numpy(),
                       qf.numpy(),
                       k=25,
                       t=3,
                       q=8,
                       method='rankdist')).transpose()
    elif args.mahalanobis:
        print("Using STD for Mahalanobis distance")
        distmat = torch.zeros((m, m))
        # #pdb.set_trace()
        # qf = qf.data.numpy()
        # gf= gf.data.numpy()
        # qf_std = qf_std.data.numpy()
        # for q_indx in range(int(m)):
        #     distmat[q_indx]= pw(np.expand_dims(qf[q_indx],axis=0),gf,metric='mahalanobis',n_jobs=8, VI=(np.eye(qf_std[q_indx].shape[0])*(1/qf_std[q_indx]).transpose()))
        #     print(q_indx)
        # pdb.set_trace()
        qf = qf / qf_std
        for q_indx in range(int(m)):
            qf_norm = qf * 1 / qf_std[q_indx]
            distmat[q_indx] = torch.pow(qf[q_indx], 2).sum(dim=0, keepdim=True).expand(m) + \
                      torch.pow(qf_norm, 2).sum(dim=1, keepdim=True).squeeze()
            distmat[q_indx].unsqueeze(0).addmm_(qf[q_indx].unsqueeze(0),
                                                qf_norm.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()
    elif not (use_cosine or args.use_cosine):

        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                  torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
        distmat.addmm_(qf, qf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

        # if args.re_ranking:
        #     distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
        #               torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
        #     distmat_q_q.addmm_(1, -2, qf, qf.t())
        #     distmat_q_q = distmat_q_q.numpy()
        #
        #     distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
        #               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
        #     distmat_g_g.addmm_(1, -2, gf, gf.t())
        #     distmat_g_g = distmat_g_g.numpy()
        #
        #     print("Normal Re-Ranking")
        #     distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)
    else:
        qf_norm = qf / qf.norm(dim=1)[:, None]
        distmat = torch.addmm(torch.ones((m, m)), qf_norm,
                              qf_norm.transpose(0, 1), beta=1, alpha=-1)
        distmat = distmat.numpy()

        # if args.re_ranking:
        #     distmat_q_q = torch.addmm(1,torch.ones((m,m)),-1,qf_norm,qf_norm.transpose(0,1))
        #     distmat_q_q = distmat_q_q.numpy()
        #
        #     distmat_g_g = torch.addmm(1,torch.ones((n,n)),-1,gf_norm,gf_norm.transpose(0,1))
        #     distmat_g_g = distmat_g_g.numpy()
        #
        #     print("Re-Ranking with Cosine")
        #     distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)

    print("Computing CMC and mAP")
    K_range = [1, 2, 4, 8, 16, 32]
    recall = evaluate_recall(distmat, q_pids, K_range)

    print("Results ----------")
    print("Recall@K results")
    for j, k in enumerate(K_range):
        print("Recall@{:<3}: {:.1%}".format(k, recall[j]))
    print("------------------")

    # if draw_tsne:
    #     drawTSNE(qf,gf,q_pids, g_pids, q_camids, g_camids,q_imgPath, g_imgPath,tsne_clusters,args.save_dir)
    if return_distmat:
        return distmat

    if writer != None:
        writer.add_scalars('Testing', dict(rank_1=recall[0], rank_2=recall[1]),
                           epoch + 1)
    return recall[0]
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, writer, args, freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    confidence_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    printed = False
    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        if isinstance(outputs, tuple):
            xent_loss = DeepSupervision(criterion[0], outputs, pids)
            confidence_loss = DeepSupervision(criterion[1], outputs, pids)
        else:
            xent_loss = criterion[0](outputs, pids)
            confidence_loss = criterion[1](outputs, pids)
        if args.confidence_penalty:
            loss = args.lambda_xent * xent_loss - args.confidence_beta * confidence_loss
        elif args.jsd:
            loss = args.lambda_xent * xent_loss + args.confidence_beta * confidence_loss
        else:
            loss = args.lambda_xent * xent_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        confidence_losses.update(confidence_loss.item(), pids.size(0))

        if args.jsd:
            text = 'JSD_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
        else:
            text = 'Confi_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'

        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            print(('Epoch: [{0}][{1}/{2}]\t'
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                   'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                   + text +
                   'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t').format(
                       epoch + 1, batch_idx + 1, len(trainloader),
                       batch_time=batch_time, data_time=data_time,
                       xent_loss=xent_losses, confidence_loss=confidence_losses,
                       loss=losses))

        end = time.time()

    writer.add_scalars(
      'loss',
      dict(loss=losses.avg,
            xent_loss = xent_losses.avg,
            confidence_loss = confidence_losses.avg),
      epoch + 1)
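# Several of the training loops freeze BatchNorm statistics with model.apply(set_bn_to_eval).
# The helper is not shown in these snippets; a minimal sketch under that assumption
# (switch every BatchNorm module to eval mode so running statistics are not updated) could be:
def set_bn_to_eval(m):
    # nn.Module.apply calls this on every submodule; match BatchNorm1d/2d/3d by class name.
    if m.__class__.__name__.find('BatchNorm') != -1:
        m.eval()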
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    global pn_gan
    global poses
    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        if args.use_gan:
            for pose_idx, pose_img in enumerate(poses):
                if use_gpu:
                    pose_img = Variable(pose_img).cuda()
                tgt_img = pn_gan(Variable(imgs).cuda(), pose_img)
                outputs = model(tgt_img)
                if isinstance(outputs, tuple):
                    loss = DeepSupervision(criterion, outputs, pids)
                else:
                    loss = criterion(outputs, pids)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                losses.update(loss.item(), pids.size(0))

        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), pids.size(0))

        batch_time.update(time.time() - end)

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
        end = time.time()
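# When a model returns a tuple of outputs, the loops above route the loss through
# DeepSupervision. Its implementation is not shown here; a minimal sketch consistent
# with the call pattern DeepSupervision(criterion, outputs, pids), applying the criterion
# to every output and summing the losses (the actual helper may average instead), is:
def DeepSupervision(criterion, xs, y):
    # xs: iterable of predictions from intermediate heads; y: shared targets.
    loss = 0.0
    for x in xs:
        loss = loss + criterion(x, y)
    return loss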
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          optimizer,
          trainloader,
          use_gpu,
          writer,
          args,
          freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    confidence_losses = AverageMeter()
    htri_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    printed = False
    model.train()
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)
    end = time.time()
    for batch_idx, ((imgs_a, pids_a, _), (imgs_p, pids_p,
                                          _)) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs_a, pids_a = imgs_a.cuda(), pids_a.cuda()
            imgs_p, pids_p = imgs_p.cuda(), pids_p.cuda()

        outputs_a, features_a = model(imgs_a)
        _, features_p = model(imgs_p)
        if isinstance(outputs_a, tuple):
            xent_loss = DeepSupervision(criterion_xent[0], outputs_a, pids_a)
        else:
            xent_loss = criterion_xent[0](outputs_a, pids_a)

        confidence_loss = criterion_xent[1](outputs_a)
        htri_loss = 0
        if not (criterion_htri is None):
            htri_loss = criterion_htri(features_a, features_p, pids_a, pids_p)

        if args.confidence_penalty:
            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss - args.confidence_beta * confidence_loss
        else:
            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids_a.size(0))
        confidence_losses.update(confidence_loss.item(), pids_a.size(0))
        if not args.htri_only:
            xent_losses.update(xent_loss.item(), pids_a.size(0))
        if criterion_htri is None:
            htri_losses.update(htri_loss, pids_a.size(0))
        else:
            htri_losses.update(htri_loss.item(), pids_a.size(0))
        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                'Xent Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                'Confi_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
                'Htri Loss {htri_loss.val:.4f} ({htri_loss.avg:.4f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch + 1,
                    batch_idx + 1,
                    len(trainloader),
                    batch_time=batch_time,
                    data_time=data_time,
                    xent_loss=xent_losses,
                    confidence_loss=confidence_losses,
                    htri_loss=htri_losses,
                    loss=losses))

        end = time.time()

    writer.add_scalars(
        'Losses',
        dict(total_loss=losses.avg,
             xen_loss=xent_losses.avg,
             htri_loss=htri_losses.avg,
             confidence_loss=confidence_losses.avg), epoch + 1)
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          writer,
          use_gpu,
          fixbase=False):
    losses = AverageMeter()
    precisions = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    epoch_iterations = len(trainloader)

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, ((img1, img2), pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            img1, img2, pids = img1.cuda(), img2.cuda(), pids.cuda()

        y10, y05, y_consensus = model(img1, img2)

        loss10 = criterion(y10, pids)
        loss05 = criterion(y05, pids)
        loss_consensus = criterion(y_consensus, pids)

        prec, = accuracy(y_consensus.data, pids.data)
        prec1 = prec[0]  # get top 1

        writer.add_scalar('iter/loss', loss_consensus,
                          epoch * epoch_iterations + batch_idx)
        writer.add_scalar('iter/prec1', prec1,
                          epoch * epoch_iterations + batch_idx)

        optimizer.zero_grad()

        loss10.backward(retain_graph=True)
        loss05.backward(retain_graph=True)
        loss_consensus.backward()

        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss_consensus.item(), pids.size(0))
        precisions.update(prec1, pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0:02d}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec {prec.val:.2%} ({prec.avg:.2%})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      prec=precisions))

        end = time.time()

    return losses.avg, precisions.avg
Example 11
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         args,
         writer,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         use_cosine=False,
         draw_tsne=False,
         tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        q_imgPath = []
        for batch_idx, (input) in enumerate(queryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_imgPath = np.asarray(q_imgPath)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        g_imgPath = []
        end = time.time()
        for batch_idx, (input) in enumerate(galleryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                g_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_imgPath = np.asarray(g_imgPath)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    if not use_cosine:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

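        # k-reciprocal re-ranking also needs the query-query and gallery-gallery distance matrices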
        if args.re_ranking:
            distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                      torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
            distmat_q_q.addmm_(qf, qf.t(), beta=1, alpha=-2)
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
            distmat_g_g.addmm_(gf, gf.t(), beta=1, alpha=-2)
            distmat_g_g = distmat_g_g.numpy()

            distmat = re_ranking(distmat,
                                 distmat_q_q,
                                 distmat_g_g,
                                 k1=20,
                                 k2=6,
                                 lambda_value=0.3)
    else:
        m, n = qf.size(0), gf.size(0)
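        # Cosine distance: 1 - cosine similarity on L2-normalized features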
        qf_norm = qf / qf.norm(dim=1)[:, None]
        gf_norm = gf / gf.norm(dim=1)[:, None]
        distmat = torch.addmm(torch.ones((m, n)), qf_norm,
                              gf_norm.transpose(0, 1), beta=1, alpha=-1)
        distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if draw_tsne:
        drawTSNE(qf, gf, q_pids, g_pids, q_camids, g_camids, q_imgPath,
                 g_imgPath, tsne_clusters, args.save_dir)
    if return_distmat:
        return distmat

    if writer != None:
        writer.add_scalars('Testing',
                           dict(rank_1=cmc[0], rank_5=cmc[4], mAP=mAP),
                           epoch + 1)
    return cmc[0]
def train_w2v_single_batch_text(writer, epoch, model, criterion_htri_reid,
                                criterion_attributes, optimizer_reid,
                                trainloader_reid, use_gpu):

    losses = AverageMeter()

    if args.attraug_reid:
        losses_attributes_reid = AverageMeter()
    if args.global_learning:
        losses_htri_glob_feat = AverageMeter()

    model.train()
    if args.htri_learning:
        losses_htri_glob_feat = AverageMeter()
        criterion_htri_reid1 = TripletLoss1(margin=0.3)

    for batch_idx, (imgs, pids, camID, glove,
                    glove_labels) in enumerate(trainloader_reid):

        for i in range(6):
            glove[i] = glove[i].cuda().float()
            glove_labels[i] = glove_labels[i].cuda().float()

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        emb, glob_glove, glob_img = model(imgs.squeeze(), glove)

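        # Concatenate text (GloVe) and image global embeddings, duplicating the pids, so the triplet loss sees cross-modal positives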
        glob_feat = torch.cat((glob_glove, glob_img), dim=0)
        pids_tot = torch.cat((pids, pids), dim=0)

        re_loss = 0

        if args.global_learning:
            # cross modality
            htri_glob_feat = criterion_htri_reid(glob_feat, pids_tot)
            losses_htri_glob_feat.update(to_scalar(htri_glob_feat),
                                         pids.size(0))
            re_loss += htri_glob_feat

        if args.htri_learning:
            htri_glob_feat = criterion_htri_reid1(
                glob_glove, pids) + criterion_htri_reid1(glob_img, pids)
            losses_htri_glob_feat.update(to_scalar(htri_glob_feat),
                                         pids.size(0))
            re_loss += htri_glob_feat

        if args.attraug_reid:
            loss_attribute_reid = criterion_attributes(emb, glove_labels)
            re_loss += loss_attribute_reid * args.coeff_loss_attributes_reid
            losses_attributes_reid.update(to_scalar(loss_attribute_reid),
                                          pids.size(0))

        optimizer_reid.zero_grad()
        losses.update(to_scalar(re_loss), pids.size(0))
        re_loss.backward()
        optimizer_reid.step()

        if (batch_idx + 1) % 100 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss reid and att {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader_reid),
                      loss=losses))

            if args.global_learning or args.htri_learning:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Loss cross glob {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader_reid),
                        loss=losses_htri_glob_feat))

            if args.attraug_reid:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Loss attributes {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader_reid),
                        loss=losses_attributes_reid))

    if args.log_to_file:
        writer.add_scalars('train', dict(loss_reid=losses.avg), epoch)

        if args.global_learning:
            writer.add_scalars('train',
                               dict(glob_feat=losses_htri_glob_feat.avg),
                               epoch)

        if args.attraug_reid:
            writer.add_scalars(
                'train', dict(loss_attributes=losses_attributes_reid.avg),
                epoch)
Example 13
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=(1, 5, 10, 20),
         return_distmat=False):
    global mAP
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, adj) in enumerate(queryloader):
            if use_gpu:
                imgs, adj = imgs.cuda(), adj.cuda()
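            # Dense sampling yields n clips per tracklet; fold the clip dimension into the batch dimension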
            if args.test_sample in ['dense', 'skipdense']:
                b, n, s, c, h, w = imgs.size()
                imgs = imgs.view(b * n, s, c, h, w)
                adj = adj.view(b * n, adj.size(-1), adj.size(-1))
            else:
                n, s, c, h, w = imgs.size()

            end = time.time()
            features = model(imgs, adj)
            batch_time.update(time.time() - end)
            if args.test_sample in ['dense', 'skipdense']:
                features = features.view(n, 1, -1)
                if pool == 'avg':
                    features = torch.mean(features, 0)
                else:
                    features, _ = torch.max(features, 0)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids.numpy())
            q_camids.extend(camids.numpy())
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, adj) in enumerate(galleryloader):
            if use_gpu:
                imgs, adj = imgs.cuda(), adj.cuda()
            if args.test_sample in ['dense', 'skipdense']:
                b, n, s, c, h, w = imgs.size()
                imgs = imgs.view(b * n, s, c, h, w)
                adj = adj.view(b * n, adj.size(-1), adj.size(-1))
            else:
                n, s, c, h, w = imgs.size()

            end = time.time()
            features = model(imgs, adj)
            batch_time.update(time.time() - end)
            if args.test_sample in ['dense', 'skipdense']:
                features = features.view(n, 1, -1)
                if pool == 'avg':
                    features = torch.mean(features, 0)
                else:
                    features, _ = torch.max(features, 0)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids.numpy())
            g_camids.extend(camids.numpy())
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch * args.seq_len))

    print('Computing distance matrix with metric={} ...'.format(
        args.dist_metric))
    distmat = metrics.compute_distance_matrix(qf, gf, args.dist_metric)
    distmat = distmat.numpy()

    if args.re_rank:
        print('Applying person re-ranking ...')
        distmat_qq = metrics.compute_distance_matrix(qf, qf, args.dist_metric)
        distmat_gg = metrics.compute_distance_matrix(gf, gf, args.dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print("Computing CMC and mAP")

    cmc, mAP = metrics.evaluate_rank(distmat,
                                     q_pids,
                                     g_pids,
                                     q_camids,
                                     g_camids,
                                     use_metric_mars=True)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0], mAP
Example 14
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          optimizer,
          trainloader,
          use_gpu,
          writer=None):
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    precisions = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _, adj) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, adj = imgs.cuda(), pids.cuda(), adj.cuda()

        outputs, features = model(imgs, adj)
        if isinstance(outputs, tuple) or isinstance(outputs, list):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)

        if isinstance(features, tuple) or isinstance(features, list):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)

        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))

        precisions.update(metrics.accuracy(outputs, pids).mean(axis=0)[0])

        if ((batch_idx + 1) % args.print_freq
                == 0) or (args.print_last
                          and batch_idx == (len(trainloader) - 1)):
            num_batches = len(trainloader)
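            # ETA = average batch time x (batches left in this epoch + batches in all remaining epochs)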
            eta_seconds = batch_time.avg * (num_batches - (batch_idx + 1) +
                                            (args.max_epoch -
                                             (epoch + 1)) * num_batches)
            eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
            print('CurTime: {0}\t'
                  'Epoch: [{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Speed {speed:.3f} samples/s\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Top1 {prec.val:.4f} ({prec.avg:.4f})\t'
                  'Eta {eta}'.format(cur_time(),
                                     epoch + 1,
                                     batch_idx + 1,
                                     len(trainloader),
                                     speed=1 / batch_time.avg * imgs.shape[0],
                                     batch_time=batch_time,
                                     data_time=data_time,
                                     xent=xent_losses,
                                     htri=htri_losses,
                                     prec=precisions,
                                     eta=eta_str))

        end = time.time()
    writer.add_scalar(tag='loss/xent_loss',
                      scalar_value=xent_losses.avg,
                      global_step=epoch + 1)
    writer.add_scalar(tag='loss/htri_loss',
                      scalar_value=htri_losses.avg,
                      global_step=epoch + 1)
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          writer,
          args,
          freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    info_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    printed = False
    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        (mu, std), outputs = model(imgs)
        if isinstance(outputs, tuple):
            xent_loss = DeepSupervision(criterion, outputs, pids)
        else:
            xent_loss = criterion(outputs, pids)

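        # Information bottleneck term: KL divergence between N(mu, std) and the standard normal prior, converted from nats to bits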
        info_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) -
                            std.pow(2)).sum(1).mean().div(math.log(2))
        loss = args.lambda_xent * xent_loss + args.beta * info_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        info_losses.update(info_loss.item(), pids.size(0))
        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'Info_Loss {info_loss.val:.4f} ({info_loss.avg:.4f})\t'
                  'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent_loss=xent_losses,
                      info_loss=info_losses,
                      loss=losses))

        end = time.time()

    writer.add_scalars(
        'loss',
        dict(loss=losses.avg,
             xent_loss=xent_losses.avg,
             info_loss=info_losses.avg), epoch + 1)
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          writer,
          args,
          freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    info_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    confidence_losses = AverageMeter()
    printed = False
    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        (mu, std), outputs = model(imgs)
        text_dict = {}
        if not isinstance(criterion, MultiHeadLossAutoTune):
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion[0], outputs, pids)
                confidence_loss = DeepSupervision(criterion[1], outputs, pids)
            else:
                xent_loss = criterion[0](outputs, pids)
                confidence_loss = criterion[1](outputs, pids)

            info_loss = criterion[-1](mu.float(), std.float())

            if args.confidence_penalty:
                loss = args.lambda_xent * xent_loss + args.beta * info_loss - args.confidence_beta * confidence_loss
            elif args.jsd:
                loss = args.lambda_xent * xent_loss + args.beta * info_loss + args.confidence_beta * confidence_loss
            else:
                loss = args.lambda_xent * xent_loss + args.beta * info_loss
            confidence_losses.update(confidence_loss.item(), pids.size(0))
        else:

            if args.confidence_penalty or args.jsd:
                loss, individual_losses = criterion([outputs, outputs, mu],
                                                    [pids, pids, std])
                confidence_loss = individual_losses[1]
            else:
                loss, individual_losses = criterion([outputs, mu], [pids, std])
                confidence_loss = 0
            xent_loss = individual_losses[0]
            info_loss = individual_losses[-1]
            text_dict = criterion.batch_meta()
            confidence_losses.update(0, pids.size(0))
        #info_loss = -0.5*(1-2*std.log()-(1+mu.pow(2))/(2*std.pow(2))).sum(1).mean().div(math.log(2))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        xent_losses.update(xent_loss.item(), pids.size(0))
        info_losses.update(info_loss.item(), pids.size(0))
        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            if args.jsd:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                    'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                    'JSD_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
                    'Info_Loss {info_loss.val:.4f} ({info_loss.avg:.4f})\t'
                    'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        batch_time=batch_time,
                        data_time=data_time,
                        xent_loss=xent_losses,
                        confidence_loss=confidence_losses,
                        info_loss=info_losses,
                        loss=losses), text_dict)
            else:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                    'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                    'Confi_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
                    'Info_Loss {info_loss.val:.4f} ({info_loss.avg:.4f})\t'
                    'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        batch_time=batch_time,
                        data_time=data_time,
                        xent_loss=xent_losses,
                        confidence_loss=confidence_losses,
                        info_loss=info_losses,
                        loss=losses), text_dict)

        end = time.time()

    writer.add_scalars(
        'loss',
        dict(loss=losses.avg,
             xent_loss=xent_losses.avg,
             info_loss=info_losses.avg,
             confidence_loss=confidence_losses.avg), epoch + 1)
def test_rotTester(model,
                   criterion_rot,
                   queryloader,
                   galleryloader,
                   trainloader,
                   use_gpu,
                   args,
                   writer,
                   epoch,
                   ranks=[1, 5, 10, 20],
                   return_distmat=False):
    batch_time = AverageMeter()
    top1_test = AverageMeter()
    top1_train = AverageMeter()
    rot_loss_meter = AverageMeter()
    training_rot_loss_meter = AverageMeter()

    model.eval()

    with torch.no_grad():

        for batch_idx, (imgs, pids, rotation_labels) in enumerate(queryloader):
            if use_gpu:
                imgs, rotation_labels = imgs.cuda(), rotation_labels.cuda()

            end = time.time()
            rot_logits = model(imgs)
            batch_time.update(time.time() - end)
            rot_loss = criterion_rot(rot_logits, rotation_labels)
            prec1 = accuracy(rot_logits.data, rotation_labels.data)
            top1_test.update(prec1[0])
            rot_loss_meter.update(rot_loss.item(), pids.size(0))
        end = time.time()

        print("--------Done Query-----")
        for batch_idx, (imgs, pids,
                        rotation_labels) in enumerate(galleryloader):
            if use_gpu:
                imgs, rotation_labels = imgs.cuda(), rotation_labels.cuda()

            end = time.time()
            rot_logits = model(imgs)
            batch_time.update(time.time() - end)

            rot_loss = criterion_rot(rot_logits, rotation_labels)
            prec1 = accuracy(rot_logits.data, rotation_labels.data)
            top1_test.update(prec1[0])
            rot_loss_meter.update(rot_loss.item(), pids.size(0))

        print("--------Done Gallery-----")
        for batch_idx, (imgs, pids, rotation_labels) in enumerate(trainloader):
            if use_gpu:
                imgs, rotation_labels = imgs.cuda(), rotation_labels.cuda()

            end = time.time()
            rot_logits = model(imgs)
            batch_time.update(time.time() - end)

            rot_loss = criterion_rot(rot_logits, rotation_labels)
            prec1 = accuracy(rot_logits.data, rotation_labels.data)
            top1_train.update(prec1[0])
            training_rot_loss_meter.update(rot_loss.item(), pids.size(0))

        print("--------Done Training-----")
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    print("Test Angle Acc:{:.2f}".format(top1_test.avg.cpu().numpy()[0]))
    print("Train Angle Acc:{:.2f}".format(top1_train.avg.cpu().numpy()[0]))
    print("------------------")

    if writer is not None:
        writer.add_scalars(
            'Accuracy Graph',
            dict(test_accuracy=top1_test.avg.cpu().numpy()[0],
                 train_accuracy=top1_train.avg.cpu().numpy()[0]), epoch + 1)

        writer.add_scalars(
            'Loss Graph',
            dict(test_loss=rot_loss_meter.avg,
                 train_loss=training_rot_loss_meter.avg), epoch + 1)
    return top1_test.avg.cpu().numpy()[0]
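
# `accuracy` above is assumed to be the usual top-k precision helper from the
# PyTorch ImageNet example; it returns a list of 1-element tensors, which is
# why callers index with [0] and later call .cpu().numpy()[0]. A sketch:
def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the given logits and integer class labels."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)  # indices of the top-k logits
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
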
Esempio n. 18
def train(epoch,
          model,
          criterion,
          regularizer,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False,
          switch_loss=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):

        try:
            limited = float(os.environ.get('limited', None))
        except (ValueError, TypeError):
            limited = 1
        # print('################# limited', limited)

        if not fixbase and (batch_idx + 1) > limited * len(trainloader):
            break

        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        # deep-supervision branch intentionally disabled (`False and ...`)
        if False and isinstance(outputs, (tuple, list)):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        # print(loss)  # per-batch debug print, commented out to reduce noise
        # if True or (fixbase and args.fix_custom_loss) or not fixbase and ((switch_loss and args.switch_loss < 0) or (not switch_loss and args.switch_loss > 0)):
        if not fixbase:
            reg = regularizer(model)
            # print('use reg', reg)
            # print('use reg', reg)
            loss += reg
        optimizer.zero_grad()
        loss.backward()

        if args.use_clip_grad and (args.switch_loss < 0 and switch_loss):
            print('Clip!')
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)

        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        del loss
        del outputs

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
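
# `open_all_layers` / `open_specified_layers` are torchreid-style utilities not
# shown in this snippet. A minimal sketch of the assumed behaviour: the first
# unfreezes the whole model, the second trains only the named child modules.
from torch import nn


def open_all_layers(model):
    model.train()
    for p in model.parameters():
        p.requires_grad = True


def open_specified_layers(model, open_layers):
    if isinstance(model, nn.DataParallel):
        model = model.module
    for name, module in model.named_children():
        if name in open_layers:
            module.train()
            for p in module.parameters():
                p.requires_grad = True
        else:
            module.eval()
            for p in module.parameters():
                p.requires_grad = False
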
def test_vib(model,
             queryloader,
             galleryloader,
             use_gpu,
             args,
             writer,
             epoch,
             ranks=[1, 5, 10, 20],
             return_distmat=False,
             use_cosine=False,
             draw_tsne=False,
             tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf_stat, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf_stat.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf_stat = torch.cat(qf_stat, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf_stat.size(0), qf_stat.size(1)))

        gf_stat, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf_stat.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf_stat = torch.cat(gf_stat, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # pdb.set_trace()

    m, n = qf_stat.size(0), gf_stat.size(0)
    score_board = torch.zeros((m, n, n), dtype=torch.int16)
    qf = torch.zeros(m, 512)  #,torch.zeros(m,512)
    for _ in range(args.sampling_count):
        qf_sample = model.reparametrize_n(qf_stat[:, 0], qf_stat[:, 1])
        qf = qf + qf_sample

    qf = qf / args.sampling_count

    for _ in range(args.sampling_count):

        #qf = model.reparametrize_n(qf_stat[:,0],qf_stat[:,1])
        gf = model.reparametrize_n(gf_stat[:, 0], gf_stat[:, 1])
        if not use_cosine:
            distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
            distmat.addmm_(1, -2, qf, gf.t())
            distmat = distmat.numpy()
        else:
            qf_norm = qf / qf.norm(dim=1)[:, None]
            gf_norm = gf / gf.norm(dim=1)[:, None]
            distmat = torch.addmm(1, torch.ones((m, n)), -1, qf_norm,
                                  gf_norm.transpose(0, 1))
            distmat = distmat.numpy()

        indices = np.argsort(distmat, axis=1)

        for indx in range(m):
            # vote for query `indx`: gallery index observed at each rank position
            score_board[indx, indices[indx], list(range(n))] += 1

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if draw_tsne:
        drawTSNE(qf, gf, q_pids, g_pids, q_camids, g_camids, tsne_clusters,
                 args.save_dir)
    if return_distmat:
        return distmat

    if writer is not None:
        writer.add_scalars('Testing',
                           dict(rank_1=cmc[0], rank_5=cmc[4], mAP=mAP),
                           epoch + 1)
    return cmc[0]
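
# `model.reparametrize_n` belongs to the VIB model and is not shown here. The
# sampling loop above suggests the eval-time features stack the Gaussian
# statistics along dim 1 (features[:, 0] = mu, features[:, 1] = sigma). A
# hedged sketch of the reparameterisation trick it is assumed to implement
# (the second statistic might equally be a log-variance in the real model):
import torch


def reparametrize_n(mu, std, n=1):
    """Draws n samples from N(mu, std^2) as z = mu + std * eps, eps ~ N(0, 1)."""
    if n != 1:
        mu = mu.unsqueeze(0).expand(n, *mu.size())
        std = std.unsqueeze(0).expand(n, *std.size())
    eps = torch.randn_like(std)
    return mu + std * eps
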
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    losses_xent = AverageMeter()
    losses_htri = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _, caps, caps_raw) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, caps = imgs.cuda(), pids.cuda(), caps.cuda()

        outputs, features = model(imgs, caps)

        # Loss
        if args.loss == 'xent_only':
            xent_loss = criterion_xent(outputs, pids)
            loss = xent_loss
        elif args.loss == 'htri_only':
            htri_loss = criterion_htri(features, pids)
            loss = htri_loss
        elif args.loss == 'xent_htri':
            xent_loss = criterion_xent(outputs, pids)
            htri_loss = criterion_htri(features, pids)
            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss

            losses_xent.update(xent_loss.item(), pids.size(0))
            losses_htri.update(htri_loss.item(), pids.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'xent_loss={xent_loss.avg:.4f}, htri_loss={htri_loss.avg:.4f}, '
                .format(epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        batch_time=batch_time,
                        data_time=data_time,
                        loss=losses,
                        xent_loss=losses_xent,
                        htri_loss=losses_htri))

        end = time.time()

    # `writer` (like `args`) is assumed to be defined at module level
    writer.add_scalars('train_average_loss', {'loss': losses.avg}, epoch)
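
# `criterion_htri` above is assumed to be a batch-hard triplet loss in the
# style of "In Defense of the Triplet Loss for Person Re-Identification". A
# minimal sketch (class name and default margin are illustrative, not from
# this code):
import torch
from torch import nn


class HardTripletLossSketch(nn.Module):
    """Batch-hard triplet loss: hardest positive vs. hardest negative per anchor."""

    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        n = inputs.size(0)
        # pairwise Euclidean distances between all embeddings in the batch
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t() - 2 * inputs.mm(inputs.t())
        dist = dist.clamp(min=1e-12).sqrt()

        same_id = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][same_id[i]].max().unsqueeze(0))       # hardest positive
            dist_an.append(dist[i][same_id[i] == 0].min().unsqueeze(0))  # hardest negative
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)

        # want dist_an > dist_ap + margin
        y = torch.ones_like(dist_an)
        return self.ranking_loss(dist_an, dist_ap, y)
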
def test(model,
         testloader,
         use_gpu,
         args,
         writer,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         draw_tsne=False,
         tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids = [], []
        q_imgPath = []
        for batch_idx, (input) in enumerate(testloader):
            if not args.draw_tsne:
                imgs, pids = input
            else:
                imgs, pids, img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_imgPath = np.asarray(q_imgPath)

        print("Extracted features for test set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    if args.use_ecn:
        distmat = ECN_custom(qf, qf, k=25, t=3, q=8, method='rankdist',
                             use_cosine=args.use_cosine).transpose()
    elif not args.use_cosine:
        m = qf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                  torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
        distmat.addmm_(1, -2, qf, qf.t())
        distmat = distmat.numpy()

        # if args.re_ranking:
        #     distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
        #               torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
        #     distmat_q_q.addmm_(1, -2, qf, qf.t())
        #     distmat_q_q = distmat_q_q.numpy()
        #
        #     distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
        #               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
        #     distmat_g_g.addmm_(1, -2, gf, gf.t())
        #     distmat_g_g = distmat_g_g.numpy()
        #
        #     distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)


    else:
        m = qf.size(0)
        qf_norm = qf / qf.norm(dim=1)[:, None]
        distmat = torch.addmm(1, torch.ones((m, m)), -1, qf_norm,
                              qf_norm.transpose(0, 1))
        distmat = distmat.numpy()

        # if args.re_ranking:
        #     distmat_q_q = torch.addmm(1,torch.ones((m,m)),-1,qf_norm,qf_norm.transpose(0,1))
        #     distmat_q_q = distmat_q_q.numpy()
        #
        #     distmat_g_g = torch.addmm(1,torch.ones((n,n)),-1,gf_norm,gf_norm.transpose(0,1))
        #     distmat_g_g = distmat_g_g.numpy()
        #
        #     distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)

    print("Computing CMC and mAP")
    K_range = [1,2,4,8,16,32]
    recall= evaluate_recall(distmat, q_pids, K_range)

    print("Results ----------")
    print("Recall@K results")
    for j, k in enumerate(K_range):
        print("Recall@{:<3}: {:.1%}".format(k, recall[j]))
    print("------------------")

    # if draw_tsne:
    #     drawTSNE(qf,gf,q_pids, g_pids, q_camids, g_camids,q_imgPath, g_imgPath,tsne_clusters,args.save_dir)
    if return_distmat:
        return distmat


    if writer is not None:
        writer.add_scalars(
          'Testing',
          dict(rank_1=recall[0],
               rank_2 =recall[1]),
          epoch + 1)
    return recall[0]
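
# `evaluate_recall` is not defined in this snippet. Since the distance matrix
# above is test-set vs. test-set (the diagonal compares an image with itself),
# a plausible minimal Recall@K implementation is sketched below; the real
# helper may differ in details such as tie handling.
import numpy as np


def evaluate_recall(distmat, pids, k_range):
    """Recall@K over a square self-distance matrix, excluding the diagonal."""
    distmat = np.asarray(distmat, dtype=np.float64).copy()
    pids = np.asarray(pids)
    np.fill_diagonal(distmat, np.inf)         # never match an image with itself
    indices = np.argsort(distmat, axis=1)     # nearest neighbours first
    matches = pids[indices] == pids[:, None]  # True where a neighbour shares the pid

    recalls = []
    for k in k_range:
        hit = matches[:, :k].any(axis=1)      # correct match within the top k?
        recalls.append(hit.mean())
    return recalls
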
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         epoch=0):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps,
                        caps_raw) in enumerate(queryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()

            features = model(imgs, caps)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps,
                        _) in enumerate(galleryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()

            features = model(imgs, caps)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    if args.rerank:
        print('re-ranking (Euclidean distance)')
        distmat = re_ranking(qf,
                             gf,
                             k1=reranking_k1,
                             k2=reranking_k2,
                             lambda_value=reranking_lambda)

    else:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

    if args.evaluate:
        sio.savemat(mat_path + 'dismat.mat', {'dismat': distmat})
        sio.savemat(mat_path + 'g_pids.mat', {'g_pids': g_pids})
        sio.savemat(mat_path + 'q_pids.mat', {'q_pids': q_pids})
        sio.savemat(mat_path + 'g_camids.mat', {'g_camids': g_camids})
        sio.savemat(mat_path + 'q_camids.mat', {'q_camids': q_camids})

    print("Computing CMC and mAP")
    if args.dataset == 'market1501':
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=False)
    elif args.dataset == 'cuhk03':
        mAP, cmc = eval_map_cmc(distmat,
                                q_ids=q_pids,
                                g_ids=g_pids,
                                q_cams=q_camids,
                                g_cams=g_camids,
                                separate_camera_set=separate_camera_set,
                                single_gallery_shot=single_gallery_shot,
                                first_match_break=first_match_break,
                                topk=20)
    elif args.dataset == 'dukemtmcreid':
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=False)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print(cmc)
    print("------------------")

    if return_distmat:
        return distmat

    return cmc[0]
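
# `evaluate(..., use_metric_cuhk03=False)` is assumed to follow the standard
# market1501-style protocol: for each query, gallery images with the same pid
# *and* the same camera are discarded before computing CMC and mAP. A minimal
# sketch (the function name is illustrative; the real helper may add options
# such as the CUHK03 metric):
import numpy as np


def evaluate_market1501_sketch(distmat, q_pids, g_pids, q_camids, g_camids,
                               max_rank=50):
    """Single-shot CMC curve and mAP under the market1501 evaluation protocol."""
    num_q, num_g = distmat.shape
    indices = np.argsort(distmat, axis=1)
    matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)

    all_cmc, all_AP = [], []
    num_valid_q = 0
    for q_idx in range(num_q):
        q_pid, q_camid = q_pids[q_idx], q_camids[q_idx]

        # drop gallery images of the same identity seen by the same camera
        order = indices[q_idx]
        remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
        keep = np.invert(remove)

        orig_cmc = matches[q_idx][keep]
        if not np.any(orig_cmc):
            continue  # this query has no valid gallery match

        cmc = orig_cmc.cumsum()
        cmc[cmc > 1] = 1
        all_cmc.append(cmc[:max_rank])
        num_valid_q += 1

        # average precision for this query
        num_rel = orig_cmc.sum()
        tmp_cmc = orig_cmc.cumsum()
        tmp_cmc = np.asarray(
            [x / (i + 1.0) for i, x in enumerate(tmp_cmc)]) * orig_cmc
        all_AP.append(tmp_cmc.sum() / num_rel)

    all_cmc = np.asarray(all_cmc, dtype=np.float32).sum(0) / num_valid_q
    return all_cmc, np.mean(all_AP)
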
Esempio n. 23
def train(epoch,
          model,
          criterion,
          regularizer,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):

    if not fixbase and args.use_of and epoch >= args.of_start_epoch:
        print('Using OF')

    from torchreid.losses.of_penalty import OFPenalty

    of_penalty = OFPenalty(vars(args))

    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):

        try:
            limited = float(os.environ.get('limited', None))
        except (ValueError, TypeError):
            limited = 1

        if not fixbase and (batch_idx + 1) > limited * len(trainloader):
            break

        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        loss = criterion(outputs, pids)
        if not fixbase:
            reg = regularizer(model)
            loss += reg
        if not fixbase and args.use_of and epoch >= args.of_start_epoch:

            penalty = of_penalty(outputs)
            loss += penalty

        optimizer.zero_grad()
        loss.backward()

        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, (tuple, list)):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, (tuple, list)):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            if isinstance(features, (tuple, list)):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)

            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
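
# `DeepSupervision` is assumed to be the small torchreid-style helper that
# applies one criterion to every element of a tuple/list of outputs (e.g. from
# several classifier heads) and averages the resulting losses. A sketch:
def DeepSupervision(criterion, xs, y):
    """Applies criterion(x, y) to each x in xs and returns the mean loss."""
    loss = 0.0
    for x in xs:
        loss += criterion(x, y)
    loss /= len(xs)
    return loss
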
Esempio n. 25
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("=> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
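
# The expand + addmm_ pattern used above builds the squared Euclidean distance
# matrix ||q||^2 + ||g||^2 - 2 q.g^T. An equivalent, easier-to-read sketch of
# the same quantity (numerically identical up to floating-point error):
import torch


def squared_euclidean_distmat(qf, gf):
    """Squared pairwise Euclidean distances between query and gallery features."""
    return torch.cdist(qf, gf, p=2).pow(2)
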
Esempio n. 26
def train(epoch, model, criterion_batri, criterion_lifted, criterion_xent,
          criterion_htri, criterion_KA, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_xent = AverageMeter()
    losses_kapos = AverageMeter()
    losses_kaneg = AverageMeter()
    losses_batri = AverageMeter()
    losses_lifted = AverageMeter()
    losses_htri = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        
        torch.cuda.empty_cache()
        outputs, features = model(imgs)

        xent_loss = criterion_xent(outputs, pids)
        losses_xent.update(xent_loss.item(), pids.size(0))

        htri_loss = criterion_htri(features, pids)
        losses_htri.update(htri_loss.item(), pids.size(0))

        lifted_loss = criterion_lifted(features, pids)
        losses_lifted.update(lifted_loss.item(), pids.size(0))

        batri_loss = criterion_batri(features, pids)
        losses_batri.update(batri_loss.item(), pids.size(0))

        loss_KA_pos = criterion_KA(features, features, pids, pids, mode='pos')
        losses_kapos.update(loss_KA_pos.item(), pids.size(0))

        loss_KA_neg = criterion_KA(features, features, pids, pids, mode='neg')
        losses_kaneg.update(loss_KA_neg.item(), pids.size(0))

        # Only the cross-entropy and batch-all triplet terms are optimized;
        # the remaining losses are computed for monitoring only.
        loss = xent_loss + (1.0 / 20) * batri_loss
        losses.update(loss.item(), pids.size(0))
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

       

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'.format(
                   epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
                   data_time=data_time),end='')
            print('Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses), end='')
            print('xent_loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_xent), end='')
            print('htri_loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_htri), end='')
            print('lift_loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_lifted), end='')
            print('batri_loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_batri), end='')
            print('loss_KA_pos {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_kapos),end='')
            print('loss_KA_neg {loss.val:.4f} ({loss.avg:.4f})\t'.format(loss = losses_kaneg),end='')
            print()
            sys.stdout.flush()
            
            
        
        end = time.time()
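
# A hedged usage sketch: how a train()/test() pair like the ones above is
# typically driven from an epoch loop in this kind of script. Every name here
# (args.max_epoch, args.eval_freq, scheduler, the exact criterion tuple) is an
# illustrative assumption, not something defined in this snippet.
def run_training_sketch(model, criterions, optimizer, scheduler, trainloader,
                        queryloader, galleryloader, use_gpu, args):
    best_rank1 = 0.0
    for epoch in range(args.max_epoch):
        # the train() defined above takes its criterions positionally
        train(epoch, model, *criterions, optimizer, trainloader, use_gpu)
        if scheduler is not None:
            scheduler.step()
        if (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            best_rank1 = max(best_rank1, rank1)
    return best_rank1
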