Example #1
    def test(self, queryloader, return_distmat=False):
        batch_time = AverageMeter()
        
        self.model.eval()

        with torch.no_grad():
            qf = []  # pids/camids are unused here; the loader yields only (imgs, _)
            for batch_idx, (imgs, _) in enumerate(queryloader):
                if use_gpu: imgs = imgs.cuda()

                end = time.time()
                features = self.model(imgs)
                batch_time.update(time.time() - end)
                
                features = features.data.cpu()
                qf.append(features)
            qf = torch.cat(qf, 0)

        m, n = qf.size(0), self.gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                torch.pow(self.gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, self.gf.t(), beta=1, alpha=-2)  # keyword form; positional addmm_(1, -2, ...) is deprecated
        distmat = distmat.numpy()

        return distmat
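
Note: the distmat computation above relies on the identity ||q - g||^2 = ||q||^2 + ||g||^2 - 2*q.g to obtain all pairwise squared Euclidean distances in one shot. A minimal self-contained sketch of the same trick (the function name is illustrative), checked against torch.cdist:

import torch

def pairwise_sq_euclidean(qf, gf):
    # ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q.g for every query/gallery pair
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # subtract 2 * qf @ gf.T in place
    return distmat

qf, gf = torch.randn(4, 128), torch.randn(6, 128)
assert torch.allclose(pairwise_sq_euclidean(qf, gf), torch.cdist(qf, gf).pow(2), atol=1e-4)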
Example #2
    def init_gallary_info(self):
        batch_time = AverageMeter()
        # init gallery info
        self.model.eval()
        with torch.no_grad():
            gf = []  # only the features are needed here; pids/camids are unused
            save_type = 'gpu' if use_gpu else 'cpu'
            pkl_path = './data/market1501/market1501-%s.pkl' % save_type
            if not check_isfile(pkl_path):
                end = time.time()
                for batch_idx, (imgs, _) in enumerate(self.galleryloader):
                    if use_gpu: 
                        imgs = imgs.cuda()
                    end = time.time()
                    features = self.model(imgs)
                    batch_time.update(time.time() - end)

                    features = features.data.cpu()
                    gf.append(features)
            
                gf = torch.cat(gf, 0)
                # cache for CPU
                with open(pkl_path, mode='wb') as fout:
                    pickle.dump(gf, fout)
            else:
                with open(pkl_path, mode='rb') as fin:
                    gf = pickle.load(fin)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        return gf
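
Note: every snippet on this page assumes an AverageMeter utility. A minimal sketch consistent with how .update(val, n), .val, and .avg are used throughout (the repo's actual class may differ slightly):

class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.val = 0.0    # most recent value
        self.sum = 0.0    # weighted sum of all values seen
        self.count = 0    # total weight (e.g. number of samples)
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count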
Example #3
def compute_global_descriptor_from_text(loader, use_gpu, model, arch, size=0):
    batch_time = AverageMeter()
    model.training = False
    model.glove = True

    qf = np.zeros([len(loader), 100], dtype=float)
    qf_glove = np.zeros([len(loader), size], dtype=float)
    q_pids = np.zeros([len(loader)], dtype=float)
    q_camids = np.zeros([len(loader)], dtype=float)

    for batch_idx, out in enumerate(loader):
        pids = out[1]
        camids = out[2]

        if arch == 'resnetAttW2VAttributes':
            text_desc = out[4]
            attribute_text = out[3]
        elif arch == 'resnetAttW2VText':
            attribute_text = out[4]
            text_desc = torch.cat(attribute_text, dim=1)

        print(str(batch_idx) + '/' + str(len(loader)))
        # compute the feature unconditionally; the original guarded this with
        # `if use_gpu:`, which left `feat` undefined on CPU-only runs
        feat = model(text=attribute_text)
        feat = feat.squeeze()

        qf[batch_idx] = feat.cpu()
        qf_glove[batch_idx] = text_desc
        q_pids[batch_idx] = np.asarray(pids)
        q_camids[batch_idx] = np.asarray(camids)

    return qf, qf_glove, q_pids, q_camids
Example #4
def test(model, testloader, use_gpu):
    batch_time = AverageMeter()
    gender_correct = 0
    staff_correct = 0
    customer_correct = 0
    stand_correct = 0
    sit_correct = 0
    phone_correct = 0

    total = 0
    model.eval()

    with torch.no_grad():
        for batch_idx, (imgs, gender_labels, staff_labels, customer_labels,
                        stand_labels, sit_labels, phone_labels) in enumerate(testloader):
            if use_gpu:
                imgs = imgs.cuda()
                gender_labels, staff_labels, customer_labels = \
                    gender_labels.cuda(), staff_labels.cuda(), customer_labels.cuda()
                stand_labels, sit_labels, phone_labels = \
                    stand_labels.cuda(), sit_labels.cuda(), phone_labels.cuda()
            total += gender_labels.size(0)

            gender_outputs, staff_outputs, customer_outputs, stand_outputs, sit_outputs, play_with_phone_outputs = model(
                imgs)

            _, gender_predicted = torch.max(gender_outputs.data, 1)
            _, staff_predicted = torch.max(staff_outputs.data, 1)
            _, customer_predicted = torch.max(customer_outputs.data, 1)
            _, stand_predicted = torch.max(stand_outputs.data, 1)
            _, sit_predicted = torch.max(sit_outputs.data, 1)
            _, phone_predicted = torch.max(play_with_phone_outputs.data, 1)

            gender_correct += (gender_predicted == gender_labels).sum()
            staff_correct += (staff_predicted == staff_labels).sum()
            customer_correct += (customer_predicted == customer_labels).sum()
            stand_correct += (stand_predicted == stand_labels).sum()
            sit_correct += (sit_predicted == sit_labels).sum()
            phone_correct += (phone_predicted == phone_labels).sum()

        gender_correct = gender_correct.cpu().numpy()
        staff_correct = staff_correct.cpu().numpy()
        customer_correct = customer_correct.cpu().numpy()
        stand_correct = stand_correct.cpu().numpy()
        sit_correct = sit_correct.cpu().numpy()
        phone_correct = phone_correct.cpu().numpy()

        gender_accuracy = float(gender_correct / total)
        staff_accuracy = float(staff_correct / total)
        customer_accuracy = float(customer_correct / total)
        stand_accuracy = float(stand_correct / total)
        sit_accuracy = float(sit_correct / total)
        phone_accuracy = float(phone_correct / total)

        print(
            'Accuracy:|gender {:.2f}%|\tstaff {:.2f}%|\tcustomer {:.2f}%|\tstand {:.2f}%|\tsit {:.2f}%|\tphone {:.2f}%|'
            .format(gender_accuracy * 100, staff_accuracy * 100,
                    customer_accuracy * 100, stand_accuracy * 100,
                    sit_accuracy * 100, phone_accuracy * 100))

    return gender_accuracy, staff_accuracy, customer_accuracy, stand_accuracy, sit_accuracy, phone_accuracy
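
Note: the six per-attribute counters above are structurally identical, so the bookkeeping can be collapsed with a dict; a hedged refactor sketch (head_outputs/head_labels are illustrative names for parallel per-head lists):

heads = ['gender', 'staff', 'customer', 'stand', 'sit', 'phone']
correct = {h: 0 for h in heads}
# inside the batch loop:
for h, outputs, labels in zip(heads, head_outputs, head_labels):
    correct[h] += (outputs.argmax(dim=1) == labels).sum().item()
# after the loop:
accuracies = {h: correct[h] / total for h in heads}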
Example #5
def test(model, queryloader, galleryloader, dist_metric, normalize_feature):
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        print('Extracting features from query set ...')
        qf, q_names = [], []
        for batch_idx, (imgs, img_names) in enumerate(queryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_names.extend(img_names)
        qf = torch.cat(qf, 0)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_names = [], []
        for batch_idx, (imgs, img_names) in enumerate(galleryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_names.extend(img_names)
        gf = torch.cat(gf, 0)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

    print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

    if normalize_feature:
        print('Normalizing features with L2 norm ...')
        qf = F.normalize(qf, p=2, dim=1)
        gf = F.normalize(gf, p=2, dim=1)

    print('Computing distance matrix with metric={} ...'.format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)
    distmat = distmat.numpy()
    indices = np.argsort(distmat, axis=1)

    rank = {}
    for q_idx in range(qf.size(0)):
        q_name = q_names[q_idx]
        im_list = []
        for i in range(min(200, len(g_names))):  # guard: the gallery may hold fewer than 200 images
            g_idx = indices[q_idx, i]
            g_name = g_names[g_idx]
            im_list.append(g_name)
        rank[q_name] = im_list

    with open("result.json", "w") as f:
        json.dump(rank, f)
        print('done')
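
Note: compute_distance_matrix(qf, gf, dist_metric) dispatches on the metric name in torchreid. A minimal sketch of the same idea (not the library's exact code):

import torch
import torch.nn.functional as F

def compute_distance_matrix_sketch(qf, gf, metric='euclidean'):
    if metric == 'euclidean':
        return torch.cdist(qf, gf).pow(2)     # all-pairs squared L2
    if metric == 'cosine':
        qf = F.normalize(qf, p=2, dim=1)
        gf = F.normalize(gf, p=2, dim=1)
        return 1.0 - qf.mm(gf.t())            # cosine distance in [0, 2]
    raise ValueError('Unknown metric: {}'.format(metric))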
Example #6
def extract_train_info(model, trainloader):
    model.eval()
    os.environ['fake'] = '1'

    accs = [AverageMeter() for _ in range(3)]

    with torch.no_grad():
        for imgs, pids, _, paths in trainloader:

            xent_features = model(imgs.cuda())[1]
            for i, xent_feature in enumerate(xent_features):
                accs[i].update(
                    accuracy(xent_feature, pids.cuda())[0].item(),
                    pids.size(0),
                )

    with open(args.load_weights + '.acc', 'w') as f:
        print(*(acc.avg for acc in accs), file=f)
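
Note: several examples call accuracy(output, target)[0]. The conventional top-k helper compatible with that usage looks like this (a sketch, not necessarily this repo's implementation):

def accuracy(output, target, topk=(1,)):
    """Precision@k for each requested k; returns a list with one tensor per k."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()                                         # (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / target.size(0)))
    return res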
Example #7
def evaluate(model, queryloader, galleryloader, dist_metric,
             normalize_feature):
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        print('Extracting features from query set ...')
        qf, q_pids = [], []
        for batch_idx, (imgs, pids) in enumerate(queryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids = [], []
        for batch_idx, (imgs, pids) in enumerate(galleryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

    print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

    if normalize_feature:
        print('Normalizing features with L2 norm ...')
        qf = F.normalize(qf, p=2, dim=1)
        gf = F.normalize(gf, p=2, dim=1)

    print('Computing distance matrix with metric={} ...'.format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)
    distmat = distmat.numpy()

    print('Computing rank1 and mAP ...')
    rank1, mAP, result = eval_rank(distmat, q_pids, g_pids)
    print('** Results **')
    print('Rank1: {:.8f}'.format(rank1))
    print('mAP: {:.8f}'.format(mAP))
    print('average: {:.8f}'.format(result))
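
Note: eval_rank / evaluate_rank boil the distance matrix down to CMC and mAP. The heart of mAP is the average precision of a single query row; a sketch (real re-ID evaluation additionally discards gallery entries sharing both pid and camid with the query):

import numpy as np

def average_precision(dist_row, q_pid, g_pids):
    order = np.argsort(dist_row)                          # gallery, nearest first
    matches = (g_pids[order] == q_pid).astype(np.float32)
    if matches.sum() == 0:
        return 0.0                                        # no relevant gallery image
    precision_at_i = matches.cumsum() / (np.arange(matches.size) + 1.0)
    return float((precision_at_i * matches).sum() / matches.sum())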
Example #8
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          freeze_bn=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
Example #9
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)
        if isinstance(outputs, (tuple, list)):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)

        if isinstance(features, (tuple, list)):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)

        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0])

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent=xent_losses,
                      htri=htri_losses,
                      acc=accs))

        end = time.time()
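
Note: DeepSupervision appears in most of the training loops on this page; it applies one criterion to every intermediate output and aggregates. A minimal sketch consistent with the calls above (torchreid averages over the outputs):

def DeepSupervision(criterion, xs, y):
    """Apply `criterion` to each tensor in the tuple/list `xs` and average."""
    loss = 0.0
    for x in xs:
        loss += criterion(x, y)
    return loss / len(xs)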
Example #10
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    flip_eval = args.flip_eval

    if flip_eval:
        print('# Using Flip Eval')

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []

        if flip_eval:
            enumerator = enumerate(zip(queryloader[0], queryloader[1]))
        else:
            enumerator = enumerate(queryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, g_paths = [], [], [], []
        if flip_eval:
            enumerator = enumerate(zip(galleryloader[0], galleryloader[1]))
        else:
            enumerator = enumerate(galleryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package  # unpack paths here too; discarding them left g_paths extending stale query paths
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

        if os.environ.get('save_feat'):
            import scipy.io as io
            io.savemat(
                os.environ.get('save_feat'), {
                    'q': qf.data.numpy(),
                    'g': gf.data.numpy(),
                    'qt': q_pids,
                    'gt': g_pids
                })
            # return

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    if os.environ.get('distmat'):
        import scipy.io as io
        io.savemat(os.environ.get('distmat'), {
            'distmat': distmat,
            'qp': q_paths,
            'gp': g_paths
        })

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
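
Note: the flip_eval path above feeds a second, pre-flipped loader and averages the two embeddings. The same test-time augmentation can be done on the fly from a single loader; a sketch using torch.flip on the width axis:

import torch

def extract_flip_averaged(model, imgs):
    # average the embedding of the image and of its horizontal mirror
    feat = model(imgs)[0]
    feat_flip = model(torch.flip(imgs, dims=[3]))[0]   # NCHW: dim 3 is width
    return (feat + feat_flip) / 2.0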
Example #11
def test(model,
         keyptaware,
         multitask,
         queryloader,
         galleryloader,
         use_gpu,
         vcolor2label,
         vtype2label,
         ranks=range(1, 51),
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf = []
        q_vids = []
        q_camids = []
        q_vcolors = []
        q_vtypes = []
        pred_q_vcolors = []
        pred_q_vtypes = []
        for batch_idx, (imgs, vids, camids, vcolors, vtypes,
                        vkeypts) in enumerate(queryloader):
            if use_gpu:
                if keyptaware:
                    imgs, vkeypts = imgs.cuda(), vkeypts.cuda()
                else:
                    imgs = imgs.cuda()

            end = time.time()

            if keyptaware and multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs, vkeypts)
            elif keyptaware:
                output_vids, features = model(imgs, vkeypts)
            elif multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs)
            else:
                output_vids, features = model(imgs)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_vids.extend(vids)
            q_camids.extend(camids)
            if multitask:
                q_vcolors.extend(vcolors)
                q_vtypes.extend(vtypes)
                pred_q_vcolors.extend(output_vcolors.cpu().numpy())
                pred_q_vtypes.extend(output_vtypes.cpu().numpy())
        qf = torch.cat(qf, 0)
        q_vids = np.asarray(q_vids)
        q_camids = np.asarray(q_camids)
        if multitask:
            q_vcolors = np.asarray(q_vcolors)
            q_vtypes = np.asarray(q_vtypes)
            pred_q_vcolors = np.asarray(pred_q_vcolors)
            pred_q_vtypes = np.asarray(pred_q_vtypes)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf = []
        g_vids = []
        g_camids = []
        g_vcolors = []
        g_vtypes = []
        pred_g_vcolors = []
        pred_g_vtypes = []
        for batch_idx, (imgs, vids, camids, vcolors, vtypes,
                        vkeypts) in enumerate(galleryloader):
            if use_gpu:
                if keyptaware:
                    imgs, vkeypts = imgs.cuda(), vkeypts.cuda()
                else:
                    imgs = imgs.cuda()

            end = time.time()

            if keyptaware and multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs, vkeypts)
            elif keyptaware:
                output_vids, features = model(imgs, vkeypts)
            elif multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs)
            else:
                output_vids, features = model(imgs)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_vids.extend(vids)
            g_camids.extend(camids)
            if multitask:
                g_vcolors.extend(vcolors)
                g_vtypes.extend(vtypes)
                pred_g_vcolors.extend(output_vcolors.cpu().numpy())
                pred_g_vtypes.extend(output_vtypes.cpu().numpy())
        gf = torch.cat(gf, 0)
        g_vids = np.asarray(g_vids)
        g_camids = np.asarray(g_camids)
        if multitask:
            g_vcolors = np.asarray(g_vcolors)
            g_vtypes = np.asarray(g_vtypes)
            pred_g_vcolors = np.asarray(pred_g_vcolors)
            pred_g_vtypes = np.asarray(pred_g_vtypes)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_vids, g_vids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if multitask:
        print("Compute attribute classification accuracy")

        for q in range(q_vcolors.size):
            q_vcolors[q] = vcolor2label[q_vcolors[q]]
        for g in range(g_vcolors.size):
            g_vcolors[g] = vcolor2label[g_vcolors[g]]
        q_vcolor_errors = np.argmax(pred_q_vcolors, axis=1) - q_vcolors
        g_vcolor_errors = np.argmax(pred_g_vcolors, axis=1) - g_vcolors
        vcolor_error_num = np.count_nonzero(
            q_vcolor_errors) + np.count_nonzero(g_vcolor_errors)
        vcolor_accuracy = 1.0 - (float(vcolor_error_num) /
                                 float(distmat.shape[0] + distmat.shape[1]))
        print("Color classification accuracy: {:.2%}".format(vcolor_accuracy))

        for q in range(q_vtypes.size):
            q_vtypes[q] = vtype2label[q_vtypes[q]]  # was vcolor2label: copy-paste bug
        for g in range(g_vtypes.size):
            g_vtypes[g] = vtype2label[g_vtypes[g]]
        q_vtype_errors = np.argmax(pred_q_vtypes, axis=1) - q_vtypes
        g_vtype_errors = np.argmax(pred_g_vtypes, axis=1) - g_vtypes
        vtype_error_num = np.count_nonzero(q_vtype_errors) + np.count_nonzero(
            g_vtype_errors)
        vtype_accuracy = 1.0 - (float(vtype_error_num) /
                                float(distmat.shape[0] + distmat.shape[1]))
        print("Type classification accuracy: {:.2%}".format(vtype_accuracy))

        print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Example #12
def evaluate(model,
             queryloader,
             galleryloader,
             dist_metric='euclidean',
             normalize_feature=False,
             rerank=False,
             return_distmat=False):
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        print('Extracting features from query set ...')
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

    print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

    if normalize_feature:
        print('Normalizing features with L2 norm ...')
        qf = F.normalize(qf, p=2, dim=1)
        gf = F.normalize(gf, p=2, dim=1)

    print('Computing distance matrix with metric={} ...'.format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)
    distmat = distmat.numpy()

    if rerank:
        print('Applying person re-ranking ...')
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print('Computing CMC and mAP ...')
    cmc, mAP = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids)
    print('** Results **')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in [1, 5, 10, 20]:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

    if return_distmat:
        return distmat

    return cmc[0]
Example #13
def test(model,
         testloader,
         queryloader,
         galleryloader,
         train_query_loader,
         train_gallery_loader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids = [], []
        for batch_idx, (imgs, pids) in enumerate(testloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()

            qf.append(features)
            q_pids.extend(pids)

        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)

        # print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
        # print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    cmc = cmc_vehicleid(qf, q_pids, repeat=2)
    print("Test data results ----------")
    # print("temAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("teRank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
Example #14
def train(epoch,
          model,
          criterion,
          center_loss1,
          center_loss2,
          center_loss3,
          center_loss4,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _, dataset_id) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, dataset_id = imgs.cuda(), pids.cuda(), dataset_id.cuda()

        outputs, features = model(imgs)
        if isinstance(outputs, (tuple, list)):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)

        alpha = 0.001
        loss = center_loss1(features[0], dataset_id) * alpha + loss
        loss = center_loss2(features[1], dataset_id) * alpha + loss

        # beta = 0.0001
        beta = 0.00001
        loss = center_loss3(features[0], pids) * beta + loss
        loss = center_loss4(features[1], pids) * beta + loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
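
Note: Example #14 mixes a classification loss with center-loss terms called as center_loss(features, labels). A minimal center-loss sketch matching that call signature (an assumption-laden sketch, not the repo's class; common implementations also update the centers with their own learning rate):

import torch
import torch.nn as nn

class CenterLoss(nn.Module):
    """Penalizes the squared distance of each feature to its class center."""

    def __init__(self, num_classes, feat_dim):
        super().__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, features, labels):
        centers_batch = self.centers[labels]           # (batch, feat_dim)
        return ((features - centers_batch) ** 2).sum(dim=1).mean() / 2.0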
Example #15
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps) in enumerate(queryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()
            imgs_batch = imgs

            _, features = model(imgs_batch)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)

        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))
        sys.stdout.flush()

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps) in enumerate(galleryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()
            imgs_batch = imgs

            _, features = model(imgs_batch)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        sys.stdout.flush()

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    sys.stdout.flush()
    if args.rerank:
        print('re-ranking')
        distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.1)
    else:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()  # evaluate() and sio.savemat() below expect an ndarray

    print("Computing CMC and mAP")
    sys.stdout.flush()

    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    sio.savemat('dismat.mat', {'dismat': distmat})
    sio.savemat('g_pids.mat', {'g_pids': g_pids})
    sio.savemat('q_pids.mat', {'q_pids': q_pids})
    sio.savemat('g_camids.mat', {'g_camids': g_camids})
    sio.savemat('q_camids.mat', {'q_camids': q_camids})

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print(cmc)
    print("------------------")
    sys.stdout.flush()

    if return_distmat:
        return distmat

    return cmc[0]
Example #16
def train(epoch, model, criterion_xent, criterion_htri, \
          optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, cloth_ids, _, img_paths,
                    masks) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, cloth_ids, masks = imgs.cuda(), pids.cuda(
            ), cloth_ids.cuda(), masks.cuda()

        # Gait silhouettes (64x64) show the person only in the centre of the frame,
        # whereas ReID person crops (64x64) fill the whole frame, so the masks need
        # this left/right padding first:
        padding_length = (args.mask_height - args.mask_width) // 2
        left_right_padding = nn.ZeroPad2d(
            (padding_length, padding_length, 0, 0))
        masks = left_right_padding(masks)

        # Main ReID-Stream:
        features, outputs = model(imgs)

        # ReID loss local:
        xent_loss = criterion_xent(outputs, pids)
        htri_loss = criterion_htri(features, pids)

        loss_total = args.loss_ReID_cla_local * xent_loss + args.loss_ReID_tri_local * htri_loss

        optimizer.zero_grad()
        loss_total.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss_total.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
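
Note: the ZeroPad2d step above widens a tall mask into a square by padding left and right equally. A quick shape check (64x32 is an illustrative mask size):

import torch
import torch.nn as nn

masks = torch.ones(8, 1, 64, 32)                      # N, C, H=64, W=32
pad = (64 - 32) // 2
left_right_padding = nn.ZeroPad2d((pad, pad, 0, 0))   # (left, right, top, bottom)
print(left_right_padding(masks).shape)                # torch.Size([8, 1, 64, 64])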
Example #17
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):
    losses = AverageMeter()
    precisions = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        if isinstance(outputs, (tuple, list)):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)

        prec, = accuracy(outputs.data, pids.data)
        prec1 = prec[0]  # get top 1

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        precisions.update(prec1, pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0:02d}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec {prec.val:.2%} ({prec.avg:.2%})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      prec=precisions))

        end = time.time()

    return losses.avg, precisions.avg
Example #18
def train(epoch,
          model,
          criterions,
          optimizer,
          trainloader,
          use_gpu,
          train_writer,
          fixbase=False,
          lfw=None):
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.always_fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        iteration = epoch * len(trainloader) + batch_idx

        data_time.update(time.time() - end)

        if fixbase and batch_idx > 100:
            break

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)

        losses = torch.zeros([1]).cuda()
        kwargs = {'targets': pids, 'imgs': imgs}
        for criterion in criterions:
            inputs = features
            if criterion.name in ('xent', 'am'):  # `== 'xent' or 'am'` was always truthy
                inputs = outputs
            loss = criterion.weight * criterion.calc_loss(inputs, **kwargs)
            losses += loss
            if np.isnan(loss.item()):
                logged_value = sys.float_info.max
            else:
                logged_value = loss.item()
            criterion.train_stats.update(logged_value, pids.size(0))

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        if (batch_idx + 1) % args.print_freq == 0:
            output_string = 'Epoch: [{0}][{1}/{2}]\t'.format(
                epoch + 1, batch_idx + 1, len(trainloader))
            output_string += 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
                batch_time=batch_time)
            output_string += 'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'.format(
                data_time=data_time)
            for criterion in criterions:
                output_string += 'Loss {}: {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    criterion.name, loss=criterion.train_stats)
                train_writer.add_scalar('loss/{}'.format(criterion.name),
                                        criterion.train_stats.val, iteration)
            print(output_string)
        end = time.time()
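
Note: Example #18 expects each criterion object to expose name, weight, calc_loss, and train_stats. A minimal wrapper matching that interface (a sketch; loss_fn and the constructor arguments are assumptions):

class WeightedCriterion(object):
    def __init__(self, name, weight, loss_fn):
        self.name = name                   # e.g. 'xent' or 'am'
        self.weight = weight               # scalar multiplier used in the loop
        self.loss_fn = loss_fn
        self.train_stats = AverageMeter()  # as sketched after Example #2

    def calc_loss(self, inputs, targets, imgs=None, **kwargs):
        return self.loss_fn(inputs, targets)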
Example #19
def train(epoch,
          max_epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          fixbase_epoch=0,
          open_layers=None):
    losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    model.train()
    if (epoch + 1) <= fixbase_epoch and open_layers is not None:
        print('* Only train {} (epoch: {}/{})'.format(open_layers, epoch + 1,
                                                      fixbase_epoch))
        open_specified_layers(model, open_layers)
    else:
        open_all_layers(model)
    num_batches = len(trainloader)
    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        imgs = imgs.cuda()
        pids = pids.cuda()
        optimizer.zero_grad()
        outputs = model(imgs)
        loss = criterion(outputs, pids)
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        losses.update(loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0].item())
        if (batch_idx + 1) % 20 == 0:
            eta_seconds = batch_time.avg * (num_batches - (batch_idx + 1) +
                                            (max_epoch -
                                             (epoch + 1)) * num_batches)
            eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
            print('Epoch: [{0}/{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                  'Lr {lr:.6f}\t'
                  'eta {eta}'.format(epoch + 1,
                                     max_epoch,
                                     batch_idx + 1,
                                     num_batches,
                                     batch_time=batch_time,
                                     data_time=data_time,
                                     loss=losses,
                                     acc=accs,
                                     lr=optimizer.param_groups[0]['lr'],
                                     eta=eta_str))
        end = time.time()
Example #20
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          writer,
          args,
          freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    confidence_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    printed = False
    model.train()

    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        if isinstance(outputs, tuple):
            xent_loss = DeepSupervision(criterion[0], outputs, pids)
            confidence_loss = DeepSupervision(criterion[1], outputs, pids)
        else:
            xent_loss = criterion[0](outputs, pids)
            confidence_loss = criterion[1](outputs, pids)
        if args.confidence_penalty:
            loss = args.lambda_xent * xent_loss - args.confidence_beta * confidence_loss
        elif args.jsd:
            loss = args.lambda_xent * xent_loss + args.confidence_beta * confidence_loss
        else:
            loss = args.lambda_xent * xent_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        confidence_losses.update(confidence_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Clean the current line
                sys.stdout.console.write("\033[F\033[K")
                #sys.stdout.console.write("\033[K")
            if args.jsd:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                    'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                    'JSD_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
                    'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        batch_time=batch_time,
                        data_time=data_time,
                        xent_loss=xent_losses,
                        confidence_loss=confidence_losses,
                        loss=losses))
            else:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                    'Xent_Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                    'Confi_Loss {confidence_loss.val:.4f} ({confidence_loss.avg:.4f})\t'
                    'Total_Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch + 1,
                        batch_idx + 1,
                        len(trainloader),
                        batch_time=batch_time,
                        data_time=data_time,
                        xent_loss=xent_losses,
                        confidence_loss=confidence_losses,
                        loss=losses))

        end = time.time()

    writer.add_scalars(
        'loss',
        dict(loss=losses.avg,
             xent_loss=xent_losses.avg,
             confidence_loss=confidence_losses.avg), epoch + 1)
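
Note: the confidence penalty used above subtracts a weighted entropy of the output distribution from the cross-entropy, discouraging over-confident softmax outputs (Pereyra et al., 2017). A sketch of such an entropy term, assuming criterion[1] is of this form:

import torch.nn.functional as F

def softmax_entropy(logits, targets=None):
    # mean entropy of the predicted distribution; `targets` is unused but
    # kept so the call signature matches criterion[1](outputs, pids)
    log_probs = F.log_softmax(logits, dim=1)
    return -(log_probs.exp() * log_probs).sum(dim=1).mean()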
Example #21
def train(epoch, model, keyptaware, multitask, criterion_xent_vid,
          criterion_xent_vcolor, criterion_xent_vtype, criterion_htri,
          optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, vids, camids, vcolors, vtypes,
                    vkeypts) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            if keyptaware and multitask:
                imgs, vids, vcolors, vtypes, vkeypts = imgs.cuda(), vids.cuda(
                ), vcolors.cuda(), vtypes.cuda(), vkeypts.cuda()
            elif keyptaware:
                imgs, vids, vkeypts = imgs.cuda(), vids.cuda(), vkeypts.cuda()
            elif multitask:
                imgs, vids, vcolors, vtypes = imgs.cuda(), vids.cuda(
                ), vcolors.cuda(), vtypes.cuda()
            else:
                imgs, vids = imgs.cuda(), vids.cuda()

        if keyptaware and multitask:
            output_vids, output_vcolors, output_vtypes, features = model(
                imgs, vkeypts)
        elif keyptaware:
            output_vids, features = model(imgs, vkeypts)
        elif multitask:
            output_vids, output_vcolors, output_vtypes, features = model(imgs)
        else:
            output_vids, features = model(imgs)

        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, vids)
            else:
                loss = criterion_htri(features, vids)
        else:
            if isinstance(output_vids, tuple):
                xent_loss = DeepSupervision(criterion_xent_vid, output_vids,
                                            vids)
            else:
                xent_loss = criterion_xent_vid(output_vids, vids)

            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, vids)
            else:
                htri_loss = criterion_htri(features, vids)

            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss

        if multitask:
            if isinstance(output_vcolors, tuple):
                xent_loss_vcolor = DeepSupervision(criterion_xent_vcolor,
                                                   output_vcolors, vcolors)
            else:
                xent_loss_vcolor = criterion_xent_vcolor(
                    output_vcolors, vcolors)

            if isinstance(output_vtypes, tuple):
                xent_loss_vtype = DeepSupervision(criterion_xent_vtype,
                                                  output_vtypes, vtypes)
            else:
                xent_loss_vtype = criterion_xent_vtype(output_vtypes, vtypes)

            loss += args.lambda_vcolor * xent_loss_vcolor + args.lambda_vtype * xent_loss_vtype

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), vids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
Example #22
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         args,
         writer,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        q_imgPath = []
        for batch_idx, (input) in enumerate(queryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_imgPath = np.asarray(q_imgPath)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        g_imgPath = []
        end = time.time()
        for batch_idx, (input) in enumerate(galleryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                g_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_imgPath = np.asarray(g_imgPath)  # was np.asarray(q_imgPath): copy-paste bug

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    if args.use_ecn:
        distmat = (ECN_custom(qf,
                              gf,
                              k=25,
                              t=3,
                              q=8,
                              method='rankdist',
                              use_cosine=args.use_cosine)).transpose()
    elif not args.use_cosine:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                      torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
            distmat_q_q.addmm_(qf, qf.t(), beta=1, alpha=-2)
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
            distmat_g_g.addmm_(gf, gf.t(), beta=1, alpha=-2)
            distmat_g_g = distmat_g_g.numpy()

            distmat = re_ranking(distmat,
                                 distmat_q_q,
                                 distmat_g_g,
                                 k1=20,
                                 k2=6,
                                 lambda_value=0.3)

    else:
        m, n = qf.size(0), gf.size(0)
        qf_norm = qf / qf.norm(dim=1)[:, None]
        gf_norm = gf / gf.norm(dim=1)[:, None]
        distmat = torch.addmm(torch.ones((m, n)), qf_norm,
                              gf_norm.transpose(0, 1), beta=1, alpha=-1)
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.addmm(torch.ones((m, m)), qf_norm,
                                      qf_norm.transpose(0, 1), beta=1, alpha=-1)
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.addmm(torch.ones((n, n)), gf_norm,
                                      gf_norm.transpose(0, 1), beta=1, alpha=-1)
            distmat_g_g = distmat_g_g.numpy()

            distmat = re_ranking(distmat,
                                 distmat_q_q,
                                 distmat_g_g,
                                 k1=20,
                                 k2=6,
                                 lambda_value=0.3)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if args.draw_tsne:
        drawTSNE(qf, gf, q_pids, g_pids, q_camids, g_camids, q_imgPath,
                 g_imgPath, tsne_clusters, args.save_dir)
    if return_distmat:
        return distmat

    if writer is not None:
        writer.add_scalars('Testing',
                           dict(rank_1=cmc[0], rank_5=cmc[4], mAP=mAP),
                           epoch + 1)
    return cmc[0]
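Note: the distance block above uses the standard expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2 q.g to get all pairwise squared Euclidean distances with a single matrix multiply. A minimal self-contained sketch of the same trick (the function name is ours, not the repo's):

import torch

def pairwise_sq_euclidean(qf, gf):
    # ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g for every query/gallery pair
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    return distmat

# Sanity check against torch.cdist
qf, gf = torch.randn(4, 16), torch.randn(6, 16)
assert torch.allclose(pairwise_sq_euclidean(qf, gf),
                      torch.cdist(qf, gf).pow(2), atol=1e-4)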
Exemple #23
0
def train(epoch,
          model,
          criterion,
          regularizer,
          optimizer,
          trainloader,
          use_gpu,
          fixbase=False):
    start_train_time = time.time()
    if not fixbase and args.use_of and epoch >= args.of_start_epoch:
        print('Using OF')

    from torchreid.losses.of_penalty import OFPenalty

    of_penalty = OFPenalty(vars(args))

    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if fixbase or args.fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):

        # Optional 'limited' env var caps the fraction of each epoch's
        # batches; a missing or unparsable value falls back to the full epoch.
        try:
            limited = float(os.environ.get('limited', None))
        except (ValueError, TypeError):
            limited = 1.0

        if not fixbase and (batch_idx + 1) > limited * len(trainloader):
            break

        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs = model(imgs)
        loss = criterion(outputs, pids)
        if not fixbase:
            reg = regularizer(model)
            loss += reg
        if not fixbase and args.use_of and epoch >= args.of_start_epoch:
            penalty = of_penalty(outputs)
            loss += penalty

        optimizer.zero_grad()

        if use_apex:
            # apex mixed precision: scale the loss so fp16 gradients do not underflow
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
        end = time.time()
    epoch_time = time.time() - start_train_time
    print(f"epoch_time: {int(epoch_time) // 60} min {int(epoch_time) % 60} s")
    return losses.avg
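Note: the `limited` environment variable above throttles how much of each epoch is processed, which is convenient for smoke tests. A small sketch of the same parsing (the helper name is ours):

import os

def batches_to_run(num_batches, default=1.0):
    # Mirrors the loop guard above: run only `limited * len(trainloader)` batches.
    try:
        frac = float(os.environ.get('limited', None))
    except (ValueError, TypeError):
        frac = default
    return int(frac * num_batches)

# e.g. `limited=0.25 python train.py ...` stops a 400-batch epoch after 100.
Exemple #24
0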
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          optimizer,
          trainloader,
          use_gpu,
          writer,
          args,
          freeze_bn=False):
    losses = AverageMeter()
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    printed = False
    model.train()
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)
    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            loss = args.lambda_htri * htri_loss
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            htri_loss = 0
            if criterion_htri is not None:
                if isinstance(features, tuple):
                    htri_loss = DeepSupervision(criterion_htri, features, pids)
                else:
                    htri_loss = criterion_htri(features, pids)

            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        if not args.htri_only:
            xent_losses.update(xent_loss.item(), pids.size(0))
        if criterion_htri is None:
            # htri_loss is the plain number 0 here, so there is no .item()
            htri_losses.update(htri_loss, pids.size(0))
        else:
            htri_losses.update(htri_loss.item(), pids.size(0))
        if (batch_idx + 1) % args.print_freq == 0:
            if not printed:
                printed = True
            else:
                # Move the cursor up one line and clear it so the progress
                # line is rewritten in place (sys.stdout is assumed to be a
                # Logger wrapper exposing the raw console stream).
                sys.stdout.console.write("\033[F\033[K")
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent Loss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'Htri Loss {htri_loss.val:.4f} ({htri_loss.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent_loss=xent_losses,
                      htri_loss=htri_losses,
                      loss=losses))

        end = time.time()

    writer.add_scalars(
        'Losses',
        dict(total_loss=losses.avg,
             xen_loss=xent_losses.avg,
             htri_loss=htri_losses.avg), epoch + 1)
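Note: DeepSupervision is used throughout these examples whenever the model returns a tuple of intermediate outputs. Its definition is not shown here; a minimal sketch under the assumption that it applies the criterion to each output and accumulates the results (the torchreid helper may average rather than sum):

def deep_supervision(criterion, xs, y):
    # One loss term per intermediate output, accumulated into a single scalar.
    loss = 0.
    for x in xs:
        loss += criterion(x, y)
    return loss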
Exemple #25
0
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        outputs, features = model(imgs)
        if args.htri_only:
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)

            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
Exemple #26
0
def train(epoch,
          model,
          model_decoder,
          criterion_xent,
          criterion_htri,
          optimizer,
          optimizer_decoder,
          optimizer_encoder,
          trainloader,
          use_gpu,
          fixbase=False):
    losses = AverageMeter()
    losses_recon = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    model_decoder.train()

    if fixbase or args.fixbase:
        open_specified_layers(model, args.open_layers)
    else:
        open_all_layers(model)

    end = time.time()
    for batch_idx, (imgs, pids, _, img_paths,
                    imgs_texture) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, imgs_texture = imgs.cuda(), pids.cuda(), imgs_texture.cuda()

        outputs, features, feat_texture, x_down1, x_down2, x_down3 = model(
            imgs)
        torch.cuda.empty_cache()

        if args.htri_only:
            if isinstance(features, (tuple, list)):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, (tuple, list)):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)

            if isinstance(features, (tuple, list)):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)

            loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss

        optimizer.zero_grad()
        loss.backward(retain_graph=True)
        optimizer.step()

        del outputs, features

        # Second forward for training texture reconstruction
        close_specified_layers(model, ['fc', 'classifier'])

        recon_texture, x_sim1, x_sim2, x_sim3, x_sim4 = model_decoder(
            feat_texture, x_down1, x_down2, x_down3)
        torch.cuda.empty_cache()

        # Reconstruction criterion (L1) and similarity-map criterion (MSE)
        loss_rec = nn.L1Loss()
        loss_tri = nn.MSELoss()
        loss_recon = loss_rec(recon_texture, imgs_texture)

        # Triplet-style margin loss (margin 0.3) on MSE distances between
        # similarity maps: pull same-ID pairs together, push different-ID
        # pairs apart within each PK-sampled batch.
        loss_triplet_id_sim1 = 0.0
        loss_triplet_id_sim2 = 0.0
        loss_triplet_id_sim3 = 0.0
        loss_triplet_id_sim4 = 0.0

        for i in range(0, ((args.train_batch_size // args.num_instances) - 1) *
                       args.num_instances, args.num_instances):
            loss_triplet_id_sim1 += max(
                loss_tri(x_sim1[i], x_sim1[i + 1]) -
                loss_tri(x_sim1[i], x_sim1[i + 4]) + 0.3, 0.0)
            loss_triplet_id_sim2 += max(
                loss_tri(x_sim2[i + 1], x_sim2[i + 2]) -
                loss_tri(x_sim2[i + 1], x_sim2[i + 5]) + 0.3, 0.0)
            loss_triplet_id_sim3 += max(
                loss_tri(x_sim3[i + 2], x_sim3[i + 3]) -
                loss_tri(x_sim3[i + 2], x_sim3[i + 6]) + 0.3, 0.0)
            loss_triplet_id_sim4 += max(
                loss_tri(x_sim4[i], x_sim4[i + 3]) -
                loss_tri(x_sim4[i + 3], x_sim4[i + 4]) + 0.3, 0.0)
        loss_same_id = (loss_triplet_id_sim1 + loss_triplet_id_sim2 +
                        loss_triplet_id_sim3 + loss_triplet_id_sim4)

        loss_recon += loss_same_id

        optimizer_encoder.zero_grad()
        optimizer_decoder.zero_grad()
        loss_recon.backward()
        optimizer_encoder.step()
        optimizer_decoder.step()

        del feat_texture, x_down1, x_down2, x_down3, recon_texture

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))
        losses_recon.update(loss_recon.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Loss_recon {loss_recon.val:.4f} ({loss_recon.avg:.4f})\t'.
                  format(epoch + 1,
                         batch_idx + 1,
                         len(trainloader),
                         batch_time=batch_time,
                         data_time=data_time,
                         loss=losses,
                         loss_recon=losses_recon))

        end = time.time()
        # Re-open the layers that were frozen for the decoder pass so the
        # next iteration trains the full encoder again.
        open_all_layers(model)

        if (epoch + 1) % 50 == 0:
            print("==> Test reconstruction effect")
            model.eval()
            model_decoder.eval()
            # In eval mode the encoder/decoder are assumed to return the
            # reduced output tuples unpacked here.
            features, feat_texture = model(imgs)
            recon_texture = model_decoder(feat_texture)
            out = recon_texture.data.cpu().numpy()[0].squeeze()
            out = out.transpose((1, 2, 0))
            out = (out / 2.0 + 0.5) * 255.
            out = out.astype(np.uint8)
            print(
                'finish: ',
                os.path.join(
                    args.save_dir, img_paths[0].split('bounding_box_train/')
                    [-1].split('.jpg')[0] + 'ep_' + str(epoch) + '.jpg'))
            cv2.imwrite(
                os.path.join(
                    args.save_dir, img_paths[0].split('bounding_box_train/')
                    [-1].split('.jpg')[0] + 'ep_' + str(epoch) + '.jpg'),
                out[:, :, ::-1])
            model.train()
            model_decoder.train()
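Note: the training step above backpropagates two losses through one shared encoder forward pass, which is why the first backward uses retain_graph=True. A stripped-down, runnable sketch of that pattern (the modules are illustrative stand-ins, not the repo's):

import torch
import torch.nn as nn
import torch.nn.functional as F

enc = nn.Linear(8, 4)   # stands in for the re-id encoder
dec = nn.Linear(4, 8)   # stands in for the texture decoder

x = torch.randn(2, 8)
feat = enc(x)

# The first backward would normally free the graph; retain it so the
# reconstruction loss can also backpropagate through `feat`.
loss_id = feat.pow(2).mean()
loss_id.backward(retain_graph=True)

loss_recon = F.l1_loss(dec(feat), x)
loss_recon.backward()
Exemple #27
0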
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size * args.seq_len))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    if return_distmat:
        return distmat
    return cmc[0]
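Note: this video re-id variant folds the sequence dimension into the batch for the CNN forward, then pools per tracklet. A shape-only sketch of the reshape-and-pool step (the 2048-dim feature is an illustrative stand-in for the model output):

import torch

b, s, c, h, w = 2, 4, 3, 256, 128
clips = torch.randn(b, s, c, h, w)

frames = clips.view(b * s, c, h, w)   # every frame goes through the CNN at once
feats = torch.randn(b * s, 2048)      # stand-in for model(frames)
feats = feats.view(b, s, -1)

avg_pooled = feats.mean(dim=1)        # pool == 'avg'
max_pooled, _ = feats.max(dim=1)      # pool == 'max'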
Exemple #28
0
def test(model,
         model_decoder,
         queryloader,
         galleryloader,
         use_gpu,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()
    model_decoder.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, img_paths,
                        imgs_texture) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, feat_texture, x_down1, x_down2, x_down3 = model(imgs)

            recon_texture, x_sim1, x_sim2, x_sim3, x_sim4 = model_decoder(
                feat_texture, x_down1, x_down2, x_down3)
            out = recon_texture.data.cpu().numpy()[0].squeeze()
            out = out.transpose((1, 2, 0))
            out = (out / 2.0 + 0.5) * 255.
            out = out.astype(np.uint8)
            print(
                'finish: ',
                os.path.join(
                    args.save_dir,
                    img_paths[0].split('images_labeled/')[-1].split('.jpg')[0]
                    + '_ep_' + str(epoch) + '.jpg'))
            cv2.imwrite(
                os.path.join(
                    args.save_dir,
                    img_paths[0].split('images_labeled/')[-1].split('.jpg')[0]
                    + '_ep_' + str(epoch) + '.jpg'), out)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _,
                        imgs_texture) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, feat_texture, x_down1, x_down2, x_down3 = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
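Note: the reconstruction visualization above assumes the decoder emits values normalized to [-1, 1], hence the (out / 2 + 0.5) * 255 mapping before cv2.imwrite. A small helper capturing the same conversion (the function name is ours; the clip guards against slight overshoot):

import numpy as np

def tensor_to_uint8(img_chw):
    # CHW float in [-1, 1] -> HWC uint8 in [0, 255]
    out = img_chw.transpose((1, 2, 0))
    out = (out / 2.0 + 0.5) * 255.0
    return np.clip(out, 0, 255).astype(np.uint8)

# cv2.imwrite expects BGR; flip channels with out[:, :, ::-1] when the
# tensor is RGB, as Exemple #26 does.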
Exemple #29
0
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Exemple #30
0
def train(epoch, model, criterion, cri, optimizer, trainloader, use_gpu):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    # Center-weighted spatial mask for a 256x128 input: weights decay
    # exponentially with the distance from the image center (the width
    # factor is indexed by j, the height factor by i).
    mask = torch.zeros(1, 1, 256, 128)
    for i in range(256):
        for j in range(128):
            mask[0, 0, i, j] = (2 ** (-1 / 256.0 * abs(i - 127.5))) * \
                               (2 ** (-1 / 128.0 * abs(j - 63.5)))
    mask = mask.cuda()

    end = time.time()
    for batch_idx, (imgs, pids, _, imgs_high, res) in enumerate(trainloader):
        data_time.update(time.time() - end)

        if use_gpu:
            imgs, pids, imgs_high, res = imgs.cuda(), pids.cuda(), imgs_high.cuda(), res.cuda()

        prid, out, y, at11, at12, at13, at14, at15, at21, at22, at23, at24, at25 = model(
            imgs)

        loss = criterion(prid, pids)
        loss2 = cri(y * mask, imgs_high * mask)
        res = res.float().view(-1, 1, 1, 1)
        loss3 = cri(at11, res) + cri(at21, 1 - res) + cri(at12, res) + cri(
            at22, 1 - res) + cri(at13, res) + cri(at23, 1 - res) + cri(
                at14, res) + cri(at24, 1 - res) + cri(at15, res) + cri(
                    at25, 1 - res)
        loss = loss + loss2 * 0.1 + loss3 * 0.01
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        end = time.time()
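Note: the double loop that builds the center-weighted mask at the top of this example can also be written with broadcasting; a sketch that produces the same 1x1x256x128 weights as the loop above:

import torch

rows = 2 ** (-torch.arange(256, dtype=torch.float32).sub(127.5).abs() / 256.0)
cols = 2 ** (-torch.arange(128, dtype=torch.float32).sub(63.5).abs() / 128.0)
mask = (rows[:, None] * cols[None, :]).view(1, 1, 256, 128)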