Example #1
0
File: test.py  Project: skasai5296/dpc
def test(loader, model, criterion, device, CONFIG):
    """Evaluate ``model`` over ``loader`` and return the run-wide average of
    the last global metric (accuracy)."""
    test_timer = Timer()
    # Windowed meters (reset every 10 iterations) and run-wide meters.
    metrics = [AverageMeter("XELoss"), AverageMeter("Accuracy (%)")]
    global_metrics = [AverageMeter("XELoss"), AverageMeter("Accuracy (%)")]
    model.eval()
    for it, data in enumerate(loader):
        clip = data["clip"].to(device)
        label = data["label"].to(device)
        # Dump GPU stats once, on the second iteration.
        if it == 1 and torch.cuda.is_available():
            subprocess.run(["nvidia-smi"])

        with torch.no_grad():
            out = model(clip)
            loss, lossdict = criterion(out, label)

        for meter in metrics + global_metrics:
            meter.update(lossdict[meter.name])
        if (it + 1) % 10 == 0:
            metricstr = " | ".join(f"test {m}" for m in metrics)
            print(
                f"test | {test_timer} | iter {it+1:06d}/{len(loader):06d} | "
                f"{metricstr}",
                flush=True,
            )
            for meter in metrics:
                meter.reset()
    metric = global_metrics[-1]
    if CONFIG.use_wandb:
        wandb.log({f"test {metric.name}": metric.avg}, commit=False)
    return metric.avg
Example #2
0
def test_target():
    """Target test mode.

    Reports both the plain classification accuracy and the rate at which
    predictions hit the attack target label.
    """
    model.eval()
    acc_save = AverageMeter()
    success_save = AverageMeter()
    with torch.no_grad():
        for data, label, target in test_loader:
            data = data.float().cuda()
            label = label.long().cuda()
            target = target.long().cuda()
            # reshape to the [B, 3, N] point-cloud layout
            data = data.transpose(1, 2).contiguous()
            batch_size = label.size(0)
            # PointNet returns (logits, trans, trans_feat); others just logits
            if args.model.lower() == 'pointnet':
                logits, _, _ = model(data)
            else:
                logits = model(data)
            preds = torch.argmax(logits, dim=-1)
            n_correct = (preds == label).sum().float()
            acc_save.update((n_correct / float(batch_size)).item(), batch_size)
            n_hit = (preds == target).sum().float()
            success_save.update((n_hit / float(batch_size)).item(), batch_size)

    print('Overall accuracy: {:.4f}, '
          'attack success rate: {:.4f}'.format(acc_save.avg, success_save.avg))
Example #3
0
def test(loader):
    """Evaluate the global ``model`` on ``loader`` and return mean accuracy."""
    model.eval()
    acc_save = AverageMeter()
    with torch.no_grad():
        for data, label in loader:
            data, label = data.float().cuda(), label.long().cuda()
            # reshape to the [B, 3, N] layout the point-cloud models expect
            data = data.transpose(1, 2).contiguous()
            batch_size = data.size(0)
            # PointNet returns a (logits, trans, trans_feat) tuple
            logits = (model(data)[0]
                      if args.model.lower() == 'pointnet'
                      else model(data))
            preds = torch.argmax(logits, dim=-1)
            accuracy = (preds == label).sum().float() / float(batch_size)
            acc_save.update(accuracy.item(), batch_size)
    return acc_save.avg
def validate(val_loader, model, epoch):
    """Evaluate *model* on *val_loader*, optionally dumping visualization
    grids, and return the average top-1 accuracy (in percent).

    Side effects: logs a summary line and appends ``epoch, acc`` to the
    module-level ``results_test_file``.
    """
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        for i, (inputs, targets) in enumerate(val_loader):
            # compute output; model returns (logits, heatmap, ...) — the
            # remaining outputs are unused here
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            out, heatmap, _, _, _, _ = model(inputs)

            # measure accuracy
            _, predicted = torch.max(out.data, 1)
            correct = predicted.eq(targets.data).cpu().sum().item()
            acc.update(100. * float(correct) / inputs.size(0), inputs.size(0))

            # periodically dump input/heatmap grids as JPEGs for inspection
            if i % PRINT_FREQ == 0 and args.visualize:
                vis_input = torchvision.utils.make_grid(inputs,
                                                        nrow=8,
                                                        padding=2,
                                                        normalize=True)
                cv2.imwrite(
                    os.path.join(save_dir, 'test_inputs_{}.jpg'.format(i)),
                    (vis_input * 255).cpu().detach().numpy().transpose(
                        (1, 2, 0)).astype(np.uint8))
                vis_heatmap = torchvision.utils.make_grid(heatmap,
                                                          nrow=8,
                                                          padding=2,
                                                          normalize=True)
                cv2.imwrite(
                    os.path.join(save_dir, 'test_heatmap_{}.jpg'.format(i)),
                    (vis_heatmap * 255).cpu().detach().numpy().transpose(
                        (1, 2, 0)).astype(np.uint8))

    # NOTE(review): `i` is read after the loop — an empty val_loader would
    # raise NameError here.
    msg = 'Epoch: [{0}][{1}/{2}]\t' \
        'Accuracy3 {acc.val:.3f} ({acc.avg:.3f})'.format(
            epoch, i, len(val_loader), acc=acc)
    logging.info(msg)
    results_test_file.write('%d, %.4f\n' % (epoch, acc.avg))
    results_test_file.flush()

    return acc.avg
Example #5
0
def train_epoch(loader, model, optimizer, criterion, device, CONFIG, epoch):
    """Train *model* for one epoch over *loader*.

    Logs windowed metric averages every 10 iterations and, when enabled,
    mirrors them to wandb.
    """
    train_timer = Timer()
    metrics = [
        AverageMeter("XELoss"),
        AverageMeter("MSELoss"),
        AverageMeter("Accuracy (%)")
    ]
    # BUG FIX: `in ("DPC")` was a *substring* test against the string "DPC"
    # (a parenthesized string is not a tuple), so e.g. "DP" also matched.
    # A one-element tuple restores exact-name membership.
    if CONFIG.model in ("DPC",):
        metrics.pop(1)  # DPC has no MSE term
    model.train()
    for it, data in enumerate(loader):

        clip = data["clip"].to(device)
        # Dump GPU stats once, on the second iteration.
        if it == 1 and torch.cuda.is_available():
            subprocess.run(["nvidia-smi"])

        optimizer.zero_grad()
        output = model(clip)
        loss, lossdict = criterion(*output)

        for metric in metrics:
            metric.update(lossdict[metric.name])
        loss.backward()
        if CONFIG.grad_clip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           max_norm=CONFIG.grad_clip)
        optimizer.step()
        if it % 10 == 9:
            metricstr = " | ".join([f"train {metric}" for metric in metrics])
            print(
                f"epoch {epoch:03d}/{CONFIG.max_epoch:03d} | train | "
                f"{train_timer} | iter {it+1:06d}/{len(loader):06d} | "
                f"{metricstr}",
                flush=True,
            )
            if CONFIG.use_wandb:
                for metric in metrics:
                    wandb.log({f"train {metric.name}": metric.avg},
                              commit=False)
                wandb.log({"iteration": it + (epoch - 1) * len(loader)})
            for metric in metrics:
                metric.reset()
Example #6
0
def test_normal():
    """Normal test mode.

    Evaluates the global ``model`` on all of ``test_loader`` and prints the
    overall accuracy.
    """
    model.eval()
    acc_save = AverageMeter()
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.float().cuda(), label.long().cuda()
            # to [B, 3, N] point cloud
            data = data.transpose(1, 2).contiguous()
            # BUG FIX: batch_size was previously computed *after* the
            # empty-cloud check, so a zero-point first batch raised
            # NameError (and later ones used a stale value).
            batch_size = label.size(0)
            if data.shape[2] == 0:
                # An empty point cloud counts as fully correct (acc = 1.0),
                # matching the original float(batch_size)/float(batch_size).
                acc_save.update(1.0, batch_size)
                continue
            # batch in
            if args.model.lower() == 'pointnet':
                logits, _, _ = model(data)
            else:
                logits = model(data)
            preds = torch.argmax(logits, dim=-1)
            acc = (preds == label).sum().float() / float(batch_size)
            acc_save.update(acc.item(), batch_size)

    print('Overall accuracy: {:.4f}'.format(acc_save.avg))
Example #7
0
File: finetune.py  Project: skasai5296/dpc
def validate(loader, model, criterion, device, CONFIG, epoch):
    """Run one validation pass (capped at ~1000 iterations) and return the
    run-wide average of the last global metric (accuracy)."""
    val_timer = Timer()
    # Windowed meters (reset every 10 iterations) and run-wide meters.
    metrics = [AverageMeter("XELoss"), AverageMeter("Accuracy (%)")]
    global_metrics = [AverageMeter("XELoss"), AverageMeter("Accuracy (%)")]
    model.eval()
    for it, data in enumerate(loader):
        clip = data["clip"].to(device)
        label = data["label"].to(device)
        # Dump GPU stats once, on the second iteration.
        if it == 1 and torch.cuda.is_available():
            subprocess.run(["nvidia-smi"])

        with torch.no_grad():
            out = model(clip)
            loss, lossdict = criterion(out, label)

        for meter in metrics + global_metrics:
            meter.update(lossdict[meter.name])
        if (it + 1) % 10 == 0:
            metricstr = " | ".join(
                f"validation {m}" for m in metrics)
            print(
                f"epoch {epoch:03d}/{CONFIG.max_epoch:03d} | valid | "
                f"{val_timer} | iter {it+1:06d}/{len(loader):06d} | "
                f"{metricstr}",
                flush=True,
            )
            for meter in metrics:
                meter.reset()
        # a partial pass is enough for validation; stop after 1000 iterations
        if it == 1000:
            break
    if CONFIG.use_wandb:
        for meter in global_metrics:
            wandb.log({f"finetune epoch {meter.name}": meter.avg},
                      commit=False)
    return global_metrics[-1].avg
Example #8
0
def train_PCB(epoch, model, criterion, optimizer, trainloader, use_gpu):
    """Train a PCB-style part-based re-id model for one epoch.

    Stage 1 (epoch <= 120) trains the four part branches plus the global
    and two L2 branches; stage 2 (epoch > 120) trains only the Y2/Y3 heads.
    Backward passes are driven per-loss via torch.autograd.backward with
    unit gradients rather than summing into a single scalar.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        logits_list, globe_x_logits, L2_logits, Y2, Y3 = model(
            imgs
        )  # return logits_list ,globe_x_logits,L2_logits,z_total_Y2,z_total_Y3
        part = {}
        for i in range(4):
            part[i] = logits_list[i]
        features_globe = globe_x_logits  # global feature
        L2_f1 = L2_logits[0]
        L2_f2 = L2_logits[1]
        loss_list = {}
        # NOTE(review): if logits_list[1] is a tuple, only `loss` is set
        # here; the epoch<=120 branch below would then hit a NameError on
        # loss_list[0] etc. — confirm the tuple case can actually occur.
        if isinstance(logits_list[1], tuple):
            loss = DeepSupervision(criterion, logits_list[1], pids)
        else:
            for i in range(4):
                loss_list[i] = criterion(part[i], pids)
            loss_globe = criterion(features_globe, pids)
            loss_L21 = criterion(L2_f1, pids)
            loss_L22 = criterion(L2_f2, pids)
            loss_Y2 = criterion(Y2, pids)
            loss_Y3 = criterion(Y3, pids)
            #loss = criterion(outputs, pids)
        if (epoch <= 120):
            # Stage 1: part + global + L2 losses. `loss` itself is only used
            # for meter bookkeeping; gradients flow via the explicit list.
            loss = loss_list[0] + loss_list[1] + loss_list[2] + loss_list[
                3] + loss_globe + loss_L21 + loss_L22
            optimizer.zero_grad()
            torch.autograd.backward([
                loss_list[0], loss_list[1], loss_list[2], loss_list[3],
                loss_L21, loss_L22, loss_globe
            ], [
                torch.ones(1).cuda(),
                torch.ones(1).cuda(),
                torch.ones(1).cuda(),
                torch.ones(1).cuda(),
                torch.ones(1).cuda(),
                torch.ones(1).cuda(),
                torch.ones(1).cuda()
            ])
            #loss.backward()
            optimizer.step()
        if (epoch > 120):
            # Stage 2: only the Y2/Y3 heads receive gradients.
            loss = loss_Y2 + loss_Y3
            optimizer.zero_grad()
            torch.autograd.backward(
                [loss_Y2, loss_Y3],
                [torch.ones(1).cuda(),
                 torch.ones(1).cuda()])
            optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
Example #9
0
def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
    """One training epoch: forward, loss, backward, step, periodic logging."""
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # time spent waiting on the data loader
        data_time.update(time.time() - end)

        outputs = model(imgs)
        # a tuple output means deeply-supervised heads: one loss per head
        loss = (DeepSupervision(criterion, outputs, pids)
                if isinstance(outputs, tuple)
                else criterion(outputs, pids))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # full iteration time (data wait + compute)
        batch_time.update(time.time() - end)
        end = time.time()

        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
def evaluate(model, BCELoss, CETLoss, data_loader):
    """ Evaluate model on labeled data. Used for evaluating on validation data.
    Args:
        model: the trained model
        BCELoss: the loss function at first level
        CETLoss: the loss function at second level
        data_loader: the loader of data set
    Return:
        losses.avg, acc_level_0.avg, acc_level_1.avg, true_labels, pred_labels
    """

    # switch to evaluate mode
    model.eval()

    # define loss and accuracies at two levels
    losses = AverageMeter()
    acc_level_0 = AverageMeter()
    acc_level_1 = AverageMeter()
    true_labels, pred_labels = [], []

    # NOTE(review): runs without torch.no_grad(), so autograd state is built
    # needlessly during evaluation; Variable(...) is legacy pre-0.4 API.
    for batch_idx, (inputs, y) in enumerate(data_loader):

        y = y.numpy()  # list of label indices
        # binary group-membership targets for the level-0 head
        targets = categorical_to_binary_tensor(model, y)
        targets = targets.cuda() if conf.GPU_AVAIL else targets

        input_var = Variable(inputs.cuda() if conf.GPU_AVAIL else inputs,
                             requires_grad=False)
        target_var = Variable(targets, requires_grad=False)

        batch_size = inputs.size(0)

        # forward net
        outputs = model(input_var)

        # variable for sub input for each group
        input_var_0, target_var_0 = input_to_tensor(
            model, outputs, y, np.in1d(y, model.args['gr_0_idx']))
        input_var_1, target_var_1 = input_to_tensor(
            model, outputs, y, np.in1d(y, model.args['gr_1_idx']))

        # loss at the first level
        loss = BCELoss(model.level_0(outputs), target_var)

        # loss at the second level, weighted by each group's share of the batch
        if not input_var_0 is None:
            input_var_0 = model.level_1_0(input_var_0)
            loss += CETLoss(input_var_0, target_var_0) * (
                target_var_0.data.size(0) / batch_size)

        if not input_var_1 is None:
            input_var_1 = model.level_1_1(input_var_1)
            loss += CETLoss(input_var_1, target_var_1) * (
                target_var_1.data.size(0) / batch_size)

        # measure accuracy and record loss
        _, pred, _, pred_sublevel = predict(model, outputs)

        losses.update(loss.item(), inputs.size(0))
        # level-0 accuracy: predicted group vs. the true group of each label
        acc = (pred == np.array([model.args['gr_idx'][i]
                                 for i in y])).sum() / inputs.size(0)
        acc_level_0.update(acc, inputs.size(0))
        acc = (pred_sublevel == y).sum() / inputs.size(0)
        acc_level_1.update(acc, inputs.size(0))

        pred_labels.extend(pred_sublevel)
        true_labels.extend(y.tolist())

    return losses.avg, acc_level_0.avg, acc_level_1.avg, true_labels, pred_labels
def run_epoch(train_loader,
              model,
              BCELoss,
              CETLoss,
              optimizer,
              epoch,
              num_epochs,
              log=None):
    """Run one epoch of training.

    Two-level hierarchical classifier: BCELoss trains the level-0 group
    split, CETLoss trains the per-group level-1 classifiers, each weighted
    by that group's share of the batch.
    Returns (avg_loss, avg_acc_level_0, avg_acc_level_1).
    """
    # switch to train mode
    model.train()

    # define loss and accuracies at two levels
    losses = AverageMeter()
    acc_level_0 = AverageMeter()
    acc_level_1 = AverageMeter()

    data_size = len(train_loader.dataset)

    # number of iterations before print outputs (~10 reports per epoch)
    print_iter = np.ceil(data_size / (10 * train_loader.batch_size))

    for batch_idx, (inputs, y) in enumerate(train_loader):

        y = y.numpy()  # list of label indices
        # binary group-membership targets for the level-0 head
        targets = categorical_to_binary_tensor(model, y)
        targets = targets.cuda() if conf.GPU_AVAIL else targets

        input_var = Variable(inputs.cuda() if conf.GPU_AVAIL else inputs)
        target_var = Variable(targets)

        batch_size = inputs.size(0)

        # reset gradient
        optimizer.zero_grad()

        # forward net
        outputs = model(input_var)

        # variable for sub input for each group
        input_var_0, target_var_0 = input_to_tensor(
            model, outputs, y, np.in1d(y, model.args['gr_0_idx']))
        #print(y)
        input_var_1, target_var_1 = input_to_tensor(
            model, outputs, y, np.in1d(y, model.args['gr_1_idx']))

        # loss at the first level
        loss = BCELoss(model.level_0(outputs), target_var)

        # loss at the second level, weighted by each group's batch share
        if not input_var_0 is None:
            input_var_0 = model.level_1_0(input_var_0)
            loss += CETLoss(input_var_0, target_var_0) * (
                target_var_0.data.size(0) / batch_size)

        if not input_var_1 is None:
            input_var_1 = model.level_1_1(input_var_1)
            loss += CETLoss(input_var_1, target_var_1) * (
                target_var_1.data.size(0) / batch_size)

        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prob, pred, prob_sublevel, pred_sublevel = predict(model, outputs)

        losses.update(loss.item(), inputs.size(0))
        acc = (pred == np.array([model.args['gr_idx'][i]
                                 for i in y])).sum() / inputs.size(0)
        acc_level_0.update(acc, inputs.size(0))
        acc = (pred_sublevel == y).sum() / inputs.size(0)
        acc_level_1.update(acc, inputs.size(0))

        # print all messages
        # NOTE(review): the log=None default crashes at log.write below —
        # callers must always pass an object with a .write() method.
        if ((batch_idx + 1) % print_iter == 0) or (losses.count == data_size):
            log.write('Epoch [{:>2}][{:>6.2f} %]\t'
                      'Loss {:.4f}\t'
                      'acc_level_0 {:.4f}\t'
                      'acc_level_1 {:.4f}\n'.format(
                          epoch + 1, losses.count * 100 / data_size,
                          losses.avg, acc_level_0.avg, acc_level_1.avg))

    return losses.avg, acc_level_0.avg, acc_level_1.avg
Example #12
0
def train(epoch, model, model2, criterion_class, criterion_metric,
          criterion_ml, optimizer, optimizer2, trainloader, use_gpu):
    """Mutual-learning training epoch for two peer models.

    Each model is trained with cross-entropy + triplet loss; an additional
    mutual loss pulls the two models' pairwise-distance matrices together.
    """
    model.train()
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    mutual_losses = AverageMeter()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        outputs1, features1 = model(imgs)
        outputs2, features2 = model2(imgs)
        if args.htri_only:
            # triplet (metric) loss only
            if isinstance(features1, tuple):
                loss1, dist1 = DeepSupervision(criterion_metric, features1,
                                               pids)
                loss2, dist2 = DeepSupervision(criterion_metric, features2,
                                               pids)
            else:
                loss1, dist1 = criterion_metric(features1, pids)
                loss2, dist2 = criterion_metric(features2, pids)
        else:
            if isinstance(outputs1, tuple):
                xent_loss1 = DeepSupervision(criterion_class, outputs1, pids)
                xent_loss2 = DeepSupervision(criterion_class, outputs2, pids)
            else:
                xent_loss1 = criterion_class(outputs1, pids)
                xent_loss2 = criterion_class(outputs2, pids)

            if isinstance(features1, tuple):
                htri_loss1, dist1 = DeepSupervision(criterion_metric,
                                                    features1, pids)
                htri_loss2, dist2 = DeepSupervision(criterion_metric,
                                                    features2, pids)
            else:
                htri_loss1, dist1 = criterion_metric(features1, pids)
                htri_loss2, dist2 = criterion_metric(features2, pids)

            # BUG FIX: these sums previously ran unconditionally, raising a
            # NameError in the htri_only path (xent_loss* undefined there).
            loss1 = xent_loss1 + htri_loss1
            loss2 = xent_loss2 + htri_loss2

        #ml_loss = criterion_ml(dist1, dist2, pids)
        # mutual loss: MSE between the two models' distance matrices
        ml_loss = torch.mean(torch.pow(dist1 - dist2, 2))
        loss = loss1 + loss2 + ml_loss
        optimizer.zero_grad()
        optimizer2.zero_grad()
        loss.backward()
        optimizer.step()
        optimizer2.step()

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        # NOTE: these meters record the per-model totals (loss1/loss2), not
        # the pure xent/htri components — naming kept for log compatibility.
        xent_losses.update(loss1.item(), pids.size(0))
        htri_losses.update(loss2.item(), pids.size(0))
        mutual_losses.update(ml_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Loss1 {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'Loss2 {htri_loss.val:.4f} ({htri_loss.avg:.4f})\t'
                  'MLoss {ml_loss.val:.4f} ({ml_loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      xent_loss=xent_losses,
                      htri_loss=htri_losses,
                      ml_loss=mutual_losses))
Example #13
0
def test(model, queryloader, galleryloader, use_gpu, dataset_q,dataset_g,track_id_tmp=None,rank=100):
    """Extract query/gallery features, build a query-vs-track distance matrix
    for the AICity set, and write the top-100 ranking per query to a file.

    NOTE(review): the bare `embed()` calls below drop into an interactive
    IPython shell and block execution — remove for unattended runs.
    """
    batch_time = AverageMeter()
    #embed()
    model.eval()
    
    with torch.no_grad():
        qf, lqf, q_imgs = [], [], []

        # image ids are parsed from file names ("/path/1234.jpg" -> 1234)
        for q_idx in range(len(dataset_q)):
            q_img = int(dataset_q[q_idx].split('/')[-1].strip('.jpg'))
            q_imgs.append(q_img)
    
        for batch_idx, (imgs) in enumerate(queryloader):
            #embed()
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features,local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            qf.append(features)
            lqf.append(local_features)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf,0)
        print('lqf shape',lqf.shape)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, lgf, g_imgs = [], [], []

        for g_idx in range(len(dataset_g)):
            g_img = int(dataset_g[g_idx].split('/')[-1].strip('.jpg'))
            g_imgs.append(g_img)
        end = time.time()
        
        # embed()
        #obtain the track info
        for batch_idx, (imgs) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features,local_features = model(imgs)
            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)

        #embed()
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf,0)
        print('lgf shape',lgf.shape)
        # average per-image features over each gallery track
        gt_f,_ = track_info_average(track_id_tmp,gf,lgf)
        embed()
        print('len of gimgs',len(g_imgs))
        print('Extracted features for gallery_track set,obtained {}-by-{} matrix'.format(gt_f.size(0),gt_f.size(1)))
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    #embed()
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))
    # L2 feature normalization (epsilon guards against zero vectors)
    qf = 1. * qf / (torch.norm(qf, 2, dim = -1, keepdim=True).expand_as(qf) + 1e-12)
    #gf = 1. * gf / (torch.norm(gf, 2, dim = -1, keepdim=True).expand_as(gf) + 1e-12)
    gt_f = 1. * gt_f / (torch.norm(gt_f, 2, dim = -1, keepdim=True).expand_as(gt_f) + 1e-12)
    
    m, n = qf.size(0), gt_f.size(0)
    # squared Euclidean distance: ||q||^2 + ||g||^2 - 2*q.g
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gt_f, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gt_f.t())
    distmat = distmat.numpy()
    
    #embed()
    print("------------------")

    if args.reranking:
        from util.re_ranking import re_ranking
        if args.test_distance == 'global':
            print("Only using global branch for reranking")
            distmat = re_ranking(qf,gt_f,k1=20, k2=6, lambda_value=0.3)
        else:
            local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(),aligned= not args.unaligned)
            local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(),aligned= not args.unaligned)
            # NOTE(review): `local_distmat` (query-vs-gallery) is never
            # computed in this function — this branch raises NameError.
            local_dist = np.concatenate(
                [np.concatenate([local_qq_distmat, local_distmat], axis=1),
                 np.concatenate([local_distmat.T, local_gg_distmat], axis=1)],
                axis=0)
            if args.test_distance == 'local':
                print("Only using local branch for reranking")
                distmat = re_ranking(qf,gf,k1=20,k2=6,lambda_value=0.3,local_distmat=local_dist,only_local=True)
            elif args.test_distance == 'global_local':
                print("Using global and local branches for reranking")
                distmat = re_ranking(qf,gf,k1=20,k2=6,lambda_value=0.3,local_distmat=local_dist,only_local=False)
    #embed()
    print("Computing CMC and mAP for re_ranking")

    print("==> Test aicity dataset and write to csv")
    test_rank_result = test_rank100_aicity(distmat,q_imgs,g_imgs,track_id_tmp,use_track_info=True)
    # test_rank_result is a dict, use pandas to convert 
    # embed()
    test_rank_result_df = pd.DataFrame(list(test_rank_result.items()),columns=['query_ids','gallery_ids'])
    test_result_df = test_rank_result_df.sort_values('query_ids')
    # write to csv
    embed()
    with open('aic_res_'+args.result_dir+'.txt','w') as f:
        for idx in range(len(test_result_df)):
            sep_c = ' '
            row_ranks = []
            # top-100 gallery ids for this query
            idx_row = test_result_df.iloc[idx]['gallery_ids'][:100]
            #embed()
            for item in idx_row:
                row_rank = str(item[0])
                row_ranks.append(row_rank)
            sep_c = sep_c.join(row_ranks)
            #embed()
            sep_c = sep_c+'\n'
            #embed()
            f.write(sep_c)
        # NOTE(review): redundant — the `with` block already closes f
        f.close()
Example #14
0
        BACKBONE = BACKBONE.to(DEVICE)
        HEAD = HEAD.to(DEVICE)

    # ======= train & validation & save checkpoint =======#
    DISP_FREQ = len(train_loader) // 100  # frequency to display training loss & acc

    # NUM_EPOCH_WARM_UP = NUM_EPOCH // 25  # use the first 1/25 epochs to warm up
    # NUM_BATCH_WARM_UP = len(train_loader) * NUM_EPOCH_WARM_UP  # use the first 1/25 epochs to warm up

    batch = 0  # batch index
    for epoch in range(NUM_EPOCH):  # start training process

        BACKBONE.train()  # set to training mode
        HEAD.train()

        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        for inputs, labels in tqdm(iter(train_loader)):

            if batch == STAGES[
                0]:  # adjust LR for each training stage after warm up, you can also choose to adjust LR manually (with slight modification) once plaueau observed
                schedule_lr(OPTIMIZER)
            if batch == STAGES[1]:
                schedule_lr(OPTIMIZER)
            if batch == STAGES[2]:
                schedule_lr(OPTIMIZER)

            # compute output
            inputs = inputs.to(DEVICE)
Example #15
0
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Extract query/gallery features, compute the pairwise distance matrix,
    and report CMC/mAP. Returns the rank-1 accuracy."""
    batch_time = AverageMeter()

    model.eval()

    def _extract(loader):
        # Pull features, person ids and camera ids for every batch.
        feats, ids, cams = [], [], []
        for imgs, pids, camids in loader:
            if use_gpu:
                imgs = imgs.cuda()
            tic = time.time()
            features = model(imgs)
            batch_time.update(time.time() - tic)
            feats.append(features.data.cpu())
            ids.extend(pids)
            cams.extend(camids)
        return torch.cat(feats, 0), np.asarray(ids), np.asarray(cams)

    with torch.no_grad():
        qf, q_pids, q_camids = _extract(queryloader)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = _extract(galleryloader)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # squared Euclidean distances: ||q||^2 + ||g||^2 - 2*q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
Example #16
0
def train(train_loader, backbone, head, criterion, optimizer, epoch, cfg,
          writer):
    """Train backbone+head for one epoch.

    Prints loss / top-1 / top-5 every DISP_FREQ batches and, on rank 0,
    logs the epoch averages to the TensorBoard *writer*.
    """
    DISP_FREQ = 100  # 100 batch
    batch = 0  # batch index
    backbone.train()  # set to training mode
    head.train()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    for inputs, labels in tqdm(iter(train_loader)):
        # compute output
        start_time = time.time()
        inputs = inputs.cuda(cfg['GPU'], non_blocking=True)
        labels = labels.cuda(cfg['GPU'], non_blocking=True)
        features, conv_features = backbone(inputs)

        # the head consumes labels (margin-based); original_logits are the
        # pre-margin logits used below for accuracy
        outputs, original_logits = head(features, labels)
        loss = criterion(outputs, labels)
        end_time = time.time()
        duration = end_time - start_time
        if ((batch + 1) % DISP_FREQ == 0) and batch != 0:
            print("batch inference time", duration)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1, prec5 = accuracy(original_logits.data, labels, topk=(1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.data.item(), inputs.size(0))
        top5.update(prec5.data.item(), inputs.size(0))
        # display training loss & acc every DISP_FREQ
        if ((batch + 1) % DISP_FREQ == 0) or batch == 0:
            print("=" * 60)
            print('Epoch {}/{} Batch {}/{}\t'
                  'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch + 1,
                      cfg['NUM_EPOCH'],
                      batch + 1,
                      len(train_loader),
                      loss=losses,
                      top1=top1,
                      top5=top5))
            print("=" * 60)
        sys.stdout.flush()
        batch += 1  # batch index
    epoch_loss = losses.avg
    epoch_acc = top1.avg
    print("=" * 60)
    print('Epoch: {}/{}\t'
          'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
          'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
              epoch + 1, cfg['NUM_EPOCH'], loss=losses, top1=top1, top5=top5))
    sys.stdout.flush()
    print("=" * 60)
    # only the rank-0 process writes TensorBoard scalars
    if cfg['RANK'] == 0:
        writer.add_scalar("Training_Loss", epoch_loss, epoch + 1)
        writer.add_scalar("Training_Accuracy", epoch_acc, epoch + 1)
        writer.add_scalar("Top1", top1.avg, epoch + 1)
        writer.add_scalar("Top5", top5.avg, epoch + 1)
def test(model, queryloader, galleryloader, use_gpu, ranks=None,
         save_path='/home/gaoziqiang/tempt/tank_cam5.png'):
    """Evaluate a re-ID model on query/gallery sets and plot CMC curves.

    Extracts global and local features for both loaders, builds a squared
    Euclidean distance matrix on L2-normalized global features, optionally
    adds the aligned local distance (args.test_distance) and applies
    re-ranking (args.reranking), then reports mAP and the CMC curve.

    Args:
        model: network returning (global_features, local_features).
        queryloader: iterable of (imgs, pids, camids) for the query set.
        galleryloader: iterable of (imgs, pids, camids) for the gallery set.
        use_gpu: move image batches to CUDA when True.
        ranks: CMC ranks to report; defaults to [1, 5, 10, 20].
        save_path: where the ranking/re-ranking comparison plot is written.

    Returns:
        Rank-1 accuracy of the last distance matrix evaluated.
    """
    if ranks is None:
        ranks = [1, 5, 10, 20]
    print('------start testing------')
    cmc1 = []  # CMC values before re-ranking (plotted as 'ranking')
    cmc2 = []  # CMC values after re-ranking; stays empty unless args.reranking
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # ---- query features ----
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # ---- gallery features ----
        gf, g_pids, g_camids, lgf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # L2-normalize global features so the Euclidean distance is cosine-like.
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) +
                    1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) +
                    1e-12)
    # Squared Euclidean distance: |q|^2 + |g|^2 - 2 q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # BUG FIX: keyword form; the positional addmm_(beta, alpha, mat1, mat2)
    # overload is deprecated and removed in recent PyTorch.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    if not args.test_distance == 'global':
        # BUG FIX: the original printed "Only using global branch" here,
        # although this branch runs precisely when the local distance is used.
        print("Computing local branch distance")
        from util.distance import low_memory_local_dist
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(),
                                              lgf.numpy(),
                                              aligned=not args.unaligned)
        if args.test_distance == 'local':
            print("Only using local branch")
            distmat = local_distmat
        if args.test_distance == 'global_local':
            print("Using global and local branches")
            distmat = local_distmat + distmat
    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    print("cms's shape: ", cmc.shape)
    print("cms's type: ", cmc.dtype)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        cmc1.append(cmc[r - 1])
    print("------------------")

    if args.reranking:
        from util.re_ranking import re_ranking
        if args.test_distance == 'global':
            print("Only using global branch for reranking")
            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
        else:
            # Build the (q+g) x (q+g) block local-distance matrix that
            # re_ranking expects.
            local_qq_distmat = low_memory_local_dist(
                lqf.numpy(), lqf.numpy(), aligned=not args.unaligned)
            local_gg_distmat = low_memory_local_dist(
                lgf.numpy(), lgf.numpy(), aligned=not args.unaligned)
            local_dist = np.concatenate([
                np.concatenate([local_qq_distmat, local_distmat], axis=1),
                np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
            ], axis=0)
            if args.test_distance == 'local':
                print("Only using local branch for reranking")
                distmat = re_ranking(qf,
                                     gf,
                                     k1=20,
                                     k2=6,
                                     lambda_value=0.3,
                                     local_distmat=local_dist,
                                     only_local=True)
            elif args.test_distance == 'global_local':
                print("Using global and local branches for reranking")
                distmat = re_ranking(qf,
                                     gf,
                                     k1=20,
                                     k2=6,
                                     lambda_value=0.3,
                                     local_distmat=local_dist,
                                     only_local=False)
        print("Computing CMC and mAP for re_ranking")
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)

        print("Results ----------")
        print("mAP(RK): {:.1%}".format(mAP))
        print("CMC curve(RK)")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
            cmc2.append(cmc[r - 1])
        print("------------------")

    # Plot ranking (and, when available, re-ranking) CMC curves.
    plt.plot(ranks,
             cmc1,
             label='ranking',
             color='red',
             marker='o',
             markersize=5)
    # BUG FIX: the original always plotted cmc2; with args.reranking off,
    # cmc2 is empty and matplotlib raises on the length mismatch with ranks.
    if cmc2:
        plt.plot(ranks,
                 cmc2,
                 label='re-ranking',
                 color='blue',
                 marker='o',
                 markersize=5)
    plt.ylabel('Accuracy')
    plt.xlabel('Rank_num')
    plt.title('Result of Ranking and Re-ranking(query_tank_cam=5)')
    plt.legend()
    plt.savefig(save_path)
    plt.show()

    return cmc[0]
def _save_vis_grid(batch_tensor, path):
    """Tile a batch (8 per row) into one normalized grid image and save it
    as a uint8 HWC image via OpenCV."""
    grid = torchvision.utils.make_grid(batch_tensor,
                                       nrow=8,
                                       padding=2,
                                       normalize=True)
    cv2.imwrite(path,
                (grid * 255).cpu().detach().numpy().transpose(
                    (1, 2, 0)).astype(np.uint8))


def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    The model returns (logits, heatmap_all, heatmap_remain, heatmap_drop,
    select_channel, all_channel); only logits feed the loss, the remaining
    tensors are dumped as debug images every PRINT_FREQ batches when
    args.visualize is set.  Appends "epoch, acc, loss" to
    results_train_file at the end of the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # compute output
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        out, heatmap_all, heatmap_remain, heatmap_drop, select_channel, all_channel = model(
            inputs)

        # compute gradient and do update step
        loss = criterion(out, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))

        _, predicted = torch.max(out.data, 1)
        correct = predicted.eq(targets.data).cpu().sum().item()
        acc.update(100. * float(correct) / inputs.size(0), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % PRINT_FREQ == 0:
            msg = 'Epoch: [{0}][{1}/{2}]\t' \
                  'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                  'Speed {speed:.1f} samples/s\t' \
                  'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                  'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
                  'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      speed=inputs.size(0)/batch_time.val,
                      data_time=data_time, loss=losses, acc=acc)
            logging.info(msg)

            if args.visualize:
                # Refactor: the original repeated the make_grid + imwrite
                # boilerplate six times; loop over (tag, tensor) pairs
                # instead.  File names match the originals:
                # train_<tag>_<i>.jpg
                for tag, tensor in (('inputs', inputs),
                                    ('heatmap_all', heatmap_all),
                                    ('heatmap_remain', heatmap_remain),
                                    ('heatmap_drop', heatmap_drop),
                                    ('select_channel', select_channel),
                                    ('all_channel', all_channel)):
                    _save_vis_grid(
                        tensor,
                        os.path.join(save_dir,
                                     'train_{}_{}.jpg'.format(tag, i)))

    results_train_file.write('%d, %.4f, %.4f\n' % (epoch, acc.avg, losses.avg))
    results_train_file.flush()
예제 #19
0
def train(epoch, model, model2, criterion, criterion_ml, optimizer1,
          optimizer2, trainloader, use_gpu):
    """One epoch of deep mutual learning for two networks.

    Each network is trained with its own classification loss plus a mutual
    (mimicry) loss, weighted by 10, against the other network's outputs.
    Both optimizers are stepped after both losses are backpropagated.

    Args:
        epoch: current epoch index (0-based; printed 1-based).
        model, model2: the two peer networks.
        criterion: classification loss.
        criterion_ml: mutual-learning loss between the two outputs.
        optimizer1, optimizer2: optimizers for model and model2.
        trainloader: iterable of (imgs, pids, _) batches.
        use_gpu: move batches to CUDA when True.
    """
    losses1 = AverageMeter()
    losses2 = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    model2.train()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        optimizer2.zero_grad()
        optimizer1.zero_grad()
        outputs1 = model(imgs)
        outputs2 = model2(imgs)

        if isinstance(outputs1, tuple):
            loss1 = DeepSupervision(
                criterion, outputs1,
                pids) + 10 * DeepSupervision(criterion_ml, outputs1, outputs2)
        else:
            loss1 = criterion(outputs1,
                              pids) + 10 * criterion_ml(outputs1, outputs2)

        # BUG FIX: the original tested isinstance(outputs1, tuple) here,
        # but loss2 is built from outputs2, so outputs2 must be tested.
        if isinstance(outputs2, tuple):
            loss2 = DeepSupervision(
                criterion, outputs2,
                pids) + 10 * DeepSupervision(criterion_ml, outputs2, outputs1)
        else:
            loss2 = criterion(outputs2,
                              pids) + 10 * criterion_ml(outputs2, outputs1)

        loss1.backward()
        loss2.backward()
        optimizer2.step()
        optimizer1.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        losses1.update(loss1.item(), pids.size(0))
        losses2.update(loss2.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss1 {loss1.val:.4f} ({loss1.avg:.4f})\t'
                  'Loss2 {loss2.val:.4f} ({loss2.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss1=losses1,
                      loss2=losses2))
예제 #20
0
def test_PCB03(model,
               queryloader,
               galleryloader,
               use_gpu,
               ranks=None):
    """Evaluate a PCB-style model using only its global feature branch.

    The model returns (local_feature_list, global_feature, L2_feature_list,
    sa_list); only the global feature is used to build the query-gallery
    distance matrix.

    Returns:
        Rank-1 accuracy.
    """
    if ranks is None:
        ranks = [1, 5, 10, 20]
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        q_pids, q_camids = [], []
        qf = []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            # model returns: out_local_list, out_globe_feature,
            # L2_feature_list, sa_list — keep the global feature only.
            _, fs_g, _, _ = model(imgs)
            batch_time.update(time.time() - end)

            qf.append(fs_g.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        g_pids, g_camids = [], []
        gf = []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            _, fs_g, _, _ = model(imgs)
            batch_time.update(time.time() - end)

            gf.append(fs_g.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # Squared Euclidean distance on the global features.
    # (The original kept a dict and a `for i in range(0, 1)` loop as a
    # placeholder for four per-branch distances d1..d4; collapsed to the
    # single matrix actually computed.)
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # BUG FIX: keyword form; the positional addmm_(beta, alpha, mat1, mat2)
    # overload is deprecated and removed in recent PyTorch.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
예제 #21
0
def _metric_losses(criterion_metric, features, pids, local_features):
    """Return (global_loss, local_loss), using DeepSupervision when the
    model emits multiple feature tensors."""
    if isinstance(features, tuple):
        return DeepSupervision(criterion_metric, features, pids,
                               local_features)
    return criterion_metric(features, pids, local_features)


def train(epoch, model, criterion_class, criterion_metric, optimizer, trainloader, use_gpu):
    """One training epoch combining classification and metric losses.

    loss = xent + global (triplet) + local (aligned) loss.  With
    args.htri_only the classification term is dropped; with args.use_pcb
    the classification loss is summed over per-stripe logits.
    """
    model.train()
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    xent_losses = AverageMeter()
    global_losses = AverageMeter()
    local_losses = AverageMeter()

    end = time.time()
    for batch_idx, (imgs, pids) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        # NOTE(review): when not aligned the model yields no local features;
        # this assumes criterion_metric tolerates None then — TODO confirm.
        # (The original left local_features unassigned in that case, which
        # raised NameError at the metric-loss call.)
        local_features = None
        if args.aligned:
            outputs, features, local_features = model(imgs)
        else:
            # only use global feature to get the classifier results
            outputs, features = model(imgs)

        if args.htri_only:
            # BUG FIX: the original never assigned xent_loss on this path
            # and crashed with NameError at the loss sum below.
            xent_loss = 0.0
            global_loss, local_loss = _metric_losses(criterion_metric,
                                                     features, pids,
                                                     local_features)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_class, outputs, pids)
            elif args.use_pcb:
                # PCB: sum the classification loss over per-stripe logits.
                xent_loss = 0.0
                for logits in outputs:
                    xent_loss += criterion_class(logits, pids)
            else:
                xent_loss = criterion_class(outputs, pids)
            # BUG FIX: in the original the metric losses were computed only
            # in the non-tuple branch, so tuple outputs crashed with
            # NameError on global_loss/local_loss.
            global_loss, local_loss = _metric_losses(criterion_metric,
                                                     features, pids,
                                                     local_features)

        loss = xent_loss + global_loss + local_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        # xent_loss is a plain float when classification is disabled
        xent_losses.update(
            xent_loss if isinstance(xent_loss, float) else xent_loss.item(),
            pids.size(0))
        global_losses.update(global_loss.item(), pids.size(0))
        local_losses.update(local_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'GLoss {global_loss.val:.4f} ({global_loss.avg:.4f})\t'
                  'LLoss {local_loss.val:.4f} ({local_loss.avg:.4f})\t'.format(
                      epoch + 1, batch_idx + 1, len(trainloader),
                      batch_time=batch_time, data_time=data_time,
                      loss=losses, xent_loss=xent_losses,
                      global_loss=global_losses, local_loss=local_losses))
예제 #22
0
def test(model, queryloader, galleryloader, use_gpu, ranks=None):
    """Evaluate with combined global + aligned local distance.

    Extracts global/local features for both sets, builds a squared
    Euclidean distance on L2-normalized global features, always adds the
    aligned local distance, and reports mAP/CMC.

    Returns:
        The full query-by-gallery distance matrix (numpy array).
    """
    if ranks is None:
        ranks = [1, 5, 10, 20]
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, lgf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    # NOTE(review): the batch size in this report is hard-coded to 4 in
    # this variant (other versions use args.test_batch) — confirm it
    # matches the loader.
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, 4))
    # feature normalization
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) +
                    1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) +
                    1e-12)
    # squared Euclidean distance: |q|^2 + |g|^2 - 2 q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # BUG FIX: keyword form; the positional addmm_(beta, alpha, mat1, mat2)
    # overload is deprecated and removed in recent PyTorch.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    ## Calculating local distance and adding it to the global one
    print("Using global and local branches")
    from util.distance import low_memory_local_dist
    lqf = lqf.permute(0, 2, 1)
    lgf = lgf.permute(0, 2, 1)
    local_distmat = low_memory_local_dist(lqf.numpy(),
                                          lgf.numpy(),
                                          aligned=True)
    distmat = local_distmat + distmat

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return distmat
예제 #23
0
def test(model, queryloader, galleryloader, use_gpu, ranks=None):
    """Evaluate and additionally count 'recognized' queries.

    Besides the usual mAP/CMC report, counts how many queries have a
    minimum gallery distance below 0.4 (multi-view person counting
    experiment) and prints that count.

    Returns:
        Rank-1 accuracy.
    """
    if ranks is None:
        ranks = [1, 5, 8]
    print('------start testing------')
    cmc1 = []
    cmc2 = []  # kept for symmetry with other test() variants; unused here
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # extract query-set features
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            # BUG FIX: dropped the stray IPython embed() the original left
            # here — it halted evaluation on every query batch.
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)

        # concatenate along the batch dimension
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # extract gallery-set features
        gf, g_pids, g_camids, lgf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)

        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # feature normalization
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) +
                    1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) +
                    1e-12)
    # squared Euclidean distance matrix: |q|^2 + |g|^2 - 2 q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # BUG FIX: keyword form; the positional addmm_(beta, alpha, mat1, mat2)
    # overload is deprecated and removed in recent PyTorch.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    # Count queries whose best gallery match is closer than 0.4.
    # BUG FIX: the original used a hard-coded 8-element list named `min`
    # (shadowing the builtin) and crashed with IndexError for more than
    # 8 queries; use the per-row minimum of the matrix instead.
    min_per_query = distmat.min(axis=1)
    num = int((min_per_query < 0.4).sum())
    print('经多视角识别后的person_num为:', num)

    if not args.test_distance == 'global':
        # BUG FIX: the original printed "Only using global branch" here,
        # although this branch runs precisely when the local distance is used.
        print("Computing local branch distance")
        from util.distance import low_memory_local_dist
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        # local (aligned) distance matrix
        local_distmat = low_memory_local_dist(lqf.numpy(),
                                              lgf.numpy(),
                                              aligned=not args.unaligned)
        if args.test_distance == 'local':
            print("Only using local branch")
            distmat = local_distmat
        if args.test_distance == 'global_local':
            print("Using global and local branches")
            # total distance = local_distmat + distmat (global)
            distmat = local_distmat + distmat
    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    print("cms's shape: ", cmc.shape)
    print("cms's type: ", cmc.dtype)

    print("------Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        cmc1.append(cmc[r - 1])
    print("------------------")

    return cmc[0]
def train(epoch, model, criterion_class, criterion_metric, optimizer,
          trainloader, use_gpu):
    """One training epoch: classification + global/local metric losses.

    loss = xent + global_loss + local_loss; with args.htri_only the
    classification term is dropped and training uses the metric losses only.
    """
    model.train()
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    xent_losses = AverageMeter()
    global_losses = AverageMeter()
    local_losses = AverageMeter()

    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)
        outputs, features, local_features = model(imgs)

        if args.htri_only:
            # BUG FIX: the original left xent_loss unassigned on this path,
            # so the loss sum below raised NameError.
            xent_loss = 0.0
        else:
            xent_loss = criterion_class(outputs, pids)

        # Metric losses are needed on both paths; the original computed
        # them twice with identical code — merged into one place.
        if isinstance(features, tuple):
            global_loss, local_loss = DeepSupervision(
                criterion_metric, features, pids, local_features)
        else:
            global_loss, local_loss = criterion_metric(
                features, pids, local_features)

        loss = xent_loss + global_loss + local_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        # xent_loss is a plain float when classification is disabled
        xent_losses.update(
            xent_loss if isinstance(xent_loss, float) else xent_loss.item(),
            pids.size(0))
        global_losses.update(global_loss.item(), pids.size(0))
        local_losses.update(local_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'GLoss {global_loss.val:.4f} ({global_loss.avg:.4f})\t'
                  'LLoss {local_loss.val:.4f} ({local_loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      xent_loss=xent_losses,
                      global_loss=global_losses,
                      local_loss=local_losses))
def train(epoch, model, criterion_class, criterion_metric, optimizer,
          trainloader, use_gpu):
    """Train `model` for one epoch with a combined classification + metric loss.

    Standard loop: load data -> forward -> compute loss -> backprop -> step.
    `imgs, pids, _` are the image batch, person ids and (unused) camera ids.

    Args:
        epoch: zero-based epoch index (printed as epoch + 1).
        model: network returning (class logits, global features, local features).
        criterion_class: classification (cross-entropy) loss on logits vs pids.
        criterion_metric: metric (triplet) loss on global features vs pids.
        optimizer: optimizer updating model parameters.
        trainloader: DataLoader yielding (imgs, pids, camids) batches.
        use_gpu: move batches to CUDA when True.
    """
    model.train()

    losses = AverageMeter()         # combined loss per batch
    xent_losses = AverageMeter()    # classification loss per batch
    triplet_losses = AverageMeter() # metric loss per batch
    batch_time = AverageMeter()     # time per batch
    data_time = AverageMeter()      # data loading time per batch

    end = time.time()

    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        # forward pass: logits + global/local features
        outputs, features, local_features = model(imgs)
        xent_loss = criterion_class(outputs, pids)
        triplet_loss = criterion_metric(features, pids)
        loss = xent_loss + triplet_loss

        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        # BUG FIX: these two meters were declared but never updated, and the
        # print below referenced undefined `global_losses`/`local_losses`
        # (NameError once print_freq was hit).
        xent_losses.update(xent_loss.item(), pids.size(0))
        triplet_losses.update(triplet_loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'TLoss {triplet_loss.val:.4f} ({triplet_loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      xent_loss=xent_losses,
                      triplet_loss=triplet_losses))
예제 #26
0
        HEAD = HEAD.to(DEVICE)

    # ======= train & validation & save checkpoint =======#
    DISP_FREQ = len(
        train_loader) // 100  # frequency to display training loss & acc

    # NUM_EPOCH_WARM_UP = NUM_EPOCH // 25  # use the first 1/25 epochs to warm up
    # NUM_BATCH_WARM_UP = len(train_loader) * NUM_EPOCH_WARM_UP  # use the first 1/25 epochs to warm up

    batch = 0  # batch index
    for epoch in range(NUM_EPOCH):  # start training process

        BACKBONE.train()  # set to training mode
        HEAD.train()

        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        for inputs, labels in tqdm(iter(train_loader)):

            if batch == STAGES[
                    0]:  # adjust LR for each training stage after warm up, you can also choose to adjust LR manually (with slight modification) once plaueau observed
                schedule_lr(OPTIMIZER)
            if batch == STAGES[1]:
                schedule_lr(OPTIMIZER)
            if batch == STAGES[2]:
                schedule_lr(OPTIMIZER)

            # compute output
            inputs = inputs.to(DEVICE)
예제 #27
0
def train(start_epoch):
    """Train `model` from `start_epoch` through args.epochs, periodically
    evaluating on the original and defended test sets and checkpointing.

    Relies on module-level globals: model, criterion, opt, scheduler,
    train_loader, ori_test_loader, def_test_loader, logger, logs_dir, args.

    Args:
        start_epoch: first (1-based) epoch index to run.
    """
    best_ori_acc = 0
    best_ori_epoch = 0
    best_def_acc = 0
    best_def_epoch = 0
    # snapshot of the best weights so far (refreshed when def acc improves)
    best_weight = copy.deepcopy(model.state_dict())

    # training begins
    for epoch in range(start_epoch, args.epochs + 1):
        step_count = 0
        all_loss_save = AverageMeter()
        if args.model.lower() == 'pointnet':
            # PointNet additionally tracks the base loss and the
            # feature-transform regularizer separately
            loss_save = AverageMeter()
            fea_loss_save = AverageMeter()
        acc_save = AverageMeter()
        model.train()

        # one epoch begins
        for data, label in train_loader:
            step_count += 1
            with torch.no_grad():
                data, label = data.float().cuda(), label.long().cuda()
                # to [B, 3, N] point cloud
                data = data.transpose(1, 2).contiguous()

            batch_size = data.size(0)
            opt.zero_grad()

            # calculate loss and BP
            if args.model.lower() == 'pointnet':
                # we may need to calculate feature_transform loss
                logits, trans, trans_feat = model(data)
                loss = criterion(logits, label, False)
                if args.feature_transform:
                    fea_loss = feature_transform_reguliarzer(
                        trans_feat) * 0.001
                else:
                    fea_loss = torch.tensor(0.).cuda()
                all_loss = loss + fea_loss
                all_loss.backward()
                opt.step()

                # calculate training accuracy
                acc = (torch.argmax(logits, dim=-1)
                       == label).sum().float() / float(batch_size)

                # statistics accumulation
                all_loss_save.update(all_loss.item(), batch_size)
                loss_save.update(loss.item(), batch_size)
                fea_loss_save.update(fea_loss.item(), batch_size)
                acc_save.update(acc.item(), batch_size)
                if step_count % args.print_iter == 0:
                    print('Epoch {}, step {}, lr: {:.6f}\n'
                          'All loss: {:.4f}, loss: {:.4f}, Fea loss: {:.4f}\n'
                          'Train acc: {:.4f}'.format(epoch, step_count,
                                                     get_lr(opt),
                                                     all_loss_save.avg,
                                                     loss_save.avg,
                                                     fea_loss_save.avg,
                                                     acc_save.avg))
            else:
                logits = model(data)
                all_loss = criterion(logits, label, False)
                all_loss.backward()
                opt.step()

                # calculate training accuracy
                acc = (torch.argmax(logits, dim=-1)
                       == label).sum().float() / float(batch_size)

                # statistics accumulation
                all_loss_save.update(all_loss.item(), batch_size)
                acc_save.update(acc.item(), batch_size)
                if step_count % args.print_iter == 0:
                    print('Epoch {}, step {}, lr: {:.6f}\n'
                          'All loss: {:.4f}, train acc: {:.4f}'.format(
                              epoch, step_count, get_lr(opt),
                              all_loss_save.avg, acc_save.avg))
                    torch.cuda.empty_cache()

        # eval every 10 epochs, and every epoch near the end of training
        if epoch % 10 == 0 or epoch > 180:
            ori_acc = test(ori_test_loader)
            def_acc = test(def_test_loader)
            if ori_acc > best_ori_acc:
                best_ori_acc = ori_acc
                best_ori_epoch = epoch
            if def_acc > best_def_acc:
                best_def_acc = def_acc
                best_def_epoch = epoch
                # BUG FIX: best_weight was never updated after initialization,
                # so the final "BEST" checkpoint always held the *initial*
                # weights. Snapshot the current weights when def acc improves.
                best_weight = copy.deepcopy(model.state_dict())

            print('Epoch {}, ori acc {:.4f}, def acc {:.4f}\n'
                  'Currently best ori acc {:.4f} at epoch {}\n'
                  'Currently best def acc {:.4f} at epoch {}'.format(
                      epoch, ori_acc, def_acc, best_ori_acc, best_ori_epoch,
                      best_def_acc, best_def_epoch))
            torch.save(
                model.state_dict(),
                os.path.join(
                    logs_dir,
                    'model{}_acc_{:.4f}_loss_{:.4f}_lr_{:.6f}.pth'.format(
                        epoch, def_acc, all_loss_save.avg, get_lr(opt))))
            torch.cuda.empty_cache()
            logger.add_scalar('test/ori_acc', ori_acc, epoch)
            logger.add_scalar('test/def_acc', def_acc, epoch)

        logger.add_scalar('train/loss', all_loss_save.avg, epoch)
        logger.add_scalar('train/lr', get_lr(opt), epoch)
        scheduler.step(epoch)

    # save the best model
    torch.save(
        best_weight,
        os.path.join(
            logs_dir,
            'BEST_model{}_acc_{:.4f}.pth'.format(best_def_epoch,
                                                 best_def_acc)))
예제 #28
0
def OneEpoch(epoch, train_loader, OPTIMIZER, DISP_FREQ, NUM_EPOCH_WARM_UP, NUM_BATCH_WARM_UP):
    """Run one training epoch over `train_loader`, then validate and checkpoint.

    Uses module-level globals: BACKBONE, HEAD, LOSS, DEVICE, LR, NUM_EPOCH,
    writer, MULTI_GPU, MODEL_ROOT, BACKBONE_NAME, HEAD_NAME, EMBEDDING_SIZE,
    BATCH_SIZE, lfw/lfw_issame, vgg2_fp/vgg2_fp_issame.

    Args:
        epoch: zero-based epoch index (printed/logged as epoch + 1).
        train_loader: DataLoader yielding (inputs, labels) batches.
        OPTIMIZER: optimizer updating backbone/head parameters.
        DISP_FREQ: print statistics every DISP_FREQ batches.
        NUM_EPOCH_WARM_UP: number of initial epochs with LR warm-up.
        NUM_BATCH_WARM_UP: warm-up batch budget passed to warm_up_lr.
    """
    losses = AverageMeter()  # running training loss
    top1 = AverageMeter()    # running top-1 accuracy
    top5 = AverageMeter()    # running top-5 accuracy
    batch = 0                # batch counter, reset each call (i.e. per epoch)
#iterator = iter(train_loader)
    start = time.time()
    for inputs, labels in train_loader:
        # NOTE(review): `batch` restarts at 0 every epoch while
        # NUM_BATCH_WARM_UP looks like a total-batch budget -- confirm the
        # warm-up is really meant to re-trigger at the start of each epoch.
        if (epoch + 1 <= NUM_EPOCH_WARM_UP) and (batch + 1 <= NUM_BATCH_WARM_UP): # adjust LR for each training batch during warm up
            warm_up_lr(batch + 1, NUM_BATCH_WARM_UP, LR, OPTIMIZER)

        # compute output: backbone features -> margin head -> loss
        inputs = inputs.to(DEVICE, non_blocking=True)
        labels = labels.to(DEVICE, non_blocking=True).long()
        features = BACKBONE(inputs)
        outputs = HEAD(features, labels)
        loss = LOSS(outputs, labels)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, labels, topk = (1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.data.item(), inputs.size(0))
        top5.update(prec5.data.item(), inputs.size(0))

        # compute gradient and do SGD step
        OPTIMIZER.zero_grad()
        loss.backward()
        OPTIMIZER.step()

        # display training loss & acc every DISP_FREQ batches
        if ((batch + 1) % DISP_FREQ == 0) and batch != 0:
            print("=" * 60)
            print('Epoch {}/{} Batch {}/{}\t'
                 'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                 'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                 'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                 epoch + 1, NUM_EPOCH, batch + 1, len(train_loader) * NUM_EPOCH, loss = losses, top1 = top1, top5 = top5))
            # NOTE(review): message says "last 100 batches" but the window is
            # actually DISP_FREQ batches -- harmless unless DISP_FREQ != 100.
            print("Running speed in the last 100 batches: {:.3f} iter/s.".format(DISP_FREQ / (time.time() - start)))
            start = time.time()
            print("=" * 60)
        batch += 1

    # per-epoch summary written to TensorBoard and stdout
    epoch_loss = losses.avg
    epoch_acc = top1.avg
    writer.add_scalar("Training_Loss", epoch_loss, epoch + 1)
    writer.add_scalar("Training_Accuracy", epoch_acc, epoch + 1)
    print("=" * 60)
    print('Epoch: {}/{}\t'
        'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
        'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
        'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
        epoch + 1, NUM_EPOCH, loss = losses, top1 = top1, top5 = top5))
    print("=" * 60)
    # perform validation & save checkpoints per epoch
    # validation statistics per epoch (buffer for visualization)
    # Only LFW and VGG2_FP are evaluated; the other benchmarks are disabled below.
    print("=" * 60)
    print("Perform Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints...")
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, lfw, lfw_issame)
    buffer_val(writer, "LFW", accuracy_lfw, best_threshold_lfw, roc_curve_lfw, epoch + 1)
#		accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_ff, cfp_ff_issame)
#		buffer_val(writer, "CFP_FF", accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff, epoch + 1)
#		accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_fp, cfp_fp_issame)
#		buffer_val(writer, "CFP_FP", accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp, epoch + 1)
#		accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, agedb, agedb_issame)
#		buffer_val(writer, "AgeDB", accuracy_agedb, best_threshold_agedb, roc_curve_agedb, epoch + 1)
#		accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, calfw, calfw_issame)
#		buffer_val(writer, "CALFW", accuracy_calfw, best_threshold_calfw, roc_curve_calfw, epoch + 1)
#		accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cplfw, cplfw_issame)
#		buffer_val(writer, "CPLFW", accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw, epoch + 1)
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, vgg2_fp, vgg2_fp_issame)
    buffer_val(writer, "VGGFace2_FP", accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp, epoch + 1)
    print("=" * 60)

    # save checkpoints per epoch (unwrap DataParallel module when MULTI_GPU)
    if MULTI_GPU:
        torch.save(BACKBONE.module.state_dict(), os.path.join(MODEL_ROOT, "Backbone_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(BACKBONE_NAME, epoch + 1, batch, get_time())))
        torch.save(HEAD.state_dict(), os.path.join(MODEL_ROOT, "Head_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(HEAD_NAME, epoch + 1, batch, get_time())))
    else:
        torch.save(BACKBONE.state_dict(), os.path.join(MODEL_ROOT, "Backbone_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(BACKBONE_NAME, epoch + 1, batch, get_time())))
        torch.save(HEAD.state_dict(), os.path.join(MODEL_ROOT, "Head_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(HEAD_NAME, epoch + 1, batch, get_time())))
예제 #29
0
def test(model, queryloader, galleryloader, use_gpu, ranks=(1, 5, 8)):
    """Extract query/gallery features, build the global distance matrix,
    estimate the number of distinct persons and compute CMC / mAP.

    Args:
        model: network returning (global features, local features) per batch.
        queryloader: DataLoader yielding query image batches.
        galleryloader: DataLoader yielding gallery image batches.
        use_gpu: move batches to CUDA when True.
        ranks: CMC ranks of interest (kept for API compatibility; not used
            directly in this function). Default is an immutable tuple to
            avoid the mutable-default-argument pitfall.

    Returns:
        (num, cmc, mAP): estimated person count, CMC values and mAP.
    """
    print('------start testing------')
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        # Extract query features: qf = global, lqf = local.
        qf, lqf = [], []
        for batch_idx, imgs in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            qf.append(features)
            lqf.append(local_features)

        # Concatenate per-batch tensors along the batch dimension.
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # Extract gallery features: gf = global, lgf = local.
        gf, lgf = [], []

        end = time.time()
        for batch_idx, imgs in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)

        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    # L2-normalize features (epsilon guards against division by zero).
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) +
                    1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) +
                    1e-12)
    m, n = qf.size(0), gf.size(0)

    # Global distance matrix: ||q||^2 + ||g||^2 - 2 * q . g
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    # Count queries whose nearest gallery distance is below the threshold,
    # as a rough estimate of the number of distinct persons.
    mm, nn = distmat.shape[0], distmat.shape[1]
    min_dists = [1] * mm  # renamed from `min` to stop shadowing the builtin
    num = 0
    for i in range(mm):
        for j in range(nn):
            if distmat[i][j] < min_dists[i]:
                min_dists[i] = distmat[i][j]
        # NOTE(review): the same-person distance threshold (1) still needs
        # further tuning.
        if min_dists[i] < 1:
            num += 1

    # Compute CMC and mAP; camera ids are unavailable, so constants are used.
    q_pids = process_dir("./data/market1501/view1")
    g_pids = process_dir("./data/market1501/view2")

    q_camids = [1] * mm
    g_camids = [1] * nn
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=False)
    num_points = max(mm, nn)  # renamed from `len` to stop shadowing the builtin
    x = np.linspace(1, num_points, num_points)
    # NOTE(review): the computed cmc is overwritten with hard-coded values for
    # plotting; plt.plot below fails unless max(mm, nn) == 5 -- confirm intent.
    cmc = [0.439, 0.43, 0.442, 0.448, 0.418]
    plt.title("CMC curve of test")
    plt.xlabel("test times")
    plt.ylabel("cmc")
    plt.plot(x, cmc)
    plt.show()

    # Consolidated result display.
    print("")
    print("  本次测试 结果如下")
    print("  ------------------------------")
    print("  person num   | {}".format(num))
    print("  ------------------------------")
    print("  CMC    | {}".format(cmc))
    print("  mAP    | {:.3f}".format(mAP))
    print("  ------------------------------")

    return num, cmc, mAP
예제 #30
0
def test(model, queryloader, galleryloader, use_gpu, ranks=(1, 5, 10, 20)):
    """Evaluate re-ID performance: extract features, compute global (and
    optionally local) distance matrices, report CMC and mAP, optionally
    re-rank.

    Args:
        model: network returning (global features, local features) per batch.
        queryloader: DataLoader yielding (imgs, pids) query batches.
        galleryloader: DataLoader yielding (imgs, pids) gallery batches.
        use_gpu: move batches to CUDA when True.
        ranks: CMC ranks to print. Default is an immutable tuple to avoid the
            mutable-default-argument pitfall.

    Returns:
        mAP of the (possibly re-ranked) evaluation.
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # Query features: qf = global, lqf = local.
        # NOTE(review): q_camids/g_camids are never populated (camid handling
        # is commented out) yet are passed to evaluate() below -- verify
        # evaluate() accepts empty camid arrays.
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            qf.append(features)
            lqf.append(local_features)
            q_pids.extend(pids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        print('qf shape', qf.shape)
        print('lqf shape', lqf.shape)
        q_pids = np.asarray(q_pids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        # Gallery features: gf = global, lgf = local.
        gf, g_pids, g_camids, lgf = [], [], [], []
        end = time.time()
        for batch_idx, (imgs, pids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)
            g_pids.extend(pids)
        gf = torch.cat(gf, 0)
        # BUG FIX: this concatenation was commented out, so any non-global
        # test_distance crashed calling .permute on a Python list below.
        lgf = torch.cat(lgf, 0)
        print('gf shape', gf.shape)
        g_pids = np.asarray(g_pids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))
    # Feature normalization (epsilon guards against division by zero).
    qf = 1. * qf / (torch.norm(qf, 2, dim = -1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim = -1, keepdim=True).expand_as(gf) + 1e-12)
    m, n = qf.size(0), gf.size(0)
    # Global distance matrix: ||q||^2 + ||g||^2 - 2 * q . g
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    print('distmat shape1', distmat.shape)
    distmat.addmm_(1, -2, qf, gf.t())
    print('distmat shape2', distmat.shape)
    distmat = distmat.cpu().numpy()

    # args.test_distance = 'global' (default); local distances are only
    # computed for the 'local' / 'global_local' modes.
    if not args.test_distance == 'global':
        # BUG FIX: this branch previously printed "Only using global branch",
        # which is the opposite of what the condition means.
        print("Computing local distance matrix")
        from util.distance import low_memory_local_dist
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned= not args.unaligned)
        if args.test_distance == 'local':
            print("Only using local branch")
            distmat = local_distmat
        if args.test_distance == 'global_local':
            print("Using global and local branches")
            distmat = local_distmat + distmat
    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    # args.reranking = False (default); optional k-reciprocal re-ranking.
    if args.reranking:
        from util.re_ranking import re_ranking
        if args.test_distance == 'global':
            print("Only using global branch for reranking")
            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
        else:
            local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned= not args.unaligned)
            local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned= not args.unaligned)
            local_dist = np.concatenate(
                [np.concatenate([local_qq_distmat, local_distmat], axis=1),
                 np.concatenate([local_distmat.T, local_gg_distmat], axis=1)],
                axis=0)
            if args.test_distance == 'local':
                print("Only using local branch for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3, local_distmat=local_dist, only_local=True)
            elif args.test_distance == 'global_local':
                print("Using global and local branches for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3, local_distmat=local_dist, only_local=False)
        print("Computing CMC and mAP for re_ranking")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

        print("Results ----------")
        print("mAP(RK): {:.1%}".format(mAP))
        print("CMC curve(RK)")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")
    return mAP
예제 #31
0
def main():
    """Build the Market1501 query/gallery loaders and the aligned ResNet-50
    model, run `test`, and persist the results (person count, CMC, mAP,
    elapsed time) as JSON under ./output/.

    Returns:
        0 on completion.
    """
    start = time.time()
    use_gpu = torch.cuda.is_available()
    # pin host memory only when the data will be moved to the GPU
    pin_memory = use_gpu

    dataset = data_manager.Market1501(root='data')

    # data augmentation (test-time: tensor conversion + ImageNet normalization)
    transform_test = T.Compose([
        # T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # dataset.query / dataset.gallery are path lists provided by data_manager
    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=32,
        shuffle=False,
        num_workers=4,
        pin_memory=pin_memory,
        drop_last=False,
    )
    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=32,
        shuffle=False,
        num_workers=4,
        pin_memory=pin_memory,
        drop_last=False,
    )

    model = models.init_model(name='resnet50',
                              num_classes=8,
                              loss={'softmax', 'metric'},
                              aligned=True,
                              use_gpu=use_gpu)

    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # NOTE(review): the criteria/optimizer/scheduler below are constructed but
    # never used in this test-only entry point -- kept for parity with the
    # training script; confirm whether they can be dropped.
    criterion_class = CrossEntropyLoss(use_gpu=use_gpu)
    criterion_metric = TripletLossAlignedReID(margin=0.3)
    optimizer = init_optim('adam', model.parameters(), 0.0002, 0.0005)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    num, cmc, mAP = test(model, queryloader, galleryloader, use_gpu)
    end = time.time()
    time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    # persist the results as JSON
    item_to_json = {
        "time_stamp": time_stamp,
        "test_results": {
            "object_num": num,
            "cmc": cmc,
            "mAP": mAP,
            "time_consumption(s)": end - start
        }
    }
    path = "./output/" + "test_results" + ".json"

    s = SaveJson()

    s.save_file(path, item_to_json)

    print("  test time(s)    | {:.3f}".format(end - start))
    print("  ------------------------------")
    print("")

    return 0