Code example #1
def train(dataset, batch_size, max_epoch):
    """Run the re-ID training loop for `max_epoch` epochs via `reid_train_job`.

    Args:
        dataset: re-ID dataset object whose `.train` is a sequence of
            (image, person_id, camera_id) triples.
        batch_size: number of samples fed to each training step.
        max_epoch: total number of epochs to run.

    Side effects: prints per-epoch losses, and every `args.eval_freq` epochs
    (plus once at the end) prints mAP / CMC results from `eval(dataset)`.
    """
    # Only the person IDs are needed (for identity-balanced sampling);
    # images and camera IDs were previously unpacked into unused locals.
    _, train_id, _ = map(list, zip(*dataset.train))
    train_dataset = ImageDataset(dataset.train,
                                 flag='train',
                                 process_size=(args.image_height,
                                               args.image_width))
    for epoch in range(max_epoch):
        losses_t = AverageMeter()
        losses_x = AverageMeter()
        losses = AverageMeter()
        # Re-draw an identity-balanced ordering of sample indices each epoch.
        indices = list(RandomIdentitySampler(train_id, batch_size,
                                             args.num_instances))
        for step in range(len(indices) // batch_size):
            try:
                # train_batch[0,1,2] are [imgs, pid, cam_id]
                train_batch = train_dataset.__getbatch__(
                    indices[step * batch_size:(step + 1) * batch_size])
            except Exception:
                # If fetching this slice fails, fall back to the last full
                # batch. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                train_batch = train_dataset.__getbatch__(
                    indices[-batch_size:])
            loss, loss_t, loss_x = reid_train_job(
                train_batch[0], train_batch[1].astype(np.float32)).get()

            losses_t.update(loss_t.numpy_list()[0][0], batch_size)
            losses_x.update(loss_x.numpy_list()[0][0], batch_size)
            losses.update(loss.numpy_list()[0][0], batch_size)
        print('epoch: [{0}/{1}]\t'
              'Loss_t {loss_t.val:.4f} ({loss_t.avg:.4f})\t'
              'Loss_x {loss_x.val:.4f} ({loss_x.avg:.4f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch + 1,
                                                              args.max_epoch,
                                                              loss_t=losses_t,
                                                              loss_x=losses_x,
                                                              loss=losses))
        # Periodic evaluation (skipped on the final epoch; see final test below).
        if (epoch + 1) % args.eval_freq == 0 and (epoch + 1) != args.max_epoch:
            cmc, mAP = eval(dataset)
            print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
            print('mAP: {:.1%}'.format(mAP))
            print('CMC curve')
            for r in [1, 5, 10]:
                print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
            print("=".ljust(66, "="))
    print('=> End training')

    print('=> Final test')
    cmc, mAP = eval(dataset)
    print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in [1, 5, 10]:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print("=".ljust(66, "="))
Code example #2 — file: reid.py, project: Oneflow-Inc/models
def evaluate(model, dataset):
    """Evaluate `model` on the query/gallery split of `dataset`.

    Extracts features for both splits, computes a query-gallery distance
    matrix (optionally with person re-ranking), then CMC and mAP.

    Args:
        model: feature-extraction network; called as `model(imgs)` in eval mode.
        dataset: object exposing `.query` and `.gallery` sample lists.

    Returns:
        (rank1, mAP): CMC rank-1 accuracy and mean average precision.
    """
    query_dataset = ImageDataset(dataset.query,
                                 flag="test",
                                 process_size=(args.image_height,
                                               args.image_width))
    gallery_dataset = ImageDataset(dataset.gallery,
                                   flag="test",
                                   process_size=(args.image_height,
                                                 args.image_width))
    eval_batch = args.eval_batch_size
    model.eval()
    dist_metric = args.dist_metric  # distance metric, ['euclidean', 'cosine']
    rerank = args.rerank  # use person re-ranking

    def _extract_features(image_dataset):
        """Return (features, pids, camids) as numpy arrays for the whole set."""
        feats, all_pids, all_camids = [], [], []
        ind = list(range(len(image_dataset)))
        # Ceil division so the final partial batch is included; the original
        # floor division silently dropped the last `len % eval_batch` samples,
        # which biases the reported CMC/mAP.
        num_batches = (len(image_dataset) + eval_batch - 1) // eval_batch
        for i in range(num_batches):
            imgs, pids, camids = image_dataset.__getbatch__(
                ind[i * eval_batch:(i + 1) * eval_batch])
            imgs = flow.Tensor(np.array(imgs)).to("cuda")
            with flow.no_grad():
                features = model(imgs)
            feats.append(features.numpy())
            all_pids.extend(pids)
            all_camids.extend(camids)
        return (np.concatenate(feats, 0), np.asarray(all_pids),
                np.asarray(all_camids))

    print("Extracting features from query set ...")
    # query features, query person IDs and query camera IDs
    qf, q_pids, q_camids = _extract_features(query_dataset)
    print("Done, obtained {}-by-{} matrix".format(qf.shape[0], qf.shape[1]))

    print("Extracting features from gallery set ...")
    # gallery features, gallery person IDs and gallery camera IDs
    gf, g_pids, g_camids = _extract_features(gallery_dataset)
    print("Done, obtained {}-by-{} matrix".format(gf.shape[0], gf.shape[1]))

    print("Computing distance matrix with metric={} ...".format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)

    if rerank:
        print("Applying person re-ranking ...")
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print("Computing CMC and mAP ...")
    cmc, mAP = _eval(distmat, q_pids, g_pids, q_camids, g_camids)

    print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in [1, 5, 10]:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("=".ljust(66, "="))

    return cmc[0], mAP
Code example #3 — file: reid.py, project: Oneflow-Inc/models
def train(model, dataset, num_classes, optimizer, scheduler):
    """Eager-mode training loop: triplet + label-smoothed cross-entropy loss.

    Args:
        model: network returning (class_logits, features) for a batch of images.
        dataset: re-ID dataset object exposing `.train`, `.query`, `.gallery`.
        num_classes: number of identity classes (for the classification loss).
        optimizer: optimizer over `model`'s parameters.
        scheduler: LR scheduler stepped once per epoch.

    Side effects: prints per-epoch losses, periodically evaluates and saves
    the best checkpoint, and saves the final weights to `args.flow_weight`.
    """
    batch_size = args.batch_size

    # Best (rank1 + mAP) / 2 seen so far; used to decide checkpointing.
    best_rank = 0
    print("=> Start training")

    # loss
    criterion_t = TripletLoss(margin=args.margin).to("cuda")
    criterion_x = CrossEntropyLossLS(num_classes=num_classes,
                                     epsilon=args.epsilon).to("cuda")
    weight_t = args.weight_t
    weight_x = 1.0 - args.weight_t

    _, train_id, _ = map(list, zip(*dataset.train))
    train_dataset = ImageDataset(dataset.train,
                                 flag="train",
                                 process_size=(args.image_height,
                                               args.image_width))
    # *****training*******#
    for epoch in range(0, args.max_epoch):
        # shift to train
        model.train()
        # Identity-balanced ordering of sample indices, re-drawn each epoch.
        indices = list(RandomIdentitySampler(train_id, batch_size,
                                             args.num_instances))
        for step in range(len(indices) // batch_size):
            try:
                # train_batch[0,1,2] are [imgs, pid, cam_id]
                imgs, pids, _ = train_dataset.__getbatch__(
                    indices[step * batch_size:(step + 1) * batch_size])
            except Exception:
                # Fall back to the last full batch if fetching fails.
                # (Was a bare `except:`, which also caught KeyboardInterrupt.)
                imgs, pids, _ = train_dataset.__getbatch__(
                    indices[-batch_size:])
            imgs = flow.Tensor(np.array(imgs)).to("cuda")
            pids = flow.Tensor(np.array(pids), dtype=flow.int32).to("cuda")
            outputs, features = model(imgs)
            loss_t = compute_loss(criterion_t, features, pids)
            loss_x = compute_loss(criterion_x, outputs, pids)

            loss = weight_t * loss_t + weight_x * loss_x
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        scheduler.step()

        # Loss values below are from the last step of the epoch.
        print(
            "epoch:",
            epoch + 1,
            "loss_t:",
            loss_t.numpy(),
            "loss_x:",
            loss_x.numpy(),
            "loss:",
            loss.numpy(),
            "lr:",
            optimizer.param_groups[0]["lr"],
        )

        # *****testing********#
        if (epoch + 1) % args.eval_freq == 0 and (epoch + 1) != args.max_epoch:
            rank1, mAP = evaluate(model, dataset)
            score = (rank1 + mAP) / 2.0
            if score > best_rank:
                # Bug fix: `best_rank` was never updated before, so every
                # evaluation counted as a new best and overwrote checkpoints.
                best_rank = score
                flow.save(model.state_dict(),
                          args.flow_weight + "_" + str(epoch))
    print("=> End training")

    print("=> Final test")
    rank1, _ = evaluate(model, dataset)
    flow.save(model.state_dict(), args.flow_weight)