Example #1
def test(**kwargs):
    opt._parse(kwargs)

    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)
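    # debug output: inspect the first query sample (likely its image path)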
    print('111')
    print(dataloader['query'].dataset.dataset[0][0])

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    reid_evaluator.test(dataloader['query'],
                        dataloader['gallery'],
                        savefig=opt.savefig,
                        i=opt.findid)
    return


def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
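    # The two loaders below feed horizontally flipped copies of the test sets,
    # presumably for test-time flip augmentation (TestTransform's second
    # argument appears to toggle flipping).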
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(dataset.num_train_pids, 1.0, 0.2)
        else:
            model = BFE(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        #state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader,
                                galleryloader,
                                queryFliploader,
                                galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig)
        return

    #xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    if opt.loss == 'triplet':
        embedding_criterion = TripletLoss(opt.margin)
    elif opt.loss == 'lifted':
        embedding_criterion = LiftedStructureLoss(hard_mining=True)
    elif opt.loss == 'weight':
        embedding_criterion = Margin()

    def criterion(triplet_y, softmax_y, labels):
        # total loss = metric-embedding loss over the triplet-branch outputs
        # plus label-smoothed cross-entropy over the softmax-branch outputs
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion,
                                      summary_writer)

    def adjust_lr(optimizer, ep):
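        # Manual schedule (train() passes epoch + 1): linear warmup from 1e-4
        # to 1e-3 over the first 50 epochs in 5-epoch steps, then 1e-3 until
        # epoch 200, 1e-4 until epoch 300, and 1e-5 afterwards.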
        if ep < 50:
            lr = 1e-4 * (ep // 5 + 1)
        elif ep < 200:
            lr = 1e-3
        elif ep < 300:
            lr = 1e-4
        else:
            lr = 1e-5
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate (and save a checkpoint) only every eval_step epochs and at
        # the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or \
                (epoch + 1) == opt.max_epoch:
            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
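
The **kwargs plus opt._parse(kwargs) pattern in test() and train() above suggests these entry points are driven from the command line. A minimal sketch of how they could be wired up, assuming Google's fire package (an assumption; the CLI glue is not shown in the original):

if __name__ == '__main__':
    import fire  # hypothetical dependency, not part of the snippet above
    fire.Fire()  # e.g. python main.py train --dataset=market1501 --max_epoch=300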
Example #3
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    print(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'CBDB':
        if opt.datatype == "person":
            model = CBDB(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = CBDB(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
 
    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        #state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader, 
            queryFliploader, galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)
        return
Example #4
def train(**kwargs):
    opt._parse(kwargs)  # set all program options

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = BFE(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
    elif opt.model_name == 'strongBaseline':
        model = StrongBaseline(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    # update model: swap in an ImageNet-pretrained ResNet-18 with a 10-class head
    model = resnet18(pretrained=True)
    # setting out_features alone does not resize the weights, so replace the layer
    model.fc = nn.Linear(model.fc.in_features, 10)
    # re-derive the parameters to optimize; optim_policy above refers to the discarded re-ID model
    optim_policy = model.parameters()

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    #xent_criterion = nn.CrossEntropyLoss()
    criterion = CrossEntropyLabelSmooth(10)
    epochs = 100
    best = 0.0
    b_e = 0
    for e in range(epochs):
        model.train()
        for i, inputs in enumerate(trainloader):
            imgs, pid, _ = inputs
            if use_gpu:
                imgs, pid = imgs.cuda(), pid.cuda()
            outputs = model(imgs)
            loss = criterion(outputs, pid)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('epoch=%s \t batch loss=%s' % (e, loss.item()))
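    # The loop above never checkpoints the classifier; a minimal sketch of
    # saving the final weights (the file name is hypothetical):
    weights = model.module.state_dict() if use_gpu else model.state_dict()
    torch.save(weights, osp.join(opt.save_dir, 'resnet18_final.pth'))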
Example #5
import codecs
import json
import os
import time
import torch
import pandas as pd
import numpy as np
from models.networks import ResNetBuilder
from utils.transforms import TestTransform
from PIL import Image
import matplotlib.pyplot as plt

print('initializing model ...')
model = ResNetBuilder(751, 1, True)
pretrained_model = 'pytorch-ckpt/softmax_triplet/checkpoint_ep120.pth.tar'
use_gpu = torch.cuda.is_available()
if use_gpu:
    state_dict = torch.load(pretrained_model)['state_dict']
else:
    state_dict = torch.load(pretrained_model, map_location='cpu')['state_dict']
model.load_state_dict(state_dict, strict=False)
print('load pretrained model ' + pretrained_model)
print('model size: {:.5f}M'.format(
    sum(p.numel() for p in model.parameters()) / 1e6))

gf = torch.Tensor(np.load(os.path.join('save', 'feature', 'gf.npy')))
df = pd.read_csv(os.path.join('app', 'data', 'g_data.csv'))

g_pids = df['g_pids'].tolist()
g_camids = df['g_camids'].tolist()
g_path = df['g_path'].tolist()
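
The snippet above loads precomputed gallery features (gf) and their metadata but stops before any retrieval. A minimal ranking sketch, assuming a query feature qf of shape (1, feat_dim) extracted with the same model (qf and the top-10 cut-off are assumptions, not part of the original):

# hypothetical continuation: rank gallery images for a single query feature qf
distmat = torch.cdist(qf, gf)           # Euclidean distances, shape (1, num_gallery)
indices = distmat.argsort(dim=1)[0]     # gallery indices, nearest first
for i in indices[:10].tolist():         # ten closest gallery entries
    print(g_pids[i], g_camids[i], g_path[i])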
Example #6
def train(**kwargs):
    opt._parse(kwargs)

    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))
    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']

        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    optim_policy = model.get_optim_policy()
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(dataloader['query'],
                                dataloader['gallery'],
                                dataloader['queryFlip'],
                                dataloader['galleryFlip'],
                                savefig=opt.savefig)
        return

    criterion = get_loss()

    # optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=5e-4)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=5e-4)

    scheduler = WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10, 'linear')
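    # Assuming WarmupMultiStepLR follows the common (milestones, gamma,
    # warmup_factor, warmup_iters, warmup_method) signature: the learning rate
    # warms up linearly from 0.01 * lr over the first 10 epochs, then is
    # multiplied by 0.1 at epochs 40 and 70.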

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = Trainer(opt, model, optimizer, criterion, summary_writer)

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        scheduler.step()

        reid_trainer.train(epoch, dataloader['train'])

        # evaluate (and save a checkpoint) only every eval_step epochs and at
        # the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or \
                (epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(dataloader['query'],
                                            dataloader['gallery'],
                                            dataloader['queryFlip'],
                                            dataloader['galleryFlip'])

            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))