def train_RRI(model, Ts: int=7):
    """Run the Restraint / Relaxation / Iteration (RRI) training schedule.

    Each of the ``Ts`` iterations first re-initializes the eigen (fc) layer
    weights, then runs a restraint stage (eigen layers frozen) followed by a
    relaxation stage (all parameters trainable). Afterwards every target
    dataset is evaluated and a final checkpoint is written.

    Args:
        model: wrapped (DataParallel) model exposing ``module.get_fcs()``.
        Ts: number of RRI iterations to run.
    """
    # First 3 iterations use a larger base lr, later ones a smaller one.
    schedule = [0.001] * 3 + [0.0001] * 10

    for rri_iter in range(Ts):
        print('=== T = {} ==='.format(rri_iter))
        print('Replacing eigen layer weight...')
        # Reset every eigen (fc) layer before this round of training.
        for fc_layer in model.module.get_fcs():
            replace_weight(fc_layer)
        print('Replaced.')
        lr = schedule[rri_iter]
        print('--- Restraint ({}) ---'.format(rri_iter))
        train_R(model, lr, rri_iter, fix_eigen_layer=True)
        print('--- Relaxation ({}) ---'.format(rri_iter))
        train_R(model, lr, rri_iter, fix_eigen_layer=False)

    # Evaluate on every configured target dataset; rank1 ends up holding the
    # score of the last dataset evaluated.
    for name in args.target_names:
        print('Evaluating {} ...'.format(name))
        queryloader = testloader_dict[name]['query']
        galleryloader = testloader_dict[name]['gallery']
        rank1 = test(model, queryloader, galleryloader, use_gpu)

    save_checkpoint({
        'state_dict': model.state_dict(),
        'rank1': rank1,
        'epoch': 0,
        'arch': args.arch,
        'optimizer': (),
    }, args.save_dir, prefix='final_')
# Example #2
# 0
def main():
    """Script entry point: set up logging, build the model, criterion and
    optimizer from the global ``args``, then either evaluate once or train."""
    global args
    set_random_seed(1)
    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    log_name = 'test.log' if args.evaluate else 'train.log'
    # Timestamp suffix keeps logs from successive runs distinct.
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    # Redirect stdout so every print() below is also written to the log file.
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('** Arguments **')
    arg_keys = list(args.__dict__.keys())
    arg_keys.sort()
    for key in arg_keys:
        print('{}: {}'.format(key, args.__dict__[key]))
    torch.backends.cudnn.benchmark = True

    datamanager = ImageDataManager(batch_size=args.batch_size)
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()

    print('Building model: {}'.format(args.arch))
    # NOTE(review): 4000 looks like a hard-coded number of identity classes —
    # confirm it matches the training set (same value reused for the loss below).
    model = build_model(args.arch,
                        4000,
                        args.bias,
                        args.bnneck,
                        pretrained=(not args.no_pretrained))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    model.cuda()

    # Evaluation-only mode: score once and exit.
    if args.evaluate:
        evaluate(model, queryloader, galleryloader, args.dist_metric,
                 args.normalize_feature)
        return

    criterion = CrossEntropyLoss(4000)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.0003,
                                 weight_decay=5e-04,
                                 betas=(0.9, 0.999))
    scheduler = build_lr_scheduler(optimizer, args.lr_scheduler, args.stepsize)

    time_start = time.time()
    print('=> Start training')
    for epoch in range(args.start_epoch, args.max_epoch):
        train(epoch, model, criterion, optimizer, trainloader)
        scheduler.step()
        # Checkpoint and evaluate every 20 epochs.
        if (epoch + 1) % 20 == 0:
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'optimizer': optimizer.state_dict(),
                }, args.save_dir)
            evaluate(model, queryloader, galleryloader, args.dist_metric,
                     args.normalize_feature)
    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
def train_base(model):
    """Two-stage base training: warm up the freshly-initialized heads with
    the backbone frozen, then train all layers.

    Setting the environment variable ``sgd`` (to any value) selects the SGD
    optimizer builder; otherwise the default builder is used.

    BUG FIX: the warm-up stage previously called ``get_base_optimizer``
    unconditionally, ignoring the ``sgd`` selection made one line above;
    both stages now use the selected ``optimizer_getter``. The dead
    ``if True:`` wrapper around the warm-up stage was also removed.
    """
    use_sgd = os.environ.get('sgd') is not None

    # Choose the optimizer factory once; it is used for BOTH stages below.
    optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer

    optimizer, scheduler = optimizer_getter(model)

    model.train()
    print('=== train base ===')

    # Stage 1: train only the new layers while the pretrained backbone stays frozen.
    open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']

    print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))

    for epoch in range(10):
        open_specified_layers(model, open_layers)
        train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True)

    # Stage 2: unfreeze everything and train with a fresh optimizer/scheduler.
    print('Done. All layers are open to train for {} epochs'.format(60))
    open_all_layers(model)

    optimizer, scheduler = optimizer_getter(model)

    for epoch in range(60):
        train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
        scheduler.step()

        print('=> Test')

        if (epoch + 1) % args.eval_freq == 0:
            for name in args.target_names:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1 = test(model, queryloader, galleryloader, use_gpu)

    # NOTE(review): rank1 is unbound if no evaluation epoch was hit
    # (i.e. args.eval_freq > 60) — confirm eval_freq always divides an epoch.
    save_checkpoint({
        'state_dict': model.state_dict(),
        'rank1': rank1,
        'epoch': 0,
        'arch': args.arch,
        'optimizer': optimizer.state_dict(),
    }, args.save_dir, prefix='base_')
def train_R(model, lr, T, fix_eigen_layer: bool=False):
    """Run one RRI stage: 20 training epochs either with the eigen (fc)
    layers frozen ('restraint') or with every parameter trainable
    ('relaxation'), with periodic evaluation, then save a checkpoint.

    Args:
        model: wrapped (DataParallel) model exposing ``module.get_fcs()``.
        lr: base learning rate for this stage.
        T: RRI iteration index, used in the checkpoint prefix.
        fix_eigen_layer: freeze the eigen layers when True.
    """
    eigen_layers = model.module.get_fcs()

    if fix_eigen_layer:
        stage_name = 'restraint'
        # Freeze the eigen layers: eval mode and no gradient flow.
        for layer in eigen_layers:
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False
    else:
        stage_name = 'relaxation'
        # Unfreeze everything for the relaxation stage.
        model.train()
        for param in model.parameters():
            param.requires_grad = True

    prefix = '{}_{}_'.format(T, stage_name)

    optimizer, scheduler = get_RRI_optimizer(model, lr)

    for epoch in range(20):
        train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
        scheduler.step()
        print('=> Test')
        # Only evaluate every args.eval_freq epochs.
        if (epoch + 1) % args.eval_freq != 0:
            continue
        for name in args.target_names:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1 = test(model, queryloader, galleryloader, use_gpu)

    save_checkpoint({
        'state_dict': model.state_dict(),
        'rank1': rank1,
        'epoch': 0,
        'arch': args.arch,
        'optimizer': (),
    }, args.save_dir, prefix=prefix)
# Example #5
# 0
def main():
    """Config-driven entry point: build cfg from a YAML file plus command-line
    overrides, then either evaluate (optionally visualizing ranks) or train."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument(
        '--gpu-devices',
        type=str,
        default='',
    )
    # Remaining tokens override individual config entries (yacs-style).
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    # Precedence: defaults < config file < command-line opts.
    cfg = get_default_config()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    # Redirect stdout so every print() below is also written to the log file.
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    print('Show configuration\n{}\n'.format(cfg))
    torch.backends.cudnn.benchmark = True

    datamanager = ImageDataManager(**imagedata_kwargs(cfg))
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()
    print('Building model: {}'.format(cfg.model.name))
    model = build_model(cfg.model.name,
                        datamanager.num_train_pids,
                        'softmax',
                        pretrained=cfg.model.pretrained)

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    model = nn.DataParallel(model).cuda()

    criterion = CrossEntropyLoss(datamanager.num_train_pids,
                                 label_smooth=cfg.loss.softmax.label_smooth)
    optimizer = build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    # Resume epoch counter and optimizer state from a checkpoint if given.
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer)

    # Evaluation-only mode: score once (optionally dump ranked images) and exit.
    if cfg.test.evaluate:
        distmat = evaluate(model,
                           queryloader,
                           galleryloader,
                           dist_metric=cfg.test.dist_metric,
                           normalize_feature=cfg.test.normalize_feature,
                           rerank=cfg.test.rerank,
                           return_distmat=True)
        if cfg.test.visrank:
            visualize_ranked_results(distmat,
                                     datamanager.return_testdataset(),
                                     'image',
                                     width=cfg.data.width,
                                     height=cfg.data.height,
                                     save_dir=osp.join(cfg.data.save_dir,
                                                       'visrank'))
        return

    time_start = time.time()
    print('=> Start training')
    for epoch in range(cfg.train.start_epoch, cfg.train.max_epoch):
        train(epoch,
              cfg.train.max_epoch,
              model,
              criterion,
              optimizer,
              trainloader,
              fixbase_epoch=cfg.train.fixbase_epoch,
              open_layers=cfg.train.open_layers)
        scheduler.step()
        # Evaluate + checkpoint every eval_freq epochs and on the final epoch.
        if (epoch + 1) % cfg.test.eval_freq == 0 or (epoch +
                                                     1) == cfg.train.max_epoch:
            rank1 = evaluate(model,
                             queryloader,
                             galleryloader,
                             dist_metric=cfg.test.dist_metric,
                             normalize_feature=cfg.test.normalize_feature,
                             rerank=cfg.test.rerank)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'rank1': rank1,
                    'optimizer': optimizer.state_dict(),
                }, cfg.data.save_dir)
    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
def main():
    """Video re-ID entry point: set up data/model/losses from the global
    ``args``, then either evaluate or train with xent + triplet losses.

    BUG FIX: the cross-entropy criterion was bound as ``criterion`` but
    consumed as ``criterion_xent`` inside both training loops, which raised
    NameError on the first call to train(); it is now named ``criterion_xent``
    consistently.
    """
    global args

    set_random_seed(args.seed)
    # Restrict visible GPUs unless auto-detection of available GPUs is requested.
    if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    # Redirect stdout so every print() below is also written to the log file.
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('==========\nArgs:{}\n=========='.format(args))

    if use_gpu:
        print('Currently using GPU {}'.format(args.gpu_devices))
        cudnn.benchmark = True
    else:
        warnings.warn('Currently using CPU, however, GPU is highly recommended')

    print('Initializing video data manager')
    dm = VideoDataManager(use_gpu, **video_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent', 'htri'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    model = nn.DataParallel(model).cuda() if use_gpu else model

    # Cross-entropy (identity) loss; name matches its use in train() below.
    criterion_xent = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
    criterion_htri = TripletLoss(margin=args.margin)
    optimizer = init_optimizer(model, **optimizer_kwargs(args))
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs(args))

    # Resume epoch counter and optimizer state from a checkpoint if given.
    if args.resume and check_isfile(args.resume):
        args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=optimizer)

    # Evaluation-only mode: score each target dataset (optionally dump ranked
    # results) and exit.
    if args.evaluate:
        print('Evaluate only')

        for name in args.target_names:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            distmat = test(model, queryloader, galleryloader, args.pool_tracklet_features, use_gpu, return_distmat=True)

            if args.visualize_ranks:
                visualize_ranked_results(
                    distmat, dm.return_testdataset_by_name(name),
                    save_dir=osp.join(args.save_dir, 'ranked_results', name),
                    topk=20
                )
        return

    time_start = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    print('=> Start training')

    # Optional warm-up: train only the open layers, then restore the optimizer
    # state so the main loop starts from the original schedule.
    if args.fixbase_epoch > 0:
        print('Train {} for {} epochs while keeping other layers frozen'.format(args.open_layers, args.fixbase_epoch))
        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu, fixbase=True)

        print('Done. All layers are open to train for {} epochs'.format(args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)

    for epoch in range(args.start_epoch, args.max_epoch):
        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)

        scheduler.step()

        # Evaluate + checkpoint on eval_freq epochs (past start_eval) and on
        # the final epoch.
        if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
            print('=> Test')

            for name in args.target_names:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1 = test(model, queryloader, galleryloader, args.pool_tracklet_features, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

            save_checkpoint({
                'state_dict': model.state_dict(),
                'rank1': rank1,
                'epoch': epoch + 1,
                'arch': args.arch,
                'optimizer': optimizer.state_dict(),
            }, args.save_dir)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
    ranklogger.show_summary()