Example 1
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.a1 == 'inception' else \
            (256, 128)

    dataset, dataset_test, num_classes, train_loader, query_loader, gallery_loader = get_data(
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.workers, args.combine_trainval, args.loss_mode,
        args.instances_num)

    # Create CNN model; generate a 128-dimensional vector through a 2-layer fully-connected network
    cnnmodel = models.create(args.a1,
                             num_features=args.features,
                             dropout=args.dropout)

    # Create the score computation model
    classifiermodel = models.create(args.a2, input_num=args.features)

    # Create the crf_mean_field model
    crfmodel = models.create(args.a3, layer_num=args.layernum)

    # Module CUDA acceleration
    cnnmodel = nn.DataParallel(cnnmodel).cuda()
    classifiermodel = classifiermodel.cuda()
    crfmodel = crfmodel.cuda()

    # Criterion 1: identification loss
    criterion_oim = MULOIMLoss(args.features,
                               num_classes,
                               scalar=args.oim_scalar,
                               momentum=args.oim_momentum)

    # Criterion 2: verification loss
    criterion_veri = PairLoss(args.sampling_rate)

    # Criterion CUDA acceleration
    criterion_oim.cuda()
    criterion_veri.cuda()

    # Parameter groups: separate the pretrained backbone ('base') from the newly added layers

    base_param_ids = set(map(id, cnnmodel.module.base.parameters()))
    new_params = [
        p for p in cnnmodel.parameters() if id(p) not in base_param_ids
    ]
    param_groups = [{
        'params': cnnmodel.module.base.parameters(),
        'lr_mult': 1
    }, {
        'params': new_params,
        'lr_mult': 1
    }, {
        'params': classifiermodel.parameters(),
        'lr_mult': 1
    }, {
        'params': crfmodel.parameters(),
        'lr_mult': 1
    }]

    # Optimizer
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.cnnlr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Schedule Learning rate
    def adjust_lr(epoch):
        # step_size = 60 if args.arch == 'inception' else 40
        lr = args.cnnlr * (0.1**(epoch // 10))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Trainer
    trainer = MULJOINT_MAN_Trainer(cnnmodel, classifiermodel, crfmodel,
                                   criterion_veri, criterion_oim,
                                   args.instances_num)
    start_epoch = best_top1 = 0

    # Evaluation
    evaluator = MsEvaluator(cnnmodel, classifiermodel, crfmodel)
    if args.evaluate == 1:
        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'cnncheckpoint.pth.tar'))
        cnnmodel.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'crfcheckpoint.pth.tar'))
        crfmodel.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'classifiercheckpoint.pth.tar'))
        classifiermodel.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery)
        print(top1)

    else:
        if args.retrain:
            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'cnncheckpoint.pth.tar'))
            cnnmodel.load_state_dict(checkpoint['state_dict'])

            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'crfcheckpoint.pth.tar'))
            crfmodel.load_state_dict(checkpoint['state_dict'])

            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'classifiercheckpoint.pth.tar'))
            classifiermodel.load_state_dict(checkpoint['state_dict'])
        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery)
        for epoch in range(start_epoch, args.epochs):
            adjust_lr(epoch)
            trainer.train(epoch, train_loader, optimizer)

            if True:  # evaluate every epoch (originally: epoch % 6 == 0)

                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.query, dataset.gallery)
                print(top1)
                top1 = top1[0]
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)

                save_checkpoint(
                    {
                        'state_dict': cnnmodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir, 'cnncheckpoint.pth.tar'))

                save_checkpoint(
                    {
                        'state_dict': classifiermodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir,
                                   'classifiercheckpoint.pth.tar'))

                save_checkpoint(
                    {
                        'state_dict': crfmodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir, 'crfcheckpoint.pth.tar'))

            print(
                '\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%} {}\n'.
                format(epoch, top1, best_top1, '*' if is_best else ''))
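
A note on the pattern above: every param group carries a custom 'lr_mult' key, and adjust_lr recomputes the base learning rate each epoch and rescales each group by its multiplier. Below is a minimal, self-contained sketch of that technique; the toy base/head modules and all numbers are illustrative, not from the original repository.

# --- sketch: per-group learning-rate multipliers with step decay ---
import torch
import torch.nn as nn

base = nn.Linear(128, 128)  # stands in for a pretrained backbone
head = nn.Linear(128, 10)   # stands in for newly added layers

# torch.optim keeps unknown keys such as 'lr_mult' inside param_groups,
# so the schedule can read them back with g.get('lr_mult', 1).
optimizer = torch.optim.SGD(
    [{'params': base.parameters(), 'lr_mult': 0.1},
     {'params': head.parameters(), 'lr_mult': 1.0}],
    lr=0.01, momentum=0.9)

def adjust_lr(epoch, base_lr=0.01, step=10):
    lr = base_lr * (0.1 ** (epoch // step))  # divide by 10 every `step` epochs
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(epoch=12)
print([g['lr'] for g in optimizer.param_groups])  # roughly [0.0001, 0.001]
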
Example 2
def main(args):
    if not os.path.exists(args.logs_dir):
        os.mkdir(args.logs_dir)
    if not os.path.exists(args.tensorboard_dir):
        os.mkdir(args.tensorboard_dir)
    tensorboardWrite = SummaryWriter(log_dir=args.tensorboard_dir)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # log file
    if args.evaluate == 1:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_test.txt'))
    else:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    print("Initializing dataset {}".format(args.dataset))
    # from reid.data import get_data ,
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args, args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers)
    print('[len] train: {}, query: {}, gallery: {}'.format(*list(map(len, [train_loader, query_loader, gallery_loader]))))

    # create CNN model
    # cnn_model = models.create(args.a1, args.flow1, args.flow2, num_features=args.features, dropout=args.dropout)
    cnn_model_flow = [models.create(args.a1, args.flow1, num_features=args.features, dropout=args.dropout)]
    if any(args.flow2):
        cnn_model_flow.append(models.create(args.a1, args.flow2, num_features=args.features, dropout=args.dropout))
    # cnn_model_flow1 = cnn_model_flow1.cuda()
    # cnn_model_flow2 = cnn_model_flow2.cuda()


    # create ATT model
    input_num = cnn_model_flow[0].feat.in_features  # 2048
    output_num = args.features  # 128
    att_model = models.create(args.a2, input_num, output_num)
    # att_model.cuda()

    # # ------peixian: two attmodel------
    # att_model_flow1 = models.create(args.a2, input_num, output_num)
    # att_model_flow2 = models.create(args.a2, input_num, output_num)
    # # --------------------------------

    # create classifier model
    class_num = 2
    classifier_model = models.create(args.a3,  output_num, class_num)
    # classifier_model.cuda()

    # CUDA acceleration model

    # cnn_model = torch.nn.DataParallel(cnn_model).to(device)
    # # ------peixian: two attmodel------
    # for att_model in [att_model_flow1, att_model_flow2]:
    #     att_model = att_model.to(device)
    # # --------------------------------
    att_model = att_model.cuda()
    classifier_model = classifier_model.cuda()

    # cnn_model = torch.nn.DataParallel(cnn_model).cuda()
    # cnn_model_flow1 = torch.nn.DataParallel(cnn_model_flow1,device_ids=[0,1,2])
    # cnn_model_flow2 = torch.nn.DataParallel(cnn_model_flow2,device_ids=[0,1,2])
    
    cnn_model_flow[0].cuda()
    cnn_model_flow[0] = torch.nn.DataParallel(cnn_model_flow[0],device_ids=[0])
    if len(cnn_model_flow) > 1:
        cnn_model_flow[1].cuda()
        cnn_model_flow[1] = torch.nn.DataParallel(cnn_model_flow[1],device_ids=[0])



    # att_model = torch.nn.DataParallel(att_model,device_ids=[1,2,3])
    # classifier_model = torch.nn.DataParallel(classifier_model,device_ids=[1,2,3])


    criterion_oim = OIMLoss(args.features, num_classes,
                            scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_veri = PairLoss(args.sampling_rate)
    criterion_oim.cuda()
    criterion_veri.cuda()

    # Optimizer
    optimizer1 = []
    # cnn_model_flow = [cnn_model_flow1, cnn_model_flow2]
    for i in range(len(cnn_model_flow)):
        base_param_ids = set(map(id, cnn_model_flow[i].module.base.parameters()))
        new_params = [p for p in cnn_model_flow[i].module.parameters() if
                      id(p) not in base_param_ids]

        param_groups1 = [
            {'params': cnn_model_flow[i].module.base.parameters(), 'lr_mult': 1},
            {'params': new_params, 'lr_mult': 1}]

        optimizer1.append(torch.optim.SGD(param_groups1, lr=args.lr1,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True))
    
    param_groups2 = [
        {'params': att_model.parameters(), 'lr_mult': 1},
        {'params': classifier_model.parameters(), 'lr_mult': 1}]                        
    optimizer2 = torch.optim.SGD(param_groups2, lr=args.lr2,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    # optimizer1 = torch.optim.Adam(param_groups1, lr=args.lr1, weight_decay=args.weight_decay)
    #
    # optimizer2 = torch.optim.Adam(param_groups2, lr=args.lr2, weight_decay=args.weight_decay)

    # Schedule Learning rate
    def adjust_lr1(epoch):
        lr = args.lr1 * (0.1 ** (epoch/args.lr1step))
        print(lr)
        for o in optimizer1:
            for g in o.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr2(epoch):
        lr = args.lr2 * (0.01 ** (epoch//args.lr2step))
        print(lr)
        for g in optimizer2.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        # # peixian:  two attmodel:
        # for o in optimizer2:
        #     for g in o.param_groups:
        #         g['lr'] = lr * g.get('lr_mult', 1)
        # #

    def adjust_lr3(epoch):
        lr = args.lr3 * (0.000001 ** (epoch //args.lr3step))
        print(lr)
        return lr

    # Trainer
    trainer = SEQTrainer(cnn_model_flow, att_model, classifier_model, criterion_veri, criterion_oim, args.lr3, args.flow1rate)


    # Evaluator
    evaluator = ATTEvaluator(cnn_model_flow, att_model, classifier_model, args.flow1rate)

    best_top1 = 0
    if args.evaluate == 1 or args.pretrain == 1:  # evaluate
        for i in range(len(cnn_model_flow)):
            checkpoint = load_checkpoint(osp.join(args.logs_dir, 'cnnmodel_best_flow' + str(i) + '.pth.tar'))
            cnn_model_flow[i].module.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)
        # top1 = evaluator.evaluate(query_loader, gallery_loader,dataset.num_tracklet)

    if args.evaluate == 0:
        for epoch in range(args.start_epoch, args.epochs):
            adjust_lr1(epoch)
            adjust_lr2(epoch)
            rate = adjust_lr3(epoch)
            trainer.train(epoch, train_loader, optimizer1, optimizer2, rate, tensorboardWrite)

            if (epoch+1) % 1 == 0 or (epoch+1) == args.epochs:  # evaluate every epoch

                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)

                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1
                for i in range(len(cnn_model_flow)):
                    save_cnn_checkpoint({
                        'state_dict': cnn_model_flow[i].module.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, index=i, fpath=osp.join(args.logs_dir, 'cnn_checkpoint_flow' + str(i) + '.pth.tar'))

                save_att_checkpoint({
                    'state_dict': att_model.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=osp.join(args.logs_dir, 'att_checkpoint.pth.tar'))

                save_cls_checkpoint({
                    'state_dict': classifier_model.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=osp.join(args.logs_dir, 'cls_checkpoint.pth.tar'))
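
One detail worth noting: the checkpoints above save cnn_model_flow[i].module.state_dict(), i.e. the model inside the DataParallel wrapper, so the keys carry no 'module.' prefix and the weights load into either a wrapped or an unwrapped model. A minimal sketch of that technique (the file name is illustrative):

# --- sketch: DataParallel-safe checkpointing via .module ---
import torch
import torch.nn as nn

model = nn.Linear(8, 2)
wrapped = nn.DataParallel(model)  # the wrapper prefixes parameter keys with 'module.'

# Saving the inner module keeps the checkpoint portable.
torch.save({'state_dict': wrapped.module.state_dict()}, 'ckpt.pth.tar')

state = torch.load('ckpt.pth.tar')['state_dict']
nn.Linear(8, 2).load_state_dict(state)  # loads into a plain model
wrapped.module.load_state_dict(state)   # and back into the wrapped one
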
Example 3
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # log file

    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, args.train_mode)

    # create CNN model
    cnn_model = models.create(args.a1, num_features=args.features, dropout=args.dropout)

    # create ATT model
    input_num = cnn_model.feat.in_features
    output_num = args.features
    att_model = models.create(args.a2, input_num, output_num)

    # create classifier model
    class_num = 2
    classifier_model = models.create(args.a3,  output_num, class_num)


    # CUDA acceleration model

    cnn_model = torch.nn.DataParallel(cnn_model).cuda()
    att_model = att_model.cuda()
    classifier_model = classifier_model.cuda()


    # Loss function

    criterion_oim = OIMLoss(args.features, num_classes,
                            scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_veri = PairLoss(args.sampling_rate)
    criterion_oim.cuda()
    criterion_veri.cuda()

    # Optimizer
    base_param_ids = set(map(id, cnn_model.module.base.parameters()))
    new_params = [p for p in cnn_model.parameters() if
                  id(p) not in base_param_ids]

    param_groups1 = [
        {'params': cnn_model.module.base.parameters(), 'lr_mult': 1},
        {'params': new_params, 'lr_mult': 1}]
    param_groups2 = [
        {'params': att_model.parameters(), 'lr_mult': 1},
        {'params': classifier_model.parameters(), 'lr_mult': 1}]




    optimizer1 = torch.optim.SGD(param_groups1, lr=args.lr1,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)


    optimizer2 = torch.optim.SGD(param_groups2, lr=args.lr2,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)




    # Schedule Learning rate
    def adjust_lr1(epoch):
        lr = args.lr1 * (0.1 ** (epoch/args.lr1step))
        print(lr)
        for g in optimizer1.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr2(epoch):
        lr = args.lr2 * (0.01 ** (epoch//args.lr2step))
        print(lr)
        for g in optimizer2.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr3(epoch):
        lr = args.lr3 * (0.000001 ** (epoch //args.lr3step))
        print(lr)
        return lr


    best_top1 = 0
    start_epoch = args.start_epoch
    if args.evaluate == 1:
        print('Evaluate:')
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode, criterion_veri)
        top1, mAP = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)

    elif args.test == 1:
        print('Test:')
        checkpoint1 = load_checkpoint(osp.join(args.logs_dir, 'cnnmodel_best.pth.tar'))
        cnn_model.load_state_dict(checkpoint1['state_dict'])
        checkpoint2 = load_checkpoint(osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint2['state_dict'])
        checkpoint3 = load_checkpoint(osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint3['state_dict'])
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode, criterion_veri)
        mAP, top1, top5, top10, top20 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)

    else:
        tensorboard_test_logdir = osp.join(args.logs_dir, 'test_log')
        writer = SummaryWriter(log_dir=tensorboard_test_logdir)
        if args.resume == 1:
            checkpoint1 = load_checkpoint(osp.join(args.logs_dir, 'cnn_checkpoint.pth.tar'))
            cnn_model.load_state_dict(checkpoint1['state_dict'])
            checkpoint2 = load_checkpoint(osp.join(args.logs_dir, 'att_checkpoint.pth.tar'))
            att_model.load_state_dict(checkpoint2['state_dict'])
            checkpoint3 = load_checkpoint(osp.join(args.logs_dir, 'cls_checkpoint.pth.tar'))
            classifier_model.load_state_dict(checkpoint3['state_dict'])
            start_epoch = checkpoint1['epoch']
            best_top1 = checkpoint1['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}"
                  .format(start_epoch, best_top1))
        # Trainer
        tensorboard_train_logdir = osp.join(args.logs_dir, 'train_log')
        trainer = SEQTrainer(cnn_model, att_model, classifier_model, criterion_veri, criterion_oim, args.train_mode, args.lr3, tensorboard_train_logdir)
        # Evaluator
        if args.train_mode == 'cnn':
            evaluator = CNNEvaluator(cnn_model, args.train_mode)
        elif args.train_mode == 'cnn_rnn':
            evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode, criterion_veri)
        else:
            raise RuntimeError('Unknown train_mode: {}'.format(args.train_mode))

        for epoch in range(start_epoch, args.epochs):
            adjust_lr1(epoch)
            adjust_lr2(epoch)
            rate = adjust_lr3(epoch)
            trainer.train(epoch, train_loader, optimizer1, optimizer2, rate)

            if epoch % 1 == 0:  # evaluate every epoch
                mAP, top1, top5, top10, top20 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)
                writer.add_scalar('test/mAP', mAP, epoch+1)
                writer.add_scalar('test/top1', top1, epoch+1)
                writer.add_scalar('test/top5', top5, epoch+1)
                writer.add_scalar('test/top10', top10, epoch+1)
                writer.add_scalar('test/top20', top20, epoch+1)
                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1

                save_cnn_checkpoint({
                    'state_dict': cnn_model.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=osp.join(args.logs_dir, 'cnn_checkpoint.pth.tar'))

                if args.train_mode == 'cnn_rnn':
                    save_att_checkpoint({
                        'state_dict': att_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'att_checkpoint.pth.tar'))

                    save_cls_checkpoint({
                        'state_dict': classifier_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'cls_checkpoint.pth.tar'))

        print('Test: ')
        checkpoint1 = load_checkpoint(osp.join(args.logs_dir, 'cnnmodel_best.pth.tar'))
        cnn_model.load_state_dict(checkpoint1['state_dict'])
        checkpoint2 = load_checkpoint(osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint2['state_dict'])
        checkpoint3 = load_checkpoint(osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint3['state_dict'])
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode, criterion_veri)
        mAP, top1, top5, top10, top20 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)
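
The args.resume branch above works because every checkpoint dict stores 'state_dict' alongside the bookkeeping fields 'epoch' and 'best_top1'. A minimal round trip in the same layout (the path and numbers are illustrative):

# --- sketch: save/resume with epoch and best-score bookkeeping ---
import os
import torch
import torch.nn as nn

model = nn.Linear(16, 4)
ckpt_path = 'cnn_checkpoint.pth.tar'  # illustrative path

torch.save({'state_dict': model.state_dict(),
            'epoch': 7,
            'best_top1': 0.83}, ckpt_path)

start_epoch, best_top1 = 0, 0.0
if os.path.exists(ckpt_path):
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    start_epoch = checkpoint['epoch']
    best_top1 = checkpoint['best_top1']
print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
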
Example 4
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'  # note: only takes effect if set before CUDA is first initialized
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # log file: bump the run index so an existing log is never overwritten
    run = 0
    if args.evaluate == 1:
        while osp.exists(osp.join(args.logs_dir, 'log_test{}.txt'.format(run))):
            run += 1

        sys.stdout = Logger(osp.join(args.logs_dir, 'log_test{}.txt'.format(run)))
    else:
        while osp.exists(osp.join(args.logs_dir, 'log_train{}.txt'.format(run))):
            run += 1

        sys.stdout = Logger(osp.join(args.logs_dir, 'log_train{}.txt'.format(run)))
    print("==========\nArgs:{}\n==========".format(args))

    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, only_eval=False)

    # create model
    cnn_model = models.create(args.arch1, num_features=args.features, dropout=args.dropout, numclasses=num_classes)
    siamese_model = models.create(args.arch2, input_num=args.features, output_num=512, class_num=2)
    siamese_model_uncorr = models.create('siamese_video', input_num=2048, output_num=512, class_num=2)

    cnn_model = torch.nn.DataParallel(cnn_model).to(device)
    siamese_model = siamese_model.to(device)
    siamese_model_uncorr = siamese_model_uncorr.to(device)

    # Loss function
    criterion_corr = OIMLoss(2048, num_classes, scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_uncorr = OIMLoss(2048, num_classes, scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_veri = PairLoss()

    criterion_corr.to(device)
    criterion_uncorr.to(device)
    criterion_veri.to(device)

    # Optimizer
    base_param_ids = set(map(id, cnn_model.module.backbone.parameters()))
    new_params = [p for p in cnn_model.parameters() if
                  id(p) not in base_param_ids]

    param_groups = [
        {'params': cnn_model.module.backbone.parameters(), 'lr_mult': 1},
        {'params': new_params, 'lr_mult': 2},
        {'params': siamese_model.parameters(), 'lr_mult': 2},
        {'params': siamese_model_uncorr.parameters(), 'lr_mult': 2}
        ]

    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)

    def adjust_lr(epoch):
        lr = args.lr * (0.1 ** (epoch//args.lr_step))
        print(lr)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Evaluator (testing)
    evaluator = ATTEvaluator(cnn_model, siamese_model, only_eval=False)
    best_top1 = 0
    if args.evaluate == 1:
        load_best_checkpoint(cnn_model, siamese_model)
        top1 = evaluator.evaluate(dataset.query, dataset.gallery, query_loader, gallery_loader, args.logs_dir, args.visual, args.rerank)
        print('best rank-1 accuracy is', top1)
    else:
        # Trainer: instantiate the trainer class
        tensorboard_train_logdir = osp.join(args.logs_dir, 'train_log')
        remove_repeat_tensorboard_files(tensorboard_train_logdir)

        trainer = SEQTrainer(cnn_model, siamese_model, siamese_model_uncorr, criterion_veri, criterion_corr, criterion_uncorr,
                             tensorboard_train_logdir)
        for epoch in range(args.start_epoch, args.epochs):
            adjust_lr(epoch)
            trainer.train(epoch, train_loader, optimizer)

            # Evaluate every 5 epochs (every 3 once past epoch 30) and at the final epoch.
            if (epoch+1) % 5 == 0 or (epoch+1) == args.epochs or ((epoch+1) > 30 and (epoch+1) % 3 == 0):
                top1 = evaluator.evaluate(dataset.query, dataset.gallery, query_loader, gallery_loader, args.logs_dir, args.visual, args.rerank)
                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1
                save_checkpoint(cnn_model, siamese_model, epoch, best_top1, is_best)
                del top1
                torch.cuda.empty_cache()
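
The while osp.exists(...) loop above probes log_test0.txt, log_test1.txt, ... until it finds an unused index, so a rerun never overwrites an earlier log. The same idea as a small standalone helper (the function name is made up for illustration):

# --- sketch: run-indexed log file names ---
import os.path as osp

def next_log_path(logs_dir, prefix='log_train'):
    run = 0
    while osp.exists(osp.join(logs_dir, '{}{}.txt'.format(prefix, run))):
        run += 1  # this index is taken; try the next one
    return osp.join(logs_dir, '{}{}.txt'.format(prefix, run))

print(next_log_path('.'))  # e.g. ./log_train0.txt in a fresh directory
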
Example 5
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # log file
    if args.evaluate == 1:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_test.txt'))
    else:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # from reid.data import get_data ,
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, args.train_mode)

    # create CNN model
    cnn_model = models.create(args.a1,
                              num_features=args.features,
                              dropout=args.dropout)

    # create ATT model
    input_num = cnn_model.feat.in_features  # 2048
    output_num = args.features  # 128
    att_model = models.create(args.a2, input_num, output_num)

    # create classifier model
    class_num = 2
    classifier_model = models.create(args.a3, output_num, class_num)

    # CUDA acceleration model

    cnn_model = torch.nn.DataParallel(cnn_model).to(device)
    att_model = att_model.to(device)
    classifier_model = classifier_model.to(device)

    criterion_oim = OIMLoss(args.features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    criterion_veri = PairLoss(args.sampling_rate)
    criterion_oim.to(device)
    criterion_veri.to(device)

    # Optimizer
    base_param_ids = set(map(id, cnn_model.module.base.parameters()))
    new_params = [
        p for p in cnn_model.parameters() if id(p) not in base_param_ids
    ]

    param_groups1 = [{
        'params': cnn_model.module.base.parameters(),
        'lr_mult': 1
    }, {
        'params': new_params,
        'lr_mult': 1
    }]
    param_groups2 = [{
        'params': att_model.parameters(),
        'lr_mult': 1
    }, {
        'params': classifier_model.parameters(),
        'lr_mult': 1
    }]

    optimizer1 = torch.optim.SGD(param_groups1,
                                 lr=args.lr1,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)

    optimizer2 = torch.optim.SGD(param_groups2,
                                 lr=args.lr2,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)

    # optimizer1 = torch.optim.Adam(param_groups1, lr=args.lr1, weight_decay=args.weight_decay)
    #
    # optimizer2 = torch.optim.Adam(param_groups2, lr=args.lr2, weight_decay=args.weight_decay)

    # Schedule Learning rate
    def adjust_lr1(epoch):
        lr = args.lr1 * (0.1**(epoch / args.lr1step))
        print(lr)
        for g in optimizer1.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr2(epoch):
        lr = args.lr2 * (0.01**(epoch // args.lr2step))
        print(lr)
        for g in optimizer2.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr3(epoch):
        lr = args.lr3 * (0.000001**(epoch // args.lr3step))
        print(lr)
        return lr

    # Trainer
    trainer = SEQTrainer(cnn_model, att_model, classifier_model,
                         criterion_veri, criterion_oim, args.train_mode,
                         args.lr3)

    # Evaluator
    if args.train_mode == 'cnn':
        evaluator = CNNEvaluator(cnn_model, args.train_mode)
    elif args.train_mode == 'cnn_rnn':
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model,
                                 args.train_mode)

    else:
        raise RuntimeError('Unknown train_mode: {}'.format(args.train_mode))

    best_top1 = 0
    if args.evaluate == 1:  # evaluate
        checkpoint = load_checkpoint(
            osp.join(args.logs_dir, 'cnnmodel_best.pth.tar'))
        cnn_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader,
                                  dataset.queryinfo, dataset.galleryinfo)

    else:
        for epoch in range(args.start_epoch, args.epochs):
            adjust_lr1(epoch)
            adjust_lr2(epoch)
            rate = adjust_lr3(epoch)
            trainer.train(epoch, train_loader, optimizer1, optimizer2, rate)

            if (epoch + 1) % 3 == 0 or (epoch + 1) == args.epochs:

                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.queryinfo,
                                          dataset.galleryinfo)
                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1

                save_cnn_checkpoint(
                    {
                        'state_dict': cnn_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir, 'cnn_checkpoint.pth.tar'))

                if args.train_mode == 'cnn_rnn':
                    save_att_checkpoint(
                        {
                            'state_dict': att_model.state_dict(),
                            'epoch': epoch + 1,
                            'best_top1': best_top1,
                        },
                        is_best,
                        fpath=osp.join(args.logs_dir,
                                       'att_checkpoint.pth.tar'))

                    save_cls_checkpoint(
                        {
                            'state_dict': classifier_model.state_dict(),
                            'epoch': epoch + 1,
                            'best_top1': best_top1,
                        },
                        is_best,
                        fpath=osp.join(args.logs_dir,
                                       'cls_checkpoint.pth.tar'))
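
Note the subtle difference between the schedules above: adjust_lr1 divides with '/', so epoch / args.lr1step is fractional and the rate shrinks a little every epoch, while adjust_lr2 and adjust_lr3 use '//' and only drop at whole multiples of the step. A standalone comparison with illustrative numbers:

# --- sketch: smooth vs. staircase decay ('/' vs. '//') ---
def smooth_lr(epoch, base_lr=0.001, step=20):
    return base_lr * (0.1 ** (epoch / step))    # decays a bit every epoch

def staircase_lr(epoch, base_lr=0.001, step=20):
    return base_lr * (0.01 ** (epoch // step))  # drops only at epochs 20, 40, ...

for e in (0, 10, 20, 30):
    print(e, smooth_lr(e), staircase_lr(e))
# 0  0.001       0.001
# 10 ~0.000316   0.001
# 20 0.0001      1e-05
# 30 ~3.16e-05   1e-05
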
Example 6
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # log file
    if args.evaluate == 1:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_test.txt'))
    else:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # from reid.data import get_data ,
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, args.train_mode)

    # create CNN model
    cnn_model = models.create(args.a1, num_features=args.features, dropout=args.dropout)

    # create ATT model
    input_num = cnn_model.feat.in_features  # 2048
    output_num = args.features  # 128
    att_model = models.create(args.a2, input_num, output_num)

    # create classifier model
    class_num = 2
    classifier_model = models.create(args.a3,  output_num, class_num)

    # CUDA acceleration model

    cnn_model = torch.nn.DataParallel(cnn_model).to(device)
    att_model = att_model.to(device)
    classifier_model = classifier_model.to(device)

    criterion_oim = OIMLoss(args.features, num_classes,
                            scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_veri = PairLoss(args.sampling_rate)
    criterion_oim.to(device)
    criterion_veri.to(device)

    # Optimizer
    base_param_ids = set(map(id, cnn_model.module.base.parameters()))
    new_params = [p for p in cnn_model.parameters() if
                  id(p) not in base_param_ids]

    param_groups1 = [
        {'params': cnn_model.module.base.parameters(), 'lr_mult': 1},
        {'params': new_params, 'lr_mult': 1}]
    param_groups2 = [
        {'params': att_model.parameters(), 'lr_mult': 1},
        {'params': classifier_model.parameters(), 'lr_mult': 1}]

    optimizer1 = torch.optim.SGD(param_groups1, lr=args.lr1,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)

    optimizer2 = torch.optim.SGD(param_groups2, lr=args.lr2,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)
    # optimizer1 = torch.optim.Adam(param_groups1, lr=args.lr1, weight_decay=args.weight_decay)
    #
    # optimizer2 = torch.optim.Adam(param_groups2, lr=args.lr2, weight_decay=args.weight_decay)

    # Schedule Learning rate
    def adjust_lr1(epoch):
        lr = args.lr1 * (0.1 ** (epoch/args.lr1step))
        print(lr)
        for g in optimizer1.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr2(epoch):
        lr = args.lr2 * (0.01 ** (epoch//args.lr2step))
        print(lr)
        for g in optimizer2.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr3(epoch):
        lr = args.lr3 * (0.000001 ** (epoch //args.lr3step))
        print(lr)
        return lr

    # Trainer
    trainer = SEQTrainer(cnn_model, att_model, classifier_model, criterion_veri, criterion_oim, args.train_mode, args.lr3)

    # Evaluator
    if args.train_mode == 'cnn':
        evaluator = CNNEvaluator(cnn_model, args.train_mode)
    elif args.train_mode == 'cnn_rnn':
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode)

    else:
        raise RuntimeError('Unknown train_mode: {}'.format(args.train_mode))

    best_top1 = 0
    if args.evaluate == 1:  # evaluate
        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'cnnmodel_best.pth.tar'))
        cnn_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)

    else:
        for epoch in range(args.start_epoch, args.epochs):
            adjust_lr1(epoch)
            adjust_lr2(epoch)
            rate = adjust_lr3(epoch)
            trainer.train(epoch, train_loader, optimizer1, optimizer2, rate)

            if (epoch+1) % 3 == 0 or (epoch+1) == args.epochs:

                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)
                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1

                save_cnn_checkpoint({
                    'state_dict': cnn_model.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=osp.join(args.logs_dir, 'cnn_checkpoint.pth.tar'))

                if args.train_mode == 'cnn_rnn':
                    save_att_checkpoint({
                        'state_dict': att_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'att_checkpoint.pth.tar'))

                    save_cls_checkpoint({
                        'state_dict': classifier_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'cls_checkpoint.pth.tar'))
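
All six examples open with the same reproducibility preamble. For reference, here it is gathered into one helper; note that cudnn.benchmark = True trades bit-exact reproducibility for speed, so drop it if you need fully deterministic runs.

# --- sketch: the shared seeding preamble as a helper ---
import numpy as np
import torch
from torch.backends import cudnn

def set_seed(seed):
    np.random.seed(seed)              # NumPy RNG (sampling, shuffling)
    torch.manual_seed(seed)           # PyTorch CPU (and default CUDA) RNG
    torch.cuda.manual_seed_all(seed)  # all GPUs; a no-op without CUDA
    cudnn.benchmark = True            # let cuDNN pick the fastest conv algorithms

set_seed(1)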