Example #1
def main(config):

    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directories
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode

        # automatically resume model from the latest one
        start_train_epoch = 0
        if config.auto_resume_training_from_lastest_steps:
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

        # test
        base.save_model(config.total_train_epochs)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC,
                             'none')

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        logger(
            'Time: {}; Test Dataset: {}, \nprecision: {} \nrecall: {}\nthresholds: {}'
            .format(time_now(), config.test_dataset, pres, recalls,
                    thresholds))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC,
                             'none')

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
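
The main() above relies on a few small utilities: make_dirs, time_now and a callable Logger. The sketch below shows one plausible implementation of each, assuming the project's helpers behave this way; it is an illustration, not the repository's actual code.

import os
import datetime


def make_dirs(path):
    # create the directory (and any missing parents) if it does not exist yet
    os.makedirs(path, exist_ok=True)


def time_now():
    # timestamp string used throughout the logs
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


class Logger:
    # callable logger that prints a message and appends it to a text file
    def __init__(self, log_file):
        self.log_file = log_file

    def __call__(self, message):
        print(message)
        with open(self.log_file, 'a') as f:
            f.write('{}\n'.format(message))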
Example #2
def test_with_graph(config, base, loader):

    base.set_eval()

    target_query_features_meter, target_query_pids_meter, target_query_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()
    target_gallery_features_meter, target_gallery_pids_meter, target_gallery_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()

    target_loaders = [loader.target_query_loader, loader.target_gallery_loader]

    print(time_now(), 'features start')

    with torch.no_grad():
        for target_loader_id, target_loader in enumerate(target_loaders):
            for target_data in target_loader:
                target_images, target_pids, target_cids = target_data
                target_images = target_images.to(base.device)
                target_features, _, target_local_features, _ = \
                    base.feature_extractor(target_images)
                target_graph_global_features = base.graph(
                    target_local_features, target_features)
                target_graph_bn_global_features = base.classifier3(
                    target_graph_global_features).squeeze()

                if target_loader_id == 0:
                    target_query_features_meter.update(
                        target_graph_bn_global_features.data)
                    target_query_pids_meter.update(target_pids)
                    target_query_cids_meter.update(target_cids)
                elif target_loader_id == 1:
                    target_gallery_features_meter.update(
                        target_graph_bn_global_features.data)
                    target_gallery_pids_meter.update(target_pids)
                    target_gallery_cids_meter.update(target_cids)

    print(time_now(), 'features done')

    target_query_features = target_query_features_meter.get_val_numpy()
    target_gallery_features = target_gallery_features_meter.get_val_numpy()

    target_mAP, target_CMC = ReIDEvaluator(
        dist='cosine', mode=config.test_mode).evaluate(
            target_query_features, target_query_pids_meter.get_val_numpy(),
            target_query_cids_meter.get_val_numpy(), target_gallery_features,
            target_gallery_pids_meter.get_val_numpy(),
            target_gallery_cids_meter.get_val_numpy())

    return target_mAP, target_CMC[0:20]
def main(config):

    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directories
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode

        # automatically resume model from the latest one
        start_train_epoch = 0
        if config.auto_resume_training_from_lastest_steps:
            print('resume', base.output_path)
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs + 1):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

        # test
        base.save_model(config.total_train_epochs)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {} with len {}'.
               format(time_now(), config.test_dataset, mAP, CMC, len(CMC)))

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
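
test_with_graph() above accumulates features, pids and cids in CatMeter objects. The sketch below shows a concatenating meter with the same update()/get_val_numpy() interface, inferred from usage; the class shipped with the repository may differ.

import torch


class CatMeter:
    # accumulates per-batch tensors by concatenation along dim 0
    def __init__(self):
        self.val = None

    def update(self, val):
        val = val if torch.is_tensor(val) else torch.as_tensor(val)
        self.val = val if self.val is None else torch.cat([self.val, val], dim=0)

    def get_val_numpy(self):
        return self.val.detach().cpu().numpy()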
Example #4
def main(config):
    loader = Loader(config)
    base = Base(config, loader)
    make_dirs(base.output_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_model_path)
    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:

            start_train_epoch = 0

        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        for current_epoch in range(start_train_epoch,
                                   config.total_train_epoch):
            base.save_model(current_epoch)

            if current_epoch < config.use_graph:
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'.
               format(time_now(), config.target_dataset, mAP, CMC))
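
Every entry point above receives a config object with attribute access. One common way to build such an object is argparse; the driver below is a hypothetical sketch that reuses a few option names from Example #4, with purely illustrative defaults.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # option names borrowed from Example #4; defaults are illustrative
    parser.add_argument('--mode', type=str, default='train', help='train or test')
    parser.add_argument('--total_train_epoch', type=int, default=120)
    parser.add_argument('--use_graph', type=int, default=40)
    parser.add_argument('--target_dataset', type=str, default='market')
    config = parser.parse_args()
    main(config)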
Example #5
def resume_model_from_path(self, path, resume_epoch):
    for ii, _ in enumerate(self.model_list):
        self.model_list[ii].load_state_dict(
            torch.load(os.path.join(path, 'model-{}_{}.pkl'.format(ii, resume_epoch))))
    print('Time: {}, successfully resumed model from epoch {}'.format(time_now(), resume_epoch))
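
resume_model_from_path() implies a matching save routine that writes one 'model-{index}_{epoch}.pkl' file per entry in self.model_list. A hypothetical counterpart, assuming torch, os and time_now are in scope as above:

def save_model_to_path(self, path, epoch):
    # hypothetical counterpart: one checkpoint file per model in model_list,
    # using the same 'model-{index}_{epoch}.pkl' naming scheme
    for ii, model in enumerate(self.model_list):
        torch.save(model.state_dict(),
                   os.path.join(path, 'model-{}_{}.pkl'.format(ii, epoch)))
    print('Time: {}, saved models for epoch {}'.format(time_now(), epoch))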
Example #6
def main(config):

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':

        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # get indexes of saved models
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # remove the bad-case and get available indexes
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)

            available_indexes = sorted(list(set(available_indexes)),
                                       reverse=True)
            unavailable_indexes = list(
                set(indexes).difference(set(available_indexes)))

            if len(available_indexes
                   ) > 0:  # resume model from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:  # no complete checkpoint was found
                logger('Time: {}, there are no available models'.format(
                    time_now()))

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.train_epoches):

            # test
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(
                        time_now(), key, results[key]))

            # visualize generated images
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=False,
                                         optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         train_pixel=False,
                                         optimize_sl_enc=False)
            else:  # joint train
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=True,
                                         optimize_sl_enc=True)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # save model
            base.save_model(current_epoch)

        # test
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))
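
The resume block above (and the near-identical one in Example #8) parses epoch indexes out of the checkpoint file names and keeps only epochs for which every model in model_list has a file. Factored into a standalone helper, under the same '<name>_<epoch>.pkl' naming assumption, it could look like this:

def latest_complete_checkpoint(files, model_num):
    # epoch index is the part after the last '_' in '<name>_<epoch>.pkl'
    indexes = [int(f.replace('.pkl', '').split('_')[-1]) for f in files]
    # keep only epochs that have one file per model
    complete = [epoch for epoch in set(indexes) if indexes.count(epoch) >= model_num]
    return max(complete) if complete else None

With such a helper, the block reduces to calling latest_complete_checkpoint(files, len(base.model_list)) and resuming from the returned epoch when it is not None.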
Example #7
def main(config):

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directories
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))
        # test
        testwithVer2(config,
                     logger,
                     base,
                     loaders,
                     'duke',
                     use_gcn=True,
                     use_gm=True)

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_path != '' and config.resume_test_epoch != 0:
            base.resume_model_from_path(config.resume_test_path,
                                        config.resume_test_epoch)
        else:
            assert 0, 'please set resume_test_path and resume_test_epoch '
        # test
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=False,
                                           use_gm=False)
        logger('Time: {},  base, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=False)
        logger(
            'Time: {},  base+gcn, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=True)
        logger('Time: {},  base+gcn+gm, Dataset: Duke  \nmAP: {} \nRank: {}'.
               format(time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_path != '' and config.resume_visualize_epoch != 0:
            base.resume_model_from_path(config.resume_visualize_path,
                                        config.resume_visualize_epoch)
            print('Time: {}, resume model from {} {}'.format(
                time_now(), config.resume_visualize_path,
                config.resume_visualize_epoch))
        # visualization
        if 'market' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'market')
        elif 'duke' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'duke')
        else:
            assert 0
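
The three testwithVer2() calls in the test branch above differ only in their (use_gcn, use_gm) flags; the repetition can be folded into a loop, as in the sketch below (identifiers as in Example #7).

for name, use_gcn, use_gm in [('base', False, False),
                              ('base+gcn', True, False),
                              ('base+gcn+gm', True, True)]:
    duke_map, duke_rank = testwithVer2(config, logger, base, loaders, 'duke',
                                       use_gcn=use_gcn, use_gm=use_gm)
    logger('Time: {},  {}, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
        time_now(), name, duke_map, duke_rank))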
Example #8
def test(config, base, loader):

    base.set_eval()

    source_query_features_meter, source_query_pids_meter, source_query_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()
    source_gallery_features_meter, source_gallery_pids_meter, source_gallery_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()

    target_query_features_meter, target_query_pids_meter, target_query_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()
    target_gallery_features_meter, target_gallery_pids_meter, target_gallery_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()

    source_loaders = [loader.source_query_loader, loader.source_gallery_loader]
    target_loaders = [loader.target_query_loader, loader.target_gallery_loader]

    print(time_now(), 'source_feature start')

    with torch.no_grad():
        for source_loader_id, source_loader in enumerate(source_loaders):
            for source_data in source_loader:
                source_images, source_pids, source_cids = source_data
                source_images = source_images.to(base.device)
                source_features = base.feature_extractor(
                    source_images).squeeze()

                if source_loader_id == 0:
                    source_query_features_meter.update(source_features.data)
                    source_query_pids_meter.update(source_pids)
                    source_query_cids_meter.update(source_cids)
                elif source_loader_id == 1:
                    source_gallery_features_meter.update(source_features.data)
                    source_gallery_pids_meter.update(source_pids)
                    source_gallery_cids_meter.update(source_cids)

    print(time_now(), 'source_features done')

    print(time_now(), 'target_feature start')

    with torch.no_grad():
        for target_loader_id, target_loader in enumerate(target_loaders):
            for target_data in target_loader:
                target_images, target_pids, target_cids = target_data
                target_images = target_images.to(base.device)
                target_features = base.feature_extractor(
                    target_images).squeeze()

                if target_loader_id == 0:
                    target_query_features_meter.update(target_features.data)
                    target_query_pids_meter.update(target_pids)
                    target_query_cids_meter.update(target_cids)
                elif target_loader_id == 1:
                    target_gallery_features_meter.update(target_features.data)
                    target_gallery_pids_meter.update(target_pids)
                    target_gallery_cids_meter.update(target_cids)

    print(time_now(), 'target_features done')

    source_query_features = source_query_features_meter.get_val_numpy()
    source_gallery_features = source_gallery_features_meter.get_val_numpy()

    target_query_features = target_query_features_meter.get_val_numpy()
    target_gallery_features = target_gallery_features_meter.get_val_numpy()

    source_mAP, source_CMC = ReIDEvaluator(
        dist='cosine', mode=config.test_mode).evaluate(
            source_query_features, source_query_pids_meter.get_val_numpy(),
            source_query_cids_meter.get_val_numpy(), source_gallery_features,
            source_gallery_pids_meter.get_val_numpy(),
            source_gallery_cids_meter.get_val_numpy())

    target_mAP, target_CMC = ReIDEvaluator(
        dist='cosine', mode=config.test_mode).evaluate(
            target_query_features, target_query_pids_meter.get_val_numpy(),
            target_query_cids_meter.get_val_numpy(), target_gallery_features,
            target_gallery_pids_meter.get_val_numpy(),
            target_gallery_cids_meter.get_val_numpy())

    return source_mAP, source_CMC[0:20], target_mAP, target_CMC[0:20]
def main(config):

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':

        if config.resume:
            # automatically resume model from the latest one
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # get indexes of saved models
                    indexes = []
                    for file in files:
                        indexes.append(
                            int(file.replace('.pkl', '').split('_')[-1]))

                    # remove the bad-case and get available indexes
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)

                    available_indexes = sorted(list(set(available_indexes)),
                                               reverse=True)
                    unavailable_indexes = list(
                        set(indexes).difference(set(available_indexes)))

                    if len(available_indexes
                           ) > 0:  # resume model from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info(
                            'Time: {}, automatically resume training from the latest step (model {})'
                            .format(time_now(), available_indexes[0]))
                    else:  # no complete checkpoint was found
                        logger.info(
                            'Time: {}, there are no available models'.format(
                                time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.warmup_adaptation_epoches):

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         self_training=False,
                                         optimize_sl_enc=True,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=False)  # joint train
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches + config.warmup_adaptation_epoches:  #warmup adaptation
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=True)

            print("another epoch")
            logger.info('Time: {};  Epoch: {};  {}'.format(
                time_now(), current_epoch, results))
            # save model
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)

            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches

        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = loaders.get_self_train_loaders(
            )

            trg_labeled_dataloader = generate_labeled_dataset(
                base, iter_n, src_dataset, src_dataloader, trg_dataset,
                trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(
                    config,
                    iter_n,
                    loaders,
                    base,
                    epoch,
                    train_gan=True,
                    train_reid=False,
                    self_training=True,
                    optimize_sl_enc=True,
                    trg_labeled_loader=trg_labeled_dataloader)
                logger.info('Time: {};  Epoch: {};  {}'.format(
                    time_now(), epoch, results))

                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch,
                                    False)

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        cmc, map = test(config, base, loaders, epoch=100, brief=False)
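
Both test() functions above hand raw feature matrices to ReIDEvaluator(dist='cosine', ...). The cosine distance they are built on is a query-gallery matrix of one minus cosine similarity; a minimal numpy sketch is shown below (the actual evaluator presumably also filters same-camera matches and computes CMC and mAP).

import numpy as np


def cosine_distance_matrix(query_features, gallery_features):
    # L2-normalise, then 1 - cosine similarity; result shape [num_query, num_gallery]
    q = query_features / np.linalg.norm(query_features, axis=1, keepdims=True)
    g = gallery_features / np.linalg.norm(gallery_features, axis=1, keepdims=True)
    return 1.0 - q.dot(g.T)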
Example #10
def main(config):

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directories
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):

            # save model
            base.save_model(current_epoch)

            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # test
            if (current_epoch + 1) % 40 == 0 and current_epoch + 1 >= 0:
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger(
                    'Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
                        time_now(), market_map, market_rank))
                logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # test
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # visualization
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')
def test(config, base, loaders):

    base.set_eval()

    # meters
    query_features_meter, query_pids_meter, query_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()
    gallery_features_meter, gallery_pids_meter, gallery_cids_meter = CatMeter(
    ), CatMeter(), CatMeter()

    # init dataset
    if config.test_dataset == 'market':
        loaders = [loaders.market_query_loader, loaders.market_gallery_loader]
    elif config.test_dataset == 'duke':
        loaders = [loaders.duke_query_loader, loaders.duke_gallery_loader]
    elif config.test_dataset == 'msmt':
        loaders = [loaders.msmt_query_loader, loaders.msmt_gallery_loader]
    elif 'njust' in config.test_dataset:
        loaders = [loaders.njust_query_loader, loaders.njust_gallery_loader]
    elif config.test_dataset == 'wildtrack':
        loaders = [
            loaders.wildtrack_query_loader, loaders.wildtrack_gallery_loader
        ]
    else:
        assert 0, 'test dataset error, expect market/duke/msmt/njust_win/njust_spr, given {}'.format(
            config.test_dataset)

    print(time_now(), 'feature start')

    # compute query and gallery features
    with torch.no_grad():
        for loader_id, loader in enumerate(loaders):
            for data in loader:
                # compute features
                images, pids, cids = data
                images = images.to(base.device)
                features = base.model(images)
                # save as query features
                if loader_id == 0:
                    query_features_meter.update(features.data)
                    query_pids_meter.update(pids)
                    query_cids_meter.update(cids)
                # save as gallery features
                elif loader_id == 1:
                    gallery_features_meter.update(features.data)
                    gallery_pids_meter.update(pids)
                    gallery_cids_meter.update(cids)

    print(time_now(), 'feature done')

    #
    query_features = query_features_meter.get_val_numpy()
    gallery_features = gallery_features_meter.get_val_numpy()

    # compute mAP and rank@k
    mAP, CMC = ReIDEvaluator(dist='cosine', mode=config.test_mode).evaluate(
        query_features, query_cids_meter.get_val_numpy(),
        query_pids_meter.get_val_numpy(), gallery_features,
        gallery_cids_meter.get_val_numpy(), gallery_pids_meter.get_val_numpy())

    # compute precision-recall curve
    thresholds = np.linspace(1.0, 0.0, num=101)
    pres, recalls, thresholds = PrecisionRecall(
        dist='cosine',
        mode=config.test_mode).evaluate(thresholds, query_features,
                                        query_cids_meter.get_val_numpy(),
                                        query_pids_meter.get_val_numpy(),
                                        gallery_features,
                                        gallery_cids_meter.get_val_numpy(),
                                        gallery_pids_meter.get_val_numpy())

    return mAP, CMC[0:150], pres, recalls, thresholds
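
test() above returns pres, recalls and thresholds sampled over 101 cosine-similarity thresholds; plot_prerecall_curve in Example #1 presumably renders them. A minimal matplotlib sketch of such a plot, with illustrative styling and file naming:

import matplotlib.pyplot as plt


def plot_pr_curve(pres, recalls, mAP, save_path='pr_curve.png'):
    # simple precision-recall plot saved to disk
    plt.figure()
    plt.plot(recalls, pres)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall curve (mAP = {:.3f})'.format(mAP))
    plt.grid(True)
    plt.savefig(save_path)
    plt.close()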