Ejemplo n.º 1
0
 def save_model(self, save_epoch):
     """Save state_dicts of the four sub-networks for epoch ``save_epoch``.

     When ``self.max_save_model_num > 0``, also deletes the oldest checkpoint
     set once more than ``4 * max_save_model_num`` .pkl files accumulate.
     """
     feature_extractor_file_path = os.path.join(self.save_model_path, 'feature_extractor_{}.pkl'.format(save_epoch))
     torch.save(self.feature_extractor.state_dict(), feature_extractor_file_path)
     identity_classifier_file_path = os.path.join(self.save_model_path, 'identity_classifier_{}.pkl'.format(save_epoch))
     torch.save(self.identity_classifier.state_dict(), identity_classifier_file_path)
     identitydomain_classifier_file_path = os.path.join(self.save_model_path, 'identitydomain_classifier_{}.pkl'.format(save_epoch))
     torch.save(self.identitydomain_classifier.state_dict(), identitydomain_classifier_file_path)
     camera_classifier_file_path = os.path.join(self.save_model_path, 'camera_classifier_{}.pkl'.format(save_epoch))
     torch.save(self.camera_classifier.state_dict(), camera_classifier_file_path)
     if self.max_save_model_num > 0:
         root, _, files = os_walk(self.save_model_path)
         # BUG FIX: the original removed items from ``files`` while iterating
         # over it, which silently skips elements; filter into a new list.
         files = [file for file in files if '.pkl' in file]
         if len(files) > 4 * self.max_save_model_num:
             # filenames are '<word>_<word>_<epoch>.pkl', so token 2 is the epoch
             file_iters = sorted([int(file.replace('.pkl', '').split('_')[2]) for file in files], reverse=False)
             # remove the full checkpoint set of the oldest epoch
             for prefix in ('feature_extractor', 'identity_classifier', 'identitydomain_classifier', 'camera_classifier'):
                 os.remove(os.path.join(root, '{}_{}.pkl'.format(prefix, file_iters[0])))
Ejemplo n.º 2
0
def main(config):
    """Entry point: build the loader and base model, then train or test
    depending on ``config.mode``."""
    loader = Loader(config)
    base = Base(config, loader)

    # prepare output directories
    for directory in (base.output_path, base.save_logs_path, base.save_model_path):
        make_dirs(directory)

    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        # explicit resume epoch first ...
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # ... but the newest on-disk checkpoint wins when auto-resume is on
        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                saved_epochs = sorted({int(name.replace('.pkl', '').split('_')[-1]) for name in files})
                base.resume_model(saved_epochs[-1])
                start_train_epoch = saved_epochs[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), saved_epochs[-1]))

        for current_epoch in range(start_train_epoch, config.total_train_epoch):
            # checkpoint before training the epoch
            base.save_model(current_epoch)

            if current_epoch < config.use_graph:
                # meta-learning phase: evaluate every 40 epochs
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                # graph phase: evaluate every 5 epochs
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'.
               format(time_now(), config.target_dataset, mAP, CMC))
Ejemplo n.º 3
0
 def _load_samples(self, floder_dir):
     """Collect ``[path, identity_id, camera_id]`` for every jpg under ``floder_dir``."""
     root_path, _, files_name = os_walk(floder_dir)
     samples = []
     for file_name in files_name:
         if 'jpg' not in file_name:
             continue
         identity_id, camera_id = self._analysis_file_name(file_name)
         samples.append([root_path + file_name, identity_id, camera_id])
     return samples
Ejemplo n.º 4
0
    def _load_samples(self, floder_dir, camera_id):
        """Return ``[path, identity, camera_id]`` for jpgs in ``floder_dir``
        whose parsed camera matches ``camera_id``."""
        root_path, _, files_name = os_walk(floder_dir)
        label_samples = []
        for file_name in files_name:
            if 'jpg' not in file_name:
                continue
            identity, camera = self._analysis_file_name(file_name)
            if camera == camera_id:
                label_samples.append([root_path + file_name, identity, camera_id])
        return label_samples
Ejemplo n.º 5
0
    def _load_samples(self, floder_dir):
        """Build ``[path, identity_id, camera_id]`` samples from jpgs in
        ``floder_dir``: files are taken in reversed listing order and the
        position in that order serves as the identity label."""
        root_path, _, files_name = os_walk(floder_dir)
        jpg_names = [name for name in files_name if 'jpg' in name]
        samples = []
        for identity_id, sample in enumerate(reversed(jpg_names)):
            camera_id = self._analysis_file_name(sample)
            samples.append([root_path + sample, identity_id, camera_id])
        return samples
Ejemplo n.º 6
0
 def save_model(self, save_epoch):
     """Save the six sub-network state_dicts for epoch ``save_epoch``.

     When ``self.max_save_model_num > 0``, also deletes the oldest checkpoint
     set once more than ``6 * max_save_model_num`` .pkl files accumulate.
     """
     feature_file_path = os.path.join(self.save_model_path, 'feature_{}.pkl'.format(save_epoch))
     torch.save(self.feature_extractor.state_dict(), feature_file_path)
     graph_file_path = os.path.join(self.save_model_path, 'graph_{}.pkl'.format(save_epoch))
     torch.save(self.graph.state_dict(), graph_file_path)
     classifier1_file_path = os.path.join(self.save_model_path, 'classifier1_{}.pkl'.format(save_epoch))
     torch.save(self.classifier1.state_dict(), classifier1_file_path)
     classifier2_file_path = os.path.join(self.save_model_path, 'classifier2_{}.pkl'.format(save_epoch))
     torch.save(self.classifier2.state_dict(), classifier2_file_path)
     local_classifier_file_path = os.path.join(self.save_model_path, 'localclassifier_{}.pkl'.format(save_epoch))
     torch.save(self.local_classifier.state_dict(), local_classifier_file_path)
     classifier3_file_path = os.path.join(self.save_model_path, 'classifier3_{}.pkl'.format(save_epoch))
     torch.save(self.classifier3.state_dict(), classifier3_file_path)
     if self.max_save_model_num > 0:
         root, _, files = os_walk(self.save_model_path)
         # BUG FIX: the original removed items from ``files`` while iterating
         # over it, which silently skips elements; filter into a new list.
         files = [file for file in files if '.pkl' in file]
         if len(files) > 6 * self.max_save_model_num:
             # filenames are '<name>_<epoch>.pkl', so token 1 is the epoch
             file_iters = sorted([int(file.replace('.pkl', '').split('_')[1]) for file in files], reverse=False)
             # remove the full checkpoint set of the oldest epoch
             for prefix in ('feature', 'graph', 'classifier1', 'classifier2', 'localclassifier', 'classifier3'):
                 os.remove(os.path.join(root, '{}_{}.pkl'.format(prefix, file_iters[0])))
Ejemplo n.º 7
0
 def resume_last_model(self):
     """Resume from the newest checkpoint in ``self.save_model_path``.

     Returns the epoch index resumed from, or 0 when no .pkl files exist.
     """
     root, _, files = os_walk(self.save_model_path)
     # BUG FIX: the original removed items from ``files`` while iterating
     # over it, which silently skips elements; filter into a new list.
     files = [file for file in files if '.pkl' in file]
     if len(files) > 0:
         # epoch index is encoded after the last '_' of each file name
         indexes = []
         for file in files:
             indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
         indexes = sorted(list(set(indexes)), reverse=False)
         self.resume_model(indexes[-1])
         start_train_epoch = indexes[-1]
         return start_train_epoch
     else:
         return 0
	def save_model(self, save_epoch):
		'''Save the model's state_dict as 'model_<save_epoch>.pkl' and prune old checkpoints.'''
		# save model
		file_path = os.path.join(self.output_path, 'model_{}.pkl'.format(save_epoch))
		torch.save(self.model.state_dict(), file_path)
		# if saved model is more than max num, delete the model with smallest iter
		if self.max_save_model_num > 0:
			# find all files in format of *.pkl
			root, _, files = os_walk(self.output_path)
			# BUG FIX: the original removed items from ``files`` while iterating
			# over it, which silently skips elements; filter into a new list.
			files = [file for file in files if '.pkl' in file]
			# remove extra model
			if len(files) > self.max_save_model_num:
				file_iters = sorted([int(file.replace('.pkl', '').split('_')[1]) for file in files], reverse=False)
				file_path = os.path.join(root, 'model_{}.pkl'.format(file_iters[0]))
				os.remove(file_path)
	def resume_last_model(self):
		'''Resume model from the last checkpoint in path self.output_path.

		Returns the epoch index resumed from, or 0 when no .pkl files exist.
		'''
		# find all files in format of *.pkl
		root, _, files = os_walk(self.output_path)
		# BUG FIX: the original removed items from ``files`` while iterating
		# over it, which silently skips elements; filter into a new list.
		files = [file for file in files if '.pkl' in file]
		# find the last one
		if len(files) > 0:
			# get indexes of saved models
			indexes = []
			for file in files:
				indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
			indexes = sorted(list(set(indexes)), reverse=False)
			# resume model from the latest model
			self.resume_model(indexes[-1])
			start_train_epoch = indexes[-1]
			return start_train_epoch
		else:
			return 0
Ejemplo n.º 10
0
    def save_model(self, save_epoch):
        """Save every model in ``self.model_list`` as 'model-<i>_<epoch>.pkl',
        then delete incomplete checkpoint sets and all but the newest
        ``max_save_model_num`` complete ones."""
        # save one file per model in the list
        for ii, model in enumerate(self.model_list):
            torch.save(model.state_dict(), os.path.join(self.config.save_models_path, 'model-{}_{}.pkl'.format(ii, save_epoch)))

        if self.config.max_save_model_num > 0:
            root, _, files = os_walk(self.config.save_models_path)

            # epoch index is encoded after the last '_' of each file name
            indexes = [int(name.replace('.pkl', '').split('_')[-1]) for name in files]

            # an epoch is "available" only when every model in the list was
            # saved for it (i.e. it occurs len(model_list) times)
            model_num = len(self.model_list)
            available_indexes = [idx for idx in indexes if indexes.count(idx) >= model_num]

            available_indexes = sorted(set(available_indexes), reverse=True)
            unavailable_indexes = list(set(indexes).difference(set(available_indexes)))

            # drop every incomplete epoch's files (best-effort: some of the
            # expected files are missing by definition here)
            for bad_epoch in unavailable_indexes:
                try:
                    for ii in range(len(self.model_list)):
                        os.remove(os.path.join(root, 'model-{}_{}.pkl'.format(ii, bad_epoch)))
                except:
                    pass

            # keep only the newest max_save_model_num complete epochs
            if len(available_indexes) >= self.config.max_save_model_num:
                for old_epoch in available_indexes[self.config.max_save_model_num:]:
                    for ii in range(len(self.model_list)):
                        os.remove(os.path.join(root, 'model-{}_{}.pkl'.format(ii, old_epoch)))
Ejemplo n.º 11
0
def main(config):
    """Driver for the GAN+reid pipeline: warm up the reid model, warm up the
    GAN, then train jointly; or resume a pretrained model and test."""
    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':

        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # epoch index is encoded after the last '_' of each file name
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # an epoch is usable only when every model in model_list was
            # saved for it; drop partially-saved epochs
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)

            available_indexes = sorted(list(set(available_indexes)),
                                       reverse=True)
            unavailable_indexes = list(
                set(indexes).difference(set(available_indexes)))

            if len(available_indexes) > 0:  # resume model from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:
                # BUG FIX: the original passed this format string with a bare
                # '{}' and never called .format(), logging literal braces.
                logger('Time: {}, there are no available models'.format(time_now()))

        # main loop over warmup-reid, warmup-gan and joint-train epochs
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.train_epoches):

            # test every 10 epochs once past both warmup phases
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(
                        time_now(), key, results[key]))

            # visualize generated images
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # train: which sub-objectives are active depends on the phase
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=False, optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         train_pixel=False, optimize_sl_enc=False)
            else:  # joint train
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=True, optimize_sl_enc=True)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # save model
            base.save_model(current_epoch)

        # final full test
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))
Ejemplo n.º 12
0
def main(config):
    """Driver with three modes: 'train' (auto-resume + epoch loop + final
    Duke test), 'test' (three Duke evaluations: base, base+gcn, base+gcn+gm)
    and 'visualize' (ranked-image visualization)."""

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                # (epoch index is encoded after the last '_' of the file name;
                # assumes everything in save_model_path is a checkpoint — TODO confirm)
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model (checkpoint is written before the epoch is trained)
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))
        # test with both GCN and GM enabled after training completes
        testwithVer2(config,
                     logger,
                     base,
                     loaders,
                     'duke',
                     use_gcn=True,
                     use_gm=True)

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_path != '' and config.resume_test_epoch != 0:
            base.resume_model_from_path(config.resume_test_path,
                                        config.resume_test_epoch)
        else:
            assert 0, 'please set resume_test_path and resume_test_epoch '
        # test: evaluate three ablation settings on Duke
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=False,
                                           use_gm=False)
        logger('Time: {},  base, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=False)
        logger(
            'Time: {},  base+gcn, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=True)
        logger('Time: {},  base+gcn+gm, Dataset: Duke  \nmAP: {} \nRank: {}'.
               format(time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_path != '' and config.resume_visualize_epoch != 0:
            base.resume_model_from_path(config.resume_visualize_path,
                                        config.resume_visualize_epoch)
            print('Time: {}, resume model from {} {}'.format(
                time_now(), config.resume_visualize_path,
                config.resume_visualize_epoch))
        # visualization on whichever dataset was trained
        if 'market' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'market')
        elif 'duke' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'duke')
        else:
            assert 0
def main(config):
    """Driver for domain-adaptation training: warmup (reid, GAN, adaptation)
    epochs followed by iterative self-training; or resume and test."""

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':

        if config.resume:
            # automatically resume model from the latest one
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                # NOTE(review): reads config.save_models_path while the dirs
                # created above are save_wp_models_path / save_st_models_path —
                # confirm these point at the same location.
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # epoch index is encoded after the last '_' of each name
                    indexes = []
                    for file in files:
                        indexes.append(
                            int(file.replace('.pkl', '').split('_')[-1]))

                    # an epoch is usable only when every model in model_list
                    # was saved for it; drop partially-saved epochs
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)

                    available_indexes = sorted(list(set(available_indexes)),
                                               reverse=True)
                    unavailable_indexes = list(
                        set(indexes).difference(set(available_indexes)))

                    if len(available_indexes
                           ) > 0:  # resume model from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info(
                            'Time: {}, automatically resume training from the latest step (model {})'
                            .format(time_now(), available_indexes[0]))
                    else:
                        # BUG FIX: the original passed this format string with
                        # a bare '{}' and never called .format().
                        logger.info('Time: {}, there are no available models'.format(time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # main warmup loop: reid, then GAN, then adaptation phases
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.warmup_adaptation_epoches):

            # train: which sub-objectives are active depends on the phase
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         self_training=False,
                                         optimize_sl_enc=True,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches + config.warmup_adaptation_epoches:  # warmup adaptation
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=True)

            print("another epoch")
            logger.info('Time: {};  Epoch: {};  {}'.format(
                time_now(), current_epoch, results))
            # save model
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)

            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches

        # iterative self-training on pseudo-labeled target data
        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = loaders.get_self_train_loaders(
            )

            trg_labeled_dataloader = generate_labeled_dataset(
                base, iter_n, src_dataset, src_dataloader, trg_dataset,
                trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(
                    config,
                    iter_n,
                    loaders,
                    base,
                    epoch,
                    train_gan=True,
                    train_reid=False,
                    self_training=True,
                    optimize_sl_enc=True,
                    trg_labeled_loader=trg_labeled_dataloader)
                # BUG FIX: the original logged ``current_epoch`` here — a
                # stale leftover from the warmup loop (and a NameError if
                # that loop never ran); log this loop's ``epoch`` instead.
                logger.info('Time: {};  Epoch: {};  {}'.format(
                    time_now(), epoch, results))

                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch,
                                    False)

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        cmc, map = test(config, base, loaders, epoch=100, brief=False)
Ejemplo n.º 14
0
def main(config):
    """Driver with three modes: 'train' (resume + epoch loop with periodic
    Market/Duke tests), 'test' (single evaluation on both datasets) and
    'visualize' (ranking-list visualization)."""

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # automatically resume model from the latest one
        # (overrides the explicit resume_train_epoch above when checkpoints exist)
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                # (epoch index is encoded after the last '_' of the file name)
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):

            # save model (checkpoint is written before the epoch is trained)
            base.save_model(current_epoch)

            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # test on both datasets every 40 epochs
            if (current_epoch + 1) % 40 == 0 and current_epoch + 1 >= 0:
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger(
                    'Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
                        time_now(), market_map, market_rank))
                logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # test
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # visualization
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')