def main(config):
    """Entry point: train, test, or visualize a ReID model per ``config.mode``.

    Args:
        config: parsed options object; must provide ``mode`` in
            {'train', 'test', 'visualize'} plus the dataset / resume /
            output-path settings read below.
    """
    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directions
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode
        # BUGFIX: start_train_epoch was only assigned inside the auto-resume
        # branch, so disabling auto-resume raised NameError. Default to 0.
        start_train_epoch = 0
        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch, config.total_train_epochs):
            # save model before training the epoch so a crash loses at most one epoch
            base.save_model(current_epoch)
            # train
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

        # final save + test
        base.save_model(config.total_train_epochs)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC, 'none')

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        # BUGFIX: this log previously passed (mAP, CMC, pres, recalls,
        # thresholds) to a 5-slot format string, so "precision" printed mAP
        # and "recall" printed CMC. Pass the matching values instead.
        logger(
            'Time: {}; Test Dataset: {}, \nprecision: {} \nrecall: {}\nthresholds: {}'
            .format(time_now(), config.test_dataset, pres, recalls, thresholds))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC, 'none')

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
def main(config):
    """Entry point: train, test, or visualize a ReID model per ``config.mode``.

    Args:
        config: parsed options object; must provide ``mode`` in
            {'train', 'test', 'visualize'} plus the dataset / resume /
            output-path settings read below.
    """
    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directions
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode
        # BUGFIX: default before the optional auto-resume; previously
        # start_train_epoch was unbound when auto-resume was disabled.
        start_train_epoch = 0
        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            print('resume', base.output_path)
            start_train_epoch = base.resume_last_model()

        # main loop; the upper bound is inclusive of total_train_epochs so the
        # in-loop save also checkpoints the final state
        for current_epoch in range(start_train_epoch, config.total_train_epochs + 1):
            # save model before training the epoch
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

        # final save + test
        base.save_model(config.total_train_epochs)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {} with len {}'.
               format(time_now(), config.test_dataset, mAP, CMC, len(CMC)))

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
def main(config):
    """Entry point: warm up reid then GAN, joint-train all parts, or test.

    Args:
        config: parsed options; ``mode`` in {'train', 'test'} plus the
            warmup/train epoch counts and save paths read below.
    """
    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # get indexes of saved models
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # an epoch index is usable only if every sub-model of base.model_list
            # was saved for it; drop partially-saved epochs
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)
            available_indexes = sorted(list(set(available_indexes)), reverse=True)

            if len(available_indexes) > 0:  # resume model from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:
                # BUGFIX: this branch used to be an empty suite holding only a
                # commented-out log line whose '{}' placeholder was never
                # filled; emit a well-formed message instead.
                logger('Time: {}, there are no available models'.format(time_now()))

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.train_epoches):

            # test (brief) every 10 epochs once joint training has begun
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(
                        time_now(), key, results[key]))

            # visualize generated images
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # train: reid warmup -> GAN warmup -> joint
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=False, optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         train_pixel=False, optimize_sl_enc=False)
            else:  # joint train
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=True, optimize_sl_enc=True)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

            # save model
            base.save_model(current_epoch)

        # final full test
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))
def main(config):
    """Dispatch to training, three-setting evaluation, or ranked-image visualization."""
    # build data loaders and the model wrapper
    loaders = Loaders(config)
    base = Base(config, loaders)

    # create every output directory up front
    for directory in (base.output_path, base.save_model_path, base.save_logs_path,
                      base.save_visualize_market_path, base.save_visualize_duke_path):
        make_dirs(directory)

    # logger writes under <output_path>/logs/
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode
        # epoch counter always starts from 0, even after a resume
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            _, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # collect the distinct epoch indexes of saved checkpoints, ascending
                saved_epochs = sorted(
                    {int(name.replace('.pkl', '').split('_')[-1]) for name in files},
                    reverse=False)
                # resume weights from the newest checkpoint (the epoch counter
                # itself intentionally stays at 0)
                base.resume_model(saved_epochs[-1])
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), saved_epochs[-1]))

        # main loop
        for current_epoch in range(start_train_epoch, config.total_train_epochs):
            base.save_model(current_epoch)
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

        # final evaluation with both GCN and GM enabled
        testwithVer2(config, logger, base, loaders, 'duke', use_gcn=True, use_gm=True)

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_path != '' and config.resume_test_epoch != 0:
            base.resume_model_from_path(config.resume_test_path,
                                        config.resume_test_epoch)
        else:
            assert 0, 'please set resume_test_path and resume_test_epoch '

        # evaluate three settings: base, base+gcn, base+gcn+gm
        duke_map, duke_rank = testwithVer2(config, logger, base, loaders, 'duke',
                                           use_gcn=False, use_gm=False)
        logger('Time: {}, base, Dataset: Duke \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config, logger, base, loaders, 'duke',
                                           use_gcn=True, use_gm=False)
        logger('Time: {}, base+gcn, Dataset: Duke \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config, logger, base, loaders, 'duke',
                                           use_gcn=True, use_gm=True)
        logger('Time: {}, base+gcn+gm, Dataset: Duke \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_path != '' and config.resume_visualize_epoch != 0:
            base.resume_model_from_path(config.resume_visualize_path,
                                        config.resume_visualize_epoch)
            print('Time: {}, resume model from {} {}'.format(
                time_now(), config.resume_visualize_path,
                config.resume_visualize_epoch))

        # visualize on whichever dataset was trained
        if 'market' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'market')
        elif 'duke' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'duke')
        else:
            assert 0
def main(config):
    """Warm up reid/GAN/adaptation models, run self-training iterations, or test.

    Args:
        config: parsed options; ``mode`` in {'train', 'test'} plus the
            warmup epoch counts, resume flags, and save paths read below.
    """
    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':
        if config.resume:
            # automatically resume model from the latest one
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                # NOTE(review): dirs above are created for save_wp_models_path /
                # save_st_models_path, but resume scans save_models_path —
                # confirm config defines these consistently.
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # get indexes of saved models
                    indexes = []
                    for file in files:
                        indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

                    # keep only epochs for which every sub-model was saved
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)
                    available_indexes = sorted(list(set(available_indexes)),
                                               reverse=True)

                    if len(available_indexes) > 0:  # resume model from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info(
                            'Time: {}, automatically resume training from the latest step (model {})'
                            .format(time_now(), available_indexes[0]))
                    else:
                        # BUGFIX: this branch used to be an empty suite holding a
                        # commented-out log line with an unfilled '{}'; emit a
                        # well-formed message instead.
                        logger.info('Time: {}, there are no available models'
                                    .format(time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # warmup loop: reid -> GAN -> adaptation
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.warmup_adaptation_epoches):

            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         self_training=False, optimize_sl_enc=True,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         self_training=False, optimize_sl_enc=False,
                                         train_adaptation=False)
            else:  # warmup adaptation
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         self_training=False, optimize_sl_enc=False,
                                         train_adaptation=True)
            print("another epoch")
            logger.info('Time: {}; Epoch: {}; {}'.format(
                time_now(), current_epoch, results))

            # save / evaluate periodically
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)
            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches

        # self-training iterations on pseudo-labeled target data
        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = \
                loaders.get_self_train_loaders()
            trg_labeled_dataloader = generate_labeled_dataset(
                base, iter_n, src_dataset, src_dataloader, trg_dataset,
                trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(
                    config, iter_n, loaders, base, epoch,
                    train_gan=True, train_reid=False, self_training=True,
                    optimize_sl_enc=True,
                    trg_labeled_loader=trg_labeled_dataloader)
                # BUGFIX: this log line used to report the stale `current_epoch`
                # left over from the warmup loop; report the self-train epoch.
                logger.info('Time: {}; Epoch: {}; {}'.format(
                    time_now(), epoch, results))
                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch,
                                    False)

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        # renamed from `map` to avoid shadowing the builtin
        cmc, mAP = test(config, base, loaders, epoch=100, brief=False)
def main(config):
    """Entry point: train, test, or visualize ranking lists per ``config.mode``.

    Args:
        config: parsed options; ``mode`` in {'train', 'test', 'visualize'}
            plus the resume-epoch settings and output paths read below.
    """
    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode
        # resume model from the resume_train_epoch
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models
                indexes = []
                for file in files:
                    indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume weights from the latest checkpoint; note the epoch
                # counter is intentionally NOT advanced here
                base.resume_model(indexes[-1])
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch, config.total_train_epochs):
            # save model before training the epoch
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

            # test every 40 epochs (the old `current_epoch + 1 >= 0` guard was
            # always true and has been dropped)
            if (current_epoch + 1) % 40 == 0:
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger('Time: {}, Dataset: Market \nmAP: {} \nRank: {}'.format(
                    time_now(), market_map, market_rank))
                logger('Time: {}, Dataset: Duke \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # test
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {}, Dataset: Market \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {}, Dataset: Duke \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # visualization
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')