def get_data(data_dir, source, target, height, width, batch_size, num_instance=2, workers=8):
    dataset = DA(data_dir, source, target)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids

    # Training-time augmentation at the requested input size.
    train_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),  # 3 = PIL bicubic
        T.Pad(10),
        T.RandomCrop((height, width)),
        T.RandomHorizontalFlip(0.5),
        T.RandomRotation(5),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    source_train_loader = DataLoader(
        Preprocessor_occluded(dataset.source_train,
                              root=osp.join(dataset.source_images_dir, dataset.source_train_path),
                              transform=train_transformer, train=True),
        batch_size=batch_size, num_workers=workers,
        sampler=IdentitySampler(dataset.source_train, num_instance),
        pin_memory=True, drop_last=True)

    query_loader = DataLoader(
        Preprocessor_occluded(dataset.query,
                              root=osp.join(dataset.target_images_dir, dataset.query_path),
                              transform=test_transformer),
        batch_size=42, num_workers=workers,
        shuffle=False, pin_memory=True)

    gallery_loader = DataLoader(
        Preprocessor_occluded(dataset.gallery,
                              root=osp.join(dataset.target_images_dir, dataset.gallery_path),
                              transform=test_transformer),
        batch_size=42, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, source_train_loader, query_loader, gallery_loader
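# Usage sketch (hypothetical paths and dataset names; `DA`,
# `Preprocessor_occluded`, and `IdentitySampler` are this repo's data
# utilities, assumed importable here):
#
#   dataset, num_classes, train_loader, query_loader, gallery_loader = get_data(
#       '/path/to/data', 'market1501', 'dukemtmc',
#       height=256, width=128, batch_size=64, num_instance=4)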
def get_data(data_dir, height, width, batch_size, num_instances, re=0, workers=8):
    dataset = DA(data_dir)
    test_dataset = TotalData(data_dir)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_source_ids

    train_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),  # 3 = PIL bicubic
        T.Pad(10),
        T.RandomCrop((height, width)),
        T.RandomHorizontalFlip(0.5),
        T.RandomRotation(5),
        T.ColorJitter(brightness=(0.5, 2.0), saturation=(0.5, 2.0), hue=(-0.1, 0.1)),
        T.ToTensor(),
        normalizer,
        # T.RandomErasing(EPSILON=re),
    ])

    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    # Train
    source_train_loader = DataLoader(
        Preprocessor(dataset.source_train, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        # shuffle=True, pin_memory=True, drop_last=True)
        sampler=RandomIdentitySampler(dataset.source_train, batch_size, num_instances),
        pin_memory=True, drop_last=True)

    # Test
    grid_query_loader = DataLoader(
        Preprocessor(test_dataset.grid_query,
                     root=osp.join(test_dataset.grid_images_dir, test_dataset.query_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    grid_gallery_loader = DataLoader(
        Preprocessor(test_dataset.grid_gallery,
                     root=osp.join(test_dataset.grid_images_dir, test_dataset.gallery_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    prid_query_loader = DataLoader(
        Preprocessor(test_dataset.prid_query,
                     root=osp.join(test_dataset.prid_images_dir, test_dataset.query_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    prid_gallery_loader = DataLoader(
        Preprocessor(test_dataset.prid_gallery,
                     root=osp.join(test_dataset.prid_images_dir, test_dataset.gallery_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    viper_query_loader = DataLoader(
        Preprocessor(test_dataset.viper_query,
                     root=osp.join(test_dataset.viper_images_dir, test_dataset.query_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    viper_gallery_loader = DataLoader(
        Preprocessor(test_dataset.viper_gallery,
                     root=osp.join(test_dataset.viper_images_dir, test_dataset.gallery_path),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    ilid_query_loader = DataLoader(
        Preprocessor(test_dataset.ilid_query,
                     root=osp.join(test_dataset.ilid_images_dir, "images"),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    ilid_gallery_loader = DataLoader(
        Preprocessor(test_dataset.ilid_gallery,
                     root=osp.join(test_dataset.ilid_images_dir, "images"),
                     transform=test_transformer),
        batch_size=64, num_workers=4,
        shuffle=False, pin_memory=True)

    return (dataset, test_dataset, num_classes, source_train_loader,
            grid_query_loader, grid_gallery_loader,
            prid_query_loader, prid_gallery_loader,
            viper_query_loader, viper_gallery_loader,
            ilid_query_loader, ilid_gallery_loader)
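# For reference, a minimal sketch of what an identity-balanced sampler such as
# `RandomIdentitySampler` above typically does (an illustration under an
# assumed record layout, not this repo's actual implementation): it yields
# dataset indices so that every batch holds batch_size // num_instances
# identities with num_instances images each.
import random
from collections import defaultdict

from torch.utils.data.sampler import Sampler


class SimpleIdentitySampler(Sampler):
    """Hypothetical illustration: yields `num_instances` indices per identity."""

    def __init__(self, data_source, batch_size, num_instances):
        # batch_size is accepted only for signature parity with the sampler
        # used above; the DataLoader itself slices the stream into batches.
        self.index_dic = defaultdict(list)
        for index, item in enumerate(data_source):
            pid = item[1]  # assumes records are (fname, pid, cam, ...) tuples
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_instances = num_instances

    def __len__(self):
        return len(self.pids) * self.num_instances

    def __iter__(self):
        random.shuffle(self.pids)
        for pid in self.pids:
            idxs = self.index_dic[pid]
            if len(idxs) >= self.num_instances:
                yield from random.sample(idxs, self.num_instances)
            else:
                # Too few images for this identity: sample with replacement.
                yield from random.choices(idxs, k=self.num_instances)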
def get_data(dataname, data_dir, model, matcher, save_path, args):
    root = osp.join(data_dir, dataname)
    dataset = datasets.create(dataname, root, combine_all=args.combine_all)
    num_classes = dataset.num_train_ids

    train_transformer = T.Compose([
        T.Resize((args.height, args.width), interpolation=InterpolationMode.BICUBIC),
        T.Pad(10),
        T.RandomCrop((args.height, args.width)),
        T.RandomHorizontalFlip(0.5),
        T.RandomRotation(5),
        T.ColorJitter(brightness=(0.5, 2.0), contrast=(0.5, 2.0),
                      saturation=(0.5, 2.0), hue=(-0.1, 0.1)),
        T.RandomOcclusion(args.min_size, args.max_size),
        T.ToTensor(),
    ])

    test_transformer = T.Compose([
        T.Resize((args.height, args.width), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
    ])

    train_path = osp.join(dataset.images_dir, dataset.train_path)
    train_loader = DataLoader(
        Preprocessor(dataset.train, root=train_path, transform=train_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=GraphSampler(dataset.train, train_path, test_transformer, model,
                             matcher, args.batch_size, args.num_instance,
                             args.test_gal_batch, args.test_prob_batch,
                             save_path, args.gs_verbose),
        pin_memory=True)

    query_loader = DataLoader(
        Preprocessor(dataset.query,
                     root=osp.join(dataset.images_dir, dataset.query_path),
                     transform=test_transformer),
        batch_size=args.test_fea_batch, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery,
                     root=osp.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=args.test_fea_batch, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, query_loader, gallery_loader
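# Usage sketch (hypothetical call; names follow the function above): the
# backbone `model` and QAConv `matcher` must be constructed before the data
# loaders, since `GraphSampler` is given both to score identity pairs when it
# assembles each epoch's batches.
#
#   dataset, num_classes, train_loader, query_loader, gallery_loader = \
#       get_data('market', args.data_dir, model, matcher, save_path, args)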
def main(args):
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create data loaders
    dataset, num_classes, train_loader, _, _ = \
        get_data(args.dataset, args.data_dir, args.height, args.width,
                 args.batch_size, args.combine_all, args.workers,
                 args.test_fea_batch)

    # Create model
    # model = seTest.resnst50().cuda()
    model = resmap.create(args.arch, ibn_type=args.ibn,
                          final_layer=args.final_layer, neck=args.neck).cuda()
    num_features = model.num_features
    # num_features = 64
    # print(model)

    # The feature-map size depends on which backbone stage is the final layer.
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = ClassMemoryLoss(matcher, num_classes, num_features, hei, wid,
                                args.mem_batch_size).cuda()

    # Optimizer: the pretrained backbone gets a 10x smaller learning rate.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [
        {'params': model.base.parameters(), 'lr': 0.1 * args.lr},
        {'params': new_params, 'lr': args.lr},
        {'params': criterion.parameters(), 'lr': args.lr},
    ]
    optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=0.9,
                                weight_decay=5e-4, nesterov=True)

    # Load from checkpoint
    start_epoch = 0
    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))
    elif args.pre_epochs > 0:
        pre_tr = PreTrainer(model, criterion, optimizer, train_loader,
                            args.pre_epochs, args.max_steps, args.num_trials)
        result_file = osp.join(exp_database_dir, args.method, 'pretrain_metric.txt')
        model, criterion, optimizer = pre_tr.train(result_file, args.method,
                                                   args.sub_method)

    # Decay LR by a factor of 0.1 every step_size epochs
    lr_scheduler = StepLR(optimizer, step_size=args.step_size, gamma=0.1,
                          last_epoch=start_epoch - 1)

    model = nn.DataParallel(model).cuda()
    criterion = nn.DataParallel(criterion).cuda()

    enhance_data_aug = False

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion)
        t0 = time.time()

        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)
            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step()

            train_time = time.time() - t0
            print('* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. '
                  'Training time: %.0f seconds.\n'
                  % (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            save_checkpoint({
                'model': model.module.state_dict(),
                'criterion': criterion.module.state_dict(),
                'optim': optimizer.state_dict(),
                'epoch': epoch + 1,
            }, fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

            # Switch to stronger augmentation once accuracy passes the threshold.
            if not enhance_data_aug and epoch < args.epochs - 1 and acc > args.acc_thr:
                enhance_data_aug = True
                print('\nAcc = %.2f%% > %.2f%%. Start to Flip and Block.\n'
                      % (acc * 100, args.acc_thr * 100))

                train_transformer = T.Compose([
                    T.Resize((args.height, args.width), interpolation=3),  # 3 = PIL bicubic
                    T.Pad(10),
                    T.RandomCrop((args.height, args.width)),
                    T.RandomHorizontalFlip(0.5),
                    T.RandomRotation(5),
                    T.ColorJitter(brightness=(0.5, 2.0), contrast=(0.5, 2.0),
                                  saturation=(0.5, 2.0), hue=(-0.1, 0.1)),
                    T.RandomOcclusion(args.min_size, args.max_size),
                    T.ToTensor(),
                ])

                train_loader = DataLoader(
                    Preprocessor(dataset.train,
                                 root=osp.join(dataset.images_dir, dataset.train_path),
                                 transform=train_transformer),
                    batch_size=args.batch_size, num_workers=args.workers,
                    shuffle=True, pin_memory=True, drop_last=True)

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    avg_rank1 = 0
    avg_mAP = 0
    num_testsets = 0
    results = {}

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width,
                          args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False

        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, \
            test_rank1_tlift, test_mAP_tlift, test_dist, \
            test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader,
                               test_gallery_loader, args.test_gal_batch,
                               args.test_prob_batch, args.tau, args.sigma,
                               args.K, args.alpha)

        results[test_name] = [test_rank1, test_mAP]
        # Average over all test sets except the training dataset itself.
        if test_name != args.dataset:
            avg_rank1 += test_rank1
            avg_mAP += test_mAP
            num_testsets += 1

        if testset.has_time_info:
            print(' %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                  ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n'
                  % (test_name, test_rank1 * 100, test_mAP * 100,
                     test_rank1_rerank * 100, test_mAP_rerank * 100,
                     test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            print(' %s: rank1=%.1f, mAP=%.1f.\n'
                  % (test_name, test_rank1 * 100, test_mAP * 100))

        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            if testset.has_time_info:
                f.write('\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, '
                        'mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                        'mAP_rerank_tlift=%.1f.\n\n'
                        % (test_name, test_rank1 * 100, test_mAP * 100,
                           test_rank1_rerank * 100, test_mAP_rerank * 100,
                           test_rank1_tlift * 100, test_mAP_tlift * 100))
            else:
                f.write('\t%s: rank1=%.1f, mAP=%.1f.\n\n'
                        % (test_name, test_rank1 * 100, test_mAP * 100))

        if args.save_score:
            test_gal_list = np.array([fname for fname, _, _, _ in testset.gallery],
                                     dtype=object)  # np.object was removed in NumPy >= 1.24
            test_prob_list = np.array([fname for fname, _, _, _ in testset.query],
                                      dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method, '%s_score.mat' % test_name)
            sio.savemat(test_score_file,
                        {'score': 1. - test_dist,
                         'score_rerank': 1. - test_dist_rerank,
                         'score_tlift': 1. - test_dist_tlift,
                         'gal_time': pre_tlift_dict['gal_time'],
                         'prob_time': pre_tlift_dict['prob_time'],
                         'gal_list': test_gal_list,
                         'prob_list': test_prob_list,
                         'gal_ids': test_gal_ids,
                         'prob_ids': test_prob_ids,
                         'gal_cams': test_gal_cams,
                         'prob_cams': test_prob_cams},
                        oned_as='column', do_compression=True)

    test_time = time.time() - t0
    avg_rank1 /= num_testsets
    avg_mAP /= num_testsets

    for key in results.keys():
        print('%s: rank1=%.1f%%, mAP=%.1f%%.'
              % (key, results[key][0] * 100, results[key][1] * 100))
    print('Average: rank1=%.2f%%, mAP=%.2f%%.\n\n' % (avg_rank1 * 100, avg_mAP * 100))

    result_file = osp.join(exp_database_dir, args.method,
                           args.sub_method[:-5] + '_avg_results.txt')
    with open(result_file, 'a') as f:
        f.write('%s/%s:\n' % (args.method, args.sub_method))
        if not args.evaluate:
            f.write('\t Loss: %.3f, acc: %.2f%%. ' % (loss, acc * 100))
            f.write("Train: %.0fs. " % train_time)
        f.write("Test: %.0fs. " % test_time)
        f.write('Rank1: %.2f%%, mAP: %.2f%%.\n' % (avg_rank1 * 100, avg_mAP * 100))
        for key in results.keys():
            f.write('\t %s: Rank1: %.1f%%, mAP: %.1f%%.\n'
                    % (key, results[key][0] * 100, results[key][1] * 100))
        f.write('\n')

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n'
              % (epoch + 1, loss, acc * 100))
        print("Total training time: %.3f sec. "
              "Average training time per epoch: %.3f sec."
              % (train_time, train_time / (args.epochs - start_epoch + 1)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
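# Worked example of the feature-map geometry used in main() (illustrative
# height/width; the stride factors come from the feamap_factor table): with
# height=384, width=128, and final_layer='layer3' (stride 16), the QAConv
# matcher operates on a 24 x 8 feature map.
#
#   hei = 384 // 16  # -> 24
#   wid = 128 // 16  # -> 8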