def main():
    global args

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu,
                              args=vars(args))
    print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion = get_criterion(dm.num_train_pids, use_gpu, args)
    regularizer = get_regularizer(vars(args))
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        try:
            checkpoint = torch.load(args.load_weights)
        except Exception as e:
            print(e)
            checkpoint = torch.load(args.load_weights,
                                    map_location={'cuda:0': 'cpu'})

        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        state = model.state_dict()
        state.update(checkpoint['state_dict'])
        model.load_state_dict(state)
        # args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      checkpoint['rank1']))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            print("Evaluating {} ...".format(name))
            queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
            galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
            distmat = test(model,
                           queryloader,
                           galleryloader,
                           use_gpu,
                           return_distmat=True,
                           name=name)

            from scipy import io as s_io
            fname = 'ABD-Net-' + name + '.mat'
            root_A = './distmat'
            if not os.path.exists(root_A):
                os.makedirs(root_A)
            fname = os.path.join(root_A, fname)
            s_io.savemat(fname, {'distmat': distmat})

            if args.visualize_ranks:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(args.save_dir, 'ranked_results', name),
                                         topk=20)
        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        oldenv = os.environ.get('sa', '')
        os.environ['sa'] = ''
        print("Train {} for {} epochs while keeping other layers frozen".format(
            args.open_layers, args.fixbase_epoch))

        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch, model, criterion, regularizer, optimizer, trainloader,
                  use_gpu, fixbase=True)
            train_time += round(time.time() - start_train_time)

        print("Done. All layers are open to train for {} epochs".format(
            args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)
        os.environ['sa'] = oldenv

    max_r1 = 0

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        print(epoch)
        print(criterion)
        train(epoch, model, criterion, regularizer, optimizer, trainloader,
              use_gpu, fixbase=False)
        train_time += round(time.time() - start_train_time)

        if use_gpu:
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()
        save_checkpoint(
            {
                'state_dict': state_dict,
                'rank1': 0,
                'epoch': epoch,
            }, False,
            osp.join(args.save_dir,
                     'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

        scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (
                epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")

            for name in args.target_names:
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
                galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
                rank1 = test(model, queryloader, galleryloader, use_gpu, name=name)
                ranklogger.write(name, epoch + 1, rank1)

                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()

                if max_r1 < rank1:
                    print('Save!', max_r1, rank1)
                    save_checkpoint(
                        {
                            'state_dict': state_dict,
                            'rank1': rank1,
                            'epoch': epoch,
                        }, False,
                        osp.join(args.save_dir, 'checkpoint_best.pth.tar'))
                    max_r1 = rank1

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
          format(elapsed, train_time))
    ranklogger.show_summary()
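
# The evaluate-only branch of the variant above writes each distance matrix to
# ./distmat/ABD-Net-<dataset>.mat. The helper below is a hypothetical sketch (it
# is not called anywhere in these scripts) showing how such a file could be read
# back for offline analysis; the file-name pattern and key are assumed from the
# saving code above.
def load_saved_distmat(dataset_name, root='./distmat'):
    """Load a distance matrix previously saved by the evaluate-only branch."""
    from scipy import io as s_io
    mat = s_io.loadmat(os.path.join(root, 'ABD-Net-' + dataset_name + '.mat'))
    # pairwise query-gallery distances, shape (num_query, num_gallery)
    return mat['distmat']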
def main():
    global use_apex
    global args

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu,
                              args=vars(args))
    print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    if use_gpu:
        print("using gpu")
        model = model.cuda()

    print("criterion===>")
    criterion = get_criterion(dm.num_train_pids, use_gpu, args)
    print(criterion)

    print("regularizer===>")
    regularizer = get_regularizer(vars(args))
    print(regularizer)

    print("optimizer===>")
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    print(optimizer)

    print("scheduler===>")
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'max',
                                                           factor=0.1,
                                                           patience=5,
                                                           verbose=True)
    print(scheduler)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        try:
            checkpoint = torch.load(args.load_weights)
        except Exception as e:
            print(e)
            checkpoint = torch.load(args.load_weights,
                                    map_location={'cuda:0': 'cpu'})

        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    max_r1 = 0

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        state = model.state_dict()
        state.update(checkpoint['state_dict'])
        model.load_state_dict(state)
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.start_epoch = checkpoint['epoch'] + 1
        max_r1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      checkpoint['rank1']))

    if use_apex:
        print("using apex")
        model, optimizer = amp.initialize(model, optimizer, opt_level="O0")

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            print("Evaluating {} ...".format(name))
            queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
            galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
            distmat = test(model,
                           queryloader,
                           galleryloader,
                           use_gpu,
                           return_distmat=True)

            if args.visualize_ranks:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(args.save_dir, 'ranked_results', name),
                                         topk=20)
        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        oldenv = os.environ.get('sa', '')
        os.environ['sa'] = ''
        print("Train {} for {} epochs while keeping other layers frozen".format(
            args.open_layers, args.fixbase_epoch))

        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch, model, criterion, regularizer, optimizer, trainloader,
                  use_gpu, fixbase=True)
            train_time += round(time.time() - start_train_time)

        print("Done. All layers are open to train for {} epochs".format(
            args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)
        os.environ['sa'] = oldenv

    for epoch in range(args.start_epoch, args.max_epoch):
        # reset the per-epoch timer before training starts
        start_train_time = time.time()
        auto_reset_learning_rate(optimizer, args)
        print(f"===========================start epoch {epoch + 1} {now()}===========================================")
        print(f"lr:{optimizer.param_groups[0]['lr']}")

        loss = train(epoch, model, criterion, regularizer, optimizer,
                     trainloader, use_gpu, fixbase=False)
        train_time += round(time.time() - start_train_time)

        state_dict = model.state_dict()
        rank1 = 0

        if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (
                epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")

            for name in args.target_names:
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query'], testloader_dict[name]['query_flip']
                galleryloader = testloader_dict[name]['gallery'], testloader_dict[name]['gallery_flip']
                rank1 = test(model, queryloader, galleryloader, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

                if max_r1 < rank1:
                    print('Save!', max_r1, rank1)
                    save_checkpoint(
                        {
                            'state_dict': state_dict,
                            'rank1': rank1,
                            'epoch': epoch,
                            'optimizer': optimizer.state_dict(),
                        }, False,
                        osp.join(args.save_dir, 'checkpoint_best.pth.tar'))
                    max_r1 = rank1

        save_checkpoint(
            {
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
                'optimizer': optimizer.state_dict(),
            }, False,
            osp.join(args.save_dir,
                     'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

        # ReduceLROnPlateau in 'max' mode is driven by rank-1 accuracy
        # (0 on epochs without evaluation)
        scheduler.step(rank1)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
          format(elapsed, train_time))
    ranklogger.show_summary()
def main():
    global args, dropout_optimizer

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu,
                              dropout_optimizer=dropout_optimizer)
    print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    # criterion = WrappedCrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
    criterion, fix_criterion, switch_criterion, htri_param_controller = get_criterions(
        dm.num_train_pids, use_gpu, args)
    regularizer, reg_param_controller = get_regularizer(args.regularizer)
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        try:
            checkpoint = torch.load(args.load_weights)
        except Exception as e:
            print(e)
            checkpoint = torch.load(args.load_weights,
                                    map_location={'cuda:0': 'cpu'})

        # dropout_optimizer.set_p(checkpoint.get('dropout_p', 0))
        # print(list(checkpoint.keys()), checkpoint['dropout_p'])

        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        state = model.state_dict()
        state.update(checkpoint['state_dict'])
        model.load_state_dict(state)
        # args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      checkpoint['rank1']))

    if use_gpu:
        model = nn.DataParallel(
            model,
            device_ids=list(range(len(args.gpu_devices.split(','))))).cuda()

    extract_train_info(model, trainloader)