def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # label-smoothed cross entropy plus center loss, each with its own optimizer
    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim, use_gpu=use_gpu)
    optimizer_model = torch.optim.Adam(model.parameters(), lr=args.lr,
                                       weight_decay=args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_cent,
              optimizer_model, optimizer_cent, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0:
            scheduler.step()

        # parses as (eval_step > 0 and hit the eval interval) or last epoch
        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
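# These scripts all rely on CrossEntropyLabelSmooth. A minimal sketch of what
# such a loss computes (uniform label smoothing; the repo's exact
# implementation may differ): the one-hot target becomes (1 - eps) for the
# true class and eps / num_classes for every other class.
import torch
import torch.nn as nn

class LabelSmoothCESketch(nn.Module):
    def __init__(self, num_classes, eps=0.1):
        super(LabelSmoothCESketch, self).__init__()
        self.num_classes = num_classes
        self.eps = eps
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.eps) * one_hot + self.eps / self.num_classes
        # mean over the batch of the smoothed negative log-likelihood
        return (-smoothed * log_probs).sum(dim=1).mean()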
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_vidreid_dataset(root=args.root, name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # decompose tracklets into images for image-based training
    new_train = []
    for img_paths, pid, camid in dataset.train:
        for img_path in img_paths:
            new_train.append((img_path, pid, camid))

    trainloader = DataLoader(
        ImageDataset(new_train, transform=transform_train),
        sampler=RandomIdentitySampler(new_train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and \
                (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
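# RandomIdentitySampler is what lets the triplet loss find positive pairs:
# every batch is built from identities that each contribute num_instances
# images. A minimal sketch of that sampling policy (illustrative, not the
# repo's exact implementation):
import random
from collections import defaultdict
from torch.utils.data import Sampler

class PKSamplerSketch(Sampler):
    def __init__(self, data_source, num_instances=4):
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_instances = num_instances

    def __iter__(self):
        indices = []
        for pid in random.sample(self.pids, len(self.pids)):
            pool = self.index_dic[pid]
            if len(pool) >= self.num_instances:
                indices.extend(random.sample(pool, self.num_instances))
            else:
                # sample with replacement when an identity has too few images
                indices.extend(random.choices(pool, k=self.num_instances))
        return iter(indices)

    def __len__(self):
        return len(self.pids) * self.num_instances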
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random',
                     transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    if args.arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids,
                                  sample_width=args.width,
                                  sample_height=args.height,
                                  sample_duration=args.seq_len)
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(args.pretrained_model))
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        checkpoint = torch.load(args.pretrained_model)
        # drop the classifier weights and the 'module.' prefix added by DataParallel
        state_dict = {}
        for key in checkpoint['state_dict']:
            if 'fc' in key:
                continue
            state_dict[key.partition("module.")[2]] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        model = models.init_model(name=args.arch,
                                  num_classes=dataset.num_train_pids,
                                  loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf
    if args.arch == 'resnet503d':
        torch.backends.cudnn.benchmark = False

    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)

        if args.stepsize > 0:
            scheduler.step()

        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
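# The resnet503d branch above rebuilds the checkpoint dict by hand to drop the
# 'module.' prefix that nn.DataParallel adds when saving. A reusable sketch of
# the same idea (the helper name is ours, not the repo's):
def strip_module_prefix(state_dict):
    return {(k.partition('module.')[2] if k.startswith('module.') else k): v
            for k, v in state_dict.items()}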
# init the logger
init_logger(args)

# data loading
dataset, trainloader, queryloader, galleryloader = init_data_loaders(args)
num_train_pids = dataset.num_train_pids

# init model
model = init_model(args, num_train_pids)
if use_gpu:
    model = nn.DataParallel(model).cuda()

vis = utils.get_visdom_for_current_run(args.save_dir,
                                       args.prefix + '_stage1_training')

# init objective functions
criterion_xent = CrossEntropyLabelSmooth(num_classes=num_train_pids, use_gpu=use_gpu)
criterion_htri = TripletLoss(margin=args.margin)

# init optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                             weight_decay=args.weight_decay)
if args.stepsize > 0:
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                    gamma=args.gamma)
start_epoch = args.start_epoch

# if only evaluation was needed
if args.evaluate:
    print("Evaluate only")
    test(model, queryloader, galleryloader, use_gpu, args)
    exit(0)
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train,
                     use_lmdb=args.use_lmdb, lmdb_path=dataset.train_lmdb_path),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.query_lmdb_path),  # was train_lmdb_path; query images live in the query LMDB
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.gallery_lmdb_path),  # was train_lmdb_path; same fix for the gallery
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        if check_isfile(args.load_weights):
            checkpoint = torch.load(args.load_weights)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model, queryloader, galleryloader, use_gpu,
                       return_distmat=True)
        if args.vis_ranked_res:
            visualize_ranked_results(
                distmat, dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and \
                (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
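# test(..., return_distmat=True) above hands visualize_ranked_results a
# query-by-gallery distance matrix. A sketch of the usual way such a matrix is
# computed from feature tensors (assuming squared Euclidean distance; the
# repo's test() may differ):
import torch

def euclidean_distmat(qf, gf):
    m, n = qf.size(0), gf.size(0)
    # ||q||^2 + ||g||^2 terms, broadcast to an (m, n) grid
    distmat = qf.pow(2).sum(dim=1, keepdim=True).expand(m, n) + \
              gf.pow(2).sum(dim=1, keepdim=True).expand(n, m).t()
    # subtract 2 * q.g in place: distmat += -2 * qf @ gf.t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    return distmat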
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        torchvision.transforms.RandomErasing(p=0.5, scale=(0.02, 0.4),
                                             ratio=(0.3, 3.33),
                                             value=(0.4914, 0.4822, 0.4465))
        # T.RandomErasing(probability=0.5, sh=0.4, mean=(0.4914, 0.4822, 0.4465)),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler2(dataset.train,
                                       batch_size=args.train_batch,
                                       num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    # embed()

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    # if args.stepsize > 0:
    #     scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

    # ------ Modify lr_schedule here ------
    current_schedule = init_lr_schedule(schedule=args.schedule,
                                        warm_up_epoch=args.warm_up_epoch,
                                        half_cos_period=args.half_cos_period,
                                        lr_milestone=args.lr_milestone,
                                        gamma=args.gamma,
                                        stepsize=args.stepsize)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=current_schedule)
    # ------ Please refer to the args.xxx for details of hyperparams ------
    # embed()

    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.schedule:
            scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and \
                (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        # T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainset = ImageDataset(dataset.train, transform=transform_train,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.train_lmdb_path)
    trainloader = DataLoader(
        trainset, batch_size=args.train_batch, shuffle=True,
        num_workers=args.workers, pin_memory=pin_memory, drop_last=True,
    )

    queryset = ImageDataset(dataset.query, transform=transform_test,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.query_lmdb_path)
    queryloader = DataLoader(
        queryset, batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=pin_memory, drop_last=False,
    )

    galleryset = ImageDataset(dataset.gallery, transform=transform_test,
                              use_lmdb=args.use_lmdb,
                              lmdb_path=dataset.gallery_lmdb_path)
    galleryloader = DataLoader(
        galleryset, batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'}, use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    # summary(model, (3, 160, 64))

    criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                        use_gpu=use_gpu)
    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.fixbase_epoch > 0:
        if hasattr(model, 'classifier') and isinstance(model.classifier, nn.Module):
            optimizer_tmp = init_optim(args.optim, model.classifier.parameters(),
                                       args.fixbase_lr, args.weight_decay)
        else:
            print("Warn: model has no attribute 'classifier' and fixbase_epoch is reset to 0")
            args.fixbase_epoch = 0

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        if torch.cuda.is_available():
            checkpoint = torch.load(args.load_weights)
        else:
            checkpoint = torch.load(args.load_weights, map_location='cpu')
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        print("Train classifier for {} epochs while keeping base network frozen".format(args.fixbase_epoch))
        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch, model, criterion, optimizer_tmp, trainloader, use_gpu,
                  freeze_bn=True)
            train_time += round(time.time() - start_train_time)
        del optimizer_tmp
        print("Now open all layers for training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and \
                (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
            '''
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch,
            }, True, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))
            '''

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
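# The fixbase stage above trains only the classifier by handing optimizer_tmp
# just model.classifier.parameters(). An equivalent way to express the same
# intent, sketched here for clarity (not the repo's mechanism), is to toggle
# requires_grad on everything outside the classifier head:
def set_backbone_frozen(model, frozen=True):
    for name, param in model.named_parameters():
        if not name.startswith('classifier'):
            param.requires_grad = not frozen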
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(name=args.dataset,
                                                dataset_dir=args.root,
                                                fore_dir=args.fore_dir)

    transform_train = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ST.RandomErasing(0.5)
    ])

    transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset_hardSplit_seg(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids)
    print(model)

    criterion_xent = CrossEntropyLabelSmooth(use_gpu=use_gpu)
    criterion_htri = TripletLoss()
    criterion_mask = MaskLoss()
    criterion_split = HardSplitLoss()
    criterion_cluster = ClusterLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_mask,
              criterion_split, criterion_cluster, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) > args.start_eval and (epoch + 1) % args.eval_step == 0 or epoch == 0:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
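# Every training loop here funnels weights through
# save_checkpoint(state, is_best, fpath). A minimal sketch of the conventional
# behavior (the repo's helper may name the best-model file differently):
import os.path as osp
import shutil
import torch

def save_checkpoint_sketch(state, is_best, fpath):
    torch.save(state, fpath)
    if is_best:
        # keep a stable copy of the best run next to the per-epoch files
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))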
def load_state_dict(network, state_dict=''):
    if state_dict:
        checkpoint = torch.load(state_dict)
        network.load_state_dict(checkpoint)
    return network

model = models.init_model(name='mobilenet_ifn', num_classes=len(class_names))
print(model)
model = model.cuda()

state_dict = opt.resume
load_state_dict(model, state_dict)

criterion = CrossEntropyLabelSmooth(num_classes=len(class_names), use_gpu=use_gpu)
# criterion = nn.CrossEntropyLoss()

# for mobilenetv2
optimizer_ft = optim.SGD(model.parameters(), lr=0.01, momentum=0.9,
                         weight_decay=5e-4, nesterov=True)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)

# for resnet50
# ignored_params = list(map(id, model.fc.parameters()))
# base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
# optimizer_ft = optim.SGD([
#     {'params': base_params, 'lr': 0.1*0.05},
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random',
                     transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    # timing pass over the train loader only; no optimization happens here
    start_time = time.time()
    print(start_time)
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        print(batch_idx)
        print('x')
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        # Variable is a deprecated no-op in modern PyTorch; kept from the original
        imgs, pids = Variable(imgs), Variable(pids)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    # tensorboardX
    # writer = SummaryWriter(log_dir=osp.join(args.save_dir,'summary'))

    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    if args.random_erasing:
        transform_train = T.Compose([
            T.Random2DTranslation(args.height, args.width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=args.probability, mean=[0.0, 0.0, 0.0]),
        ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    if args.loss == 'xent,htri':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
            batch_size=args.train_batch, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )
    elif args.loss == 'xent':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss=args.loss)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    if args.stepsize > 0:
        if not args.warmup:
            scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                            gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    def adjust_lr(optimizer, ep):
        # hand-written warm-up staircase used when args.warmup is set
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            # lr = 1e-3 * len(args.gpu_devices)
            lr = 1e-3
        elif ep < 180:
            # lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        elif ep < 300:
            # lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        elif ep < 320:
            # lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * len(args.gpu_devices)
            # note: (ep - 320) is negative in this branch, kept as in the original
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80)
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            # lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        else:
            # lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        for p in optimizer.param_groups:
            p['lr'] = lr

    length = len(trainloader)
    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    # best_rerank1 = -np.inf
    # best_rerankepoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        if args.stepsize > 0:
            if args.warmup:
                adjust_lr(optimizer, epoch + 1)
            else:
                scheduler.step()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu=use_gpu, summary=None, length=length)
        train_time += round(time.time() - start_train_time)

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and \
                (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(epoch, model, queryloader, galleryloader,
                         use_gpu=True, summary=None)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            ####### Best Rerank
            # is_rerankbest = rerank1 > best_rerank1
            # if is_rerankbest:
            #     best_rerank1 = rerank1
            #     best_rerankepoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    # writer.close()  # the SummaryWriter above is commented out; calling this would raise NameError

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))
    # print("==> Best Rerank-1 {:.1%}, achieved at epoch {}".format(best_rerank1, best_rerankepoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def testseq(dataset_name, use_gpu):
    dataset_root = './video2img/track1_sct_img_test_big/'
    dataset = Graph_data_manager.AICityTrack2(root=dataset_root)

    width = 224
    height = 224
    transform_train = T.Compose([
        T.Random2DTranslation(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((height, width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    seq_len = 4
    num_instance = 4
    train_batch = 32
    test_batch = 1

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=seq_len, sample='dense',
                     transform=transform_test),
        batch_size=test_batch, shuffle=False, num_workers=4,
        pin_memory=pin_memory, drop_last=False,
    )

    arch = "resnet50ta"
    pretrained_model = "./log/track12_ta224_checkpoint_ep500.pth.tar"
    start_epoch = 0

    print("Initializing model: {}".format(arch))
    dataset.num_train_pids = 517
    if arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids,
                                  sample_width=width, sample_height=height,
                                  sample_duration=seq_len)
        if not os.path.exists(pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(pretrained_model))
        print("Loading checkpoint from '{}'".format(pretrained_model))
        checkpoint = torch.load(pretrained_model)
        # drop classifier weights and the 'module.' prefix from DataParallel
        state_dict = {}
        for key in checkpoint['state_dict']:
            if 'fc' in key:
                continue
            state_dict[key.partition("module.")[2]] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        if not os.path.exists(pretrained_model):
            model = models.init_model(name=arch,
                                      num_classes=dataset.num_train_pids,
                                      loss={'xent', 'htri'})
        else:
            model = models.init_model(name=arch,
                                      num_classes=dataset.num_train_pids,
                                      loss={'xent', 'htri'})
            checkpoint = torch.load(pretrained_model)
            model.load_state_dict(checkpoint['state_dict'])
            start_epoch = checkpoint['epoch'] + 1
            print("Loaded checkpoint from '{}'".format(pretrained_model))
            print("- start_epoch: {}\n- rank1: {}".format(start_epoch, checkpoint['rank1']))

    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=0.3)

    lr = 0.0003
    gamma = 0.1
    stepsize = 200
    weight_decay = 5e-04
    optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                                 weight_decay=weight_decay)
    if stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=stepsize, gamma=gamma)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    test(model, queryloader, 'avg', use_gpu, dataset, -1, meta_data_tab=None)
def main():
    set_seed(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True

    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    args.gpu = 0
    args.world_size = 1
    if args.distributed:
        set_seed(args.local_rank)
        args.gpu = args.local_rank
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    if args.local_rank == 0:
        logging.info("args = {}".format(args))
        logging.info("unparsed_args = {}".format(unparsed))
        logging.info("distributed = {}".format(args.distributed))
        logging.info("sync_bn = {}".format(args.sync_bn))
        logging.info("opt_level = {}".format(args.opt_level))
        logging.info("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32))
        logging.info("loss_scale = {}".format(args.loss_scale))
        logging.info("CUDNN VERSION: {}".format(torch.backends.cudnn.version()))

    # create model
    if args.model == 'MobileNetV3_Large':
        model = MobileNetV3_Large(args.num_classes, args.dropout_rate,
                                  args.zero_init_last_bn)
    elif args.model == 'MobileNetV3_Small':
        model = MobileNetV3_Small(args.num_classes, args.dropout_rate,
                                  args.zero_init_last_bn)
    else:
        raise Exception('invalid type of model')

    if args.sync_bn:
        if args.local_rank == 0:
            logging.info("using apex synced BN")
        model = parallel.convert_syncbn_model(model)

    model = model.cuda().to(memory_format=memory_format) \
        if memory_format is not None else model.cuda()

    # define criterion and optimizer
    if args.label_smooth > 0.0:
        criterion = CrossEntropyLabelSmooth(args.num_classes, args.label_smooth)
    else:
        criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    params = get_params(model) if args.no_wd_bias_bn else model.parameters()
    optimizer = torch.optim.SGD(params, args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Initialize Amp
    if args.opt_level is not None:
        model, optimizer = amp.initialize(
            model, optimizer,
            opt_level=args.opt_level,
            keep_batchnorm_fp32=args.keep_batchnorm_fp32,
            loss_scale=args.loss_scale)

    # For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
    # This must be done AFTER the call to amp.initialize.
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)
    else:
        model = nn.DataParallel(model)

    # exponential moving average
    if args.ema_decay > 0.0:
        ema = EMA(model, args.ema_decay)
        ema.register()
    else:
        ema = None

    # define transform and initialize dataloader
    batch_size = args.batch_size // args.world_size
    workers = args.workers // args.world_size
    if args.trans_mode == 'tv':
        train_transform = get_train_transform(args.color_jitter)
        val_transform = get_val_transform()
        train_dataset = ImageList(root=args.train_root,
                                  list_path=args.train_list,
                                  transform=train_transform)
        val_dataset = ImageList(root=args.val_root,
                                list_path=args.val_list,
                                transform=val_transform)
        train_sampler = None
        val_sampler = None
        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_dataset, shuffle=True)
            val_sampler = torch.utils.data.distributed.DistributedSampler(
                val_dataset, shuffle=False)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size, num_workers=workers,
            pin_memory=True, sampler=train_sampler,
            shuffle=(train_sampler is None))
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=batch_size, num_workers=workers,
            pin_memory=True, sampler=val_sampler, shuffle=False)
        args.batches_per_epoch = len(train_loader)
    elif args.trans_mode == 'dali':
        pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
                               device_id=args.local_rank, root=args.train_root,
                               list_path=args.train_list, crop=224,
                               shard_id=args.local_rank,
                               num_shards=args.world_size,
                               coji=args.color_jitter,
                               dali_cpu=args.dali_cpu)
        pipe.build()
        train_loader = DALIClassificationIterator(
            pipe, size=int(pipe.epoch_size("Reader") / args.world_size))
        args.batches_per_epoch = train_loader._size // train_loader.batch_size
        args.batches_per_epoch += (train_loader._size % train_loader.batch_size) != 0

        pipe = HybridValPipe(batch_size=batch_size, num_threads=workers,
                             device_id=args.local_rank, root=args.val_root,
                             list_path=args.val_list, size=256, crop=224,
                             shard_id=args.local_rank,
                             num_shards=args.world_size,
                             dali_cpu=args.dali_cpu)
        pipe.build()
        val_loader = DALIClassificationIterator(
            pipe, size=int(pipe.epoch_size("Reader") / args.world_size))
    else:
        raise Exception('invalid image transformation mode')

    # define learning rate scheduler
    scheduler = get_lr_scheduler(optimizer)
    best_acc_top1 = 0
    best_acc_top5 = 0
    start_epoch = 0

    # restart from snapshot
    if args.snapshot and os.path.isfile(args.snapshot):
        if args.local_rank == 0:
            logging.info('loading snapshot from {}'.format(args.snapshot))
        checkpoint = torch.load(
            args.snapshot,
            map_location=lambda storage, loc: storage.cuda(args.gpu))
        start_epoch = checkpoint['epoch']
        best_acc_top1 = checkpoint['best_acc_top1']
        best_acc_top5 = checkpoint['best_acc_top5']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        if checkpoint['ema'] is not None:
            ema.load_state_dict(checkpoint['ema'])
        if args.opt_level is not None:
            amp.load_state_dict(checkpoint['amp'])
        scheduler = get_lr_scheduler(optimizer)

        # replay the schedule up to start_epoch so the lr resumes correctly
        for epoch in range(start_epoch):
            if epoch < args.warmup_epochs:
                adjust_learning_rate(optimizer, scheduler, epoch, -1)
                warmup_lr = get_last_lr(optimizer)
                if args.local_rank == 0:
                    logging.info('Epoch: %d, Warming-up lr: %e', epoch, warmup_lr)
            else:
                current_lr = get_last_lr(optimizer)
                if args.local_rank == 0:
                    logging.info('Epoch: %d lr %e', epoch, current_lr)

            if epoch < args.warmup_epochs:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                if args.lr_scheduler in ['linear_epoch', 'cosine_epoch', 'step_epoch']:
                    adjust_learning_rate(optimizer, scheduler, epoch, -1)
                if args.lr_scheduler in ['linear_batch', 'cosine_batch', 'step_batch']:
                    for batch_idx in range(args.batches_per_epoch):
                        adjust_learning_rate(optimizer, scheduler, epoch, batch_idx)

    # the main loop
    for epoch in range(start_epoch, args.epochs):
        if epoch < args.warmup_epochs:
            adjust_learning_rate(optimizer, scheduler, epoch, -1)
            warmup_lr = get_last_lr(optimizer)
            if args.local_rank == 0:
                logging.info('Epoch: %d, Warming-up lr: %e', epoch, warmup_lr)
        else:
            current_lr = get_last_lr(optimizer)
            if args.local_rank == 0:
                logging.info('Epoch: %d lr %e', epoch, current_lr)

        if args.distributed and args.trans_mode == 'tv':
            train_sampler.set_epoch(epoch)

        epoch_start = time.time()
        train_acc, train_obj = train(train_loader, model, ema, criterion,
                                     optimizer, scheduler, epoch)
        if args.local_rank == 0:
            logging.info('Train_acc: %f', train_acc)

        val_acc_top1, val_acc_top5, val_obj = validate(val_loader, model, criterion)
        if args.local_rank == 0:
            logging.info('Val_acc_top1: %f', val_acc_top1)
            logging.info('Val_acc_top5: %f', val_acc_top5)
            logging.info('Epoch time: %ds.', time.time() - epoch_start)

        if args.local_rank == 0:
            is_best = False
            if val_acc_top1 > best_acc_top1:
                best_acc_top1 = val_acc_top1
                best_acc_top5 = val_acc_top5
                is_best = True
            save_checkpoint({
                'epoch': epoch + 1,
                'model': model.state_dict(),
                'ema': ema.state_dict() if ema is not None else None,
                'best_acc_top1': best_acc_top1,
                'best_acc_top5': best_acc_top5,
                'optimizer': optimizer.state_dict(),
                'amp': amp.state_dict() if args.opt_level is not None else None,
            }, is_best, args.save)

        if epoch < args.warmup_epochs:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr
        else:
            adjust_learning_rate(optimizer, scheduler, epoch, -1)

        if args.trans_mode == 'dali':
            train_loader.reset()
            val_loader.reset()
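# The snapshot above stores ema.state_dict() alongside the raw weights. A
# minimal sketch of such a weight-EMA helper (register/update interface
# assumed from the calls in main; the repo's EMA class may differ):
import torch

class EMASketch:
    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        self.shadow = {}

    def register(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.shadow[name] = param.detach().clone()

    def update(self):
        # shadow = decay * shadow + (1 - decay) * param, after each optimizer step
        with torch.no_grad():
            for name, param in self.model.named_parameters():
                if name in self.shadow:
                    self.shadow[name].mul_(self.decay).add_(param, alpha=1 - self.decay)

    def state_dict(self):
        return self.shadow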
def main():
    runId = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, runId)
    if not os.path.exists(cfg.OUTPUT_DIR):
        os.mkdir(cfg.OUTPUT_DIR)
    print(cfg.OUTPUT_DIR)

    torch.manual_seed(cfg.RANDOM_SEED)
    random.seed(cfg.RANDOM_SEED)
    np.random.seed(cfg.RANDOM_SEED)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    use_gpu = torch.cuda.is_available() and cfg.MODEL.DEVICE == "cuda"

    if not cfg.EVALUATE_ONLY:
        sys.stdout = Logger(osp.join(cfg.OUTPUT_DIR, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(cfg.OUTPUT_DIR, 'log_test.txt'))
    print("==========\nConfigs:{}\n==========".format(cfg))

    if use_gpu:
        print("Currently using GPU {}".format(cfg.MODEL.DEVICE_ID))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(cfg.RANDOM_SEED)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(cfg.DATASETS.NAME))
    dataset = data_manager.init_dataset(root=cfg.DATASETS.ROOT_DIR,
                                        name=cfg.DATASETS.NAME)

    print("Initializing model: {}".format(cfg.MODEL.NAME))
    if cfg.MODEL.ARCH == 'video_baseline':
        torch.backends.cudnn.benchmark = False
        model = models.init_model(name=cfg.MODEL.ARCH,
                                  num_classes=625,
                                  pretrain_choice=cfg.MODEL.PRETRAIN_CHOICE,
                                  last_stride=cfg.MODEL.LAST_STRIDE,
                                  neck=cfg.MODEL.NECK,
                                  model_name=cfg.MODEL.NAME,
                                  neck_feat=cfg.TEST.NECK_FEAT,
                                  model_path=cfg.MODEL.PRETRAIN_PATH)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    transform_train = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TRAIN),
        T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
        T.Pad(cfg.INPUT.PADDING),
        T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
    ])

    transform_test = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TEST),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    pin_memory = True if use_gpu else False
    cfg.DATALOADER.NUM_WORKERS = 0

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=cfg.DATASETS.SEQ_LEN,
                     sample=cfg.DATASETS.TRAIN_SAMPLE_METHOD,
                     transform=transform_train,
                     dataset_name=cfg.DATASETS.NAME),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=cfg.DATALOADER.NUM_INSTANCE),
        batch_size=cfg.SOLVER.SEQS_PER_BATCH,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=pin_memory, drop_last=True)

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=cfg.DATASETS.SEQ_LEN,
                     sample=cfg.DATASETS.TEST_SAMPLE_METHOD,
                     transform=transform_test,
                     max_seq_len=cfg.DATASETS.TEST_MAX_SEQ_NUM,
                     dataset_name=cfg.DATASETS.NAME),
        batch_size=cfg.TEST.SEQS_PER_BATCH, shuffle=False,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=pin_memory, drop_last=False)

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=cfg.DATASETS.SEQ_LEN,
                     sample=cfg.DATASETS.TEST_SAMPLE_METHOD,
                     transform=transform_test,
                     max_seq_len=cfg.DATASETS.TEST_MAX_SEQ_NUM,
                     dataset_name=cfg.DATASETS.NAME),
        batch_size=cfg.TEST.SEQS_PER_BATCH, shuffle=False,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=pin_memory, drop_last=False,
    )

    if cfg.MODEL.SYN_BN:
        if use_gpu:
            model = nn.DataParallel(model)
            if cfg.SOLVER.FP_16:
                model = apex.parallel.convert_syncbn_model(model)
            model.cuda()

    start_time = time.time()
    xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids)
    tent = TripletLoss(cfg.SOLVER.MARGIN)

    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)

    # metrics = test(model, queryloader, galleryloader, cfg.TEST.TEMPORAL_POOL_METHOD, use_gpu)
    no_rise = 0
    best_rank1 = 0
    start_epoch = 0
    for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCHS):
        # if no_rise == 10:
        #     break
        scheduler.step()
        print("no_rise:", no_rise)
        print("==> Epoch {}/{}".format(epoch + 1, cfg.SOLVER.MAX_EPOCHS))
        print("current lr:", scheduler.get_lr()[0])

        train(model, trainloader, xent, tent, optimizer, use_gpu)

        if cfg.SOLVER.EVAL_PERIOD > 0 and \
                ((epoch + 1) % cfg.SOLVER.EVAL_PERIOD == 0 or (epoch + 1) == cfg.SOLVER.MAX_EPOCHS):
            print("==> Test")
            metrics = test(model, queryloader, galleryloader,
                           cfg.TEST.TEMPORAL_POOL_METHOD, use_gpu)
            rank1 = metrics[0]
            if rank1 > best_rank1:
                best_rank1 = rank1
                no_rise = 0
            else:
                no_rise += 1
                continue

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            torch.save(state_dict,
                       osp.join(cfg.OUTPUT_DIR,
                                "rank1_" + str(rank1) + '_checkpoint_ep' + str(epoch + 1) + '.pth'))
            # best_p = osp.join(cfg.OUTPUT_DIR, "rank1_" + str(rank1) + '_checkpoint_ep' + str(epoch + 1) + '.pth')

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
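# WarmupMultiStepLR above ramps the lr up before applying milestone decay. A
# sketch of the multiplicative factor such a scheduler applies to the base lr
# (mirrors the common reid-baseline implementation with linear warm-up; the
# class used here may differ in details):
from bisect import bisect_right

def warmup_multistep_factor(epoch, milestones, gamma, warmup_factor, warmup_iters):
    warm = 1.0
    if epoch < warmup_iters:
        # interpolate linearly from warmup_factor up to 1
        alpha = epoch / warmup_iters
        warm = warmup_factor * (1 - alpha) + alpha
    # one gamma per milestone already passed
    return warm * gamma ** bisect_right(sorted(milestones), epoch)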
    # Load the pretrained model if one is given.
    epoch = 0
    if opt.pretrained_model != "":
        load_pretrained_weights(model, opt.pretrained_model)
        # Recover the epoch number from a checkpoint name of the form
        # ".../<name>-<epoch>".
        epoch = int((opt.pretrained_model.split("/")[-1]).split("-")[-1])

    # If only evaluation is required.
    if opt.evaluate:
        print("-- evaluate only")
        test(epoch, model)
        exit(0)

    # Define optimizer and loss functions.
    person_id_criterion = CrossEntropyLabelSmooth(
        train_dataset.number_classes(), use_gpu=opt.cuda)
    attribute_criterion = AttributeCriterion(attribute_choices,
                                             CrossEntropyLabelSmooth)
    triplet_criterion = TripletLoss(opt.margin)
    optimizer = optim.Adam(model.parameters(), lr=opt.lr,
                           weight_decay=5e-4)  # default lr = 3e-4

    print("Using triplet loss = ", triplet_criterion)
    print("Using person_id = ", person_id_criterion)
    print("Using Attribute loss = ", attribute_criterion)
    print("Optimizer = ", optimizer)

    # Scheduler creation.
    lr_scheduler, num_epochs = create_scheduler(opt, optimizer)
    if epoch > 0:
        # Assumed completion: fast-forward a timm-style scheduler to the
        # resumed epoch so the learning rate matches where training stopped.
        lr_scheduler.step(epoch)
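# The fragment above calls a load_pretrained_weights helper that is not shown.
# A minimal sketch of what such partial loading typically looks like (the
# helper name comes from the call above; the body is an assumption modeled on
# the explicit key-filtering code in the next script): copy only tensors whose
# names and shapes match, e.g. to skip a classifier head trained for a
# different number of identities.
import torch


def load_pretrained_weights(model, weight_path):
    # Accept both raw state dicts and {'state_dict': ...} checkpoints.
    checkpoint = torch.load(weight_path, map_location='cpu')
    state_dict = checkpoint.get('state_dict', checkpoint)
    model_dict = model.state_dict()
    matched = {k: v for k, v in state_dict.items()
               if k in model_dict and v.size() == model_dict[k].size()}
    model_dict.update(matched)
    model.load_state_dict(model_dict)
    print("Loaded {}/{} tensors from '{}'".format(
        len(matched), len(model_dict), weight_path))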
def main():
    args.save_dir = args.save_dir + '/' + args.arch
    torch.manual_seed(args.seed)
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    # Append the dataset name and run tag to save_dir.
    args.save_dir = args.save_dir + '_' + args.dataset + '_combined_multisteplr11'
    if args.pretrained_model is not None:
        args.save_dir = os.path.dirname(args.pretrained_model)
    if not osp.exists(args.save_dir):
        os.makedirs(args.save_dir)

    log_name = 'test.log' if args.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    print("Train Transforms:\n"
          "  Random2DTranslation,\n"
          "  RandomHorizontalFlip,\n"
          "  ToTensor,\n"
          "  Normalize")
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len,
                     sample=args.data_selection, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )
    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len,
                     sample='dense', transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )
    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len,
                     sample='dense', transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              seq_len=args.seq_len)

    # Pretrained model loading: copy over only the weights whose names and
    # shapes match the current model.
    if args.pretrained_model is not None:
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(
                args.pretrained_model))
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        pretrained_state = torch.load(args.pretrained_model)['state_dict']
        print(len(pretrained_state), ' keys in pretrained model')
        current_model_state = model.state_dict()
        pretrained_state = {
            key: val for key, val in pretrained_state.items()
            if key in current_model_state
            and val.size() == current_model_state[key].size()
        }
        print(len(pretrained_state),
              ' keys in pretrained model are available in current model')
        current_model_state.update(pretrained_state)
        model.load_state_dict(current_model_state)

    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf
    for epoch in range(start_epoch, args.max_epoch):
        # Rough ETA: elapsed time scaled by the ratio of remaining epochs.
        eta_seconds = (time.time() - start_time) * \
            (args.max_epoch - epoch) / max(epoch, 1)
        eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
        print("==> Epoch {}/{} \teta {}".format(epoch + 1, args.max_epoch, eta_str))

        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
        if args.stepsize > 0:
            scheduler.step()

        rank1 = 'NA'
        mAP = 'NA'
        is_best = False
        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 \
                or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1, mAP = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1

        # Save the model as required.
        if (epoch + 1) % args.save_step == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, args.save_prefix,
                                 'model' + '.pth.tar-' + str(epoch + 1)))

        # After the first epoch's verbose output, silence repeated prints.
        utils.disable_all_print_once()

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
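# These scripts all pair CrossEntropyLabelSmooth with a TripletLoss whose
# implementation is not shown. A minimal sketch of the usual batch-hard
# variant (Hermans et al., "In Defense of the Triplet Loss") consistent with
# the TripletLoss(margin=...) calls above; it relies on RandomIdentitySampler
# guaranteeing several instances of each identity per batch.
import torch
import torch.nn as nn


class TripletLoss(nn.Module):
    """For each anchor, mine the hardest positive and hardest negative in
    the batch, then apply a margin ranking loss."""

    def __init__(self, margin=0.3):
        super().__init__()
        self.margin = margin
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        n = inputs.size(0)
        # Pairwise Euclidean distance matrix, shape (n, n).
        dist = torch.cdist(inputs, inputs)
        # mask[i, j] is True when samples i and j share an identity.
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
            dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
        dist_ap = torch.cat(dist_ap)  # hardest positive per anchor
        dist_an = torch.cat(dist_an)  # hardest negative per anchor
        # Penalize whenever dist_an < dist_ap + margin.
        y = torch.ones_like(dist_an)
        return self.ranking_loss(dist_an, dist_ap, y)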
def main():
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    use_gpu = torch.cuda.is_available()

    # Seed every RNG source and request deterministic cuDNN kernels.
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    # NOTE: benchmark=True can reintroduce nondeterminism despite the flag above.
    cudnn.benchmark = True

    print("Initializing train dataset {}".format(args.train_dataset))
    train_dataset = data_manager.init_dataset(name=args.train_dataset)
    print("Initializing test dataset {}".format(args.test_dataset))
    test_dataset = data_manager.init_dataset(name=args.test_dataset)

    transform_train = T.Compose([
        T.Resize([args.height, args.width]),
        T.RandomHorizontalFlip(),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
    ])
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # Train sampling options: random_snip, first_snip, constrain_random, evenly.
    trainloader = DataLoader(
        VideoDataset(train_dataset.train, seq_len=args.seq_len,
                     sample='constrain_random', transform=transform_train),
        sampler=RandomIdentitySampler(train_dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )
    queryloader = DataLoader(
        VideoDataset(test_dataset.query, seq_len=args.seq_len,
                     sample='evenly', transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )
    galleryloader = DataLoader(
        VideoDataset(test_dataset.gallery, seq_len=args.seq_len,
                     sample='evenly', transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=train_dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    if args.load_model != '':
        print("load model {0} from {1}".format(args.arch, args.load_model))
        pretrained_model = torch.load(args.load_model)
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v for k, v in pretrained_model['state_dict'].items()
            if k in model_dict
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = pretrained_model['epoch'] + 1
        best_rank1 = pretrained_model['rank1']
    else:
        start_epoch = args.start_epoch
        best_rank1 = -np.inf

    criterion = dict()
    criterion['triplet'] = WeightedRegularizedTriplet()
    criterion['xent'] = CrossEntropyLabelSmooth(
        num_classes=train_dataset.num_train_pids)
    criterion['center'] = CenterLoss(num_classes=train_dataset.num_train_pids,
                                     feat_dim=512, use_gpu=True)
    print(criterion)

    # The class centers are trainable parameters, so they get their own
    # optimizer with a larger learning rate than the model.
    optimizer = dict()
    optimizer['model'] = model.get_optimizer(args)
    optimizer['center'] = torch.optim.SGD(criterion['center'].parameters(), lr=0.5)
    scheduler = lr_scheduler.MultiStepLR(optimizer['model'],
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    print(model)
    model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu,
             return_distmat=True)
        return

    start_time = time.time()
    train_time = 0
    best_epoch = args.start_epoch
    print("==> Start training")
    for epoch in range(start_epoch, args.max_epoch):
        scheduler.step()
        print('Epoch', epoch, 'lr', scheduler.get_lr()[0])
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if (epoch + 1) > args.start_eval and args.eval_step > 0 \
                and (epoch + 1) % args.eval_step == 0 \
                or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir,
                                 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. "
          "Training time (h:m:s): {}.".format(elapsed, train_time))
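# The center criterion above is the only loss with its own parameters, which
# is why it gets a dedicated SGD optimizer. A minimal sketch of center loss
# (Wen et al., ECCV 2016), consistent with the CenterLoss(num_classes,
# feat_dim, use_gpu) calls in these scripts; repository implementations may
# differ in details such as distance clamping or sum-vs-mean reduction.
import torch
import torch.nn as nn


class CenterLoss(nn.Module):
    """Pulls each feature toward a learnable center for its class."""

    def __init__(self, num_classes, feat_dim, use_gpu=True):
        super().__init__()
        centers = torch.randn(num_classes, feat_dim)
        if use_gpu:
            centers = centers.cuda()
        # One trainable center per identity.
        self.centers = nn.Parameter(centers)

    def forward(self, features, labels):
        # Squared distance between each feature and the center of its label,
        # averaged over the batch.
        batch_centers = self.centers[labels]
        return ((features - batch_centers) ** 2).sum(dim=1).mean() / 2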
def main():
    torch.manual_seed(args.seed)  # seed the CPU RNG so results are deterministic
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices  # select GPUs in code
    use_gpu = torch.cuda.is_available()  # True only if the environment supports CUDA
    if args.use_cpu:
        use_gpu = False

    # Not evaluating means training: write the training log; otherwise the test log.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))  # print all arguments

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True  # small training speedup at no extra cost
        torch.cuda.manual_seed_all(args.seed)  # seed the GPU RNGs for determinism
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)  # loaded via data_manager.py

    # T.Compose chains several transforms together (import transforms as T).
    transform_train = T.Compose([
        # With some probability, enlarge the image to (1 + 1/8) of the target
        # size and then take a random crop.
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),  # flip the PIL image horizontally with p=0.5
        T.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor
        # Normalize each channel: input[c] = (input[c] - mean[c]) / std[c]
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),  # resize the PIL image to the given size
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # pin_memory=True allocates tensors in page-locked memory, which makes
    # host-to-GPU copies faster.
    pin_memory = True if use_gpu else False

    # DataLoader combines a dataset and a sampler and provides a single- or
    # multi-process iterator over the data.
    trainloader = DataLoader(
        # VideoDataset: a dataset for video-based person re-id (training split,
        # sequence length, random sampling, train-time augmentation).
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random',
                     transform=transform_train),
        # Randomly sample N identities, then K instances per identity,
        # giving a batch size of N * K.
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,  # number of worker processes
        pin_memory=pin_memory,
        drop_last=True,  # drop the last incomplete batch
    )
    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,  # no reshuffling at test time
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,  # keep the last, smaller batch at test time
    )
    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    if args.arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids,
                                  sample_width=args.width,
                                  sample_height=args.height,
                                  sample_duration=args.seq_len)
        # The 3D ResNet requires pretrained weights; fail early if missing.
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(
                args.pretrained_model))
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        checkpoint = torch.load(args.pretrained_model)
        # Copy parameters from the checkpoint, skipping the fc classifier and
        # stripping the "module." prefix left by DataParallel.
        state_dict = {}
        for key in checkpoint['state_dict']:
            if 'fc' in key:
                continue
            state_dict[key.partition("module.")[2]] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        model = models.init_model(name=args.arch,
                                  num_classes=dataset.num_train_pids,
                                  loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Losses: xent is label-smoothed softmax cross-entropy; htri is the triplet loss.
    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                             use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    # stepsize > 0 enables step decay: StepLR multiplies the learning rate
    # by gamma every step_size epochs.
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch  # manual epoch numbering (useful on restarts)

    if use_gpu:
        model = nn.DataParallel(model).cuda()  # multi-GPU training

    if args.evaluate:
        # NOTE: evaluating here is only meaningful if a saved checkpoint has
        # been loaded; otherwise randomly initialized weights are tested.
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf  # start at negative infinity
    if args.arch == 'resnet503d':
        torch.backends.cudnn.benchmark = False

    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
        if args.stepsize > 0:
            scheduler.step()

        # Evaluate every eval_step epochs, and always at the final epoch.
        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 \
                or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
            if use_gpu:
                # state_dict() returns all module state: parameters and buffers.
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir,
                                 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))  # time difference as h:m:s
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
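# Every script in this collection builds a CrossEntropyLabelSmooth criterion.
# A minimal sketch of that loss (Szegedy et al., 2016), matching the
# CrossEntropyLabelSmooth(num_classes, use_gpu) calls above: the one-hot
# target is softened to (1 - epsilon) on the true class and
# epsilon / num_classes elsewhere, which discourages overconfident logits.
import torch
import torch.nn as nn


class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy with label smoothing regularization."""

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        # inputs: raw logits of shape (batch, num_classes); targets: class indices.
        log_probs = self.logsoftmax(inputs)
        targets = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1), 1)  # one-hot encode
        if self.use_gpu:
            targets = targets.cuda()
        # Smooth the one-hot distribution.
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        return (-targets * log_probs).mean(0).sum()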