def train(**kwargs):
    opt._parse(kwargs)
    opt.model_name = 'bfe_test'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True)

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    # horizontally flipped copies (TestTransform(..., True)) for flip-averaged evaluation
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('initializing model ...')
    model = BFE(dataset.num_train_pids, 1.0, 0.33)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items()
        #               if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader,
                                queryFliploader, galleryFliploader,
                                re_ranking=opt.re_ranking, savefig=opt.savefig)
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    if opt.loss == 'triplet':
        embedding_criterion = TripletLoss(opt.margin)
    elif opt.loss == 'lifted':
        embedding_criterion = LiftedStructureLoss(hard_mining=True)
    elif opt.loss == 'weight':
        embedding_criterion = Margin()

    def criterion(triplet_y, softmax_y, labels):
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr,
                                    momentum=0.9, weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 10:  # warm-up
            lr = opt.lr * 0.1 * (ep / 10.0)
        elif ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10.0
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr
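
    # Resulting schedule, relative to the base rate opt.lr (epochs are 1-based
    # here because the training loop below calls adjust_lr(optimizer, epoch + 1)):
    #   ep   1..9   -> 0.01x .. 0.09x   (linear warm-up)
    #   ep  10..49  -> 3x .. 10x        (step increase every 5 epochs)
    #   ep  50..199 -> 10x
    #   ep 200..299 -> 1x
    #   ep 300+     -> 0.1x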
    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and save a checkpoint only every eval_step epochs and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or \
                (epoch + 1) == opt.max_epoch:
            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader, galleryFliploader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=is_best, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(best_rank1, best_epoch))
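
    # Sketch: reloading a saved checkpoint afterwards. Filenames follow the
    # 'checkpoint_ep<N>.pth.tar' pattern used above; since the weights are taken
    # from model.module under DataParallel, they load into a bare (unwrapped) model.
    #   ckpt = torch.load(osp.join(opt.save_dir, 'checkpoint_ep{}.pth.tar'.format(best_epoch)))
    #   model = BFE(dataset.num_train_pids, 1.0, 0.33)
    #   model.load_state_dict(ckpt['state_dict'])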
    # --- Alternative criterion (unused variant) ---------------------------
    # Adds a pairwise CrossEntropyLabelSmooth2 term over six extra output
    # groups (a..f) that a model variant is expected to return, on top of the
    # embedding and label-smooth losses above.
    # NOTE: zip(a, a) pairs each output with itself; zipping two different
    # groups (e.g. zip(a, b)) may have been intended.
    xent_criterion2 = CrossEntropyLabelSmooth2(dataset.num_train_pids)

    def criterion(a, b, c, d, e, f, triplet_y, softmax_y, labels):
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(a, a)] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(b, b)] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(c, c)] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(d, d)] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(e, e)] + \
                 [xent_criterion2(i, j, labels) for i, j in zip(f, f)]
        loss = sum(losses)
        return loss
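
# Minimal reference sketch of what a label-smoothing cross entropy such as
# CrossEntropyLabelSmooth computes (Szegedy et al., 2016). Illustrative only:
# the class name and epsilon default below are assumptions, not this repo's
# actual utils implementation; torch / torch.nn (as nn) are assumed imported
# at the top of this file, as elsewhere in the script.
class LabelSmoothCrossEntropySketch(nn.Module):
    def __init__(self, num_classes, epsilon=0.1):
        super(LabelSmoothCrossEntropySketch, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        # inputs: raw logits of shape (batch, num_classes); targets: class indices (batch,)
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        # mix the one-hot targets with a uniform distribution over all classes
        smooth = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smooth * log_probs).mean(0).sum()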