def test(**kwargs):
    opt._parse(kwargs)
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_test.txt'))
    ctx = mx.gpu(0)

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
    )
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
    )

    print('loading model ...')
    model = get_baseline_model(dataset.num_train_pids, ctx)
    model.load_parameters(opt.load_model, ctx)
    print('model size: {:.5f}M'.format(
        sum(p.data().size for p in model.collect_params().values()) / 1e6))

    reid_evaluator = reidEvaluator(model, ctx)
    reid_evaluator.evaluate(queryloader, galleryloader)
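# The `test(**kwargs)` / `train(**kwargs)` entry points in this file take config
# overrides as keyword arguments via `opt._parse(kwargs)`. A minimal sketch of how
# such scripts are typically dispatched from the command line with python-fire
# (an assumption -- the actual launcher is not shown in this file):
import fire

if __name__ == '__main__':
    # Expose every module-level function (test, train, ...) as a CLI subcommand,
    # e.g. `python main.py test --dataset=market1501 --load_model=ckpt.params`
    fire.Fire()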
def test(**kwargs):
    opt._parse(kwargs)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_test.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)
    reid_evaluator.test(dataloader['query'], dataloader['gallery'],
                        savefig=opt.savefig, i=opt.findid)
def prepare_running(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        [str(i) for i in eval(opt.gpus)])
    sys.stdout = Logger(os.path.join('./exps', opt.exp, 'log_train.txt'))
    print('current commit hash: {}'.format(
        subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()))
    pprint(opt)
    _random_seed(opt.seed)
    torch.backends.cudnn.benchmark = True
    torch.autograd.set_detect_anomaly(True)
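# `_random_seed` is called by prepare_running but not defined in this file. A
# minimal sketch of what such a helper presumably does (an assumption -- seed
# every RNG the training loop touches; random, numpy and torch are imported as
# in the rest of this file):
def _random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)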
def test(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_test.txt'))

    if use_gpu:
        print('currently using GPU {}'.format(opt.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
        # note: CUDA_VISIBLE_DEVICES only takes effect if set before the
        # first CUDA call
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)
    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('loading model ...')
    model, optim_policy = get_baseline_model(dataset.num_train_pids)
    # ckpt = torch.load(opt.load_model)
    # model.load_state_dict(ckpt['state_dict'])
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)
    reid_evaluator.evaluate(queryloader, galleryloader)
def test_cycle_gan(**kwargs):
    opt._parse(kwargs)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    random.seed(opt.seed)

    # Write standard output into file
    sys.stdout = Logger(os.path.join(opt.save_dir, 'log_test.txt'))
    print('========user config========')
    pprint(opt._state_dict())
    print('===========end=============')

    if opt.use_gpu:
        print('currently using GPU')
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')
    pin_memory = True if opt.use_gpu else False

    print('initializing dataset {}'.format(opt.dataset_mode))
    dataset = UnalignedDataset(opt)
    testloader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                            num_workers=opt.workers, pin_memory=pin_memory)

    summaryWriter = SummaryWriter(os.path.join(opt.save_dir,
                                               'tensorboard_log'))

    print('initializing model ... ')
    netG_A, netG_B, netD_A, netD_B = load_checkpoint(opt)
    start_epoch = opt.start_epoch

    if opt.use_gpu:
        netG_A = torch.nn.DataParallel(netG_A).cuda()
        netG_B = torch.nn.DataParallel(netG_B).cuda()
        netD_A = torch.nn.DataParallel(netD_A).cuda()
        netD_B = torch.nn.DataParallel(netD_B).cuda()

    # get tester
    cycleganTester = Tester(opt, netG_A, netG_B, netD_A, netD_B,
                            summaryWriter)
    for epoch in range(start_epoch, opt.max_epoch):
        # test over whole dataset
        cycleganTester.test(epoch, testloader)
def train(**kwargs):
    opt._parse(kwargs)
    # opt.lr = 0.00002
    opt.model_name = 'AlignedReid'

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True)
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    queryFliploader = queryloader
    galleryFliploader = galleryloader
    # queryFliploader = DataLoader(
    #     ImageData(dataset.query, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory)
    # galleryFliploader = DataLoader(
    #     ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory)

    print('initializing model ...')
    model = AlignedResNet50(num_classes=dataset.num_train_pids,
                            loss={'softmax', 'metric'},
                            aligned=True, use_gpu=use_gpu)
    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items()
        #               if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = AlignedEvaluator(model)

    if opt.evaluate:
        # rank1 = test(model, queryloader, galleryloader, use_gpu)
        reid_evaluator.evaluate(queryloader, galleryloader,
                                queryFliploader, galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig,
                                test_distance='global')
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids,
                                             use_gpu=use_gpu)
    embedding_criterion = TripletLossAlignedReID(margin=opt.margin)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss

    def criterion(outputs, features, local_features, labels):
        if opt.htri_only:
            # metric loss only; no cross-entropy term in this branch
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
            loss = global_loss + local_loss
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(xent_criterion, outputs, labels)
            else:
                xent_loss = xent_criterion(outputs, labels)
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
            loss = xent_loss + global_loss + local_loss
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = AlignedTrainer(opt, model, optimizer, criterion,
                                  summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    print('start train......')
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # save/evaluate only every eval_step epochs (and at the end)
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            # save before evaluating so an OOM during eval does not
            # cost us the checkpoint
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=0, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
            # rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            if is_best:
                save_checkpoint({'state_dict': state_dict,
                                 'epoch': epoch + 1},
                                is_best=is_best, save_dir=opt.save_dir,
                                filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    print(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True)
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'CBDB':
        if opt.datatype == "person":
            model = CBDB(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = CBDB(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items()
        #               if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader,
                                queryFliploader, galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig)
        return
def test(**kwargs):
    opt._parse(kwargs)
    sys.stdout = Logger(
        os.path.join("./pytorch-ckpt/current", opt.save_dir,
                     'log_test_{}.txt'.format(opt.testset_name)))
    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    use_gpu = torch.cuda.is_available()

    print('initializing dataset {}'.format(opt.testset_name))
    dataset = data_manager.init_dataset(
        name=opt.testset_name,
        num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch)
    pin_memory = True if use_gpu else False

    print('loading model from {} ...'.format(opt.save_dir))
    model = ResNetBuilder()
    model_path = os.path.join("./pytorch-ckpt/current", opt.save_dir,
                              'model_best.pth.tar')
    model = load_previous_model(model, model_path, load_fc_layers=False)
    model.eval()

    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    reid_evaluator = evaluator_manager.init_evaluator(opt.testset_name,
                                                      model, flip=True)

    def _calculate_bn_and_features(all_data, sampled_data):
        time.sleep(1)
        all_features, all_ids, all_cams = [], [], []
        available_cams = list(sampled_data)

        for current_cam in tqdm.tqdm(available_cams):
            # first pass: re-estimate BN statistics from the sampled
            # images of this camera
            camera_samples = sampled_data[current_cam]
            data_for_camera_loader = DataLoader(
                data_manager.init_datafolder(
                    opt.testset_name, camera_samples,
                    TestTransform(opt.height, opt.width)),
                batch_size=opt.test_batch, num_workers=opt.workers,
                pin_memory=False, drop_last=True)
            reid_evaluator.collect_sim_bn_info(data_for_camera_loader)

            # second pass: extract features for all images of this camera
            camera_data = all_data[current_cam]
            data_loader = DataLoader(
                data_manager.init_datafolder(
                    opt.testset_name, camera_data,
                    TestTransform(opt.height, opt.width)),
                batch_size=opt.test_batch, num_workers=opt.workers,
                pin_memory=pin_memory, shuffle=False)
            fs, pids, camids = reid_evaluator.produce_features(
                data_loader, normalize=True)
            all_features.append(fs)
            all_ids.append(pids)
            all_cams.append(camids)

        all_features = torch.cat(all_features, 0)
        all_ids = np.concatenate(all_ids, axis=0)
        all_cams = np.concatenate(all_cams, axis=0)
        time.sleep(1)
        return all_features, all_ids, all_cams

    print('Processing query features...')
    qf, q_pids, q_camids = _calculate_bn_and_features(
        dataset.query_per_cam, dataset.query_per_cam_sampled)
    print('Processing gallery features...')
    gf, g_pids, g_camids = _calculate_bn_and_features(
        dataset.gallery_per_cam, dataset.gallery_per_cam_sampled)

    print('Computing CMC and mAP...')
    reid_evaluator.get_final_results_with_features(qf, q_pids, q_camids,
                                                   gf, g_pids, g_camids)
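# `collect_sim_bn_info` above presumably re-estimates BatchNorm running
# statistics from one camera's images before features are extracted for that
# camera (camera-conditioned BN). A minimal sketch of that idea in plain
# PyTorch (an assumption -- the evaluator's actual method is defined elsewhere):
import torch

@torch.no_grad()
def adapt_bn_to_camera(model, loader):
    # reset running stats, then let train-mode BN layers re-accumulate them
    for m in model.modules():
        if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
            m.reset_running_stats()
            m.train()
    for imgs, *_ in loader:  # assumes batches of (images, ...)
        model(imgs)
    model.eval()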
def train(**kwargs):
    opt._parse(kwargs)  # parse all runtime options

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True)
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = BFE(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
    elif opt.model_name == 'strongBaseline':
        model = StrongBaseline(dataset.num_train_pids)
    optim_policy = model.get_optim_policy()

    # replace the reid model with an ImageNet-pretrained resnet18 and a
    # 10-class head (assigning `model.fc.out_features` alone does not
    # resize the layer, so rebuild it)
    model = resnet18(True)
    model.fc = nn.Linear(model.fc.in_features, 10)
    optim_policy = model.parameters()  # optimize the new model's parameters
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    # xent_criterion = nn.CrossEntropyLoss()
    criterion = CrossEntropyLabelSmooth(10)

    epochs = 100
    best = 0.0
    b_e = 0
    for e in range(epochs):
        model.train()
        for i, inputs in enumerate(trainloader):
            imgs, pid, _ = inputs
            if use_gpu:
                imgs, pid = imgs.cuda(), pid.cuda()
            outputs = model(imgs)
            loss = criterion(outputs, pid)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('epoch=%s \t batch loss=%s' % (e, loss.item()))
def train(**kwargs):
    #### Part 1 : Initialization
    opt._parse(kwargs)
    torch.backends.cudnn.deterministic = True

    # set random seed and cudnn benchmark
    # torch.manual_seed(opt.seed)
    # random.seed(opt.seed)
    # np.random.seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    #### Part 2 : Preparing Data
    print('initializing train dataset {}'.format(opt.trainset))
    train_dataset = data_manager.init_dataset(name=opt.trainset)
    print('initializing test dataset {}'.format(opt.testset))
    test_dataset = data_manager.init_dataset(name=opt.testset)

    pin_memory = True if use_gpu else False
    pin_memory = False  # pinned memory disabled

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    collateFn = NormalCollateFn()
    if opt.sampler == "randomidentity":
        trainloader = DataLoader(
            data_manager.init_datafolder(
                opt.trainset, train_dataset.train,
                TrainTransform(opt.height, opt.width,
                               random_erase=opt.with_randomerase),
                if_train=True),
            sampler=RandomIdentitySampler(train_dataset.train,
                                          opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True, collate_fn=collateFn,
        )
    elif opt.sampler == "randomidentitycamera":
        trainloader = DataLoader(
            data_manager.init_datafolder(
                opt.trainset, train_dataset.train,
                TrainTransform(opt.height, opt.width,
                               random_erase=opt.with_randomerase),
                if_train=True),
            batch_sampler=RandomIdentityCameraSampler(train_dataset.train,
                                                      opt.num_instances,
                                                      opt.train_batch),
            num_workers=opt.workers, pin_memory=pin_memory,
            collate_fn=collateFn,
        )
    queryloader = DataLoader(
        data_manager.init_datafolder(opt.testset, test_dataset.query,
                                     TestTransform(opt.height, opt.width),
                                     if_train=False),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        data_manager.init_datafolder(opt.testset, test_dataset.gallery,
                                     TestTransform(opt.height, opt.width),
                                     if_train=False),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    #### Part 3 : Preparing Backbone Network
    print('initializing model ...')
    if opt.model_name in ['triplet', 'distance']:
        model, optim_policy = get_baseline_model(num_classes=None,
                                                 model='triplet')
    elif opt.model_name in ["softmax"]:
        model, optim_policy = get_baseline_model(
            train_dataset.num_train_pids, model='softmax',
            drop_prob=opt.drop)
    else:
        assert False, "unknown model name"

    if (not opt.model_path == 'zero') and 'tar' in opt.model_path:
        print('load pretrained reid model......' + opt.model_path)
        ckpt = torch.load(opt.model_path)
        # remove the classifier weights, keeping selected layers
        tmp = dict()
        for k, v in ckpt['state_dict'].items():
            if opt.keep_layer:
                for i in opt.keep_layer:
                    if 'layer' + str(i) in k:
                        # print(k + " skip....")
                        continue
            if opt.keepfc or ('fc' not in k and 'classifier' not in k):
                tmp[k] = v
        ckpt['state_dict'] = tmp
        model.load_state_dict(ckpt['state_dict'], strict=False)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    #### Part 4 : Preparing Loss Functions
    if opt.margin1 is not None:
        distance_loss = DistanceLoss(margin=(opt.margin1, opt.margin2))
    else:
        distance_loss = DistanceLoss()
    tri_loss = TripletLoss(margin=opt.margin)
    xent_loss = nn.CrossEntropyLoss()

    vis = dict()
    vis['tri_acc1'] = AverageMeter()
    vis['tri_acc2'] = AverageMeter()
    vis['cls_accuracy'] = AverageMeter()
    vis['cls_loss'] = AverageMeter()

    def dist_criterion(feat, targets, cameras, model=None, paths=None,
                       epoch=0):
        dis_loss, tri_acc1, tri_acc2 = distance_loss(feat, targets, cameras,
                                                     model, paths,
                                                     epoch=epoch)
        vis['tri_acc1'].update(float(tri_acc1))
        vis['tri_acc2'].update(float(tri_acc2))
        return dis_loss

    def triplet_criterion(feat, targets):
        triplet_loss, tri_accuracy, _, _ = tri_loss(feat, targets)
        vis['tri_acc1'].update(float(tri_accuracy))
        return triplet_loss

    def cls_criterion(cls_scores, targets):
        cls_loss = xent_loss(cls_scores, targets)
        _, preds = torch.max(cls_scores.data, 1)
        corrects = float(torch.sum(preds == targets.data))
        vis['cls_accuracy'].update(float(corrects / opt.train_batch))
        vis['cls_loss'].update(float(cls_loss))
        return cls_loss

    #### Part 5 : Preparing Optimizer and Trainer
    optimizer, adjust_lr = get_optimizer_strategy(opt.model_name,
                                                  optim_policy, opt)
    start_epoch = opt.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        # model = model.cuda()

    # get trainer and evaluator
    if opt.model_name == "distance":
        reid_trainer = tripletTrainer(opt, model, optimizer, dist_criterion,
                                      summary_writer, need_cam=True)
    elif opt.model_name == 'triplet' or opt.model_name == 'triplet_fc':
        reid_trainer = tripletTrainer(opt, model, optimizer,
                                      triplet_criterion, summary_writer)
    elif opt.model_name == 'softmax':
        reid_trainer = clsTrainer(opt, model, optimizer, cls_criterion,
                                  summary_writer)
    else:
        print("Error: Unknown model name {}".format(opt.model_name))
    reid_evaluator = evaluator_manager.init_evaluator(opt.testset, model,
                                                      flip=True)

    #### Part 6 : Training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            current_lr = adjust_lr(optimizer, epoch)
        reid_trainer.train(epoch, trainloader)
        for k, v in vis.items():
            print("{}:{}".format(k, v.mean))
            v.reset()

        if (epoch + 1) == opt.max_epoch:
            if use_gpu and opt.num_gpu > 1:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=False, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

        # save/evaluate only every eval_step epochs (and at the end)
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0
                and epoch >= 0 or (epoch + 1) == opt.max_epoch):
            # print('Test on ' + opt.testset)
            # rank1 = reid_evaluator.evaluate(queryloader, galleryloader, normalize=opt.with_normalize)
            print('Test on ' + opt.trainset)
            if use_gpu and opt.num_gpu > 1:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=False, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')
            rank1, mAP = reid_evaluator.evaluate(
                queryloader, galleryloader, normalize=opt.with_normalize)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
                save_checkpoint({'state_dict': state_dict,
                                 'epoch': epoch + 1},
                                is_best=False, save_dir=opt.save_dir,
                                filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    if 'triplet' in opt.model_name:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True)
    else:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            batch_size=opt.train_batch, shuffle=True,
            num_workers=opt.workers, pin_memory=pin_memory)
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model, optim_policy = get_baseline_model(dataset.num_train_pids)
    elif opt.model_name == 'triplet':
        model, optim_policy = get_baseline_model(num_classes=None)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)
    tri_criterion = TripletLoss(opt.margin)

    def cls_criterion(cls_scores, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        return cls_loss

    def triplet_criterion(feat, targets):
        triplet_loss, _, _ = tri_criterion(feat, targets)
        return triplet_loss

    def cls_tri_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        triplet_loss, _, _ = tri_criterion(feat, targets)
        loss = cls_loss + triplet_loss
        return loss

    # get optimizer
    optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                 weight_decay=opt.weight_decay)

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * opt.num_gpu
        elif ep < 180:
            lr = 1e-4 * opt.num_gpu
        elif ep < 300:
            lr = 1e-5 * opt.num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * opt.num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * opt.num_gpu
        else:
            lr = 1e-5 * opt.num_gpu
        for p in optimizer.param_groups:
            p['lr'] = lr

    start_epoch = opt.start_epoch
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get trainer and evaluator
    if opt.model_name == 'softmax':
        reid_trainer = clsTrainer(opt, model, optimizer, cls_criterion,
                                  summary_writer)
    elif opt.model_name == 'softmax_triplet':
        reid_trainer = cls_tripletTrainer(opt, model, optimizer,
                                          cls_tri_criterion, summary_writer)
    elif opt.model_name == 'triplet':
        reid_trainer = tripletTrainer(opt, model, optimizer,
                                      triplet_criterion, summary_writer)
    reid_evaluator = ResNetEvaluator(model)

    # start training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # save/evaluate only every eval_step epochs (and at the end)
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(queryloader, galleryloader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=is_best, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
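# `RandomIdentitySampler` is used by several of the loaders above but defined
# elsewhere in the repo. A minimal sketch of the usual P x K behaviour (an
# assumption about this repo's implementation): yield indices so that each
# batch contains `num_instances` (K) images per sampled identity.
import random
from collections import defaultdict

import numpy as np
from torch.utils.data.sampler import Sampler

class RandomIdentitySamplerSketch(Sampler):
    def __init__(self, data_source, num_instances=4):
        # data_source: list of (img_path, pid, camid) tuples
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_instances = num_instances

    def __iter__(self):
        random.shuffle(self.pids)
        ret = []
        for pid in self.pids:
            idxs = self.index_dic[pid]
            # sample with replacement when an identity has fewer than K images
            replace = len(idxs) < self.num_instances
            chosen = np.random.choice(idxs, size=self.num_instances,
                                      replace=replace)
            ret.extend(chosen.tolist())
        return iter(ret)

    def __len__(self):
        return len(self.pids) * self.num_instances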
def train(**kwargs):
    opt._parse(kwargs)
    # torch.backends.cudnn.deterministic = True  # may slow down training

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(
        os.path.join('./pytorch-ckpt/current', opt.save_dir,
                     'log_train.txt'))

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
    else:
        print('currently using cpu')
    print(opt._state_dict())

    print('initializing dataset {}'.format(opt.trainset_name))
    if opt.trainset_name == 'combine':
        # input dataset names in 'datasets'
        train_dataset = data_manager.init_combine_dataset(
            name=opt.trainset_name, options=opt, datasets=opt.datasets,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch,
            share_cam=opt.share_cam, num_pids=opt.num_pids)
    elif opt.trainset_name == 'unreal':
        # input dataset dirs in 'datasets'
        train_dataset = data_manager.init_unreal_dataset(
            name=opt.trainset_name, datasets=opt.datasets,
            num_pids=opt.num_pids, num_cams=opt.num_cams,
            img_per_person=opt.img_per_person)
    else:
        train_dataset = data_manager.init_dataset(
            name=opt.trainset_name,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch,
            num_pids=opt.num_pids)
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(
        os.path.join('./pytorch-ckpt/current', opt.save_dir,
                     'tensorboard_log'))

    if opt.cam_bal:
        IDSampler = IdentityCameraSampler
    else:
        IDSampler = IdentitySampler
    if opt.trainset_name == 'combine':
        samp = IDSampler(train_dataset.train, opt.train_batch,
                         opt.num_instances, train_dataset.cams_of_dataset,
                         train_dataset.len_of_real_dataset)
    else:
        samp = IDSampler(train_dataset.train, opt.train_batch,
                         opt.num_instances)

    trainloader = DataLoader(
        data_manager.init_datafolder(opt.trainset_name, train_dataset.train,
                                     TrainTransform(opt.height, opt.width)),
        sampler=samp, batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True,
        collate_fn=NormalCollateFn())

    print('initializing model ...')
    num_pid = train_dataset.num_train_pids if opt.loss == 'softmax' else None
    model = ResNetBuilder(num_pid)
    if opt.model_path is not None and 'moco' in opt.model_path:
        model = load_moco_model(model, opt.model_path)
    elif opt.model_path is not None:
        model = load_previous_model(model, opt.model_path,
                                    load_fc_layers=False)
    optim_policy = model.get_optim_policy()
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = CamDataParallel(model).cuda()

    xent = nn.CrossEntropyLoss()
    triplet = TripletLoss()

    def standard_cls_criterion(feat, predictions, targets, global_step,
                               summary_writer):
        identity_loss = xent(predictions, targets)
        identity_accuracy = torch.mean(
            (torch.argmax(predictions, dim=1) == targets).float())
        summary_writer.add_scalar('cls_loss', identity_loss.item(),
                                  global_step)
        summary_writer.add_scalar('cls_accuracy', identity_accuracy.item(),
                                  global_step)
        return identity_loss

    def triplet_criterion(feat, predictions, targets, global_step,
                          summary_writer):
        triplet_loss, acc = triplet(feat, targets)
        summary_writer.add_scalar('loss', triplet_loss.item(), global_step)
        print(np.mean(acc.item()))
        summary_writer.add_scalar('accuracy', acc.item(), global_step)
        return triplet_loss

    # get trainer and evaluator
    optimizer, adjust_lr = get_our_optimizer_strategy(opt, optim_policy)
    if opt.loss == 'softmax':
        crit = standard_cls_criterion
    elif opt.loss == 'triplet':
        crit = triplet_criterion
    reid_trainer = CameraClsTrainer(opt, model, optimizer, crit,
                                    summary_writer)

    print('Start training')
    for epoch in range(opt.max_epoch):
        adjust_lr(optimizer, epoch)
        reid_trainer.train(epoch, trainloader)
        if (epoch + 1) % opt.save_step == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            save_dir=os.path.join('./pytorch-ckpt/current',
                                                  opt.save_dir),
                            ep=epoch + 1)
        # if (epoch + 1) % 15 == 0:
        #     save_checkpoint({
        #         'state_dict': state_dict,
        #         'epoch': epoch + 1,
        #     }, save_dir=os.path.join('./pytorch-ckpt/current', opt.save_dir))

    # save the final model
    if use_gpu:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                    save_dir=os.path.join('./pytorch-ckpt/current',
                                          opt.save_dir))
def train(**kwargs):
    opt._parse(kwargs)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    optim_policy = model.get_optim_policy()
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(dataloader['query'], dataloader['gallery'],
                                dataloader['queryFlip'],
                                dataloader['galleryFlip'],
                                savefig=opt.savefig)
        return

    criterion = get_loss()

    # optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=5e-4)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=5e-4)
    # linear warmup to the base lr over 10 epochs, then x0.1 at epochs 40 and 70
    scheduler = WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10,
                                  'linear')
    start_epoch = opt.start_epoch

    # get trainer and evaluator
    reid_trainer = Trainer(opt, model, optimizer, criterion, summary_writer)

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        scheduler.step()
        reid_trainer.train(epoch, dataloader['train'])

        # save/evaluate only every eval_step epochs (and at the end)
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(dataloader['query'],
                                            dataloader['gallery'],
                                            dataloader['queryFlip'],
                                            dataloader['galleryFlip'])
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=is_best, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
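# `WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10, 'linear')` above is
# defined elsewhere in the repo. A sketch of the lr multiplier it presumably
# applies (an assumption based on the common warmup + multi-step recipe):
# linear warmup from factor 0.01 to 1.0 over the first 10 epochs, then x0.1 at
# each milestone (epochs 40 and 70).
def warmup_multistep_factor(epoch, milestones=(40, 70), gamma=0.1,
                            warmup_factor=0.01, warmup_iters=10):
    if epoch < warmup_iters:
        alpha = epoch / warmup_iters
        factor = warmup_factor * (1 - alpha) + alpha  # linear ramp 0.01 -> 1.0
    else:
        factor = 1.0
    return factor * gamma ** sum(1 for m in milestones if epoch >= m)

# e.g. epoch 0 -> 0.01, epoch 5 -> ~0.505, epoch 10 -> 1.0,
#      epoch 40 -> 0.1, epoch 70 -> 0.01 (all relative to the base lr)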
def train(**kwargs):
    opt._parse(kwargs)

    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))
    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    if 'triplet' in opt.model_name:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            last_batch='discard')
    else:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            batch_size=opt.train_batch, shuffle=True,
            num_workers=opt.workers,
        )
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
    )
    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch, num_workers=opt.workers,
    )

    print('initializing model ...')
    model = get_baseline_model(dataset.num_train_pids, mx.gpu(0),
                               opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.data().size for p in model.collect_params().values()) / 1e6))

    xent_criterion = gluon.loss.SoftmaxCrossEntropyLoss()
    tri_criterion = TripletLoss(opt.margin)

    def cls_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        return cls_loss

    def triplet_criterion(cls_scores, feat, targets):
        triplet_loss, dist_ap, dist_an = tri_criterion(feat, targets)
        return triplet_loss

    def cls_tri_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        triplet_loss, dist_ap, dist_an = tri_criterion(feat, targets)
        loss = cls_loss + triplet_loss
        return loss

    # get optimizer
    optimizer = gluon.Trainer(model.collect_params(), opt.optim,
                              {'learning_rate': opt.lr,
                               'wd': opt.weight_decay})

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * opt.num_gpu
        elif ep < 180:
            lr = 1e-4 * opt.num_gpu
        elif ep < 300:
            lr = 1e-5 * opt.num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * opt.num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * opt.num_gpu
        else:
            lr = 1e-5 * opt.num_gpu
        optimizer.set_learning_rate(lr)

    start_epoch = opt.start_epoch

    # get trainer and evaluator
    use_criterion = None
    if opt.model_name == 'softmax':
        use_criterion = cls_criterion
    elif opt.model_name == 'softmax_triplet':
        use_criterion = cls_tri_criterion
    elif opt.model_name == 'triplet':
        use_criterion = triplet_criterion
    reid_trainer = reidTrainer(opt, model, optimizer, use_criterion,
                               summary_writer, mx.gpu(0))
    reid_evaluator = reidEvaluator(model, mx.gpu(0))

    # start training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # save/evaluate only every eval_step epochs (and at the end)
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(queryloader, galleryloader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
            state_dict = {'model': model, 'epoch': epoch}
            save_checkpoint(state_dict, is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.params')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
def test(**kwargs):
    opt._parse(kwargs)
    if opt.save_dir.startswith('pytorch'):
        opt.save_dir = os.path.split(opt.save_dir)[1]
    save_file = 'log_test_{}_{}.txt'.format(opt.testset_name, opt.testepoch)
    if opt.testset_name == 'unreal_test':
        save_file = 'log_test_{}_{}_{}.txt'.format(opt.testset_name,
                                                   opt.testepoch,
                                                   opt.datasets)
    sys.stdout = Logger(
        os.path.join("./pytorch-ckpt/current", opt.save_dir, save_file))

    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    use_gpu = torch.cuda.is_available()

    print('initializing dataset {}'.format(opt.testset_name))
    if opt.testset_name != 'unreal_test':
        dataset = data_manager.init_dataset(
            name=opt.testset_name,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch)
    else:
        dataset = data_manager.init_unreal_dataset(
            name=opt.testset_name, datasets=opt.datasets,
            num_pids=opt.num_pids, img_per_person=opt.img_per_person,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch)
    pin_memory = True if use_gpu else False

    print('loading model from {} ...'.format(opt.save_dir))
    model = ResNetBuilder()
    if opt.model_path is not None:
        model_path = opt.model_path
    else:
        model_path = os.path.join("./pytorch-ckpt/current", opt.save_dir,
                                  '{}.pth.tar'.format(opt.testepoch))
    if opt.model_path is not None and opt.model_path.find('moco') != -1:
        model = load_moco_model(model, model_path)
    else:
        model = load_previous_model(model, model_path, load_fc_layers=False)
    model.eval()

    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    reid_evaluator = evaluator_manager.init_evaluator(opt.testset_name,
                                                      model, flip=True)

    def _calculate_bn_and_features(all_data, sampled_data):
        time.sleep(1)
        all_features, all_ids, all_cams, all_paths = [], [], [], []
        available_cams = list(sampled_data)
        cam_bn_info = dict()

        for current_cam in tqdm.tqdm(available_cams):
            camera_data = all_data[current_cam]
            if len(camera_data) == 0:
                continue
            camera_samples = sampled_data[current_cam]
            data_for_camera_loader = DataLoader(
                data_manager.init_datafolder(
                    opt.testset_name, camera_samples,
                    TestTransform(opt.height, opt.width), with_path=True),
                batch_size=opt.test_batch, num_workers=opt.workers,
                pin_memory=False, drop_last=True)
            bn_info = reid_evaluator.collect_sim_bn_info(
                data_for_camera_loader)
            cam_bn_info[current_cam] = bn_info

            data_loader = DataLoader(
                data_manager.init_datafolder(
                    opt.testset_name, camera_data,
                    TestTransform(opt.height, opt.width), with_path=True),
                batch_size=opt.test_batch, num_workers=opt.workers,
                pin_memory=pin_memory, shuffle=False)
            fs, pids, camids, img_paths = reid_evaluator.produce_features(
                data_loader, normalize=True)
            all_features.append(fs)
            all_ids.append(pids)
            all_cams.append(camids)
            all_paths.extend(img_paths)

        all_features = torch.cat(all_features, 0)
        all_ids = np.concatenate(all_ids, axis=0)
        all_cams = np.concatenate(all_cams, axis=0)
        time.sleep(1)
        pickle.dump(
            cam_bn_info,
            open('cam_bn_info-{}-{}.pkl'.format(opt.save_dir,
                                                opt.testset_name), 'wb'))
        return all_features, all_ids, all_cams, all_paths

    print('Processing query features...')
    qf, q_pids, q_camids, q_paths = _calculate_bn_and_features(
        dataset.query_per_cam, dataset.query_per_cam_sampled)
    print('Processing gallery features...')
    gf, g_pids, g_camids, g_paths = _calculate_bn_and_features(
        dataset.gallery_per_cam, dataset.gallery_per_cam_sampled)

    if opt.testset_name == 'msmt_sepcam':
        # map separated-camera labels back to the original camera ids
        cid2label = dataset.cid2label
        label2cid = dict()
        for c, l in cid2label.items():
            label2cid[l] = c[0]
        print(label2cid)
        q_camids = np.asarray([label2cid[qc] for qc in q_camids])
        g_camids = np.asarray([label2cid[gc] for gc in g_camids])

    pickle.dump({'qp': q_paths, 'gp': g_paths}, open('paths.pkl', 'wb'))

    print('Computing CMC and mAP...')
    reid_evaluator.get_final_results_with_features(qf, q_pids, q_camids,
                                                   gf, g_pids, g_camids,
                                                   q_paths, g_paths)
def train(**kwargs):
    opt._parse(kwargs)
    # torch.backends.cudnn.deterministic = True  # may slow down training

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(
        os.path.join('./pytorch-ckpt/current', opt.save_dir,
                     'log_train.txt'))

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.trainset_name))
    train_dataset = data_manager.init_dataset(
        name=opt.trainset_name,
        num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch)
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(
        os.path.join('./pytorch-ckpt/current', opt.save_dir,
                     'tensorboard_log'))

    trainloader = DataLoader(
        data_manager.init_datafolder(opt.trainset_name, train_dataset.train,
                                     TrainTransform(opt.height, opt.width)),
        sampler=IdentitySampler(train_dataset.train, opt.train_batch,
                                opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True,
        collate_fn=NormalCollateFn())

    print('initializing model ...')
    model = ResNetBuilder(train_dataset.num_train_pids)
    optim_policy = model.get_optim_policy()
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = CamDataParallel(model).cuda()

    xent = nn.CrossEntropyLoss()

    def standard_cls_criterion(predictions, targets, global_step,
                               summary_writer):
        identity_loss = xent(predictions, targets)
        identity_accuracy = torch.mean(
            (torch.argmax(predictions, dim=1) == targets).float())
        summary_writer.add_scalar('cls_loss', identity_loss.item(),
                                  global_step)
        summary_writer.add_scalar('cls_accuracy', identity_accuracy.item(),
                                  global_step)
        return identity_loss

    # get trainer and evaluator
    optimizer, adjust_lr = get_optimizer_strategy(opt, optim_policy)
    reid_trainer = CameraClsTrainer(opt, model, optimizer,
                                    standard_cls_criterion, summary_writer)

    print('Start training')
    for epoch in range(opt.max_epoch):
        adjust_lr(optimizer, epoch)
        reid_trainer.train(epoch, trainloader)

    # save the final model
    if use_gpu:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                    save_dir=os.path.join('./pytorch-ckpt/current',
                                          opt.save_dir))
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')
    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    # -------------- model and parameter loading ------------------
    print('initializing model ...')
    if opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(751, 1.0, 0.33)
        else:
            model = BFE(751, 0.5, 0.5)
    optim_policy = model.parameters()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        # model.cuda()
    reid_evaluator = ResNetEvaluator(model)
    # -------------------------- load end -------------------------

    def handleDataset():
        print('initializing dataset {}'.format(opt.dataset))
        # unchanged from before
        dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

        # for query images (test_batch = 1)
        queryloader = DataLoader(
            ImageData(dataset.query, TestTransform(opt.datatype)),
            batch_size=opt.test_batch, num_workers=opt.workers,
            pin_memory=pin_memory)
        # for target images
        galleryloader = DataLoader(
            ImageData(dataset.target, TestTransform(opt.datatype)),
            batch_size=opt.test_batch, num_workers=opt.workers,
            pin_memory=pin_memory)
        queryFliploader = DataLoader(
            ImageData(dataset.query, TestTransform(opt.datatype, True)),
            batch_size=opt.test_batch, num_workers=opt.workers,
            pin_memory=pin_memory)
        galleryFliploader = DataLoader(
            ImageData(dataset.target, TestTransform(opt.datatype, True)),
            batch_size=opt.test_batch, num_workers=opt.workers,
            pin_memory=pin_memory)
        return (queryloader, galleryloader, queryFliploader,
                galleryFliploader)

    # def deleteDirImage():

    def recv_and_send_data(clnt_sock):
        # receive and send data in a loop
        strSend = 'Please send messages to me... \n'
        clnt_sock.send(strSend.encode())
        print("send successfully")
        while True:
            recv_data = clnt_sock.recv(1024)
            queryloader, galleryloader, queryFliploader, galleryFliploader = \
                handleDataset()
            cmc = reid_evaluator.evaluate(queryloader, galleryloader,
                                          queryFliploader, galleryFliploader,
                                          re_ranking=opt.re_ranking,
                                          savefig=opt.savefig)
            if recv_data:
                # the reply should carry the evaluation result
                reply = 'cmc : ' + str(cmc)  # cmc is a numpy float
                clnt_sock.sendall(reply.encode())
                # delete the image folder data here if needed
                # deleteDirImage()
            else:
                break
        clnt_sock.close()

    # ------------------------ start TCP ----------------------------
    serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("socket creating...")

    # bind
    try:
        serv_sock.bind(('127.0.0.1', 8801))
    except socket.error:
        print("Bind failed ")
        sys.exit()
    print("socket bind successfully")

    # listen
    serv_sock.listen(10)
    print("socket start listening")

    # accept
    while True:
        clnt_sock, clnt_addr = serv_sock.accept()
        print("Connected to IP:port —— ", clnt_addr[0], ' : ',
              str(clnt_addr[1]))
        # handle each client on its own thread (args passed as a tuple)
        start_new_thread(recv_and_send_data, (clnt_sock,))

    # close (unreachable: the accept loop above never exits)
    serv_sock.close()
def train_cycle_gan(**kwargs):
    opt._parse(kwargs)
    torch.manual_seed(opt.seed)

    # Write standard output into file
    sys.stdout = Logger(os.path.join(opt.save_dir, 'log_train.txt'))
    print('========user config========')
    pprint(opt._state_dict())
    print('===========end=============')

    if opt.use_gpu:
        print('currently using GPU')
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')
    pin_memory = True if opt.use_gpu else False

    print('initializing dataset {}'.format(opt.dataset_mode))
    dataset = UnalignedDataset(opt)
    trainloader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=True,
                             num_workers=opt.workers, pin_memory=pin_memory)

    summaryWriter = SummaryWriter(os.path.join(opt.save_dir,
                                               'tensorboard_log'))

    print('initializing model ... ')
    use_dropout = not opt.no_dropout
    netG_A = define_G(opt.input_nc, opt.output_nc, opt.ndf,
                      opt.which_model_netG, opt.norm, use_dropout)
    netG_B = define_G(opt.output_nc, opt.input_nc, opt.ndf,
                      opt.which_model_netG, opt.norm, use_dropout)
    use_sigmoid = opt.no_lsgan
    netD_A = define_D(opt.output_nc, opt.ndf, opt.which_model_netD,
                      opt.n_layers_D, opt.norm, use_sigmoid)
    netD_B = define_D(opt.input_nc, opt.ndf, opt.which_model_netD,
                      opt.n_layers_D, opt.norm, use_sigmoid)
    # print(netD_A)

    optimizer_G = torch.optim.Adam(itertools.chain(netG_A.parameters(),
                                                   netG_B.parameters()),
                                   lr=opt.lr, betas=(opt.beta1, 0.999))
    optimizer_D = torch.optim.Adam(itertools.chain(netD_A.parameters(),
                                                   netD_B.parameters()),
                                   lr=opt.lr, betas=(opt.beta1, 0.999))

    def get_scheduler(optimizer, opt):
        if opt.lr_policy == 'lambda':
            def lambda_rule(epoch):
                lr_l = 1.0 - max(0, epoch + 1 + opt.start_epoch -
                                 opt.niter) / float(opt.lr_decay_iters + 1)
                return lr_l
            scheduler = lr_scheduler.LambdaLR(optimizer,
                                              lr_lambda=lambda_rule)
        elif opt.lr_policy == 'step':
            scheduler = lr_scheduler.StepLR(optimizer,
                                            step_size=opt.lr_decay_iters,
                                            gamma=0.1)
        elif opt.lr_policy == 'plateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                       factor=0.2,
                                                       threshold=0.01,
                                                       patience=5)
        else:
            raise NotImplementedError(
                'learning rate policy [{}] is not implemented'.format(
                    opt.lr_policy))
        return scheduler

    scheduler_G = get_scheduler(optimizer_G, opt)
    scheduler_D = get_scheduler(optimizer_D, opt)
    start_epoch = opt.start_epoch

    if opt.use_gpu:
        netG_A = torch.nn.DataParallel(netG_A).cuda()
        netG_B = torch.nn.DataParallel(netG_B).cuda()
        netD_A = torch.nn.DataParallel(netD_A).cuda()
        netD_B = torch.nn.DataParallel(netD_B).cuda()

    # get trainer
    cycleganTrainer = Trainer(opt, netG_A, netG_B, netD_A, netD_B,
                              optimizer_G, optimizer_D, summaryWriter)

    # start training
    for epoch in range(start_epoch, opt.max_epoch):
        scheduler_G.step()
        scheduler_D.step()
        # train over whole dataset
        cycleganTrainer.train(epoch, trainloader)

        if (epoch + 1) % opt.save_freq == 0 or (epoch + 1) == opt.max_epoch:
            if opt.use_gpu:
                state_dict_netG_A = netG_A.module.state_dict()
                state_dict_netG_B = netG_B.module.state_dict()
                state_dict_netD_A = netD_A.module.state_dict()
                state_dict_netD_B = netD_B.module.state_dict()
            else:
                state_dict_netG_A = netG_A.state_dict()
                state_dict_netG_B = netG_B.state_dict()
                state_dict_netD_A = netD_A.state_dict()
                state_dict_netD_B = netD_B.state_dict()
            save_checkpoint(
                {
                    'netG_A': state_dict_netG_A,
                    'netG_B': state_dict_netG_B,
                    'netD_A': state_dict_netD_A,
                    'netD_B': state_dict_netD_B,
                    'epoch': epoch + 1,
                },
                False, save_dir=opt.save_dir,
                filename='checkpoint_ep' + str(epoch + 1))
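# A quick check of the 'lambda' policy above: the multiplier stays at 1.0 for
# the first `niter` epochs, then decays linearly toward 0 over the next
# `lr_decay_iters` epochs. Sketch with assumed option values (start_epoch=0,
# niter=100, lr_decay_iters=100):
def _lambda_rule_demo(epoch, start_epoch=0, niter=100, lr_decay_iters=100):
    return 1.0 - max(0, epoch + 1 + start_epoch - niter) / float(
        lr_decay_iters + 1)

# _lambda_rule_demo(0)   -> 1.0     (full lr during the first 100 epochs)
# _lambda_rule_demo(99)  -> 1.0
# _lambda_rule_demo(149) -> ~0.505  (halfway through the decay window)
# _lambda_rule_demo(199) -> ~0.0099 (almost fully decayed)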
def train(**kwargs):
    opt._parse(kwargs)
    # opt.lr = 0.00002
    opt.model_name = 'PCB'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    tgt_dataset = data_manager.init_dataset(name=opt.tgt_dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_trainloader = DataLoader(
        ImageData(tgt_dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_queryloader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryloader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_queryFliploader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryFliploader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    model = PCB(dataset.num_train_pids)
    optim_policy = model.get_optim_policy()

    start_epoch = opt.start_epoch

    if opt.pretrained_model:
        checkpoint = torch.load(opt.pretrained_model)
        state_dict = checkpoint['state_dict']
        # state_dict = {k: v for k, v in state_dict.items()
        #               if not ('reduction' in k or 'softmax' in k)}
        try:
            model.load_state_dict(state_dict, False)
            print('load pretrained model ' + opt.pretrained_model)
        except RuntimeError:
            raise RuntimeError('pretrained model must keep the same size as the source dataset')
    else:
        raise RuntimeError('please load a pre-trained model...')

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        print('transfer directly.......')
        reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                                tgt_queryFliploader, tgt_galleryFliploader,
                                re_ranking=opt.re_ranking, savefig=opt.savefig)
        return

    # xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)
    embedding_criterion = SelfTraining_TripletLoss(margin=0.5, num_instances=4)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss

    def criterion(triplet_y, softmax_y, labels):
        # losses = [torch.sum(torch.stack([xent_criterion(logits, labels) for logits in softmax_y]))]
        losses = [torch.sum(torch.stack([embedding_criterion(output, labels)
                                         for output in triplet_y]))]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    # get trainer and evaluator
    reid_trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0

    print('transfer directly.....')
    reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                            tgt_queryFliploader, tgt_galleryFliploader,
                            re_ranking=opt.re_ranking, savefig=opt.savefig)

    for iter_n in range(start_epoch, opt.max_epoch):
        if opt.lambda_value == 0:
            source_features = 0
        else:
            # extract source dataset features
            print('Iteration {}: Extracting Source Dataset Features...'.format(iter_n + 1))
            source_features, _ = extract_pcb_features(model, trainloader)

        # extract target training images' features
        # (feature order stays synchronized with tgt_dataset.train)
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))
        target_features, _ = extract_pcb_features(model, tgt_trainloader)

        # calculate distances and re-rank the result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(
            source_features, target_features, lambda_value=opt.lambda_value)

        if iter_n == 0:
            # estimate the DBSCAN eps once, from the first iteration's distances
            tri_mat = np.triu(rerank_dist, 1)       # upper triangle of the 2-D distance matrix
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # flatten to 1-D
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(opt.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()          # DBSCAN clustering radius
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)

        # select & cluster images as the training set of this epoch
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        del rerank_dist
        del source_features
        del target_features
        try:
            gc.collect()
        except Exception:
            print('cannot collect')

        num_ids = len(set(labels)) - 1
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))

        # generate a new pseudo-labeled dataset; label -1 marks DBSCAN noise
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.train, labels):
            if label == -1:
                continue
            # appending camid 0 means trainer.py's _parsing_input and the sampler need no changes
            new_dataset.append((fname, label, 0))
        print('Iteration {} has {} training images'.format(iter_n + 1, len(new_dataset)))

        selftrain_loader = DataLoader(
            ImageData(new_dataset, TrainTransform(opt.datatype)),
            sampler=RandomIdentitySampler(new_dataset, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True
        )

        # train the model with the newly generated dataset
        trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)
        reid_evaluator = ResNetEvaluator(model)
        for epoch in range(opt.selftrain_iterations):
            trainer.train(epoch, selftrain_loader)

        # skip the rest if the model is not being saved this iteration
        if opt.eval_step > 0 and (iter_n + 1) % opt.eval_step == 0 or (iter_n + 1) == opt.max_epoch:
            # save the checkpoint first, so an out-of-memory during eval cannot lose it
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                            is_best=0, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

            if (iter_n + 1) % (opt.eval_step * 4) == 0:
                if opt.mode == 'class':
                    rank1 = test(model, tgt_queryloader)
                else:
                    rank1 = reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                                                    tgt_queryFliploader, tgt_galleryFliploader)
                is_best = rank1 > best_rank1
                if is_best:
                    best_rank1 = rank1
                    best_epoch = iter_n + 1

                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()
                if is_best:
                    save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                                    is_best=is_best, save_dir=opt.save_dir,
                                    filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(best_rank1, best_epoch))
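
# The clustering step above is the core of the self-training loop: the DBSCAN
# radius is not hand-tuned but estimated from the smallest rho-fraction of the
# re-ranked pairwise distances. Below is a minimal, self-contained sketch of
# that estimate; `cluster_by_quantile_eps` is an illustrative helper (not part
# of this repo) and the synthetic distance matrix stands in for re_ranking()'s
# output.
import numpy as np
from sklearn.cluster import DBSCAN


def cluster_by_quantile_eps(dist, rho, min_samples=4):
    """Pick DBSCAN eps as the mean of the smallest rho-fraction of distances."""
    tri = np.triu(dist, 1)                      # upper triangle, diagonal excluded
    tri = np.sort(tri[np.nonzero(tri)], axis=None)
    top_num = np.round(rho * tri.size).astype(int)
    eps = tri[:max(top_num, 1)].mean()
    cluster = DBSCAN(eps=eps, min_samples=min_samples,
                     metric='precomputed', n_jobs=-1)
    return cluster.fit_predict(dist), eps


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    feats = rng.normal(size=(200, 8))
    # symmetric pseudo-distance matrix in place of the re-ranked distances
    dist = np.abs(feats[:, None, :] - feats[None, :, :]).sum(-1)
    labels, eps = cluster_by_quantile_eps(dist, rho=0.05)
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    print('eps={:.3f}, clusters={}'.format(eps, n_clusters))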
def train(**kwargs):
    opt._parse(kwargs)
    opt.model_name = 'bfe_test'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    model = BFE(dataset.num_train_pids, 1.0, 0.33)
    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items()
        #               if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader,
                                queryFliploader, galleryFliploader,
                                re_ranking=opt.re_ranking, savefig=opt.savefig)
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    if opt.loss == 'triplet':
        embedding_criterion = TripletLoss(opt.margin)
    elif opt.loss == 'lifted':
        embedding_criterion = LiftedStructureLoss(hard_mining=True)
    elif opt.loss == 'weight':
        embedding_criterion = Margin()

    def criterion(triplet_y, softmax_y, labels):
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 10:
            lr = opt.lr * 0.1 * (ep / 10.0)  # warm-up
        elif ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10.0
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # skip evaluation if the model is not being saved this epoch
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (epoch + 1) == opt.max_epoch:
            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader, galleryFliploader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},
                            is_best=is_best, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(best_rank1, best_epoch))
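
# The hand-rolled schedule in adjust_lr above is a four-stage staircase:
# linear warm-up for 10 epochs, a stepped ramp to 10x the base rate, a long
# plateau, and a final decay. A minimal sketch for inspecting the curve before
# a long run; `warmup_step_schedule` is an illustrative name, not part of this
# repo, and it assumes the same breakpoints as adjust_lr.
def warmup_step_schedule(ep, base_lr):
    """Return the learning rate adjust_lr would set at 1-indexed epoch ep."""
    if ep < 10:
        return base_lr * 0.1 * (ep / 10.0)  # warm-up
    if ep < 50:
        return base_lr * (ep // 5 + 1)      # stepped ramp
    if ep < 200:
        return base_lr * 10.0               # plateau
    if ep < 300:
        return base_lr
    return base_lr * 0.1                    # final decay


if __name__ == '__main__':
    for ep in (1, 9, 10, 25, 49, 50, 199, 200, 299, 300):
        print('epoch {:>3}: lr = {:.6f}'.format(ep, warmup_step_schedule(ep, 0.1)))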
def test(**kwargs):
    opt._parse(kwargs)
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(
        osp.join(opt.save_dir, 'log_test_{}_{}.txt'.format(opt.testset, opt.testepoch)))

    if use_gpu:
        print('currently using GPU {}'.format(opt.device_ids))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.device_ids)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.testset))
    dataset = data_manager.init_dataset(name=opt.testset)

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        data_manager.init_datafolder(opt.testset, dataset.query,
                                     TestTransform(opt.height, opt.width), if_train=False),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    galleryloader = DataLoader(
        data_manager.init_datafolder(opt.testset, dataset.gallery,
                                     TestTransform(opt.height, opt.width), if_train=False),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory)

    print('loading model ...')
    model, optim_policy = get_baseline_model(model="softmax", model_path=opt.model_path)

    best_model_path = os.path.join(opt.save_dir, 'model_best.pth.tar')
    if not os.path.exists(best_model_path):
        best_model_path = os.path.join(
            opt.save_dir,
            "{}_checkpoint_ep{}.pth.tar".format(opt.testepoch, opt.testepoch))

    if torch.cuda.is_available():
        ckpt = torch.load(best_model_path)
    else:
        ckpt = torch.load(best_model_path, map_location="cpu")

    # remove the classifier weights so the checkpoint loads regardless of the
    # number of training identities
    tmp = dict()
    for k, v in ckpt['state_dict'].items():
        if 'fc' not in k and 'classifier' not in k:
            tmp[k] = v
    ckpt['state_dict'] = tmp

    # debug prints left from development:
    # print(model)
    # print(ckpt)
    model.load_state_dict(ckpt['state_dict'], strict=False)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = evaluator_manager.init_evaluator(opt.testset, model, flip=True)
    reid_evaluator.evaluate(queryloader, galleryloader,
                            normalize=opt.with_normalize, rerank=False)
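
# Dropping the 'fc'/'classifier' keys before load_state_dict(strict=False), as
# done above, is what lets a checkpoint trained on one identity set be
# evaluated on a dataset with a different number of ids: only the backbone
# weights are restored. A standalone sketch of that filter;
# `strip_classifier_keys` is an illustrative name, not part of this repo.
import torch


def strip_classifier_keys(ckpt_path, map_location='cpu'):
    """Load a checkpoint and drop classifier heads so only the backbone remains."""
    ckpt = torch.load(ckpt_path, map_location=map_location)
    ckpt['state_dict'] = {
        k: v for k, v in ckpt['state_dict'].items()
        if 'fc' not in k and 'classifier' not in k
    }
    return ckpt

# usage:
#   ckpt = strip_classifier_keys('checkpoint_ep300.pth.tar')
#   model.load_state_dict(ckpt['state_dict'], strict=False)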