def train(args):
    """Train a ReID baseline model driven by a yacs config.

    :param args: parsed CLI namespace with ``config_file`` (path or "") and
        ``opts`` (list of KEY VALUE overrides for the config)
    :return: None
    """
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # BUG FIX: only archive the config file when one was actually supplied;
    # the original called shutil.copy('') when --config_file was omitted,
    # which raises FileNotFoundError.
    if args.config_file != "":
        shutil.copy(args.config_file, cfg.OUTPUT_DIR)

    num_gpus = torch.cuda.device_count()

    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)
    model = build_model(cfg, num_classes)
    loss_func = make_loss(cfg, num_classes)

    trainer = BaseTrainer(cfg, model, train_dl, val_dl, loss_func,
                          num_query, num_gpus)
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        # epoch-level bookkeeping after the batch loop
        trainer.handle_new_epoch()
def main():
    """Entry point: parse CLI args, assemble the yacs config, and launch training."""
    parser = argparse.ArgumentParser(description="PyTorch Template MNIST Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    # REMAINDER collects all trailing tokens as KEY VALUE config overrides
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # prevent any further modification of the config

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("template_model", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        # echo the raw config file into the log for reproducibility
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    train(cfg)
def parse_config():
    """Parse CLI arguments, merge them into the global cfg, and set up logging.

    Side effects: creates the output directory (or exits if none is set),
    configures the logger, and enables cudnn.benchmark when training on GPU.
    :return: None
    """
    # create the parser
    parser = argparse.ArgumentParser(description="MobileNetReID baseline")
    parser.add_argument("--config_file", default='',
                        help="path to specified config file", type=str)
    # remainder parameters in a list
    parser.add_argument("opts", default=None,
                        help='modify some value for the config file in command line',
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        # use config file to update the default config value
        cfg.merge_from_file(args.config_file)
    # note that opts is a list
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    output_dir = cfg.OUTPUT.ROOT_DIR
    if output_dir != "":
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    else:
        # a writable output path is mandatory for logs/checkpoints
        print("ERROR:please specify an output path")
        exit(1)

    # config the logger
    logger = setup_logger("MobileNetReID", output_dir, 0, cfg.OUTPUT.LOG_NAME)

    use_gpu = cfg.MODEL.DEVICE == 'cuda'
    if use_gpu:
        logger.info("Train with GPU: {}".format(cfg.MODEL.DEVICE_ID))
    else:
        logger.info("Train with CPU")
    # print the all arguments
    logger.info(args)

    # read the config file
    if args.config_file != "":
        logger.info("load configuration file {}".format(args.config_file))
        """
        with open(args.config_file,'r') as cf:
            strs = '\n' + cf.read()
            logger.info(strs)
        """
    # config after update by config file
    logger.info("runing with config:\n{}".format(cfg))

    if use_gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID
        # this setup will facilitate the training
        cudnn.benchmark = True
def main():
    """Run inference, optionally over every resnet50 checkpoint next to TEST.WEIGHT."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    # NOTE(review): a *positional* argument with action='store_false' is always
    # consumed and leaves args.flag False in practice — presumably this was
    # meant to be an optional "--flag"; confirm the intended CLI before relying
    # on the multi-model branch below.
    parser.add_argument("flag", action='store_false',
                        help="whether to test multiple models")
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if args.flag:
        # evaluate every resnet50_model* checkpoint in TEST.WEIGHT's directory
        path, _ = os.path.split(cfg.TEST.WEIGHT)
        model_list = []
        for root, dirs, files in os.walk(path):
            for i in files:
                if i.startswith('resnet50_model'):
                    model_list.append(i)
        for i in model_list:
            print(i)
            model.load_param(os.path.join(path, i))
            inference(cfg, model, val_loader, num_query)
    else:
        # single-checkpoint evaluation
        model.load_param(cfg.TEST.WEIGHT)
        inference(cfg, model, val_loader, num_query)
def test(config_file, **kwargs):
    """Evaluate a trained ReID model: extract features, compute CMC/mAP.

    :param config_file: path to the yacs config file to merge into ``cfg``
    :param kwargs: extra KEY=VALUE config overrides, merged as a flat list
    :return: None (results are written to the logger)
    """
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    re_ranking = cfg.RE_RANKING
    # separate result logs for plain vs. re-ranked evaluation
    if not re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR, 'result')
        logger.info("Test Results:")
    else:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             'result_re-ranking')
        logger.info("Re-Ranking Test Results:")

    device = torch.device(cfg.DEVICE)

    _, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    model.load(cfg.OUTPUT_DIR, cfg.TEST.LOAD_EPOCH)
    if device:
        model.to(device)
    model = model.eval()

    all_feats = []
    all_pids = []
    all_camids = []

    since = time.time()
    for data in tqdm(val_loader, desc='Feature Extraction', leave=False):
        with torch.no_grad():
            images, pids, camids = data
            # FIX: the original re-issued model.to(device) on every batch;
            # the model is already on the device, only the images need moving.
            if device:
                images = images.to(device)
            feats = model(images)
        all_feats.append(feats)
        all_pids.extend(np.asarray(pids))
        all_camids.extend(np.asarray(camids))

    cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query,
                          re_ranking)
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    test_time = time.time() - since
    logger.info('Testing complete in {:.0f}m {:.0f}s'.format(
        test_time // 60, test_time % 60))
def read_setting(path):
    """Load the settings file at *path* and return the OBDT_01 endpoint.

    :param path: path to the config file merged into the global ``cfg``
    :return: tuple ``(host, port)`` where port is returned as a string
    """
    cfg.merge_from_file(path)
    endpoint = cfg.OBDT_01
    return endpoint.IP, str(endpoint.PORT)
def read_setting(path):
    """Load the settings file at *path* and return the SEARCH_ID endpoint.

    :param path: path to the config file merged into the global ``cfg``
    :return: tuple ``(host, port)`` where port is returned as a string
    """
    cfg.merge_from_file(path)
    endpoint = cfg.SEARCH_ID
    return endpoint.IP, str(endpoint.PORT)
def train(train_loader, num_classes):
    """Configure and run training with a warmup-cosine LR schedule.

    Note: CLI parsing happens *inside* this function, so ``cfg`` is only
    finalized here.

    :param train_loader: iterable of training batches (built by the caller)
    :param num_classes: number of identity classes for the classifier head
    :return: None
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # fixed seed for reproducibility
    set_seed(cfg.SOLVER.SEED)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=True)
    logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        # finetune mode: start from an already-trained checkpoint
        model = make_model(cfg, num_class=num_classes)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        print('Loading pretrained model for finetuning......')
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func = make_loss(cfg, num_classes=num_classes)

    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupCosineAnnealingLR(optimizer, cfg.SOLVER.MAX_EPOCHS,
                                        cfg.SOLVER.DELAY_ITERS,
                                        cfg.SOLVER.ETA_MIN_LR,
                                        cfg.SOLVER.WARMUP_FACTOR,
                                        cfg.SOLVER.WARMUP_EPOCHS,
                                        cfg.SOLVER.WARMUP_METHOD)
    logger.info("use WarmupCosineAnnealingLR, delay_step:{}".format(
        cfg.SOLVER.DELAY_ITERS))

    do_train(
        cfg,
        model,
        train_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_func
    )
def main():
    """Run inference with the multi-task (global/mask/part branch) model."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    # build the multi-branch model entirely from cfg.mt options
    model = build_mt_model(
        num_features=cfg.mt.num_features,
        last_stride=cfg.mt.last_conv_stride,
        num_classes=1,  # not used since clf is not loaded
        num_classes_seg=cfg.mt.num_classes_seg,
        global_branch=cfg.mt.global_branch,
        mask_branch=cfg.mt.mask_branch,
        part_branch=cfg.mt.part_branch,
        mask_dim=cfg.mt.mask_dim,
        part_dim=cfg.mt.part_dim,
        part_info=cfg.mt.part_info,
        attr_mask_weight=cfg.mt.attr_mask_weight,
        use_attr=cfg.mt.use_attr,
        part_layer=cfg.mt.part_layer,
        part_abla=cfg.mt.part_abla
    )
    print(cfg.TEST.WEIGHT)
    model.load_param(cfg.TEST.WEIGHT)
    inference(cfg, model, val_loader, num_query)
def main(args):
    """
    Main function for the script: generate a grid of GAN samples (Jittor port).

    :param args: parsed command line arguments (config, generator_file,
        output_dir, n_row, n_col)
    :return: None
    """
    from config import cfg as opt

    opt.merge_from_file(args.config)
    opt.freeze()

    print("Creating generator object ...")
    # create the generator object
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    # load the weights into it
    # gen.load_state_dict(torch.load(args.generator_file))
    gen.load(args.generator_file)

    # path for saving the files:
    save_path = args.output_dir
    os.makedirs(save_path, exist_ok=True)

    latent_size = opt.model.gen.latent_size
    # number of upsampling stages above the 4x4 base resolution
    out_depth = int(np.log2(opt.dataset.resolution)) - 2

    print("Generating scale synchronized images ...")
    # generate the images:
    # with torch.no_grad():
    with jt.no_grad():
        # point = torch.randn(args.n_row * args.n_col, latent_size)
        np.random.seed(1000)  # fixed seed so the sample grid is reproducible
        point = np.random.randn(args.n_row * args.n_col, latent_size)
        # point = (point / point.norm()) * (latent_size ** 0.5)
        # rescale latents to norm sqrt(latent_size), mirroring the torch code
        point = (point / np.linalg.norm(point)) * (latent_size**0.5)
        point = jt.array(point, dtype='float32')
        ss_image = gen(point, depth=out_depth, alpha=1)
        # color adjust the generated image:
        ss_image = adjust_dynamic_range(ss_image)

    print("gen done")
    # save the ss_image in the directory
    # ss_image = torch.from_numpy(ss_image.data)
    # save_image(ss_image, os.path.join(save_path, "grid.png"), nrow=args.n_row,
    #            normalize=True, scale_each=True, pad_value=128, padding=1)
    jt.save_image_my(ss_image, os.path.join(save_path, "grid.png"),
                     nrow=args.n_row, normalize=True, scale_each=True,
                     pad_value=128, padding=1)

    print('Done.')
def main():
    """Entry point: parse CLI args, finalize the config, and launch training."""
    # Parse command-line arguments; see the argparse module for details.
    parser = argparse.ArgumentParser(
        description="Classification Baseline Training")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER
                        )  # nargs=argparse.REMAINDER collects all remaining tokens into a list
    args = parser.parse_args()

    # os.environ exposes process environment variables (e.g. environ['HOME']).
    # WORLD_SIZE is produced by torch.distributed.launch:
    # nproc_per_node * number of nodes (1 here).
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # If a yaml override file is given, merge it in — this is the strength of
    # the rbgirshick/yacs module: parameters live apart from the code.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # batch sizes are derived: categories per batch x instances per category
    cfg.TRAIN.DATALOADER.IMS_PER_BATCH = cfg.TRAIN.DATALOADER.CATEGORIES_PER_BATCH * cfg.TRAIN.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.VAL.DATALOADER.IMS_PER_BATCH = cfg.VAL.DATALOADER.CATEGORIES_PER_BATCH * cfg.VAL.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.TEST.DATALOADER.IMS_PER_BATCH = cfg.TEST.DATALOADER.CATEGORIES_PER_BATCH * cfg.TEST.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.freeze(
    )  # freeze to prevent further modification — configuration is final here

    output_dir = cfg.SOLVER.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # The logger records the run; it has advantages over plain print.
    logger = setup_logger("classification", output_dir, "training", 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    # print the configuration file
    '''
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    #'''
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(
            "%s" % i for i in cfg.MODEL.DEVICE_ID)  # int tuple -> str
        # cfg.MODEL.DEVICE_ID
        cudnn.benchmark = True

    train(cfg)
def main():
    """Average several pre-computed distance matrices and run inference with the result."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg', "--config_file", default="",
                        help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    # up to six candidate distance matrices; missing files are skipped
    distmat_paths = [cfg.TEST.DISTMAT1, cfg.TEST.DISTMAT2,
                     cfg.TEST.DISTMAT3, cfg.TEST.DISTMAT4,
                     cfg.TEST.DISTMAT5, cfg.TEST.DISTMAT6]
    # load the distance matrices
    dist_mats = []
    cnt = 0
    thresh = 3
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            #mat = f['dist_mat'][()]
            # NOTE(review): both branches read the same dataset 'dist_mat1',
            # so thresh has no effect — presumably one branch was meant to
            # read a different key; confirm against the files being loaded.
            if cnt < thresh:
                mat = f['dist_mat1'][()]
            else:
                mat = f['dist_mat1'][()]
            # add a leading axis so the matrices can be stacked and averaged
            mat = mat[np.newaxis, ...]
            dist_mats.append(mat)
            f.close()
            cnt += 1

    logger.info(f'Average {cnt} results')
    # element-wise mean over all loaded matrices
    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)

    inference_with_distmat(cfg, test_dataloader, num_query, dist_mat)
def main():
    """Entry point: configure, train a classifier, then plot its learning curve."""
    parser = argparse.ArgumentParser(
        description="Image Classification Training")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpu = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # experiment name defaults to the config file's basename (sans extension)
    experiment_name = 'no_config'
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
        experiment_name = args.config_file.split('/')[-1].split('.')[0]
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.MODEL.OUTPUT_PATH, experiment_name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger, log_path = setup_logger('{}'.format(cfg.PROJECT.NAME), output_dir,
                                    experiment_name)
    # NOTE(review): this first line logs only the project name despite the
    # "Running with config" label; the full config is logged further below.
    logger.info("Running with config:\n{}".format(cfg.PROJECT.NAME))
    logger.info("Using {} GPU".format(num_gpu))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train(cfg, experiment_name=experiment_name)

    # curve plotting is best-effort: a failure here must not fail the run
    try:
        logger.info("Drawing curve ......")
        plot_curve(log_path=log_path, experiment_name=experiment_name,
                   output=output_dir)
        logger.info("The curve is saved in {}".format(output_dir))
    except Exception as e:
        print(e)
def main():
    """Run the pre-selection stage: rank top-100 gallery candidates per query and dump indices to JSON."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model_pre(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)
    # inference(cfg, model, val_loader, num_query)

    device = cfg.MODEL.DEVICE

    # ignite-style evaluator whose only metric is the pre-selection index
    # (top-100 gallery ranking per query)
    evaluator = create_supervised_evaluator(
        model,
        metrics={
            'pre_selection_index': pre_selection_index(
                num_query, max_rank=100, feat_norm=cfg.TEST.FEAT_NORM)
        },
        device=device)
    evaluator.run(val_loader)

    index = evaluator.state.metrics['pre_selection_index']
    # persist the candidate indices for the later (expensive) ranking stage
    with open(cfg.Pre_Index_DIR, 'w+') as f:
        json.dump(index.tolist(), f)
    print("Pre_Selection_Done")
def main():
    """AGW baseline entry point: build model, optionally wrap in DataParallel, evaluate."""
    parser = argparse.ArgumentParser(description="AGW Re-ID Baseline")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # new add by gu
    cudnn.benchmark = True

    data_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if 'cpu' not in cfg.MODEL.DEVICE:
        # multi-GPU: replicate the model across devices
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        model.to(device=cfg.MODEL.DEVICE)

    if cfg.TEST.EVALUATE_ONLY == 'on':
        # evaluation-only mode: load weights, test, and exit
        logger.info("Evaluate Only")
        model.load_param(cfg.TEST.WEIGHT)
        do_test(cfg, model, data_loader, num_query)
        return
def main(args):
    """
    Main function for the script: sample images from a trained generator,
    either from random latents or from a saved dlatent code.

    :param args: parsed command line arguments (config, generator_file,
        output_dir, num_samples, input, output)
    :return: None
    """
    from config import cfg as opt

    opt.merge_from_file(args.config)
    opt.freeze()

    print("Creating generator object ...")
    print(opt.model.gen)
    # create the generator object
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    # load the weights into it
    # gen.load_state_dict(torch.load(args.generator_file))
    gen = load(gen, args.generator_file)

    # path for saving the files:
    save_path = args.output_dir
    os.makedirs(save_path, exist_ok=True)

    latent_size = opt.model.gen.latent_size
    # number of upsampling stages above the 4x4 base resolution
    out_depth = int(np.log2(opt.dataset.resolution)) - 2

    if args.input is None:
        # no latent code supplied: sample num_samples random images
        print("Generating scale synchronized images ...")
        for img_num in tqdm(range(1, args.num_samples + 1)):
            # generate the images:
            with torch.no_grad():
                point = torch.randn(1, latent_size)
                # rescale the latent to norm sqrt(latent_size)
                point = (point / point.norm()) * (latent_size ** 0.5)
                ss_image = gen(point, depth=out_depth, alpha=1)
                # color adjust the generated image:
                ss_image = adjust_dynamic_range(ss_image)
            # save the ss_image in the directory
            save_image(ss_image,
                       os.path.join(save_path, str(img_num) + ".png"))
        print("Generated %d images at %s" % (args.num_samples, save_path))
    else:
        # a dlatent code was supplied: synthesize one image from it
        # (note this branch saves to args.output, not output_dir)
        code = np.load(args.input)
        dlatent_in = torch.unsqueeze(torch.from_numpy(code), 0)
        ss_image = gen.g_synthesis(dlatent_in, depth=out_depth, alpha=1)
        # color adjust the generated image:
        ss_image = adjust_dynamic_range(ss_image)
        save_image(ss_image, args.output)
def main():
    """Run flipped-image aligned inference with a model loaded sans classifier FC."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg', "--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes=0: the classifier head is unused at inference time
    model = build_model(cfg, 0)
    #print('model', model)
    model = model.cuda()
    # load backbone weights only, skipping the classifier FC layer
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    #inference_no_rerank(cfg, model, test_dataloader, num_query)
    #inference(cfg, model, test_dataloader, num_query)
    #inference_aligned(cfg, model, test_dataloader, num_query)

    # using flipped image
    inference_aligned_flipped(cfg, model, test_dataloader, num_query,
                              use_local_feature=False, use_rerank=True,
                              use_cross_feature=True)
def main():
    """Extract query/gallery features with a trained model and save them as .npy files.

    Reads the config + checkpoint, runs feature extraction over the validation
    loader, and writes query features (QF_NAME) and gallery features (GF_NAME)
    under ../data/feature_expansion/.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model = model.cuda()
    model = model.eval()

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")
    with torch.no_grad():
        qf, gf = extract_feature(model, val_loader, num_query)

    # save feature
    np.save('../data/feature_expansion/' + cfg.TEST.QF_NAME, qf.cpu().numpy())
    np.save('../data/feature_expansion/' + cfg.TEST.GF_NAME, gf.cpu().numpy())
    # BUG FIX: removed a dangling ''' that followed the saves in the original;
    # it opened an unterminated triple-quoted string (leftover comment
    # delimiter) and made the function a syntax hazard.
def main():
    """Entry point: configure logging/TensorBoard and launch ReidSystem training."""
    parser = argparse.ArgumentParser(description="ReID Model Training")
    parser.add_argument('-cfg', "--config_file", default="", metavar="FILE",
                        help="path to config file", type=str)
    # parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # GPU count is inferred from CUDA_VISIBLE_DEVICES (default: GPU 0)
    gpus = os.environ[
        "CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ else '0'
    gpus = [int(i) for i in gpus.split(',')]
    num_gpus = len(gpus)

    # cfg.SOLVER.DIST = num_gpus > 1

    # if cfg.SOLVER.DIST:
    #     torch.cuda.set_device(args.local_rank)
    #     torch.distributed.init_process_group(
    #         backend="nccl", init_method="env://"
    #     )
    #     torch.cuda.synchronize()

    cfg.freeze()

    # logs are grouped by test-dataset names and model version
    log_save_dir = os.path.join(cfg.OUTPUT_DIR,
                                '-'.join(cfg.DATASETS.TEST_NAMES),
                                cfg.MODEL.VERSION)
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)

    logger = setup_logger("reid_baseline.train", log_save_dir, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger.info('start training')
    cudnn.benchmark = True

    # TensorBoard writer lives under <log_save_dir>/tf
    writer = SummaryWriter(os.path.join(log_save_dir, 'tf'))
    reid_system = ReidSystem(cfg, logger, writer)
    reid_system.train()
def main():
    """Run visualization-enabled inference over a collection of test sets."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg', "--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # GPU count is inferred from CUDA_VISIBLE_DEVICES (default: GPU 0)
    gpus = os.environ[
        "CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ else '0'
    gpus = [int(i) for i in gpus.split(',')]
    num_gpus = len(gpus)

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes=0: the classifier head is unused at inference time
    model = build_model(cfg, 0)
    # load backbone weights only, skipping the classifier FC layer
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, test_items_collection = get_test_dataloader(
        cfg)

    inference(cfg, model, test_dataloader_collection, num_query_collection,
              is_vis=True, test_collection=test_items_collection)
def main():
    """Evaluate a trained classifier on the requested dataset split(s).

    ``--target_set`` selects "train", "valid", "test", or "all"; any other
    value raises with an explanatory message.
    """
    parser = argparse.ArgumentParser(
        description="Classification Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument(
        "--target_set", default="",
        help="name of target dataset: train, valid, test, all", type=str
    )
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # batch sizes are derived: categories per batch x instances per category
    cfg.TRAIN.DATALOADER.IMS_PER_BATCH = cfg.TRAIN.DATALOADER.CATEGORIES_PER_BATCH * cfg.TRAIN.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.VAL.DATALOADER.IMS_PER_BATCH = cfg.VAL.DATALOADER.CATEGORIES_PER_BATCH * cfg.VAL.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.TEST.DATALOADER.IMS_PER_BATCH = cfg.TEST.DATALOADER.CATEGORIES_PER_BATCH * cfg.TEST.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.freeze()

    output_dir = cfg.SOLVER.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("classification", output_dir,
                          "eval_on_{}".format(args.target_set), 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(
            "%s" % i for i in cfg.MODEL.DEVICE_ID)  # int tuple -> str
        cudnn.benchmark = True

    logger.info("Eval on the {} dataset".format(args.target_set))
    if args.target_set == "train" or args.target_set == "valid" or args.target_set == "test":
        eval(cfg, args.target_set)
    elif args.target_set == "all":
        eval(cfg, "train")
        eval(cfg, "valid")
        eval(cfg, "test")
    else:
        # BUG FIX: the original raised with args.dataset_name, which is not
        # defined on this parser — the raise itself crashed with
        # AttributeError instead of reporting the bad split name.
        raise Exception("Wrong dataset name with {}".format(args.target_set))
def main(merge_list=None):
    """Build the run configuration and saver, seed RNGs, and return them.

    :param merge_list: optional extra KEY VALUE override list applied after
        the CLI opts
    :return: tuple ``(cfg, saver)``
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file", default="",
                        help="path to config file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    if merge_list:
        cfg.merge_from_list(merge_list)

    saver = Saver(cfg)

    # separate log files for test vs. train runs
    if cfg.TEST.IF_ON:
        log_file = 'test-log.txt'
    else:
        log_file = 'train-log.txt'
    logger = setup_logger("reid_baseline", saver.save_dir, log_file)
    logger.setLevel(logging.INFO)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))
    logger.info("=" * 20)

    # os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    torch.cuda.set_device(cfg.GPU.DEVICE_ID)
    logger.info(f"Using GPU: {cfg.GPU.DEVICE_ID}")
    logger.info(f"CUDNN VERSION: {cudnn.version()}")
    cudnn.enabled = True
    cudnn.benchmark = True
    if cfg.GPU.IF_DETERMINISTIC:
        # deterministic cuDNN kernels + fixed seeds across all RNG sources
        cudnn.benchmark = False
        cudnn.deterministic = True
        # FIX: torch.random.manual_seed(1024) was issued twice in the
        # original; the duplicate call was redundant and has been removed.
        torch.random.manual_seed(1024)
        torch.cuda.manual_seed(1024)      # gpu
        torch.cuda.manual_seed_all(1024)  # all gpus
        np.random.seed(1024)              # numpy
        random.seed(1024)                 # random and transforms
        torch.set_printoptions(precision=10)

    return cfg, saver
def main():
    """Remap checkpoint keys to the current model layout and save "<WEIGHT>.updated".

    Loads the checkpoint named by cfg.TEST.WEIGHT, prefixes backbone-style
    keys so they match the current model's state_dict, and writes the merged
    state dict back to disk.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg', "--config_file", default="",
                        help="path to config file", type=str
                        )
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes=0: classifier head irrelevant for key remapping
    model = build_model(cfg, 0)
    sd = model.state_dict()            # target key layout
    xsd = torch.load(cfg.TEST.WEIGHT)  # checkpoint to be remapped
    print('sd', sd.keys())
    print('xsd', xsd.keys())
    for key in xsd.keys():
        # backbone tensors gained a 'base.' (or 'base.base.') prefix in the
        # current model; everything else keeps its name
        if key.startswith('base') or key.startswith('bn') or key.startswith('conv1'):
            new_key = 'base.' + key
        elif key.startswith('shallow_cam'):
            new_key = 'base.base.' + key
        else:
            new_key = key
        sd[new_key] = xsd[key]
    torch.save(sd, cfg.TEST.WEIGHT + '.updated')
def main():
    """Run inference with a dual-head model restored from PRETRAIN_PATH2."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; assume 1 GPU otherwise
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    #train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    #model = build_model(cfg, num_classes)
    #model.load_param(cfg.TEST.WEIGHT)
    # this variant of the loader also returns a second label space
    train_loader, val_loader, num_query, num_classes, num_classes2, image_map_label2 = make_data_loader(cfg)
    model = build_model(cfg, num_classes, num_classes2)

    print('--- resume from ', cfg.MODEL.PRETRAIN_PATH2)
    if cfg.MODEL.ONCE_LOAD == 'yes':
        # whole state dict in one shot, forced onto CPU storage
        print('\n---ONCE_LOAD...\n')
        model.load_state_dict(
            torch.load(cfg.MODEL.PRETRAIN_PATH2,
                       map_location=lambda storage, loc: storage))
    else:
        # selective load (optionally base-only / without FC layers)
        functions.load_state_dict(model, cfg.MODEL.PRETRAIN_PATH2,
                                  cfg.MODEL.ONLY_BASE, cfg.MODEL.WITHOUT_FC)

    inference(cfg, model, val_loader, num_query)
def main():
    """Run ReID inference with an HRNet backbone: merge both config trees
    (reid yacs config + HRNet config), build the model, load the test
    weights and evaluate on the validation set.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="configs/softmax_triplet_with_center.yml",
                        help="path to config file", type=str)
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    parser.add_argument("--cfg",
                        default="configs/cls_hrnet_w32_sgd_lr5e-2_wd1e-4_bs32_x100.yaml",
                        help="path to config file", type=str)
    args = parser.parse_args()

    # The HRNet backbone keeps its own config object, updated separately.
    update_config(cfg_hr, args)

    # WORLD_SIZE is exported by torch.distributed launchers; default to 1.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            cfg_text = "\n" + cf.read()
            logger.info(cfg_text)
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, cfg_hr, num_classes)
    # Checkpoint was saved from a DataParallel-wrapped model ('module.' keys),
    # so wrap before loading the state dict.
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))

    inference(cfg, model, val_loader, num_query)
def main():
    """Run ReID inference for the kesci competition pipeline: merge the
    config, build the model, load the test weights and evaluate.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file", default="", help="path to config file", type=str)
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is exported by torch.distributed launchers; default to 1.
    num_gpus = int(os.environ.get("WORLD_SIZE", "1"))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            cfg_text = "\n" + cf.read()
            logger.info(cfg_text)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    # NOTE: the weight path here must point at the trained model file
    # (cfg.TEST.WEIGHT); per-part ensemble weights can be loaded through
    # model.partN.load_param(...) instead if needed.
    model.load_param(cfg.TEST.WEIGHT)

    inference_kesci(cfg, model, val_loader, num_query)
def main(args):
    """
    Main function for the script: build the generator, load its weights,
    and render a style-mixing figure to args.output.
    :param args: parsed command line arguments
    :return: None
    """
    from config import cfg as opt

    opt.merge_from_file(args.config)
    # Disable noise injection so the rendered figure is deterministic.
    opt.model.gen.use_noise = False
    opt.freeze()

    print("Creating generator object ...")
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    gen.load(args.generator_file)

    # Seeds for the source (style-providing) and destination latents.
    src_seeds = [166, 1721, 1181, 255, 239, 284, 2310, 1140]
    dst_seeds = [21] * 6
    # Mix progressively more source layers per column: 0-2, 0-4, ..., 0-12.
    mix_ranges = [range(0, 2 * k) for k in range(1, 7)]

    draw_style_mixing_figure(args.output, gen, out_depth=5,
                             src_seeds=src_seeds, dst_seeds=dst_seeds,
                             style_ranges=mix_ranges)
    print('Done.')
def main(file_name, log):
    """Train the classifier described by ./configs/<file_name>.

    Builds the model (optionally resuming from an existing checkpoint),
    splits the FGVC7 training set into train/validation subsets, and runs
    the epoch loop, checkpointing whenever validation accuracy improves.

    :param file_name: name of the yaml config file inside ./configs/
    :param log: a configured logger used for progress messages
    :return: None
    """
    set_seed(cfg.SOLVER.SEED)
    config_file = './configs/' + file_name
    cfg.merge_from_file(config_file)
    # os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_CUDA else "cpu")

    weight_path = cfg.MODEL.MODEL_PATH + cfg.MODEL.NAME + '.pth'
    model = choose_net(name=cfg.MODEL.NAME, num_classes=cfg.MODEL.CLASSES,
                       weight_path=cfg.MODEL.WEIGHT_FROM)
    best_acc = 0.0
    log.info('Train : {}'.format(cfg.MODEL.NAME))
    # Resume from a previous checkpoint (weights + best accuracy) if present.
    if os.path.exists(weight_path):
        checkpoint = torch.load(weight_path)
        state_dict = checkpoint['state_dict']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(state_dict)
        log.info('Network loaded from {}'.format(weight_path))
    model.to(device)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))

    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.SOLVER.BASE_LR, amsgrad=True)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, cfg.SOLVER.MAX_EPOCHS,
                                                           eta_min=1e-6)

    # Split one dataset into train/val via disjoint index samplers;
    # the first SPLIT fraction of indices is held out for validation.
    train_dataset = FGVC7Data(root=cfg.DATASETS.ROOT_DIR, phase='train',
                              transform=get_transform(cfg.INPUT.SIZE_TRAIN, 'train'))
    indices = range(len(train_dataset))
    split = int(cfg.DATASETS.SPLIT * len(train_dataset))
    train_indices = indices[split:]
    test_indices = indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(test_indices)
    train_loader = DataLoader(train_dataset, batch_size=cfg.DATASETS.BATCH_SIZE,
                              sampler=train_sampler, num_workers=cfg.DATASETS.WORKERS,
                              pin_memory=True)
    val_loader = DataLoader(train_dataset, batch_size=cfg.DATASETS.BATCH_SIZE,
                            sampler=valid_sampler, num_workers=cfg.DATASETS.WORKERS,
                            pin_memory=True)

    for epoch in range(cfg.SOLVER.MAX_EPOCHS):
        train(model, optimizer, epoch, train_loader, log)
        scheduler.step()
        # Validate (and possibly checkpoint) every 5 epochs.
        if (epoch + 1) % 5 == 0:
            acc = validate(model, val_loader, epoch, log)
            if acc > best_acc:
                # BUG FIX: update best_acc before saving. The original never
                # assigned it, so the checkpoint recorded the stale previous
                # best and every accuracy above the *initial* value kept
                # overwriting the file.
                best_acc = acc
                # Unwrap DataParallel so the saved keys have no 'module.' prefix.
                if torch.cuda.device_count() > 1:
                    state = model.module.state_dict()
                else:
                    state = model.state_dict()
                torch.save({'best_acc': best_acc, 'state_dict': state}, weight_path)
def parse_config():
    """Parse command-line arguments for CDNet inference, merge them into the
    global config, prepare the output directory and logger, and configure the
    CUDA device selection.

    Exits with status 1 if no output path is configured.
    """
    parser = argparse.ArgumentParser(description='CDNet inference')
    parser.add_argument("--config_file", default="", help="path to specified config file",
                        type=str)
    parser.add_argument("opts", default=None, help="modify some value in config file",
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT.DIRS
    if output_dir != "":
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    else:
        print("ERROR: please specify an output path")
        exit(1)

    logger = setup_logger("CDNet", output_dir, 0, cfg.OUTPUT.LOG_NAME)
    use_gpu = cfg.MODEL.DEVICE == 'cuda'
    if use_gpu:
        logger.info("Test with GPU: {}".format(cfg.MODEL.DEVICE_IDS))
    else:
        logger.info("Test with CPU")
    logger.info(args)
    if args.config_file != "":
        # fixed log-message typo ("configuratioin")
        logger.info("load configuration file {}".format(args.config_file))
    logger.info("test with config:\n{}".format(cfg))

    if use_gpu:
        # BUG FIX: the variable is CUDA_VISIBLE_DEVICES (plural). The original
        # set "CUDA_VISIBLE_DEVICE", which CUDA ignores, so the configured
        # device IDs never took effect (HEAD's sibling parse_config uses the
        # correct name).
        os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_IDS
        # cudnn autotuner speeds up fixed-shape convolution workloads
        cudnn.benchmark = True
def update_my_config():
    """Load the MPII HRNet-W32 experiment config, point TEST.MODEL_FILE at
    the pretrained checkpoint, and redirect output/log dirs to test folders.
    """
    cfg.defrost()
    cfg.merge_from_file('experiments/mpii/hrnet/w32_256x256_adam_lr1e-3.yaml')
    cfg.merge_from_list([
        "TEST.MODEL_FILE",
        "/mnt/models/HRNet/pose_mpii/pose_hrnet_w32_256x256.pth",
    ])
    cfg.OUTPUT_DIR = "output_test"
    cfg.LOG_DIR = "log_test"
    cfg.freeze()