def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # Automatic OUTPUT dir: mirror the config path under logs/ with the
    # extension stripped, e.g. configs/foo/bar.yml -> logs/foo/bar
    cfg.merge_from_file(args.config_file)
    config_file_name = args.config_file.split('/')
    for i, x in enumerate(config_file_name):
        if x == 'configs':
            config_file_name[i] = 'logs'
        if '.yml' in x:
            config_file_name[i] = config_file_name[i][:-4]  # strip the '.yml' suffix
    cfg.OUTPUT_DIR = '/'.join(config_file_name)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    if args.eval_only or args.dist_only or args.tsne_only or args.domain_only:
        if args.eval_only:
            tmp = 'eval'
        if args.dist_only:
            tmp = 'dist'
        if args.tsne_only:
            tmp = 'tsne'
        if args.domain_only:
            tmp = 'domain'
        default_setup(cfg, args, tmp=tmp)
    else:
        default_setup(cfg, args)
    return cfg
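# Hedged usage sketch (not from the source): the argparse.Namespace fields
# below mirror the attributes setup() reads, and the config path is
# hypothetical. The rewrite above maps 'configs/Market1501/bagtricks_R50.yml'
# to OUTPUT_DIR 'logs/Market1501/bagtricks_R50', so logs mirror the config tree.
import argparse

_args = argparse.Namespace(
    config_file='configs/Market1501/bagtricks_R50.yml', opts=[],
    eval_only=False, dist_only=False, tsne_only=False, domain_only=False)
# cfg = setup(_args)  # cfg.OUTPUT_DIR == 'logs/Market1501/bagtricks_R50'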
def setup_cfg(config_file, opts):
    # Load config from file and command-line arguments.
    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.merge_from_list(opts)
    cfg.freeze()
    return cfg
def setup_cfg(args):
    cfg = get_cfg()
    # add_cls_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
def map(wrapper):  # NOTE: shadows the built-in map(); name kept to match the original API
    model = wrapper
    cfg = get_cfg()
    # test_loader, num_query = build_reid_test_loader(cfg, dataset_name="TEST", T.Compose([]))
    test_loader, num_query = build_reid_test_loader(cfg, dataset_name="TEST")
    feats = []
    pids = []
    camids = []
    for batch in test_loader:
        for image_path in batch["img_paths"]:
            t = torch.Tensor(np.array([model.infer(cv2.imread(image_path))]))
            # Tensor.to() is out-of-place; the result must be reassigned.
            t = t.to(torch.device(GPU_ID))
            feats.append(t)
        pids.extend(batch["targets"].numpy())
        camids.extend(batch["camids"].numpy())
    feats = torch.cat(feats, dim=0)
    # The first num_query rows are queries, the rest are gallery.
    q_feat = feats[:num_query]
    g_feat = feats[num_query:]
    q_pids = np.asarray(pids[:num_query])
    g_pids = np.asarray(pids[num_query:])
    q_camids = np.asarray(camids[:num_query])
    g_camids = np.asarray(camids[num_query:])
    # Cosine distance, assuming the features are L2-normalized.
    distmat = 1 - torch.mm(q_feat, g_feat.t())
    distmat = distmat.cpu().numpy()
    all_cmc, all_AP, all_INP = eval_market1501(distmat, q_pids, g_pids, q_camids, g_camids, 5)
    mAP = np.mean(all_AP)
    print("mAP {}, rank-1 {}".format(mAP, all_cmc[0]))
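# Hedged usage sketch: map() above expects a wrapper exposing
# `.infer(bgr_image) -> 1-D feature vector`. DummyWrapper is a hypothetical
# stand-in illustrating that contract, not part of the source.
class DummyWrapper:
    def infer(self, img):
        feat = np.random.rand(2048).astype(np.float32)
        # L2-normalize so that 1 - q.g above is a cosine distance.
        return feat / np.linalg.norm(feat)

# map(DummyWrapper())  # prints mAP and rank-1 on the "TEST" dataset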
def setup_cfg(args):
    # Load config from file and command-line arguments.
    cfg = get_cfg()
    # add_partialreid_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def __init__(self, cfg):
    super().__init__(cfg)

    # Get teacher model config
    model_ts = []
    for i in range(len(cfg.KD.MODEL_CONFIG)):
        cfg_t = get_cfg()
        cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG[i])
        cfg_t.defrost()
        cfg_t.MODEL.META_ARCHITECTURE = "Baseline"
        # Change syncBN to BN because the teacher is not wrapped in DDP
        if cfg_t.MODEL.BACKBONE.NORM == "syncBN":
            cfg_t.MODEL.BACKBONE.NORM = "BN"
        if cfg_t.MODEL.HEADS.NORM == "syncBN":
            cfg_t.MODEL.HEADS.NORM = "BN"

        model_t = build_model(cfg_t)

        # No gradients for the teacher model
        for param in model_t.parameters():
            param.requires_grad_(False)

        logger.info("Loading teacher model weights ...")
        Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS[i])

        model_ts.append(model_t)

    self.ema_enabled = cfg.KD.EMA.ENABLED
    self.ema_momentum = cfg.KD.EMA.MOMENTUM
    if self.ema_enabled:
        cfg_self = cfg.clone()
        cfg_self.defrost()
        cfg_self.MODEL.META_ARCHITECTURE = "Baseline"
        if cfg_self.MODEL.BACKBONE.NORM == "syncBN":
            cfg_self.MODEL.BACKBONE.NORM = "BN"
        if cfg_self.MODEL.HEADS.NORM == "syncBN":
            cfg_self.MODEL.HEADS.NORM = "BN"
        model_self = build_model(cfg_self)

        # No gradients for the self-distillation model
        for param in model_self.parameters():
            param.requires_grad_(False)

        if cfg_self.MODEL.WEIGHTS != '':
            logger.info("Loading self distillation model weights ...")
            Checkpointer(model_self).load(cfg_self.MODEL.WEIGHTS)
        else:
            # Make sure the initial states are the same
            for param_q, param_k in zip(self.parameters(), model_self.parameters()):
                param_k.data.copy_(param_q.data)

        model_ts.insert(0, model_self)

    # Keep the teacher models in a plain list (not registered as `nn.Module`s)
    # so their weights are not saved in checkpoints
    self.model_ts = model_ts
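# Hedged sketch (an assumption, not shown in the source): the momentum update
# that `self.ema_momentum` implies for the EMA teacher, applied once per
# training iteration: k <- m * k + (1 - m) * q.
@torch.no_grad()
def _ema_update(model_q, model_k, m):
    for p_q, p_k in zip(model_q.parameters(), model_k.parameters()):
        p_k.data.mul_(m).add_(p_q.data, alpha=1.0 - m)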
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_cross_domain_baseline_config(cfg)
    add_memory_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def setup_cfg():
    cfg_ = get_cfg()
    # Fall back to CPU when torch is not compiled with CUDA enabled;
    # the original unconditionally re-assigned 'cuda' right after this check,
    # which made the fallback dead code.
    cfg_.MODEL.DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
    # car
    cfg_.merge_from_file(abspath('configs/VehicleID/bagtricks_R50-ibn.yml'))
    cfg_.merge_from_list(
        ['MODEL.WEIGHTS', abspath('models/vehicleid_bot_R50-ibn.pth')])
    # cfg_.merge_from_file(abspath('configs/VERIWild/bagtricks_R50-ibn.yml'))
    # cfg_.merge_from_list(['MODEL.WEIGHTS', abspath('models/veriwild_bot_R50-ibn.pth')])
    cfg_.freeze()
    return cfg_
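# Hedged usage sketch: consuming the frozen config with the same
# build_model/Checkpointer calls used elsewhere in this file.
# `build_vehicle_model` is a hypothetical helper, not part of the source.
def build_vehicle_model():
    cfg = setup_cfg()
    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    return model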
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.SPECIFIC_DATASET = args.specific_dataset
    if args.specific_dataset is not None and not args.eval_only:
        cfg.OUTPUT_DIR = osp.join(cfg.OUTPUT_DIR, args.specific_dataset)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def test_fusebn(self):
    cfg = get_cfg()
    cfg.defrost()
    cfg.MODEL.BACKBONE.NAME = 'build_repvgg_backbone'
    cfg.MODEL.BACKBONE.DEPTH = 'B1g2'
    cfg.MODEL.BACKBONE.PRETRAIN = False

    model = build_backbone(cfg)
    model.eval()

    test_inp = torch.randn((1, 3, 256, 128))
    y = model(test_inp)

    # Fuse BN into conv weights, then check the outputs still match.
    model.deploy(mode=True)
    fused_y = model(test_inp)

    print("final error :", torch.max(torch.abs(fused_y - y)).item())
def __init__(self, model_config, model_path, use_cuda=True):
    cfg = get_cfg()
    cfg.merge_from_file(model_config)
    cfg.MODEL.BACKBONE.PRETRAIN = False
    self.net = DefaultTrainer.build_model(cfg)
    self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"

    Checkpointer(self.net).load(model_path)
    logger = logging.getLogger("root.tracker")
    logger.info("Loading weights from {}... Done!".format(model_path))
    self.net.to(self.device)
    self.net.eval()

    height, width = cfg.INPUT.SIZE_TEST
    self.size = (width, height)
    self.norm = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
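# Hedged sketch of the matching forward path (assumed from self.size and
# self.norm set up above; not shown in the source). It would live on the same
# class as the __init__ above: resize the crops, normalize, and run the
# network in one batch.
def __call__(self, im_crops):
    batch = torch.stack(
        [self.norm(cv2.resize(im, self.size)) for im in im_crops], dim=0)
    with torch.no_grad():
        features = self.net(batch.to(self.device))
    return features.cpu().numpy()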
def __init__(self, cfg):
    super(Distiller, self).__init__(cfg)

    # Get teacher model config
    cfg_t = get_cfg()
    cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG)

    model_t = build_model(cfg_t)
    logger.info("Teacher model:\n{}".format(model_t))

    # No gradients for the teacher model
    for param in model_t.parameters():
        param.requires_grad_(False)

    logger.info("Loading teacher model weights ...")
    Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS)

    # Keep the teacher in a plain list (not registered as an `nn.Module`)
    # so its weights are not saved in checkpoints
    self.model_t = [model_t.backbone, model_t.heads]
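# Hedged sketch (an assumption, not from the source): a common distillation
# objective for matching student logits against the teacher kept above,
# softened with a temperature T.
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, T=4.0):
    # Soften both distributions with temperature T and match them with KL;
    # the T**2 factor keeps gradient magnitudes comparable across temperatures.
    p_t = F.softmax(teacher_logits / T, dim=1)
    log_p_s = F.log_softmax(student_logits / T, dim=1)
    return F.kl_div(log_p_s, p_t, reduction='batchmean') * (T ** 2)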
def __init__(self, config_file):
    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.defrost()
    cfg.MODEL.WEIGHTS = 'projects/bjzProject/logs/bjz/arcface_adam/model_final.pth'
    model = build_model(cfg)
    Checkpointer(model).resume_or_load(cfg.MODEL.WEIGHTS)

    model.cuda()
    model.eval()
    self.model = model
    # self.model = torch.jit.load("reid_model.pt")
    # self.model.eval()
    # self.model.cuda()

    # Trace the `inference` method with a dummy input and save a TorchScript module.
    example = torch.rand(1, 3, 256, 128)
    example = example.cuda()
    traced_script_module = torch.jit.trace_module(model, {'inference': example})
    traced_script_module.save("reid_feat_extractor.pt")
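# Hedged usage sketch: reloading the TorchScript module saved above. Because
# `inference` was the method name given to torch.jit.trace_module, it is
# callable on the loaded module. `load_traced_extractor` is a hypothetical
# helper, not part of the source.
def load_traced_extractor(path="reid_feat_extractor.pt"):
    extractor = torch.jit.load(path)
    feat = extractor.inference(torch.rand(1, 3, 256, 128).cuda())
    return extractor, feat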
def __init__(self, cfg):
    super().__init__(cfg)

    # Get teacher model config
    model_ts = []
    for i in range(len(cfg.KD.MODEL_CONFIG)):
        cfg_t = get_cfg()
        cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG[i])

        model_t = build_model(cfg_t)

        # No gradients for the teacher model
        for param in model_t.parameters():
            param.requires_grad_(False)

        logger.info("Loading teacher model weights ...")
        Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS[i])

        model_ts.append(model_t)

    # Keep the teacher models in a plain list (not registered as `nn.Module`s)
    # so their weights are not saved in checkpoints
    self.model_ts = model_ts
def setup_cfg(args):
    # Load config from file and command-line arguments.
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    return cfg
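# Hedged usage sketch: a minimal argparse front end producing the `args`
# namespace consumed above. The flag names follow the common detectron2-style
# convention but are assumptions here.
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file', metavar='FILE', help='path to config file')
    parser.add_argument('opts', nargs=argparse.REMAINDER,
                        help='config overrides, e.g. MODEL.WEIGHTS /path/to/model.pth')
    return parser.parse_args()

# cfg = setup_cfg(parse_args())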