def loadModel(modelpath):
    # Build the baseline model with a dummy class count; only the feature
    # extractor is needed at inference time.
    dummy = 10
    mdl = Baseline(dummy, 1, modelpath, 'bnneck', 'after', 'resnet50', 'self')
    mdl.load_param(modelpath)
    model = nn.DataParallel(mdl)
    model = model.to('cuda')
    model.eval()
    return model
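# A minimal usage sketch (not part of the original code). The checkpoint path
# below is a hypothetical placeholder for a trained Baseline checkpoint.
# model = loadModel('/path/to/resnet50_checkpoint.pth')
# with torch.no_grad():
#     feat = model(torch.randn(1, 3, 384, 128).cuda())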
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = get_data(cfg)
    model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH,
                     cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE, cfg.MODEL.BREACH)
    model.load_param(cfg.TEST.WEIGHT)  # load the trained model parameters
    inference(cfg, model, val_loader, num_query)
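# Example invocation (a sketch only; the script name, config file, and weight
# path are illustrative placeholders, not from the original repository):
#   python tools/test.py --config_file configs/softmax_triplet.yml \
#       TEST.WEIGHT /path/to/trained_model.pth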
print("ONNX export Done.") print("Saving graph of ONNX exported model to {} ...".format( graph_save_path)) predict_net.export_graph(graph_save_path) print("Checking if tf.pb is right") _check_pytorch_tf_model(model, graph_save_path) if __name__ == '__main__': model = Baseline('resnet50', num_classes=0, last_stride=1, with_ibn=False, with_se=False, gcb=None, stage_with_gcb=[False, False, False, False], pretrain=False, model_path='') model.load_params_wo_fc( torch.load( 'logs/bjstation/res50_baseline_v0.4/ckpts/model_epoch80.pth')) # model.cuda() model.eval() dummy_inputs = torch.randn(1, 3, 384, 128) export_tf_reid_model(model, dummy_inputs, 'reid_tf.pb') # inputs = torch.rand(1, 3, 384, 128).cuda() # # _export_via_onnx(model, inputs)
class Reid(object):

    def __init__(self, model_path):
        self.model = Baseline('resnet50',
                              num_classes=0,
                              last_stride=1,
                              with_ibn=False,
                              with_se=False,
                              gcb=None,
                              stage_with_gcb=[False, False, False, False],
                              pretrain=False,
                              model_path='')
        self.model.load_params_wo_fc(torch.load(model_path))
        # state_dict = torch.load('/export/home/lxy/reid_baseline/logs/2019.8.12/bj/ibn_lighting/models/model_119.pth')
        # self.model.load_params_wo_fc(state_dict['model'])
        self.model.cuda()
        self.model.eval()

        # self.model = torch.jit.load("reid_model.pt")
        # self.model.eval()
        # self.model.cuda()

        # example = torch.rand(1, 3, 256, 128)
        # example = example.cuda()
        # traced_script_module = torch.jit.trace(self.model, example)
        # traced_script_module.save("reid_model.pt")

    @torch.no_grad()
    def demo(self, img_path):
        # Preprocess a single image: BGR -> RGB, resize to (w=128, h=384),
        # then apply ImageNet mean/std normalization.
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (128, 384))
        img = img / 255.0
        img = (img - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
        img = img.transpose((2, 0, 1)).astype(np.float32)
        img = img[np.newaxis, :, :, :]
        data = torch.from_numpy(img).cuda().float()
        output = self.model(data)
        feat = output.cpu().data.numpy()
        return feat

    @torch.no_grad()
    def extract_feat(self, dataloader):
        prefetcher = test_data_prefetcher(dataloader)
        feats = []
        labels = []
        batch = prefetcher.next()
        num_count = 0
        while batch[0] is not None:
            img, pid, camid = batch
            feat = self.model(img)
            feats.append(feat.cpu())
            labels.extend(np.asarray(pid))
            # if num_count > 2:
            #     break
            batch = prefetcher.next()
            # num_count += 1

        feats = torch.cat(feats, dim=0)
        id_feats = defaultdict(list)
        for f, i in zip(feats, labels):
            id_feats[i].append(f)

        all_feats = []
        label_names = []
        for i in id_feats:
            all_feats.append(torch.stack(id_feats[i], dim=0).mean(dim=0))
            label_names.append(i)

        label_names = np.asarray(label_names)
        all_feats = torch.stack(all_feats, dim=0)  # (n, 2048)
        all_feats = F.normalize(all_feats, p=2, dim=1)
        np.save('feats.npy', all_feats.cpu())
        np.save('labels.npy', label_names)
        cos = torch.mm(all_feats, all_feats.t()).numpy()  # (n, n)
        cos -= np.eye(all_feats.shape[0])
        f = open('check_cross_folder_similarity.txt', 'w')
        for i in range(len(label_names)):
            sim_indx = np.argwhere(cos[i] > 0.5)[:, 0]
            sim_name = label_names[sim_indx]
            write_str = label_names[i] + ' '
            # f.write(label_names[i] + '\t')
            for n in sim_name:
                write_str += (n + ' ')
                # f.write(n + '\t')
            f.write(write_str + '\n')

    def prepare_gt(self, json_file):
        feat = []
        label = []
        with open(json_file, 'r') as f:
            total = json.load(f)
        for index in total:
            label.append(index)
            feat.append(np.array(total[index]))
        time_label = [int(i[0:10]) for i in label]
        return np.array(feat), np.array(label), np.array(time_label)

    def compute_topk(self, k, feat, feats, label):
        # Cosine similarity between the query feature and every gallery feature.
        # num_gallery = feats.shape[0]
        # new_feat = np.tile(feat, [num_gallery, 1])
        norm_feat = np.sqrt(np.sum(np.square(feat), axis=-1))
        norm_feats = np.sqrt(np.sum(np.square(feats), axis=-1))
        matrix = np.sum(np.multiply(feat, feats), axis=-1)
        dist = matrix / np.multiply(norm_feat, norm_feats)
        # print('feat:', feat.shape)
        # print('feats:', feats.shape)
        # print('label:', label.shape)
        # print('dist:', dist.shape)
        index = np.argsort(-dist)
        # print('index:', index.shape)
        result = []
        for i in range(min(feats.shape[0], k)):
            print(dist[index[i]])
            result.append(label[index[i]])
        return result
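# A minimal usage sketch (not part of the original code). The checkpoint and
# image paths below are hypothetical placeholders.
# reid = Reid('/path/to/model_final.pth')
# feat_a = reid.demo('/path/to/person_a.jpg')
# feat_b = reid.demo('/path/to/person_b.jpg')
# # Cosine similarity between the two extracted features:
# sim = float(np.dot(feat_a[0], feat_b[0]) /
#             (np.linalg.norm(feat_a[0]) * np.linalg.norm(feat_b[0])))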