def load_pytorch_model():
    """Build the 19-point / 19-vector pose model with pretrained weights."""
    return pose_estimation.PoseModel(num_point=19, num_vector=19, pretrained=True)
def construct_model(args):
    """Build the pretrained pose model and wrap it for multi-GPU training.

    Args:
        args: namespace with ``gpu`` — the device ids to replicate across
            (passed straight to ``DataParallel``).

    Returns:
        The model wrapped in ``torch.nn.DataParallel`` and moved to CUDA.
    """
    model = pose_estimation.PoseModel(num_point=19, num_vector=19, pretrained=True)
    # Replicate across the requested GPUs; outputs are gathered on args.gpu[0].
    model = torch.nn.DataParallel(model, device_ids=args.gpu).cuda()
    return model
def construct_model(args):
    """Build the pose model, optionally restoring weights from a snapshot.

    Args:
        args: namespace with ``snapshot`` — path to a checkpoint whose
            ``'state_dict'`` entry holds the weights, or falsy to keep only
            the pretrained backbone initialization.

    Returns:
        The model moved to a single GPU (no ``DataParallel`` wrapping).
    """
    # Both branches of the original built the same pretrained model, so the
    # construction is hoisted out of the conditional.
    model = pose_estimation.PoseModel(num_point=19, num_vector=19, pretrained=True)
    if args.snapshot:
        print('--------load model from {}----------------'.format(args.snapshot))
        # NOTE(review): assumes the checkpoint keys carry no DataParallel
        # 'module.' prefix — confirm against how the trainer saved it.
        state_dict = torch.load(args.snapshot)['state_dict']
        model.load_state_dict(state_dict)
    model.cuda()  # single gpu
    return model
def construct_model(args):
    """Build the pose model and load evaluation weights from ``args.model``.

    Checkpoints saved through ``torch.nn.DataParallel`` prefix every
    parameter name with ``'module.'``; that prefix is stripped so the keys
    match a bare (unwrapped) model before loading.

    Args:
        args: namespace with ``model`` — path to a checkpoint file whose
            ``'state_dict'`` entry holds the (possibly prefixed) weights.

    Returns:
        The model on GPU, switched to eval mode.
    """
    model = pose_estimation.PoseModel(num_point=19, num_vector=19)
    checkpoint = torch.load(args.model)['state_dict']
    # Strip the prefix only where it is actually present; the original
    # blindly dropped the first 7 characters of every key, which corrupts
    # checkpoints saved without DataParallel.  A plain dict keeps insertion
    # order, so OrderedDict is unnecessary.
    prefix = 'module.'
    cleaned = {
        (k[len(prefix):] if k.startswith(prefix) else k): v
        for k, v in checkpoint.items()
    }
    # Merge into the freshly-initialized state so any parameter missing
    # from the checkpoint keeps its initialized value.
    state_dict = model.state_dict()
    state_dict.update(cleaned)
    model.load_state_dict(state_dict)
    model = model.cuda()
    model.eval()
    return model
def main():
    """Run a PyTorch pose model over a COCO minival subset and report keypoint AP.

    Loads the checkpoint, runs ``apply_model`` on each person image, converts
    the detected skeletons into COCO keypoint-result format, dumps them to
    ``evaluationResult.json``, and scores them with ``COCOeval``.
    """
    import pose_estimation
    #pytorch_model = '/home/xiangyu/data/pretrain/COCO/coco_pose_iter_440000.pth.tar'
    pytorch_model = '/home/xiangyu/samsung_pose/experiments/baseline/60000.pth.tar'
    model = pose_estimation.PoseModel(num_point=19, num_vector=19)

    img_dir = '/home/xiangyu/data/coco/images/val2014/'
    annFile = '/home/xiangyu/data/coco/annotations/person_keypoints_minival2014.json'
    num_imgs = 50
    # Model part index -> COCO keypoint index; -1 marks the neck, which COCO
    # does not annotate.  BUGFIX: this table was commented out in the
    # original, so the loop below raised NameError on `orderCOCO`.
    orderCOCO = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]

    myjsonValidate = []
    cocoGt = COCO(annFile)
    # Evaluate only images that contain people.
    catIds = cocoGt.getCatIds(catNms=['person'])
    imgIds = cocoGt.getImgIds(catIds=catIds)

    # -------------------------- pytorch model ------------------
    state_dict = torch.load(pytorch_model)['state_dict']
    model.load_state_dict(state_dict)
    model = model.cuda()
    model.eval()
    # -----------------------------------------------------------

    for i in range(num_imgs):
        print('{}/{}'.format(i, num_imgs))
        img_info = cocoGt.loadImgs(imgIds[i])[0]
        image_id = img_info['id']
        oriImg = cv2.imread(os.path.join(img_dir, img_info['file_name']))
        # boxsize / scale_search are module-level config — TODO confirm they
        # are defined elsewhere in this file.
        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]

        # apply model
        candidate, subset, canvas = apply_model(oriImg, model, multiplier)

        for j in range(len(subset)):
            keypoints = np.zeros(51)  # 17 COCO keypoints * (x, y, v)
            for part in range(18):
                if part == 1:  # neck — skipped by the original as well
                    continue
                index = int(subset[j][part])
                if index <= 0:  # part not detected for this person
                    continue
                realpart = orderCOCO[part]
                if realpart == -1:  # no COCO counterpart
                    continue
                keypoints[realpart * 3] = candidate[index][0]
                keypoints[realpart * 3 + 1] = candidate[index][1]
                keypoints[realpart * 3 + 2] = 1  # visibility flag
            myjsonValidate.append({
                'image_id': image_id,
                'category_id': 1,  # person
                'keypoints': keypoints.tolist(),
                'score': subset[j][-2],
            })

    import json
    with open('evaluationResult.json', 'w') as outfile:
        json.dump(myjsonValidate, outfile)

    resJsonFile = 'evaluationResult.json'
    cocoDt2 = cocoGt.loadRes(resJsonFile)
    image_ids = [cocoGt.loadImgs(imgIds[i])[0]['id'] for i in range(num_imgs)]

    # running evaluation
    cocoEval = COCOeval(cocoGt, cocoDt2, 'keypoints')
    cocoEval.params.imgIds = image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    k = cocoEval.summarize()
def main():
    """Score ground-truth-driven skeletons (via ``mechanism``) against COCO.

    For each person image in the minival subset, builds candidate/subset
    arrays from the annotations with ``mechanism``, writes the visualization
    to ``./result``, converts the skeletons to COCO keypoint-result format,
    dumps them to ``evaluationResult.json``, and evaluates with ``COCOeval``.
    """
    import pose_estimation
    model = pose_estimation.PoseModel(num_point=19, num_vector=19)

    img_dir = '/home/bst2017/workspace/data/coco/images/val2014/'
    annFile = '/home/bst2017/workspace/data/coco/annotations/person_keypoints_minival2014.json'
    num_imgs = 50  #50 # COCO 38%
    # Model part index -> COCO keypoint index (-1: neck, absent from COCO).
    orderCOCO = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
    #[1, 0, 7, 9, 11, 6, 8, 10, 13, 15, 17, 12, 14, 16, 3, 2, 5, 4]

    detections = list(dict())
    cocoGt = COCO(annFile)
    img_names = cocoGt.imgs
    # filter only person
    cats = cocoGt.loadCats(cocoGt.getCatIds())
    catIds = cocoGt.getCatIds(catNms=['person'])
    imgIds = cocoGt.getImgIds(catIds=catIds)
    #ids = list(cocoGt.imgs.keys())
    #--------------------------------------------------------

    for idx in range(num_imgs):
        print('{}/{}'.format(idx, num_imgs))
        img_info = cocoGt.loadImgs(imgIds[idx])[0]
        image_id = img_info['id']
        oriImg = cv2.imread(os.path.join(img_dir, img_info['file_name']))
        ann_ids = cocoGt.getAnnIds(imgIds=image_id)
        img_anns = cocoGt.loadAnns(ann_ids)

        candidate, subset, canvas = mechanism(oriImg, img_anns)
        cv2.imwrite(os.path.join('./result', img_info['file_name']), canvas)

        for person in subset:
            kps = np.zeros(51)  # 17 COCO keypoints * (x, y, v)
            for part in range(18):
                if part == 1:  # neck: no COCO counterpart
                    continue
                cand_idx = int(person[part])
                if cand_idx <= 0:  # part not present for this person
                    continue
                coco_idx = orderCOCO[part]
                if coco_idx == -1:
                    continue
                kps[coco_idx * 3] = candidate[cand_idx][0]
                kps[coco_idx * 3 + 1] = candidate[cand_idx][1]
                kps[coco_idx * 3 + 2] = 2  # labeled-and-visible flag
            detections.append({
                'image_id': image_id,
                'category_id': 1,  # person
                'keypoints': kps.tolist(),
                'score': person[-2],
            })

    import json
    with open('evaluationResult.json', 'w') as outfile:
        json.dump(detections, outfile)

    resJsonFile = 'evaluationResult.json'
    cocoDt2 = cocoGt.loadRes(resJsonFile)
    image_ids = [cocoGt.loadImgs(imgIds[idx])[0]['id'] for idx in range(num_imgs)]

    # running evaluation
    cocoEval = COCOeval(cocoGt, cocoDt2, 'keypoints')
    cocoEval.params.imgIds = image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    k = cocoEval.summarize()