def main(oriImg):
    """Run body + hand pose estimation on a BGR image and return an RGB canvas.

    Args:
        oriImg: BGR image array (as read by ``cv2.imread``).

    Returns:
        The annotated canvas, resized back to the input's height/width,
        with channels reordered BGR -> RGB.
    """
    # Fix: removed unused debug locals (shape1..shape4), the commented-out
    # flipped-hand branch, and the commented-out diagnostic raise.
    height, width = oriImg.shape[:2]

    candidate, subset = body_estimation(oriImg)
    canvas = copy.deepcopy(oriImg)
    canvas = util.draw_bodypose(canvas, candidate, subset)

    # Detect hands inside body-derived bounding boxes.
    hands_list = util.handDetect(candidate, subset, oriImg)
    all_hand_peaks = []
    for x, y, w, is_left in hands_list:
        peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
        # Shift peaks from crop coordinates to full-image coordinates;
        # a coordinate of 0 marks "not detected" and is left untouched.
        peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
        peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
        all_hand_peaks.append(peaks)

    canvas = util.draw_handpose(canvas, all_hand_peaks)
    # NOTE(review): unconditional debug dump kept for behavior compatibility —
    # consider removing or making it opt-in.
    cv2.imwrite("test.png", canvas)
    canvas = cv2.resize(canvas, (width, height))
    return canvas[:, :, [2, 1, 0]]
def process_frame(txt_file, frame, body=True, hands=False):
    """Annotate a frame with body (and optionally hand) poses, logging keypoints.

    Args:
        txt_file: open file-like handle that ``list_to_file`` writes body points to.
        frame: BGR image array.
        body: run body pose estimation, log valid keypoints, and draw the skeleton.
        hands: run hand pose estimation and draw hand keypoints.

    Returns:
        A copy of ``frame`` with the requested annotations drawn on it.
    """
    canvas = copy.deepcopy(frame)
    candidate = subset = None
    if body:
        candidate, subset = body_estimation(frame)
        if is_data_valid(subset):
            body_points = get_18_body_points(candidate, subset)
            list_to_file(txt_file, body_points)
        else:
            # Invalid detection: start from a clean copy of the frame.
            canvas = copy.deepcopy(frame)
        canvas = util.draw_bodypose(canvas, candidate, subset)
        print(subset)
    if hands:
        # Fix: previously raised NameError when hands=True and body=False,
        # because candidate/subset were only computed inside the body branch.
        if candidate is None:
            candidate, subset = body_estimation(frame)
        hands_list = util.handDetect(candidate, subset, frame)
        all_hand_peaks = []
        for x, y, w, is_left in hands_list:
            peaks = hand_estimation(frame[y:y + w, x:x + w, :])
            # Crop -> full-image coordinates; (0, 0) marks an undetected joint.
            peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
            peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
            all_hand_peaks.append(peaks)
        canvas = util.draw_handpose(canvas, all_hand_peaks)
    return canvas
def detect_keypoint(test_image, is_vis):
    """Detect body and hand keypoints in an image file.

    Args:
        test_image: path to the image to analyse.
        is_vis: when truthy, display the annotated result with matplotlib.

    Returns:
        Tuple ``(bodypoints, all_hand_peaks, hand_personid_isleft)`` where
        ``all_hand_peaks`` is a [p, 21, 2] array of hand keypoints (p hands)
        and ``hand_personid_isleft`` is a [p, 2] array of [person_id, is_left].
    """
    body_estimation = Body('model/body_pose_model.pth')
    hand_estimation = Hand('model/hand_pose_model.pth')

    image = cv2.imread(test_image)  # B,G,R order

    # Body pass.
    # subset: n*20 array — per person: 0-17 index into candidate, 18 total
    # score, 19 total part count.
    # candidate: m*4 array — per keypoint: [x, y, confidence, id].
    keypoints, persons = body_estimation(image)
    overlay = copy.deepcopy(image)
    overlay, bodypoints = util.draw_bodypose(overlay, keypoints, persons)

    # Hand pass: run the hand network on each detected hand crop.
    hand_boxes = util.handDetect(keypoints, persons, image)
    peaks_per_hand = []
    hand_owner_info = []
    for left, top, size, is_left, person_id in hand_boxes:
        crop_peaks = hand_estimation(image[top:top + size, left:left + size, :])
        # Translate crop-local peaks into image coordinates; zeros mean "missing".
        crop_peaks[:, 0] = np.where(crop_peaks[:, 0] == 0, crop_peaks[:, 0], crop_peaks[:, 0] + left)
        crop_peaks[:, 1] = np.where(crop_peaks[:, 1] == 0, crop_peaks[:, 1], crop_peaks[:, 1] + top)
        peaks_per_hand.append(crop_peaks)
        hand_owner_info.append([person_id, is_left])

    all_hand_peaks = np.asarray(peaks_per_hand)          # [p, 21, 2]
    hand_personid_isleft = np.asarray(hand_owner_info)   # [p, 2]

    overlay = util.draw_handpose(overlay, all_hand_peaks)
    if is_vis:
        plt.imshow(overlay[:, :, [2, 1, 0]])
        plt.axis('off')
        plt.show()

    return bodypoints, all_hand_peaks, hand_personid_isleft
def process_frame(frame, body=True, hands=True):
    """Annotate a frame with body and/or hand poses.

    Args:
        frame: BGR image array.
        body: run and draw body pose estimation.
        hands: run and draw hand pose estimation (hand detection is seeded
            by body keypoints).

    Returns:
        A copy of ``frame`` with the requested annotations drawn on it.
    """
    canvas = copy.deepcopy(frame)
    candidate = subset = None
    if body:
        candidate, subset = body_estimation(frame)
        canvas = util.draw_bodypose(canvas, candidate, subset)
    if hands:
        # Fix: previously raised NameError when hands=True and body=False,
        # because candidate/subset were only computed inside the body branch.
        if candidate is None:
            candidate, subset = body_estimation(frame)
        hands_list = util.handDetect(candidate, subset, frame)
        all_hand_peaks = []
        for x, y, w, is_left in hands_list:
            peaks = hand_estimation(frame[y:y + w, x:x + w, :])
            # Crop -> full-image coordinates; (0, 0) marks an undetected joint.
            peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
            peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
            all_hand_peaks.append(peaks)
        canvas = util.draw_handpose(canvas, all_hand_peaks)
    return canvas
# Batch hand-pose extraction over a FreiHAND-style image folder.
# NOTE(review): `all_hand_peaks`, `files`, `image_folder`, `save_folder`,
# `hand_estimation`, and `dump` are defined earlier in the file (outside
# this span) — confirm against the full script.
all_hand_peaks_values = []
all_hand_names = []
end = time.time()
for file in files:
    # Filenames are assumed to start with an 8-digit frame index — TODO confirm.
    # Only process frames in the (28999, 32560) index window.
    if int(file[:8])>28999 and int(file[:8])<32560:#32560
        #import pdb;pdb.set_trace()
        image_path = os.path.join(image_folder,file)
        oriImg = cv2.imread(image_path) # B,G,R
        hand_peaks = []
        # hand_estimation returns both peak coordinates and their confidences.
        peaks,values = hand_estimation(images=oriImg)
        #import pdb;pdb.set_trace()
        hand_peaks.append(peaks)
        # Every 500th frame: save an annotated preview image for spot-checking.
        if int(file[:8])%500 == 0:
            save_img_path = os.path.join(save_folder,'image')
            os.makedirs(save_img_path, exist_ok=True)
            canvas = copy.deepcopy(oriImg)
            canvas = util.draw_handpose(canvas, hand_peaks)
            plt.imshow(canvas[:, :, [2, 1, 0]])
            plt.axis('off')
            plt.savefig(os.path.join(save_img_path,file))
            print('{0} demo out saved!'.format(file))
            plt.close()
            print('Time {0:.3f}\t'.format(time.time() - end))
        # Accumulate per-frame results for the final JSON dump.
        all_hand_peaks.append(peaks)
        all_hand_peaks_values.append(values)
        #all_hand_names.append(np.array([int(file[:8])]))
        all_hand_names.append(np.array([file]))
#import pdb;pdb.set_trace()
# Write all accumulated detections to a single JSON file.
detect_out_path = os.path.join(save_folder,'freihand-train.json')
dump(detect_out_path,all_hand_peaks,all_hand_peaks_values,all_hand_names)
binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) #import pdb;pdb.set_trace() # 全部小于阈值 if np.sum(binary) == 0: all_peaks.append([0, 0]) all_peaks_value.append([0.0])#cyj continue label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) #import pdb;pdb.set_trace() max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 label_img[label_img != max_index] = 0 map_ori[label_img == 0] = 0 #import pdb;pdb.set_trace() y, x = util.npmax(map_ori) all_peaks.append([x, y]) # confidence coefficient value = map_ori[y,x] all_peaks_value.append([value]) #return np.array(all_peaks) return np.array(all_peaks), np.array(all_peaks_value) if __name__ == "__main__": hand_estimation = Hand('../model/hand_pose_model.pth') # test_image = '../images/hand.jpg' test_image = '../images/hand.jpg' oriImg = cv2.imread(test_image) # B,G,R order peaks = hand_estimation(oriImg) canvas = util.draw_handpose(oriImg, peaks, True) cv2.imshow('', canvas) cv2.waitKey(0)