def main(oriImg):
    input_shape = oriImg.shape

    # detect body
    candidate, subset = body_estimation(oriImg)
    canvas = copy.deepcopy(oriImg)
    canvas = util.draw_bodypose(canvas, candidate, subset)

    # detect hands
    hands_list = util.handDetect(candidate, subset, oriImg)
    all_hand_peaks = []
    for x, y, w, is_left in hands_list:
        # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
        # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
        # shift peaks from crop coordinates back to image coordinates; (0, 0) marks a missed point
        peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
        peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
        # left hands could alternatively be estimated on a horizontally flipped crop:
        # peaks = hand_estimation(cv2.flip(oriImg[y:y+w, x:x+w, :], 1))
        # peaks[:, 0] = np.where(peaks[:, 0]==0, peaks[:, 0], w-peaks[:, 0]-1+x)
        # peaks[:, 1] = np.where(peaks[:, 1]==0, peaks[:, 1], peaks[:, 1]+y)
        all_hand_peaks.append(peaks)

    canvas = util.draw_handpose(canvas, all_hand_peaks)
    cv2.imwrite("test.png", canvas)  # debug dump of the annotated canvas
    canvas = cv2.resize(canvas, (input_shape[1], input_shape[0]))
    return canvas[:, :, [2, 1, 0]]  # BGR -> RGB
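
# Usage sketch (assumption): `main` relies on module-level `body_estimation = Body(...)` and
# `hand_estimation = Hand(...)` estimators, initialized as in the demo script at the end of
# this section; it returns the annotated canvas in RGB order, resized to the input shape.
# The file names here are illustrative.
import cv2

img = cv2.imread('images/demo.jpg')                            # BGR image, as cv2 returns it
rgb_canvas = main(img)                                         # body + hand skeletons drawn on a copy
cv2.imwrite('result.png', cv2.cvtColor(rgb_canvas, cv2.COLOR_RGB2BGR))  # back to BGR for writing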
def predict(self, cv2_img):
    candidate, subset = self.body_estimation(cv2_img)
    if len(subset) == 0:
        return None
    subset = subset.astype(np.int32)

    # per-person layout: rows 0-17 body joints, 18-38 left hand, 39-59 right hand;
    # [-1, -1, -1] marks keypoints that were not detected
    result = np.zeros(shape=(len(subset), 18 + 21 + 21, 3), dtype=np.int32) - 1
    for n, person in enumerate(subset):
        # body keypoints: subset stores indices into candidate, or -1 if missing
        for k in range(18):
            index = person[k]
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            result[n, k] = [x, y, 1]

        # hand keypoints for this person only
        person = person[np.newaxis, :]
        hands = util.handDetect(candidate, person, cv2_img)
        for x, y, w, is_left in hands:
            peaks = self.hand_estimation(cv2_img[y:y + w, x:x + w, :])
            # shift peaks from crop coordinates back to image coordinates
            peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
            peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
            # append a visibility flag and mark undetected peaks (left at (0, 0)) with -1
            ones = np.ones(shape=(len(peaks), 1), dtype=np.int32)
            peaks = np.hstack((peaks, ones))
            for k in range(len(peaks)):
                if peaks[k, 0] == 0 and peaks[k, 1] == 0:
                    peaks[k, 2] = -1
            if is_left:
                result[n, 18:39] = peaks
            else:
                result[n, 39:60] = peaks
    return result
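
# Usage sketch (assumption): `predict` is written as a method, so it needs a small wrapper
# class that owns the two estimators; `PoseDetector` and the model paths below are
# illustrative. The result has shape (num_people, 60, 3), laid out as described above.
import cv2
from src.body import Body
from src.hand import Hand

class PoseDetector:
    def __init__(self):
        self.body_estimation = Body('model/body_pose_model.pth')
        self.hand_estimation = Hand('model/hand_pose_model.pth')

PoseDetector.predict = predict  # attach the method defined above

detector = PoseDetector()
keypoints = detector.predict(cv2.imread('images/demo.jpg'))
if keypoints is not None:
    print(keypoints.shape)     # (num_people, 60, 3)
    print(keypoints[0, :18])   # first person's body joints as [x, y, 1] or [-1, -1, -1]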
def process_frame(txt_file, frame, body=True, hands=False):
    canvas = copy.deepcopy(frame)
    if body:
        candidate, subset = body_estimation(frame)
        if is_data_valid(subset):
            # write the 18 body keypoints of this frame to the open text file
            body_points = get_18_body_points(candidate, subset)
            list_to_file(txt_file, body_points)
        else:
            canvas = copy.deepcopy(frame)
        canvas = util.draw_bodypose(canvas, candidate, subset)
        # plt.imshow(canvas[:, :, [2, 1, 0]])
        # plt.axis('off')
        # plt.show()
        print(subset)
    if hands:
        hands_list = util.handDetect(candidate, subset, frame)
        all_hand_peaks = []
        for x, y, w, is_left in hands_list:
            peaks = hand_estimation(frame[y:y + w, x:x + w, :])
            peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
            peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
            all_hand_peaks.append(peaks)
        canvas = util.draw_handpose(canvas, all_hand_peaks)
    return canvas
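
# Usage sketch (assumption): the helpers `is_data_valid`, `get_18_body_points`, and
# `list_to_file` come from the surrounding project and are not shown here. A typical driver
# streams video frames through `process_frame` while keeping one output text file open;
# the input and output paths below are illustrative.
import cv2

cap = cv2.VideoCapture('input.mp4')
with open('keypoints.txt', 'w') as txt_file:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        canvas = process_frame(txt_file, frame, body=True, hands=False)
        cv2.imshow('pose', canvas)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()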
def detect_keypoint(test_image, is_vis):
    body_estimation = Body('model/body_pose_model.pth')
    hand_estimation = Hand('model/hand_pose_model.pth')

    oriImg = cv2.imread(test_image)  # B,G,R order

    # detect body
    # candidate: [m, 4] array, m is the number of keypoints in the image: [x, y, confidence, id]
    # subset: [n, 20] array, n is the number of people; columns 0-17 index into candidate,
    #         column 18 is the total score, column 19 the number of detected parts
    candidate, subset = body_estimation(oriImg)
    canvas = copy.deepcopy(oriImg)
    canvas, bodypoints = util.draw_bodypose(canvas, candidate, subset)

    # detect hands
    hands_list = util.handDetect(candidate, subset, oriImg)
    all_hand_peaks = []
    hand_personid_isleft = []
    for x, y, w, is_left, person_id in hands_list:
        # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
        # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
        # shift peaks from crop coordinates back to image coordinates; (0, 0) marks a missed point
        peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
        peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
        all_hand_peaks.append(peaks)
        hand_personid_isleft.append([person_id, is_left])

    # all_hand_peaks: [p, 21, 2], p is the number of detected hands
    # hand_personid_isleft: [p, 2], each row is [person_id, is_left]
    all_hand_peaks = np.asarray(all_hand_peaks)
    hand_personid_isleft = np.asarray(hand_personid_isleft)
    canvas = util.draw_handpose(canvas, all_hand_peaks)

    if is_vis:
        plt.imshow(canvas[:, :, [2, 1, 0]])
        plt.axis('off')
        plt.show()
    return bodypoints, all_hand_peaks, hand_personid_isleft
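
# Usage sketch: `detect_keypoint` loads both models on every call, so hoist the Body/Hand
# construction out of the function when processing many images. The file name is illustrative.
bodypoints, hand_peaks, hand_meta = detect_keypoint('images/demo.jpg', is_vis=False)
print(hand_peaks.shape)   # (p, 21, 2) when at least one hand is found: p hands, 21 [x, y] points
print(hand_meta)          # one [person_id, is_left] row per detected hand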
def process_frame(frame, body=True, hands=True):
    canvas = copy.deepcopy(frame)
    if body:
        candidate, subset = body_estimation(frame)
        canvas = util.draw_bodypose(canvas, candidate, subset)
    if hands:
        hands_list = util.handDetect(candidate, subset, frame)
        all_hand_peaks = []
        for x, y, w, is_left in hands_list:
            peaks = hand_estimation(frame[y:y + w, x:x + w, :])
            peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
            peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
            all_hand_peaks.append(peaks)
        canvas = util.draw_handpose(canvas, all_hand_peaks)
    return canvas
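
# Usage sketch (assumption): this variant of `process_frame` is typically driven by a webcam
# loop. Note that the hand pass reuses `candidate`/`subset` from the body pass, so `body`
# must stay enabled whenever `hands=True`.
import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('openpose demo', process_frame(frame))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()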
import copy

import cv2
import numpy as np
import matplotlib.pyplot as plt

from src import model
from src import util
from src.body import Body
from src.hand import Hand

body_estimation = Body('model/body_pose_model.pth')
hand_estimation = Hand('model/hand_pose_model.pth')

test_image = 'images/demo.jpg'
oriImg = cv2.imread(test_image)  # B,G,R order

# detect body
candidate, subset = body_estimation(oriImg)
canvas = copy.deepcopy(oriImg)
canvas = util.draw_bodypose(canvas, candidate, subset)

# detect hands
hands_list = util.handDetect(candidate, subset, oriImg)
all_hand_peaks = []
for x, y, w, is_left in hands_list:
    # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
    # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
    # shift peaks from crop coordinates back to image coordinates; (0, 0) marks a missed point
    peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
    peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
    # left hands could alternatively be estimated on a horizontally flipped crop:
    # peaks = hand_estimation(cv2.flip(oriImg[y:y+w, x:x+w, :], 1))
    # peaks[:, 0] = np.where(peaks[:, 0]==0, peaks[:, 0], w-peaks[:, 0]-1+x)
    # peaks[:, 1] = np.where(peaks[:, 1]==0, peaks[:, 1], peaks[:, 1]+y)
    all_hand_peaks.append(peaks)

canvas = util.draw_handpose(canvas, all_hand_peaks)

plt.imshow(canvas[:, :, [2, 1, 0]])  # BGR -> RGB for matplotlib
plt.axis('off')
plt.show()