import cv2
import numpy as np

# Assumed to come from the lightweight-human-pose-estimation.pytorch repo:
import demo  # provides infer_fast
from modules.keypoints import extract_keypoints, group_keypoints
from modules.pose import Pose


def get_rect(net, images, height_size):
    net = net.eval()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 33
    for image in images:
        rect_path = image.replace('.%s' % (image.split('.')[-1]), '_rect.txt')
        img = cv2.imread(image, cv2.IMREAD_COLOR)
    orig_img = img.copy()
        heatmaps, pafs, scale, pad = demo.infer_fast(net, img, height_size, stride, upsample_ratio, cpu=False)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
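        # Map keypoints from the upsampled heatmap grid back to original image
        # coordinates: undo the stride/upsample factor, subtract the padding,
        # then divide by the resize scale.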
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []

        rects = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            valid_keypoints = []
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
                    valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])
            valid_keypoints = np.array(valid_keypoints)
            
            if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:
                # at least one ankle was found: crop around the keypoint extent
                pmin = valid_keypoints.min(0)
                pmax = valid_keypoints.max(0)

                center = (0.5 * (pmax[:2] + pmin[:2])).astype(int)
                radius = int(0.65 * max(pmax[0] - pmin[0], pmax[1] - pmin[1]))
            elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and pose_entries[n][11] != -1.0:
                # legs are missing, so use the pelvis (hip midpoint) to get the crop
                center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(int)
                radius = int(1.45 * np.sqrt(((center[None, :] - valid_keypoints) ** 2).sum(1)).max(0))
                center[1] += int(0.05 * radius)
            else:
                # fallback: crop the whole image
                center = np.array([img.shape[1] // 2, img.shape[0] // 2])
                radius = max(img.shape[1] // 2, img.shape[0] // 2)

            x1 = center[0] - radius
            y1 = center[1] - radius

            rects.append([x1, y1, 2*radius, 2*radius])

        np.savetxt(rect_path, np.array(rects), fmt='%d')
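
A minimal usage sketch, assuming the checkpoint loading flow of the lightweight-human-pose-estimation.pytorch repo; the checkpoint filename, image path, and 512-pixel height are illustrative.

import torch
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state

net = PoseEstimationWithMobileNet()
checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu')
load_state(net, checkpoint)
net = net.cuda()  # get_rect calls infer_fast with cpu=False, so a GPU is assumed

get_rect(net, ['subject.jpg'], 512)  # writes subject_rect.txt next to the image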
Example #2
import cv2
import numpy as np

from modules.keypoints import extract_keypoints, group_keypoints
from modules.pose import Pose, propagate_ids  # propagate_ids is assumed from an older repo revision
from demo import infer_fast  # assumed location, as in the upstream demo script


def getPose(net, img, stride, upsample_ratio):
    num_keypoints = Pose.num_kpts
    kpt_names = Pose.kpt_names
    previous_poses = []
    orig_img = img.copy()
    heatmaps, pafs, scale, pad = infer_fast(net, img, 256, stride,
                                            upsample_ratio, False)

    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
        all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
    current_poses = []
    pose_keypoints = None
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        # Pose construction is commented out in this example, so current_poses
        # stays empty and the tracking/drawing block below does nothing:
        #pose = Pose(pose_keypoints, pose_entries[n][18])
        #current_poses.append(pose)
        #pose.draw(img)
        #found_keypoints = pose.found_keypoints
    img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
    track_ids = True
    if track_ids:
        propagate_ids(previous_poses, current_poses)
        previous_poses = current_poses
        for pose in current_poses:
            cv2.rectangle(
                img, (pose.bbox[0], pose.bbox[1]),
                (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                (0, 255, 0))
            cv2.putText(img, 'id: {}'.format(pose.id),
                        (pose.bbox[0], pose.bbox[1] - 16),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
    cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
    key = cv2.waitKey(33)
    if key == 27:  # esc
        return
    return pose_keypoints
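
A minimal usage sketch, assuming net is loaded as in the sketch under Example #1 (a GPU is required, since getPose calls infer_fast with cpu=False); the webcam source is illustrative.

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    keypoints = getPose(net, frame, 8, 4)  # stride 8, upsample ratio 4, as elsewhere on this page
    if keypoints is None:  # esc was pressed, or no pose was found
        break
cap.release()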
Example #3
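This example is a method of a trainer class: it assumes a Human model class, self.net, a self.frame_provider yielding (side, front) frame pairs, self.excercise, a markTrainee helper, a global_state module with continue_training, rep_count, and stopped fields, and a two-view variant of infer_fast that processes the side and front frames together.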
    def start_training(self, cpu):
        trainee = Human()
        training_output = []
        cnt = 0
        for side_frame, front_frame in self.frame_provider:
            if not global_state.continue_training:
                global_state.rep_count = self.excercise.reps
                break
            net = self.net.eval()
            if not cpu:
                net = self.net.cuda()

            stride = 8
            upsample_ratio = 4
            height_size = 256
            total_keypoints_num = 0
            total_keypoints_num2 = 0

            heatmaps, pafs, scale, pad, heatmaps2, pafs2, scale2, pad2 = infer_fast(net, side_frame, front_frame, height_size, stride, upsample_ratio, cpu)
            all_keypoints_by_type = []
            all_keypoints_by_type2 = []
            for kpt_idx in range(18):  # 19th for bg
                total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
                total_keypoints_num2 += extract_keypoints(heatmaps2[:, :, kpt_idx], all_keypoints_by_type2, total_keypoints_num2)

            pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
            pose_entries2, all_keypoints2 = group_keypoints(all_keypoints_by_type2, pafs2, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
            for kpt_id in range(all_keypoints2.shape[0]):
                all_keypoints2[kpt_id, 0] = (all_keypoints2[kpt_id, 0] * stride / upsample_ratio - pad2[1]) / scale2
                all_keypoints2[kpt_id, 1] = (all_keypoints2[kpt_id, 1] * stride / upsample_ratio - pad2[0]) / scale2
            if len(pose_entries) * len(pose_entries2) != 0:
                trainee.side.updatePositions(pose_entries[0], all_keypoints)
                trainee.front.updatePositions(pose_entries2[0], all_keypoints2)

                self.excercise.setHuman(trainee)
                self.excercise.continueExercise()

                if self.excercise.currentState and self.excercise.currentState.isToleranceExceeded():
                    self.excercise.reset()

                self.markTrainee(trainee.side, side_frame, self.excercise)
                self.markTrainee(trainee.front, front_frame, self.excercise)

            if not cpu:
                output_frame = np.concatenate((side_frame, front_frame), axis=1)
                training_output.append(output_frame)
                cv2.imshow('Output', output_frame)

                key = cv2.waitKey(33)
                if key == 27:
                    break
        
        # self.saveTrainingVideo(training_output)
        global_state.stopped = True
Example #4
import cv2
import numpy as np

from modules.keypoints import extract_keypoints, group_keypoints
from modules.pose import Pose, track_poses
from demo import infer_fast  # assumed location, as in the upstream demo script

# detect_hand, detect_face, and detect_touch are project-specific helpers
# assumed to be defined elsewhere in the same module.

previous_poses = []  # module-level tracking state, mutated via `global` below


def run_on_image(net, height_size, cpu, track, smooth, img, stride,
                 upsample_ratio, num_keypoints, threshold):
    global previous_poses
    orig_img = img.copy()
    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                            upsample_ratio, cpu)
    score = 0
    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
        all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)

    if track:
        track_poses(previous_poses, current_poses, smooth=smooth)
        previous_poses = current_poses
    for pose in current_poses:
        pose.draw(img)
    img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
    for pose in current_poses:
        # cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
        #               (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))

        r_hand_center, r_hand_width, l_hand_center, l_hand_width = detect_hand(pose)

        if -1 not in r_hand_center:
            cv2.circle(img, (r_hand_center[0], r_hand_center[1]), 5,
                       (255, 0, 0), 5)
            cv2.rectangle(img, (r_hand_center[0] - r_hand_width,
                                r_hand_center[1] - r_hand_width),
                          (r_hand_center[0] + r_hand_width,
                           r_hand_center[1] + r_hand_width), (0, 255, 255))
        if -1 not in l_hand_center:
            cv2.circle(img, (l_hand_center[0], l_hand_center[1]), 5,
                       (255, 0, 0), 5)
            cv2.rectangle(img, (l_hand_center[0] - l_hand_width,
                                l_hand_center[1] - l_hand_width),
                          (l_hand_center[0] + l_hand_width,
                           l_hand_center[1] + l_hand_width), (0, 255, 255))

        face_center, face_width = detect_face(pose)
        if -1 not in face_center:
            cv2.rectangle(
                img,
                (face_center[0] - face_width, face_center[1] - face_width),
                (face_center[0] + face_width, face_center[1] + face_width),
                (0, 0, 255))

            if track:
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (face_center[0] - face_width,
                             face_center[1] - face_width - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))

        if -1 not in r_hand_center:
            x, y, h, w, score = detect_touch(face_center, face_width,
                                             r_hand_center, r_hand_width)
            if h != 0:
                cv2.rectangle(img, (x, y), (x + h, y + w), (255, 0, 255))
                cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
        if -1 not in l_hand_center:
            x, y, h, w, score = detect_touch(face_center, face_width,
                                             l_hand_center, l_hand_width)
            if h != 0:
                cv2.rectangle(img, (x, y), (x + h, y + w), (255, 0, 255))
                cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
    cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
    delay = 1
    key = cv2.waitKey(delay)
    if key == 27:  # esc
        return None
    elif key == 112:  # 'p': pause toggle inherited from the loop-based demo;
        # it has no effect here because delay is reset to 1 on every call
        if delay == 33:
            delay = 0
        else:
            delay = 33
    return score > threshold
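
A minimal usage sketch under the same assumptions as the earlier examples; the webcam source and the 0.5 threshold are illustrative.

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    touched = run_on_image(net, 256, cpu=False, track=True, smooth=True,
                           img=frame, stride=8, upsample_ratio=4,
                           num_keypoints=Pose.num_kpts, threshold=0.5)
    if touched is None:  # esc was pressed inside run_on_image
        break
cap.release()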