Example #1
def run_demo(net, image_provider, height_size, cpu, track_ids):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            pose.draw(img)

        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        if track_ids:
            propagate_ids(previous_poses, current_poses)
            previous_poses = current_poses
            for pose in current_poses:
                cv2.rectangle(
                    img, (pose.bbox[0], pose.bbox[1]),
                    (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                    (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        cv2.imwrite('/Users/Utente/Desktop/pose/out.jpg', img)
        key = cv2.waitKey(33)
        if key == 27:  # esc
            return
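
All of the examples in this listing map heatmap-space keypoints back to original-image pixels with the same correction, (x * stride / upsample_ratio - pad) / scale. A minimal standalone sketch of that step (the helper name rescale_keypoints is ours, and it assumes the pad layout used by infer_fast in these examples, with pad[0] the top and pad[1] the left padding):

import numpy as np

def rescale_keypoints(all_keypoints, stride, upsample_ratio, pad, scale):
    """Map keypoints from network-output space back to original-image pixels.

    Sketch only: same arithmetic as the in-place loops in the demos above,
    applied to a copy of the keypoint array.
    """
    kpts = np.asarray(all_keypoints, dtype=np.float32).copy()
    kpts[:, 0] = (kpts[:, 0] * stride / upsample_ratio - pad[1]) / scale  # x
    kpts[:, 1] = (kpts[:, 1] * stride / upsample_ratio - pad[0]) / scale  # y
    return kpts
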
Example #2
def run_demo(net, image_provider, height_size, cpu, track, smooth):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 0
    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses

        print("draw", img.dtype, img.shape, img.min(), img.max())
        for pose in current_poses:
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        print(img.min(), img.max())
        for pose in current_poses:
            cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                          (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        key = cv2.waitKey(delay)
        if key == 27:  # esc
            return
        elif key == 112:  # 'p'
            if delay == 33:
                delay = 0
            else:
                delay = 33
Example #3
def run_demo(net, image_provider, height_size, cpu, track_ids):
    net = net.eval()
    if not cpu:  # run the model on CUDA unless CPU-only inference was requested
        # taken with the default arguments (the store_true flag defaults to False), so inference runs on the GPU
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            pose.draw(img)

        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        if track_ids:
            propagate_ids(previous_poses, current_poses)
            previous_poses = current_poses
            for pose in current_poses:
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))

        # cv2.resize(img,(720,1280))
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', cv2.resize(img,(2800,1800)))
        key = cv2.waitKey(33)
        if key == 27:  # esc
            return
Example #4
def extract_pose(heatmaps, pafs, scale, pad, num_keypoints, stride,
                 upsample_ratio):
    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id,
                      0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio
                            - pad[1]) / scale
        all_keypoints[kpt_id,
                      1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio
                            - pad[0]) / scale
    current_poses = []

    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)

    return current_poses
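
Example #4 factors the grouping and rescaling out of the demo loop. A hedged sketch of how extract_pose could be wired into a per-frame loop (net, image_provider and infer_fast are assumed to be prepared exactly as in Example #1; the constants follow the conventions used throughout this listing):

# Sketch only: per-frame driver around extract_pose().
stride, upsample_ratio, height_size = 8, 4, 256
for img in image_provider:
    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                            upsample_ratio, cpu=False)
    poses = extract_pose(heatmaps, pafs, scale, pad, Pose.num_kpts, stride,
                         upsample_ratio)
    for pose in poses:
        pose.draw(img)
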
Example #5
def get_final_keypoint_positions(all_keypoints_by_type, pafs, num_keypoints,
                                 stride, scale, upsample_ratio, pad):
    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id,
                      0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio
                            - pad[1]) / scale
        all_keypoints[kpt_id,
                      1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio
                            - pad[0]) / scale
    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)

    return current_poses
Example #6
def parse_poses(inference_results, input_scale, stride, fx, is_video=False):
    global previous_poses_2d
    poses_2d = get_root_relative_poses(inference_results)
    poses_2d_scaled = []
    for pose_2d in poses_2d:
        num_kpt = (pose_2d.shape[0] - 1) // 3
        pose_2d_scaled = np.ones(pose_2d.shape[0], dtype=np.float32) * -1
        for kpt_id in range(num_kpt):
            if pose_2d[kpt_id * 3] != -1:
                if pose_2d[kpt_id * 3] > 0 and pose_2d[kpt_id * 3 + 1] > 0:
                    pose_2d_scaled[kpt_id * 3] = pose_2d[kpt_id * 3] * stride / input_scale
                    pose_2d_scaled[kpt_id * 3 + 1] = pose_2d[kpt_id * 3 + 1] * stride / input_scale
                    pose_2d_scaled[kpt_id * 3 + 2] = pose_2d[kpt_id * 3 + 2]
        pose_2d_scaled[-1] = pose_2d[-1]
        poses_2d_scaled.append(pose_2d_scaled)

    if is_video:  # track poses ids
        current_poses_2d = []
        for pose_2d_scaled, pre_pose in zip(poses_2d_scaled, previous_poses_2d):
            pose_keypoints = np.ones((Pose.num_kpts, 2), dtype=np.int32) * -1
            for kpt_id in range(Pose.num_kpts):
                if pose_2d_scaled[kpt_id * 3] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0:2] = pose_2d_scaled[kpt_id * 3:kpt_id * 3 + 2].astype(np.int32)
                else:
                    pose_keypoints[kpt_id, 0:2] = pre_pose.keypoints[kpt_id, 0:2].astype(np.int32)
            pose = Pose(pose_keypoints, pose_2d_scaled[-1])
            current_poses_2d.append(pose)
        propagate_ids(previous_poses_2d, current_poses_2d)
        previous_poses_2d = current_poses_2d


    return np.array(poses_2d_scaled)
Example #7
    def _dect_pose(self, **kwargs):
        """Detect poses.
        Arguments:
            img {ndarray}: input image.
            model {PoseEstimationWithMobileNet}: initialized OpenPose model.
            previous_poses {list}: previous poses for tracking mode.
        Returns:
            list: detected poses.
        """

        img = kwargs.get('img', None)
        model = kwargs.get('model', None)
        previous_poses = kwargs.get('previous_poses', None)
        use_cuda = kwargs.get('use_cuda', False)
        track = self.__params.track
        smooth = self.__params.smooth
        stride = self.__params.stride
        upsample_ratio = self.__params.upsample_ratio
        num_keypoints = Pose.num_kpts

        #orig_img = img.copy()
        heatmaps, pafs, scale, pad = self._infer_fast(model=model,
                                                      img=img,
                                                      use_cuda=use_cuda)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale

        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses

        return current_poses
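
The docstring above spells out the keyword-argument interface of _dect_pose. A hedged call sketch (detector, net and frames are assumptions used only for illustration, not code from this example):

# Sketch only: 'detector' is an instance of the class that owns _dect_pose,
# 'net' a loaded PoseEstimationWithMobileNet, and 'frames' any image iterator.
previous_poses = []
for frame in frames:
    current_poses = detector._dect_pose(img=frame,
                                        model=net,
                                        previous_poses=previous_poses,
                                        use_cuda=True)
    previous_poses = current_poses  # keeps ids stable when tracking is enabled
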
Example #8
    def __call__(self, img, height_size=256):
        stride = 8
        upsample_ratio = 4
        num_keypoints = Pose.num_kpts
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(self.model, img, height_size,
                                                stride, upsample_ratio, False)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
        for pose in current_poses:
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        cv2.imwrite("/data1/qilei_chen/DEVELOPMENTS/test1.jpg", img)
        return current_poses
Example #9
    def visualize_prediction(self, image):
        orig_img = image.copy()
        if not np.array_equal(self.image, image):
            self.image = image
            self.__inference_fast(self.image, self.height_size, self.stride,
                                  self.upsample_ratio)

        current_poses = []
        for n in range(len(self.pose_entries)):
            if len(self.pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones(
                (self.num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(self.num_keypoints):
                if self.pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        self.all_keypoints[int(self.pose_entries[n][kpt_id]),
                                           0])
                    pose_keypoints[kpt_id, 1] = int(
                        self.all_keypoints[int(self.pose_entries[n][kpt_id]),
                                           1])
            pose = Pose(pose_keypoints, self.pose_entries[n][18])
            current_poses.append(pose)

        # if self.track:
        #     previous_poses = []
        #     track_poses(previous_poses, current_poses, smooth=smooth)
        #     previous_poses = current_poses

        for pose in current_poses:
            pose.draw(image)
        image = cv2.addWeighted(orig_img, 0.6, image, 0.4, 0)

        # plt.imshow(image)
        # plt.show()
        return image
Example #10
def pose_detector(img, net, height_size, cpu, track, smooth):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                            upsample_ratio, cpu)
    # import ipdb; ipdb.set_trace()  # debugging hook, disabled

    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id,
                      0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio
                            - pad[1]) / scale
        all_keypoints[kpt_id,
                      1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio
                            - pad[0]) / scale
    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)
    return current_poses
Example #11
    def track_poses(self, previous_poses, current_poses, threshold=3, smooth=False):
        """Propagate poses ids from previous frame results. Id is propagated,
        if there are at least `threshold` similar keypoints between pose from previous frame and current.
        If correspondence between pose on previous and current frame was established, pose keypoints are smoothed.

        :param previous_poses: poses from previous frame with ids
        :param current_poses: poses from current frame to assign ids
        :param threshold: minimal number of similar keypoints between poses
        :param smooth: smooth pose keypoints between frames
        :return: None
        """
        current_poses = sorted(current_poses, key=lambda pose: pose.confidence, reverse=True)  # match confident poses first
        mask = np.ones(len(previous_poses), dtype=np.int32)
        for current_pose in current_poses:
            best_matched_id = None
            best_matched_pose_id = None
            best_matched_iou = 0
            for id, previous_pose in enumerate(previous_poses):
                if not mask[id]:
                    continue
                iou = self.get_similarity(current_pose, previous_pose)
                if iou > best_matched_iou:
                    best_matched_iou = iou
                    best_matched_pose_id = previous_pose.id
                    best_matched_id = id
            if best_matched_iou >= threshold:
                mask[best_matched_id] = 0
            else:  # pose not similar to any previous
                best_matched_pose_id = None
            current_pose.update_id(best_matched_pose_id)

            if smooth:
                for kpt_id in range(Pose.num_kpts):
                    if current_pose.keypoints[kpt_id, 0] == -1:
                        continue
                    # reuse filter if previous pose has valid filter
                    if (best_matched_pose_id is not None
                            and previous_poses[best_matched_id].keypoints[kpt_id, 0] != -1):
                        current_pose.filters[kpt_id] = previous_poses[best_matched_id].filters[kpt_id]
                    current_pose.keypoints[kpt_id, 0] = current_pose.filters[kpt_id][0](current_pose.keypoints[kpt_id, 0])
                    current_pose.keypoints[kpt_id, 1] = current_pose.filters[kpt_id][1](current_pose.keypoints[kpt_id, 1])
                current_pose.bbox = Pose.get_bbox(current_pose.keypoints)
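
As the docstring notes, ids only survive between frames if the caller keeps the previous frame's poses around. A minimal driving loop (sketch; tracker and detect_poses are placeholder names, the pattern itself matches Examples #2 and #14):

# Sketch only: 'tracker' owns track_poses(), and detect_poses() stands in
# for the per-frame detection shown in Example #1.
previous_poses = []
for img in image_provider:
    current_poses = detect_poses(img)
    tracker.track_poses(previous_poses, current_poses, threshold=3, smooth=True)
    previous_poses = current_poses  # ids and smoothing filters carry over
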
Example #12
    def getPose(self,
                height_size=256,
                stride=8,
                upsample_ratio=4,
                num_keypoints=18):
        heatmaps, pafs, scale, pad = infer_fast(net, self.frametmp,
                                                height_size, stride,
                                                upsample_ratio, 0)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
        if current_poses:
            self.current_pose = get_max_human(current_poses)
        else:
            self.current_pose = None
Example #13
def parse_poses_2d(inference_results, input_scale, stride, fx, is_video=False):
    global previous_poses_2d
    poses_3d, poses_2d, features_shape = get_root_relative_poses(
        inference_results)
    poses_2d_scaled = []
    for pose_2d in poses_2d:
        num_kpt = (pose_2d.shape[0] - 1) // 3
        pose_2d_scaled = np.ones(
            pose_2d.shape[0], dtype=np.float32) * -1  # +1 for pose confidence
        for kpt_id in range(num_kpt):
            if pose_2d[kpt_id * 3] != -1:
                pose_2d_scaled[kpt_id * 3] = int(pose_2d[kpt_id * 3] * stride /
                                                 input_scale)
                pose_2d_scaled[kpt_id * 3 + 1] = int(pose_2d[kpt_id * 3 + 1] *
                                                     stride / input_scale)
                pose_2d_scaled[kpt_id * 3 + 2] = pose_2d[kpt_id * 3 + 2]
        pose_2d_scaled[-1] = pose_2d[-1]
        poses_2d_scaled.append(pose_2d_scaled)

    if is_video:  # track poses ids
        current_poses_2d = []
        for pose_id in range(len(poses_2d_scaled)):
            pose_keypoints = np.ones((Pose.num_kpts, 2), dtype=np.int32) * -1
            for kpt_id in range(Pose.num_kpts):
                if poses_2d_scaled[pose_id][kpt_id *
                                            3] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        poses_2d_scaled[pose_id][kpt_id * 3 + 0])
                    pose_keypoints[kpt_id, 1] = int(
                        poses_2d_scaled[pose_id][kpt_id * 3 + 1])
            pose = Pose(pose_keypoints, poses_2d_scaled[pose_id][-1])
            current_poses_2d.append(pose)
        propagate_ids(previous_poses_2d, current_poses_2d)
        previous_poses_2d = current_poses_2d

    return np.array(poses_2d_scaled)
Example #14
def run_demo(net, image_provider, height_size, cpu, track, smooth):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 33

    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            # import ipdb; ipdb.set_trace()
            # print(current_poses)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses
        for pose in current_poses:
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)

        cv2.imshow("Lightweight Human Pose Estimation Python Demo", img)
        key = cv2.waitKey(delay)
        if key == 27:  # esc
            return
        elif key == 112:  # 'p'
            if delay == 33:
                delay = 0
            else:
                delay = 33
Example #15
def find_temperature(net, image_provider=VideoReader(), send=False, cpu=False):

    height_size = 256
    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    data = {}

    frames = image_provider.next_frame()

    img = np.array(frames[0])
    depth = np.array(frames[1])

    """
    Estimate the pose and find the person in the middle
    """

    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
        all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
    current_poses = []

    distMin = 310
    midPose = None

    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])

        dist = abs(pose.keypoints[0][0] - image_provider.width/2)
        if dist < distMin:
            distMin = dist
            midPose = pose
        current_poses.append(pose)

    """
    Find the temperature of each exposed parts of the body
    """

    if midPose is not None:
        for n in range(len(Pose.kpt_names)):
            if midPose.keypoints[n][0] != 0 or midPose.keypoints[n][1] != 0:
                data[Pose.kpt_names[n]] = get_temperature(
                    midPose, n, depth
                )
    return data
Example #16
    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return  # cannot proceed without a decoded image

        ## Rescale image size
        rescale_factor = 1
        width = int(cv_image.shape[1] * rescale_factor)
        height = int(cv_image.shape[0] * rescale_factor)
        dim = (width, height)
        resized_img = cv2.resize(cv_image, dim)

        net = PoseEstimationWithMobileNet()
        checkpoint = torch.load(
            "/home/zheng/lightweight-human-pose-estimation.pytorch/checkpoint_iter_370000.pth",
            map_location='cpu')
        load_state(net, checkpoint)
        height_size = 256
        net = net.eval()
        net = net.cuda()

        stride = 8
        upsample_ratio = 4
        num_keypoints = Pose.num_kpts
        previous_poses = []
        delay = 33
        # img = cv2.imread("/home/zheng/lightweight-human-pose-estimation.pytorch/data/image_1400.jpg")
        img = asarray(cv_image)
        orig_img = img
        heatmaps, pafs, scale, pad = infer_fast(net,
                                                img,
                                                height_size,
                                                stride,
                                                upsample_ratio,
                                                cpu=False)  # the model was moved to CUDA above

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)

        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []

        ##   Collect all keypoint in numpy array to send it to Ros"
        pose_keypoints_ros_data = np.zeros(16)
        my_array_for_publishing = Float32MultiArray()

        ####
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(8):
            if pose_entries[0][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[0][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[0][kpt_id]), 1])
            pose_keypoints_ros_data[2 * kpt_id] = pose_keypoints[kpt_id, 0]
            pose_keypoints_ros_data[2 * kpt_id + 1] = pose_keypoints[kpt_id, 1]
        # build the pose once, after all of its keypoints have been collected
        pose = Pose(pose_keypoints, pose_entries[0][18])
        current_poses.append(pose)
        for pose in current_poses:
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        # publish the 8 collected (x, y) keypoints as a flat 16-element array
        my_array_for_publishing.data = pose_keypoints_ros_data.tolist()
        # cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
        self.keypts_pub.publish(my_array_for_publishing)
        # cv2.imwrite('/home/zheng/Bureau/image_1400_key.jpg',img)

        cv2.waitKey(2)
Example #17
def parse_poses(inference_results, input_scale, stride, fx, is_video=False):
    global previous_poses_2d
    poses_3d, poses_2d, features_shape = get_root_relative_poses(
        inference_results)
    poses_2d_scaled = []
    for pose_2d in poses_2d:
        num_kpt = (pose_2d.shape[0] - 1) // 3
        pose_2d_scaled = np.ones(
            pose_2d.shape[0], dtype=np.float32) * -1  # +1 for pose confidence
        for kpt_id in range(num_kpt):
            if pose_2d[kpt_id * 3] != -1:
                pose_2d_scaled[kpt_id * 3] = int(pose_2d[kpt_id * 3] * stride /
                                                 input_scale)
                pose_2d_scaled[kpt_id * 3 + 1] = int(pose_2d[kpt_id * 3 + 1] *
                                                     stride / input_scale)
                pose_2d_scaled[kpt_id * 3 + 2] = pose_2d[kpt_id * 3 + 2]
        pose_2d_scaled[-1] = pose_2d[-1]
        poses_2d_scaled.append(pose_2d_scaled)

    if is_video:  # track poses ids
        current_poses_2d = []
        for pose_id in range(len(poses_2d_scaled)):
            pose_keypoints = np.ones((Pose.num_kpts, 2), dtype=np.int32) * -1
            for kpt_id in range(Pose.num_kpts):
                if poses_2d_scaled[pose_id][kpt_id *
                                            3] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        poses_2d_scaled[pose_id][kpt_id * 3 + 0])
                    pose_keypoints[kpt_id, 1] = int(
                        poses_2d_scaled[pose_id][kpt_id * 3 + 1])
            pose = Pose(pose_keypoints, poses_2d_scaled[pose_id][-1])
            current_poses_2d.append(pose)
        propagate_ids(previous_poses_2d, current_poses_2d)
        previous_poses_2d = current_poses_2d

    translated_poses_3d = []
    # translate poses
    for pose_id in range(len(poses_3d)):
        pose_3d = poses_3d[pose_id].reshape((-1, 4)).transpose()
        pose_2d = poses_2d[pose_id][:-1].reshape((-1, 3)).transpose()
        num_valid = np.count_nonzero(pose_2d[2] != -1)
        pose_3d_valid = np.zeros((3, num_valid), dtype=np.float32)
        pose_2d_valid = np.zeros((2, num_valid), dtype=np.float32)
        valid_id = 0
        for kpt_id in range(pose_3d.shape[1]):
            if pose_2d[2, kpt_id] == -1:
                continue
            pose_3d_valid[:, valid_id] = pose_3d[0:3, kpt_id]
            pose_2d_valid[:, valid_id] = pose_2d[0:2, kpt_id]
            valid_id += 1

        pose_2d_valid[0] = pose_2d_valid[0] - features_shape[2] / 2
        pose_2d_valid[1] = pose_2d_valid[1] - features_shape[1] / 2
        mean_3d = np.expand_dims(pose_3d_valid.mean(axis=1), axis=1)
        mean_2d = np.expand_dims(pose_2d_valid.mean(axis=1), axis=1)
        numerator = np.trace(
            np.dot((pose_3d_valid[:2, :] - mean_3d[:2, :]).transpose(),
                   pose_3d_valid[:2, :] - mean_3d[:2, :])).sum()
        numerator = np.sqrt(numerator)
        denominator = np.sqrt(
            np.trace(
                np.dot((pose_2d_valid[:2, :] - mean_2d[:2, :]).transpose(),
                       pose_2d_valid[:2, :] - mean_2d[:2, :])).sum())
        mean_2d = np.array(
            [mean_2d[0, 0], mean_2d[1, 0], fx * input_scale / stride])
        mean_3d = np.array([mean_3d[0, 0], mean_3d[1, 0], 0])
        translation = numerator / denominator * mean_2d - mean_3d

        if is_video:
            translation = current_poses_2d[pose_id].filter(translation)
        for kpt_id in range(19):
            pose_3d[0, kpt_id] = pose_3d[0, kpt_id] + translation[0]
            pose_3d[1, kpt_id] = pose_3d[1, kpt_id] + translation[1]
            pose_3d[2, kpt_id] = pose_3d[2, kpt_id] + translation[2]
        translated_poses_3d.append(pose_3d.transpose().reshape(-1))

    return np.array(translated_poses_3d), np.array(poses_2d_scaled)
Example #18
def run_demo(net,action_net, image_provider, height_size, cpu):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    i = 0
    for img in image_provider:
        orig_img = img.copy()
        # print(i)

        if i % 1 == 0:  # process every frame; raise the modulus to skip frames
            heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

            total_keypoints_num = 0
            all_keypoints_by_type = []
            for kpt_idx in range(num_keypoints):  # 19th for bg
                total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

            pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])
                if len(pose.getKeyPoints()) >= 10:
                    current_poses.append(pose)
                # current_poses.append(pose)


            for pose in current_poses:
                pose.img_pose = pose.draw(img,show_draw=True)
                crown_proportion = pose.bbox[2] / pose.bbox[3]  # width-to-height ratio
                pose = action_detect(action_net,pose,crown_proportion)

                if pose.pose_action == 'fall':
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 0, 255),thickness=3)
                    cv2.putText(img, 'state: {}'.format(pose.pose_action), (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                else:
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                    cv2.putText(img, 'state: {}'.format(pose.pose_action), (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))

            img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
            cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)

            cv2.waitKey(1)
        i += 1
    cv2.destroyAllWindows()
Example #19
def run_demo(net, image_provider, height_size, cpu, track_ids, arm):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []

    stateMachines = {}

    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])

            """
            kpt_names = ['nose', 'neck',
                 'r_sho', 'r_elb', 'r_wri', 'l_sho', 'l_elb', 'l_wri',
                 'r_hip', 'r_knee', 'r_ank', 'l_hip', 'l_knee', 'l_ank',
                 'r_eye', 'l_eye',
                 'r_ear', 'l_ear']
            r_elb-3, r-wri-4, l_elb-6, l_wri-7
            """
            # print('ID: {}'.format(n))
            # print('\tRight elbow: {}, right wrist: {}'.format(pose_keypoints[3], pose_keypoints[4]))
            # print('\tLeft elbow: {}, left wrist: {}'.format(pose_keypoints[6], pose_keypoints[7]))
    
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            pose.draw(img)

        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        if track_ids:
            propagate_ids(previous_poses, current_poses, threshold=3)
            previous_poses = current_poses
            for pose in current_poses:
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        
        for pose in current_poses:
            if pose.id not in stateMachines.keys():
                stateMachines[pose.id] = StateMachine(pose.id, pose.keypoints, arm)
                #print('ID {} detected'.format(pose.id))
                continue
            # call stateMachine methods
            stateMachines[pose.id].update(pose.keypoints, img)
            
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        key = cv2.waitKey(33)
        if key == 27:  # esc
            return
Example #20
def run_demo(net, height_size, track, smooth, record_vid, camera_type):
    net = net.eval()
    net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []

    ##Tarit defined
    slope_threshold = 0.4
    ear_slope_threshold = 0.5
    eye_ear_slope_threshold = 0.5
    not_detected = (-1, -1)
    sleep_confirmation_time = 60  #in seconds

    #flags to detect whether the person is sleeping or not
    sleeping = False

    timer_started = False

    time_notified = 0
    selected_pose = None

    while True:
        #msg, frame = receiver.receive(timeout = 60.0)
        #img = cv2.imdecode(np.frombuffer(frame, dtype='uint8'), -1)
        img = cap.read()
        if camera_type == "jetson":
            img = img[1300:1780, 1320:1960]

        #start_time = time.time()
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])

            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses
        '''for pose in current_poses:
            pose.draw(img)'''

        ##find longest_nect_to_nose_dst and select that pose
        longest_nect_to_nose_dst = 0
        for pose in current_poses:
            nose = tuple(pose.keypoints[0])
            neck = tuple(pose.keypoints[1])
            ##pythagoras
            nect_to_nose_dst = ((nose[0] - neck[0]) ** 2 +
                                (nose[1] - neck[1]) ** 2) ** 0.5
            if nect_to_nose_dst > longest_nect_to_nose_dst:
                longest_nect_to_nose_dst = nect_to_nose_dst
                selected_pose = pose

        if selected_pose is not None:
            selected_pose.draw(img)

            nose = tuple(selected_pose.keypoints[0])
            neck = tuple(selected_pose.keypoints[1])
            l_ear = tuple(selected_pose.keypoints[16])
            r_ear = tuple(selected_pose.keypoints[17])
            l_eye = tuple(selected_pose.keypoints[15])
            r_eye = tuple(selected_pose.keypoints[14])

            #print(cal_slope(l_eye,l_ear),cal_slope(r_eye,r_ear))

            ##detect if the person back if facing to the camera
            if nose == (-1, -1):
                if l_ear != not_detected and r_ear != not_detected:
                    ear_slope = abs(l_ear[1] - r_ear[1]) / abs(l_ear[0] -
                                                               r_ear[0])
                    cv2.circle(img, l_ear, 5, (255, 0, 0), 3)
                    cv2.circle(img, r_ear, 5, (0, 255, 0), 3)
                    if ear_slope > ear_slope_threshold:
                        sleeping = True
                        print("sleeping")
                    else:
                        sleeping = False
                else:
                    ##out of condition, can't detect
                    sleeping = False
            else:
                cv2.circle(img, nose, 5, (255, 0, 0), 3)
                cv2.circle(img, neck, 5, (0, 255, 0), 3)

                slope_inverse = (nose[0] - neck[0]) / (nose[1] - neck[1])
                l_ear_eye_slope = cal_slope(l_eye, l_ear)
                r_ear_eye_slope = cal_slope(r_eye, r_ear)

                #increase the slope_threshold if the person is turning their head
                #print(pose.keypoints[16],pose.keypoints[17]) #print ear location
                if l_ear == (-1, -1) or r_ear == (-1, -1):
                    slope_threshold = 1
                    print("one ear missing , Increasing slope_threshold")
                else:
                    slope_threshold = 0.4

                if abs(slope_inverse) > slope_threshold:
                    #cv2.putText(img,"".join([str(pose.id),"sleeping"]),(20,50),cv2.FONT_HERSHEY_COMPLEX,2,(255,0,0),3)
                    print("Sleeping (neck bend more than threshold)")
                    #cv2.putText(img,"sleeping",(20,50),cv2.FONT_HERSHEY_COMPLEX,2,(255,0,0),3)
                    sleeping = True

                elif l_eye == not_detected or r_eye == not_detected:
                    sleeping = True
                    print("Sleeping (not seeing both eyes)")

                elif l_ear_eye_slope < -0.6 or r_ear_eye_slope > 0.6 or l_ear_eye_slope > eye_ear_slope_threshold or r_ear_eye_slope < -eye_ear_slope_threshold:
                    sleeping = True
                    print("Sleeping (ears higher/lower than eyes)")

                else:
                    print("Not sleeping")
                    sleeping = False

            if sleeping:
                if not timer_started:
                    t_start_sleep = time.time()
                    timer_started = True
                else:
                    if time.time() - t_start_sleep > sleep_confirmation_time:
                        print("sending line message")
                        pic_name = "".join(
                            ["log_data/",
                             str(time_notified), ".jpg"])
                        cv2.imwrite(pic_name, img)
                        #lineNotify("Elderly sleeping %d"%time_notified)
                        notifyFile("Elderly sleeping %d" % time_notified,
                                   pic_name)
                        time_notified += 1
                        timer_started = False
                        sleeping = False
            else:
                timer_started = False

            #song = AudioSegment.from_mp3("Alarm_Clock_Sound.mp3")
            #play(song)

        img = cv2.addWeighted(orig_img, 0.6, img, 0.6, 0)

        for pose in current_poses:
            cv2.rectangle(
                img, (pose.bbox[0], pose.bbox[1]),
                (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))

        cv2.imshow('Sleep detector', img)
        if record_vid:
            out_raw.write(orig_img)
            out_pose.write(img)

        #print((1/(time.time()-start_time)))
        if cv2.waitKey(1) == 27:  # esc
            #receiver.close()
            cap.stop()
            if record_vid:
                out_raw.release()
                out_pose.release()
            return
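
Example #20 calls a cal_slope helper that is not shown. A plausible definition, consistent with how it is applied to (x, y) keypoint tuples above (an assumption, not the original helper):

def cal_slope(p1, p2):
    """Slope of the segment between two (x, y) keypoints in image coordinates.

    Sketch only: the helper used above is not included in this example; this
    version returns infinity for a vertical segment to avoid division by zero.
    """
    dx = p1[0] - p2[0]
    if dx == 0:
        return float('inf')
    return (p1[1] - p2[1]) / dx
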
Example #21
def run_demo(net, action_net, image_provider, height_size, cpu, track, smooth):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    with open(
            "D:/py/openpose_lightweight/performance_evaluation/action_result.txt",
            "a") as f:

        for img, img_name, label in image_provider:

            if label == 'fall':
                label_ = 0
            else:
                label_ = 1

            heatmaps, pafs, scale, pad = infer_fast(net, img, height_size,
                                                    stride, upsample_ratio,
                                                    cpu)

            total_keypoints_num = 0
            all_keypoints_by_type = []
            for kpt_idx in range(num_keypoints):  # 19th for bg
                total_keypoints_num += extract_keypoints(
                    heatmaps[:, :, kpt_idx], all_keypoints_by_type,
                    total_keypoints_num)

            pose_entries, all_keypoints = group_keypoints(
                all_keypoints_by_type, pafs, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                            upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                            upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones(
                    (num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])

                if len(pose.getKeyPoints()) >= 12:
                    current_poses.append(pose)

            # evaluate the detected poses once per frame
            for pose in current_poses:
                pose.img_pose = pose.draw(img)

                crown_proportion = pose.bbox[2] / pose.bbox[3]  # width / height ratio
                pose = action_detect(action_net, pose, crown_proportion)
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2],
                               pose.bbox[1] + pose.bbox[3]), (0, 255, 0))

                f.write(
                    f"{label_} {pose.action_fall} {pose.action_normal}\n")
                f.flush()
                break  # record only the first valid pose for this image

            cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
            cv2.waitKey(1)
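Each line written to action_result.txt above is "<label> <fall_score> <normal_score>", with label 0 for fall. A small scoring sketch that reads the file back and reports accuracy, assuming a pose is predicted as a fall whenever its fall score exceeds its normal score:

def score_action_results(path):
    """Accuracy over the lines written by the demo above (sketch)."""
    correct = total = 0
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) != 3:
                continue  # skip malformed lines
            label = int(parts[0])
            fall_score, normal_score = float(parts[1]), float(parts[2])
            pred = 0 if fall_score > normal_score else 1  # 0 = fall
            correct += int(pred == label)
            total += 1
    return correct / total if total else 0.0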
Example #22
def run_demo(net, image_provider, height_size=256, cpu=False, track_ids=False):

    net = net.eval()
    if not cpu:
        net = net.cuda()
        print("use cuda")

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts  # 18

    #Initialize
    previous_pose_kpts = []
    graph_x, graph_y = [], []
    result = [-1, -1, -1, -1, -1]

    count = 0
    start_frame, end_frame = 1000000, -1
    completed_half = False
    total_len_frame = 0
    one_cycle_kpts = []

    for i, img in enumerate(image_provider):

        img = cv2.resize(img, (600, 600))
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)
        #total_keypoints_num = 18

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            ####
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            pose.draw(img)

        #Select joints
        pose_keypoints = np.concatenate(
            (pose_keypoints[2], pose_keypoints[5], pose_keypoints[8],
             pose_keypoints[10], pose_keypoints[11],
             pose_keypoints[13])).reshape(-1, 2)
        #Analyze posture
        previous_pose_kpts.append(pose_keypoints)
        liftoneleg = LiftOneLeg(previous_pose_kpts)  #Wrong
        angle, leg_status = liftoneleg.check_leg_up_down()

        #Update status and count
        leg_status, completed_half, count_update, start_frame_update, end_frame_update= \
                    liftoneleg.count_repetition(angle, leg_status, completed_half,  count, i, start_frame, end_frame)
        if (count_update == count + 1):
            print("count : %d" % count)

            one_cycle_kpts.append(previous_pose_kpts[start_frame:])

            result = test_per_frame(
                previous_pose_kpts[start_frame - total_len_frame:end_frame -
                                   total_len_frame], LABEL)
            total_len_frame += len(previous_pose_kpts)
            previous_pose_kpts = []

        count, start_frame, end_frame = count_update, start_frame_update, end_frame_update

        #To plot angle graph
        if int(angle) != 90:
            graph_x.append(i)
            graph_y.append(angle)

        #Put text on the screen
        cv2.putText(img, 'count : {}'.format(count), (10, 520),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 2)
        cv2.putText(img, "Rsho-Lsho :%3.2f" % (result[0]), (10, 550),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        cv2.putText(
            img, "Lsho-Lhip :%3.2f, Lhip-Lank :%3.2f" % (result[1], result[2]),
            (10, 570), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        cv2.putText(img, "Rhip-Rank :%3.2f" % (result[3]), (10, 590),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        cv2.putText(
            img,
            '3 align :{}'.format(liftoneleg.check_if_3points_are_aligned()),
            (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 2)
        cv2.putText(
            img,
            'shoulder :{}'.format(liftoneleg.check_if_shoulders_are_aligned()),
            (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 2)

        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        key = cv2.waitKey(33)
        if key == 27:  # esc
            return

    return graph_x, graph_y
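run_demo above returns graph_x and graph_y so the per-frame leg angle can be inspected offline. A minimal plotting sketch, assuming matplotlib is available; the output file name is arbitrary:

import matplotlib.pyplot as plt


def plot_angle_graph(graph_x, graph_y, out_path="angle_graph.png"):
    """Plot the per-frame angle series returned by run_demo (sketch)."""
    plt.figure(figsize=(8, 4))
    plt.plot(graph_x, graph_y, marker=".", linewidth=1)
    plt.xlabel("frame index")
    plt.ylabel("leg angle (degrees)")
    plt.tight_layout()
    plt.savefig(out_path)
    plt.close()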
Example #23
def run_demo(net, action_net, image_provider, height_size, cpu, boxList):
    net = net.eval()
    print(torch.cuda.device_count())
    print(torch.cuda.is_available())
    a = torch.Tensor(5, 3)
    a = a.cuda()
    print(a)
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts  # 18

    i = 0
    for img in image_provider:  # iterate over the image set
        orig_img = img.copy()  # keep a copy of the original frame
        # print(i)
        fallFlag = 0
        if i % 1 == 0:
            # infer_fast returns the heatmaps, PAFs, the scale factor between the
            # network input and the original image, and the padding applied to it
            heatmaps, pafs, scale, pad = infer_fast(
                net, img, height_size, stride, upsample_ratio, cpu)

            total_keypoints_num = 0
            # all_keypoints_by_type holds 18 lists, one per joint type; each list
            # has Ni entries of (x, y, heatmap score, index among all keypoints)
            all_keypoints_by_type = []
            for kpt_idx in range(num_keypoints):
                # the 19th channel is background; only the first 18 joints are used
                total_keypoints_num += extract_keypoints(
                    heatmaps[:, :, kpt_idx], all_keypoints_by_type,
                    total_keypoints_num)

            # group_keypoints returns the assembled people (the first 18 entries of
            # each row index that person's joints in all_keypoints, the last two are
            # the person's score and joint count) plus all keypoint information
            pose_entries, all_keypoints = group_keypoints(
                all_keypoints_by_type, pafs, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):  # scale keypoints back to the original image
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                            upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                            upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):  # iterate over every detected person
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones(
                    (num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])
                posebox = (int(pose.bbox[0]), int(pose.bbox[1]),
                           int(pose.bbox[0]) + int(pose.bbox[2]),
                           int(pose.bbox[1]) + int(pose.bbox[3]))
                coincideValue = coincide(boxList, posebox)
                print(posebox)
                print('coincideValue:' + str(coincideValue))
                # count as a person only if at least 10 keypoints were found, the
                # pose box overlaps a YOLOv5 box by at least 30%, and the lower
                # half of the body is visible
                if (len(pose.getKeyPoints()) >= 10 and coincideValue >= 0.3
                        and pose.lowerHalfFlag < 3):
                    current_poses.append(pose)

            for pose in current_poses:
                pose.img_pose = pose.draw(img, is_save=True, show_draw=True)
                crown_proportion = pose.bbox[2] / pose.bbox[3]  # width / height ratio
                pose = action_detect(action_net, pose, crown_proportion)  # classify as fall or normal

                if pose.pose_action == 'fall':
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2],
                                   pose.bbox[1] + pose.bbox[3]), (0, 0, 255),
                                  thickness=3)
                    cv2.putText(img, 'state: {}'.format(pose.pose_action),
                                (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                    fallFlag = 1
                else:
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2],
                                   pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                    cv2.putText(img, 'state: {}'.format(pose.pose_action),
                                (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))
                    # fallFlag = 1
            if fallFlag == 1:
                t = time.time()
                # cv2.imwrite(f'C:/zqr/project/yolov5_openpose/Image/{t}.jpg', img)
                print('saved the snapshot')

            img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
            # save the annotated frame
            # cv2.imwrite(f'C:/zqr/project/yolov5_openpose/Image/{t}.jpg', img)
            # print('saved the snapshot')
            # cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)

            cv2.waitKey(1)
        i += 1
    cv2.destroyAllWindows()
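The demo above keeps a pose only when coincide(boxList, posebox) is at least 0.3, but coincide is not defined in this listing. A hypothetical sketch of such an overlap ratio, assuming both the detector boxes and the pose box are (x1, y1, x2, y2) tuples and taking the best match over all detector boxes:

def coincide_ratio(box_list, pose_box):
    """Best overlap between a pose box and any detector box (sketch).

    Returns intersection area divided by the pose-box area, so 1.0 means the
    pose box lies fully inside a detection. The original coincide() may differ.
    """
    px1, py1, px2, py2 = pose_box
    pose_area = max(0, px2 - px1) * max(0, py2 - py1)
    if pose_area == 0:
        return 0.0
    best = 0.0
    for bx1, by1, bx2, by2 in box_list:
        ix1, iy1 = max(px1, bx1), max(py1, by1)
        ix2, iy2 = min(px2, bx2), min(py2, by2)
        inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
        best = max(best, inter / pose_area)
    return best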
Example #24
def run_on_image(net, height_size, cpu, track, smooth, img, stride,
                 upsample_ratio, num_keypoints, threshold):
    global previous_poses
    orig_img = img.copy()
    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                            upsample_ratio, cpu)
    score = 0
    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id,
                      0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio
                            - pad[1]) / scale
        all_keypoints[kpt_id,
                      1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio
                            - pad[0]) / scale
    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)

    if track:
        track_poses(previous_poses, current_poses, smooth=smooth)
        previous_poses = current_poses
    for pose in current_poses:
        pose.draw(img)
    img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
    for pose in current_poses:
        # cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),

        r_hand_center, r_hand_width, l_hand_center, l_hand_width, = detect_hand(
            pose)

        if -1 not in r_hand_center:
            cv2.circle(img, (r_hand_center[0], r_hand_center[1]), 5,
                       (255, 0, 0), 5)
            cv2.rectangle(img, (r_hand_center[0] - r_hand_width,
                                r_hand_center[1] - r_hand_width),
                          (r_hand_center[0] + r_hand_width,
                           r_hand_center[1] + r_hand_width), (0, 255, 255))
        if -1 not in l_hand_center:
            cv2.circle(img, (l_hand_center[0], l_hand_center[1]), 5,
                       (255, 0, 0), 5)
            cv2.rectangle(img, (l_hand_center[0] - l_hand_width,
                                l_hand_center[1] - l_hand_width),
                          (l_hand_center[0] + l_hand_width,
                           l_hand_center[1] + l_hand_width), (0, 255, 255))

        face_center, face_width = detect_face(pose)
        if -1 not in face_center:
            cv2.rectangle(
                img,
                (face_center[0] - face_width, face_center[1] - face_width),
                (face_center[0] + face_width, face_center[1] + face_width),
                (0, 0, 255))

            #               (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (face_center[0] - face_width,
                             face_center[1] - face_width - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))

        if -1 not in r_hand_center:
            x, y, h, w, score = detect_touch(face_center, face_width,
                                             r_hand_center, r_hand_width)
            if h != 0:
                cv2.rectangle(img, (x, y), (x + h, y + w), (255, 0, 255))
                cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
        if -1 not in l_hand_center:
            x, y, h, w, score = detect_touch(face_center, face_width,
                                             l_hand_center, l_hand_width)
            if h != 0:
                cv2.rectangle(img, (x, y), (x + h, y + w), (255, 0, 255))
                cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
    cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
    delay = 1
    detect = False

    key = cv2.waitKey(delay)
    if key == 27:  # esc
        return
    elif key == 112:  # 'p'
        if delay == 33:
            delay = 0
        else:
            delay = 33
    return score > threshold
def run_demo(net, image_provider1, image_provider2, height_size, cpu, track,
             smooth, com):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts  # +1 for Hidden COM
    previous_poses = []
    # original delay 33
    # 0 = pause / wait for input indefinetly
    delay = 33
    total_provider = zip(image_provider1, image_provider2)
    for img1, img2 in total_provider:
        orig_img1 = img1.copy()
        orig_img2 = img2.copy()
        heatmaps1, pafs1, scale1, pad1 = infer_fast(net, img1, height_size,
                                                    stride, upsample_ratio,
                                                    cpu)
        heatmaps2, pafs2, scale2, pad2 = infer_fast(net, img2, height_size,
                                                    stride, upsample_ratio,
                                                    cpu)

        total_keypoints_num1 = 0
        total_keypoints_num2 = 0
        all_keypoints_by_type1 = []
        all_keypoints_by_type2 = []

        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num1 += extract_keypoints(heatmaps1[:, :, kpt_idx],
                                                      all_keypoints_by_type1,
                                                      total_keypoints_num1)
            total_keypoints_num2 += extract_keypoints(heatmaps2[:, :, kpt_idx],
                                                      all_keypoints_by_type2,
                                                      total_keypoints_num2)

        pose_entries1, all_keypoints1 = group_keypoints(all_keypoints_by_type1,
                                                        pafs1,
                                                        demo=True)
        pose_entries2, all_keypoints2 = group_keypoints(all_keypoints_by_type2,
                                                        pafs2,
                                                        demo=True)
        for kpt_id in range(all_keypoints1.shape[0]):
            all_keypoints1[kpt_id, 0] = (all_keypoints1[kpt_id, 0] * stride /
                                         upsample_ratio - pad1[1]) / scale1
            all_keypoints1[kpt_id, 1] = (all_keypoints1[kpt_id, 1] * stride /
                                         upsample_ratio - pad1[0]) / scale1
        for kpt_id in range(all_keypoints2.shape[0]):
            all_keypoints2[kpt_id, 0] = (all_keypoints2[kpt_id, 0] * stride /
                                         upsample_ratio - pad2[1]) / scale2
            all_keypoints2[kpt_id, 1] = (all_keypoints2[kpt_id, 1] * stride /
                                         upsample_ratio - pad2[0]) / scale2
        current_poses1 = []
        current_poses2 = []
        for n in range(len(pose_entries1)):
            if len(pose_entries1[n]) == 0:
                continue
            pose_keypoints = np.ones(
                (num_keypoints + 1, 2), dtype=np.int32) * -1  # +1 here for COM
            found_kpts = []
            C_pts = []
            BOS = [[-1, -1], [-1, -1]]
            for kpt_id in range(num_keypoints):
                if pose_entries1[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints1[int(pose_entries1[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints1[int(pose_entries1[n][kpt_id]), 1])
                    found_kpts.append(kpt_id)
            if com:
                COM, C_pts, BOS = compute_com(found_kpts, pose_keypoints)
                pose_keypoints[-1] = COM
            pose = Pose(pose_keypoints, pose_entries1[n][18], C_pts, BOS)
            current_poses1.append(pose)

        for n in range(len(pose_entries2)):
            if len(pose_entries2[n]) == 0:
                continue
            pose_keypoints = np.ones(
                (num_keypoints + 1, 2), dtype=np.int32) * -1  # +1 here for COM
            found_kpts = []
            C_pts = []
            BOS = [[-1, -1], [-1, -1]]
            for kpt_id in range(num_keypoints):
                if pose_entries2[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints2[int(pose_entries2[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints2[int(pose_entries2[n][kpt_id]), 1])
                    found_kpts.append(kpt_id)
            if com:
                COM, C_pts, BOS = compute_com(found_kpts, pose_keypoints)
                pose_keypoints[-1] = COM
            pose = Pose(pose_keypoints, pose_entries2[n][18], C_pts, BOS)
            current_poses2.append(pose)

        #if track:
        #track_poses(previous_poses, current_poses, smooth=smooth)
        #previous_poses = current_poses
        for pose in current_poses1:
            pose.draw(img1)
        for pose in current_poses2:
            pose.draw(img2)

        img1 = cv2.addWeighted(orig_img1, 0.6, img1, 0.4, 0)
        img2 = cv2.addWeighted(orig_img2, 0.6, img2, 0.4, 0)
        #for pose in current_poses:
        #cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
        #(pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
        #if track:
        #cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
        #cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        cv2.imshow('Demo, Feed 1', img1)
        cv2.imshow('Demo, Feed 2', img2)
        key = cv2.waitKey(delay)
        if key == 27:  # esc
            return
        elif key == 112:  # 'p'
            if delay == 33:
                delay = 0
            else:
                delay = 33
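compute_com above adds a hidden centre-of-mass keypoint and a base of support, but its implementation is not shown. A hypothetical stand-in that averages the detected keypoints for the COM and uses the two ankles (indices 10 and 13 in the 18-point layout) as the base of support; the real helper may weight body segments instead:

import numpy as np


def compute_com_sketch(found_kpts, pose_keypoints):
    """Approximate centre of mass (COM) and base of support (BOS), sketch only."""
    pts = np.array([pose_keypoints[k] for k in found_kpts], dtype=np.float32)
    com = pts.mean(axis=0).astype(np.int32) if len(pts) else np.array([-1, -1])
    bos = [[-1, -1], [-1, -1]]
    if 10 in found_kpts and 13 in found_kpts:  # right and left ankle
        bos = [list(pose_keypoints[10]), list(pose_keypoints[13])]
    c_pts = [list(p) for p in pts]  # keypoints that contributed to the COM
    return com, c_pts, bos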
Example #26
def run_demo(net, image_provider, height_size, cpu, track_ids):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    
    list_of_elbow_angles = []
    list_of_shoulder_angles = []
    list_of_hip_angles = []
    list_of_knee_angles = []
    
    rep_count = 0
    rep_not_counted = True
    rep_start = False
    rep_end = False
    
    activity = None

    rotate = True
    rotate_90 = True
    rotate_270 = False

    for img in image_provider:
        if rotate:
            if rotate_90:
                img = imutils.rotate_bound(img, 90)
            if rotate_270:
                img = imutils.rotate_bound(img, 270)

        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True) # pose entries -> Array showing number of poses, and which keypoint in each pose
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []

        # do not iterate over all poses, instead only choose the one with largest bounding box (pose.bbox)
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
            # import pdb;pdb.set_trace()
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)

            final_pose = pose.get_pose()
            draw(final_pose, img, names = False, lines = True, angles = [5, 6, 7])

            # """
            # - Find out sleeping position (or not standing? Easier to write rules for standing)
            # - 
            # """

            # Figure out if standing or sleeping
            neck = get_coordinates(final_pose, 1)
            hip = average_coordinates(final_pose, 8, 11)
            knee = average_coordinates(final_pose, 9, 12)

            # checking if standing by comparing 'y' coordinates
            # neck is not good. Choose something which is below hips in sleeping but not in standing
            if neck[1] > hip[1] and hip[1] > knee[1]:
                rotate = False
            else: 
                rotate = True
                # checking which side to rotate by comparing x coordinates
                print('neck[0], hip[0], neck[1], hip[1]',neck[0], hip[0], neck[1], hip[1])
                if neck[0] < hip[0]:
                    rotate_90 = True
                    rotate_270 = False
                else:
                    rotate_90 = False
                    rotate_270 = True
                # print('rotate, rotate_90, rotate_270',rotate, rotate_90, rotate_270)
                continue    

            if activity == None:
                activity, list_of_elbow_angles = get_activity(final_pose, list_of_elbow_angles)
                continue
            else:
                pass

            pushups = Pushups(final_pose, list_of_elbow_angles)

            try:
                corrections = pushups.all_corrections()
                for problem, correction in corrections.items():
                    if problem == 'lazy_pushup':
                        list_of_elbow_angles.append(correction[0])
                        
                        # if rep_not_counted:
                        start = correction[2]
                        down = correction[3]

                        # print('1] start, down, rep_start, rep_end, rep_count, elbow angle', \
                        #     start, down, rep_start, rep_end, rep_count, correction[0])

                        print('Activity : {} ||| Correction : {} ||| Rep count : {}'.format(activity, correction[1], rep_count), end = '\r')
                        # print('Correction : {} ||| Rep count : {}'.format(correction[1], rep_count), end = '\r')

                        if start == True and down == True and rep_start == False:
                            start_index = len(list_of_elbow_angles) - 1
                            rep_start = True
                            rep_end = False
                            rep_counted = False 

                        if start == False and down == False and rep_end == False:
                            stop_index = len(list_of_elbow_angles) - 1
                            interval = []

                            # import pdb; pdb.set_trace()
                            for angle in list_of_elbow_angles[start_index: stop_index]:
                                if angle < pushups.lazy_pushup_threshold and not rep_counted:
                                    rep_count = rep_count + 1
                                    rep_counted = True
                            rep_end = True
                            rep_start = False
            except:
                pass

            squats = Squats(final_pose, list_of_hip_angles)
            print("Ran Squats")
            
            try:
                corrections = squats.all_corrections()
                for problem, correction in corrections.items():
                    if problem == 'squat_depth':
                        list_of_hip_angles.append(correction[0])
                       
                        # if rep_not_counted:
                        start = correction[2]
                        down = correction[3]

                        print('1] start, down, rep_start, rep_end, rep_count, squat angle', \
                            start, down, rep_start, rep_end, rep_count, correction[0])

                        if start == True and down == True and rep_start == False:
                            start_index = len(list_of_hip_angles) - 1
                            rep_start = True
                            rep_end = False
                            rep_counted = False 

                        if start == False and down == False and rep_end == False:
                            stop_index = len(list_of_hip_angles) - 1
                            interval = []

                            # import pdb; pdb.set_trace()
                            for angle in list_of_hip_angles[start_index: stop_index]:
                                if angle < squats.squat_depth_angle_threshold and not rep_counted:
                                    rep_count = rep_count + 1
                                    rep_counted = True
                            rep_end = True
                            rep_start = False

                        print('2] start, down, rep_start, rep_end, rep_count, squat angle', \
                            start, down, rep_start, rep_end, rep_count, correction[0])
                               
            except:
                pass


        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        if track_ids == True:
            propagate_ids(previous_poses, current_poses)
            previous_poses = current_poses
            for pose in current_poses:
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        # cv2.imwrite('output.jpg', img)
        key = cv2.waitKey(1)
        if key == 27:  # esc
           return
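The Pushups and Squats helpers above track elbow and hip angles per frame; the angle at a joint can be derived from three keypoints. A minimal sketch of such a helper (hypothetical name, assuming (x, y) pixel coordinates with -1 marking a missing keypoint):

import numpy as np


def joint_angle(pt_a, pt_joint, pt_b):
    """Angle in degrees at pt_joint formed by pt_a-pt_joint-pt_b (sketch).

    Example: elbow angle = joint_angle(shoulder, elbow, wrist). Returns -1.0
    when any keypoint is missing or degenerate.
    """
    a, j, b = (np.asarray(p, dtype=np.float32) for p in (pt_a, pt_joint, pt_b))
    if (a < 0).any() or (j < 0).any() or (b < 0).any():
        return -1.0
    v1, v2 = a - j, b - j
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0:
        return -1.0
    cos_angle = np.clip(np.dot(v1, v2) / denom, -1.0, 1.0)
    return float(np.degrees(np.arccos(cos_angle)))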
Example #27
def get_skel_coords(net,
                    image_provider,
                    height_size=256,
                    cpu=False,
                    track=1,
                    smooth=1):
    # text_file = open("skel/"+ args.save_txt, "w")
    net = net.eval()
    if not cpu:
        net = net.cuda()
    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 33

    img = image_provider
    # for img in image_provider:
    #print(img.shape)
    orig_img = img.copy()
    heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                            upsample_ratio, cpu)

    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                 all_keypoints_by_type,
                                                 total_keypoints_num)

    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                  pafs,
                                                  demo=True)
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id,
                      0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio
                            - pad[1]) / scale
        all_keypoints[kpt_id,
                      1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio
                            - pad[0]) / scale
    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(
                    all_keypoints[int(pose_entries[n][kpt_id]), 1])
        pose = Pose(pose_keypoints, pose_entries[n][18])
        current_poses.append(pose)

    if track:
        track_poses(previous_poses, current_poses, smooth=smooth)
        previous_poses = current_poses
    for pose in current_poses:
        pose.draw(img)
    img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)

    # for pose in current_poses:
    if len(current_poses) != 0:

        # n = text_file.write(coords)

        cv2.rectangle(
            img, (pose.bbox[0], pose.bbox[1]),
            (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
            (0, 255, 0))
        if track:
            cv2.putText(img, 'id: {}'.format(pose.id),
                        (pose.bbox[0], pose.bbox[1] - 16),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
    # break

    #cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)

    if len(current_poses) != 0:
        #print ("not zero", img)
        return current_poses[0].return_coords(), img
    else:
        #print ("zero")
        return [], np.array((1, 1, 1), np.uint8)

    # NOTE: the code below is unreachable; both branches above return first
    key = cv2.waitKey(delay)
    if key == 27:  # esc
        text_file.close()
        return
    elif key == 112:  # 'p'
        if delay == 33:
            delay = 0
        else:
            delay = 33
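get_skel_coords above works on a single frame rather than a provider loop. A minimal usage sketch; the model class, load_state helper and checkpoint file name follow the lightweight-human-pose-estimation.pytorch project and are assumptions, as is the input image path:

import cv2
import torch

from models.with_mobilenet import PoseEstimationWithMobileNet  # assumed layout
from modules.load_state import load_state                      # assumed layout

net = PoseEstimationWithMobileNet()
checkpoint = torch.load("checkpoint_iter_370000.pth", map_location="cpu")
load_state(net, checkpoint)

frame = cv2.imread("frame.jpg")  # hypothetical input frame
coords, vis = get_skel_coords(net, frame, height_size=256, cpu=False)
if coords:
    cv2.imwrite("skel_vis.jpg", vis)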
Example #28
def run_inference(net,
                  image_provider,
                  height_size,
                  cpu,
                  track,
                  smooth,
                  no_display,
                  json_view=False):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 100
    if isinstance(image_provider, ImageReader):
        delay = 0

    for img in image_provider:
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for n, pose_entry in enumerate(pose_entries):
            if len(pose_entry) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entry[kpt_id] != -1.0:
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entry[kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entry[kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entry[18])
            current_poses.append(pose)

        if json_view == True:
            return current_poses

        if not no_display:
            if track:
                track_poses(previous_poses, current_poses, smooth=smooth)
                previous_poses = current_poses
            for pose in current_poses:
                pose.draw(img)

            for pose in current_poses:
                cv2.rectangle(
                    img, (pose.bbox[0], pose.bbox[1]),
                    (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                    (32, 202, 252))
                if track:
                    cv2.putText(img, 'id: {}'.format(pose.id),
                                (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
            cv2.imshow('PoseCamera', img)
            key = cv2.waitKey(delay)
            if key == 27:
                return
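With json_view=True the function above returns the Pose objects of the first processed frame instead of drawing them. A small serialisation sketch, assuming each Pose exposes .keypoints, .confidence and .bbox as in the pose module these demos import:

import json


def poses_to_json(current_poses):
    """Serialise Pose objects to a JSON string (sketch)."""
    records = []
    for pose in current_poses:
        records.append({
            "keypoints": pose.keypoints.tolist(),   # 18 x 2, -1 for missing
            "confidence": float(pose.confidence),
            "bbox": [int(v) for v in pose.bbox],
        })
    return json.dumps(records, indent=2)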
Example #29
def run_demo(net, image_provider, height_size, cpu, track_ids):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = num_keys
    previous_poses = []
    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][num_keypoints])
            current_poses.append(pose)
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        if track_ids == True:
            propagate_ids(previous_poses, current_poses)
            previous_poses = current_poses
            for pose in current_poses:
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 0, 255))
        for p in range(pose_entries.shape[0]):
            allkey=True
            landmarks=[]
            if(len(pose_entries[p])>0):
                if pose_entries[p][0]==-1 or pose_entries[p][14]==-1 or pose_entries[p][15]==-1 or (pose_entries[p][16]==-1 and pose_entries[p][17]==-1):
                    allkey=False
                if allkey:
                    landmarks.append(int(all_keypoints[int(pose_entries[p][0])][0]))
                    landmarks.append(int(all_keypoints[int(pose_entries[p][0])][1]))
                    landmarks.append(int(all_keypoints[int(pose_entries[p][14])][0]))
                    landmarks.append(int(all_keypoints[int(pose_entries[p][14])][1]))
                    landmarks.append(int(all_keypoints[int(pose_entries[p][15])][0]))
                    landmarks.append(int(all_keypoints[int(pose_entries[p][15])][1]))
                    if pose_entries[p][17]==-1:
                        ear="right"
                        landmarks.append(int(all_keypoints[int(pose_entries[p][16])][0]))
                        landmarks.append(int(all_keypoints[int(pose_entries[p][16])][1]))
                    else:
                        ear="left"
                        landmarks.append(int(all_keypoints[int(pose_entries[p][17])][0]))
                        landmarks.append(int(all_keypoints[int(pose_entries[p][17])][1]))
            if len(landmarks)>0:
                imgpts, modelpts, rotate_degree, center = face_orientation(img, landmarks,ear)

                cv2.line(img, center, tuple(imgpts[1].ravel()), (0,255,0), 3) #GREEN
                cv2.line(img, center, tuple(imgpts[0].ravel()), (255, 0, 0), 3) #BLUE
                cv2.line(img, center, tuple(imgpts[2].ravel()), (0,0,255), 3) #RED

                #for j in range(len(rotate_degree)):
                            #cv2.putText(img, ('{:05.2f}').format(float(rotate_degree[j])), (10, 30 + (50 * j)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=2, lineType=2)
                    
        #cv2.imwrite('/Users/Utente/Desktop/poseFINAL/out.jpg', img)
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        key = cv2.waitKey(33)
        if key == 27:  # esc
            return
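face_orientation above converts the nose, eye and ear landmarks into three projected axes for drawing, but it is not included in this listing. A hypothetical sketch of the usual cv2.solvePnP approach; the 3D model points and the focal length (taken as the image width) are assumptions:

import cv2
import numpy as np


def head_pose_axes(img, landmarks):
    """Rough head-pose axes from 4 landmarks: nose, right eye, left eye, ear (sketch)."""
    image_points = np.asarray(landmarks, dtype=np.float64).reshape(-1, 2)
    model_points = np.array([          # crude 3D face model, arbitrary units
        (0.0, 0.0, 0.0),               # nose tip
        (-165.0, 170.0, -135.0),       # right eye
        (165.0, 170.0, -135.0),        # left eye
        (-300.0, 100.0, -300.0),       # ear (mirror x for a left ear)
    ], dtype=np.float64)
    h, w = img.shape[:2]
    camera_matrix = np.array([[w, 0, w / 2.0],
                              [0, w, h / 2.0],
                              [0, 0, 1.0]], dtype=np.float64)
    dist_coeffs = np.zeros((4, 1))
    ok, rvec, tvec = cv2.solvePnP(model_points, image_points, camera_matrix,
                                  dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
    if not ok:
        return None, None
    axes_3d = np.float64([[400, 0, 0], [0, 400, 0], [0, 0, 400]])
    imgpts, _ = cv2.projectPoints(axes_3d, rvec, tvec, camera_matrix, dist_coeffs)
    nose = (int(image_points[0][0]), int(image_points[0][1]))
    return imgpts, nose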
Example #30
def run_demo(model, image_provider, height_size, cpu, track, smooth, file):
    """[summary]

    Args:
        model ([type]): [description]
        image_provider ([type]): [description]
        height_size ([type]): [description]
        cpu ([type]): [description]
        track ([type]): [description]
        smooth ([type]): [description]
        file ([type]): [description]

    Returns:
        [type]: [description]
    """
    model = model.eval()
    if not cpu:
        model = model.cuda()

    point_list = []
    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []

    # save the output video
    fps = image_provider.fps
    width = image_provider.width
    height = image_provider.height
    fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V')
    # video_saver = cv2.VideoWriter('TESTV.mp4', fourcc, fps, (height, width))
    save_video_path = os.path.join(os.getcwd(), 'video_output')
    if not os.path.exists(save_video_path):
        os.mkdir(save_video_path)
    save_video_name = os.path.join(save_video_path, file + '.mp4')
    video_saver = cv2.VideoWriter(save_video_name, fourcc, fps,
                                  (width, height))

    for img in image_provider:
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(model, img, height_size,
                                                stride, upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []
        for pose_entry in pose_entries:
            if len(pose_entry) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entry[kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entry[kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entry[kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entry[18])
            current_poses.append(pose)

            # save keypoint in list
            key_point_list = pose_keypoints.flatten().tolist()
            point_list.append(key_point_list)

        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses
        for pose in current_poses:
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        for pose in current_poses:
            cv2.rectangle(
                img, (pose.bbox[0], pose.bbox[1]),
                (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        video_saver.write(img)
    return point_list
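run_demo above returns point_list, one flattened row of 18 (x, y) pairs per detected pose, alongside the saved overlay video. A minimal sketch for persisting those keypoints; the .npy path mirrors the video_output directory and is an assumption:

import numpy as np


def save_keypoints(point_list, out_path="video_output/keypoints.npy"):
    """Save the flattened (num_poses, 36) keypoint rows returned by run_demo (sketch).

    Each row is 18 (x, y) pairs flattened, with -1 marking a missing keypoint.
    """
    arr = np.asarray(point_list, dtype=np.int32)
    np.save(out_path, arr)
    return arr.shape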