Example #1
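All three examples lean on helpers from the lightweight-openpose codebase (Pose, infer_fast, extract_keypoints, group_keypoints, action_detect, coincide). A plausible import header is sketched below; the modules.* paths follow the upstream lightweight-human-pose-estimation.pytorch layout, while the remaining helpers are project-local and their locations are assumptions only:

import time

import cv2
import numpy as np
import torch

from modules.pose import Pose  # upstream lightweight-openpose layout
from modules.keypoints import extract_keypoints, group_keypoints
# infer_fast (defined in the upstream demo.py), action_detect and coincide are
# project-local helpers whose locations differ between forks; import them from
# wherever they live in your checkout.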
def run_demo(net, action_net, image_provider, height_size, cpu):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    i = 0
    for img in image_provider:
        orig_img = img.copy()

        if i % 1 == 0:  # process every frame; raise the modulus to skip frames
            heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)

            total_keypoints_num = 0
            all_keypoints_by_type = []
            for kpt_idx in range(num_keypoints):  # 19th for bg
                total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

            pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):  # map keypoints back to original-image coordinates
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])
                if len(pose.getKeyPoints()) >= 10:  # keep poses with at least 10 detected keypoints
                    current_poses.append(pose)

            for pose in current_poses:
                pose.img_pose = pose.draw(img, show_draw=True)
                crown_proportion = pose.bbox[2] / pose.bbox[3]  # aspect ratio (width / height)
                pose = action_detect(action_net, pose, crown_proportion)

                if pose.pose_action == 'fall':
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 0, 255), thickness=3)
                    cv2.putText(img, 'state: {}'.format(pose.pose_action), (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                else:
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                    cv2.putText(img, 'state: {}'.format(pose.pose_action), (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))

            img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)  # blend annotations with the original frame
            cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)

            cv2.waitKey(1)
        i += 1
    cv2.destroyAllWindows()
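A minimal driver for this function might look like the following. The module paths follow the upstream repo, but the checkpoint file names, the pickled fall classifier, and the VideoReader location are assumptions, not part of the snippet above:

# Driver sketch; checkpoint paths and the fall-classifier loading are hypothetical.
import torch
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state
from demo import VideoReader  # assumed location of the frame provider

net = PoseEstimationWithMobileNet()
load_state(net, torch.load('checkpoint_iter_370000.pth', map_location='cpu'))
action_net = torch.load('action_net.pth', map_location='cpu')  # hypothetical
run_demo(net, action_net, VideoReader(0), height_size=256, cpu=False)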
Example #2
def run_demo(net, action_net, image_provider, height_size, cpu, track, smooth):
    net = net.eval()
    if not cpu:
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts

    with open(
            "D:/py/openpose_lightweight/performance_evaluation/action_result.txt",
            "a") as f:

        for img, img_name, label in image_provider:

            label_ = 0 if label == 'fall' else 1  # ground-truth label: 0 = fall, 1 = normal

            heatmaps, pafs, scale, pad = infer_fast(net, img, height_size,
                                                    stride, upsample_ratio,
                                                    cpu)

            total_keypoints_num = 0
            all_keypoints_by_type = []
            for kpt_idx in range(num_keypoints):  # 19th for bg
                total_keypoints_num += extract_keypoints(
                    heatmaps[:, :, kpt_idx], all_keypoints_by_type,
                    total_keypoints_num)

            pose_entries, all_keypoints = group_keypoints(
                all_keypoints_by_type, pafs, demo=True)
            for kpt_id in range(all_keypoints.shape[0]):
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                            upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                            upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones(
                    (num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])

                if len(pose.getKeyPoints()) >= 12:  # keep poses with at least 12 detected keypoints
                    current_poses.append(pose)

            # score only the first accepted pose, so each frame contributes
            # exactly one line to action_result.txt
            for pose in current_poses:
                pose.img_pose = pose.draw(img)

                crown_proportion = pose.bbox[2] / pose.bbox[3]  # aspect ratio (width / height)
                pose = action_detect(action_net, pose, crown_proportion)
                cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                              (pose.bbox[0] + pose.bbox[2],
                               pose.bbox[1] + pose.bbox[3]), (0, 255, 0))

                f.write(f"{label_} {pose.action_fall} {pose.action_normal}\n")
                f.flush()
                break

            cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
            cv2.waitKey(1)
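Each line of action_result.txt pairs the ground-truth label (0 = fall, 1 = normal) with the classifier's two scores. Assuming action_fall and action_normal are written as plain float probabilities, a small script along these lines could turn the file into an accuracy figure:

# Sketch: accuracy from action_result.txt, assuming the two scores are
# fall/normal probabilities written by the loop above.
correct = total = 0
with open("action_result.txt") as f:
    for line in f:
        label, p_fall, p_normal = line.split()
        predicted = 0 if float(p_fall) > float(p_normal) else 1  # 0 = fall
        correct += int(predicted == int(label))
        total += 1
print(f"accuracy: {correct / total:.3f}")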
Example #3
def run_demo(net, action_net, image_provider, height_size, cpu, boxList):
    net = net.eval()
    if not cpu:
        # CUDA sanity check before moving the model to the GPU
        print(torch.cuda.device_count())
        print(torch.cuda.is_available())
        net = net.cuda()

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts  # 18

    i = 0
    for img in image_provider:  # iterate over the image stream
        orig_img = img.copy()  # keep an unmodified copy for the overlay
        fallFlag = 0
        if i % 1 == 0:  # process every frame; raise the modulus to skip frames
            heatmaps, pafs, scale, pad = infer_fast(
                net, img, height_size, stride, upsample_ratio,
                cpu)  # returns heatmaps, PAFs, the input-to-original scale factor, and the network-input padding

            total_keypoints_num = 0
            all_keypoints_by_type = []  # 18 lists, one per joint type; each entry holds a keypoint's (x, y), its heatmap score, and its index among all keypoints
            for kpt_idx in range(num_keypoints):  # the 19th channel is background; only the first 18 joints are used
                total_keypoints_num += extract_keypoints(
                    heatmaps[:, :, kpt_idx], all_keypoints_by_type,
                    total_keypoints_num)

            pose_entries, all_keypoints = group_keypoints(
                all_keypoints_by_type, pafs, demo=True
            )  # one row per person: the first 18 entries index into all_keypoints, the last two hold the person's score and keypoint count
            for kpt_id in range(all_keypoints.shape[0]):  # map each keypoint back to original-image coordinates
                all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                            upsample_ratio - pad[1]) / scale
                all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                            upsample_ratio - pad[0]) / scale
            current_poses = []
            for n in range(len(pose_entries)):  # iterate over each detected person
                if len(pose_entries[n]) == 0:
                    continue
                pose_keypoints = np.ones(
                    (num_keypoints, 2), dtype=np.int32) * -1
                for kpt_id in range(num_keypoints):
                    if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                        pose_keypoints[kpt_id, 0] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 0])
                        pose_keypoints[kpt_id, 1] = int(
                            all_keypoints[int(pose_entries[n][kpt_id]), 1])
                pose = Pose(pose_keypoints, pose_entries[n][18])
                posebox = (int(pose.bbox[0]), int(pose.bbox[1]),
                           int(pose.bbox[0]) + int(pose.bbox[2]),
                           int(pose.bbox[1]) + int(pose.bbox[3]))
                coincideValue = coincide(boxList, posebox)
                print(posebox)
                print('coincideValue:' + str(coincideValue))
                # accept a pose with at least 10 keypoints that overlaps a
                # YOLOv5 box by at least 30% and includes the lower body
                if len(pose.getKeyPoints()) >= 10 and coincideValue >= 0.3 and pose.lowerHalfFlag < 3:
                    current_poses.append(pose)

            for pose in current_poses:
                pose.img_pose = pose.draw(img, is_save=True, show_draw=True)
                crown_proportion = pose.bbox[2] / pose.bbox[3]  # aspect ratio (width / height)
                pose = action_detect(action_net, pose,
                                     crown_proportion)  # classify the pose as fall or normal

                if pose.pose_action == 'fall':
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2],
                                   pose.bbox[1] + pose.bbox[3]), (0, 0, 255),
                                  thickness=3)
                    cv2.putText(img, 'state: {}'.format(pose.pose_action),
                                (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                    fallFlag = 1
                else:
                    cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                                  (pose.bbox[0] + pose.bbox[2],
                                   pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
                    cv2.putText(img, 'state: {}'.format(pose.pose_action),
                                (pose.bbox[0], pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))
            if fallFlag == 1:
                t = time.time()
                # cv2.imwrite(f'C:/zqr/project/yolov5_openpose/Image/{t}.jpg', img)
                print('frame saved')

            img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)  # blend annotations with the original frame
            # save or display the annotated frame:
            # cv2.imwrite(f'C:/zqr/project/yolov5_openpose/Image/{t}.jpg', img)
            # cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)

            cv2.waitKey(1)
        i += 1
    cv2.destroyAllWindows()
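The snippet never shows coincide itself; given how it is used (a ratio compared against 0.3 between the YOLOv5 box list and the pose box), one plausible reconstruction is the largest fraction of the pose box covered by any detection box. This is a hypothetical sketch, not the project's actual helper:

# Hypothetical reconstruction of coincide: best intersection area over the
# pose-box area, with all boxes given as (x1, y1, x2, y2).
def coincide(boxList, posebox):
    px1, py1, px2, py2 = posebox
    pose_area = max(0, px2 - px1) * max(0, py2 - py1)
    if pose_area == 0:
        return 0.0
    best = 0.0
    for bx1, by1, bx2, by2 in boxList:
        iw = min(px2, bx2) - max(px1, bx1)
        ih = min(py2, by2) - max(py1, by1)
        if iw > 0 and ih > 0:
            best = max(best, iw * ih / pose_area)
    return best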