Example #1
def demo_for_human36m():
    joint_world = [[-91.679, 154.404, 907.261],
                   [-223.23566, 163.80551, 890.5342],
                   [-188.4703, 14.077106, 475.1688],
                   [-261.84055, 186.55286, 61.438915],
                   [39.877888, 145.00247, 923.98785],
                   [-11.675994, 160.89919, 484.39148],
                   [-51.550297, 220.14624, 35.834396],
                   [-132.34781, 215.73018, 1128.8396],
                   [-97.1674, 202.34435, 1383.1466],
                   [-112.97073, 127.96946, 1477.4457],
                   [-120.03289, 190.96477, 1573.4],
                   [25.895456, 192.35947, 1296.1571],
                   [107.10581, 116.050285, 1040.5062],
                   [129.8381, -48.024918, 850.94806],
                   [-230.36955, 203.17923, 1311.9639],
                   [-315.40536, 164.55284, 1049.1747],
                   [-350.77136, 43.442127, 831.3473],
                   [-102.237045, 197.76935, 1304.0605]]
    joint_world = np.asarray(joint_world)
    # keypoint connection lines (skeleton)
    kps_lines = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13),
                 (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4),
                 (4, 5), (5, 6))
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world,
               kps_lines,
               coordinate="WC",
               title="WC",
               set_lim=True,
               isshow=True)

    kp_vis = CameraTools()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam,
               kps_lines,
               coordinate="CC",
               title="CC",
               set_lim=True,
               isshow=True)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)  # project camera coordinates to the image plane

    joint_world1 = kp_vis.convert_cc_to_wc(joint_cam)  # round-trip back to world coordinates
    vis.vis_3d(joint_world1,
               kps_lines,
               coordinate="WC",
               title="WC",
               set_lim=True,
               isshow=True)

    # show in the pixel coordinate system
    kpt_2d = joint_img[:, 0:2]
    image_path = "./data/s_01_act_02_subact_01_ca_02_000001.jpg"
    image = image_processing.read_image(image_path)
    image = image_processing.draw_key_point_in_image(image,
                                                     key_points=[kpt_2d],
                                                     pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
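CameraTools itself is not shown in this example. For orientation, here is a minimal sketch of what its three conversions typically compute for a pinhole camera; fx, fy, cx, cy, R and T are placeholder values, not the Human3.6M calibration:

import numpy as np

# Hypothetical pinhole-camera parameters; the real CameraTools would load
# the calibration of the recording camera instead.
fx, fy, cx, cy = 1145.0, 1144.0, 512.0, 515.0  # intrinsics, in pixels
R = np.eye(3)    # world -> camera rotation
T = np.zeros(3)  # camera position in world coordinates

def convert_wc_to_cc(joint_world):
    """World -> camera: pt_cam = R @ (pt_world - T)."""
    return (R @ (joint_world - T).T).T

def convert_cc_to_wc(joint_cam):
    """Camera -> world: the inverse of convert_wc_to_cc."""
    return (R.T @ joint_cam.T).T + T

def convert_cc_to_ic(joint_cam):
    """Camera -> image: perspective division plus intrinsics; z is kept as depth."""
    x = joint_cam[:, 0] / joint_cam[:, 2] * fx + cx
    y = joint_cam[:, 1] / joint_cam[:, 2] * fy + cy
    return np.stack([x, y, joint_cam[:, 2]], axis=1)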
Example #2
def show_landmark_boxes(win_name, image, bboxes, scores, landms):
    '''
    Show landmarks and bounding boxes
    :param win_name: window title
    :param image: input image
    :param bboxes: [[x1, y1, x2, y2], [x1, y1, x2, y2], ...]
    :param scores: confidence score for each box
    :param landms: landmarks, [[x1, y1], [x2, y2], ...] per face
    :return:
    '''
    image = image_processing.draw_landmark(image, landms, vis_id=True)
    image = image_processing.draw_image_bboxes_text(image,
                                                    bboxes,
                                                    scores,
                                                    color=(0, 0, 255))
    image_processing.cv_show_image(win_name, image, waitKey=1)
Example #3
def demo_for_kinect():
    # data_dir = "E:/git/python-learning-notes/tutorial/kinect2/dataset/kitnect3d"
    data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/panjinquan"
    image, joint_world = load_data(data_dir, flag=5)
    joint_world = -joint_world * 1000  # scale to millimeters and flip sign
    kps_lines = [
        [0, 1],
        [1, 20],
        [20, 2],
        [2, 3],  # Spine
        [20, 4],
        [4, 5],
        [5, 6],
        [6, 7],
        [7, 21],
        [7, 22],  # Left arm and hand
        [20, 8],
        [8, 9],
        [9, 10],
        [10, 11],
        [11, 23],
        [11, 24],  # Right arm and hand
        [0, 12],
        [12, 13],
        [13, 14],
        [14, 15],  # Left leg
        [0, 16],
        [16, 17],
        [17, 18],
        [18, 19]
    ]  # Right leg
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC")

    kp_vis = CameraTools()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC")
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)
    # show in the pixel coordinate system
    kpt_2d = joint_img[:, 0:2]
    image = image_processing.draw_key_point_in_image(image,
                                                     key_points=[kpt_2d],
                                                     pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
Example #4
    def show(self, filename, image, keypoints, bboxes, class_name, vis=True):
        is_save = True
        for i, joints in enumerate(keypoints):
            # COCO keypoint order: 5/6 are the shoulders, 11/12 the hips;
            # skip saving when any of them is missing (all-zero)
            if np.sum(np.asarray(joints[5])) == 0 or np.sum(np.asarray(joints[6])) == 0 or \
                    np.sum(np.asarray(joints[11])) == 0 or np.sum(np.asarray(joints[12])) == 0:
                is_save = False
            else:
                is_save = True
            # append two synthetic joints: chest (shoulder midpoint) and pelvis (hip midpoint)
            chest_joint = (np.asarray(joints[5]) + np.asarray(joints[6])) / 2
            hip_joint = (np.asarray(joints[11]) + np.asarray(joints[12])) / 2
            keypoints[i].append(chest_joint.tolist())
            keypoints[i].append(hip_joint.tolist())

        if vis:
            image_processing.draw_image_bboxes_text(image, bboxes, class_name)
            # image_processing.show_image_boxes(None, image, joints_bbox, color=(255, 0, 0))
            image = image_processing.draw_key_point_in_image(
                image, keypoints, pointline=self.skeleton)
            # image_processing.cv_show_image("Det", image, waitKey=0)
            # self.save_images(image, filename, is_save)
            image_processing.cv_show_image("Det", image)
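The chest and hip midpoints above rely on the COCO keypoint convention (index 5/6 = left/right shoulder, 11/12 = left/right hip). A tiny self-contained illustration of the same computation:

import numpy as np

joints = np.zeros((17, 2))
joints[5], joints[6] = [100, 50], [140, 50]      # left/right shoulder
joints[11], joints[12] = [105, 150], [135, 150]  # left/right hip
chest_joint = (joints[5] + joints[6]) / 2    # -> [120.,  50.]
hip_joint = (joints[11] + joints[12]) / 2    # -> [120., 150.]
print(chest_joint, hip_joint)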
Example #5

def label_test(image_dir, filename, class_names=None):
    basename = os.path.basename(filename)[:-len('.txt')] + ".jpg"
    image_path = os.path.join(image_dir, basename)
    image = image_processing.read_image(image_path)
    data = file_processing.read_data(filename, split=" ")
    label_list, rect_list = file_processing.split_list(data, split_index=1)
    label_list = [l[0] for l in label_list]
    if class_names:
        name_list = file_processing.decode_label(label_list, class_names)
    else:
        name_list = label_list
    show_info = ["id:" + str(n) for n in name_list]
    rgb_image = image_processing.show_image_rects_text("object2",
                                                       image,
                                                       rect_list,
                                                       show_info,
                                                       color=(0, 0, 255),
                                                       drawType="custom",
                                                       waitKey=1)
    rgb_image = image_processing.resize_image(rgb_image, 900)
    image_processing.cv_show_image("object2", rgb_image)
Example #6
def create_dataset(out_dir, nums, filename, char_set, captcha_height,
                   captcha_width, captcha_size):
    '''
    Generate captcha samples
    :param out_dir: directory in which to save the dataset images
    :param nums: number of samples to generate
    :param filename: txt file in which to save the labels
    :param char_set: character set
    :param captcha_height: captcha image height
    :param captcha_width: captcha image width
    :param captcha_size: number of characters per captcha
    :return: None
    '''

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # generate captcha samples; display the first one
    i = 0
    while i < nums:
        text, image = gen_captcha_text_and_image(char_set=char_set,
                                                 captcha_height=captcha_height,
                                                 captcha_width=captcha_width,
                                                 captcha_size=captcha_size)
        # the generated captcha image does not always have the requested size
        if image.shape != (captcha_height, captcha_width, 3):
            continue

        if i == 0:
            image_processing.cv_show_image(text, image)  # show the captcha

        image_name = str(i) + "_" + text + ".jpg"
        image_path = out_dir + "/" + image_name
        print(image_path)
        image_processing.save_image(image_path, image, toUINT8=False)
        text = [c for c in text]  # split the captcha string into characters
        label_list = file_processing.label_encode(text, char_set)  # chars -> indices
        content = [image_name] + label_list
        content = ' '.join('%s' % c for c in content)
        file_processing.write_data(filename, [content], model='a')
        i += 1
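file_processing.label_encode is not shown in this excerpt; presumably it maps each character to its index in char_set, along these lines (a hypothetical stand-in, not the project's code):

def label_encode(chars, char_set):
    """Map each character to its index in char_set (hypothetical stand-in)."""
    return [char_set.index(c) for c in chars]

digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
print(label_encode(['2', '0', '4', '8'], digits))  # [2, 0, 4, 8]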
Example #7

def demo_for_kinect():
    flip_transform = np.asarray([[1, 0, 0],
                                 [0, -1, 0],
                                 [0, 0, -1]])

    # flip_transform = np.linalg.inv(flip_transform)
    # data_dir = "E:/git/python-learning-notes/tutorial/kinect2/dataset/kitnect3d"
    data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/panjinquan"  # flag= 5
    # data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/dengjianxiang"  # 241,245,348
    image, joint_world = load_data(data_dir, flag=503)
    h, w, d = image.shape
    joint_world = convert_kinect2h36m(joint_world)
    joint_world = joint_world * 1000  # meters -> millimeters
    joint_world = np.dot(-flip_transform, joint_world.T).T  # R * (pt - T)

    # kps_lines = [[0, 1], [1, 20], [20, 2], [2, 3],  # Spine
    #              [20, 4], [4, 5], [5, 6], [6, 7], [7, 21], [7, 22],  # Left arm and hand
    #              [20, 8], [8, 9], [9, 10], [10, 11], [11, 23], [11, 24],  # Right arm and hand
    #              [0, 12], [12, 13], [13, 14], [14, 15],  # Left leg
    #              [0, 16], [16, 17], [17, 18], [18, 19]]  # Right leg
    kps_lines = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15),
                 (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6))

    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC", set_lim=True)

    kp_vis = KeyPointsVisual()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC", set_lim=True)
    joint_cam = np.dot(flip_transform, joint_cam.T).T  # R * (pt - T)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)
    # show in the pixel coordinate system
    kpt_2d = joint_img[:, 0:2]
    image = image_processing.draw_key_point_in_image(image, key_points=[kpt_2d], pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
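The sign handling around flip_transform is easy to misread: negating the matrix turns diag(1, -1, -1) into diag(-1, 1, 1), i.e. a mirror of the x axis only. A quick standalone check:

import numpy as np

flip_transform = np.asarray([[1, 0, 0],
                             [0, -1, 0],
                             [0, 0, -1]])
print(-flip_transform)  # diag(-1, 1, 1)
pt = np.array([[1.0, 2.0, 3.0]])
print(np.dot(-flip_transform, pt.T).T)  # [[-1.  2.  3.]]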
Example #8

def demo_for_human36m():
    from modules.utils_3d.data import human36m_data
    # x,y,z
    # joint_world = human36m_data.data0
    joint_world = human36m_data.data2 * 1000
    # joint_world = human36m_data.data1*1000
    joint_world = np.asarray(joint_world)
    kps_lines = human36m_data.kps_lines
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC", set_lim=True)

    kp_vis = KeyPointsVisual()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC", set_lim=True)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)

    # show in the pixel coordinate system
    kpt_2d = joint_img[:, 0:2]
    image_path = "/media/dm/dm1/git/python-learning-notes/modules/utils_3d/data/s_01_act_02_subact_01_ca_02_000001.jpg"
    image = image_processing.read_image(image_path)
    image = image_processing.draw_key_point_in_image(image, key_points=[kpt_2d], pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
Example #9
    def show(self, color_img, align_color_img, depth_img, ir_frame, bboxes):
        """
        :param color_img: <class 'tuple'>: (1080, 1920, 4),uint8
        :param align_color_img: <class 'tuple'>: (424, 512, 4),uint8
        :param depth_img: <class 'tuple'>: (424, 512),uint16
        :param ir_frame: <class 'tuple'>: (424, 512, 3),uint8
        :return:
        """
        # align_color_img = cv2.cvtColor(align_color_img, cv2.COLOR_RGBA2RGB)  # drop the alpha channel
        image_processing.addMouseCallback("depth_img", param=depth_img)
        image_processing.addMouseCallback("align_color_img", param=depth_img)

        # image_processing.cv_show_image("color_img", color_img, waitKey=1)
        image_processing.cv_show_image("align_color_img",
                                       align_color_img,
                                       waitKey=1)
        image_processing.cv_show_image("depth_img", depth_img, waitKey=1)
        image_processing.cv_show_image("ir_frame", ir_frame, waitKey=1)
        image_processing.cv_show_image("ir_frame1", self.ir_frame1, waitKey=1)
        self.count += 1
        freq = 3
        # self.flag = cv2.waitKey(20) & 0xFF == ord('s')
        if self.snapshot_dir and len(bboxes) > 0 and self.count % freq == 0:
            print("save image:{}".format(self.count))
            # self.flag = True
            # pre = "{}.png".format(self.count)
            prefix = "{}_{}.png".format(self.prefix, self.count)
            cv2.imwrite(
                os.path.join(self.snapshot_dir, "depth/{}".format(prefix)),
                depth_img)
            cv2.imwrite(
                os.path.join(self.snapshot_dir, "color/{}".format(prefix)),
                align_color_img)
            cv2.imwrite(
                os.path.join(self.snapshot_dir, "ir/{}".format(prefix)),
                ir_frame)
        cv2.waitKey(20)
        self.save_videos(color_img, align_color_img, depth_img, ir_frame,
                         bboxes)
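One detail worth knowing here: the snapshots are written as .png because PNG round-trips the uint16 depth values, which JPEG would not. A quick standalone check:

import cv2
import numpy as np

depth = (np.random.rand(424, 512) * 4000).astype(np.uint16)
cv2.imwrite("depth.png", depth)
restored = cv2.imread("depth.png", cv2.IMREAD_UNCHANGED)
assert restored.dtype == np.uint16 and (restored == depth).all()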
Example #10

    def deme_test(self, out_voc_ann=None, vis=True):
        joints_bbox = []
        for lines in self.boxes_label_lists:
            # lines = ['lexue_teacher_LHui211_20190921145000_20190921172000_40_000143.jpg', 543.053254438, 527.147928994,
            #          456.710059172, 548.733727811, 'person']
            image_name = lines[0]
            rect = [lines[1], lines[2], lines[3], lines[4]]
            label = [lines[5]]
            image_path = os.path.join(self.image_dir, image_name)
            json_file = image_name[:-len(".jpg")] + ".json"
            json_path = os.path.join(self.json_dir, json_file)
            if not os.path.exists(image_path):
                print("Error:no path: {}".format(json_file))
                continue
            if not os.path.exists(json_path):
                print("Error:no path: {}".format(json_file))
                continue

            anno = CustomDataset.get_anno(json_path)
            if not anno:
                print("Error:empty path: {}".format(json_file))
                continue
            keypoints = self.get_keypoints(anno)
            total = np.sum(np.abs(np.asarray(keypoints)))
            if keypoints == [] or total == 0:
                print("Error:empty path: {}".format(json_file))
                continue

            image = image_processing.read_image(image_path)
            if out_voc_ann:
                try:
                    bboxes = image_processing.rects2bboxes([rect])
                    joints_bbox = self.convert_voc_dataset(
                        image_name,
                        image_shape=image.shape,
                        bboxes=bboxes,
                        labels=label,
                        keypoints=keypoints,
                        out_voc_ann=out_voc_ann)
                except Exception as e:
                    print("Error:empty path: {}".format(json_file))
                    raise Exception("lines: {}".format(lines))

            save_image = True
            for i, joints in enumerate(keypoints):
                if np.sum(np.asarray(joints[5])) == 0 or np.sum(np.asarray(joints[6])) == 0 or \
                        np.sum(np.asarray(joints[11])) == 0 or np.sum(np.asarray(joints[12])) == 0:
                    save_image = False
                else:
                    save_image = True
                chest_joint = (np.asarray(joints[5]) +
                               np.asarray(joints[6])) / 2
                hip_joint = (np.asarray(joints[11]) +
                             np.asarray(joints[12])) / 2
                keypoints[i].append(chest_joint.tolist())
                keypoints[i].append(hip_joint.tolist())

            if vis:
                image_processing.show_image_rects(None, image, [rect])
                # image_processing.show_image_boxes(None, image, joints_bbox, color=(255, 0, 0))
                image = image_processing.draw_key_point_in_image(
                    image, keypoints, pointline=skeleton)
                image_processing.cv_show_image("Det", image, waitKey=1)
                if save_image:
                    out_dir = "/media/dm/dm2/project/dataset/COCO/HumanPose/LeXue_teacher/Posture/tmp1"
                    out_dir = file_processing.create_dir(out_dir)
                    out_image_path = os.path.join(out_dir, image_name)
                    image_processing.save_image(out_image_path, image)
Example #11
            face_landmarks = [[landmark[j], landmark[j + 5]] for j in range(5)]
            landmarks_list.append(face_landmarks)
        landmarks = np.asarray(landmarks_list)
        return landmarks


if __name__ == "__main__":
    # img = Image.open('some_img.jpg')  # modify the image path to yours
    # bounding_boxes, landmarks = detect_faces(img)  # detect bboxes and landmarks for all faces in the image
    # show_results(img, bounding_boxes, landmarks)  # visualize the results
    # image_path = "/media/dm/dm/project/dataset/face_recognition/NVR/JPEGImages/2000.jpg"
    image_path = "/media/dm/dm1/FaceRecognition/torch-Face-Recognize-Pipeline/data/dataset2/zhoujielun/zhoujielun_1.jpg"

    image = image_processing.read_image(image_path, colorSpace="RGB")
    mt = MTCNN()
    bbox_score, landmarks = mt.detect(image)
    bboxes, scores, landmarks = mt.adapter_bbox_score_landmarks(bbox_score, landmarks)
    # image_processing.show_image_boxes("image",image,bboxes)
    # image_processing.show_landmark_boxes("image", image, landmarks, bboxes)
    # image_processing.show_landmark_boxes("image2", image, landmarks, bboxes)
    faces = image_processing.get_bboxes_image(image, bboxes)
    # landmarks2 = mt.landmarks_forward(bbox_score, image)
    # bboxes, scores, landmarks2 = mt.adapter_bbox_score_landmarks(bbox_score, landmarks2)
    # image_processing.show_landmark_boxes("image2", image, landmarks2, bboxes)

    for face in faces:
        image_processing.cv_show_image("face", face)
        image_processing.show_landmark_boxes("image", image, landmarks, bboxes)
        face_landmarks = mt.face_landmarks_forward([face])  # avoid clobbering the detector's landmarks
        image_processing.show_landmark("landmark", face, face_landmarks)
Example #12

    # test_data = TorchDataset(filename=test_filename, image_dir=image_dir, repeat=1)
    train_loader = DataLoader(dataset=train_data,
                              batch_size=batch_size,
                              shuffle=False)
    # test_loader = DataLoader(dataset=test_data, batch_size=batch_size,shuffle=False)

    # [1] iterate by epoch; TorchDataset is created with repeat=1
    for epoch in range(epoch_num):
        for step, batch_data in enumerate(train_loader):
            batch_image = batch_data['image']
            batch_label = batch_data['lable']  # key name as defined by the dataset

            image = batch_image[0, :]
            image = image.numpy()  # image = np.array(image)
            image = image.transpose(1, 2, 0)  # channels [c,h,w] -> [h,w,c]
            image_processing.cv_show_image("image", image)
            print("batch_image.shape:{},batch_label:{}".format(
                batch_image.shape, batch_label))
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
    '''
    In the two approaches below, setting repeat=None on TorchDataset yields an
    endless stream of batches; the loop is exited once max_iterate steps are reached.
    '''
    train_data = TorchDataset(filename=train_filename,
                              image_dir=image_dir,
                              repeat=None)
    train_loader = DataLoader(dataset=train_data,
                              batch_size=batch_size,
                              shuffle=False)
    # [2] second iteration method: iterate over steps directly
    for step, batch_data in enumerate(train_loader):
        if step >= max_iterate:  # max_iterate is assumed to be defined earlier in the script
            break
        batch_image = batch_data['image']
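TorchDataset itself is not shown; from the loops above it evidently yields a dict per sample. A minimal stand-in with the same interface (hypothetical, and without the repeat logic) that would drive the same loops:

import numpy as np
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    """Stand-in returning the dict layout the loops above expect."""
    def __init__(self, n=8):
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        image = np.zeros((3, 32, 32), dtype=np.float32)  # [c, h, w]
        return {'image': image, 'lable': idx}  # key spelled as in the loops above

loader = DataLoader(ToyDataset(), batch_size=4, shuffle=False)
for batch_data in loader:
    print(batch_data['image'].shape, batch_data['lable'])  # e.g. torch.Size([4, 3, 32, 32]) tensor([0, 1, 2, 3])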
Example #13
    alphabet = [
        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
        'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
    ]
    ALPHABET = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    ]
    '''Captcha settings:
    only digits are used for the character set here, and captcha_size=4
    '''
    char_set = number
    # char_set = number + alphabet + ALPHABET  # include these to also predict the 26 lower/upper-case letters
    batch_size = 32
    image_height = 60
    image_width = 160
    depth = 3
    captcha_size = 4
    # generate one captcha sample and display it
    text, image = create_dataset.gen_captcha_text_and_image(
        char_set=char_set,
        captcha_height=image_height,
        captcha_width=image_width,
        captcha_size=captcha_size)
    image_processing.cv_show_image(text, image)  # show the captcha
    print("captcha image shape: {}".format(image.shape))  # (60, 160, 3)
    print("number of captcha characters: {}".format(captcha_size))

    # train
    train(char_set, batch_size, image_height, image_width, depth, captcha_size)
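train itself is not shown. A common label layout for fixed-length captchas, and plausibly what this pipeline consumes, flattens captcha_size one-hot vectors over char_set; with the settings above that is a 4 x 10 = 40-way output. A sketch under that assumption (not this repo's confirmed format):

import numpy as np

def text_to_vector(text, char_set):
    """One-hot encode a captcha string as a (len(text) * len(char_set),) vector."""
    vec = np.zeros(len(text) * len(char_set), dtype=np.float32)
    for pos, ch in enumerate(text):
        vec[pos * len(char_set) + char_set.index(ch)] = 1.0
    return vec

char_set = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
print(text_to_vector("2048", char_set).shape)  # (40,) -> 4 positions x 10 classes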
Example #14
        return faces


if __name__ == "__main__":
    # img = Image.open('some_img.jpg')  # modify the image path to yours
    # bounding_boxes, landmarks = detect_faces(img)  # detect bboxes and landmarks for all faces in the image
    # show_results(img, bounding_boxes, landmarks)  # visualize the results
    # image_path = "/media/dm/dm/project/dataset/face_recognition/NVR/JPEGImages/2000.jpg"
    image_path1 = "/media/dm/dm1/FaceRecognition/torch-Face-Recognize-Pipeline/data/test_images/face1.jpg"
    image_path2 = "/media/dm/dm1/FaceRecognition/torch-Face-Recognize-Pipeline/data/test_images/face2.jpg"

    face1 = image_processing.read_image(image_path1,
                                        colorSpace="RGB",
                                        resize_height=200,
                                        resize_width=100)
    face2 = image_processing.read_image(image_path2,
                                        colorSpace="RGB",
                                        resize_height=100,
                                        resize_width=200)
    lmdet = ONetLandmarkDet(device="cuda:0")
    # image_processing.cv_show_image("face", face)
    # image_processing.show_landmark_boxes("image", image, landmarks, bboxes)
    faces = []
    faces.append(face1)
    faces.append(face2)
    landmarks = lmdet.get_faces_landmarks(faces)
    alig_faces = lmdet.face_alignment(faces, face_resize=[112, 112])
    for i in range(len(faces)):
        image_processing.show_landmark("landmark", faces[i], [landmarks[i]])
        image_processing.cv_show_image("alig_faces", alig_faces[i])