Example #1
    def jsonify(self):

        persons = self.get_persons()
        annotations = []

        for person in persons:

            joints2d = person['pose_2d'].get_joints()
            joints3d = person['pose_3d'].get_joints()

            annot = {
                'id': person['id'],
                'pose_2d': {},
                'pose_3d': {},
                'confidence': person['confidence'].tolist()
            }

            # one named entry per joint: 2D (x, y) and 3D (x, y, z)
            for joint_id in range(PoseConfig.get_total_joints()):
                annot['pose_2d'][PoseConfig.NAMES[joint_id]] = {
                    'x': float(joints2d[joint_id][0]),
                    'y': float(joints2d[joint_id][1])
                }
                annot['pose_3d'][PoseConfig.NAMES[joint_id]] = {
                    'x': float(joints3d[joint_id][0]),
                    'y': float(joints3d[joint_id][1]),
                    'z': float(joints3d[joint_id][2])
                }

            annotations.append(annot)

        return json.dumps(annotations, ensure_ascii=False)
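
For reference, a minimal sketch of the JSON structure this serializer produces, assuming a hypothetical two-joint configuration in place of PoseConfig.NAMES and a single tracked person:

import json
import numpy as np

# hypothetical stand-ins for PoseConfig.NAMES and one detected person
JOINT_NAMES = ['head', 'neck']
joints2d = np.array([[120.0, 80.0], [118.0, 130.0]])
joints3d = np.array([[0.1, 0.2, 1.5], [0.1, 0.3, 1.6]])
confidence = np.array([0.9, 0.8])

annot = {'id': 0, 'pose_2d': {}, 'pose_3d': {}, 'confidence': confidence.tolist()}
for j, name in enumerate(JOINT_NAMES):
    annot['pose_2d'][name] = {'x': float(joints2d[j][0]), 'y': float(joints2d[j][1])}
    annot['pose_3d'][name] = {'x': float(joints3d[j][0]),
                              'y': float(joints3d[j][1]),
                              'z': float(joints3d[j][2])}

print(json.dumps([annot], ensure_ascii=False, indent=2))
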
Example #2
    def predict(self, many_pose_2d):

        if len(many_pose_2d) == 0:
            return []

        features = []

        for pose in many_pose_2d:
            joints = pose.get_joints()
            size = max(pose.to_bbox().get_width(), pose.to_bbox().get_height())

            feat = ((joints - joints.min(0)) / size).reshape(-1)

            features.append(feat)

        features = np.array(features)

        # infer z dimension
        many_z_axis = self.session.run(self.out,
                                       feed_dict={
                                           self.inp: features,
                                           self.dropout_keep_prob: 1.0
                                       })

        many_pose3d = []

        for poseId in range(len(many_pose_2d)):
            # concat z-axis => <x,y,z>
            pose3d = np.zeros((PoseConfig.get_total_joints(), 3))
            pose3d[:, :2] = features[poseId, :].reshape(-1, 2)
            pose3d[:, 2] = many_z_axis[poseId, :]
            many_pose3d.append(Pose3D(pose3d))

        return many_pose3d
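
A minimal sketch of the feature normalization used above, assuming joints is an (N, 2) array of 2D coordinates and the bounding box side lengths are known; the helper name normalize_pose_2d is illustrative, not part of the original code:

import numpy as np

def normalize_pose_2d(joints, bbox_width, bbox_height):
    """Shift the pose to the origin and scale it by the longer bounding box
    side, then flatten to a 1-D feature vector (x0, y0, x1, y1, ...)."""
    size = max(bbox_width, bbox_height)
    return ((joints - joints.min(0)) / size).reshape(-1)

# toy 3-joint pose inside a 200 x 400 box
joints = np.array([[10.0, 20.0], [50.0, 120.0], [30.0, 300.0]])
features = normalize_pose_2d(joints, 200.0, 400.0)
print(features.shape)  # (6,)
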
Example #3
    def our_approach_postprocessing(network_out,
                                    subject_bbox,
                                    input_size,
                                    offsetornot=opt.offset):
        total_joints = PoseConfig.get_total_joints()
        heatmap = network_out[:, :, :total_joints]

        if offsetornot:
            xOff = network_out[:, :, total_joints:(total_joints * 2)]
            yOff = network_out[:, :, (total_joints * 2):]

        confidences = []
        joints = np.zeros((total_joints, 2)) - 1

        for jointId in range(total_joints):

            inlined_pix = heatmap[:, :, jointId].reshape(-1)
            pixId = np.argmax(inlined_pix)

            confidence = inlined_pix[pixId]

            # if max confidence below 0.01 => inactive joint
            if inlined_pix[pixId] < 0.01:
                confidences.append(confidence)
                continue

            outX = pixId % heatmap.shape[1]
            outY = pixId // heatmap.shape[1]

            if offsetornot:
                x = outX / heatmap.shape[1] * input_size[0] + xOff[outY, outX, jointId]
                y = outY / heatmap.shape[0] * input_size[0] + yOff[outY, outX, jointId]
            else:
                x = outX / heatmap.shape[1] * input_size[0]
                y = outY / heatmap.shape[0] * input_size[0]
            x = x / input_size[0]
            y = y / input_size[0]

            joints[jointId, 0] = x
            joints[jointId, 1] = y
            confidences.append(confidence)

        return Pose2D(joints).to_absolute_coordinate_from(subject_bbox), confidences
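
As a sketch of the heatmap decoding step above, assuming a network output without offset channels, i.e. heatmap is an (H, W, J) array of per-joint confidence maps; the helper name decode_heatmaps is illustrative:

import numpy as np

def decode_heatmaps(heatmap, threshold=0.01):
    """Return (J, 2) joint coordinates normalized to [0, 1] plus per-joint
    confidences. Joints whose peak stays below the threshold keep (-1, -1)."""
    total_joints = heatmap.shape[2]
    joints = np.zeros((total_joints, 2)) - 1
    confidences = []
    for joint_id in range(total_joints):
        inlined_pix = heatmap[:, :, joint_id].reshape(-1)
        pix_id = np.argmax(inlined_pix)
        confidence = inlined_pix[pix_id]
        confidences.append(confidence)
        if confidence < threshold:
            continue
        out_x = pix_id % heatmap.shape[1]
        out_y = pix_id // heatmap.shape[1]
        joints[joint_id] = [out_x / heatmap.shape[1], out_y / heatmap.shape[0]]
    return joints, confidences

joints, conf = decode_heatmaps(np.random.rand(46, 46, 16))
print(joints.shape, len(conf))  # (16, 2) 16
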
Example #4
    def _new_person(self, person_id):

        self.person_hash_provider = (self.person_hash_provider + 1) % 1000000

        return {
            'id': person_id,
            'bbox': None,
            'pose_2d': None,
            'confidence': np.array([0.25 for _ in range(PoseConfig.get_total_joints())]),
            'hash': self.person_hash_provider
        }
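
A standalone sketch of the wrap-around identifier used above, assuming the counter starts at zero; the class name is illustrative:

class PersonHashProvider:
    """Hands out person hashes that wrap around after one million values."""

    def __init__(self):
        self.person_hash_provider = 0

    def next_hash(self):
        self.person_hash_provider = (self.person_hash_provider + 1) % 1000000
        return self.person_hash_provider

provider = PersonHashProvider()
print(provider.next_hash(), provider.next_hash())  # 1 2
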
Example #5
    def predict(self, img, subject_bboxes, prev_poses=[]):

        Pose2DInterface.i += 1

        if len(subject_bboxes) == 0:
            return [], []

        cropped_images = []

        # filter out bboxes with no size, ensuring the cropped image is not empty
        filtered_bbox, filtered_poses = [], []

        for subject_id in range(len(subject_bboxes)):

            subject_bbox = subject_bboxes[subject_id]
            subject_bbox_padded = subject_bbox.to_squared(img, self.subject_padding)

            width = int(subject_bbox_padded.get_width() * img.shape[1])
            height = int(subject_bbox_padded.get_height() * img.shape[0])

            if width > 0 and height > 0:
                filtered_bbox.append(subject_bboxes[subject_id])
                if subject_id < len(prev_poses):
                    filtered_poses.append(prev_poses[subject_id])

        subject_bboxes, prev_poses = filtered_bbox, filtered_poses

        # crop each subject and hide the other subjects' bodies
        for subject_id in range(len(subject_bboxes)):

            subject_bbox = subject_bboxes[subject_id]
            subject_bbox_padded = subject_bbox.to_squared(img, self.subject_padding)

            ada_bboxes, adaPoses, subject_id_to_keep = [], [], subject_id

            for i in range(len(subject_bboxes)):

                curr_bbox = subject_bboxes[i]
                curr_bbox = curr_bbox.intersect(subject_bbox_padded)

                if curr_bbox is None: #intersection is empty
                    if i < subject_id:
                        subject_id_to_keep -= 1
                    continue

                curr_bbox = curr_bbox.translate(-subject_bbox_padded.get_min_x(), -subject_bbox_padded.get_min_y())
                curr_bbox = curr_bbox.scale(1.0 / subject_bbox_padded.get_width(), 1.0 / subject_bbox_padded.get_height())

                ada_bboxes.append(curr_bbox)

                if i < len(prev_poses) and prev_poses[i] is not None:
                    adaPoses.append(prev_poses[i].to_relative_coordinate_into(subject_bbox_padded))
                else:
                    adaPoses.append(None)

            cropped_img = subject_bbox_padded.crop(img)
            cropped_img = self.body_cover.hide_strangers(cropped_img, ada_bboxes, subject_id_to_keep, adaPoses)
            cropped_img = cv2.resize(cropped_img, (self.input_size, self.input_size), interpolation=cv2.INTER_AREA)
            # scale pixel values from [0, 255] into [-1, 1]
            cropped_img = cropped_img.astype(np.float32) / (255.0 / 2.0) - 1.0
            cropped_images.append(cropped_img)

        # infer the cropped images

        out = np.zeros((0, PoseConfig.get_total_joints() * 3))

        if len(cropped_images) > 0:
            out = self.session.run(self.output, feed_dict={self.image: cropped_images})

        # decode outputs
        poses_2d, confidences = [], []

        for subject_id in range(out.shape[0]):

            # 1.- recover the pose inside the cropped image from the confidence heatmaps
            curr_heatmaps = out[subject_id, :, :, :]
            cropped_image_bbox = subject_bboxes[subject_id].to_squared(img, self.subject_padding)

            curr_pose_2d, curr_confidences = self.post_processing(curr_heatmaps, cropped_image_bbox, self.input_size)

            poses_2d.append(curr_pose_2d)
            confidences.append(curr_confidences)

        return poses_2d, confidences
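
For reference, a minimal sketch of the per-crop preprocessing performed before inference above, assuming a square crop has already been extracted; the function name and the input size of 224 are illustrative:

import cv2
import numpy as np

def preprocess_crop(crop, input_size=224):
    """Resize a cropped subject to the network input size and scale pixel
    values from [0, 255] into [-1, 1], matching the normalization above."""
    resized = cv2.resize(crop, (input_size, input_size), interpolation=cv2.INTER_AREA)
    return resized.astype(np.float32) / (255.0 / 2.0) - 1.0

crop = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
batch = np.stack([preprocess_crop(crop)])
print(batch.shape)  # (1, 224, 224, 3)
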