Code Example #1
    def data_augmentation_v1(self, dataset_path):
        print('Generating augmented images...\n')

        image_paths_list = []
        labels = []

        augmentations_dict = {
            'horizontal-flip': 1,
            'random-crop': 1,
            'gaussian-blur': 1,
            'contrast-norm': 1,
            'additive-gaussian-noise': 1,
        }
        augmentation_types = [
            k for k, v in augmentations_dict.items() if v == 1
        ]

        for augmentation_type in augmentation_types:
            print('augmentation_type', augmentation_type)

            for label_name, label_no in self.config.labels.items():

                # Read image paths
                image_paths = utils_image.get_images_path_list_from_dir(
                    os.path.join(dataset_path, label_name),
                    img_format=self.config.dataset_path_image_format)
                image_paths = image_paths[0:5]  # limit to the first 5 images per label

                for idx, image_path in enumerate(image_paths):

                    img = Image.open(image_path)
                    img = np.array(img)

                    # TODO: Do center cropping
                    # img = cv2.imread(image_paths[idx])
                    # img = cv2.resize(img, (224, 224))

                    img_aug = self.data_augmentation_sequence(
                        img, augmentation_type)

                    os.makedirs(os.path.join(
                        self.config.dataset_path_train_aug, label_name),
                                exist_ok=True)
                    img_name, img_ext = os.path.basename(
                        image_path).rsplit('.', 1)

                    img_path_name = os.path.join(
                        self.config.dataset_path_train_aug, label_name,
                        img_name + '_' + augmentation_type + '.' + img_ext)

                    utils_image.save_image(img_aug, img_path_name)
                    print('img_path_name', img_path_name, idx)
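
The snippet above delegates the actual transform to self.data_augmentation_sequence, which is not shown here. A minimal sketch of that dispatch, assuming the imgaug library (the augmenter choices and parameters below are illustrative, not taken from the project):

import imgaug.augmenters as iaa

# Illustrative mapping from the keys in augmentations_dict to imgaug augmenters.
AUGMENTERS = {
    'horizontal-flip': iaa.Fliplr(1.0),                    # always mirror
    'random-crop': iaa.Crop(percent=(0, 0.1)),             # crop up to 10% per side
    'gaussian-blur': iaa.GaussianBlur(sigma=(0.0, 2.0)),
    'contrast-norm': iaa.LinearContrast((0.75, 1.5)),
    'additive-gaussian-noise': iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 255)),
}

def data_augmentation_sequence(img, augmentation_type):
    # img is an HxWxC uint8 numpy array; returns the augmented copy.
    return AUGMENTERS[augmentation_type](image=img)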
Code Example #2
File: skeleton_vis.py  Project: fudiGeng/DCPose
def draw_skeleton_in_origin_image(batch_image_list, batch_joints_list, batch_bbox_list, save_dir, vis_skeleton=True, vis_bbox=True):
    """
    :param batch_image_list:  batch image path
    :param batch_joints_list:   joints coordinates in image Coordinate reference system
    :batch_bbox_list: xyxy
    :param save_dir:
    :return: No return
    """

    skeleton_image_save_folder = osp.join(save_dir, "skeleton")
    bbox_image_save_folder = osp.join(save_dir, "bbox")
    together_save_folder = osp.join(save_dir, "SkeletonAndBbox")

    if vis_skeleton and vis_bbox:
        save_folder = together_save_folder
    elif vis_bbox:
        save_folder = bbox_image_save_folder
    else:
        save_folder = skeleton_image_save_folder

    batch_final_coords = batch_joints_list

    for index, image_path in enumerate(batch_image_list):
        final_coords = batch_final_coords[index]
        final_coords = coco2posetrack_ord_infer(final_coords)
        bbox = batch_bbox_list[index]

        image_name = image_path[image_path.index("images") + len("images") + 1:]
        # image_name = image_path[image_path.index("frames") + len("frames") + 1:]

        vis_image_save_path = osp.join(save_folder, image_name)
        # Draw on top of an existing visualization if one was saved earlier;
        # otherwise start from the original frame.
        if osp.exists(vis_image_save_path):
            processed_image = read_image(vis_image_save_path)
        else:
            processed_image = read_image(image_path).copy()

        if vis_skeleton:
            processed_image = add_poseTrack_joint_connection_to_image(
                processed_image, final_coords, sure_threshold=0.2,
                flag_only_draw_sure=True)
        if vis_bbox:
            processed_image = add_bbox_in_image(processed_image, bbox)

        save_image(vis_image_save_path, processed_image)
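
A hypothetical call, assuming the three lists are aligned by frame index, each joints entry is a (num_joints, 3) array of (x, y, score), and the image paths contain an "images" directory segment (the slicing in the function depends on it):

batch_images = ["data/images/video01/000001.jpg",
                "data/images/video01/000002.jpg"]        # must contain "images"
batch_joints = [joints_frame1, joints_frame2]            # per-frame model output
batch_bboxes = [[12, 30, 140, 310], [15, 28, 142, 312]]  # xyxy boxes
draw_skeleton_in_origin_image(batch_images, batch_joints, batch_bboxes,
                              save_dir="output/vis",
                              vis_skeleton=True, vis_bbox=True)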
Code Example #3
    def wrap_data_image(self, image_path, output_path=None):

        # Load the image, resize it, and convert it to raw bytes.
        img_size = (self.config.tfr_image_width, self.config.tfr_image_height)

        img = Image.open(image_path)
        img = img.resize(size=img_size, resample=Image.LANCZOS)
        img = np.array(img)

        if output_path is not None:
            img_path_name = os.path.join(os.path.dirname(output_path),
                                         os.path.basename(image_path))
            utils_image.save_image(img, img_path_name)

        img_bytes = img.tobytes()  # tostring() is deprecated in NumPy

        return tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[img_bytes]))
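
On the read side, the serialized feature has to be decoded back into an image tensor. A minimal sketch of the matching parse function, assuming the TF 1.x API used throughout these examples and the 'image' feature key from Code Example #5:

import tensorflow as tf

def parse_image_feature(serialized_example, width, height):
    # Decode one serialized Example back into a uint8 image tensor.
    features = tf.parse_single_example(
        serialized_example,
        features={'image': tf.FixedLenFeature([], tf.string)})
    image = tf.decode_raw(features['image'], tf.uint8)
    return tf.reshape(image, [height, width, 3])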
Code Example #4
def video():
    logger.info("Start")
    base_video_path = "./input"
    base_img_vis_save_dirs = './output/vis_img'
    json_save_base_dirs = './output/json'
    create_folder(json_save_base_dirs)
    video_list = list_immediate_childfile_paths(base_video_path,
                                                ext=['mp4'])  # video files only
    input_image_save_dirs = []
    SAVE_JSON = True
    SAVE_VIS_VIDEO = True
    SAVE_VIS_IMAGE = True
    SAVE_BOX_IMAGE = True
    base_img_vis_box_save_dirs = './output/vis_img_box'
    # 1. Split the videos into images

    for video_path in tqdm(video_list):
        video_name = osp.basename(video_path)
        temp = video_name.split(".")[0]
        image_save_path = os.path.join(base_video_path, temp)
        image_vis_save_path = os.path.join(base_img_vis_save_dirs, temp)
        image_vis_box_save_path = os.path.join(base_img_vis_box_save_dirs,
                                               temp)
        input_image_save_dirs.append(image_save_path)

        create_folder(image_save_path)
        create_folder(image_vis_save_path)
        create_folder(image_vis_box_save_path)

        video2images(video_path, image_save_path)  # jpg

    # 2. Person Instance detection
    logger.info("Person Instance detection in progress ...")
    video_candidates = {}
    for index, images_dir in tqdm(enumerate(input_image_save_dirs)):
        # if index >= 1:
        #     continue
        video_name = osp.basename(images_dir)
        image_list = list_immediate_childfile_paths(images_dir, ext='jpg')
        video_candidates_list = []
        for image_path in tqdm(image_list):
            candidate_bbox = inference_yolov3(image_path)
            for bbox in candidate_bbox:
                # bbox  - x, y, w, h
                video_candidates_list.append({
                    "image_path": image_path,
                    "bbox": bbox,
                    "keypoints": None
                })
        video_candidates[video_name] = {
            "candidates_list": video_candidates_list,
            "length": len(image_list)
        }
    logger.info("Person Instance detection finish")
    # 3. Single Person Pose Estimation
    logger.info("Single person pose estimation in progress ...")
    for video_name, video_info in video_candidates.items():
        video_candidates_list = video_info["candidates_list"]
        video_length = video_info["length"]
        prev_image_id = None
        for person_info in tqdm(video_candidates_list):
            image_path = person_info["image_path"]
            xywh_box = person_info["bbox"]
            print(os.path.basename(image_path))
            image_idx = int(os.path.basename(image_path).replace(".jpg", ""))
            # Previous/next frame indices, clamped to the video range below
            prev_idx, next_id = image_idx - 1, image_idx + 1
            if prev_idx < 0:
                prev_idx = 0
            if image_idx >= video_length - 1:
                next_id = video_length - 1
            prev_image_path = os.path.join(
                os.path.dirname(image_path),
                "{}.jpg".format(str(prev_idx).zfill(zero_fill)))
            next_image_path = os.path.join(
                os.path.dirname(image_path),
                "{}.jpg".format(str(next_id).zfill(zero_fill)))

            # current_image = read_image(image_path)
            # prev_image = read_image(prev_image_path)
            # next_image = read_image(next_image_path)

            bbox = xywh_box
            keypoints = inference_PE(image_path, prev_image_path,
                                     next_image_path, bbox)
            person_info["keypoints"] = keypoints.tolist()[0]

            # posetrack points
            new_coord = coco2posetrack_ord_infer(keypoints[0])
            # pose
            if SAVE_VIS_IMAGE:
                image_save_path = os.path.join(
                    os.path.join(base_img_vis_save_dirs, video_name),
                    image_path.split("/")[-1])
                if osp.exists(image_save_path):
                    current_image = read_image(image_save_path)
                else:
                    current_image = read_image(image_path)
                pose_img = add_poseTrack_joint_connection_to_image(
                    current_image,
                    new_coord,
                    sure_threshold=0.3,
                    flag_only_draw_sure=True)
                save_image(image_save_path, pose_img)

            if SAVE_BOX_IMAGE:
                image_save_path = os.path.join(
                    os.path.join(base_img_vis_box_save_dirs, video_name),
                    image_path.split("/")[-1])
                if osp.exists(image_save_path):
                    current_image = read_image(image_save_path)
                else:
                    current_image = read_image(image_path)
                xyxy_box = (bbox[0], bbox[1],
                            bbox[0] + bbox[2], bbox[1] + bbox[3])
                box_image = add_bbox_in_image(current_image, xyxy_box)
                save_image(image_save_path, box_image)

        if SAVE_JSON:
            joints_info = {"Info": video_candidates_list}
            temp = "result_" + video_name + ".json"
            write_json_to_file(joints_info,
                               os.path.join(json_save_base_dirs, temp))
            print("------->json Info save Complete!")
            print("------->Visual Video Compose Start")
        if SAVE_VIS_VIDEO:
            image2video(os.path.join(base_img_vis_save_dirs, video_name),
                        video_name)
            print("------->Complete!")
Code Example #5
    def create_tfrecord(self, image_paths, labels, idx_start, idx_end,
                        output_path):

        # Open a TFRecordWriter for the output-file.
        with tf.python_io.TFRecordWriter(output_path) as writer:

            for i in range(idx_start, idx_end):

                utils.print_progress(count=(i - idx_start),
                                     total=(idx_end - idx_start))

                image_path = image_paths[i]
                label = labels[i]

                # Alternative (OpenCV) loading, kept for reference:
                # img = cv2.imread(image_paths[i])
                # img = cv2.resize(img, (224, 224))

                # Load images
                img = Image.open(image_path)

                # Center-crop and resize; size is a (width, height) 2-tuple in pixels.
                img = ImageOps.fit(img, (self.config.tfr_image_width,
                                         self.config.tfr_image_height),
                                   Image.LANCZOS, 0, (0.5, 0.5))
                # img = img.resize(size=(self.config.tfr_image_width, self.config.tfr_image_height))

                img = np.array(img)

                # Save a debug copy of the preprocessed image next to the TFRecord.
                if output_path is not None:
                    img_path_name = os.path.join(os.path.dirname(output_path),
                                                 os.path.basename(image_path))
                    utils_image.save_image(img, img_path_name)

                # Color constancy (optional, currently disabled)
                # img = utils_image.color_constancy(img, power=6, gamma=None)
                # if output_path is not None:
                #     img_path_name = os.path.join(os.path.dirname(output_path), os.path.basename(image_path))
                #     img_path_name = img_path_name.split('.')[0] + '_ilu.' + img_path_name.split('.')[1]

                #     # utils_image.save_image(img, img_path_name)
                #     img_save = Image.fromarray(img.astype('uint8'))
                #     img_save.save(img_path_name)

                # Convert the image to raw bytes.
                img_bytes = img.tobytes()  # tostring() is deprecated in NumPy

                data = {
                    'image': self.wrap_bytes(img_bytes),
                    'label': self.wrap_int64(label)
                }

                # Wrap the data as TensorFlow Features.
                feature = tf.train.Features(feature=data)

                # Wrap again as a TensorFlow Example.
                example = tf.train.Example(features=feature)

                # Serialize the data.
                serialized = example.SerializeToString()

                # Write the serialized data to the TFRecords file.
                writer.write(serialized)
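
The wrap_bytes and wrap_int64 helpers referenced in the data dict are not shown; they are most likely the standard TFRecord feature wrappers, sketched here under that assumption:

    def wrap_bytes(self, value):
        # Wrap a raw byte string as a TFRecord bytes feature.
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def wrap_int64(self, value):
        # Wrap a Python int as a TFRecord int64 feature.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))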