def segment_background(arguments):
    # parse segmentation method
    try:
        seg_method_enum = SegmentationMethods(arguments.method)
        TerminalColors.formatted_print("Segmentation method: " + seg_method_enum.name,
                                       TerminalColors.OKBLUE)
    except ValueError:
        raise ValueError("invalid segmentation method: " + arguments.method)

    # create segmentation object and perform segmentation
    if seg_method_enum == SegmentationMethods.BACKGROUND_SUBTRACTION:
        bg_segmentor = BackgroundSubtraction(arguments.data_directory, arguments.class_annotations)
    else:
        raise ValueError('unsupported segmentation method: ' + seg_method_enum.name)
    bg_segmentor.segment()
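
# The '--class-annotations' file used by this and most of the following examples is
# loaded with yaml.load() and iterated as (class_id, class_name) pairs. As a purely
# illustrative sketch (the class names below are made up), such a file could be
# written like this:
import yaml

example_class_dict = {1: 'cup', 2: 'bowl', 3: 'spoon'}
with open('class_annotations.yaml', 'w') as outfile:
    # produces lines of the form "1: cup"
    yaml.dump(example_class_dict, outfile, default_flow_style=False)
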
def collect_images(topic_name, image_dir, class_ann_file, sleep_time):
    TerminalColors.formatted_print('Image directory: ' + image_dir,
                                   TerminalColors.OKBLUE)
    if not os.path.isdir(image_dir):
        if prompt_for_yes_or_no(
                "'{}' does not exist, create it?".format(image_dir)):
            print('creating ' + image_dir)
            os.mkdir(image_dir)
        else:
            TerminalColors.formatted_print(
                "Not creating '{}'. Exiting program.".format(image_dir),
                TerminalColors.WARNING)
            return

    TerminalColors.formatted_print('class annotation file: ' + class_ann_file,
                                   TerminalColors.OKBLUE)
    if not os.path.exists(class_ann_file):
        raise RuntimeError("Class annotation file does not exist: " +
                           class_ann_file)
    with open(class_ann_file, 'r') as infile:
        class_dict = yaml.load(infile, Loader=yaml.FullLoader)

    bridge = cv_bridge.CvBridge()
    for cls_id, cls_name in class_dict.items():
        cls_img_dir = os.path.join(image_dir, cls_name)
        TerminalColors.formatted_print(
            "collecting images for class '{}' in '{}'".format(
                cls_name, cls_img_dir), TerminalColors.BOLD)
        if not os.path.isdir(cls_img_dir):
            print("creating '{}'".format(cls_img_dir))
            os.mkdir(cls_img_dir)
        while True:
            num_image = int(
                prompt_for_float('please enter number of images to take'))
            if num_image < 1:
                TerminalColors.formatted_print(
                    'please input a positive integer for the number of images',
                    TerminalColors.WARNING)
                continue
            collect_images_single_class(bridge, topic_name, cls_img_dir,
                                        cls_name, num_image, sleep_time)
            if not prompt_for_yes_or_no(
                    "do you want to take more pictures for class '{}' (e.g. different pespective)?"
                    .format(cls_name)):
                break
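
# collect_images_single_class is referenced above but not shown in this example.
# A minimal sketch of what it presumably does, assuming it waits for sensor_msgs/Image
# messages on the given topic, converts them with cv_bridge and writes numbered files:
import os
import time

import cv2
import rospy
from sensor_msgs.msg import Image as ImageMsg


def collect_images_single_class(bridge, topic_name, cls_img_dir, cls_name, num_image,
                                sleep_time):
    for idx in range(num_image):
        # block until the next image arrives on the topic
        img_msg = rospy.wait_for_message(topic_name, ImageMsg)
        cv_image = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        img_path = os.path.join(cls_img_dir, '{}_{:03d}.jpg'.format(cls_name, idx))
        cv2.imwrite(img_path, cv_image)
        # short pause so the object/camera can be moved between shots
        time.sleep(sleep_time)
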
    def __init__(self, class_id, obj_img_dir, obj_mask_dir):
        self.class_id = class_id

        # check directories
        if not os.path.isdir(obj_img_dir):
            raise RuntimeError(
                "image folder '{}' is not an existing directory".format(
                    obj_img_dir))
        if not os.path.isdir(obj_mask_dir):
            raise RuntimeError(
                "mask folder '{}' is not an existing directory".format(
                    obj_mask_dir))

        # glob object images
        obj_img_paths = glob_extensions_in_directory(obj_img_dir,
                                                     ALLOWED_IMAGE_EXTENSIONS)
        if not obj_img_paths:
            raise RuntimeError(
                "found no image of supported types in '{}'".format(
                    obj_img_dir))

        # load images and masks
        print("found '{}' object images in '{}'".format(
            len(obj_img_paths), obj_img_dir))
        self._segmented_objects = []
        for img_path in obj_img_paths:
            # check expected path to mask
            mask_path = get_image_mask_path(img_path, obj_mask_dir)
            if not os.path.exists(mask_path):
                TerminalColors.formatted_print(
                    "skipping image '{}': mask '{}' does not exist".format(
                        img_path, mask_path), TerminalColors.WARNING)
                continue

            # add SegmentedObject instance for image-mask pair
            try:
                self._segmented_objects.append(
                    SegmentedObject(img_path, mask_path))
            except Exception as e:
                TerminalColors.formatted_print(
                    "failed to process image '{}' and mask '{}': {}".format(
                        img_path, mask_path, e), TerminalColors.FAIL)
                continue
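
# get_image_mask_path is used above but not defined in this snippet. Assumption: it maps
# an object image to the mask file of the same base name inside the mask directory (the
# '.png' mask extension is a guess); elsewhere in these examples it is also called with a
# list of mask paths, so the real helper may be more flexible. Minimal sketch:
import os


def get_image_mask_path(img_path, obj_mask_dir):
    img_name = os.path.splitext(os.path.basename(img_path))[0]
    return os.path.join(obj_mask_dir, img_name + '.png')
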
    def __init__(self, data_dir, class_annotation_file, confirmation=True):
        self._confirmation = confirmation

        # check data directory
        self._data_dir = data_dir
        TerminalColors.formatted_print("Data directory: " + data_dir, TerminalColors.OKBLUE)
        if not os.path.isdir(self._data_dir):
            raise RuntimeError("'{}' is not an existing directory".format(self._data_dir))

        # check directory with green box images
        self._image_dir = os.path.join(data_dir, 'green_box_images')
        if not os.path.isdir(self._image_dir):
            raise RuntimeError("'{}' is not an existing directory".format(self._image_dir))

        # check directory to store object masks
        self._mask_dir = os.path.join(data_dir, 'object_masks')
        TerminalColors.formatted_print("Will generate object masks to: " + self._mask_dir, TerminalColors.OKBLUE)
        if not os.path.exists(self._mask_dir):
            print("creating directory '{}'".format(self._mask_dir))
            os.mkdir(self._mask_dir)
        elif os.listdir(self._mask_dir) and self._confirmation:
            if not prompt_for_yes_or_no("Mask directory '{}' not empty, overwrite?".format(self._mask_dir)):
                raise RuntimeError("directory '{}' not empty, not overwriting".format(self._mask_dir))

        # load class annotation file
        if not os.path.exists(class_annotation_file):
            raise RuntimeError("class annotation YAML does not exist: " + class_annotation_file)
        with open(class_annotation_file, 'r') as infile:
            self._class_dict = yaml.load(infile, Loader=yaml.FullLoader)
        TerminalColors.formatted_print("Found '{}' classes in annotation file '{}'"
                                       .format(len(self._class_dict), class_annotation_file), TerminalColors.OKBLUE)
def generate_masks_and_annotations(data_dir, class_annotation_file, output_dir, output_annotation_dir, display_boxes):
    augmenter = ImageAugmenter(data_dir, class_annotation_file)
    if not output_dir:
        output_dir = os.path.join(data_dir, 'synthetic_images')
    if not output_annotation_dir:
        output_annotation_dir = os.path.join(data_dir, 'annotations')
    TerminalColors.formatted_print("begin generating images under '{}' and annotation files under '{}'"
                                   .format(output_dir, output_annotation_dir), TerminalColors.OKBLUE)
    if not os.path.isdir(output_dir):
        print("creating directory: " + output_dir)
        os.mkdir(output_dir)
    if not os.path.isdir(output_annotation_dir):
        print("creating directory: " + output_annotation_dir)
        os.mkdir(output_annotation_dir)

    while True:
        # ask user for split name, number of images to generate per background, and maximum
        # number of objects per background
        split_name = None
        while not split_name:
            split_name = input("please enter split name (e.g. 'go2019_train'): ")

        num_image_per_bg = -1
        while num_image_per_bg < 0:
            num_image_per_bg = int(prompt_for_float("please enter the number of images to be generated"
                                                    " for each background"))

        max_obj_num_per_bg = int(prompt_for_float("please enter the maximum number of objects to be projected"
                                                  " onto each background"))

        # generate images
        augmenter.generate_detection_data(split_name, output_dir, output_annotation_dir, num_image_per_bg,
                                          max_obj_num_per_bg, display_boxes=display_boxes)

        if not prompt_for_yes_or_no("do you want to generate images for another dataset split?"):
            break
    def segment(self):
        for class_name in self._class_dict.values():
            # check directory containing object images
            class_img_dir = os.path.join(self._image_dir, class_name)
            if not os.path.isdir(class_img_dir):
                TerminalColors.formatted_print("skipping class '{}': '{}' is not an existing directory"
                                               .format(class_name, class_img_dir), TerminalColors.WARNING)
                continue

            # glob all supported image files & warn if no image found
            image_paths = glob_extensions_in_directory(class_img_dir, ALLOWED_IMAGE_EXTENSIONS)
            if not image_paths:
                TerminalColors.formatted_print(
                    "skipping class '{}': directory '{}' does not contain any supported images"
                    .format(class_name, class_img_dir), TerminalColors.WARNING)
                continue

            # Handle algorithm-specific segmentation
            TerminalColors.formatted_print("Generating masks for class '{}' from '{}' images"
                                           .format(class_name, len(image_paths)), TerminalColors.BOLD)
            self._segment_class(class_name, image_paths)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Script to generate training images and annotations for bounding box based object detection."
                    " This is done by randomly projecting segemented object pixels onto backgrounds, then"
                    " calculating the corresponding bounding boxes.")
    parser.add_argument('--data-directory', '-d', required=True,
                        help='directory where the script will look for images, backgrounds and saved object masks')
    parser.add_argument('--class-annotations', '-c', required=True,
                        help='file containing mappings from class ID to class name')
    parser.add_argument('--output-dir', '-o', default=None,
                        help='(optional) directory to store generated images')
    parser.add_argument('--output-annotation-dir', '-a', default=None,
                        help='(optional) directory to store the generated YAML annotations')
    parser.add_argument('--display-boxes', '-b', action='store_true',
                        help='(optional) whether to display the synthetic images with visualized bounding boxes')
    args = parser.parse_args()

    try:
        generate_masks_and_annotations(args.data_directory, args.class_annotations, args.output_dir,
                                       args.output_annotation_dir, args.display_boxes)
        TerminalColors.formatted_print('image and annotation generation complete', TerminalColors.OKGREEN)
    except KeyboardInterrupt:
        TerminalColors.formatted_print('\nscript interrupted', TerminalColors.WARNING)
    except Exception as e:
        TerminalColors.formatted_print(e, TerminalColors.FAIL)
        raise
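
# Example invocation (the script name and paths below are made up):
#   python generate_detection_images.py \
#       --data-directory ~/object_data \
#       --class-annotations ~/object_data/classes.yaml \
#       --display-boxes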
Example #8
def create_tf_record(image_annotation_path, class_annotation_path, output_path,
                     image_dir, num_shards):
    if tfrecords_exist(output_path) \
            and not prompt_for_yes_or_no("shards for '{}' exist, overwrite?".format(output_path)):
        # make sure user want to overwrite TFRecord
        TerminalColors.formatted_print('not overwriting TFRecord, exiting..',
                                       TerminalColors.WARNING)
        return

    # Load class file
    if not os.path.exists(class_annotation_path):
        TerminalColors.formatted_print(
            'class annotation file does not exist: ' + class_annotation_path,
            TerminalColors.FAIL)
        return
    with open(class_annotation_path, 'r') as yml_file:
        class_dict = yaml.load(yml_file, Loader=yaml.FullLoader)
    TerminalColors.formatted_print(
        "\nfound '{}' classes in file '{}'".format(len(class_dict),
                                                   class_annotation_path),
        TerminalColors.OKBLUE)

    # Load annotations file
    if not os.path.exists(image_annotation_path):
        TerminalColors.formatted_print(
            'image annotation file does not exist: ' + image_annotation_path,
            TerminalColors.FAIL)
        return
    with open(image_annotation_path, 'r') as annotations_f:
        annotations = yaml.load(annotations_f, Loader=yaml.FullLoader)
    num_annotations = len(annotations)
    TerminalColors.formatted_print(
        "found '{}' image annotations in file '{}'".format(
            num_annotations, image_annotation_path), TerminalColors.OKBLUE)

    TerminalColors.formatted_print(
        'number of TFRecord shards: {}\n'.format(num_shards),
        TerminalColors.OKBLUE)

    with contextlib2.ExitStack() as tf_record_close_stack:
        output_tfrecords = open_sharded_output_tfrecords(
            tf_record_close_stack, output_path, num_shards)

        for idx, example in enumerate(annotations):
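            # distribute annotations round-robin across the shards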
            output_shard_index = idx % num_shards
            print_progress(idx + 1,
                           num_annotations,
                           prefix="Generating TFRecord for image ")

            image_path = example['image_name']
            if image_dir:
                # prepend image directory if specified
                image_path = os.path.join(image_dir, image_path)

            try:
                tf_example = create_bbox_detection_tf_example(
                    image_path, example, class_dict)
            except RuntimeError as e:
                TerminalColors.formatted_print(str(e), TerminalColors.FAIL)
                continue

            output_tfrecords[output_shard_index].write(
                tf_example.SerializeToString())
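
# open_sharded_output_tfrecords is not shown above. Assumption: it behaves like the
# TensorFlow Object Detection API helper of the same name, opening one TFRecord writer
# per shard on the ExitStack so that all writers are closed together. Rough sketch:
import tensorflow as tf


def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
    shard_names = ['{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
                   for idx in range(num_shards)]
    # registering each writer on the ExitStack ensures it is closed when the
    # surrounding 'with contextlib2.ExitStack()' block exits
    return [exit_stack.enter_context(tf.io.TFRecordWriter(name))
            for name in shard_names]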
Example #9
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'annotation_file',
        help="YAML file containing the image annotations")
    parser.add_argument(
        'class_file',
        help="YAML file containing the mapping from class ID to class name")
    parser.add_argument('output_file',
                        help="path where the TFRecord shards should be written,"
                        " e.g. './robocup_train.record'")
    parser.add_argument(
        '--image_dir',
        '-d',
        default=None,
        help="if specified, will prepend to image paths in annotation file")
    parser.add_argument('--num_shards',
                        '-n',
                        default=1,
                        type=int,
                        help="number of fragments to split the TFRecord into")
    args = parser.parse_args()

    try:
        create_tf_record(args.annotation_file, args.class_file,
                         args.output_file, args.image_dir, args.num_shards)
        TerminalColors.formatted_print('TFRecord generation complete',
                                       TerminalColors.OKGREEN)
    except KeyboardInterrupt:
        TerminalColors.formatted_print('\nscript interrupted',
                                       TerminalColors.WARNING)
    except Exception as e:
        TerminalColors.formatted_print(e, TerminalColors.FAIL)
        raise
    parser.add_argument('--class-file',
                        required=True,
                        help="YAML file which contains the mappings from class ID to class names")
    parser.add_argument('--sleep-time',
                        '-s',
                        default=0.5,
                        help="Delay in seconds between taking each picture")
    args = parser.parse_args()

    try:
        import rospy
        import cv_bridge
        from sensor_msgs.msg import Image as ImageMsg
    except ImportError:
        TerminalColors.formatted_print(
            'This script is meant to work with ROS; please run it on a ROS-enabled system.',
            TerminalColors.FAIL)
        sys.exit(1)

    rospy.init_node('image_collector', anonymous=True)
    try:
        collect_images(args.topic_name, args.image_dir, args.class_file,
                       args.sleep_time)
        TerminalColors.formatted_print('image collection complete',
                                       TerminalColors.OKGREEN)
    except KeyboardInterrupt:
        TerminalColors.formatted_print('\nscript interrupted',
                                       TerminalColors.WARNING)
    except Exception as e:
        TerminalColors.formatted_print(e, TerminalColors.FAIL)
        raise ValueError("invalid segmentation method: " + arguments.method)

    # create segmentation object and perform segmentation
    if seg_method_enum == SegmentationMethods.BACKGROUND_SUBTRACTION:
        bg_segmentor = BackgroundSubtraction(arguments.data_directory, arguments.class_annotations)
    else:
        raise ValueError('unsupported segmentation method: ' + seg_method_enum.name)
    bg_segmentor.segment()


if __name__ == '__main__':
    # Note: using builtin var '__doc__' as script description
    parser = argparse.ArgumentParser(formatter_class=RawDescriptionAndDefaultsFormatter, description=__doc__)

    parser.add_argument('--method', '-m', choices=SegmentationMethods.values(),
                        default=SegmentationMethods.BACKGROUND_SUBTRACTION.value,
                        help='background segmentation method')
    parser.add_argument('--data-directory', '-d', required=True,
                        help='directory where the script will look for images, backgrounds and save object masks')
    parser.add_argument('--class-annotations', '-c', required=True,
                        help='file containing mapping from class ID to class name')

    try:
        segment_background(parser.parse_args())
        TerminalColors.formatted_print('segmentation complete', TerminalColors.OKGREEN)
    except KeyboardInterrupt:
        TerminalColors.formatted_print('\nscript interrupted', TerminalColors.WARNING)
    except Exception as e:
        TerminalColors.formatted_print(e, TerminalColors.FAIL)
        raise
Example #12
    def generate_detection_data(self,
                                split_name,
                                output_dir_images,
                                output_dir_masks,
                                output_annotation_dir,
                                max_obj_num_per_bg,
                                augmentation_config_file_path,
                                num_images_per_bg=10,
                                write_chunk_ratio=0.05,
                                invert_mask=False,
                                annotation_format=AnnotationFormats.CUSTOM):
        """Generates:
        * synthetic images under <output_dir>/synthetic_images/<split_name>
        * image annotations under <output_dir>/annotations

        If annotation_format is equal to AnnotationFormats.VOC, an annotation
        file is generated for each image under <output_dir>/annotations/split_name,
        such that the name of the annotation is the same as the name of the
        image file (e.g. if the image name is train_01.jpg, the name of the
        annotation file will be train_01.xml).
        """
        split_output_dir_images = os.path.join(output_dir_images, split_name)
        split_output_dir_masks = os.path.join(output_dir_masks, split_name)
        TerminalColors.formatted_print(
            "generating images for split '{}' under '{}'".format(
                split_name, split_output_dir_images), TerminalColors.BOLD)
        TerminalColors.formatted_print(
            "generating masks for split '{}' under '{}'".format(
                split_name, split_output_dir_masks), TerminalColors.BOLD)
        # check output image directory
        if not os.path.isdir(split_output_dir_images):
            print("creating directory: " + split_output_dir_images)
            os.mkdir(split_output_dir_images)
        elif os.listdir(split_output_dir_images):
            if not prompt_for_yes_or_no("directory '{}' not empty. Overwrite?".
                                        format(split_output_dir_images)):
                raise RuntimeError(
                    "not overwriting '{}'".format(split_output_dir_images))

        if not os.path.isdir(split_output_dir_masks):
            print("creating directory: " + split_output_dir_masks)
            os.mkdir(split_output_dir_masks)
        elif os.listdir(split_output_dir_masks):
            if not prompt_for_yes_or_no("directory '{}' not empty. Overwrite?".
                                        format(split_output_dir_masks)):
                raise RuntimeError(
                    "not overwriting '{}'".format(split_output_dir_masks))

        config_params = None
        if os.path.isfile(augmentation_config_file_path):
            config_params = read_config_params(augmentation_config_file_path)
        else:
            raise RuntimeError('Config {0} is not a valid file'.format(
                augmentation_config_file_path))

        # check output annotation file
        annotation_file_path = os.path.join(output_annotation_dir,
                                            split_name + '.yaml')
        TerminalColors.formatted_print(
            "generating annotations for split '{}' in '{}'".format(
                split_name, annotation_file_path), TerminalColors.BOLD)
        if os.path.isfile(annotation_file_path):
            if not prompt_for_yes_or_no("file '{}' exists. Overwrite?".format(
                    annotation_file_path)):
                raise RuntimeError(
                    "not overwriting '{}'".format(annotation_file_path))

        if annotation_format == AnnotationFormats.VOC:
            annotation_dir = os.path.join(output_annotation_dir, split_name)
            if not os.path.isdir(annotation_dir):
                os.mkdir(annotation_dir)
            else:
                if not prompt_for_yes_or_no("directory '{}' exists. Overwrite?"
                                            .format(annotation_dir)):
                    raise RuntimeError(
                        "not overwriting '{}'".format(annotation_dir))

        # Total number of images = number of backgrounds * images per background
        total_img_cnt = len(self._background_paths) * num_images_per_bg
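        # zero-pad the image index in file names to the width of the total count
        # (e.g. 3 digits when 250 images will be generated)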
        zero_pad_num = len(str(total_img_cnt))
        annotations = {}

        # Prepare multiprocessing
        img_cnt = mp.Value('i', 0)
        lock = mp.Lock()
        pool = mp.Pool(initializer=self.setup, initargs=[img_cnt, lock])

        for bg_idx in tqdm(range(len(self._background_paths))):
            bg_path = self._background_paths[bg_idx]

            # load the background; cv2.imread returns None (rather than raising) on failure
            bg_img = cv2.imread(bg_path)
            if bg_img is None:
                TerminalColors.formatted_print(
                    "Ignoring background {}: file could not be read".format(bg_path),
                    TerminalColors.WARNING)
                continue

            bg_img_params = [(bg_img, max_obj_num_per_bg, invert_mask, split_name, zero_pad_num,
                              split_output_dir_images, split_output_dir_masks, config_params, seed)
                             for seed in range(num_images_per_bg)]

            annotations_per_bg = pool.map(self.create_image, bg_img_params)

            for img_file_name, box_annotations in annotations_per_bg:
                annotations[img_file_name] = box_annotations

            # Writing annotations
            if print_progress(img_cnt.value + 1,
                              total_img_cnt,
                              prefix="creating image ",
                              fraction=write_chunk_ratio):
                # periodically dump annotations
                self.save_annotations(annotations, split_output_dir_images,
                                      output_annotation_dir, split_name,
                                      annotation_file_path, annotation_format)
                annotations = {}

        self.save_annotations(annotations, split_output_dir_images,
                              output_annotation_dir, split_name,
                              annotation_file_path, annotation_format)
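
    # 'setup' (used as the mp.Pool initializer above) and 'create_image' are not shown in
    # this example. Assumption: setup only publishes the shared counter and lock to the
    # worker processes, roughly like this minimal sketch:
    def setup(self, shared_img_cnt, shared_lock):
        # store the shared objects at module level so that create_image workers can
        # update the global image counter under the lock
        global img_cnt, lock
        img_cnt = shared_img_cnt
        lock = shared_lock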
Example #13
    def _load_objects_per_class(self, class_id, obj_img_dir, obj_mask_dir):
        # check directories
        if not os.path.isdir(obj_img_dir):
            raise RuntimeError(
                "image folder '{}' is not an existing directory".format(
                    obj_img_dir))
        if not os.path.isdir(obj_mask_dir):
            raise RuntimeError(
                "mask folder '{}' is not an existing directory".format(
                    obj_mask_dir))

        # glob object images
        obj_img_paths = glob_extensions_in_directory(obj_img_dir,
                                                     ALLOWED_IMAGE_EXTENSIONS)
        if not obj_img_paths:
            raise RuntimeError(
                "found no image of supported types in '{}'".format(
                    obj_img_dir))

        # glob mask images
        obj_mask_paths = glob_extensions_in_directory(
            obj_mask_dir, ALLOWED_IMAGE_EXTENSIONS)
        if not obj_mask_paths:
            raise RuntimeError(
                "found no mask of supported types in '{}'".format(obj_mask_dir))

        # load images and masks
        print("found '{}' object images in '{}'".format(
            len(obj_img_paths), obj_img_dir))
        print("found '{}' object masks in '{}'".format(len(obj_mask_paths),
                                                       obj_mask_dir))

        img_indices = list(range(len(obj_img_paths)))
        np.random.shuffle(img_indices)

        img_count = 0
        current_img_idx = 0
        img_paths = []
        mask_paths = []
        while img_count < self._num_objects_per_class and current_img_idx < len(
                img_indices):
            try:
                # check whether the current image has a corresponding mask
                img_path = obj_img_paths[img_indices[current_img_idx]]
                mask_path = get_image_mask_path(img_path, obj_mask_paths)

                # even if both files exist, load them to verify that they are valid
                # images; cv2.imread returns None (rather than raising) for unreadable files
                if cv2.imread(img_path) is None or cv2.imread(mask_path) is None:
                    raise RuntimeError(
                        "could not read image '{}' or mask '{}'".format(img_path, mask_path))
                img_paths.append(img_path)
                mask_paths.append(mask_path)
                img_count += 1
            except Exception as exc:
                TerminalColors.formatted_print(
                    "[load_objects_per_class] Error: {0}. Skipping image".
                    format(exc), TerminalColors.WARNING)
            current_img_idx += 1

        print("{} images saved per class {}".format(len(img_paths), class_id))
        self._object_collections[class_id] = {
            'images': img_paths,
            'masks': mask_paths
        }
Example #14
    def __init__(self, data_dir, background_dir, class_annotation_file,
                 num_objects_per_class):
        # check required directories
        TerminalColors.formatted_print('Data directory: ' + data_dir,
                                       TerminalColors.OKBLUE)
        if not os.path.isdir(data_dir):
            raise RuntimeError(
                "'{}' is not an existing directory".format(data_dir))

        TerminalColors.formatted_print(
            'Background directory: ' + background_dir, TerminalColors.OKBLUE)
        if not os.path.isdir(background_dir):
            raise RuntimeError(
                "'{}' is not an existing directory".format(background_dir))

        # load backgrounds for image augmentation
        self._background_paths = glob_extensions_in_directory(
            background_dir, ALLOWED_IMAGE_EXTENSIONS)
        TerminalColors.formatted_print(
            'Found {} background images '.format(len(self._background_paths)),
            TerminalColors.OKBLUE)

        # Saving number of objects per class
        self._num_objects_per_class = num_objects_per_class

        # load class annotation file
        TerminalColors.formatted_print(
            'Class annotation file: {}'.format(class_annotation_file),
            TerminalColors.OKBLUE)
        if not os.path.exists(class_annotation_file):
            raise RuntimeError('class annotation file does not exist: ' +
                               class_annotation_file)
        with open(class_annotation_file, 'r') as infile:
            self._class_dict = yaml.load(infile, Loader=yaml.FullLoader)
        # load segmented objects
        self._object_collections = {}
        TerminalColors.formatted_print(
            "Loading object masks for '{}' classes".format(
                len(self._class_dict)), TerminalColors.OKBLUE)

        for cls_id, (cls_name, _) in self._class_dict.items():
            obj_dir = os.path.join(data_dir, cls_name)
            obj_img_dir = os.path.join(obj_dir, 'images')
            obj_mask_dir = os.path.join(obj_dir, 'masks')

            try:
                TerminalColors.formatted_print(
                    "loading images and masks for class '{}'".format(cls_name),
                    TerminalColors.BOLD)
                # segmented_obj = SegmentedObjectCollection(cls_id, obj_img_dir, obj_mask_dir)
                # self._segmented_object_collections[cls_id] = segmented_obj
                self._load_objects_per_class(cls_id, obj_img_dir, obj_mask_dir)
            except RuntimeError as e:
                TerminalColors.formatted_print(
                    "skipping class '{}': {}".format(cls_name, e),
                    TerminalColors.WARNING)
                continue

        self._object_collections_copy = copy.deepcopy(self._object_collections)
    def generate_detection_data(self,
                                split_name,
                                output_dir,
                                output_annotation_dir,
                                num_image_per_bg,
                                max_obj_num_per_bg,
                                display_boxes=False,
                                write_chunk_ratio=0.05):
        """
        The main function which generate
        - generate synthetic images under <outpu_dir>/<split_name>
        - generate
        """
        split_output_dir = os.path.join(output_dir, split_name)
        TerminalColors.formatted_print(
            "generating images for split '{}' under '{}'".format(
                split_name, split_output_dir), TerminalColors.BOLD)
        # check output image directory
        if not os.path.isdir(split_output_dir):
            print("creating directory: " + split_output_dir)
            os.mkdir(split_output_dir)
        elif os.listdir(split_output_dir):
            if not prompt_for_yes_or_no("directory '{}' not empty. Overwrite?".
                                        format(split_output_dir)):
                raise RuntimeError(
                    "not overwriting '{}'".format(split_output_dir))

        # check output annotation file
        annotation_path = os.path.join(output_annotation_dir,
                                       split_name + '.yml')
        TerminalColors.formatted_print(
            "generating annotations for split '{}' in '{}'".format(
                split_name, annotation_path), TerminalColors.BOLD)
        if os.path.isfile(annotation_path):
            if not prompt_for_yes_or_no(
                    "file '{}' exists. Overwrite?".format(annotation_path)):
                raise RuntimeError(
                    "not overwriting '{}'".format(annotation_path))

        # store a reasonable value for the maximum number of objects projected onto each background
        if max_obj_num_per_bg <= 0 or max_obj_num_per_bg > len(
                self.class_dict):
            max_obj_num_per_bg = len(self.class_dict)

        # generate images and annotations
        img_cnt = 0
        total_img_cnt = num_image_per_bg * len(self._augment_backgrounds)
        zero_pad_num = len(str(total_img_cnt))
        annotations = []
        for bg_img in self._augment_backgrounds:
            for _ in range(num_image_per_bg):
                if print_progress(img_cnt + 1,
                                  total_img_cnt,
                                  prefix="creating image ",
                                  fraction=write_chunk_ratio):
                    # periodically dump annotations
                    with open(annotation_path, 'a') as annotation_file:
                        yaml.dump(annotations,
                                  annotation_file,
                                  default_flow_style=False)
                    annotations = []

                # generate new image
                generated_image, box_annotations = self.generate_single_image(
                    bg_img, max_obj_num_per_bg)
                if display_boxes:
                    drawn_img = draw_labeled_boxes(generated_image,
                                                   box_annotations,
                                                   self.class_dict)
                    display_image_and_wait(drawn_img, 'box image')

                # write image and annotations
                img_file_name = '{}_{}.jpg'.format(
                    split_name,
                    str(img_cnt).zfill(zero_pad_num))
                img_file_path = os.path.join(split_output_dir, img_file_name)
                annotations.append({
                    'image_name': img_file_path,
                    'objects': box_annotations
                })
                cv2.imwrite(img_file_path, generated_image)
                img_cnt += 1
    def __init__(self, data_dir, class_annotation_file):
        # check required directories
        self._data_dir = data_dir
        TerminalColors.formatted_print('Data directory: ' + self._data_dir,
                                       TerminalColors.OKBLUE)
        if not os.path.isdir(self._data_dir):
            raise RuntimeError("'{}' is not an existing directory".format(
                self._data_dir))

        self._greenbox_image_dir = os.path.join(self._data_dir,
                                                'green_box_images')
        print('will look for object green box images in: ' +
              self._greenbox_image_dir)
        if not os.path.isdir(self._greenbox_image_dir):
            raise RuntimeError("'{}' is not an existing directory".format(
                self._greenbox_image_dir))

        self._mask_dir = os.path.join(self._data_dir, 'object_masks')
        print('will look for object masks in: ' + self._mask_dir)
        if not os.path.isdir(self._mask_dir):
            raise RuntimeError("'{}' is not an existing directory".format(
                self._mask_dir))

        augment_bg_dir = os.path.join(self._data_dir,
                                      'augmentation_backgrounds')
        print('will look for backgrounds for image augmentation in: ' +
              augment_bg_dir)
        if not os.path.isdir(augment_bg_dir):
            raise RuntimeError(
                "'{}' is not an existing directory".format(augment_bg_dir))

        # load backgrounds for image augmentation
        background_paths = glob_extensions_in_directory(
            augment_bg_dir, ALLOWED_IMAGE_EXTENSIONS)
        print("found '{}' background images".format(len(background_paths)))
        self._augment_backgrounds = []
        for bg_path in background_paths:
            bg_img = cv2.imread(bg_path)
            self._augment_backgrounds.append(bg_img)

        # load class annotation file
        TerminalColors.formatted_print(
            'Class annotation file: {}'.format(class_annotation_file),
            TerminalColors.OKBLUE)
        if not os.path.exists(class_annotation_file):
            raise RuntimeError('class annotation file does not exist: ' +
                               class_annotation_file)
        with open(class_annotation_file, 'r') as infile:
            self.class_dict = yaml.load(infile, Loader=yaml.FullLoader)

        # load segmented objects
        TerminalColors.formatted_print(
            "Loading object masks for '{}' classes".format(len(
                self.class_dict)), TerminalColors.OKBLUE)
        self._segmented_object_collections = {}
        for cls_id, cls_name in self.class_dict.items():
            obj_img_dir = os.path.join(self._greenbox_image_dir, cls_name)
            obj_mask_dir = os.path.join(self._mask_dir, cls_name)

            try:
                TerminalColors.formatted_print(
                    "loading images and masks for class '{}'".format(cls_name),
                    TerminalColors.BOLD)
                segmented_obj = SegmentedObjectCollection(
                    cls_id, obj_img_dir, obj_mask_dir)
                self._segmented_object_collections[cls_id] = segmented_obj
            except RuntimeError as e:
                TerminalColors.formatted_print(
                    "skipping class '{}': {}".format(cls_name, e),
                    TerminalColors.WARNING)
                continue