def load_and_display_random_sample(self, dataset, ds_datacfg=None):
        '''
        ## Load and display random samples
        '''
        print("load_and_display_random_sample::-------------------------------->")

        image_ids = np.random.choice(dataset.image_ids, 4)
        class_names = dataset.class_names

        print("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
        print("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

        for image_id in image_ids:
            datacfg = None
            if ds_datacfg:
                info = dataset.image_info[image_id]
                print("info: {}".format(info))
                ds_source = info['source']
                # print("ds_source:{}".format(ds_source))
                datacfg = utils.get_datacfg(ds_datacfg, ds_source)

            image = dataset.load_image(image_id, datacfg)
            mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)

            print("keys: {}".format(keys))
            print("values: {}".format(values))
            print("class_ids: {}".format(class_ids))
            ## Compute Bounding box
            # bbox = utils.extract_bboxes(mask)

            ## Display image and instances

            self.display_masks(image, mask, class_ids, class_names)

    def test_load_and_display_dataset(self, dataset, ds_datacfg=None):
        '''
        ## Test loading of the dataset for any image and mask loading issues
        '''
        print("test_load_and_display_dataset::-------------------------------->")

        image_ids = dataset.image_ids
        class_names = dataset.class_names

        print("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
        print("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

        total_annotation = 0
        for image_id in image_ids:
            # print("image_id:{}".format(image_id))
            # image = dataset.load_image(image_id)
            datacfg = None
            if ds_datacfg:
                info = dataset.image_info[image_id]
                ds_source = info['source']
                # print("ds_source: {}".format(ds_source))
                datacfg = utils.get_datacfg(ds_datacfg, ds_source)

            mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
            total_annotation += len(class_ids)

        print("total_annotation: {}".format(total_annotation))

    def load_mini_masks(self, dataset, config, ds_datacfg):
        '''
        ## Mini Masks

        Instance binary masks can get large when training with high-resolution images.
        For example, training with a 1024x1024 image makes the mask of a single instance
        require 1MB of memory (NumPy uses one byte per boolean value). If an image has
        100 instances, that's 100MB for the masks alone.

        To improve training speed, we optimize masks in two ways:
        * We store only the mask pixels that are inside the object bounding box, rather
        than a mask of the full image. Most objects are small compared to the image size,
        so we save space by not storing a lot of zeros around the object.
        * We resize the mask to a smaller size (e.g. 56x56). For objects larger than the
        selected size we lose a bit of accuracy, but most object annotations are not very
        accurate to begin with, so this loss is negligible for most practical purposes.
        The size of the mini mask can be set in the config class.
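
        A rough memory comparison (one byte per boolean mask pixel):
            full mask at 1024x1024: 1024 * 1024 bytes ~= 1 MB per instance
            mini mask at 56x56:     56 * 56 bytes ~= 3 KB per instance (~330x smaller)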

        To see the effect of mask resizing, and to verify code correctness, we
        display some examples.
        '''
        print("load_mini_masks::-------------------------------->")

        image_id = np.random.choice(dataset.image_ids, 1)[0]
        print("image_id: {}".format(image_id))

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

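        # use_mini_mask=False loads the full-size masks here; this gives a
        # baseline to compare against the mini-mask version in add_augmentation().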
        image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
            dataset, datacfg, config, image_id, use_mini_mask=False)

        log("image", image)
        log("image_meta", image_meta)
        log("class_ids", class_ids)
        log("bbox", bbox)
        log("mask", mask)

        display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])

        ## Display image and instances
        class_names = dataset.class_names
        self.display_instances(image, bbox, mask, class_ids, class_names)

        return image_id

    def load_and_display_random_single_sample(self, dataset, ds_datacfg=None):
        '''
        ## Bounding Boxes
        Rather than using the bounding box coordinates provided by the source datasets,
        we compute the bounding boxes from the masks instead. This lets us handle
        bounding boxes consistently regardless of the source dataset, and it also makes
        it easier to resize, rotate, or crop images: we simply regenerate the bounding
        boxes from the updated masks rather than computing a bounding box transformation
        for each type of image transformation.

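        A minimal sketch of the mask/box convention assumed here (Mask_RCNN-style):
            # mask: [height, width, num_instances] boolean array
            # bbox = utils.extract_bboxes(mask)
            # bbox: [num_instances, (y1, x1, y2, x2)]
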
        ## Load random single image and mask.
        '''
        print("load_and_display_random_single_sample::-------------------------------->")

        image_ids = dataset.image_ids
        class_names = dataset.class_names

        image_id = random.choice(image_ids)

        print("dataset: image_id: {}".format(image_id))
        print("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        image = dataset.load_image(image_id, datacfg)
        mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)

        # Compute Bounding box
        bbox = utils.extract_bboxes(mask)

        # Display image and additional stats
        log("image_id", image_id)
        log("image", image)
        log("mask", mask)
        log("class_ids", class_ids)
        log("bbox", bbox)

        ## Display image and instances

        self.display_masks(image, mask, class_ids, class_names)
        self.display_instances(image, bbox, mask, class_ids, class_names)

    def visualize_anchors_at_center(self, dataset, ds_datacfg, config, backbone_shapes, anchors, anchors_per_level, anchors_per_cell):
        '''
        ## Visualize anchors of one cell at the center of the feature map of a specific level
        '''
        print("visualize_anchors_at_center::-------------------------------->")
        # Load and draw random image
        image_id = np.random.choice(dataset.image_ids, 1)[0]

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        image, image_meta, _, _, _ = modellib.load_image_gt(dataset, datacfg, config, image_id)
        fig, ax = plt.subplots(1, figsize=(10, 10))
        ax.imshow(image)
        levels = len(backbone_shapes)

        colors = visualize.random_colors(levels)
        for level in range(levels):
            # Compute the index of the anchors at the center of the image
            level_start = sum(anchors_per_level[:level]) # sum of anchors of previous levels
            level_anchors = anchors[level_start:level_start+anchors_per_level[level]]
            print("Level {}. Anchors: {:6}  Feature map Shape: {}".format(level, level_anchors.shape[0], 
                                                                          backbone_shapes[level]))
            # Flat index of the anchors at the center cell of this level's feature map.
            # When anchors are generated every RPN_ANCHOR_STRIDE cells, the row/column
            # indices must be scaled down by the stride.
            center_cell = backbone_shapes[level] // 2
            center_anchor = anchors_per_cell * (
                (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2) \
                + center_cell[1] / config.RPN_ANCHOR_STRIDE)
            level_center = int(center_anchor)
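            # Worked example (hypothetical numbers): on a 32x32 feature map with
            # RPN_ANCHOR_STRIDE=1 and anchors_per_cell=3, center_cell=(16, 16),
            # so level_center = 3 * (16*32 + 16) = 1584.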

            # Draw anchors. Brightness shows the order in the array, from dark to bright.
            for i, rect in enumerate(level_anchors[level_center:level_center+anchors_per_cell]):
                y1, x1, y2, x2 = rect
                p = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=2, facecolor='none',
                                      edgecolor=(i+1)*np.array(colors[level]) / anchors_per_cell)
                ax.add_patch(p)

    def load_and_resize_images(self, dataset, dnncfg, ds_datacfg):
        '''
        ## Resize Images
        To support multiple images per batch, images are resized to one size (1024x1024).
        Aspect ratio is preserved: if an image is not square, zero padding is
        added at the top/bottom or left/right.
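
        The resize contract assumed below (Mask_RCNN-style utils.resize_image):
            # image, window, scale, padding, _ = utils.resize_image(...)
            # window:  (y1, x1, y2, x2) of the original image inside the padded canvas
            # scale:   factor by which the original image was scaled
            # padding: [(top, bottom), (left, right), (0, 0)] zero padding added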
        '''
        print("load_and_resize_images::-------------------------------->")

        image_id = np.random.choice(dataset.image_ids, 1)[0]

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        image = dataset.load_image(image_id, datacfg)
        mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
        original_shape = image.shape
        # Resize
        image, window, scale, padding, _ = utils.resize_image(
            image,
            min_dim=dnncfg.IMAGE_MIN_DIM,
            max_dim=dnncfg.IMAGE_MAX_DIM,
            mode=dnncfg.IMAGE_RESIZE_MODE)
        mask = utils.resize_mask(mask, scale, padding)
        # Compute Bounding box
        bbox = utils.extract_bboxes(mask)

        # Display image and additional stats
        print("Original shape: ", original_shape)
        log("image", image)
        log("mask", mask)
        log("class_ids", class_ids)
        log("bbox", bbox)

        ## Display image and instances
        class_names = dataset.class_names
        self.display_instances(image, bbox, mask, class_ids, class_names)

    def load_and_display_dataset(self, dataset, ds_datacfg=None):
        '''
        ## Display Dataset
        ## Load and display images and masks.
        '''
        print("load_and_display_dataset::-------------------------------->")

        image_ids = dataset.image_ids
        class_names = dataset.class_names

        # print("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
        # print("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

        total_annotation = 0
        total_annotation_per_image_map = {}
        for image_id in image_ids:
            # print("image_id:{}".format(image_id))
            # image = dataset.load_image(image_id)
            datacfg = None
            if ds_datacfg:
                info = dataset.image_info[image_id]
                ds_source = info['source']
                # print("ds_source: {}".format(ds_source))
                datacfg = utils.get_datacfg(ds_datacfg, ds_source)

            mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
            # print("class_ids: {}".format(class_ids))
            if image_id not in total_annotation_per_image_map:
                total_annotation_per_image_map[image_id] = []
            total_annotation_per_image_map[image_id].append(class_ids)

            total_annotation += len(class_ids)

        print("total_annotation: {}".format(total_annotation))

        return total_annotation

    def add_augmentation(self, dataset, config, ds_datacfg, image_id):
        '''
        # Add augmentation and mask resizing.
        '''
        print("add_augmentation::-------------------------------->")

        # image_id = image_id if image_id is not None else np.random.choice(dataset.image_ids, 1)[0]

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        # Add augmentation and mask resizing.
        image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
            dataset, datacfg, config, image_id, augment=True, use_mini_mask=True)
        log("mask", mask)
        display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
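        # Mini masks are stored at bounding-box resolution; expand them back to
        # full image size for display.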
        mask = utils.expand_mask(bbox, mask, image.shape)

        ## Display image and instances
        class_names = dataset.class_names
        self.display_instances(image, bbox, mask, class_ids, class_names)