Example #1
    def prepare_image(self, image_id):
        """Use the config to resize a COCO image and its instance masks.

        Returns:
        image: [height, width, 3]
        class_ids: [instance_count] Integer class IDs
        bbox: [instance_count, (y1, x1, y2, x2)]
        mask: [height, width, instance_count]. The height and width are those
            of the image.
        gt_cy: [instance_count] Instance gravity-center y coordinates.
        gt_cx: [instance_count] Instance gravity-center x coordinates.
        Returns None if no instance mask covers more than 16 pixels.
        """
        # Load image and mask
        image = self.load_image(image_id=image_id)
        mask, class_ids = self.load_mask(image_id=image_id)
        original_shape = image.shape
        image, window, scale, padding, crop = cocoutils.resize_image(
            image,
            min_dim=self.config.IMAGE_MIN_DIM,
            min_scale=self.config.IMAGE_MIN_SCALE,
            max_dim=self.config.IMAGE_MAX_DIM,
            mode=self.config.IMAGE_RESIZE_MODE)
        mask = cocoutils.resize_mask(mask, scale, padding, 0, crop)
        # Keep instances whose mask covers more than 16 pixels.
        _idx = np.sum(mask, axis=(0, 1)) > 16
        class_ids = class_ids[_idx]
        if len(class_ids) != 0:
            # Keep only the instances that survived the size filter.
            # mask: [height, width, num_instances]
            mask = mask[:, :, _idx]
            # Bounding boxes. Note that some boxes might be all zeros
            # if the corresponding mask got cropped out.
            # bbox: [num_instances, (y1, x1, y2, x2)]
            bbox = cocoutils.extract_bboxes(mask)
            gt_cy, gt_cx = cocoutils.gravity_center(mask)
            return image, class_ids, bbox, mask, gt_cy, gt_cx
        print("return nothing")
        return None
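
A minimal usage sketch for the method above. The dataset instance and image id are hypothetical; the point is how the return value unpacks, or comes back as None when every mask falls under the 16-pixel threshold:

# `dataset` is assumed to be an instance of the class defining prepare_image.
result = dataset.prepare_image(image_id=0)
if result is not None:
    image, class_ids, bbox, mask, gt_cy, gt_cx = result
    # image: [H, W, 3], mask: [H, W, num_instances]
    print(image.shape, mask.shape, class_ids)
else:
    print("no usable instances for this image")
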
Example #2
import numpy as np
import torch
from torch.autograd import Variable  # legacy wrapper; plain tensors work in PyTorch >= 0.4


def generate_stuff(config, semantic_segmentation):
    # semantic_segmentation: [batch, num_classes, h, w] logits; returns
    # results: [num_detections, (y1, x1, y2, x2, class_id)] and
    # masks:   [num_detections, h, w]
    if config.GPU_COUNT:
        segments = semantic_segmentation.squeeze(0).data.cpu().numpy()
    else:
        segments = semantic_segmentation.squeeze(0).data.numpy()

    pred = np.argmax(segments, axis=0)

    results = []
    masks = []
    for i in range(config.THING_NUM_CLASSES,
                   config.THING_NUM_CLASSES + config.STUFF_NUM_CLASSES):
        mask = (pred == i)
        mask = np.where(mask, 1, 0)
        count = np.sum(mask)
        if count > config.STUFF_THRESHOLD:
            mask = mask.reshape(1, mask.shape[0], mask.shape[1])
            reshaped_mask = mask.transpose(1, 2, 0)
            bbox = extract_bboxes(reshaped_mask)  # 1,4

            r = np.array([[i]], dtype=np.float32)  # 1,1
            r = np.concatenate((bbox, r), axis=1)  # 1,5
            results.append(r)
            masks.append(mask)  # each mask: [1, h, w]

    if len(results) > 0 and len(masks) > 0:
        results = Variable(torch.from_numpy(np.concatenate(results)).float())
        masks = Variable(torch.from_numpy(np.concatenate(masks)))
    else:
        results = Variable(torch.from_numpy(np.array(results)).float())
        masks = Variable(torch.from_numpy(np.array(masks)).float())
    return pred, results, masks
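
The argmax-and-threshold logic above can be exercised in isolation. A minimal sketch with a dummy logits tensor; the class count and area threshold are made-up values, not from any real config:

import numpy as np
import torch

# Dummy semantic logits: [1, num_classes, H, W], random values, purely to
# exercise the argmax + per-class mask path used by generate_stuff.
num_classes = 5
logits = torch.randn(1, num_classes, 64, 64)
segments = logits.squeeze(0).numpy()
pred = np.argmax(segments, axis=0)            # [H, W] class-id map
for i in range(num_classes):
    mask = (pred == i).astype(np.uint8)
    if mask.sum() > 100:                      # made-up area threshold
        reshaped = mask[:, :, np.newaxis]     # [H, W, 1] for extract_bboxes
        print(i, mask.sum(), reshaped.shape)
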
Example #3
    def prepare_image(self, image_id, augment=False, augmentation=None):
        """Use the config to resize a COCO image and its instance masks,
        with optional augmentation.

        augment: (deprecated. Use augmentation instead). If true, apply random
            image augmentation. Currently, only horizontal flipping is offered.
        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
            For example, passing imgaug.augmenters.Fliplr(0.5) flips images
            right/left 50% of the time.

        Returns:
        image: [height, width, 3]
        class_ids: [instance_count] Integer class IDs
        bbox: [instance_count, (y1, x1, y2, x2)]
        gt_cy: [instance_count] Instance gravity-center y coordinates.
        gt_cx: [instance_count] Instance gravity-center x coordinates.
        mask: [height, width, instance_count]. The height and width are those
            of the image.
        Returns None if no instance mask covers more than 48 pixels.
        """
        # Load image and mask
        image = self.load_image(image_id)
        mask, class_ids = self.load_mask(image_id)
        image, window, scale, padding, crop = cocoutils.resize_image(
            image,
            min_dim=self.config.IMAGE_MIN_DIM,
            min_scale=self.config.IMAGE_MIN_SCALE,
            max_dim=self.config.IMAGE_MAX_DIM,
            mode=self.config.IMAGE_RESIZE_MODE)
        mask = cocoutils.resize_mask(mask, scale, padding, 0, crop)

        # Random horizontal flips.
        # TODO: will be removed in a future update in favor of augmentation
        if augment:
            logging.warning(
                "'augment' is deprecated. Use 'augmentation' instead.")
            if random.randint(0, 1):
                image = np.fliplr(image)
                mask = np.fliplr(mask)

        # Augmentation
        # This requires the imgaug lib (https://github.com/aleju/imgaug)
        if augmentation:
            import imgaug

            # Augmenters that are safe to apply to masks
            # Some, such as Affine, have settings that make them unsafe, so always
            # test your augmentation on masks
            MASK_AUGMENTERS = [
                "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr",
                "Flipud", "CropAndPad", "Affine", "PiecewiseAffine"
            ]

            def hook(images, augmenter, parents, default):
                """Determines which augmenters to apply to masks."""
                return augmenter.__class__.__name__ in MASK_AUGMENTERS

            # Store shapes before augmentation to compare
            image_shape = image.shape
            mask_shape = mask.shape
            # Make augmenters deterministic to apply similarly to images and masks
            det = augmentation.to_deterministic()
            image = det.augment_image(image)
            # Change mask to np.uint8 because imgaug doesn't support np.bool
            mask = det.augment_image(mask.astype(np.uint8),
                                     hooks=imgaug.HooksImages(activator=hook))
            # Verify that shapes didn't change
            assert image.shape == image_shape, "Augmentation shouldn't change image size"
            assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
            # Change mask back to bool
            mask = mask.astype(bool)  # np.bool was removed in NumPy 1.24

        # Keep instances whose mask covers more than 48 pixels.
        _idx = np.sum(mask, axis=(0, 1)) > 48
        class_ids = class_ids[_idx]
        if len(class_ids) != 0:
            # Keep only the instances that survived the size filter.
            # mask: [height, width, num_instances]
            mask = mask[:, :, _idx]
            # Bounding boxes. Note that some boxes might be all zeros
            # if the corresponding mask got cropped out.
            # bbox: [num_instances, (y1, x1, y2, x2)]
            bbox = cocoutils.extract_bboxes(mask)
            gt_cy, gt_cx = cocoutils.gravity_center(mask)

            return image, class_ids, bbox, gt_cy, gt_cx, mask
        return None
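
The augmentation parameter expects an imgaug augmenter. A minimal sketch of building one from the mask-safe augmenters whitelisted above; the particular pipeline is illustrative, not taken from the original project:

import imgaug.augmenters as iaa

# Built only from augmenters that the MASK_AUGMENTERS whitelist treats as
# safe for masks. Affine settings can still be unsafe; verify on masks.
augmentation = iaa.Sometimes(0.5, iaa.Sequential([
    iaa.Fliplr(0.5),               # horizontal flip half the time
    iaa.Affine(rotate=(-10, 10)),  # small rotations
]))
# Hypothetical call on an instance of the class above:
# sample = dataset.prepare_image(image_id=0, augmentation=augmentation)
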
Example #4
def load_image_gt(dataset,
                  config,
                  image_id,
                  augment=False,
                  augmentation=None,
                  use_mini_mask=False):
    # Load the image and its instance masks
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # print("\nbefore:",image_id,np.shape(mask),np.shape(class_ids))
    # Original shape
    original_shape = image.shape
    # Resize the image; get the window of the original image inside the new
    # one, the scale, and the padding
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)
    # print("\nafter:",np.shape(mask),np.shape(class_ids))
    # print(np.shape(image),np.shape(mask))
    # 可以把图片进行翻转
    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    if augmentation:
        import imgaug
        # Augmenters that are safe to apply to masks
        MASK_AUGMENTERS = [
            "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud",
            "CropAndPad", "Affine", "PiecewiseAffine"
        ]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        image_shape = image.shape
        mask_shape = mask.shape
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        mask = mask.astype(bool)  # np.bool was removed in NumPy 1.24
    # Filter out instances whose masks ended up empty (e.g. cropped out)
    _idx = np.sum(mask, axis=(0, 1)) > 0

    # print("\nafterer:",np.shape(mask),np.shape(_idx))
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Compute the bounding box of each mask
    bbox = utils.extract_bboxes(mask)

    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]
                                                ["source"]]
    active_class_ids[source_class_ids] = 1

    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Compose the image meta data
    image_meta = utils.compose_image_meta(image_id, original_shape,
                                          image.shape, window, scale,
                                          active_class_ids)

    return image, image_meta, class_ids, bbox, mask
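
The empty-mask filter used above (np.sum(mask, axis=(0, 1)) > 0) is easy to see on toy data; a self-contained sketch:

import numpy as np

# Toy masks: [H, W, num_instances]; the middle instance is empty, as if
# cropping had removed it entirely.
mask = np.zeros((4, 4, 3), dtype=bool)
mask[0, 0, 0] = True
mask[2:4, 2:4, 2] = True
class_ids = np.array([7, 3, 5])

_idx = np.sum(mask, axis=(0, 1)) > 0  # keep instances with any pixels left
print(_idx)                           # [ True False  True]
print(class_ids[_idx])                # [7 5]
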
Example #5
def parse_fn(image_id,
             dataset,
             anchors_path,
             augmentation=None,
             dtype=np.float32,
             max_num_boxes_per_image=20,
             image_size=416):
    """Load and return ground truth data for an image (image, mask, bounding boxes)."""

    image = dataset.load_image(image_id)
    # original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image, min_dim=0, min_scale=0, max_dim=image_size, mode='square')

    mask, class_ids = dataset.load_mask(image_id)

    mask = utils.resize_mask(mask, scale, padding, crop)

    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = [
            "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud",
            "CropAndPad", "Affine", "PiecewiseAffine"
        ]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool
        mask = mask.astype(bool)  # np.bool was removed in NumPy 1.24

    # Some masks might be empty if the corresponding instance was cropped
    # out during resizing; filter them out here.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    if mask.shape[-1] > max_num_boxes_per_image:
        ids = np.random.choice(np.arange(mask.shape[-1]),
                               max_num_boxes_per_image,
                               replace=False)
        class_ids = class_ids[ids]
        bbox = bbox[ids, :]

    # confs = np.ones((bbox.shape[0], 1), dtype=dtype)
    # bbox = np.concatenate([bbox, confs], axis=-1)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    # active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    # source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    # active_class_ids[source_class_ids] = 1

    # image_meta = utils.compose_image_meta(image_id, original_shape, image.shape,
    #                                       window, scale, active_class_ids)
    # image_meta.astype(dtype)

    # gt_mask = np.zeros((mask.shape[0], mask.shape[1], 20), mask.dtype)
    gt_class_ids = np.zeros(max_num_boxes_per_image, class_ids.dtype)
    gt_bbox = np.zeros((max_num_boxes_per_image, bbox.shape[1]), bbox.dtype)
    # gt_data = np.zeros((max_num_boxes_per_image, bbox.shape[1] + dataset.num_classes), dtype=dtype)

    if class_ids.shape[0] > 0:
        gt_class_ids[:class_ids.shape[0]] = class_ids
        # gt_mask[:, :, :mask.shape[-1]] = mask
        gt_bbox[:bbox.shape[0], :] = bbox

    gt_class_ids = np.expand_dims(gt_class_ids, axis=-1).astype(dtype)

    gt_bbox = np.concatenate([gt_bbox, gt_class_ids], axis=-1)

    anchors = utils.get_anchors(anchors_path)
    anchors = np.array(anchors, dtype=np.float32)

    boxes_yx = (gt_bbox[:, 0:2] + gt_bbox[:, 2:4]) // 2
    boxes_hw = gt_bbox[:, 2:4] - gt_bbox[:, 0:2]

    gt_bbox[:, 0] = boxes_yx[..., 1] / image_size
    gt_bbox[:, 1] = boxes_yx[..., 0] / image_size
    gt_bbox[:, 2] = boxes_hw[..., 1] / image_size
    gt_bbox[:, 3] = boxes_hw[..., 0] / image_size

    hw = np.expand_dims(boxes_hw, -2)
    anchors_broad = np.expand_dims(anchors, 0)

    anchor_maxes = anchors_broad / 2.
    anchor_mins = -anchor_maxes
    box_maxes = hw / 2.
    box_mins = -box_maxes
    intersect_mins = np.maximum(box_mins, anchor_mins)
    intersect_maxes = np.minimum(box_maxes, anchor_maxes)
    intersect_hw = np.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_hw[..., 0] * intersect_hw[..., 1]
    box_area = hw[..., 0] * hw[..., 1]
    anchor_area = anchors[..., 0] * anchors[..., 1]
    iou = intersect_area / (box_area + anchor_area - intersect_area)
    best_anchors = np.argmax(iou, axis=-1)

    # TODO: write a function to calculate the stride automatically.
    large_obj_image_size = image_size // 32
    medium_obj_image_size = image_size // 16
    small_obj_image_size = image_size // 8

    large_obj_detectors, large_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[6, 7, 8],
        grid_size=(large_obj_image_size, large_obj_image_size),
        num_classes=dataset.num_classes)

    medium_obj_detectors, medium_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[3, 4, 5],
        grid_size=(medium_obj_image_size, medium_obj_image_size),
        num_classes=dataset.num_classes)

    small_obj_detectors, small_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[0, 1, 2],
        grid_size=(small_obj_image_size, small_obj_image_size),
        num_classes=dataset.num_classes)

    yolo_true_data = np.concatenate(
        [large_obj_detectors, medium_obj_detectors, small_obj_detectors],
        axis=0).reshape([-1])
    yolo_true_boxes = np.concatenate(
        [large_obj_boxes, medium_obj_boxes, small_obj_boxes],
        axis=0).reshape([-1])

    yolo_gt = np.concatenate([yolo_true_data, yolo_true_boxes], axis=-1)

    return image.astype(dtype) / 255., yolo_gt.astype(dtype)
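
The anchor assignment above centers every box and anchor at the origin and intersects their extents. Because both rectangles share a center, the intersection is simply the elementwise minimum of their (h, w) sizes, which a small numeric sketch makes concrete (anchor sizes are made up):

import numpy as np

boxes_hw = np.array([[20., 30.], [100., 80.]])             # [num_boxes, 2]
anchors = np.array([[16., 30.], [62., 45.], [116., 90.]])  # made-up sizes

hw = boxes_hw[:, np.newaxis, :]                     # [num_boxes, 1, 2]
intersect_hw = np.minimum(hw, anchors[np.newaxis])  # centered rectangles
intersect_area = intersect_hw[..., 0] * intersect_hw[..., 1]
box_area = hw[..., 0] * hw[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
print(np.argmax(iou, axis=-1))                      # best anchor per box: [0 2]
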
Example #6
def load_image_gt(dataset,
                  config,
                  image_id,
                  augment=False,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: If true, apply random image augmentation. Currently, only
        horizontal flipping is offered.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    shape = image.shape
    image, window, scale, padding = utils.resize_image(
        image,
        min_dim=config.TRAIN.IMAGE_MIN_DIM,
        max_dim=config.TRAIN.IMAGE_MAX_DIM,
        padding=config.TRAIN.IMAGE_PADDING)
    mask = utils.resize_mask(mask, scale, padding)

    # Random horizontal flips.
    if augment:
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]
                                                ["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MRCNN.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, shape, window, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
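
The docstring's mini-mask idea (crop each instance to its bounding box, then resize to a fixed small shape) can be sketched without the project's utils module. This is an illustration of the concept, not the project's minimize_mask:

import numpy as np
from skimage.transform import resize

def minimize_mask_sketch(bbox, mask, mini_shape=(56, 56)):
    """Crop each instance mask to its box and resize it to mini_shape.
    Illustrative stand-in for utils.minimize_mask; bbox holds integer
    (y1, x1, y2, x2) rows, mask is [H, W, num_instances] bool."""
    mini = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        y1, x1, y2, x2 = bbox[i]
        m = mask[y1:y2, x1:x2, i].astype(float)
        if m.size == 0:
            continue  # box was all zeros (mask cropped out)
        m = resize(m, mini_shape, order=1, anti_aliasing=False)
        mini[:, :, i] = m >= 0.5
    return mini
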
Example #7
def load_image_gt(dataset, config, image_id, augment=False, use_mini_mask=False):
    # Load image and mask
    image_name = dataset.image_info[str(image_id)]['image_name']
    image = dataset.load_image(image_id)
    shape = image.shape
    image, window, scale, padding = resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        padding=config.IMAGE_PADDING)
    image_meta = compose_image_meta(image_id, shape, window)

    thing_mask, thing_class_ids, stuff_mask, stuff_class_ids, \
        influence_mask, influence_class_ids = dataset.load_mask(image_id)
    thing_mask = resize_mask(thing_mask, scale, padding)  # 1024
    stuff_mask = resize_mask(stuff_mask, scale, padding)  # 1024
    influence_mask = resize_mask(influence_mask, scale, padding)  # 1024
    influence_mask = resize_map(influence_mask, 1 / 8)  # 1024 -> 128
    thing_bbox = extract_bboxes(thing_mask)
    stuff_bbox = extract_bboxes(stuff_mask)
    influence_bbox = extract_bboxes(influence_mask)

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        thing_mask = minimize_mask(
            thing_bbox, thing_mask, config.MINI_MASK_SHAPE)
        stuff_mask = minimize_mask(
            stuff_bbox, stuff_mask, config.MINI_MASK_SHAPE)

    segmentation = skimage.io.imread(
        os.path.join(dataset.annotation_dir, image_name.replace("jpg", "png")))

    semantic_label = np.zeros_like(segmentation)
    segmentation_instance_id_map = rgb2id(segmentation)
    instance_id_list = list(dataset.image_info[str(image_id)]['instances'].keys())
    for instance_id in instance_id_list:
        instance = dataset.image_info[str(image_id)]['instances'][instance_id]
        instance_mask = segmentation_instance_id_map == int(instance_id)
        semantic_label[instance_mask] = \
            dataset.category_info[str(instance['category_id'])]['class_id']
    semantic_label = semantic_label[:, :, 0]

    semantic_label_h = semantic_label.shape[0]
    semantic_label_w = semantic_label.shape[1]
    semantic_label_scale = min(500 / semantic_label_h, 500 / semantic_label_w)
    # scipy.misc.imresize was removed in SciPy 1.3; use a nearest-neighbor
    # resize from skimage instead.
    semantic_label = skimage.transform.resize(
        semantic_label,
        (round(semantic_label_h * semantic_label_scale),
         round(semantic_label_w * semantic_label_scale)),
        order=0, preserve_range=True,
        anti_aliasing=False).astype(semantic_label.dtype)

    h, w = semantic_label.shape[:2]
    top_pad = (500 - h) // 2
    bottom_pad = 500 - h - top_pad
    left_pad = (500 - w) // 2
    right_pad = 500 - w - left_pad
    padding = [(top_pad, bottom_pad), (left_pad, right_pad)]
    semantic_label = np.pad(semantic_label, padding, mode='constant', constant_values=0)

    image_info = dataset.image_info[str(image_id)]

    # Random horizontal flips.
    if augment:
        if random.randint(0, 1):
            image = np.fliplr(image)
            thing_mask = np.fliplr(thing_mask)
            semantic_label = np.fliplr(semantic_label)
            segmentation = np.fliplr(segmentation)
    return image, image_meta, thing_class_ids, thing_bbox, thing_mask, stuff_class_ids, stuff_bbox, stuff_mask, \
           semantic_label, segmentation, image_info, influence_class_ids, influence_bbox, influence_mask
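
rgb2id above follows the COCO panoptic convention of packing an instance id into the three color channels (id = R + 256*G + 256**2*B). A minimal sketch of that helper, in case it is unfamiliar:

import numpy as np

def rgb2id_sketch(color):
    """COCO panoptic encoding: id = R + 256*G + 256**2*B.
    Illustrative version of the rgb2id helper used above."""
    color = color.astype(np.uint32)
    return color[..., 0] + 256 * color[..., 1] + 256 * 256 * color[..., 2]
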
Example #8
image_ids = dataset.image_ids
print(len(image_ids))
for idx, image_id in enumerate(image_ids):
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image,
                                mask,
                                class_ids,
                                dataset.class_names,
                                img_idx=idx)
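
The next loop uses a log helper from the Mask R-CNN codebase that prints an array's name, shape, dtype, and value range. A minimal stand-in if that utility is not on hand (the formatting is approximate, not the original's exact output):

import numpy as np

def log(text, array=None):
    """Print a label plus the array's shape, dtype, and min/max.
    Minimal stand-in for Mask R-CNN's model.log utility."""
    if array is not None:
        text = "{:25} shape: {:20}  min: {:10.5f}  max: {:10.5f}  {}".format(
            text, str(array.shape),
            float(array.min()) if array.size else float("nan"),
            float(array.max()) if array.size else float("nan"),
            array.dtype)
    print(text)
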

# Load each image and mask, and display stats and instances.
for idx, image_id in enumerate(image_ids):
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    bbox = utils.extract_bboxes(mask)
    # Display image and additional stats
    print("image_id ", image_id, dataset.image_reference(image_id))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image,
                                bbox,
                                mask,
                                class_ids,
                                dataset.class_names,
                                img_idx=idx)

# Generate Anchors