Example #1
def load_inference_data_obj_feat_gt(dataset, image_id, config):
    image = dataset.load_image(image_id)

    gt_ranks, sel_not_sal_obj_idx_list, shuffled_indices, chosen_obj_idx_order_list = dataset.load_gt_rank_order(
        image_id)

    object_roi_masks = dataset.load_object_roi_masks(image_id,
                                                     sel_not_sal_obj_idx_list)

    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    obj_mask = utils.resize_mask(object_roi_masks, scale, padding, crop)

    # bbox: [num_instances, (y1, x1, y2, x2)]
    obj_bbox = utils.extract_bboxes(obj_mask)

    # ******************* FILL TO FIXED SIZE, SHUFFLED ORDER ******************
    # Scatter boxes into a fixed-size buffer according to the shuffled order;
    # boxes are ordered with salient objects first, then non-salient objects.
    batch_obj_roi = np.zeros(shape=(config.SAL_OBJ_NUM, 4), dtype=np.int32)
    for i in range(len(chosen_obj_idx_order_list)):
        _idx = chosen_obj_idx_order_list[i]
        batch_obj_roi[_idx] = obj_bbox[i]

    # Normalize image
    image = model_utils.mold_image(image.astype(np.float32), config)

    # Active classes
    active_class_ids = np.ones([config.NUM_CLASSES], dtype=np.int32)
    # The numeric image id occupies the last 12 characters (COCO-style zero-padded id)
    img_id = int(image_id[-12:])
    # Image meta data
    image_meta = model_utils.compose_image_meta(img_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    # Add a batch dimension to each input
    image = np.expand_dims(image, axis=0)
    image_meta = np.expand_dims(image_meta, axis=0)
    batch_obj_roi = np.expand_dims(batch_obj_roi, axis=0)

    return [
        image, image_meta, batch_obj_roi
    ], gt_ranks, sel_not_sal_obj_idx_list, shuffled_indices, chosen_obj_idx_order_list
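
A minimal usage sketch for the loader above. Everything here is a hedged assumption: `dataset`, `image_id`, and `config` stand in for the project's own objects, and all of these examples presume `numpy as np` plus Mask R-CNN-style `utils` and `model_utils` modules are imported at module level.

inputs, gt_ranks, not_sal_idx, shuffled_idx, order = \
    load_inference_data_obj_feat_gt(dataset, image_id, config)
image, image_meta, batch_obj_roi = inputs
# image:         (1, H, W, 3) molded (mean-subtracted) image
# image_meta:    (1, meta_length) packed metadata vector
# batch_obj_roi: (1, SAL_OBJ_NUM, 4) zero-padded (y1, x1, y2, x2) boxes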
Example #2
def load_inference_data_obj_feat(dataset, image_id, config):
    image = dataset.load_image(image_id)

    object_roi_masks = dataset.load_object_roi_masks(image_id)

    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    obj_mask = utils.resize_mask(object_roi_masks, scale, padding, crop)

    # bbox: [num_instances, (y1, x1, y2, x2)]
    obj_bbox = utils.extract_bboxes(obj_mask)

    # Normalize image
    image = model_utils.mold_image(image.astype(np.float32), config)

    # Active classes
    active_class_ids = np.ones([config.NUM_CLASSES], dtype=np.int32)
    # The numeric image id occupies the last 12 characters (COCO-style zero-padded id)
    img_id = int(image_id[-12:])
    # Image meta data
    image_meta = model_utils.compose_image_meta(img_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    # Add a batch dimension to each input
    image = np.expand_dims(image, axis=0)
    image_meta = np.expand_dims(image_meta, axis=0)

    batch_obj_roi = np.zeros(shape=(config.SAL_OBJ_NUM, 4), dtype=np.int32)
    batch_obj_roi[:len(obj_bbox)] = obj_bbox
    batch_obj_roi = np.expand_dims(batch_obj_roi, axis=0)

    return [image, image_meta, batch_obj_roi]
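
The key design difference from Example #1 is the padding scheme: here the detected boxes are packed contiguously at the front of a fixed-size buffer rather than scattered by a shuffled index order. A self-contained sketch of that pattern (the numbers are invented for illustration):

import numpy as np

SAL_OBJ_NUM = 5                                  # stand-in for config.SAL_OBJ_NUM
obj_bbox = np.array([[10, 20, 50, 60],
                     [15, 25, 55, 65]], dtype=np.int32)  # two detected boxes

batch_obj_roi = np.zeros((SAL_OBJ_NUM, 4), dtype=np.int32)
batch_obj_roi[:len(obj_bbox)] = obj_bbox         # real boxes first, zero rows pad the rest
batch_obj_roi = np.expand_dims(batch_obj_roi, axis=0)    # (1, 5, 4) with batch dimension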
Example #3
def load_image_gt(dataset, config, image_id, augmentation=None):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    """
    # Load image
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = [
            "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud",
            "CropAndPad", "Affine", "PiecewiseAffine"
        ]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Cast mask to np.uint8 because imgaug doesn't support boolean arrays
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool (np.bool was removed in NumPy 1.24+)
        mask = mask.astype(bool)

    # Some masks may be empty (all zeros) if the instance was cropped out
    # during resizing; filter those instances out.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Compute bounding boxes from the remaining masks.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    active_class_ids = np.ones([dataset.num_classes], dtype=np.int32)

    # Image meta data
    image_meta = model_utils.compose_image_meta(image_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    return image, image_meta, class_ids, bbox
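
A hedged usage sketch matching the docstring's own suggestion; `dataset`, `config`, and `image_id` are assumed to follow the conventions of the examples above.

import imgaug.augmenters as iaa

augmentation = iaa.Fliplr(0.5)  # flip left/right 50% of the time
image, image_meta, class_ids, bbox = load_image_gt(
    dataset, config, image_id, augmentation=augmentation)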
Example #4
        pred = load_data_single(data_path)

        ranks = np.squeeze(pred, axis=0)

        # ********** Dataset information
        og_image = dataset.load_image(image_id)
        original_shape = og_image.shape

        # Pre-processed Object Mask
        pre_proc_data = dataset.load_obj_pre_proc_data(image_id)
        obj_masks = pre_proc_data["obj_masks"]
        obj_masks = np.squeeze(obj_masks, axis=0)

        # Get Original bounding box for resize
        object_roi_masks = dataset.load_object_roi_masks(image_id)
        obj_roi = utils.extract_bboxes(object_roi_masks)

        # ********** Process Predicted Masks
        # Split the mask tensor in half along the last axis and keep the
        # second half, which holds the saliency values
        masks = np.array_split(obj_masks, 2, axis=-1)
        masks = masks[1]
        masks = np.squeeze(masks, axis=-1)

        # Drop the zero-padded entries beyond the number of real objects
        masks = masks[:obj_roi.shape[0]]
        ranks = ranks[:obj_roi.shape[0]]

        # Resize each predicted mask back to the original image size, placing it
        # at its bounding box and applying a boundary threshold
        N = obj_roi.shape[0]
        full_masks = []
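        # The example is truncated here. A plausible continuation, assuming a
        # Mask R-CNN-style utils.unmold_mask(mask, bbox, image_shape) helper
        # is available -- this loop is a sketch, not the original code:
        for i in range(N):
            # Paste each low-resolution mask into its box at full image size
            full_masks.append(
                utils.unmold_mask(masks[i], obj_roi[i], original_shape))
        # Stack into [H, W, N]; empty array if no objects survived
        full_masks = np.stack(full_masks, axis=-1) if full_masks \
            else np.empty(original_shape[:2] + (0,))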