Example #1
def load_inference_data(dataset, image_id, config):

    # Load pre-computed object features and the P5 feature map
    pre_proc_data = dataset.load_obj_pre_proc_data(image_id)

    obj_feat = pre_proc_data["obj_feat"]
    p5_feat = pre_proc_data["P5"]

    # Load object spatial masks
    object_roi_masks = dataset.load_object_roi_masks(image_id)

    # Resize masks to 32 x 32; scale 0.05 with (4, 4) height padding assumes
    # 480 x 640 inputs (24 x 32 after scaling, padded up to 32 x 32)
    scale = 0.05
    padding = [(4, 4), (0, 0), (0, 0)]
    crop = None
    obj_spatial_masks = utils.resize_mask(object_roi_masks, scale, padding, crop)

    # Transpose and add dimension
    # [32, 32, N] -> [N, 32, 32, 1]
    obj_spatial_masks = np.expand_dims(np.transpose(obj_spatial_masks, [2, 0, 1]), -1)

    # Pad to a fixed number of object slots; unused slots stay zero
    batch_obj_spatial_masks = np.zeros(shape=(config.SAL_OBJ_NUM, 32, 32, 1), dtype=np.float32)

    batch_obj_spatial_masks[:obj_spatial_masks.shape[0]] = obj_spatial_masks

    # Add a batch dimension
    batch_obj_spatial_masks = np.expand_dims(batch_obj_spatial_masks, axis=0)

    return [obj_feat, batch_obj_spatial_masks, p5_feat]
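
A minimal call-site sketch for the loader above. Here dataset, image_id, config, and model are placeholders carried over from the surrounding examples; the model itself and its expected input order are assumptions, not defined in the source:

# Hypothetical usage; dataset, image_id, config, and model are assumed.
inputs = load_inference_data(dataset, image_id, config)
obj_feat, batch_obj_spatial_masks, p5_feat = inputs
# batch_obj_spatial_masks has shape [1, config.SAL_OBJ_NUM, 32, 32, 1];
# obj_feat and p5_feat are passed through from the pre-processed data.
predicted_ranks = model.predict(inputs)  # assumes the input order matches the model
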
Example #2
def load_inference_data_obj_feat_gt(dataset, image_id, config):
    image = dataset.load_image(image_id)

    gt_ranks, sel_not_sal_obj_idx_list, shuffled_indices, chosen_obj_idx_order_list = dataset.load_gt_rank_order(
        image_id)

    object_roi_masks = dataset.load_object_roi_masks(image_id,
                                                     sel_not_sal_obj_idx_list)

    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    obj_mask = utils.resize_mask(object_roi_masks, scale, padding, crop)

    # bbox: [num_instances, (y1, x1, y2, x2)]
    obj_bbox = utils.extract_bboxes(obj_mask)

    # *********************** FILL REST, SHUFFLE ORDER ***********************
    # Scatter each box into its target slot: salient objects come first,
    # then non-salient objects (a standalone sketch follows this function)
    batch_obj_roi = np.zeros(shape=(config.SAL_OBJ_NUM, 4), dtype=np.int32)
    for i, _idx in enumerate(chosen_obj_idx_order_list):
        batch_obj_roi[_idx] = obj_bbox[i]

    # Normalize image
    image = model_utils.mold_image(image.astype(np.float32), config)

    # Active classes
    active_class_ids = np.ones([config.NUM_CLASSES], dtype=np.int32)
    # COCO-style ids: the last 12 characters of the id string hold the numeric image id
    img_id = int(image_id[-12:])
    # Image meta data
    image_meta = model_utils.compose_image_meta(img_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    # Add a batch dimension to each input
    image = np.expand_dims(image, axis=0)
    image_meta = np.expand_dims(image_meta, axis=0)
    batch_obj_roi = np.expand_dims(batch_obj_roi, axis=0)

    return [
        image, image_meta, batch_obj_roi
    ], gt_ranks, sel_not_sal_obj_idx_list, shuffled_indices, chosen_obj_idx_order_list
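
A self-contained sketch of the scatter performed in the FILL REST, SHUFFLE ORDER block above, with toy sizes standing in for config.SAL_OBJ_NUM so it runs on its own:

import numpy as np

# Toy stand-ins: two boxes and five object slots (as if config.SAL_OBJ_NUM were 5)
obj_bbox = np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.int32)
chosen_obj_idx_order_list = [3, 1]  # target slot for each box

batch_obj_roi = np.zeros(shape=(5, 4), dtype=np.int32)
for i, _idx in enumerate(chosen_obj_idx_order_list):
    batch_obj_roi[_idx] = obj_bbox[i]

# Slot 3 now holds the first box, slot 1 the second; all other slots stay zero.
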
Example #3
def load_inference_data_obj_feat(dataset, image_id, config):
    image = dataset.load_image(image_id)

    object_roi_masks = dataset.load_object_roi_masks(image_id)

    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    obj_mask = utils.resize_mask(object_roi_masks, scale, padding, crop)

    # bbox: [num_instances, (y1, x1, y2, x2)]
    obj_bbox = utils.extract_bboxes(obj_mask)

    # Normalize image
    image = model_utils.mold_image(image.astype(np.float32), config)

    # Active classes
    active_class_ids = np.ones([config.NUM_CLASSES], dtype=np.int32)
    # COCO-style ids: the last 12 characters of the id string hold the numeric image id
    img_id = int(image_id[-12:])
    # Image meta data
    image_meta = model_utils.compose_image_meta(img_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    # Add a batch dimension to each input
    image = np.expand_dims(image, axis=0)
    image_meta = np.expand_dims(image_meta, axis=0)

    batch_obj_roi = np.zeros(shape=(config.SAL_OBJ_NUM, 4), dtype=np.int32)
    batch_obj_roi[:len(obj_bbox)] = obj_bbox
    batch_obj_roi = np.expand_dims(batch_obj_roi, axis=0)

    return [image, image_meta, batch_obj_roi]
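
A call-site sketch for the loader above. The id string is illustrative only, chosen so that int(img_id[-12:]) finds a 12-digit numeric id at the end, as the function expects:

# Hypothetical usage; dataset and config are assumed as in the examples above.
image_id = "COCO_val2014_000000000042"
image, image_meta, batch_obj_roi = load_inference_data_obj_feat(dataset, image_id, config)
assert image.shape[0] == 1                                # single-image batch
assert batch_obj_roi.shape == (1, config.SAL_OBJ_NUM, 4)  # zero-padded ROIs
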
def load_image_gt(dataset, config, image_id, augmentation=None):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    """
    # Load image
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = [
            "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud",
            "CropAndPad", "Affine", "PiecewiseAffine"
        ]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support boolean arrays
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool (np.bool was removed in recent NumPy)
        mask = mask.astype(bool)

    # Filter out instances whose masks were completely cropped out during
    # resizing; their boxes would otherwise be all zeros
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    active_class_ids = np.ones([dataset.num_classes], dtype=np.int32)

    # Image meta data
    image_meta = model_utils.compose_image_meta(image_id, original_shape,
                                                image.shape, window, scale,
                                                active_class_ids)

    return image, image_meta, class_ids, bbox
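
A sketch of load_image_gt in a training-style loop, using the horizontal-flip augmentation mentioned in the docstring. Iterating over dataset.image_ids assumes a Matterport-style Dataset class; adapt the loop to your own dataset:

import imgaug.augmenters as iaa

# Flip images (and masks, via the hook inside load_image_gt) left/right 50% of the time
augmentation = iaa.Fliplr(0.5)

for image_id in dataset.image_ids:  # dataset and config are assumed to exist
    image, image_meta, class_ids, bbox = load_image_gt(
        dataset, config, image_id, augmentation=augmentation)
    # Note: the image is resized but not molded here; apply
    # model_utils.mold_image before feeding it to a model.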