Example No. 1
def visualise_annotations(dataset, n_instances=None):
    '''
    dataset : hsDataset
    n_instances : Number of images to display. Defaults to None, in which case
                the entire dataset is used; otherwise it must be between 1
                and the size of the dataset.
    '''
    if n_instances is None:
        size = len(dataset.image_ids)
    else:
        size = n_instances
    
    for image_id in range(size):
        image = dataset.load_image(image_id)
        if image.shape[-1] == 4: # PNG images have alpha channel as the 4th channel. 
            image = image[..., :3] # Drop the 4th channel
            
        mask, class_ids = dataset.load_mask(image_id)
        
        bbox = utils.extract_bboxes(mask)
        
        print("image_id ", image_id, dataset.image_reference(image_id))
        log("image", image)
        log("mask", mask)
        log("class_ids", class_ids)
        log("bbox", bbox)
                
        ax = get_ax(1)
        
        try:
            visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names, ax=ax)
        except ValueError:
            print("Image size and Mask size does not match")
Example No. 2
def compare_bbox_rbbox_from_annotations(annotation, image_dir):
    # Accumulators must persist across images, not be reset per iteration
    bbox_ABI = []
    bbox_AUI = []
    rbbox_ABI = []
    rbbox_AUI = []
    for elem in annotation:
        img_ext = elem.split('.')[-1]
        img_name = elem.replace(img_ext, 'jpg')
        image = cv2.imread(os.path.join(image_dir, img_name))
        height, width = image.shape[:2]  # numpy shape is (rows, cols, ...)
        # print('[INFO]   working on {}'.format(img_name))
        try:
            masks = load_masks_from_annotation(annotation[elem],
                                               width=width,
                                               height=height)
            bboxes = extract_bboxes(masks)
            # print('[DEBUG]      Number of objects : {}'.format(len(bboxes)))
            (a, b, c,
             d) = rbb_utils.get_bbox_rbbox_values(image, masks, bboxes)
            # print('[INFO]   Average useful information on image :                         - hbbox : {}  |  rbbox : {}'.format(a,b))
            # print('[INFO]   Average percentage of intersection between boxes on image :   - hbbox : {}  |  rbbox : {}'.format(c,d))
            # print('')
            bbox_ABI.append(a)
            rbbox_ABI.append(b)
            bbox_AUI.append(c)
            rbbox_AUI.append(d)
        except AssertionError:
            continue
    return (np.mean(bbox_ABI), np.mean(rbbox_ABI), np.mean(bbox_AUI),
            np.mean(rbbox_AUI))
Example No. 3
def compute_gt_stats(gt_bbox, gt_mask):
    # Compute statistics for all the ground truth instances.
    hw = gt_bbox[:, 2:] - gt_bbox[:, :2]
    hw = hw * 1.  # cast to float
    min_side = np.min(hw,1)[:,np.newaxis]
    max_side = np.max(hw,1)[:,np.newaxis]
    aspect_ratio = np.max(hw, 1) / np.min(hw, 1)
    aspect_ratio = aspect_ratio[:,np.newaxis]
    log_aspect_ratio = np.log(aspect_ratio)
    box_area = np.prod(hw, 1)[:,np.newaxis]
    log_box_area = np.log(box_area)
    sqrt_box_area = np.sqrt(box_area)
    modal_area = np.sum(np.sum(gt_mask, 0), 0)[:,np.newaxis]*1.
    log_modal_area = np.log(modal_area)
    sqrt_modal_area = np.sqrt(modal_area)

    # IoU between each gt box and the box of the largest connected
    # component of its mask (flags fragmented, multi-part masks)
    ov_connected = sqrt_box_area * 1.  # array of the right shape to fill in
    for i in range(gt_mask.shape[2]):
        aa = skimage.measure.label(gt_mask[:,:,i], background=0)
        sz = np.bincount(aa.ravel())[1:]
        biggest = np.argmax(sz)+1
        big_comp = utilslib.extract_bboxes(aa[:,:,np.newaxis]==biggest)
        ov_connected[i,0] = utilslib.compute_overlaps(big_comp, gt_bbox[i:i+1,:])

    a = np.concatenate([min_side, max_side, aspect_ratio, log_aspect_ratio,
        box_area, log_box_area, sqrt_box_area, modal_area, log_modal_area,
        sqrt_modal_area, ov_connected], 1)
    n = ['min_side', 'max_side', 'aspect_ratio', 'log_aspect_ratio', 'box_area',
        'log_box_area', 'sqrt_box_area', 'modal_area', 'log_modal_area',
        'sqrt_modal_area', 'ov_connected']
    return a, n
Example No. 4
def createClipedImage():
    for item in Path(basePath + "/masks3").rglob('*.png'):
        filePath = str(item)
        img_id, img_class, class_instance = filePath.split(
            os.sep)[-1].split("_")
        img_file_name = filePath.split(os.sep)[-1]
        img_mask_data = cv2.imread(filePath)
        img_mask_data = img_mask_data[:, :, [0]]
        bbox = utils.extract_bboxes(img_mask_data.astype(np.uint8))[0]
        scaleX, scaleY = 2100 / 576, 1400 / 384  # mask (576x384) -> original image (2100x1400)
        y1, x1, y2, x2 = int(bbox[0] * scaleY), int(bbox[1] * scaleX), int(
            bbox[2] * scaleY), int(bbox[3] * scaleX)
        width = x2 - x1
        height = y2 - y1
        if width / height < 0.4 or width / height > 2.5:
            continue
        if width < 25 or height < 18:
            continue
        img = cv2.imread(basePath + "/train_images/" + img_id + ".jpg")
        clippedImg = img[y1:y2, x1:x2, :]
        if width < height:
            clippedImg = np.transpose(clippedImg, axes=(1, 0, 2))
        clippedImg = cv2.resize(clippedImg, (576, 384))
        cropped_img_file_name = basePath + "/train_class_images/" + img_file_name
        print(cropped_img_file_name)
        cv2.imwrite(cropped_img_file_name, clippedImg)
Example No. 5
def plot_sun_rgb():
    image_ids = random.choices(dataset_rgb.image_ids, k=10)

    for image_id in image_ids:
        print(image_id)

        image = dataset_rgb.load_image(image_id)
        mask, class_ids = dataset_rgb.load_mask(image_id)
        bbox = utils.extract_bboxes(mask)
        _, ground_truth = visualize.display_instances(image, bbox, mask,
                                                      class_ids,
                                                      dataset_rgb.class_names)

        _, result_rgb = plot_inference(model_rgb, dataset_rgb, image_id, image)
        _, result_d3 = plot_inference(model_d3, dataset_d3, image_id, image)
        _, result_rgbd = plot_inference(model_rgbd, dataset_rgbd, image_id,
                                        image)
        _, result_rgbd_fusenet = plot_inference(model_rgbd_fusenet,
                                                dataset_rgbd_fusenet, image_id,
                                                image)

        ground_truth.savefig("inference_" + str(image_id) +
                             "_ground_truth.png")
        result_rgb.savefig("inference_" + str(image_id) + "_sun_rgb.png")
        result_d3.savefig("inference_" + str(image_id) + "_sun_d3.png")
        result_rgbd.savefig("inference_" + str(image_id) + "_sun_rgbd.png")
        result_rgbd_fusenet.savefig("inference_" + str(image_id) +
                                    "_sun_rgbd_fusenet.png")
Example No. 6
def load_and_display_random_sample(dataset, datacfg, N=2):
  """Load and display random samples
  """
  log.info("load_and_display_random_sample::-------------------------------->")

  image_ids = np.random.choice(dataset.image_ids, N)
  class_names = dataset.class_names
  log.debug("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
  log.debug("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

  for image_id in image_ids:
    image = dataset.load_image(image_id, datacfg)
    mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)

    log.debug("keys: {}".format(keys))
    log.debug("values: {}".format(values))
    log.debug("class_ids: {}".format(class_ids))

    ## Display image and instances
    visualize.display_top_masks(image, mask, class_ids, class_names)
    ## Compute Bounding box
    
    bbox = utils.extract_bboxes(mask)
    log.debug("bbox: {}".format(bbox))
    visualize.display_instances(image, bbox, mask, class_ids, class_names)
Example No. 7
def visualise_annotation_by_pos(dataset, position):
    '''
    dataset : hsDataset
    position : Index of the desired image
    '''
    image = dataset.load_image(position)
    if image.shape[-1] == 4:
        image = image[..., :3]
        
    mask, class_ids = dataset.load_mask(position)
    
    bbox = utils.extract_bboxes(mask)
    
    print("image_id ", position, dataset.image_reference(position))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
                
    ax = get_ax(1)
    
    try:
        visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names, ax=ax)
    except ValueError:
        print("Image size and Mask size does not match")
Example No. 8
def load_and_resize_images(dataset, datacfg, dnncfg):
  '''
  ## Resize Images
  To support multiple images per batch, images are resized to one size (1024x1024).
  The aspect ratio is preserved: if an image is not square, zero padding is
  added at the top/bottom or left/right.
  '''
  log.info("load_and_resize_images::-------------------------------->")

  image_id = np.random.choice(dataset.image_ids, 1)[0]
  image = dataset.load_image(image_id)

  mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
  original_shape = image.shape
  # Resize
  image, window, scale, padding, _ = utils.resize_image(
      image,
      min_dim=dnncfg.IMAGE_MIN_DIM,
      max_dim=dnncfg.IMAGE_MAX_DIM,
      mode=dnncfg.IMAGE_RESIZE_MODE)
  mask = utils.resize_mask(mask, scale, padding)
  # Compute Bounding box
  bbox = utils.extract_bboxes(mask)

  # Display image and additional stats
  log.debug("Original shape: {}".format(original_shape))
  # customlog("image", image)
  # customlog("mask", mask)
  # customlog("class_ids", class_ids)
  # customlog("bbox", bbox)

  ## Display image and instances
  class_names = dataset.class_names
  visualize.display_instances(image, bbox, mask, class_ids, class_names)
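The `window` returned by `utils.resize_image` is the (y1, x1, y2, x2) region of the padded image that contains the actual picture. A quick sketch, reusing the variables from the function above, of how to crop the padding back off:

  # Crop the zero padding away using `window` (variables as in the function above)
  y1, x1, y2, x2 = window
  unpadded = image[y1:y2, x1:x2]
  log.debug("padded: {} -> unpadded: {}".format(image.shape, unpadded.shape))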
Example No. 9
def display_dataset(num_of_random_samples):
    # Load and display random samples
    if num_of_random_samples >= len(dataset.image_ids):
        print(
            "The number of samples cannot exceed the number of samples available"
        )
        print("\nClamping it to the number of available samples")
        num_of_random_samples = len(dataset.image_ids) - 1

    image_ids = np.random.choice(dataset.image_ids, num_of_random_samples)

    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids = dataset.load_mask(image_id)
        visualize.display_top_masks(image, mask, class_ids,
                                    dataset.class_names)

    # Load random image and mask.
    image_id = random.choice(dataset.image_ids)
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id ", image_id, dataset.image_reference(image_id))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)
Example No. 10
def resize_image(image_id):
    # Load random image and mask.
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    # Resize
    image, window, scale, padding, _ = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding)
    # Compute Bounding box
    bboxes = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id: ", image_id, dataset.image_reference(image_id))
    print("Original shape: ", original_shape)
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bboxes)
    # Display image and instances
    visualize.display_instances(image, bboxes, mask, class_ids,
                                dataset.class_names)
Example No. 11
def load_image_gt_without_resizing(dataset, datacfg, config, image_id):
    """Inspired by load_image_gt, but does not resize the image.
    """

    # Load image and mask
    image = dataset.load_image(image_id, datacfg, config)
    mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg,
                                                      config)
    # Some masks might be all zeros if the corresponding instance was
    # cropped out; filter them out here.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]

    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]
                                                ["source"]]
    active_class_ids[source_class_ids] = 1

    # return image, class_ids, bbox, mask
    return image, class_ids, bbox, mask, active_class_ids
Example No. 12
def bbox_show(image, mask):
    """Apply bbox and show"""
    if mask.shape[-1] > 0:
        # mask = (np.sum(mask, -1, keepdims=True) >= 1)
        bbox = utils.extract_bboxes(mask)
        class_ids = np.array([1 for i in range(bbox.shape[0])])
        visualize.display_instances(image, bbox, mask, class_ids,
                                    ['BG', 'GARBAGE'])
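A quick way to exercise `bbox_show` on synthetic data; the [H, W, N] mask layout matches the rest of these examples:

import numpy as np

# Synthetic single-instance mask: one filled rectangle
image = np.zeros((128, 128, 3), dtype=np.uint8)
mask = np.zeros((128, 128, 1), dtype=bool)
mask[40:80, 30:90, 0] = True
bbox_show(image, mask)  # draws one 'GARBAGE' box at [40, 30, 80, 90]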
Example No. 13
def detect_and_color_splash(model, image_path=None, video_path=None):
    assert image_path or video_path

    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect
        print("Running on {}".format(args.image))
        # Read image
        image = skimage.io.imread(args.image)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(
            datetime.datetime.now())
        # Compute bounding boxes from the detected masks
        bbox = utils.extract_bboxes(r['masks'])
        visualize.display_instances(image, bbox, r['masks'], r['class_ids'],
                                    BoxDataset.class_names)
        skimage.io.imsave(file_name, splash)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)

        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(
            datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))

        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to save image to video
                splash = splash[..., ::-1]
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
    print("Saved to ", file_name)
Example No. 14
def run_map(out_path, num2_1, num2_2):
    dataset_val = ExpressDataset()
    dataset_val.out_path(out_path)
    dataset_val.set_dataset_class('val')
    dataset_val.set_dataset_exit_flag(True)
    dataset_val.load_shapes(num2_1, num2_2, 0)
    dataset_val.prepare()

    class InferenceConfig(ExpressConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    inference_config = InferenceConfig()

    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode="inference",
                              config=inference_config,
                              model_dir=MODEL_DIR)

    # Get path to saved weights
    # Either set a specific path or find last trained weights
    # model_path = os.path.join(ROOT_DIR, ".h5 file name here")
    #model_path = '/home/kingqi/proj/Mask_RCNN/log/express20190424T1751/mask_rcnn_express_0006.h5'
    model_path = model.find_last()

    # Load trained weights
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    image_ids = np.random.choice(dataset_val.image_ids, num2_1 + num2_2 * 6)
    APs = []
    for image_id in image_ids:
        # Load image and ground truth data
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset_val, inference_config, image_id, use_mini_mask=False)
        molded_images = np.expand_dims(
            modellib.mold_image(image, inference_config), 0)

        image1 = dataset_val.load_image(image_id)
        mask1, class_ids1 = dataset_val.load_mask(image_id)

        bbox = utils.extract_bboxes(mask1)
        # visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
        #visualize.display_instances(image1, bbox, mask1, class_ids1, dataset_val.class_names)

        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
        #                            dataset_val.class_names, r['scores'])
        # Compute AP
        AP, precisions, recalls, overlaps = utils.compute_ap(
            gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"],
            r["scores"], r['masks'])
        APs.append(AP)

    print("mAP: ", np.mean(APs))
Example No. 15
def evaluation(image_ids, y_pred, dataset):

    import matplotlib.pyplot as plt

    def get_ax(rows=1, cols=1, size=16):
        _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
        return ax

    y_true = []

    for i in image_ids:
        mask, class_ids = dataset.load_mask(i)
        bbox = utils.extract_bboxes(mask)
        y_true.append([bbox, class_ids, mask])

    pickle.dump(y_true, open('y_true', 'wb'))

    ax = get_ax(len(image_ids), 2)

    if len(image_ids) <= 3:
        for index, i in enumerate(image_ids):
            image = dataset.load_image(i)
            # y_true/y_pred are positional (parallel to image_ids); with
            # several rows, plt.subplots returns a 2D axes array
            row_ax = ax[index] if len(image_ids) > 1 else ax
            visualize.display_instances(image,
                                        y_true[index][0],
                                        y_true[index][2],
                                        y_true[index][1],
                                        dataset.class_names,
                                        ax=row_ax[0],
                                        title="True")
            visualize.display_instances(image,
                                        y_pred[index]['rois'],
                                        y_pred[index]['masks'],
                                        y_pred[index]['class_ids'],
                                        dataset.class_names,
                                        y_pred[index]['scores'],
                                        ax=row_ax[1],
                                        title="Predicted")
        plt.show()

    image = dataset.load_image(i)
    h, w = image.shape[0], image.shape[1]
    print(w, h)

    # PIL's Image.new expects a (width, height) tuple
    mask_pred = Image.new('1', (w, h))
    mask_true = Image.new('1', (w, h))

    bah = []
    currentIoU = 0
    for index_true, i in enumerate(y_true[0][0]):
        bah.append([])
        for index_pred, j in enumerate(y_pred[0]['rois']):
            currentIoU = IoU(i, j, mask_pred, mask_true)
            bah[index_true].append((currentIoU, y_true[0][1][index_true],
                                    y_pred[0]['class_ids'][index_pred]))

    return bah
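The `IoU` helper called in the nested loop above is not shown. A minimal axis-aligned variant for two [y1, x1, y2, x2] boxes (a sketch only; the original apparently also uses the two PIL masks):

def box_iou(box_a, box_b):
    """IoU of two [y1, x1, y2, x2] boxes."""
    y1, x1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    y2, x2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, y2 - y1) * max(0, x2 - x1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union else 0.0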
Example No. 16
def displayImage(self):
    image_id = '00010'
    image = train_set.load_image(int(image_id))
    print(image.shape)
    # load image mask
    mask, class_ids = train_set.load_mask(int(image_id))
    bbox = extract_bboxes(mask)
    print('final box- ' + str(bbox))
    # display image with masks and bounding boxes
    display_instances(image, bbox, mask, class_ids, train_set.class_names)
Example No. 17
def combineSameBBoxMask():
    maskdicts = {}
    for item in Path(basePath + "/masks_seperated").rglob('*.png'):
        filePath = str(item)
        img_id, img_class, class_instance = item.name.split("_")
        img_mask_data = cv2.imread(filePath)
        img_mask_data = img_mask_data[:, :, [0]]
        cur_bbox = utils.extract_bboxes(img_mask_data.astype(np.uint8))[0]
        y1, x1, y2, x2 = cur_bbox[0], cur_bbox[1], cur_bbox[2], cur_bbox[3]
        cur_area = (x2 - x1) * (y2 - y1)
        # setdefault avoids reusing img_inst/img_cls from a previous iteration
        img_inst = maskdicts.setdefault(img_id, {})
        img_cls = img_inst.setdefault(img_class, [])
        if len(img_cls) == 0:
            new_Inst = {"path": [filePath], "bbox": cur_bbox, "ambi": False}
            img_cls.append(new_Inst)
        else:
            findInstance = False
            for instance in img_cls:
                inst_bbox = instance["bbox"]
                _y1, _x1, _y2, _x2 = inst_bbox[0], inst_bbox[1], inst_bbox[
                    2], inst_bbox[3]
                if (abs(y1 - _y1) < 10
                        or abs(y2 - _y2) < 10) and (abs(x2 - _x1) < 60
                                                    or abs(x1 - _x2) < 60):
                    instance["bbox"] = boxU(cur_bbox, inst_bbox)
                    instance["path"].append(filePath)
                    findInstance = True
            if not findInstance:
                new_Inst = {
                    "path": [filePath],
                    "bbox": cur_bbox,
                    "ambi": False
                }
                img_cls.append(new_Inst)
    for id in maskdicts:
        imgs = maskdicts[id]
        for cls_name in imgs:
            cls_insts = imgs[cls_name]
            for idx, inst in enumerate(cls_insts):
                img_data = np.zeros([384, 576])
                for path in inst["path"]:
                    data = cv2.imread(path)
                    img_data = np.logical_or(img_data, data[:, :, 0])
                img_data = img_data * 255
                _img_id, _img_class, _class_instance = inst["path"][0].split(
                    os.sep)[-1].split("_")
                new_mask_file = _img_id + "_" + _img_class + "_" + str(idx)
                cv2.imwrite(
                    basePath + "masks_seperated2/" + new_mask_file + ".png",
                    img_data.astype(np.uint8))
                print(basePath + "masks_seperated2/" + new_mask_file + ".png")
Example No. 18
def get_fru_net_results(results_dir: str, dataset: VesicleDataset):
    WIN_NAME = 'img'
    cv.namedWindow(WIN_NAME)
    gt_boxes = []
    gt_class_ids = []
    gt_masks = []
    results = []
    images = []

    for image_id in dataset.image_ids:
        origin_img = dataset.load_image(image_id)
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config,
                                   image_id, use_mini_mask=False)
        gt_boxes.append(gt_bbox)
        gt_class_ids.append(gt_class_id)
        gt_masks.append(gt_mask)

        # load masks
        img_name = dataset.image_info[image_id]['id']
        name, ext = img_name.rsplit('.', 1)
        path = os.path.join(results_dir, f'{name}_labels.{ext}')
        mask_img = cv.imread(path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)
        n = np.max(mask_img)
        class_ids = np.ones(n, dtype=np.int32)
        scores = np.ones(n, dtype=np.float32)
        mask = get_bin_mask(mask_img)

        image, window, scale, padding, crop = utils.resize_image(
            origin_img,
            min_dim=config.IMAGE_MIN_DIM,
            min_scale=config.IMAGE_MIN_SCALE,
            max_dim=config.IMAGE_MAX_DIM,
            mode=config.IMAGE_RESIZE_MODE)
        mask = utils.resize_mask(mask, scale, padding, crop)
        rois = utils.extract_bboxes(mask)
        images.append(image)

        vis_img = image.copy()
        draw_masks_contours(vis_img, gt_mask, (0, 0, 255))
        draw_masks_contours(vis_img, mask, (0, 255, 0))
        cv.setWindowTitle(WIN_NAME, img_name)
        cv.imshow(WIN_NAME, vis_img)
        cv.waitKey(0)

        result = {
            'class_ids': class_ids,
            'scores': scores,
            'masks': mask,
            'rois': rois
        }
        results.append(result)

    return images, gt_boxes, gt_class_ids, gt_masks, results
Example No. 19
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):

    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    if augmentation:
        import imgaug
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        image_shape = image.shape
        mask_shape = mask.shape
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        mask = mask.astype(bool)  # np.bool was removed in NumPy 1.24+

    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    bbox = utils.extract_bboxes(mask)

    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)
    return image, image_meta, class_ids, bbox, mask
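When `use_mini_mask` is enabled, each instance mask is cropped to its bbox and shrunk to `MINI_MASK_SHAPE`; `utils.expand_mask` undoes this. A small round-trip sketch starting from a full-size mask and its bbox, assuming matterport's `utils`:

# Full mask -> mini mask -> full-size mask (approximate, due to resizing)
mini = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
restored = utils.expand_mask(bbox, mini, mask.shape)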
Example No. 20
def test2(model):
    """ Test the model on input dataset """
    dataset = SidelobeDataset()
    dataset.load_dataset(args.dataset)
    dataset.prepare()

    for index, image_id in enumerate(dataset.image_ids):
        # - Load image
        image = dataset.load_image(image_id)
        image_path = dataset.image_info[image_id]['path']
        image_path_base = os.path.basename(image_path)
        image_path_base_noext = os.path.splitext(image_path_base)[0]

        # - Load mask
        mask_gt = dataset.load_gt_mask(image_id)

        mask_gt_chan3 = np.broadcast_to(mask_gt, image.shape)
        image_masked_gt = np.copy(image)
        image_masked_gt[np.where(
            (mask_gt_chan3 == [True, True,
                               True]).all(axis=2))] = [255, 255, 0]

        outfile = 'gtmask_' + image_path_base_noext + '.png'
        skimage.io.imsave(outfile, image_masked_gt)

        # - Extract true bounding box from true mask
        bboxes_gt = utils.extract_bboxes(mask_gt)

        # Detect objects
        r = model.detect([image], verbose=0)[0]
        mask = r['masks']
        bboxes = r['rois']
        ##bboxes= utils.extract_bboxes(mask)
        class_labels = r['class_ids']
        nobjects = mask.shape[-1]
        if nobjects <= 0:
            print("INFO: No object mask found for image %s ..." %
                  image_path_base)
            continue

        # Save image with masks
        outfile = 'out_' + image_path_base_noext + '.png'
        visualize.display_instances(image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    dataset.class_names,
                                    r['scores'],
                                    show_bbox=True,
                                    show_mask=True,
                                    title="Predictions")
        plt.savefig(outfile)
Example No. 21
def score():
    image_path = "static/downloaded.jpg"
    image = skimage.io.imread(image_path)

    r = model.detect([image], verbose=1)
    r = r[0]

    mask, class_ids, scores = r['masks'], r['class_ids'], r['scores']
    bbox = utils.extract_bboxes(mask)
    classes = ["BG", "flat", "dome", "tree", "N", "NE", "E", "SE", "S", "SW", "W", "NW"]
    display_instances(image, bbox, mask, class_ids, classes, scores)

    return url_for("static", filename='scored.jpg')
Example No. 22
def deleteAbnormalMask():
    moveToPath = basePath + "/masktoremove"
    for item in Path(basePath + "/masks_seperated2").rglob('*.png'):
        filePath = str(item)
        img_mask_data = cv2.imread(filePath)
        img_mask_data = img_mask_data[:, :, [0]]
        bbox = utils.extract_bboxes(img_mask_data.astype(np.uint8))
        y1, x1, y2, x2 = bbox[0][0], bbox[0][1], bbox[0][2], bbox[0][3]
        width = x2 - x1
        height = y2 - y1
        print(filePath)
        if width * height < 2000:
            imgName = filePath.split(os.sep)[-1]
            shutil.move(filePath, moveToPath + "/" + imgName)
Example No. 23
def data_split(file, number, left=True):
    """
    3D scan generate ROI, ROI mask and weights
    :param file:scan
    :param left:if the scan is left or not
    :return:ROI,ROI_mask,weights
    """
    scan = np.array(file['scan'])
    CartTM = np.array(file['CartTM'])
    pred = ROI1[number]
    if left is False:
        scan = reverse(scan)
        CartTM = reverse(CartTM)
    scan = np.lib.pad(scan, 2, padwithzeros)
    CartTM = np.lib.pad(CartTM, 2, padwithzeros)
    CartTM_cor = np.array(np.where(CartTM > 0)).T
    print(len(CartTM_cor))
    # x = int(round(max(np.array(CartTM_cor.T[0])) + min(np.array(CartTM_cor.T[0])))/2)
    # y = int(round(max(np.array(CartTM_cor.T[1])) + min(np.array(CartTM_cor.T[1])))/2)
    # z = int(round(max(np.array(CartTM_cor.T[2])) + min(np.array(CartTM_cor.T[2])))/2)
    x = int(round((pred[0] + pred[1]) / 2))
    y = int(round((pred[2] + pred[3]) / 2))
    z = int(round((pred[4] + pred[5]) / 2))
    # print(x,y,z)
    ROI = scan[x - 16:x + 16, y - 40:y + 40, z - 24:z + 24]
    ROI_mask = CartTM[x - 16:x + 16, y - 40:y + 40, z - 24:z + 24]
    print(scan.shape)
    print(CartTM.shape)
    # extract_bboxes treats the 3rd axis as instances, so this yields one
    # (y1, x1, y2, x2) box per z-slice of the padded volume
    bbx = utils.extract_bboxes(CartTM)
    print(bbx.shape)
    #     ROI = scan[x-8:x+8, y-32:y+32, z-16:z+16]
    #     ROI_mask = CartTM[x-8:x+8, y-32:y+32, z-16:z+16]
    #    print(ROI_mask.shape)
    cart_cor = np.array(np.where(ROI_mask > 0)).T
    #    back_cor = np.array(np.where(ROI_mask == 0)).T
    print(ROI.shape)
    print(len(cart_cor))
    print(len(cart_cor) / len(CartTM_cor))
    print(len(cart_cor) / ROI.size)
    #    print(back_cor.shape)
    #    ROI_mask = np.array([np.array(np.where(ROI_mask > 0))*8, np.array(np.where(ROI_mask ==0))])
    ROI = ROI[..., np.newaxis]
    ROI_mask = onehot(ROI_mask).reshape(ROI.size, 2)
    weights = np.zeros((ROI.size))
    for i in range(ROI.size):
        if ROI_mask[i][0] == 0:
            weights[i] = 1 - len(cart_cor) / ROI.size
        else:
            weights[i] = len(cart_cor) / ROI.size
    return ROI, ROI_mask, weights
Example No. 24
def getCenteredClassBboxes(datasetPath: str,
                           imageName: str,
                           classToCenter: str,
                           image_size=1024,
                           imageFormat="jpg",
                           allow_oversized=True,
                           config: Config = None,
                           verbose=0):
    """
    Computes and returns bboxes of all masks of the given image and class
    :param datasetPath: path to the dataset containing the image folder
    :param imageName: the image name
    :param classToCenter: the class to center and get the bbox from
    :param image_size: the minimal height and width of the bboxes
    :param imageFormat: the image format to use to get original image
    :param allow_oversized: if False, masks that do not fit image_size will be skipped
    :param config: if given, config file is used to know if mini_masks are used
    :param verbose: level of verbosity
    :return: (N, 4) ndarray of [y1, x1, y2, x2] matching bboxes
    """
    imagePath = os.path.join(datasetPath, imageName, 'images',
                             f'{imageName}.{imageFormat}')
    image = cv2.imread(imagePath, cv2.IMREAD_COLOR)
    image_shape = image.shape[:2]
    classDirPath = os.path.join(datasetPath, imageName, classToCenter)
    maskList = os.listdir(classDirPath)
    classBboxes = np.zeros((len(maskList), 4), dtype=int)
    toDelete = []
    for idx, mask in enumerate(maskList):
        maskPath = os.path.join(classDirPath, mask)
        if config is not None and config.is_using_mini_mask():
            bbox = getBboxFromName(mask)
        else:
            maskImage = cv2.imread(maskPath, cv2.IMREAD_GRAYSCALE)
            # extract_bboxes expects [H, W, N]: add an instance axis, take row 0
            bbox = utils.extract_bboxes(maskImage[..., np.newaxis])[0]
        if not allow_oversized:
            h, w = bbox[2:] - bbox[:2]
            if h > image_size or w > image_size:
                if verbose > 1:
                    print(
                        f"{mask} mask could not fit into {(image_size, image_size)} image"
                    )
                toDelete.append(idx)
        classBboxes[idx] = center_mask(bbox,
                                       image_shape,
                                       min_output_shape=image_size,
                                       verbose=verbose)
    classBboxes = np.delete(classBboxes, toDelete, axis=0)
    return classBboxes
Example No. 25
def display_samples(self):
    """
    Display 20 samples with their masks
    """
    for i in range(20):
        # define image id
        image_id = i
        # load the image
        image = self.train_set.load_image(image_id)
        # load the masks and the class ids
        mask, class_ids = self.train_set.load_mask(image_id)
        # extract bounding boxes from the masks
        bbox = extract_bboxes(mask)
        # display image with masks and bounding boxes
        display_instances(image, bbox, mask, class_ids, self.train_set.class_names)
Example No. 26
def loadOnlyMask(imagePath, imageShape):
    mask = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
    if mask.shape[0] != imageShape[0] or mask.shape[1] != imageShape[1]:
        # Finding bbox coordinates from image name
        bbox = getBboxFromName(imagePath)
        shifted = shift_bbox(bbox)
        y1, x1, y2, x2 = shifted

        # Expanding mask to its real size
        mask = expand_mask(shifted, mask, image_shape=shifted[2:])
        mask = mask.astype(np.uint8) * 255
    else:
        # Extracting the bbox of the full-size mask (assuming a 2D mask);
        # extract_bboxes expects [H, W, N] and returns [N, 4]
        bbox = extract_bboxes(mask[..., np.newaxis])[0]
        y1, x1, y2, x2 = bbox
    return mask[y1:y2, x1:x2, ...], bbox
Example No. 27
def show_image(dataset: Dataset) -> NoReturn:
    """
    Allows browsing the images in a dataset
    :param dataset:
    :return:
    """
    # define image id
    image_id = 1
    # load the image
    image = dataset.load_image(image_id)
    # load the masks and the class ids
    mask, class_ids = dataset.load_mask(image_id)
    # extract bounding boxes from the masks
    bbox = extract_bboxes(mask)
    # display image with masks and bounding boxes
    display_instances(image, bbox, mask, class_ids, dataset.class_names)
Example No. 28
    def load_mask(self, image_id_):
        """ Generate instance masks for cells of the given image ID. """
        info = self.image_info[image_id_]
        info = info.get("id")

        path = os.path.join('data', info)

        # Counting masks for current image
        number_of_masks = 0
        masks_dir_list = {p: self.__CLASS_ASSOCIATION[format_text(p)] for p in os.listdir(path)
                          if format_text(p) in self.__CLASS_ASSOCIATION}
        for masks_dir in masks_dir_list:
            temp_DIR = os.path.join(path, masks_dir)
            # https://stackoverflow.com/a/2632251/9962046
            number_of_masks += len([name_ for name_ in os.listdir(temp_DIR)
                                    if os.path.isfile(os.path.join(temp_DIR, name_))])
        if self.__CONFIG.get_param().get('resize', None) is not None:
            masks_shape = tuple(self.__CONFIG.get_param().get('resize', None)) + (number_of_masks,)
        elif self.__CONFIG.is_using_mini_mask():
            masks_shape = self.__CONFIG.get_mini_mask_shape() + (number_of_masks,)
        else:
            masks_shape = (self.__IMAGE_INFO["HEIGHT"], self.__IMAGE_INFO["WIDTH"], number_of_masks)
        masks = np.zeros(masks_shape, dtype=np.uint8)
        bboxes = np.zeros((number_of_masks, 4), dtype=np.int32)
        iterator = 0
        class_ids = np.zeros((number_of_masks,), dtype=int)
        for masks_dir, mask_class in masks_dir_list.items():
            temp_class_id = self.__CUSTOM_CLASS_NAMES.index(mask_class) + 1
            masks_dir_path = os.path.join(path, masks_dir)
            for mask_file in os.listdir(masks_dir_path):
                mask = imread(os.path.join(masks_dir_path, mask_file))
                mask = np.where(mask > 220, 255, 0).astype(np.uint8)
                masks[:, :, iterator] = mask
                if self.__CONFIG.is_using_mini_mask():
                    bboxes[iterator] = getBboxFromName(mask_file)
                else:
                    # extract_bboxes expects [H, W, N]: add an instance axis
                    bboxes[iterator] = utils.extract_bboxes(mask[..., np.newaxis])[0]
                class_ids[iterator] = temp_class_id
                iterator += 1
        # Handle occlusions /!\ In our case there is no possible occlusion (part of object that
        # is hidden), all objects are complete (some are parts of other)
        if self.__ENABLE_OCCLUSION:
            occlusion = np.logical_not(masks[:, :, -1]).astype(np.uint8)
            for i in range(number_of_masks - 2, -1, -1):
                masks[:, :, i] = masks[:, :, i] * occlusion
                occlusion = np.logical_and(occlusion, np.logical_not(masks[:, :, i]))
        return masks, class_ids.astype(np.int32), bboxes
Example No. 29
def main():
    dataset = elevator_rgbd.ElevatorRGBDDataset()
    dataset.load_elevator_rgbd(args.input, "train")
    dataset.prepare()

    # Load random image and mask.
    image_ids = np.random.choice(dataset.image_ids, 10)
    i = 0
    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids = dataset.load_mask(image_id)
        # Compute Bounding box
        bbox = utils.extract_bboxes(mask)

        # Display image and additional stats
        print("image_id ", image_id, dataset.image_reference(image_id))
        log("image", image)
        log("mask", mask)
        log("class_ids", class_ids)
        log("bbox", bbox)
        # Display image and instances
        blank = np.zeros((512, 512, 3), image.dtype)
        print(blank.shape)
        print(image.shape)
        msk_image = visualize.display_instances(blank, bbox, mask, class_ids,
                                                dataset.class_names)
        rgb_image = image[:, :, 0:3]
        dpt_image = image[:, :, 3]
        fig = skimage.io.imshow(rgb_image)
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)
        plt.show()
        fig = skimage.io.imshow(dpt_image)
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)
        plt.show()
        print(msk_image.shape)
        print(msk_image.dtype)
        skimage.io.imsave("elevator_dataset_sample_" + str(i) + "_mask.png",
                          msk_image.astype(np.uint8))
        skimage.io.imsave("elevator_dataset_sample_" + str(i) + "_rgb.png",
                          rgb_image)
        skimage.io.imsave("elevator_dataset_sample_" + str(i) + "_dpt.png",
                          dpt_image)

        i += 1
Example No. 30
def display_bbox(image_id):
    # Load random image and mask.
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Compute Bounding box
    bboxes = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id ", image_id, dataset.image_reference(image_id))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bboxes)
    # Display image and instances (each instance's bbox and mask)
    visualize.display_instances(image, bboxes, mask, class_ids,
                                dataset.class_names)