Example #1
def MaskDetect(videoPath, frameIndex, outfile):
    videoObj = cv2.VideoCapture(videoPath)
    cell_num = 3
    if not videoObj.isOpened():
        print('Cannot open video')
        return
    ret, frame = videoObj.read()
    [height, width, _] = frame.shape
    model = LoadModel()
    index = 0
    while videoObj.isOpened():
        ret, frame = videoObj.read()
        if not ret:  # end of video or failed read
            break
        index += 1
        if index < 20000:
            continue
        results, featureMap = MaskFeature(frame,
                                          cell_num=cell_num,
                                          model=model)
        # results = model.detect([frame], verbose=1)
        # r = copy.deepcopy(results[0])
        # featureMap = LocationFeature(results, width, height,cell_num=cell_num)
        featureMap = featureMap.reshape([-1, cell_num, cell_num])
        print(featureMap)
        r = results[0]
        visualize.display_instances(frame, r['rois'], r['masks'],
                                    r['class_ids'], class_names, r['scores'])
    del model
    videoObj.release()
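A minimal usage sketch for the function above. The video path and output file name are placeholder assumptions; note that frameIndex and outfile are accepted but not used by the body as written.

if __name__ == '__main__':
    # Placeholder arguments; MaskDetect currently ignores frameIndex and outfile.
    MaskDetect('input_video.mp4', frameIndex=0, outfile='detections.txt')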
Example #2
    def detect(self, file_path):
        image = skimage.io.imread(file_path)  # input image
        # Run detection
        results = self.model.detect([image], verbose=1)

        # Visualize results
        r = results[0]
        visualize.display_instances(image, r['rois'], r['masks'],
                                    r['class_ids'], self.class_names,
                                    r['scores'])  # output
Example #3
    def recognize(self, image):

        result = self.model.detect([image], verbose=1)[0]
        result['objects'] = []

        r = result
        visualize.display_instances(image, r['rois'], r['masks'],
                                    r['class_ids'], self.class_names,
                                    r['scores'])

        for i in result['class_ids']:
            result['objects'].append(self.class_names[i])

        return result
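A hedged usage line for recognize above; 'recognizer' stands in for an instance of the (not shown) class that owns this method, and the image path is a placeholder.

result = recognizer.recognize(skimage.io.imread('test.jpg'))
print(result['objects'])  # human-readable class names, one per detection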
Example #4
    def detect(self, im, visualize_results=False):
        """im can be a numpy image (read with cv2) or an image path."""
        if isinstance(im, str):
            im = cv2.imread(im)
        results = self.model.detect([im], verbose=1)
        r = results[0]
        if visualize_results:
            visualize.display_instances(im,
                                        r['rois'],
                                        r['masks'],
                                        r['class_ids'],
                                        ['BG'] + self.class_names,
                                        r['scores'],
                                        ax=get_ax(),
                                        output_im_dir=self.output_dir)

        bounding_boxes = r['rois'].tolist()
        scores = r['scores'].tolist()
        class_ids = r['class_ids'].tolist()
        #im = self.visualize_image(im, bounding_boxes, scores, class_ids)
        #cv2.imshow('output', im)
        #cv2.waitKey()
        return bounding_boxes, scores, class_ids
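A hedged usage sketch for the detect method above; 'detector' is a hypothetical instance of the wrapper class (not shown), and the image path is a placeholder.

boxes, scores, class_ids = detector.detect('sample.jpg', visualize_results=False)
for (y1, x1, y2, x2), score, class_id in zip(boxes, scores, class_ids):
    # Mask R-CNN returns boxes in [y1, x1, y2, x2] pixel coordinates.
    print('class %d, score %.2f, box (%d, %d, %d, %d)' % (class_id, score, y1, x1, y2, x2))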
Example #5
def display_detection_masks(image,
                            result,
                            dataset=None,
                            image_id=None,
                            ground_truth=True,
                            proposals=True,
                            avg_precision=None,
                            recall=None,
                            classnames=False,
                            show=True,
                            fill_holes=True):
    """produce detection masks from nuclei detection results of a single image.
    """

    r = result
    n_images = 2
    if proposals:
        n_images += 1
    if ground_truth:
        n_images += 1
        gt_mask, gt_class_id = dataset.load_mask(image_id,
                                                 fill_holes=fill_holes)
        gt_bbox = mrnn_utils.extract_bboxes(gt_mask)

    empty_classnames = ['', '']
    if classnames:
        classnames_use = dataset.class_names
    else:
        classnames_use = empty_classnames

    hw_ratio = image.shape[0] / image.shape[1]
    if n_images == 2:
        fig, axes = plt.subplots(1, 2, figsize=(20, 10 * hw_ratio))
    elif n_images == 3:
        fig, axes = plt.subplots(1, 3, figsize=(30, 10 * hw_ratio))
    elif n_images == 4:
        fig, axes = plt.subplots(2, 2, figsize=(20, 20 * hw_ratio))

    axes = axes.ravel()
    # original image
    visualize.display_instances(image,
                                np.array([]),
                                np.array([]),
                                np.array([]),
                                empty_classnames,
                                ax=axes[0],
                                show=False,
                                mask_alpha=0,
                                verbose=False)

    if proposals:
        visualize.display_bbox(image,
                               r['all_rois'],
                               r['all_class_ids'],
                               empty_classnames,
                               None,
                               ax=axes[1],
                               show=False,
                               verbose=False)

    if ground_truth:
        visualize.display_instances(image,
                                    gt_bbox,
                                    gt_mask,
                                    gt_class_id,
                                    classnames_use,
                                    ax=axes[-2],
                                    show=False,
                                    mask_alpha=0,
                                    verbose=False)
        axes[-2].set_title(format("ground truth: %d nuclei" %
                                  len(gt_class_id)),
                           fontsize=20)

    # detection results
    visualize.display_instances(image,
                                r['rois'],
                                r['masks'],
                                r['class_ids'],
                                classnames_use,
                                r['scores'],
                                ax=axes[-1],
                                show=False,
                                mask_alpha=0,
                                verbose=False)
    avg_prec_str = ""
    if avg_precision is not None:
        avg_prec_str = format("AP: %.03f" % avg_precision)
    recall_str = ""
    if recall is not None:
        recall_str = format("recall: %.03f" % recall)
    axes[-1].set_title(format("detection: %d nuclei. %s %s" %
                              (len(r['class_ids']), avg_prec_str, recall_str)),
                       fontsize=20)

    fig.tight_layout()
    if show:
        plt.show()
    else:
        return fig, axes
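A sketch of how the helper above might be driven, assuming a prepared NucleiDataset, an inference-mode model, and an image_id; all three names are assumptions, and proposals is left off here because a standard model.detect() result does not include an 'all_rois' key.

# Assumed setup: 'dataset' is a prepared NucleiDataset, 'model' is in inference mode.
image = dataset.load_image(image_id)
result = model.detect([image], verbose=0)[0]
fig, axes = display_detection_masks(image, result,
                                    dataset=dataset,
                                    image_id=image_id,
                                    ground_truth=True,
                                    proposals=False,
                                    show=False)
fig.savefig('detection_overview.png', dpi=100)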
Example #6
def generate_ground_truth(dataset_dir,
                          dataset_name,
                          output_folder,
                          image_ids=None,
                          mask_alpha=0,
                          resize=False):
    """produce ground truth mask images from labeled data.
    dataset_name: a folder inside dataset_dir
            each example (images + masks) are in a separate folder named after the image.
    output_folder:
    image_ids: subset of image id's to use
    mask_alpha: higher alpha gives stronger color to mask on top of raw image.
    """
    input_folder = os.path.join(dataset_dir, dataset_name)
    postfix = ".png"
    empty_classnames = ['', '']
    all_images = [x for x in os.listdir(input_folder) if not x.startswith(".")]
    if image_ids is not None:
        assert set(image_ids).issubset(set(all_images))
        images_use = image_ids
    else:
        images_use = all_images

    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    # if len([x for x in os.listdir(output_folder) if not x.startswith(".")]) > 0:
    #     print("Target folder not empty. Abort.")
    #     return None
    dataset = NucleiDataset()
    dataset.load_dataset(dataset_dir, dataset_name, image_ids=images_use)
    dataset.prepare()
    for i, image_id in enumerate(dataset.image_ids):
        if i % 50 == 0:
            print(i)
        original_image = dataset.load_image(image_id)
        gt_mask, gt_class_id = dataset.load_mask(image_id)
        gt_bbox = mrnn_utils.extract_bboxes(gt_mask)

        hw_ratio = original_image.shape[0] / original_image.shape[1]
        fig, axes = plt.subplots(1, 2, figsize=(20, 10 * hw_ratio))

        visualize.display_instances(original_image,
                                    np.array([]),
                                    np.array([]),
                                    np.array([]),
                                    empty_classnames,
                                    ax=axes[0],
                                    show=False,
                                    mask_alpha=0,
                                    verbose=False)
        visualize.display_instances(original_image,
                                    gt_bbox,
                                    gt_mask,
                                    gt_class_id,
                                    empty_classnames,
                                    ax=axes[1],
                                    show=False,
                                    mask_alpha=0,
                                    verbose=False)
        axes[1].set_title(format("ground truth: %d nuclei" % len(gt_class_id)),
                          fontsize=20)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        fig.savefig(os.path.join(output_folder,
                                 dataset.image_info[image_id]['id'] + postfix),
                    dpi=100)
        plt.close(fig)
        gc.collect()
    print(
        format("%d ground truth images generated in folder %s" %
               (len(dataset.image_ids), output_folder)))
    return
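A call sketch for generate_ground_truth above; the directory names are placeholders and assume the expected layout of one sub-folder per image inside dataset_dir/dataset_name.

# Placeholder paths; writes one annotated PNG per image into 'ground_truth_png'.
generate_ground_truth('data', 'stage1_train', 'ground_truth_png',
                      image_ids=None, mask_alpha=0)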
Example #7
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = [
    'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush'
]

# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))

# Run detection
results = model.detect([image], verbose=1)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
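The snippet above assumes that model and IMAGE_DIR are already defined. A minimal setup sketch in the spirit of the standard matterport Mask_RCNN COCO demo; the paths are placeholders and the InferenceConfig values are assumptions matching the 80-class COCO model.

import mrcnn.model as modellib
from mrcnn.config import Config

class InferenceConfig(Config):
    # Minimal inference settings: one image per batch, COCO class count.
    NAME = "coco"
    NUM_CLASSES = 1 + 80  # background + 80 COCO classes
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

MODEL_DIR = "logs"                     # placeholder: directory for model logs
COCO_MODEL_PATH = "mask_rcnn_coco.h5"  # placeholder: pretrained COCO weights
IMAGE_DIR = "images"                   # placeholder: folder of test images

model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
                          config=InferenceConfig())
model.load_weights(COCO_MODEL_PATH, by_name=True)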