Code Example #1
    def _visualize(self, detection, image):
        """Generate a visual detection for a detection with matplotlib.

        Parameters
        ----------
        - detection: dict
            A single Mask RCNN detection

        - image: ndarray
            Original image.

        Returns
        ----------
        - vis_plt_detection: ndarray
            The rendered detection as an RGB image array.

        """
        fig = Figure()
        canvas = FigureCanvasAgg(fig)
        axes = fig.gca()
        visualize.display_instances(image,
                                    detection['rois'],
                                    detection['masks'],
                                    detection['class_ids'],
                                    CLASS_NAMES,
                                    detection['scores'],
                                    ax=axes,
                                    class_colors=self._class_colors)
        fig.tight_layout()
        canvas.draw()
        vis_plt_detection = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')

        _, _, w, h = fig.bbox.bounds
        vis_plt_detection = vis_plt_detection.reshape((int(h), int(w), 3))
        return vis_plt_detection
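
Note: np.fromstring and the Agg canvas's tostring_rgb() are deprecated in recent
NumPy/matplotlib releases. A minimal sketch of the equivalent figure-to-array
conversion with the current buffer_rgba() API (the helper name is illustrative):

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

def figure_to_rgb_array(fig: Figure) -> np.ndarray:
    """Render a matplotlib Figure into an (H, W, 3) uint8 RGB array."""
    canvas = FigureCanvasAgg(fig)
    canvas.draw()
    rgba = np.asarray(canvas.buffer_rgba())  # (H, W, 4) uint8
    return rgba[..., :3].copy()              # drop the alpha channel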
Code Example #2
def predict():
    # prepare config
    cfg = PredictionConfig()
    cfg.BATCH_SIZE = 1
    # define the model
    model = modellib.MaskRCNN(mode='inference', model_dir='./', config=cfg)
    # load model weights
    model.load_weights('mask_rcnn_weapon_cfg_0009.h5', by_name=True)

    ROOT_DIR = os.getcwd()
    IMAGE_DIR = os.path.join(ROOT_DIR, "armedperson")
    file_names = next(os.walk(IMAGE_DIR))[2]

    for _ in range(10):
        i = randint(0, len(file_names) - 1)
        image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[i]))
        results = model.detect([image], verbose=1)
        # Visualize results
        r = results[0]
        class_names = ['BG', 'weapon']
        print(r['scores'])
        scores = r['scores']
        is_display = True
        # for score in scores:
        #     if score < 0.9:
        #         is_display = False
        #         break

        if is_display:
            visualize.display_instances(image, r['rois'], r['masks'],
                                        r['class_ids'], class_names,
                                        r['scores'])
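
Note: PredictionConfig is defined elsewhere. Assuming the standard Matterport
mrcnn.config.Config base class, it presumably looks something like this minimal
sketch (the name and class count are guesses based on the weapon weights above):

from mrcnn.config import Config

class PredictionConfig(Config):
    # Hypothetical config for the snippet above
    NAME = "weapon_cfg"
    NUM_CLASSES = 1 + 1   # background + weapon
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1    # the snippet additionally forces BATCH_SIZE = 1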
Code Example #3
def visualize_instance_segmentation(data_base_dir,
                                    dataset_type,
                                    image_id,
                                    save_path='',
                                    verbose=True):
    split_dataset = SketchDataset(data_base_dir)
    split_dataset.load_sketches(dataset_type)
    split_dataset.prepare()

    original_image = split_dataset.load_image(image_id - 1)
    gt_mask, gt_class_id = split_dataset.load_mask(image_id - 1)
    gt_bbox = utils.extract_bboxes(gt_mask)

    if verbose:
        log('original_image', original_image)
        log('gt_class_id', gt_class_id)
        log('gt_bbox', gt_bbox)
        log('gt_mask', gt_mask)

    visualize.display_instances(original_image,
                                gt_bbox,
                                gt_mask,
                                gt_class_id,
                                split_dataset.class_names,
                                save_path=save_path)
Code Example #4
File: demo.py Project: MFattouh/pytorch-mask-rcnn
def main():

    config = InferenceConfig()
    config.display()

    # Create model object.
    model = modellib.MaskRCNN(model_dir=MODEL_DIR, config=config)
    if config.GPU_COUNT:
        model = model.cuda()

    # Load weights trained on MS-COCO
    model.load_state_dict(torch.load(COCO_MODEL_PATH))

    # COCO Class names
    # Index of the class in the list is its ID. For example, to get ID of
    # the teddy bear class, use: class_names.index('teddy bear')

    # Load a random image from the images folder
    file_names = next(os.walk(IMAGE_DIR))[2]
    image = skimage.io.imread(
        os.path.join(IMAGE_DIR, random.choice(file_names)))

    # Run detection
    results = model.detect([image])

    # Visualize results
    r = results[0]
    visualize.display_instances(image, r['rois'], r['masks'],
                                r['class_ids'], class_names, r['scores'])
    plt.show()
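
Note: InferenceConfig comes from the pytorch-mask-rcnn demo; it is typically a
one-image-at-a-time override of the COCO config, roughly as sketched below (an
assumption, not the project's exact code):

import coco

class InferenceConfig(coco.CocoConfig):
    # Run detection on one image at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1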
Code Example #5
File: run.py Project: WXR1998/SIGOC
    def savefig():
        visualize.display_instances(
            image, gt_bbox, gt_mask, gt_class_ids,
            [categories.category2name(i) for i in range(categories.cate_cnt)],
            savefilename=os.path.join(save_visual_path, '%05d_gt.jpg' % i))
        visualize.display_instances(
            image, bbox, mask, class_ids,
            [categories.category2name(i) for i in range(categories.cate_cnt)],
            savefilename=os.path.join(save_visual_path, '%05d_pred.jpg' % i))
Code Example #6
def debug_saved_npz(dataset_type, img_idx, data_base_dir):
    outputs_base_dir = 'outputs'
    seg_data_save_base_dir = os.path.join(outputs_base_dir, 'inst_segm_output_data', dataset_type)

    npz_name = os.path.join(seg_data_save_base_dir, str(img_idx) + '_datas.npz')
    npz = np.load(npz_name)

    pred_class_ids = np.array(npz['pred_class_ids'], dtype=np.int32)
    pred_boxes = np.array(npz['pred_boxes'], dtype=np.int32)
    pred_masks_s = npz['pred_masks']
    pred_masks = expand_small_segmentation_mask(pred_masks_s, pred_boxes)  # [N, H, W]

    pred_masks = np.transpose(pred_masks, (1, 2, 0))
    print(pred_class_ids.shape)
    print(pred_masks.shape)
    print(pred_boxes.shape)

    image_name = 'L0_sample' + str(img_idx) + '.png'
    images_base_dir = os.path.join(data_base_dir, dataset_type, 'DRAWING_GT')
    image_path = os.path.join(images_base_dir, image_name)
    original_image = Image.open(image_path).convert("RGB")
    original_image = original_image.resize((768, 768), resample=Image.NEAREST)
    original_image = np.array(original_image, dtype=np.float32)  # shape = [H, W, 3]

    dataset_class_names = ['bg']
    color_map_mat_path = os.path.join(data_base_dir, 'colorMapC46.mat')
    colorMap = scipy.io.loadmat(color_map_mat_path)['colorMap']
    for i in range(46):
        cat_name = colorMap[i][0][0]
        dataset_class_names.append(cat_name)

    visualize.display_instances(original_image, pred_boxes, pred_masks, pred_class_ids,
                                dataset_class_names, figsize=(8, 8))
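
Note: expand_small_segmentation_mask is not shown. One plausible implementation,
assuming each stored mask is a per-box crop that must be resized and pasted back
into its [y1, x1, y2, x2] box on a full-size canvas (in the spirit of
mrcnn.utils.expand_mask):

import numpy as np
from skimage.transform import resize

def expand_small_segmentation_mask(small_masks, boxes, image_size=768):
    """Hypothetical helper: paste per-box mask crops onto full-size canvases.
    Assumes boxes hold [y1, x1, y2, x2] pixel coordinates."""
    full_masks = np.zeros((boxes.shape[0], image_size, image_size), dtype=np.uint8)
    for i, (y1, x1, y2, x2) in enumerate(boxes):
        crop = resize(small_masks[i].astype(float), (y2 - y1, x2 - x1), order=1)
        full_masks[i, y1:y2, x1:x2] = (crop >= 0.5).astype(np.uint8)
    return full_masks  # [N, H, W], matching the caller's comment above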
Code Example #7
def MaskDetect(videoPath, frameIndex, outfile):

    videoObj = cv2.VideoCapture(videoPath)
    ROOT_DIR = os.getcwd()
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
    if not videoObj.isOpened():
        print('Cannot open video')
        return
    ret, frame = videoObj.read()
    [height, width, _] = frame.shape
    portrait = False
    maxL = max(height, width)
    if height == maxL:
        portrait = True
    minL = min(height, width)
    ratio = maxL / minL
    minL = 64 * round(minL / 2**6)
    maxL = minL * ratio
    maxL = 64 * round(maxL / 2**6)
    if portrait:
        height = maxL
        width = minL
    else:
        height = minL
        width = maxL
    # print(height,width)
    config = TriConfig()
    # config.IMAGE_SHAPE = np.array([maxL, maxL, 3])
    # config.IMAGE_MIN_DIM = minL
    # config.IMAGE_MAX_DIM = maxL
    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=config)
    # plot_model(model.keras_model, to_file='model.png')
    cell_num = 3
    index = 0
    while videoObj.isOpened():
        ret, frame = videoObj.read()
        if not ret:
            break
        index += 1
        if index < 27000:
            continue
        frame = cv2.resize(frame, (width, height))
        results = model.detect([frame], verbose=1)
        r = copy.deepcopy(results[0])
        print(r)
        featureMap = LocationFeature(results, width, height, ceilNum=cell_num)
        featureMap = featureMap.reshape([-1, cell_num, cell_num])
        print(featureMap)
        visualize.display_instances(frame, r['rois'], r['masks'],
                                    r['class_ids'], class_names, r['scores'])
        # print(results)
        # # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('frame', frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
    del model
    videoObj.release()
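
Note: the rounding arithmetic above exists because Mask R-CNN downsamples its
input by powers of two, up to a factor of 2**6 = 64, so both frame sides must be
multiples of 64. A small helper (illustrative name) makes the intent explicit:

def round_to_multiple_of_64(length):
    # Mask R-CNN requires image sides divisible by 2**6
    return 64 * round(length / 64)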
Code Example #8
def detect_and_color_splash(model, image_path=None, video_path=None):
    assert image_path or video_path
    class_names = ["None", "fibre"]
    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect
        print("Running on {}".format(args.image))
        # Read image
        image = skimage.io.imread(args.image)
        # Detect objects
        time1 = time.time()
        r = model.detect([image], verbose=1)[0]
        visualize.display_instances(image, r["rois"], r["masks"],
                                    r["class_ids"], class_names, r["scores"])
        # print("time:", time.time() - time1)
        # print(image.shape)
        # Color splash
        # splash = color_splash(image, r['masks'])
        # Save output
        # file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        # skimage.io.imsave(file_name, splash)

    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)

        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(
            datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))

        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to save image to video
                splash = splash[..., ::-1]
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
    # print("Saved to ", file_name)
    return r
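
Note: color_splash is defined elsewhere; this snippet appears to follow the
Matterport balloon sample, where the helper keeps color inside the detected
masks and grays out everything else:

import numpy as np
import skimage.color

def color_splash(image, mask):
    """Gray out the image except where instances were detected."""
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    if mask.shape[-1] > 0:
        # Treat all instances as one: collapse [H, W, N] into a single layer
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        splash = gray.astype(np.uint8)
    return splash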
Code Example #9
def detectDuckie(model,
                 dataset,
                 config,
                 video_path=None,
                 live=False,
                 video=False):
    import random
    from visualize import display_instances
    from videoVisualise import getPoints, displayDetections
    import matplotlib.pyplot as plt
    import modellib
    from vidDetectionLive import detectVideoLive
    from vidDetectionOffline import detectVideoOffline

    #    assert(video is True and live is False and video_path is not None)

    # If running on photo
    if video is False:
        print("Images: {}\nClasses: {}".format(len(dataset.image_ids),
                                               dataset.class_names))

        image_id = random.choice(dataset.image_ids)
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, config, image_id, use_mini_mask=False)
        # Run object detection
        results = model.detect([image], verbose=1)

        # Display results
        fig = plt.figure(figsize=(15, 15 / 3))
        ax = fig.add_subplot(111)
        r = results[0]
        display_instances(image,
                          r['rois'],
                          r['masks'],
                          r['class_ids'],
                          dataset.class_names,
                          r['scores'],
                          ax=ax)

        topLeft, botLeft, topRight, botRight, centre = getPoints(r['rois'])

        # Verification plot
        for i in range(r['rois'].shape[0]):
            plt.scatter(topLeft[i][0], topLeft[i][1])
            plt.scatter(botLeft[i][0], botLeft[i][1])
            plt.scatter(topRight[i][0], topRight[i][1])
            plt.scatter(botRight[i][0], botRight[i][1])
            plt.scatter(centre[i][1], centre[i][0])

    elif video is True and live is True:
        detectVideoLive(model)
    elif video is True and live is False and video_path is not None:
        detectVideoOffline(model, video_path)
    else:
        raise ValueError("Non-valid testing configuration")
Code Example #10
    def dataset(self, config, debug=False):
        # #### load dataset
        train_dataset = coco.CocoDataSet('./COCO2017',
                                         'train',
                                         flip_ratio=0.,
                                         pad_mode='fixed',
                                         config=config,
                                         debug=False)
        print(train_dataset.get_categories())
        assert config.NUM_CLASSES == len(
            train_dataset.get_categories()
        ), f"NUM_CLASSES must match the dataset: {config.NUM_CLASSES} != {len(train_dataset.get_categories())}"
        train_generator = data_generator.DataGenerator(train_dataset)
        train_tf_dataset = tf.data.Dataset.from_generator(
            train_generator, (tf.float32, tf.float32, tf.float32, tf.int32,
                              tf.float32, tf.float32))
        self.train_tf_dataset = train_tf_dataset.padded_batch(
            config.IMAGES_PER_GPU,
            padded_shapes=(
                [None, None, None],  # img
                [None],              # img_meta
                [None, None],        # bboxes
                [None],              # labels
                [None, None, None],  # masks
                [None, None, 1]))    # global_mask
        eval_dataset = coco.CocoDataSet('./COCO2017',
                                        'val',
                                        flip_ratio=0.,
                                        pad_mode='fixed',
                                        config=config,
                                        debug=False)
        eval_generator = data_generator.DataGenerator(eval_dataset)
        eval_tf_dataset = tf.data.Dataset.from_generator(
            eval_generator, (tf.float32, tf.float32, tf.float32, tf.int32,
                             tf.float32, tf.float32))
        self.eval_tf_dataset = eval_tf_dataset.padded_batch(
            config.IMAGES_PER_GPU,
            padded_shapes=(
                [None, None, None],  # img
                [None],              # img_meta
                [None, None],        # bboxes
                [None],              # labels
                [None, None, None],  # masks
                [None, None, 1]))    # global_mask
        if debug:
            idx = np.random.choice(range(len(train_dataset)))
            img, img_meta, bboxes, labels, masks, global_mask = train_dataset[idx]
            rgb_img = np.round(img + config.MEAN_PIXEL)
            ori_img = utils.get_original_image(img, img_meta,
                                               config.MEAN_PIXEL)
            visualize.display_instances(rgb_img, bboxes, labels,
                                        train_dataset.get_categories())
        self.train_dataset = train_dataset
Code Example #11
def predict(image):
    model = get_model_instance_segmentation(2)
    model.load_state_dict(
        torch.load('best_model', map_location=torch.device('cpu')))
    model.eval()
    image = Image.open(image).convert('RGB')
    img = loader(image)
    with torch.no_grad():
        output = model(img[None, ...])[0]
    display_instances(image, output['boxes'], output['labels'], class_names,
                      output['scores'])
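
Note: get_model_instance_segmentation is presumably the helper from the
torchvision instance segmentation tutorial, which swaps new box and mask heads
onto a COCO-pretrained Mask R-CNN; loader is assumed to be a torchvision
transform pipeline. A sketch:

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

def get_model_instance_segmentation(num_classes):
    # Start from a Mask R-CNN pretrained on COCO
    # (use weights="DEFAULT" instead of pretrained=True on newer torchvision)
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # Replace the box predictor, sized for num_classes
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    # Replace the mask predictor likewise
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       256, num_classes)
    return model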
Code Example #12
def save_result(r, image, i):
    print('saving res')
    class_names = ['BG', 'person']
    visualize.display_instances(
        image,
        r['rois'],
        r['masks'],
        r['class_ids'],
        class_names,
        r['scores'],
        figoutpath='./output_images/{}_pldetection.jpg'.format(i))
    # Change to save
    return
Code Example #13
def test_dataset(model, dataset, nr_images):

    for i in range(nr_images):

        image_id = dataset.image_ids[i]

        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        info = dataset.image_info[image_id]
        #print("info",info)

        #detection
        r = model.detect([image], verbose=0)[0]

        print(r['class_ids'].shape[0])
        print(r['class_ids'].shape[0],
              file=codecs.open('amount_' + str(i) + '.txt', 'w', 'utf-8'))

        if r['class_ids'].shape[0] > 0:
            r_fused = utils.fuse_instances(r)
        else:
            r_fused = r

        fig, ax1 = plt.subplots(1, 1, figsize=(16, 16))
        #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 16))

        # Display predictions
        visualize.display_instances(image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    dataset.class_names,
                                    r['scores'],
                                    title="Predictions",
                                    ax=ax1)
        """
        visualize.display_instances(image, r_fused['rois'], r_fused['masks'], r_fused['class_ids'],
                                     dataset.class_names, r_fused['scores'], title="Predictions fused", ax=ax2)
        """

        # Display ground truth
        #visualize.display_instances(image, gt_bbox, gt_mask, gt_class_id, dataset.class_names, title="GT", ax=ax3)

        # Show the image
        plt.show()
Code Example #14
File: detector.py Project: mhrah7495/TACO
def test_dataset(model, dataset, nr_images):

    for i in range(nr_images):
        os.makedirs('detections', exist_ok=True)

        image_id = dataset.image_ids[i] if nr_images == len(
            dataset.image_ids) else random.choice(dataset.image_ids)

        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        info = dataset.image_info[image_id]

        r = model.detect([image], verbose=0)[0]

        print(r['class_ids'].shape)
        if r['class_ids'].shape[0] > 0:
            r_fused = utils.fuse_instances(r)
        else:
            r_fused = r

        fig, (ax1, ax3) = plt.subplots(1, 2, figsize=(16, 16))

        # Display predictions
        visualize.display_instances(image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    dataset.class_names,
                                    r['scores'],
                                    title="Predictions",
                                    ax=ax1)

        #visualize.display_instances(image, r_fused['rois'], r_fused['masks'], r_fused['class_ids'],
        #dataset.class_names, r_fused['scores'], title="Predictions fused", ax=ax2)

        # Display ground truth
        visualize.display_instances(image,
                                    gt_bbox,
                                    gt_mask,
                                    gt_class_id,
                                    dataset.class_names,
                                    title="GT",
                                    ax=ax3)
        plt.savefig('detections/{}.jpg'.format(i))
        # Voilà
        plt.show()
Code Example #15
File: maskrcnn_api.py Project: pshivraj/clomask
    def _save_masks(self, image, res, output_path):
        captions = ["{:.3f}".format(score) for score in res['scores']]
        visualize.display_instances(image,
                                    res['rois'],
                                    res['masks'],
                                    res['class_ids'],
                                    self.class_names,
                                    res['scores'],
                                    show_label=True,
                                    show_bbox=False,
                                    captions=captions,
                                    figsize=(8, 8),
                                    savepath=output_path)
        out = post_process(skimage.io.imread(output_path))
        skimage.io.imsave(output_path, out)
Code Example #16
    def show_result(self, imagename):
        image = skimage.io.imread(imagename)

        # Run detection
        results = model.detect([image], verbose=1)

        # Visualize results
        r = results[0]
        visualize.display_instances(image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    class_names,
                                    r['scores'],
                                    ax=self.axes)  # minor change
Code Example #17
    def evaluate_gdxray(self, model, dataset):
        """Evalua la imagen.
        model: modelo con el que se va a evaluar la imagen.
        dataset: imagen a evaluar.
        """
        # Load image
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset, config, 0, use_mini_mask=False)

        # Run object detection
        results = model.detect([image], verbose=0)
        
        # Visualize results
        self.r = results[0]
        visualize.display_instances(image, self.r['rois'], self.r['masks'],
                                    self.r['class_ids'], dataset.class_names,
                                    self.r['scores'], visualize=False)
        self.show_image()
Code Example #18
def show_and_save_detection(file_path):
    image = skimage.io.imread(file_path)
    print(file_path)

    # Run detection
    try:
        results = model.detect([image])
    except IndexError:
        results = None

    # Visualize results
    try:
        r = results[0]
    except TypeError as e:
        print("No detection found")
        return

    print(str(len(r['scores'])) + " objects detected")
    plot = visualize.display_instances(image, r['rois'], r['masks'],
                                       r['class_ids'], class_names,
                                       r['scores'])
    fig = plot.gcf()

    detections_name = file_path.split("/")[-1][:-4] + "_detections.png"
    detections_path = os.path.join(DETECTIONS_PATH, detections_name)
    fig.savefig(detections_path, bbox_inches='tight')
Code Example #19
File: func1.py Project: DragonGongY/Steel-scrap
def load_rec(image):
    # image = skimage.io.imread(image)
    # img = color.rgb2gray(image)
    # img = transform.resize(img, (1920,1080))
    # Run object detection
    results = model.detect([image], verbose=1)
    # Display results
    ax = get_ax(1)
    r = results[0]
    visualize.display_instances(image,
                                r['rois'],
                                r['masks'],
                                r['class_ids'],
                                class_names,
                                r['scores'],
                                ax=ax,
                                title='Predictions')
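
Note: get_ax is the small plotting helper used throughout the Matterport
notebooks; if it is not already in scope, a sketch:

import matplotlib.pyplot as plt

def get_ax(rows=1, cols=1, size=16):
    """Return a Matplotlib Axes array sized for notebook-style figures."""
    _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return ax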
Code Example #20
def get_fitness(population, target, net):
    '''
    Compute the fitness score for each example in population.

    Args:
        population (4D array): current generation features, [population_size x n_channels x h x w]
        target (1D array): target descriptor
        net: black box model to attack

    Returns:
        fitness (1D array): fitness scores (here, the sum of detection scores)
    '''
    # measure fitness as the sum of the model's detection scores
    N = population.shape[0]
    dim = target.shape[0]
    descP = torch.cuda.FloatTensor(N, dim)

    for i in range(N):
        # obtain candidate descriptors from the black box [N x ddim]

        try:
            img = population[i].permute(1, 2, 0)
            img = img.clamp(0, 1).cpu().numpy()

            img = img * 255  # Convert back to 0, 255 range
            img = img.astype(np.uint8)
            result = net.detect([img])[0]
            res = result['scores'].sum().item()

            # Visualize results
            if VISUALIZE_DETECTIONS:
                visualize.display_instances(img, result['rois'],
                                            result['masks'],
                                            result['class_ids'], class_names,
                                            result['scores'])
                plt.show()

        except IndexError:
            res = 0
            print("No detections found")

        descP[i] = torch.cuda.FloatTensor([res])
    t = target.expand(N, -1)  # [N x ddim]

    return descP[:, 0]
Code Example #21
def visualization(model, dataset_val, inference_config, img_id=0):
    print("Visualization (test image, ground truth)")
    # Test on the given image
    image_id = img_id
    original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config, image_id, use_mini_mask=False)
    log("original_image", original_image)
    log("image_meta", image_meta)
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)
    visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, dataset_val.class_names, figsize=(8, 8))
    print("Detecting for test image")
    results = model.detect([original_image], verbose=1)
    print("Visualization (on random Test Image, Predicted)")
    r = results[0]
    visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                                dataset_val.class_names, r['scores'])
Code Example #22
def detectRobotX(model, dataset, config, video=False):
    import random
    from visualize import display_instances
    import matplotlib.pyplot as plt
    import cv2
    import model as modellib
    from videoVisualise import getPoints, display_cv_instances

    print("Images: {}\nClasses: {}".format(len(dataset.image_ids),
                                           dataset.class_names))

    image_id = random.choice(dataset.image_ids)
    image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)
    info = dataset.image_info[image_id]

    print("image ID: {}.{} ({}) {}".format(info["source"], info["id"],
                                           image_id,
                                           dataset.image_reference(image_id)))

    # Run object detection
    results = model.detect([image], verbose=1)

    # Display results
    fig = plt.figure(figsize=(15, 15 / 3))
    ax = fig.add_subplot(111)
    r = results[0]
    display_instances(image,
                      r['rois'],
                      r['masks'],
                      r['class_ids'],
                      dataset.class_names,
                      r['scores'],
                      ax=ax,
                      title="Predictions")

    topLeft, botLeft, topRight, botRight, centre = getPoints(r['rois'])

    # Verification plot
    for i in range(r['rois'].shape[0]):
        plt.scatter(topLeft[i][0], topLeft[i][1])
        plt.scatter(botLeft[i][0], botLeft[i][1])
        plt.scatter(topRight[i][0], topRight[i][1])
        plt.scatter(botRight[i][0], botRight[i][1])
        plt.scatter(centre[i][1], centre[i][0])
Code Example #23
def return_visualized_image():
    # Get image from request and change to array
    image = fh.image_from_request(request)
    image = fh.image_to_array(image)

    # Run detection
    results = MODEL.detect([image])
    r = results[0]
    visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                CLASS_NAMES, r['scores'])

    buf = BytesIO()
    plt.savefig(buf, format='jpg')

    response = Response()
    response.set_data(buf.getvalue())
    response.headers['Content-Type'] = 'image/jpeg'
    return response
Code Example #24
    def inference(self, input_image_path, init_with='last'):
        # set config for inference properly
        self.config.GPU_COUNT = 1
        self.config.IMAGES_PER_GPU = 1
        model = modellib.RobotVQA(mode="inference",
                                  config=self.config,
                                  model_dir=self.MODEL_DIR)
        # Weights initialization: imagenet, coco, or last
        if init_with == "imagenet":
            model_path = model.get_imagenet_weights()
            model.load_weights(model_path,
                               by_name=True,
                               exclude=ExtendedRobotVQAConfig.EXCLUDE)
        elif init_with == "coco":
            # Load weights trained on MS COCO, but skip layers that
            # are different due to the different number of classes
            # See README for instructions to download the COCO weights
            model_path = self.ROBOTVQA_WEIGHTS_PATH
            model.load_weights(model_path,
                               by_name=True,
                               exclude=ExtendedRobotVQAConfig.EXCLUDE)
        elif init_with == "last":
            # Load the last model you trained and continue training
            model_path = model.find_last()[1]
            model.load_weights(model_path, by_name=True)
        print('Weights loaded successfully from ' + str(model_path))
        # load image
        image = utils.load_image(
            input_image_path[0], input_image_path[1],
            self.config.MAX_CAMERA_CENTER_TO_PIXEL_DISTANCE)
        # predict
        results = model.detect([image], verbose=1)
        r = results[0]
        dst = self.getDataset()
        class_ids = [
            r['class_cat_ids'], r['class_col_ids'], r['class_sha_ids'],
            r['class_mat_ids'], r['class_opn_ids'], r['class_rel_ids']
        ]
        scores = [
            r['scores_cat'], r['scores_col'], r['scores_sha'], r['scores_mat'],
            r['scores_opn'], r['scores_rel']
        ]
        visualize.display_instances(image[:, :, :3], r['rois'], r['masks'],
                                    class_ids, dst.class_names, r['poses'],
                                    scores=scores, axs=get_ax(cols=2),
                                    title='Object description',
                                    title1='Object relationships')
Code Example #25
File: detector.py Project: xuqy1981/TACO
def test_dataset(model, dataset, nr_images):

    for i in range(nr_images):
        image_id = random.choice(dataset.image_ids)

        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        info = dataset.image_info[image_id]

        r = model.detect([image], verbose=0)[0]

        # Display results
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                    dataset.class_names, r['scores'], title="Predictions")

        # Display ground truth
        #visualize.display_instances(image, gt_bbox, gt_mask, gt_class_id, dataset.class_names)

        print(r['class_ids'])
Code Example #26
def predict(images_dir):
    config = InferenceConfig()
    config.display()

    # Create model object.
    model = modellib.MaskRCNN(model_dir=LOGS_DIR, config=config)
    if config.GPU_COUNT:
        model = model.cuda()

    # Load weights trained
    model.load_state_dict(torch.load(ISIC_MODEL_PATH))

    if not os.path.exists(OUTPUTS_DIR):
        os.makedirs(OUTPUTS_DIR)

    images = filter_by_file_types(images_dir, os.listdir(images_dir),
                                  ["*.jpeg", "*.jpg"])
    images = sorted(images)
    total_images = len(images)
    cont = 0

    class_names = ["BG", "Lesion"]

    for image in images:
        image_name = image.split("/")[-1][:-4]

        if image_name not in IGNORE:
            img = skimage.io.imread(image)

            result = model.detect([img])
            pred = result[0]

            output_name = os.path.join(OUTPUTS_DIR, image_name + ".png")
            visualize.display_instances(img, pred['rois'], pred['masks'],
                                        pred['class_ids'], class_names,
                                        pred['scores'], title=output_name)

        cont = cont + 1
        print("Processed {}/{} images.".format(cont, total_images))
Code Example #27
def simpleValidation():
    class InferenceConfig(NucleusConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    inference_config = InferenceConfig()
    model = modellib.MaskRCNN(mode="inference",
                              config=inference_config,
                              model_dir=MODEL_DIR)

    model_path = model.find_last()[1]
    assert model_path != "", "Provide path to trained weights"
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    dataset_valid = ShapesDataset()
    dataset_valid.load_imgs(VALID_PATH)
    dataset_valid.prepare()

    image_id = dataset_valid.image_ids[3]
    original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset_valid, inference_config,
                               image_id, use_mini_mask=True, augment=False)

    log("original_image", original_image)
    log("image_meta", image_meta)
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)

    results = model.detect([original_image], verbose=1)

    r = results[0]

    visualize.display_instances(original_image,
                                r['rois'],
                                r['masks'],
                                r['class_ids'],
                                dataset_valid.class_names,
                                r['scores'],
                                ax=get_ax())
Code Example #28
File: detector.py Project: iamjuniorpeter/TACO
def test_dataset(model, dataset, nr_images):

    for i in range(nr_images):
        image_id = random.choice(dataset.image_ids)

        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        info = dataset.image_info[image_id]

        r = model.detect([image], verbose=0)[0]

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))

        # Display predictions
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                    dataset.class_names, r['scores'], title="Predictions", ax=ax1)
        # Display ground truth
        visualize.display_instances(image, gt_bbox, gt_mask, gt_class_id, dataset.class_names, title="GT", ax=ax2)

        # Voilà
        plt.show()
Code Example #29
def result():
    if request.method == 'POST':
        file = request.files['file']
        img = skimage.io.imread(file)
        img_arr = np.array(img)
        results = model.detect([img_arr])
        r = results[0]
        graph1_url = display_instances(img, r['rois'], r['masks'],
                                       r['class_ids'], class_names,
                                       r['scores'])
        return render_template("result.html", graph1=graph1_url)
Code Example #30
def testPredict(model, dataset):
    for _ in range(10):
        i = randint(0, 150)
        print('--------------------random number : %d' % i)
        image = dataset.load_image(i)
        results = model.detect([image], verbose=1)
        # Visualize results
        r = results[0]
        class_names = ['BG', 'weapon']
        print(r['scores'])
        scores = r['scores']
        is_display = True
        for score in scores:
            if score < 0.9:
                is_display = False
                break

        if is_display:
            visualize.display_instances(image, r['rois'], r['masks'],
                                        r['class_ids'], class_names,
                                        r['scores'])
Code Example #31
File: demo.py Project: lzqkean/deep_learning
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']


# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
#image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
image = skimage.io.imread(os.path.join(IMAGE_DIR, "bike.png"))

# Run detection
results = model.detect([image], verbose=1)


# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])