Code example #1
def mini_mask(image_id):
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)

    log("image", image)
    log("image_meta", image_meta)
    log("class_ids", class_ids)
    log("bbox", bbox)
    log("mask", mask)

    display_images([image] +
                   [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])

    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)
    # Add augmentation and mask resizing.
    # Take each mask's bbox and resize the mask to a fixed size
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, augment=True, use_mini_mask=True)
    log("mask", mask)
    display_images([image] +
                   [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
    # Expand the mask back to its original size
    mask = utils.expand_mask(bbox, mask, image.shape)
    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)
Code example #2
def color_splash():
    image = dataset.load_image(image_id)
    image_info = dataset.image_info[image_id]
    # Note: the id in image_info is the image's filename
    print("Image ID: {}.{} ({}) {}".format(image_info["source"],
                                           image_info["id"], image_id,
                                           dataset.image_reference(image_id)))
    results = model.detect([image], verbose=1)
    r = results[0]
    splashed_image = seal.color_splash(image, r['masks'])
    display_images([splashed_image], titles='color_splash', cols=1)
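seal.color_splash here is project-specific, but in the Mask R-CNN sample projects (e.g. balloon.py) the splash effect is typically implemented as below: keep the original color inside any detected mask and convert everything else to grayscale. A minimal sketch assuming that implementation:

import numpy as np
import skimage.color

def color_splash(image, mask):
    # Grayscale copy of the image, converted back to RGB so shapes match.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    if mask.shape[-1] > 0:
        # Collapse per-instance masks: a pixel is "on" if any instance covers it.
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        splash = gray.astype(np.uint8)
    return splash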
Code example #3
def visualize_activations(config, model_dir, model_path, dataset_val):
    model = modellib.MaskRCNN(mode="inference",
                              config=config,
                              model_dir=model_dir)
    model.load_weights(model_path, by_name=True)
    model.keras_model.summary()  # summary() prints itself; wrapping it in print() just prints None
    i = 0
    image_ids = np.random.choice(dataset_val.image_ids, 10)
    for image_id in image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, config,
                                   image_id, use_mini_mask=False)

        activations = model.run_graph([image], [
            ("input_image",
             tf.identity(model.keras_model.get_layer("input_image").output)),
            ("relu3fsn_branchrgb",
             model.keras_model.get_layer("relu3fsn_branchrgb").output),
            ("relu4fsn_branchrgb",
             model.keras_model.get_layer("relu4fsn_branchrgb").output),
            ("relu5cbr3_branchdpt",
             model.keras_model.get_layer("relu5cbr3_branchdpt").output),
            ("pl5pool_branchrgb",
             model.keras_model.get_layer("pl5pool_branchrgb").output),
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ])

        # Backbone feature map

        display_images(np.transpose(
            activations["relu3fsn_branchrgb"][0, :, :, :4], [2, 0, 1]),
                       cols=4)
        display_images(np.transpose(
            activations["relu4fsn_branchrgb"][0, :, :, :4], [2, 0, 1]),
                       cols=4)
        display_images(np.transpose(
            activations["relu5cbr3_branchdpt"][0, :, :, :4], [2, 0, 1]),
                       cols=4)
        display_images(np.transpose(
            activations["pl5pool_branchrgb"][0, :, :, :4], [2, 0, 1]),
                       cols=4)

        activation = activations["relu4fsn_branchrgb"][0, :, :, 3]
        print(activation.shape)
        rgb_image = image[:, :, 0:3]
        dpt_image = image[:, :, 3]
        resized = cv2.resize(activation, (512, 512))
        cv2.normalize(resized, resized, 0, 255, cv2.NORM_MINMAX)  # scale to [0, 255]
        resized = np.uint8(resized)
        resized_activation = cv2.applyColorMap(resized, cv2.COLORMAP_WINTER)

        skimage.io.imsave("sun_" + str(i) + "_act.png", resized_activation)
        skimage.io.imsave("sun_" + str(i) + "_rgb.png", rgb_image)
        skimage.io.imsave("sun_" + str(i) + "_dpt.png", dpt_image)

        cv2.waitKey(0)
        i += 1
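As a possible follow-on to the color-mapped activation above, the heatmap can be alpha-blended onto the RGB input with OpenCV. overlay_activation is a hypothetical helper, not part of the original script:

import cv2
import numpy as np

def overlay_activation(rgb_image, activation_colormap, alpha=0.5):
    # Resize the image to the colormap's size, then alpha-blend the two.
    h, w = activation_colormap.shape[:2]
    base = cv2.resize(np.uint8(rgb_image), (w, h))
    return cv2.addWeighted(base, 1.0 - alpha, activation_colormap, alpha, 0)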
Code example #4
    def load_mini_masks(self, dataset, config, ds_datacfg):
        '''
        ## Mini Masks

        Instance binary masks can get large when training with high resolution images.
        For example, if training with 1024x1024 image then the mask of a single instance
        requires 1MB of memory (Numpy uses bytes for boolean values). If an image has
        100 instances then that's 100MB for the masks alone.

        To improve training speed, we optimize masks by:
        * We store mask pixels that are inside the object bounding box, rather than a mask
        of the full image. Most objects are small compared to the image size, so we save space
        by not storing a lot of zeros around the object.
        * We resize the mask to a smaller size (e.g. 56x56). For objects that are larger than
        the selected size we lose a bit of accuracy. But most object annotations are not very
        accurate to begin with, so this loss is negligible for most practical purposes.
        The size of the mini_mask can be set in the config class.

        To visualize the effect of mask resizing, and to verify the code correctness,
        we visualize some examples. A minimal sketch of the underlying crop-and-resize
        round trip follows this example.
        '''
        print("load_mini_masks::-------------------------------->")

        image_id = np.random.choice(dataset.image_ids, 1)[0]
        print("image_id: {}".format(image_id))

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
            dataset, datacfg, config, image_id, use_mini_mask=False)

        log("image", image)
        log("image_meta", image_meta)
        log("class_ids", class_ids)
        log("bbox", bbox)
        log("mask", mask)

        display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])

        ## Display image and instances
        class_names = dataset.class_names
        self.display_instances(image, bbox, mask, class_ids, class_names)

        return image_id
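As referenced in the docstring above, here is a minimal NumPy/skimage sketch of the mini-mask crop-and-resize round trip. It is a simplified illustration of what utils.minimize_mask and utils.expand_mask do, not the library code; the function names are illustrative:

import numpy as np
from skimage.transform import resize

def minimize_mask_sketch(bbox, mask, mini_shape=(56, 56)):
    # Crop each instance mask to its bounding box and shrink it to mini_shape.
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        y1, x1, y2, x2 = bbox[i][:4]
        m = mask[y1:y2, x1:x2, i]
        mini_mask[:, :, i] = resize(m.astype(float), mini_shape) >= 0.5
    return mini_mask

def expand_mask_sketch(bbox, mini_mask, image_shape):
    # Inverse: stretch each mini mask back into its bbox in a full-size mask.
    mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
    for i in range(mini_mask.shape[-1]):
        y1, x1, y2, x2 = bbox[i][:4]
        m = resize(mini_mask[:, :, i].astype(float), (y2 - y1, x2 - x1))
        mask[y1:y2, x1:x2, i] = m >= 0.5
    return mask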
Code example #5
    def display_image(self, image_ids=None, display=False, grid=True):
        '''
        Retrieve images for a list of image ids; the result can be passed to the model's detect() function.
        '''
        images = []
        if not isinstance(image_ids, list):
            image_ids = [image_ids]

        for image_id in image_ids:
            images.append(self.load_image(image_id))

        display_images(images,
                       titles=['id: ' + str(i) + ' ' for i in image_ids],
                       cols=5,
                       width=25,
                       grid=grid)
        return
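A hypothetical usage, assuming this method is defined on a Dataset subclass that also provides load_image (names here are illustrative):

# Show the first five images of the dataset in a grid.
# Pass a plain Python list; the method wraps non-list arguments as a single id.
dataset.display_image(image_ids=list(dataset.image_ids[:5]), grid=True)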
Code example #6
def add_augmentation(dataset, datacfg, dnncfg, image_id=None):
    '''
    # Add augmentation and mask resizing.
    '''
    log.info("add_augmentation::-------------------------------->")

    image_id = image_id if image_id is not None else np.random.choice(dataset.image_ids, 1)[0]
    # Add augmentation and mask resizing.
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(dataset, datacfg, dnncfg, image_id, augment=True, use_mini_mask=False)
    # customlog("mask", mask)
    
    visualize.display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
    mask = utils.expand_mask(bbox, mask, image.shape)

    ## Display image and instances
    class_names = dataset.class_names
    visualize.display_instances(image, bbox, mask, class_ids, class_names)
Code example #7
def show_roi_and_mask():
    g, num_random_rois, detection_targets = create_data_generator()
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks, rpn_rois, rois], \
    [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)
    # Remove the last dim in mrcnn_class_ids. It's only added
    # to satisfy Keras restriction on target shape.
    # mrcnn_class_ids originally has shape (batch_size, num_rois, 1)
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]
    b = 0
    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)
    # Class aware bboxes
    # mrcnn_bbox has shape (batch_size, num_rois, num_classes, 4)
    # mrcnn_class_ids has shape (batch_size, num_rois)
    # bbox_specific has shape (num_rois, 4): one box per ROI, for that ROI's class
    bbox_specific = mrcnn_bbox[b,
                               np.arange(mrcnn_bbox.shape[1]),
                               mrcnn_class_ids[b], :]
    # Refined ROIs
    refined_rois = utils.apply_box_deltas(
        rois[b].astype(np.float32), bbox_specific[:, :4] * config.BBOX_STD_DEV)
    # Class aware masks
    # mrcnn_mask has shape (batch_size, num_rois, 28, 28, num_classes)
    # mask_specific has shape (num_rois, 28, 28): each ROI's mask for its own class
    mask_specific = mrcnn_mask[b,
                               np.arange(mrcnn_mask.shape[1]), :, :,
                               mrcnn_class_ids[b]]
    # Display ROIs and corresponding masks and bounding boxes
    ids = random.sample(range(rois.shape[1]), 8)
    images = []
    titles = []
    for i in ids:
        # copy() keeps the drawing for each id independent within this loop
        image = visualize.draw_box(sample_image.copy(),
                                   rois[b,
                                        i, :4].astype(np.int32), [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                   [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])

    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")
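The per-class gathering above relies on NumPy's paired fancy indexing. A toy example with assumed small shapes:

import numpy as np

num_rois, num_classes = 5, 3
bbox_all = np.zeros((num_rois, num_classes, 4))      # like mrcnn_bbox[b]
class_ids = np.array([0, 2, 1, 2, 0])                # like mrcnn_class_ids[b]
# For each ROI i, select the 4 deltas predicted for its own class.
bbox_per_roi = bbox_all[np.arange(num_rois), class_ids]
assert bbox_per_roi.shape == (num_rois, 4)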
Code example #8
def visualize_activations():
    # Get activations of a few sample layers
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    activations = model.run_graph(
        [resized_image],
        [
            # ("input_image", model.keras_model.get_layer("input_image").output),
            ("res2c_out", model.keras_model.get_layer("res2c_out").output),
            ("res3c_out", model.keras_model.get_layer("res3c_out").output),
            ("res4w_out",
             model.keras_model.get_layer("res4w_out").output),  # for resnet100
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ])
    # Input image (normalized)
    # _ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))
    # Backbone feature map
    display_images(np.transpose(activations["res2c_out"][0, :, :, :4],
                                [2, 0, 1]),
                   cols=4)
Code example #9
    def add_augmentation(self, dataset, config, ds_datacfg, image_id):
        '''
        # Add augmentation and mask resizing.
        '''
        print("add_augmentation::-------------------------------->")

        # image_id = image_id if image_id==None else np.random.choice(dataset.image_ids, 1)[0]

        datacfg = None
        if ds_datacfg:
            info = dataset.image_info[image_id]
            ds_source = info['source']
            datacfg = utils.get_datacfg(ds_datacfg, ds_source)

        # Add augmentation and mask resizing.
        image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
            dataset, datacfg, config, image_id, augment=True, use_mini_mask=True)
        log("mask", mask)
        display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
        mask = utils.expand_mask(bbox, mask, image.shape)

        ## Display image and instances
        class_names = dataset.class_names
        self.display_instances(image, bbox, mask, class_ids, class_names)
Code example #10
def visualize_feature_map(model, dataset, config, image_ids, output_dir):
    for image_id in image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        # Get activations of a few sample layers
        activations = model.run_graph([image], [
            ("input_image", model.keras_model.get_layer("input_image").output),
            ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet100
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ])

        # Input image (normalized)
        plt.figure("input_image")
        fig = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))
        output_path = os.path.join(output_dir, "input_image_id_{}.png".format(image_id))
        print('Saving image {} to {}'.format(image_id, output_path))
        plt.savefig(output_path)

        # Backbone feature map
        # Note: this assumes a modified visualize.display_images that returns the
        # Matplotlib figure; the stock mrcnn version returns None.
        fig = visualize.display_images(np.transpose(activations["res4w_out"][0, :, :, :8], [2, 0, 1]))
        output_path = os.path.join(output_dir, "feature_maps_id_{}.png".format(image_id))
        print('Saving image {} to {}'.format(image_id, output_path))
        fig.savefig(output_path)
Code example #11
def display_mrcnn_mask_prediction():
    #################################### Mask Targets ##############################################
    # gt_masks has shape (image_height, image_width, num_instances)
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    display_images(np.transpose(gt_masks, [2, 0, 1]), cmap="Blues")
    # Get predictions of mask head
    mrcnn = model.run_graph([resized_image], [
        ("detections", model.keras_model.get_layer("mrcnn_detection").output),
        ("masks", model.keras_model.get_layer("mrcnn_mask").output),
    ])

    # Get detection class IDs. Trim zero padding.
    det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
    padding_start_ix = np.where(det_class_ids == 0)[0][0]
    det_class_ids = det_class_ids[:padding_start_ix]

    print("{} detections: {}".format(
        padding_start_ix,
        np.array(dataset.class_names)[det_class_ids]))
    # Masks
    det_boxes = utils.denorm_boxes(mrcnn["detections"][0, :, :4],
                                   resized_image.shape[:2])
    # mrcnn['masks'] has shape (batch_size, num_instances, mask_height, mask_width, num_classes)
    det_mask_specific = np.array(
        [mrcnn["masks"][0, i, :, :, c] for i, c in enumerate(det_class_ids)])
    det_masks = np.array([
        utils.unmold_mask(mask, det_boxes[i], resized_image.shape)
        for i, mask in enumerate(det_mask_specific)
    ])
    log("det_mask_specific", det_mask_specific)
    display_images(det_mask_specific[:4] * 255,
                   cmap="Blues",
                   interpolation="none")
    log("det_masks", det_masks)
    display_images(det_masks[:4] * 255, cmap="Blues", interpolation="none")
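The zero-padding trim above works because detections are padded with all-zero rows. A tiny example; note it assumes at least one padded row exists, otherwise np.where(...)[0][0] raises an IndexError:

import numpy as np

ids = np.array([3, 1, 5, 0, 0, 0])
trimmed = ids[:np.where(ids == 0)[0][0]]
print(trimmed)  # -> [3 1 5]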
Code example #12
    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=config)

# load the last model you trained
# weights_path = model.find_last()[1]

# Load weights
print("Loading weights ", custom_WEIGHTS_PATH)
model.load_weights(custom_WEIGHTS_PATH, by_name=True)

import skimage
image = skimage.io.imread('8. nooteboom-ballasttrailer-7-axlevolvo6x4.jpg')

# Run object detection

results = model.detect([image], verbose=1)

# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image,
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
visualize.display_images([image])  # display_images expects a list of images
Code example #13
r = results[0]
print("---------------" * 100, dataset.class_names)
visualize.display_instances(image,
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

splash = lines.color_splash(image, r['masks'])
display_images([splash], cols=1)

input("---------------" * 100)

target_rpn_match, target_rpn_bbox = modellib.build_rpn_targets(
    image.shape, model.anchors, gt_class_id, gt_bbox, model.config)

log("target_rpn_match", target_rpn_match)
log("target_rpn_bbox", target_rpn_bbox)

positive_anchor_ix = np.where(target_rpn_match[:] == 1)[0]
negative_anchor_ix = np.where(target_rpn_match[:] == -1)[0]
neutral_anchor_ix = np.where(target_rpn_match[:] == 0)[0]
positive_anchors = model.anchors[positive_anchor_ix]
negative_anchors = model.anchors[negative_anchor_ix]
neutral_anchors = model.anchors[neutral_anchor_ix]
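A typical next step, as in code example #20 below, is to draw the anchor sets on the image, for instance (assuming the same image variable is still in scope):

# Show positive anchors; negative/neutral anchors can be drawn the same way.
visualize.draw_boxes(image, boxes=positive_anchors)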
Code example #14
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
print(
    "---------------------------------------------------------------------------"
)

splash = damageDetection.color_splash(image, r['masks'])
display_images([splash], cols=1)

# Generate RPN training targets
# target_rpn_match is 1 for positive anchors, -1 for negative anchors
# and 0 for neutral anchors.
target_rpn_match, target_rpn_bbox = modellib.build_rpn_targets(
    image.shape, model.anchors, gt_class_id, gt_bbox, model.config)
log("target_rpn_match", target_rpn_match)
log("target_rpn_bbox", target_rpn_bbox)

positive_anchor_ix = np.where(target_rpn_match[:] == 1)[0]
negative_anchor_ix = np.where(target_rpn_match[:] == -1)[0]
neutral_anchor_ix = np.where(target_rpn_match[:] == 0)[0]
positive_anchors = model.anchors[positive_anchor_ix]
negative_anchors = model.anchors[negative_anchor_ix]
neutral_anchors = model.anchors[neutral_anchor_ix]
Code example #15
import matplotlib.pyplot as plt
ids = np.where(r['class_ids'] == class_names.index("person"))[0]
ids = ids if len(ids) < 10 else ids[:10]  # show at most 10 person masks
plt.figure(figsize=(30,30))
columns = 2
for i, id in enumerate(ids):
    mask = r['masks'][:, :, id] * 1
    mask = np.moveaxis(np.stack([mask, mask, mask]), 0, 2)
    masked_image = image * mask
    plt.subplot(len(ids) // columns + 1, columns, i + 1)  # integer grid dimensions
    plt.imshow(masked_image)
plt.show()

from mrcnn.visualize import display_images
import mrcnn.model as modellib
display_images(np.transpose(r['masks'], [2, 0, 1]), cmap="Blues")


# Save outputs

import matplotlib.pyplot as plt
import cv2
import shutil
from datetime import datetime
out_dir = os.path.join(ROOT_DIR, "outputs/mask/" + str(datetime.now())[:-10].replace(":", "_"))
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
os.makedirs(out_dir)
for i, id in enumerate(r['class_ids']):
    mask = r['masks'][:, :, i] * 1
    # (Assumed completion of the truncated loop: save each instance mask to disk.)
    cv2.imwrite(os.path.join(out_dir, "{}_{}.png".format(i, class_names[id])),
                np.uint8(mask * 255))
Code example #16
File: yolo.py  Project: gopikabala14/MainProject2019
    def detect_image(self, image,k):
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5))
            left = max(0, np.floor(left + 0.5))
            bottom = min(image.size[1], np.floor(bottom + 0.5))
            right = min(image.size[0], np.floor(right + 0.5))
            print(label, (left, top), (right, bottom))

            # Record this detection's box in the shared dict, keyed by detection index.
            dict1 = {"class": label, i: [left, top, right, bottom]}
            dict2.update(dict1)
            dict1 = {}

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

            conn = sqlite3.connect('toll_data.db')
            c = conn.cursor()

            print("classssssssssss",out_classes)
            ##################################################### MASK RCNN#########################################################
            for i in out_classes:
                if(i==7):
                    # Directory to save logs and trained model
                    MODEL_DIR = os.path.join(ROOT_DIR, "logs")

                    custom_WEIGHTS_PATH = "mask_rcnn_wheel_0100.h5"  # TODO: update this path

                    config = custom.CustomConfig()
                    custom_DIR = os.path.join(ROOT_DIR, "customImages")

                    # ---------------------------------------------------------------------------

                    # Override the training configurations with a few
                    # changes for inferencing.
                    class InferenceConfig(config.__class__):
                        # Run detection on one image at a time
                        GPU_COUNT = 1
                        IMAGES_PER_GPU = 1

                    config = InferenceConfig()
                    config.display()

                    # Device to load the neural network on.
                    # Useful if you're training a model on the same
                    # machine, in which case use CPU and leave the
                    # GPU for training.
                    DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0

                    # Inspect the model in training or inference modes
                    # values: 'inference' or 'training'
                    # TODO: code for 'training' te

                    def get_ax(rows=1, cols=1, size=16):
                        """Return a Matplotlib Axes array to be used in
                        all visualizations in the notebook. Provide a
                        central point to control graph sizes.

                        Adjust the size attribute to control how big to render images
                        """
                        _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
                        return ax

                    # Load validation dataset
                    dataset = custom.CustomDataset()
                    dataset.load_custom(custom_DIR, "val")

                    # Must call before using the dataset
                    dataset.prepare()

                    print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))

                    # Create model in inference mode
                    with tf.device(DEVICE):
                        model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
                                                  config=config)

                    # load the last model you trained
                    # weights_path = model.find_last()[1]

                    # Load weights
                    print("Loading weights ", custom_WEIGHTS_PATH)
                    model.load_weights(custom_WEIGHTS_PATH, by_name=True)

                    # Display results
                    import skimage
                    # i=read()
                    imag =skimage.io.imread(k)
                    results = model.detect([imag], verbose=1)
                    ax = get_ax(1)
                    r = results[0]
                    visualize.display_instances(imag, r['rois'], r['masks'], r['class_ids'],
                                                dataset.class_names, r['scores'], ax=ax,
                                                title="Predictions")
                    visualize.display_images([imag])
                    print(r['scores'])
                    axle = (len(r['scores']))
                    print(len(r['scores']))
                    if axle >= 8:
                        c = conn.cursor()
                        #seven_count = seven_count + 1
                        global eight_count
                        eight_count = eight_count + 1
                        print(eight_count)
                        print("8 Axle or 8 Axle Above Truck... Pay 12 Riyal")
                        now = datetime.datetime.now()
                        print(now)
                        global eight_sum
                        eight_sum = eight_sum + 12
                        print("eight_sum", eight_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")
                        car = '--'
                        bus = '--'
                        truck = '8 axle or above'
                        amount = 20

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()
                    elif axle == 7:
                        c = conn.cursor()
                        #seven_count = seven_count + 1
                        global seven_count
                        seven_count = seven_count + 1
                        print(seven_count)
                        print("7 Axle Truck... Pay 10 Riyal")
                        now = datetime.datetime.now()
                        print(now)
                        global seven_sum
                        seven_sum = seven_sum + 10
                        print("seven_sum", seven_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")
                        car = '--'
                        bus = '--'
                        truck = '7 axle'
                        amount = 20

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()

                    elif axle == 6:
                        c = conn.cursor()
                        global  six_count
                        six_count = six_count + 1
                        print(six_count)
                        print("6 Axle Truck... Pay 8 Riyal")
                        now = datetime.datetime.now()
                        print(now)
                        global six_sum
                        six_sum = six_sum + 8
                        print("six_sum", six_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")
                        car = '--'
                        bus = '--'
                        truck = '6 axle'
                        amount = 20

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()
                    elif axle == 5:
                        c = conn.cursor()
                        global five_count
                        five_count = five_count + 1
                        print(five_count)
                        print("5 Axle Truck... Pay 6 Riyal")
                        now = datetime.datetime.now()
                        print(now)
                        global five_sum
                        five_sum = five_sum + 6
                        print("five_sum", five_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")
                        car = '--'
                        bus = '--'
                        truck = '5 axle'
                        amount = 20

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()
                    elif axle == 4:
                        c = conn.cursor()
                        global four_count
                        four_count = four_count + 1
                        print(four_count)
                        print("4 Axle Truck... Pay 4 Riyal")
                        now = datetime.datetime.now()
                        print(now)
                        global four_sum
                        four_sum = four_sum + 4
                        print("four_sum", four_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")
                        car = '--'
                        bus = '--'
                        truck = '4 axle'
                        amount = 20

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()
                    elif axle == 3:
                        c = conn.cursor()

                        #global three_count
                        #three_count = three_count + 1
                        #print(three_count)

                        now = datetime.datetime.now()
                        #print(now)
                        #global three_sum
                        #three_sum = three_sum + 2
                        #print("three_sum", three_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")

                        car = '--'
                        bus = '--'
                        truck = '3 axle'
                        amount = 15


                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))
                        conn.commit()


                    else:
                        c = conn.cursor()

                        now = datetime.datetime.now()
                        #global two_sum
                        #two_sum = two_sum + 1

                        #print("two_sum", two_sum)
                        ctime = now.strftime("%I:%M:%S %p")
                        today = now.strftime("%Y-%m-%d")

                        car = '--'
                        bus = '--'
                        truck = '2 axle'
                        amount = 10

                        c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                                  (today, ctime, car, bus, truck, amount))

                        conn.commit()


                elif i == 5:
                    c = conn.cursor()
                    global flag
                    flag = 1
                    now = datetime.datetime.now()

                    #global bus_sum
                    #bus_sum = bus_sum + 3
                    #print(bus_sum)
                    ctime= now.strftime("%I:%M:%S %p")
                    today = now.strftime("%Y-%m-%d")
                    car = '--'
                    bus = 'bus'
                    truck = '--'
                    amount = 20

                    c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                              (today, ctime, car, bus, truck, amount))

                    conn.commit()
                elif i == 2:
                    c = conn.cursor()
                    global flag
                    flag = 2
                    now = datetime.datetime.now()

                    #global car_sum
                    #car_sum = car_sum + 2
                    #print(car_sum)
                    ctime = now.strftime("%I:%M:%S %p")
                    today = now.strftime("%Y-%m-%d")

                    car = 'car'
                    bus = '--'
                    truck = '--'
                    amount = 10

                    c.execute("INSERT INTO toll(datestamp, time, car,bus,truck,amount) VALUES (?,?, ?, ?, ?,?)",
                              (today, ctime, car, bus, truck, amount))

                    conn.commit()
                else : continue
                ########################### CAR COUNT #####################################

                v = """SELECT count(car) FROM toll GROUP BY car"""
                c.execute(v)
                record = c.fetchall()
                car_count_list = []
                for row in record:
                    car_count_list = row[0]
                print("Printing Car  Count", car_count_list)

                ############################# BUS COUNT ##################################

                sel = """SELECT count(bus) FROM toll GROUP BY bus"""
                c.execute(sel)
                record = c.fetchall()
                count_list = []
                for row in record:
                    count_list = row[0]
                print("Printing Bus Count", count_list)
                ############################## TRUCK COUNT ###############################

                sel = """SELECT count(truck) FROM toll GROUP BY truck"""
                c.execute(sel)
                record = c.fetchall()
                print("size", record)
                global truck_count
                sel_truck = """SELECT truck FROM toll  """
                c.execute(sel_truck)
                rec = c.fetchall()
                for r in rec :
                    print(r[0])
                conn.commit()
                ############################## TWO COUNT ###############################
                sel_two = """SELECT count(truck) FROM toll WHERE truck = '2 axle' """
                c.execute(sel_two)
                rec_two = c.fetchall()
                for row in rec_two:
                    print(row)
                flag = 3
                conn.commit()
                ############################## THREE COUNT ###############################
                sel_three = """SELECT truck FROM toll WHERE truck = '3 axle' """
                c.execute(sel_three)
                rec_three = c.fetchall()
                for row1 in rec_three:
                    print(row1)
                flag = 4
                conn.commit()
                ############################## FOUR COUNT ###############################


                if flag == 2:
                    c = conn.cursor()
                    now = datetime.datetime.now()
                    today = now.strftime("%Y-%m-%d")

                    car = car_count_list

                    # c.execute("INSERT INTO count(eight_above,seven,six,five,four,three,two,car,bus) VALUES "
                    # "(?, ?, ?, ?,?,?,?,?,?)", (eight_or_more, seven, six, five, four, three, two, car, bus))



                    up = """UPDATE count SET date =? ,car=? WHERE count_id =1"""

                    val = (today, car)
                    c.execute(up, val)

                    global car_sum
                    car_sum = car * 10
                    up = """UPDATE amount SET date =?, car_sum=? WHERE amt_id =1"""
                    val = (today, car_sum)
                    c.execute(up, val)

                    conn.commit()
                elif flag == 1:

                    c = conn.cursor()

                    now = datetime.datetime.now()
                    today = now.strftime("%Y-%m-%d")

                    bus = count_list

                    #c.execute("INSERT INTO count (date,bus) VALUES  (?, ?)", (today,bus))

                    up = """UPDATE count SET date =?, bus=? WHERE count_id =1"""

                    val = (today, bus)
                    c.execute(up, val)
                    global bus_sum
                    bus_sum = bus * 20
                    up1 = """UPDATE amount SET date =?, bus_sum=? WHERE amt_id =1"""
                    val1 = (today, bus_sum)
                    c.execute(up1,val1)

                    conn.commit()
                elif flag == 3:

                    c = conn.cursor()

                    now = datetime.datetime.now()
                    today = now.strftime("%Y-%m-%d")
                    tr_2count = row[0]  # count value from the '2 axle' query loop above
                    up1 = """UPDATE count SET date = ?, "2axle" = ? WHERE count_id = 1"""

                    val1 = (today, tr_2count)
                    c.execute(up1, val1)

                    global truck_sum
                    truck_sum = tr_2count * 50
                    up2 = """UPDATE amount SET date =?, truck_sum=? WHERE amt_id =1"""
                    val2 = (today, truck_sum)
                    c.execute(up2, val2)

                elif flag == 4:

                    c = conn.cursor()

                    now = datetime.datetime.now()
                    today = now.strftime("%Y-%m-%d")
                    tr_3count = row1[0]  # count value from the '3 axle' query loop above
                    up1 = """UPDATE count SET date = ?, "3axle" = ? WHERE count_id = 1"""

                    val1 = (today, tr_3count)
                    c.execute(up1, val1)

                    global truck_sum
                    truck_sum = tr_3count * 50
                    up2 = """UPDATE amount SET date =?, truck_sum=? WHERE amt_id =1"""
                    val2 = (today, truck_sum)
                    c.execute(up2, val2)

                    conn.commit()


            ####################################################MRCNN END##################################################

        end = timer()

        with open('data.json', 'w') as outfile:
            json.dump(dict2, outfile)

        print(end - start)
        return image,out_classes
Code example #17
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)


image_id = np.random.choice(dataset.image_ids, 1)[0]
# load_image_gt: generates the ground truth
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)

log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)

display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])

visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Add augmentation and mask resizing.
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, augment=True, use_mini_mask=True)
log("mask", mask)
display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])

mask = utils.expand_mask(bbox, mask, image.shape)
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Generate Anchors
backbone_shapes = modellib.compute_backbone_shapes(config, config.IMAGE_SHAPE)
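The anchors themselves would then come from utils.generate_pyramid_anchors, with the same call as in code example #20 below:

anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                         config.RPN_ANCHOR_RATIOS,
                                         backbone_shapes,
                                         config.BACKBONE_STRIDES,
                                         config.RPN_ANCHOR_STRIDE)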
Code example #18
# Display results
#ax = get_ax(1)
##r = results[0]
#visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
#                           dataset.class_names, r['scores'], ax=ax,
#                           title="Predictions")
#log("gt_class_id", gt_class_id)
#log("gt_bbox", gt_bbox)
#log("gt_mask", gt_mask)

#splash = balloon.color_splash(image, r['masks'])
#display_images([splash], cols=1)
#display_images(dataset.image_ids,cols=4)

# Get activations of a few sample layers
activations = model.run_graph(
    [image],
    [
        ("input_image",
         tf.identity(model.keras_model.get_layer("input_image").output)),
        ("res2c_out", model.keras_model.get_layer("res2c_out").output),
        ("res3c_out", model.keras_model.get_layer("res3c_out").output),
        ("res4w_out",
         model.keras_model.get_layer("res4w_out").output),  # for resnet100
        ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
        ("roi", model.keras_model.get_layer("ROI").output),
    ])
# Backbone feature map
display_images(np.transpose(activations["res2c_out"][0, :, :, :4], [2, 0, 1]),
               cols=4)
Code example #19
r = results[0]
visualize.display_instances(image,
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions",
                            img_name="_detected_with_bbox")
# log("gt_class_id", gt_class_id)
# log("gt_bbox", gt_bbox)
# log("gt_mask", gt_mask)

splash = ship.color_splash(image, r['masks'])
display_images([splash], cols=1, img_name="detected_withMask")

# Generate RPN training targets
# target_rpn_match is 1 for positive anchors, -1 for negative anchors
# and 0 for neutral anchors.
target_rpn_match, target_rpn_bbox = modellib.build_rpn_targets(
    image.shape, model.anchors, gt_class_id, gt_bbox, model.config)
log("target_rpn_match", target_rpn_match)
log("target_rpn_bbox", target_rpn_bbox)

positive_anchor_ix = np.where(target_rpn_match[:] == 1)[0]
negative_anchor_ix = np.where(target_rpn_match[:] == -1)[0]
neutral_anchor_ix = np.where(target_rpn_match[:] == 0)[0]
positive_anchors = model.anchors[positive_anchor_ix]
negative_anchors = model.anchors[negative_anchor_ix]
neutral_anchors = model.anchors[neutral_anchor_ix]
Code example #20
def inspect_data(dataset, config):
    print("Image Count: {}".format(len(dataset.image_ids)))
    print("Class Count: {}".format(dataset.num_classes))
    for i, info in enumerate(dataset.class_info):
        print("{:3}. {:50}".format(i, info['name']))

    # ## Display Samples
    #
    # Load and display images and masks.

    # In[4]:

    # Load and display random samples
    image_ids = np.random.choice(dataset.image_ids, 4)
    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids = dataset.load_mask(image_id)
        visualize.display_top_masks(image, mask, class_ids,
                                    dataset.class_names)

    # ## Bounding Boxes
    #
    # Rather than using bounding box coordinates provided by the source datasets, we compute the bounding boxes from masks instead. This allows us to handle bounding boxes consistently regardless of the source dataset, and it also makes it easier to resize, rotate, or crop images because we simply generate the bounding boxes from the updated masks rather than computing a bounding box transformation for each type of image transformation.

    # In[5]:

    # Load random image and mask.
    image_id = random.choice(dataset.image_ids)
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id ", image_id, dataset.image_reference(image_id))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)

    # ## Resize Images
    #
    # To support multiple images per batch, images are resized to one size (1024x1024). Aspect ratio is preserved, though. If an image is not square, then zero padding is added at the top/bottom or right/left.

    # In[6]:

    # Load random image and mask.
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    # Resize
    image, window, scale, padding, _ = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id: ", image_id, dataset.image_reference(image_id))
    print("Original shape: ", original_shape)
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)

    # ## Mini Masks
    #
    # Instance binary masks can get large when training with high resolution images. For example, if training with 1024x1024 image then the mask of a single instance requires 1MB of memory (Numpy uses bytes for boolean values). If an image has 100 instances then that's 100MB for the masks alone.
    #
    # To improve training speed, we optimize masks by:
    # * We store mask pixels that are inside the object bounding box, rather than a mask of the full image. Most objects are small compared to the image size, so we save space by not storing a lot of zeros around the object.
    # * We resize the mask to a smaller size (e.g. 56x56). For objects that are larger than the selected size we lose a bit of accuracy. But most object annotations are not very accurate to begin with, so this loss is negligible for most practical purposes. The size of the mini_mask can be set in the config class.
    #
    # To visualize the effect of mask resizing, and to verify the code correctness, we visualize some examples.

    # In[7]:

    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)

    log("image", image)
    log("image_meta", image_meta)
    log("class_ids", class_ids)
    log("bbox", bbox)
    log("mask", mask)

    display_images([image] +
                   [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])

    # In[8]:

    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)

    # In[9]:

    # Add augmentation and mask resizing.
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, augment=True, use_mini_mask=True)
    log("mask", mask)
    display_images([image] +
                   [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])

    # In[10]:

    mask = utils.expand_mask(bbox, mask, image.shape)
    visualize.display_instances(image, bbox, mask, class_ids,
                                dataset.class_names)

    # ## Anchors
    #
    # The order of anchors is important. Use the same order in training and prediction phases. And it must match the order of the convolution execution.
    #
    # For an FPN network, the anchors must be ordered in a way that makes it easy to match anchors to the output of the convolution layers that predict anchor scores and shifts.
    # * Sort by pyramid level first. All anchors of the first level, then all of the second and so on. This makes it easier to separate anchors by level.
    # * Within each level, sort anchors by feature map processing sequence. Typically, a convolution layer processes a feature map starting from top-left and moving right row by row.
    # * For each feature map cell, pick any sorting order for the anchors of different ratios. Here we match the order of ratios passed to the function.
    #
    # **Anchor Stride:**
    # In the FPN architecture, feature maps at the first few layers are high resolution. For example, if the input image is 1024x1024 then the feature map of the first layer is 256x256, which generates about 200K anchors (256*256*3). These anchors are 32x32 pixels and their stride relative to image pixels is 4 pixels, so there is a lot of overlap. We can reduce the load significantly if we generate anchors for every other cell in the feature map. A stride of 2 will cut the number of anchors by 4, for example.
    #
    # In this implementation we use an anchor stride of 2, which is different from the paper.

    # In[11]:

    # Generate Anchors
    backbone_shapes = modellib.compute_backbone_shapes(config,
                                                       config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Print summary of anchors
    num_levels = len(backbone_shapes)
    anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
    print("Count: ", anchors.shape[0])
    print("Scales: ", config.RPN_ANCHOR_SCALES)
    print("ratios: ", config.RPN_ANCHOR_RATIOS)
    print("Anchors per Cell: ", anchors_per_cell)
    print("Levels: ", num_levels)
    anchors_per_level = []
    for l in range(num_levels):
        num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
        anchors_per_level.append(anchors_per_cell * num_cells //
                                 config.RPN_ANCHOR_STRIDE**2)
        print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))

    # Visualize anchors of one cell at the center of the feature map of a specific level.

    # In[12]:

    ## Visualize anchors of one cell at the center of the feature map of a specific level

    # Load and draw random image
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image, image_meta, _, _, _ = modellib.load_image_gt(
        dataset, config, image_id)
    fig, ax = plt.subplots(1, figsize=(10, 10))
    ax.imshow(image)
    levels = len(backbone_shapes)

    for level in range(levels):
        colors = visualize.random_colors(levels)
        # Compute the index of the anchors at the center of the image
        level_start = sum(
            anchors_per_level[:level])  # sum of anchors of previous levels
        level_anchors = anchors[level_start:level_start +
                                anchors_per_level[level]]
        print("Level {}. Anchors: {:6}  Feature map Shape: {}".format(
            level, level_anchors.shape[0], backbone_shapes[level]))
        center_cell = backbone_shapes[level] // 2
        center_cell_index = (center_cell[0] * backbone_shapes[level][1] +
                             center_cell[1])
        level_center = center_cell_index * anchors_per_cell
        center_anchor = anchors_per_cell * (
            (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2) \
            + center_cell[1] / config.RPN_ANCHOR_STRIDE)
        level_center = int(center_anchor)

        # Draw anchors. Brightness show the order in the array, dark to bright.
        for i, rect in enumerate(level_anchors[level_center:level_center +
                                               anchors_per_cell]):
            y1, x1, y2, x2 = rect
            p = patches.Rectangle(
                (x1, y1),
                x2 - x1,
                y2 - y1,
                linewidth=2,
                facecolor='none',
                edgecolor=(i + 1) * np.array(colors[level]) / anchors_per_cell)
            ax.add_patch(p)

    # ## Data Generator
    #

    # In[13]:

    # Create data generator
    random_rois = 2000
    g = modellib.data_generator(dataset,
                                config,
                                shuffle=True,
                                random_rois=random_rois,
                                batch_size=4,
                                detection_targets=True)

    # In[14]:

    # Uncomment to run the generator through a lot of images
    # to catch rare errors
    # for i in range(1000):
    #     print(i)
    #     _, _ = next(g)

    # In[15]:

    # Get Next Image
    if random_rois:
        [
            normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
            gt_boxes, gt_masks, rpn_rois, rois
        ], [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)

        log("rois", rois)
        log("mrcnn_class_ids", mrcnn_class_ids)
        log("mrcnn_bbox", mrcnn_bbox)
        log("mrcnn_mask", mrcnn_mask)
    else:
        [
            normalized_images, image_meta, rpn_match, rpn_bbox, gt_boxes,
            gt_masks
        ], _ = next(g)

    log("gt_class_ids", gt_class_ids)
    log("gt_boxes", gt_boxes)
    log("gt_masks", gt_masks)
    log("rpn_match", rpn_match)
    log("rpn_bbox", rpn_bbox)
    image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
    print("image_id: ", image_id, dataset.image_reference(image_id))

    # Remove the last dim in mrcnn_class_ids. It's only added
    # to satisfy Keras restriction on target shape.
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

    # In[16]:

    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)

    # Compute anchor shifts.
    indices = np.where(rpn_match[b] == 1)[0]
    refined_anchors = utils.apply_box_deltas(
        anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
    log("anchors", anchors)
    log("refined_anchors", refined_anchors)

    # Get list of positive anchors
    positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
    print("Positive anchors: {}".format(len(positive_anchor_ids)))
    negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
    print("Negative anchors: {}".format(len(negative_anchor_ids)))
    neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
    print("Neutral anchors: {}".format(len(neutral_anchor_ids)))

    # ROI breakdown by class
    for c, n in zip(dataset.class_names,
                    np.bincount(mrcnn_class_ids[b].flatten())):
        if n:
            print("{:23}: {}".format(c[:20], n))

    # Show positive anchors
    fig, ax = plt.subplots(1, figsize=(16, 16))
    visualize.draw_boxes(sample_image,
                         boxes=anchors[positive_anchor_ids],
                         refined_boxes=refined_anchors,
                         ax=ax)

    # In[17]:

    # Show negative anchors
    visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])

    # In[18]:

    # Show neutral anchors. They don't contribute to training.
    visualize.draw_boxes(sample_image,
                         boxes=anchors[np.random.choice(
                             neutral_anchor_ids, 100)])

    # ## ROIs

    # In[19]:

    if random_rois:
        # Class aware bboxes
        bbox_specific = mrcnn_bbox[b,
                                   np.arange(mrcnn_bbox.shape[1]),
                                   mrcnn_class_ids[b], :]

        # Refined ROIs
        refined_rois = utils.apply_box_deltas(
            rois[b].astype(np.float32),
            bbox_specific[:, :4] * config.BBOX_STD_DEV)

        # Class aware masks
        mask_specific = mrcnn_mask[b,
                                   np.arange(mrcnn_mask.shape[1]), :, :,
                                   mrcnn_class_ids[b]]

        visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific,
                            mrcnn_class_ids[b], dataset.class_names)

        # Any repeated ROIs?
        rows = np.ascontiguousarray(rois[b]).view(
            np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
        _, idx = np.unique(rows, return_index=True)
        print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))

    # In[20]:

    if random_rois:
        # Display ROIs and corresponding masks and bounding boxes
        ids = random.sample(range(rois.shape[1]), 8)

        images = []
        titles = []
        for i in ids:
            image = visualize.draw_box(sample_image.copy(),
                                       rois[b, i, :4].astype(np.int32),
                                       [255, 0, 0])
            image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                       [0, 255, 0])
            images.append(image)
            titles.append("ROI {}".format(i))
            images.append(mask_specific[i] * 255)
            titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])

        display_images(images,
                       titles,
                       cols=4,
                       cmap="Blues",
                       interpolation="none")

    # In[21]:

    # Check ratio of positive ROIs in a set of images.
    if random_rois:
        limit = 10
        temp_g = modellib.data_generator(dataset,
                                         config,
                                         shuffle=True,
                                         random_rois=10000,
                                         batch_size=1,
                                         detection_targets=True)
        total = 0
        for i in range(limit):
            _, [ids, _, _] = next(temp_g)
            positive_rois = np.sum(ids[0] > 0)
            total += positive_rois
            print("{:5} {:5.2f}".format(positive_rois,
                                        positive_rois / ids.shape[1]))
        print("Average percent: {:.2f}".format(total / (limit * ids.shape[1])))
コード例 #21
0
def all_steps(dataset, datacfg, dnncfg):
  '''
  Single entry point for all the steps of dataset inspection.
  '''

  ## Uncomment for debugging
  # inspectdata.load_and_display_dataset(dataset, datacfg)

  # In[7]:
  log.info("[7]. ---------------")
  log.info("Load and display random images and masks---------------")
  log.info("Bounding Boxes---------------")
  load_and_display_random_sample(dataset, datacfg)

  # In[9]:
  log.info("[9]. ---------------")
  log.info("Resize Images---------------")
  load_and_resize_images(dataset, datacfg, dnncfg)

  # In[10]:
  log.info("[10]. ---------------")
  log.info("Mini Masks---------------")
  image_id = load_mini_masks(dataset, datacfg, dnncfg)

  log.info("image_id: {}".format(image_id))
  # In[11]:
  log.info("[11]. ---------------")
  log.info("Add augmentation and mask resizing---------------")
  add_augmentation(dataset, datacfg, dnncfg, image_id)

  info = dataset.image_info[image_id]
  log.debug("info: {}".format(info))

  # In[12]:
  log.info("[12]. ---------------")
  log.info("Anchors---------------")
  backbone_shapes, anchors, anchors_per_level, anchors_per_cell = generate_anchors(dnncfg)

  # In[13]:
  log.info("[13]. ---------------")
  log.info("Visualize anchors of one cell at the center of the feature map of a specific level---------------")
  visualize_anchors_at_center(dataset, datacfg, dnncfg, backbone_shapes, anchors, anchors_per_level, anchors_per_cell)

  # In[14]:
  log.info("[14]. ---------------")
  log.info("info---------------")
  image_ids = dataset.image_ids
  log.info(image_ids)
  image_index = -1
  image_index = (image_index + 1) % len(image_ids)
  log.info("image_index:{}".format(image_index))

  # In[15]:
  log.info("[15]. ---------------")
  log.info("data_generator---------------")

  ## Data Generator
  # Create data generator
  random_rois = 2000
  g = modellib.data_generator(
      dataset, datacfg, dnncfg, shuffle=True, random_rois=random_rois,
      batch_size=4,
      detection_targets=True)

  # Uncomment to run the generator through a lot of images
  # to catch rare errors
  # for i in range(1000):
  #     log.debug(i)
  #     _, _ = next(g)

  # Get Next Image
  if random_rois:
      [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
       gt_boxes, gt_masks, rpn_rois, rois], \
      [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)

      customlog("rois", rois)
      customlog("mrcnn_class_ids", mrcnn_class_ids)
      customlog("mrcnn_bbox", mrcnn_bbox)
      customlog("mrcnn_mask", mrcnn_mask)
  else:
      [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks], _ = next(g)

  customlog("gt_class_ids", gt_class_ids)
  customlog("gt_boxes", gt_boxes)
  customlog("gt_masks", gt_masks)
  customlog("rpn_match", rpn_match, )
  customlog("rpn_bbox", rpn_bbox)
  image_id = modellib.parse_image_meta(image_meta)["image_id"][0]

  # Remove the last dim in mrcnn_class_ids. It's only added
  # to satisfy Keras restriction on target shape.
  mrcnn_class_ids = mrcnn_class_ids[:,:,0]


  # In[16]:
  log.info("[16]. ---------------")

  b = 0
  # Restore original image (reverse normalization)
  sample_image = modellib.unmold_image(normalized_images[b], dnncfg)

  # Compute anchor shifts.
  indices = np.where(rpn_match[b] == 1)[0]
  refined_anchors = utils.apply_box_deltas(anchors[indices], rpn_bbox[b, :len(indices)] * dnncfg.RPN_BBOX_STD_DEV)
  customlog("anchors", anchors)
  customlog("refined_anchors", refined_anchors)

  # Get list of positive anchors
  positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
  log.info("Positive anchors: {}".format(len(positive_anchor_ids)))
  negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
  log.info("Negative anchors: {}".format(len(negative_anchor_ids)))
  neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
  log.info("Neutral anchors: {}".format(len(neutral_anchor_ids)))

  log.info("ROI breakdown by class---------------")
  # ROI breakdown by class
  for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
      if n:
          log.info("{:23}: {}".format(c[:20], n))

  log.info("Show positive anchors---------------")
  # Show positive anchors
  fig, ax = plt.subplots(1, figsize=(16, 16))
  visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                       refined_boxes=refined_anchors, ax=ax)


  # In[17]:
  log.info("[17]. ---------------")
  log.info("Show negative anchors---------------")
  # Show negative anchors
  visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])


  # In[18]:
  log.info("[18]. ---------------")
  log.info("Show neutral anchors. They don't contribute to training---------------")
  # Show neutral anchors. They don't contribute to training.
  visualize.draw_boxes(sample_image, boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])


  # In[19]:
  log.info("[19]. ---------------")
  log.info("ROIs---------------")
  ## ROIs
  if random_rois:
      # Class aware bboxes
      bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]), mrcnn_class_ids[b], :]

      # Refined ROIs
      refined_rois = utils.apply_box_deltas(rois[b].astype(np.float32), bbox_specific[:,:4] * dnncfg.BBOX_STD_DEV)

      # Class aware masks
      mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :, mrcnn_class_ids[b]]

      visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific, mrcnn_class_ids[b], dataset.class_names)

      # Any repeated ROIs?
      rows = np.ascontiguousarray(rois[b]).view(np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
      _, idx = np.unique(rows, return_index=True)
      log.info("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))


  # In[20]:
  log.info("[20]. ---------------")
  log.info("Dispalay ROIs and corresponding masks and bounding boxes---------------")
  if random_rois:
      # Display ROIs and corresponding masks and bounding boxes
      ids = random.sample(range(rois.shape[1]), 8)

      images = []
      titles = []
      for i in ids:
          image = visualize.draw_box(sample_image.copy(), rois[b,i,:4].astype(np.int32), [255, 0, 0])
          image = visualize.draw_box(image, refined_rois[i].astype(np.int64), [0, 255, 0])
          images.append(image)
          titles.append("ROI {}".format(i))
          images.append(mask_specific[i] * 255)
          titles.append(dataset.class_names[mrcnn_class_ids[b,i]][:20])

      visualize.display_images(images, titles, cols=4, cmap="Blues", interpolation="none")


  # In[21]:
  log.info("[21]. ---------------")
  log.info("Check ratio of positive ROIs in a set of images.---------------")
  # Check ratio of positive ROIs in a set of images.
  if random_rois:
      limit = 10
      temp_g = modellib.data_generator(
          dataset, datacfg, dnncfg, shuffle=True, random_rois=10000,
          batch_size=1, detection_targets=True)
      total = 0
      for i in range(limit):
          _, [ids, _, _] = next(temp_g)
          positive_rois = np.sum(ids[0] > 0)
          total += positive_rois
          log.info("{:5} {:5.2f}".format(positive_rois, positive_rois/ids.shape[1]))
      log.info("Average percent: {:.2f}".format(total/(limit*ids.shape[1])))
コード例 #22
0
    print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))

if random_rois:
    # Display ROIs and corresponding masks and bounding boxes
    ids = random.sample(range(rois.shape[1]), 8)

    images = []
    titles = []
    for i in ids:
        image = visualize.draw_box(sample_image.copy(), rois[b,i,:4].astype(np.int32), [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64), [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b,i]][:20])

    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")

# Check ratio of positive ROIs in a set of images.
if random_rois:
    limit = 10
    temp_g = modellib.data_generator(
        dataset, config, shuffle=True, random_rois=10000,
        batch_size=1, detection_targets=True)
    total = 0
    for i in range(limit):
        _, [ids, _, _] = next(temp_g)
        positive_rois = np.sum(ids[0] > 0)
        total += positive_rois
        print("{:5} {:5.2f}".format(positive_rois, positive_rois/ids.shape[1]))
    print("Average percent: {:.2f}".format(total/(limit*ids.shape[1])))
コード例 #23
0
                     boxes=proposals[keep][ixs],
                     refined_boxes=refined_proposals[keep][ixs],
                     visibilities=np.where(roi_class_ids[keep][ixs] > 0, 1, 0),
                     captions=captions,
                     title="Detections after NMS",
                     ax=get_ax())

#%%
#######################################################
############# Stage 3: Generating Masks ###############
#######################################################
#%%
#######################################################
################## 3.a Mask Targets ###################
#######################################################
display_images(np.transpose(gt_mask, [2, 0, 1]), cmap="Blues")

#%%
#######################################################
################ 3.b Predicted Masks ##################
#######################################################
# Get predictions of mask head
mrcnn = model.run_graph([image], [
    ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    ("masks", model.keras_model.get_layer("mrcnn_mask").output),
])

# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]
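# Note: the [0][0] lookup above assumes at least one zero-padded detection
# slot; when all slots are filled it raises IndexError. A safer variant
# (a sketch):
# zero_ix = np.where(det_class_ids == 0)[0]
# det_count = zero_ix[0] if zero_ix.size else det_class_ids.shape[0]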
コード例 #24
0
    def get_labels(self, labels):

        dims = labels.shape

        unlabeled_labels = np.zeros((dims[0], dims[1], 1))
        building_labels = np.zeros((dims[0], dims[1], 1))
        fence_labels = np.zeros((dims[0], dims[1], 1))
        other_labels = np.zeros((dims[0], dims[1], 1))
        pedestrian_labels = np.zeros((dims[0], dims[1], 1))
        pole_labels = np.zeros((dims[0], dims[1], 1))
        road_line_labels = np.zeros((dims[0], dims[1], 1))
        road_labels = np.zeros((dims[0], dims[1], 1))
        sidewalk_labels = np.zeros((dims[0], dims[1], 1))
        vegetation_labels = np.zeros((dims[0], dims[1], 1))
        car_labels = np.zeros((dims[0], dims[1], 1))
        wall_labels = np.zeros((dims[0], dims[1], 1))
        traffic_sign_labels = np.zeros((dims[0], dims[1], 1))

        unlabeled_index = np.all(labels == (0, 0, 0), axis=-1)
        building_index = np.all(labels == (70, 70, 70), axis=-1)
        fence_index = np.all(labels == (190, 153, 153), axis=-1)
        other_index = np.all(labels == (250, 170, 160), axis=-1)
        pedestrian_index = np.all(labels == (220, 20, 60), axis=-1)
        pole_index = np.all(labels == (153, 153, 153), axis=-1)
        road_line_index = np.all(labels == (157, 234, 50), axis=-1)
        road_index = np.all(labels == (128, 64, 128), axis=-1)
        sidewalk_index = np.all(labels == (244, 35, 232), axis=-1)
        vegetation_index = np.all(labels == (107, 142, 35), axis=-1)
        car_index = np.all(labels == (0, 0, 142), axis=-1)
        wall_index = np.all(labels == (102, 102, 156), axis=-1)
        traffic_sign_index = np.all(labels == (220, 220, 70), axis=-1)

        unlabeled_labels[unlabeled_index] = 1
        building_labels[building_index] = 10
        fence_labels[fence_index] = 10
        other_labels[other_index] = 10
        pedestrian_labels[pedestrian_index] = 10
        pole_labels[pole_index] = 10
        road_line_labels[road_line_index] = 10
        road_labels[road_index] = 10
        sidewalk_labels[sidewalk_index] = 10
        vegetation_labels[vegetation_index] = 1
        car_labels[car_index] = 10
        wall_labels[wall_index] = 10
        traffic_sign_labels[traffic_sign_index] = 10

        return np.dstack([unlabeled_labels, building_labels, fence_labels,
                          other_labels, pedestrian_labels, pole_labels,
                          road_line_labels, road_labels, sidewalk_labels, vegetation_labels,
                          car_labels, wall_labels, traffic_sign_labels])

    def image_reference(self, image_id):
        """Return the carla data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "carla":
            return info["id"]
        else:
            return super(self.__class__, self).image_reference(image_id)
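
# A more compact, data-driven equivalent of the get_labels method above (a
# sketch; COLOR_WEIGHTS and get_labels_compact are names introduced here for
# illustration and mirror the hard-coded color -> weight pairs):
COLOR_WEIGHTS = {
    (0, 0, 0): 1,         # unlabeled
    (70, 70, 70): 10,     # building
    (190, 153, 153): 10,  # fence
    (250, 170, 160): 10,  # other
    (220, 20, 60): 10,    # pedestrian
    (153, 153, 153): 10,  # pole
    (157, 234, 50): 10,   # road line
    (128, 64, 128): 10,   # road
    (244, 35, 232): 10,   # sidewalk
    (107, 142, 35): 1,    # vegetation
    (0, 0, 142): 10,      # car
    (102, 102, 156): 10,  # wall
    (220, 220, 70): 10,   # traffic sign
}

def get_labels_compact(labels):
    # One weighted binary plane per class color, stacked along the last axis
    # in the same order as the dict above.
    planes = []
    for color, weight in COLOR_WEIGHTS.items():
        plane = np.zeros(labels.shape[:2] + (1,))
        plane[np.all(labels == color, axis=-1)] = weight
        planes.append(plane)
    return np.dstack(planes)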

config = CarlaConfig()
config.STEPS_PER_EPOCH = NUMBER_OF_TRAIN_DATA//config.BATCH_SIZE
config.VALIDATION_STEPS = NUMBER_OF_VAL_DATA//config.BATCH_SIZE
config.display()


dataset = carlaDataset()
dataset.load_images(dir=RGB_TRAIN_DIR, type='train')


# mask, a = train.load_mask(50)
# print(a)
dataset.prepare()
print("Image Count: {}".format(len(dataset.image_ids)))
print("Class Count: {}".format(dataset.num_classes))
for i, info in enumerate(dataset.class_info):
    print("{:3}. {:50}".format(i, info['name']))

image_ids = np.random.choice(dataset.image_ids, 4)
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset.class_names)




# Load random image and mask.
image_id = random.choice(dataset.image_ids)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Compute Bounding box
bbox = utils.extract_bboxes(mask)

# Display image and additional stats
print("image_id ", image_id)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)


# Load random image and mask.
image_id = np.random.choice(dataset.image_ids, 1)[0]
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
# Resize
image, window, scale, padding, _ = utils.resize_image(
    image,
    min_dim=config.IMAGE_MIN_DIM,
    max_dim=config.IMAGE_MAX_DIM,
    mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding)
# Compute Bounding box
bbox = utils.extract_bboxes(mask)

# Display image and additional stats
print("image_id: ", image_id)
print("Original shape: ", original_shape)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)



image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)

log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)

display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])

visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Generate Anchors
backbone_shapes = modellib.compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                          config.RPN_ANCHOR_RATIOS,
                                          backbone_shapes,
                                          config.BACKBONE_STRIDES,
                                          config.RPN_ANCHOR_STRIDE)
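
# Note: generate_pyramid_anchors concatenates anchors level by level (one
# level per backbone_shapes entry); within a level they are ordered by
# feature-map cell (row-major), then by anchor ratio. The per-level slicing
# further below relies on this ordering.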

# Print summary of anchors
num_levels = len(backbone_shapes)
anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
print("Count: ", anchors.shape[0])
print("Scales: ", config.RPN_ANCHOR_SCALES)
print("ratios: ", config.RPN_ANCHOR_RATIOS)
print("Anchors per Cell: ", anchors_per_cell)
print("Levels: ", num_levels)
anchors_per_level = []
for l in range(num_levels):
    num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
    anchors_per_level.append(anchors_per_cell * num_cells // config.RPN_ANCHOR_STRIDE**2)
    print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))
## Visualize anchors of one cell at the center of the feature map of a specific level

# Load and draw random image
image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, _, _, _ = modellib.load_image_gt(dataset, config, image_id)
fig, ax = plt.subplots(1, figsize=(10, 10))
ax.imshow(image)
levels = len(backbone_shapes)

for level in range(levels):
    colors = visualize.random_colors(levels)
    # Compute the index of the anchors at the center of the image
    level_start = sum(anchors_per_level[:level]) # sum of anchors of previous levels
    level_anchors = anchors[level_start:level_start+anchors_per_level[level]]
    print("Level {}. Anchors: {:6}  Feature map Shape: {}".format(level, level_anchors.shape[0],
                                                                  backbone_shapes[level]))
    center_cell = backbone_shapes[level] // 2
    center_anchor = anchors_per_cell * (
        (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2) \
        + center_cell[1] / config.RPN_ANCHOR_STRIDE)
    level_center = int(center_anchor)

    # Draw anchors. Brightness show the order in the array, dark to bright.
    for i, rect in enumerate(level_anchors[level_center:level_center+anchors_per_cell]):
        y1, x1, y2, x2 = rect
        p = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=2, facecolor='none',
                              edgecolor=(i+1)*np.array(colors[level]) / anchors_per_cell)
        ax.add_patch(p)

# Create data generator
random_rois = 4000
g = modellib.data_generator(
    dataset, config, shuffle=True, random_rois=random_rois,
    batch_size=4,
    detection_targets=True)
# Get Next Image
if random_rois:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks, rpn_rois, rois], \
    [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)

    log("rois", rois)
    log("mrcnn_class_ids", mrcnn_class_ids)
    log("mrcnn_bbox", mrcnn_bbox)
    log("mrcnn_mask", mrcnn_mask)
else:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks], _ = next(g)

log("gt_class_ids", gt_class_ids)
log("gt_boxes", gt_boxes)
log("gt_masks", gt_masks)
log("rpn_match", rpn_match, )
log("rpn_bbox", rpn_bbox)
image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
print("image_id: ", image_id, dataset.image_reference(image_id))

# Remove the last dim in mrcnn_class_ids. It's only added
# to satisfy Keras restriction on target shape.
mrcnn_class_ids = mrcnn_class_ids[:, :, 0]


b = 0

# Restore original image (reverse normalization)
sample_image = modellib.unmold_image(normalized_images[b], config)

# Compute anchor shifts.
indices = np.where(rpn_match[b] == 1)[0]
refined_anchors = utils.apply_box_deltas(anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
log("anchors", anchors)
log("refined_anchors", refined_anchors)

# Get list of positive anchors
positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
print("Positive anchors: {}".format(len(positive_anchor_ids)))
negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
print("Negative anchors: {}".format(len(negative_anchor_ids)))
neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
print("Neutral anchors: {}".format(len(neutral_anchor_ids)))

# ROI breakdown by class
for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
    if n:
        print("{:23}: {}".format(c[:20], n))

# Show positive anchors
visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                     refined_boxes=refined_anchors)



# Show negative anchors
visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])


# Show neutral anchors. They don't contribute to training.
visualize.draw_boxes(sample_image, boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])

if random_rois:
    # Class aware bboxes
    bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]), mrcnn_class_ids[b], :]

    # Refined ROIs
    refined_rois = utils.apply_box_deltas(rois[b].astype(np.float32), bbox_specific[:, :4] * config.BBOX_STD_DEV)

    # Class aware masks
    mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :, mrcnn_class_ids[b]]

    visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific, mrcnn_class_ids[b], dataset.class_names)

    # Any repeated ROIs?
    rows = np.ascontiguousarray(rois[b]).view(np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
    _, idx = np.unique(rows, return_index=True)
    print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))
if random_rois:
    # Display ROIs and corresponding masks and bounding boxes
    ids = random.sample(range(rois.shape[1]), 8)

    images = []
    titles = []
    for i in ids:
        image = visualize.draw_box(sample_image.copy(), rois[b,i,:4].astype(np.int32), [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64), [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b,i]][:20])

    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")
# Check ratio of positive ROIs in a set of images.
if random_rois:
    limit = 10
    temp_g = modellib.data_generator(
        dataset, config, shuffle=True, random_rois=10000,
        batch_size=1, detection_targets=True)
    total = 0
    for i in range(limit):
        _, [ids, _, _] = next(temp_g)
        positive_rois = np.sum(ids[0] > 0)
        total += positive_rois
        print("{:5} {:5.2f}".format(positive_rois, positive_rois/ids.shape[1]))
    print("Average percent: {:.2f}".format(total/(limit*ids.shape[1])))
exit()
コード例 #25
0
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")

#print("masks", r['masks'])
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

#display_images([image])

splash = load.color_splash(image, r['masks'])
display_images([splash], cols=1)

mrcnn = model.run_graph([image], [
    ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    ("masks", model.keras_model.get_layer("mrcnn_mask").output),
])

# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
# det_class_ids gets one entry per detected object; each entry holds the
# predicted class out of the 3 classes (Llane, car, Slane).
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]
print(det_class_ids[0])
print(det_class_ids[1])
#display_images(d)
print("{} detections: {}".format(det_count,
コード例 #26
0
def mrcnn_templates(rec_list, image_source, snowleop_dir, weights_path):
    """Generate templates with the Mask R-CNN and add them to the
    recognition objects in rec_list."""
    # MASK R-CNN
    #------------------------------------------------------------------------------
    # This is the Mask R-CNN function. It takes rec_list (the list of
    # recognition objects), image_source (the folder containing the images
    # you want to add masks to), snowleop_dir (the directory of the snow
    # leopard photos the model was trained on), and weights_path (the
    # location of the trained .h5 weights file). The snow leopard directory
    # is needed because the weights were trained on that dataset. Everything
    # up to the for loop configures the Mask R-CNN. If more than one snow
    # leopard is detected in an image, an empty all-zero template is written
    # instead of a mask. Edit all the paths to reflect your directory layout
    # or the function will not work.
    #-------------------------------------------------------------------------------
    config = snow_leopard.CustomConfig()

    ## TODO: change this path or get it into easy_run.py
    ##snowleop_dir = os.path.join(ROOT_DIR, "C:/Users/Phil/SU-ECE-19-7-master-MaskRCNN/Recognition/samples/snow_leopard/dataset")
    class InferenceConfig(config.__class__):
        # Run detection on one image at a time
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    config = InferenceConfig()
    config.display()
    # Device to load the neural network on. Useful if you're training a model on the same machine,
    # in which case use CPU and leave the GPU for training.
    DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0
    # Inspect the model in training or inference modes
    # values: 'inference' or 'training'
    TEST_MODE = "inference"

    def get_ax(rows=1, cols=1, size=16):
        """Return a Matplotlib Axes array to be used in
        all visualizations in the notebook. Provide a
        central point to control graph sizes.
        
        Adjust the size attribute to control how big to render images
        """
        _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
        return ax

    # Load validation dataset
    dataset = snow_leopard.CustomDataset()
    dataset.load_custom(snowleop_dir, "val")

    # Must call before using the dataset
    dataset.prepare()

    print("Images: {}\nClasses: {}".format(len(dataset.image_ids),
                                           dataset.class_names))

    # Create model in inference mode
    with tf.device(DEVICE):
        model = modellib.MaskRCNN(mode="inference",
                                  model_dir=MODEL_DIR,
                                  config=config)

    # Set path to balloon weights file

    # Optional: Download file from the Releases page and set its path
    # https://github.com/matterport/Mask_RCNN/releases
    # weights_path = "/path/to/mask_rcnn_balloon.h5"

    # Load weights
    print("Loading weights ", weights_path)
    model.load_weights(str(weights_path), by_name=True)

    temp_templates = image_source.parents[1] / "mrcnn_templates/"
    if (not os.path.exists(temp_templates)):
        os.mkdir(temp_templates)

    count = 0

    # add in template
    for t in glob.iglob(str(image_source)):

        ## TODO: edit this path
        IMAGE_DIR = "/Users/tonycaballero/Downloads/SU-ECE-20-4-master/Image_Sets/quick_set/images"
        # Load a random image from the images folder
        file_names = next(os.walk(IMAGE_DIR))[2]
        maskimage = skimage.io.imread(rec_list[count].image_title)
        # Run detection
        results = model.detect([maskimage], verbose=1)

        # Visualize results
        r = results[0]
        ax = get_ax(1)
        bbb = visualize.display_instances(maskimage,
                                          r['rois'],
                                          r['masks'],
                                          r['class_ids'],
                                          dataset.class_names,
                                          r['scores'],
                                          ax=ax,
                                          title="Predictions")
        display_images(np.transpose(r['masks'], [2, 0, 1]), cmap="binary")

        ## TODO: Turn this into a user option of "Is there a cat in this photo?"
        ##       or "Is there more than one cat in this photo?" then either let
        ##       the Mask R-CNN keep both masks or have the user draw a manual
        ##       template for that image.
        if (np.size(r['masks']) == 0):
            print('\n\tNo cat detected by Mask R-CNN in image.')
            print('\tMaking empty template for this image:',
                  rec_list[count].image_title, '\n\n')
            r_mask = np.zeros(np.shape(rec_list[count].image))
        elif (np.shape(r['masks'])[2] > 1):
            print("\tShape r['masks']:", np.shape(r['masks']))
            print('\n\tMore than one cat detected by Mask R-CNN in image:',
                  np.shape(r['masks'])[2], "cats.")
            print('\tMaking empty template for this image:',
                  rec_list[count].image_title, '\n\n')
            r_mask = np.zeros(np.shape(rec_list[count].image))
            #print('\tUsing 1st template generated for this image:',rec_list[count].image_title,'\n\n')
            #r_masks = np.split(r['masks'],np.shape(r['masks'])[2])
            #r_mask = np.reshape(r_masks[1], np.shape(r['masks'])[:2])
        else:
            r_mask = np.reshape(r['masks'], np.shape(r['masks'])[:2])

        r_mask = r_mask * 255

        # get template name and write BMP from r_mask
        template = cv2.imread(t)
        template_name = Path(t).with_suffix('.BMP')
        template_path = temp_templates / template_name.name
        # (JPEG quality flags have no effect on BMP output, so none are passed.)
        cv2.imwrite(str(template_path), r_mask)

        # add template to corresponding rec_list object
        rec_list[count].add_template(str(template_path), r_mask)

        count = count + 1

    return rec_list
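
# Hypothetical usage (placeholder paths; rec_list comes from the caller's own
# recognition pipeline):
# rec_list = mrcnn_templates(rec_list,
#                            Path("Image_Sets/quick_set/images/*.jpg"),
#                            "samples/snow_leopard/dataset",
#                            "logs/snow_leopard_weights.h5")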
コード例 #27
0
                     visibilities=np.where(roi_class_ids[keep][ixs] > 0, 1, 0),
                     captions=captions,
                     title="Detections after NMS",
                     ax=get_ax())

#%% [markdown]
# ## Stage 3: Generating Masks
#
# This stage takes the detections (refined bounding boxes and class IDs) from the previous layer and runs the mask head to generate segmentation masks for every instance.
#%% [markdown]
# ### 3.a Mask Targets
#
# These are the training targets for the mask branch

#%%
display_images(np.transpose(gt_mask, [2, 0, 1]), cmap="Blues")

#%% [markdown]
# ### 3.b Predicted Masks

#%%
# Get predictions of mask head
mrcnn = model.run_graph([image], [
    ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    ("masks", model.keras_model.get_layer("mrcnn_mask").output),
])

# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]
コード例 #28
0
# Validation dataset
dataset_val = visdrone.VisDroneDataset()
dataset_val.load_VisDrone(val_count, val_images_folder, val_imglist,
                          val_folder)
dataset_val.prepare()

# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
images = []
images_bbx = []
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    label, count = dataset_train.load_anno(image_id)
    images.append(image)
    images_bbx.append(visualize.draw_bbx(image, label, count))
visualize.display_images(images_bbx)

### Create Model  ###
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)

# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    model.load_weights(MODEL_PATH,
                       by_name=True,
                       exclude=[
コード例 #29
0
def inspect_backbone_activation(model, image, savename=None, args=None):
    """
    Run the image through the model pipeline and inspect (via plt.imshow) its
    activations after selected layers.
    TODO: add a variable to choose the layer to inspect from
    :param model: the MRCNN model
    :param image: the image to inspect
    :param savename: if None does nothing, else saves the figure to the output folder with the given name
    :param args: if None defaults to the resnet50 pipe, else expects an object with a 'backbone' attribute that selects the pipe to follow
    """
    if args is not None:
        if args.backbone.lower() == "resnet50":
            activation_resnet50 = [
                ("input_image",
                 tf.identity(
                     model.keras_model.get_layer("input_image").output)),
                ("res2c_out", model.keras_model.get_layer("res2c_out").output),
                ("res3c_out", model.keras_model.get_layer("res3c_out").output),
                # ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet100
                ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
                ("roi", model.keras_model.get_layer("ROI").output),
            ]
            activations = model.run_graph([image], activation_resnet50)
        elif args.backbone.lower() == "resnet101":
            activation_resnet101 = [
                ("input_image",
                 tf.identity(
                     model.keras_model.get_layer("input_image").output)),
                ("res2c_out", model.keras_model.get_layer("res2c_out").output),
                ("res3c_out", model.keras_model.get_layer("res3c_out").output),
                ("res4w_out", model.keras_model.get_layer("res4w_out").output
                 ),  # for resnet100
                ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
                ("roi", model.keras_model.get_layer("ROI").output),
            ]
            activations = model.run_graph([image], activation_resnet101)
        else:
            raise NotImplementedError(
                "{} is not implemented for inspect_backbone_activation".format(
                    args.backbone))
    else:
        activation_resnet50 = [
            ("input_image",
             tf.identity(model.keras_model.get_layer("input_image").output)),
            ("res2c_out", model.keras_model.get_layer("res2c_out").output),
            ("res3c_out", model.keras_model.get_layer("res3c_out").output),
            # ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet100
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ]
        activations = model.run_graph([image], activation_resnet50)
    if savename is not None:
        savename = os.path.join(result_dir, savename + ".png")
    try:
        vis.display_images(np.transpose(activations["res2c_out"][0, :, :, :4],
                                        [2, 0, 1]),
                           cols=4,
                           out_path=savename,
                           show=False)
    except Exception:
        pass
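
# Hypothetical usage (model and image prepared by the caller; args only needs
# a 'backbone' attribute, e.g. argparse.Namespace(backbone="resnet50")):
# inspect_backbone_activation(model, image, savename="res2c_activations",
#                             args=argparse.Namespace(backbone="resnet50"))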
コード例 #30
0
print("Original shape: ", original_shape)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)

log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)

display_images([image] +
               [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Add augmentation and mask resizing.
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, augment=True, use_mini_mask=True)
log("mask", mask)
display_images([image] +
               [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
mask = utils.expand_mask(bbox, mask, image.shape)
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)