Example #1
 def mask(self):
     """
     Visualize the masks of the evaluated image
     """
     if self.r['masks'].shape[-1] > 0:
         display_images(np.transpose(self.r['masks'], [2, 0, 1]), cmap="Blues_r")
     else:
         self.visualizar_error("There are no defects in the image.")
Example #2
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

# ## Color Splash
#
# This is for illustration. You can call `pig_train.py` with the `splash` option to get better images without the black padding.

# In[10]:

splash = pig_train.color_splash(image, r['masks'])
display_images([splash], cols=1)
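
# Conceptually, a color splash keeps the original colors wherever any predicted
# instance mask is set and converts the rest of the image to grayscale. The
# sketch below only illustrates that idea; color_splash_sketch is a hypothetical
# helper and not necessarily what pig_train.color_splash does internally.

import numpy as np
import skimage.color

def color_splash_sketch(image, mask):
    """image: RGB image [H, W, 3]; mask: boolean masks [H, W, num_instances]."""
    # Grayscale copy of the image, expanded back to 3 channels
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    if mask.shape[-1] > 0:
        # Keep color wherever at least one instance mask covers the pixel
        keep = np.sum(mask, axis=-1, keepdims=True) >= 1
        return np.where(keep, image, gray).astype(np.uint8)
    return gray.astype(np.uint8)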

# ## Step by Step Prediction

# ## Stage 1: Region Proposal Network
#
# The Region Proposal Network (RPN) runs a lightweight binary classifier on a large set of boxes (anchors) over the image and returns object/no-object scores. Anchors with a high *objectness* score (positive anchors) are passed to stage two to be classified.
#
# Often, even positive anchors don't cover objects fully. So the RPN also regresses a refinement (a delta in location and size) that is applied to each anchor to shift and resize it slightly toward the correct boundaries of the object.
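#
# A minimal sketch of how such a refinement can be applied, assuming boxes in
# (y1, x1, y2, x2) order and deltas of the form (dy, dx, log(dh), log(dw)), the
# convention used in this codebase. apply_box_deltas_sketch is written here only
# for illustration and is not taken verbatim from the library.

import numpy as np

def apply_box_deltas_sketch(boxes, deltas):
    """boxes: [N, (y1, x1, y2, x2)], deltas: [N, (dy, dx, log(dh), log(dw))]."""
    # Convert to center/size representation
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Shift the center and rescale the size
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= np.exp(deltas[:, 2])
    width *= np.exp(deltas[:, 3])
    # Convert back to corner coordinates
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    return np.stack([y1, x1, y1 + height, x1 + width], axis=1)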

# ### 1.a RPN Targets
#
# The RPN targets are the training values for the RPN. To generate the targets, we start with a grid of anchors that cover the full image at different scales, and then we compute the IoU of the anchors with the ground truth objects. Positive anchors are those that have an IoU >= 0.7 with any ground truth object, and negative anchors are those that don't cover any object by more than 0.3 IoU. Anchors in between (i.e. those that cover an object by IoU >= 0.3 but < 0.7) are considered neutral and excluded from training.
#
# To train the RPN regressor, we also compute the shift and resizing needed to make the anchor cover the ground truth object completely.
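#
# A simplified sketch of the matching rule described above: compute the IoU of
# every anchor against every ground truth box and label each anchor positive (1),
# negative (-1), or neutral (0). The real target generation in the Mask R-CNN code
# handles additional details (e.g. making sure every ground truth object gets at
# least one positive anchor), so rpn_match_sketch is only an illustration.

import numpy as np

def rpn_match_sketch(anchors, gt_boxes, pos_iou=0.7, neg_iou=0.3):
    """anchors: [N, (y1, x1, y2, x2)], gt_boxes: [M, (y1, x1, y2, x2)].
    Returns one label per anchor: 1 = positive, -1 = negative, 0 = neutral."""
    # Pairwise intersection areas [N, M]
    y1 = np.maximum(anchors[:, None, 0], gt_boxes[None, :, 0])
    x1 = np.maximum(anchors[:, None, 1], gt_boxes[None, :, 1])
    y2 = np.minimum(anchors[:, None, 2], gt_boxes[None, :, 2])
    x2 = np.minimum(anchors[:, None, 3], gt_boxes[None, :, 3])
    intersection = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    # Pairwise IoU [N, M]
    anchor_area = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
    gt_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
    iou = intersection / (anchor_area[:, None] + gt_area[None, :] - intersection)
    # Label each anchor by its best overlap with any ground truth box
    best_iou = iou.max(axis=1)
    match = np.zeros(len(anchors), dtype=np.int32)
    match[best_iou < neg_iou] = -1
    match[best_iou >= pos_iou] = 1
    return match
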
Example #3
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset_val, inference_config,
                           image_id, use_mini_mask=False)

log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset_val.class_names, figsize=(8, 8))

visualize.display_images([original_image])

results = model.detect([original_image], verbose=1)

r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset_val.class_names, r['scores'], ax=get_ax())



'''
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
Example #4
#
# To see the effect of mask resizing, and to verify the code is correct, we visualize a few examples.

# In[7]:

image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)

log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)

display_images([image] +
               [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])

# In[8]:

visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# In[9]:

# Add augmentation and mask resizing.
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, augment=True, use_mini_mask=True)
log("mask", mask)
display_images([image] +
               [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
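
# The use_mini_mask=True path above trades a little accuracy for memory: each
# instance mask is cropped to its bounding box and resized to a small fixed shape
# instead of being stored at full image resolution. A simplified sketch of that
# idea follows; minimize_mask_sketch is illustrative, and the library's own
# utils.minimize_mask / utils.expand_mask include additional checks.

import numpy as np
import skimage.transform

def minimize_mask_sketch(bbox, mask, mini_shape=(56, 56)):
    """bbox: [num_instances, (y1, x1, y2, x2)], mask: [H, W, num_instances].
    Returns masks cropped to their boxes and resized to mini_shape."""
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        y1, x1, y2, x2 = bbox[i][:4]
        # Crop the mask to its box, then shrink it to the fixed mini shape
        m = mask[y1:y2, x1:x2, i].astype(float)
        m = skimage.transform.resize(m, mini_shape, order=1)
        mini_mask[:, :, i] = np.around(m).astype(bool)
    return mini_mask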

Example #5
    parser.set_defaults(skipvalidate=False)
    parser.set_defaults(loadweights=False)
    args = parser.parse_args()

    all_df = load_data_from_frames()

    if args.visualize == 1:
        from visualize import visualize_data_histogram, visualize_p_data_histogram, display_images

        #visualize_data_histogram(np.array([row['angle'] for i, row in all_df.iterrows()]))
        #visualize_p_data_histogram(all_df)
        #all_df = sampling_data(all_df)
        visualize_p_data_histogram(all_df)
        #one_df = all_df[0]
        x, y = data_generator_for_vis(all_df)
        display_images(x, y)
        #sampling_data(all_df)

    else:
        if args.sample == "yes":
            print("sampling...")
            all_df = sampling_data(all_df)
            print("done sampling...")
        training_df, validation_df = load_training_validation_df(all_df)
        n = training_df.shape[0]
        batch_size = args.batch
        samples_per_epoch = n * 3 # args.epochsize #int(n / batch_size)

        # Create training and validation generators
        train_gen = data_generator(training_df, batch_size=batch_size)
        validation_gen = data_generator(validation_df, is_training=0)
Example #6
# Get detection class IDs and trim the zero padding.
det_class_ids = mrcnn["detections"][0, :, 4].astype(np.int32)
det_count = np.where(det_class_ids == 0)[0][0]
det_class_ids = det_class_ids[:det_count]

print("{} detections: {}".format(
    det_count, np.array(class_names)[det_class_ids]))

# Masks
det_boxes = mrcnn["detections"][0, :, :4].astype(np.int32)
det_mask_specific = np.array([mrcnn["masks"][0, i, :, :, c]
                              for i, c in enumerate(det_class_ids)])
det_masks = np.array([utils.unmold_mask(m, det_boxes[i], image.shape)
                      for i, m in enumerate(det_mask_specific)])
log("det_mask_specific", det_mask_specific)
log("det_masks", det_masks)

# Masks as predicted by the mask head, in their fixed small size
display_images(det_mask_specific[:4] * 255, cmap="Blues", interpolation="none")

# The same masks after resizing to image coordinates via utils.unmold_mask
display_images(det_masks[:4] * 255, cmap="Blues", interpolation="none")

# Get activations of a few sample layers
activations = model.run_graph([image], [
    ("input_image",        model.keras_model.get_layer("input_image").output),
    ("res4w_out",          model.keras_model.get_layer("res4w_out").output),  # for resnet100
    ("rpn_bbox",           model.keras_model.get_layer("rpn_bbox").output),
    ("roi",                model.keras_model.get_layer("ROI").output),
])

# Input image (normalized)
_ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))

# Backbone feature map