def show_roi_and_mask():
    g, num_random_rois, detection_targets = create_data_generator()
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
     gt_boxes, gt_masks, rpn_rois, rois], \
        [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)

    # Remove the last dim in mrcnn_class_ids. It's only added
    # to satisfy Keras restriction on target shape.
    # The original mrcnn_class_ids has shape (batch_size, num_rois, 1).
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)

    # Class aware bboxes
    # mrcnn_bbox has shape (batch_size, num_rois, num_classes, 4)
    # mrcnn_class_ids has shape (batch_size, num_rois)
    # bbox_specific has shape (num_rois, 4): for each ROI, the deltas
    # of its target class.
    bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]),
                               mrcnn_class_ids[b], :]

    # Refined ROIs
    refined_rois = utils.apply_box_deltas(
        rois[b].astype(np.float32),
        bbox_specific[:, :4] * config.BBOX_STD_DEV)

    # Class aware masks
    # mrcnn_mask has shape (batch_size, num_rois, 28, 28, num_classes)
    # mask_specific has shape (num_rois, 28, 28)
    mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :,
                               mrcnn_class_ids[b]]

    # Display ROIs and corresponding masks and bounding boxes
    ids = random.sample(range(rois.shape[1]), 8)
    images = []
    titles = []
    for i in ids:
        # copy() keeps the draws for the different ids independent
        image = visualize.draw_box(sample_image.copy(),
                                   rois[b, i, :4].astype(np.int32),
                                   [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                   [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])
    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")
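# A minimal toy sketch (hypothetical shapes, not part of the pipeline above) of
# the NumPy advanced-indexing trick used for the "class aware" selection: for
# each ROI row, pick the bbox entry that belongs to that ROI's target class.
import numpy as np

def _class_aware_indexing_demo():
    num_rois, num_classes = 3, 4
    toy_bbox = np.arange(num_rois * num_classes * 4).reshape(num_rois, num_classes, 4)
    toy_class_ids = np.array([2, 0, 3])
    # Row i of the result is toy_bbox[i, toy_class_ids[i], :]; result shape (num_rois, 4).
    class_specific = toy_bbox[np.arange(num_rois), toy_class_ids, :]
    assert class_specific.shape == (num_rois, 4)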
def show_anchors():
    g, _, _ = create_data_generator(num_random_rois=0)

    # Get Next Image
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
     gt_boxes, gt_masks], _ = next(g)

    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)

    # Get list of positive anchors
    positive_anchor_ids = np.where(rpn_match[b] == 1)[0]

    # Generate anchors
    backbone_shapes, anchors, anchors_per_level = generate_anchors()

    # Compute anchor shifts.
    refined_anchors = utils.apply_box_deltas(
        anchors[positive_anchor_ids],
        rpn_bbox[b, :len(positive_anchor_ids)] * config.RPN_BBOX_STD_DEV)
    log("anchors", anchors)
    print("Positive anchors: {}".format(len(positive_anchor_ids)))
    log("refined_anchors", refined_anchors)

    negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
    print("Negative anchors: {}".format(len(negative_anchor_ids)))
    neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
    print("Neutral anchors: {}".format(len(neutral_anchor_ids)))

    # Show positive anchors
    fig, ax = plt.subplots(1, figsize=(16, 16))
    visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                         refined_boxes=refined_anchors, ax=ax)

    # Show negative anchors
    visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids], ax=ax)

    # Show neutral anchors. They don't contribute to training.
    visualize.draw_boxes(sample_image,
                         boxes=anchors[np.random.choice(neutral_anchor_ids, 100)],
                         ax=ax)
    plt.show()
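# For reference, a minimal NumPy sketch of the standard Faster R-CNN box
# refinement that utils.apply_box_deltas performs above. Deltas are
# (dy, dx, log(dh), log(dw)) relative to each box, which is why they are
# multiplied by RPN_BBOX_STD_DEV (the training-time normalization) first.
# This is a sketch for intuition, not a replacement for the library function.
def apply_box_deltas_sketch(boxes, deltas):
    # boxes: [N, (y1, x1, y2, x2)], deltas: [N, (dy, dx, log(dh), log(dw))]
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Shift the center, then scale height and width
    center_y = center_y + deltas[:, 0] * height
    center_x = center_x + deltas[:, 1] * width
    height = height * np.exp(deltas[:, 2])
    width = width * np.exp(deltas[:, 3])
    # Convert back to corner coordinates
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    return np.stack([y1, x1, y1 + height, x1 + width], axis=1)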
def show_rois():
    g, num_random_rois, detection_targets = create_data_generator()
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
     gt_boxes, gt_masks, rpn_rois, rois], \
        [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)

    # Remove the last dim in mrcnn_class_ids. It's only added to satisfy
    # Keras restriction on target shape.
    # The original mrcnn_class_ids has shape (batch_size, num_train_rois, 1).
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)

    # Class aware bboxes
    # mrcnn_bbox has shape (batch_size, num_train_rois, dataset.num_classes, 4)
    # mrcnn_class_ids has shape (batch_size, num_train_rois)
    # bbox_specific has shape (num_train_rois, 4)
    bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]),
                               mrcnn_class_ids[b], :]

    # Refined ROIs
    refined_rois = utils.apply_box_deltas(
        rois[b].astype(np.float32),
        bbox_specific[:, :4] * config.BBOX_STD_DEV)

    # Class aware masks
    # mrcnn_mask has shape (batch_size, num_train_rois, 28, 28, dataset.num_classes)
    # mask_specific has shape (num_train_rois, 28, 28)
    mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :,
                               mrcnn_class_ids[b]]

    visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific,
                        mrcnn_class_ids[b], dataset.class_names)

    # Any repeated ROIs?
    # np.ascontiguousarray makes the array contiguous in memory, and view()
    # creates a view that shares data with the original array. Viewing each
    # row as one opaque void record lets np.unique compare whole rows.
    rows = np.ascontiguousarray(rois[b]).view(
        np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
    _, idx = np.unique(rows, return_index=True)
    print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))
    plt.show()
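# A small self-contained demo (toy data, not part of the pipeline) of the
# void-view trick used above to count unique rows of a 2-D array.
import numpy as np

def _unique_rows_demo():
    toy = np.array([[1, 2], [3, 4], [1, 2]], dtype=np.int32)
    records = np.ascontiguousarray(toy).view(
        np.dtype((np.void, toy.dtype.itemsize * toy.shape[-1])))
    _, unique_idx = np.unique(records, return_index=True)
    print("Unique rows: {} out of {}".format(len(unique_idx), toy.shape[0]))  # 2 out of 3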
def visualize_feature_map(model, dataset, config, image_ids, output_dir):
    for image_id in image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id,
                                   use_mini_mask=False)

        # Get activations of a few sample layers
        activations = model.run_graph([image], [
            ("input_image", model.keras_model.get_layer("input_image").output),
            ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet101
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ])

        # Input image (normalized)
        plt.figure("input_image")
        fig = plt.imshow(modellib.unmold_image(activations["input_image"][0],
                                               config))
        output_path = os.path.join(output_dir,
                                   "input_image_id_{}.png".format(image_id))
        print('Saving image {} to {}'.format(image_id, output_path))
        plt.savefig(output_path)

        # Backbone feature map
        fig = visualize.display_images(
            np.transpose(activations["res4w_out"][0, :, :, :8], [2, 0, 1]))
        output_path = os.path.join(output_dir,
                                   "feature_maps_id_{}.png".format(image_id))
        print('Saving image {} to {}'.format(image_id, output_path))
        fig.savefig(output_path)
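# Hypothetical usage sketch for the helper above (names and the output
# directory are illustrative assumptions, not values defined in this file):
# visualize_feature_map(model, dataset, config,
#                       image_ids=np.random.choice(dataset.image_ids, 3),
#                       output_dir="outputs")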
log("gt_boxes", gt_boxes) log("gt_masks", gt_masks) log("rpn_match", rpn_match, ) log("rpn_bbox", rpn_bbox) image_id = modellib.parse_image_meta(image_meta)["image_id"][0] print("image_id: ", image_id, dataset.image_reference(image_id)) # Remove the last dim in mrcnn_class_ids. It's only added # to satisfy Keras restriction on target shape. mrcnn_class_ids = mrcnn_class_ids[:, :, 0] b = 0 # Restore original image (reverse normalization) sample_image = modellib.unmold_image(normalized_images[b], config) # Compute anchor shifts. indices = np.where(rpn_match[b] == 1)[0] refined_anchors = utils.apply_box_deltas(anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV) log("anchors", anchors) log("refined_anchors", refined_anchors) # Get list of positive anchors positive_anchor_ids = np.where(rpn_match[b] == 1)[0] print("Positive anchors: {}".format(len(positive_anchor_ids))) negative_anchor_ids = np.where(rpn_match[b] == -1)[0] print("Negative anchors: {}".format(len(negative_anchor_ids))) neutral_anchor_ids = np.where(rpn_match[b] == 0)[0] print("Neutral anchors: {}".format(len(neutral_anchor_ids)))
#%% [markdown]
# ## Visualize Activations
#
# In some cases it helps to look at the output from different layers and
# visualize them to catch issues and odd patterns.

#%%
# Get activations of a few sample layers
activations = model.run_graph([image], [
    ("input_image", tf.identity(model.keras_model.get_layer("input_image").output)),
    ("res2c_out", model.keras_model.get_layer("res2c_out").output),
    ("res3c_out", model.keras_model.get_layer("res3c_out").output),
    ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet101
    ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
    ("roi", model.keras_model.get_layer("ROI").output),
])

#%%
# Input image (normalized)
_ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))

#%%
# Backbone feature map
display_images(np.transpose(activations["res2c_out"][0, :, :, :4], [2, 0, 1]),
               cols=4)

#%%
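# Optional aid (not from the original notebook): list the model's layer names
# first to pick which activations to probe. This assumes only the standard
# Keras API on model.keras_model.
for layer in model.keras_model.layers:
    if layer.name.endswith("_out") or layer.name in ("rpn_bbox", "ROI"):
        print(layer.name)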
def get_labels(self, labels):
    dims = labels.shape
    unlabeled_labels = np.zeros((dims[0], dims[1], 1))
    building_labels = np.zeros((dims[0], dims[1], 1))
    fence_labels = np.zeros((dims[0], dims[1], 1))
    other_labels = np.zeros((dims[0], dims[1], 1))
    pedestrian_labels = np.zeros((dims[0], dims[1], 1))
    pole_labels = np.zeros((dims[0], dims[1], 1))
    road_line_labels = np.zeros((dims[0], dims[1], 1))
    road_labels = np.zeros((dims[0], dims[1], 1))
    sidewalk_labels = np.zeros((dims[0], dims[1], 1))
    vegetation_labels = np.zeros((dims[0], dims[1], 1))
    car_labels = np.zeros((dims[0], dims[1], 1))
    wall_labels = np.zeros((dims[0], dims[1], 1))
    traffic_sign_labels = np.zeros((dims[0], dims[1], 1))

    # Match each pixel's RGB value against the palette.
    unlabeled_index = np.all(labels == (0, 0, 0), axis=-1)
    building_index = np.all(labels == (70, 70, 70), axis=-1)
    fence_index = np.all(labels == (190, 153, 153), axis=-1)
    other_index = np.all(labels == (250, 170, 160), axis=-1)
    pedestrian_index = np.all(labels == (220, 20, 60), axis=-1)
    pole_index = np.all(labels == (153, 153, 153), axis=-1)
    road_line_index = np.all(labels == (157, 234, 50), axis=-1)
    road_index = np.all(labels == (128, 64, 128), axis=-1)
    sidewalk_index = np.all(labels == (244, 35, 232), axis=-1)
    vegetation_index = np.all(labels == (107, 142, 35), axis=-1)
    car_index = np.all(labels == (0, 0, 142), axis=-1)
    wall_index = np.all(labels == (102, 102, 156), axis=-1)
    traffic_sign_index = np.all(labels == (220, 220, 70), axis=-1)

    unlabeled_labels[unlabeled_index] = 1
    building_labels[building_index] = 10
    fence_labels[fence_index] = 10
    other_labels[other_index] = 10
    pedestrian_labels[pedestrian_index] = 10
    pole_labels[pole_index] = 10
    road_line_labels[road_line_index] = 10
    road_labels[road_index] = 10
    sidewalk_labels[sidewalk_index] = 10
    vegetation_labels[vegetation_index] = 1
    car_labels[car_index] = 10
    wall_labels[wall_index] = 10
    traffic_sign_labels[traffic_sign_index] = 10

    return np.dstack([unlabeled_labels, building_labels, fence_labels,
                      other_labels, pedestrian_labels, pole_labels,
                      road_line_labels, road_labels, sidewalk_labels,
                      vegetation_labels, car_labels, wall_labels,
                      traffic_sign_labels])

def image_reference(self, image_id):
    """Return the carla data of the image."""
    info = self.image_info[image_id]
    if info["source"] == "carla":
        return info["id"]
    else:
        super(self.__class__, self).image_reference(image_id)

config = CarlaConfig()
config.STEPS_PER_EPOCH = NUMBER_OF_TRAIN_DATA // config.BATCH_SIZE
config.VALIDATION_STEPS = NUMBER_OF_VAL_DATA // config.BATCH_SIZE
config.display()

dataset = carlaDataset()
dataset.load_images(dir=RGB_TRAIN_DIR, type='train')
# mask, a = train.load_mask(50)
# print(a)
dataset.prepare()

print("Image Count: {}".format(len(dataset.image_ids)))
print("Class Count: {}".format(dataset.num_classes))
for i, info in enumerate(dataset.class_info):
    print("{:3}. {:50}".format(i, info['name']))

image_ids = np.random.choice(dataset.image_ids, 4)
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset.class_names)
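# An optional, more compact variant of get_labels (a sketch, not a drop-in
# method): drive the same per-class channel construction from a palette table
# instead of thirteen hand-written blocks. Colors and fill values mirror the
# ones above, in the same channel order.
import numpy as np

CARLA_PALETTE = [
    ((0, 0, 0), 1),         # unlabeled
    ((70, 70, 70), 10),     # building
    ((190, 153, 153), 10),  # fence
    ((250, 170, 160), 10),  # other
    ((220, 20, 60), 10),    # pedestrian
    ((153, 153, 153), 10),  # pole
    ((157, 234, 50), 10),   # road line
    ((128, 64, 128), 10),   # road
    ((244, 35, 232), 10),   # sidewalk
    ((107, 142, 35), 1),    # vegetation
    ((0, 0, 142), 10),      # car
    ((102, 102, 156), 10),  # wall
    ((220, 220, 70), 10),   # traffic sign
]

def get_labels_from_palette(labels):
    h, w = labels.shape[:2]
    out = np.zeros((h, w, len(CARLA_PALETTE)))
    for channel, (rgb, value) in enumerate(CARLA_PALETTE):
        out[np.all(labels == rgb, axis=-1), channel] = value
    return out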
# Load random image and mask.
image_id = random.choice(dataset.image_ids)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)

# Compute Bounding box
bbox = utils.extract_bboxes(mask)

# Display image and additional stats
print("image_id ", image_id)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Load random image and mask.
image_id = np.random.choice(dataset.image_ids, 1)[0]
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
# Resize
image, window, scale, padding, _ = utils.resize_image(
    image,
    min_dim=config.IMAGE_MIN_DIM,
    max_dim=config.IMAGE_MAX_DIM,
    mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding)
# Compute Bounding box
bbox = utils.extract_bboxes(mask)

# Display image and additional stats
print("image_id: ", image_id)
print("Original shape: ", original_shape)
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, use_mini_mask=False)
log("image", image)
log("image_meta", image_meta)
log("class_ids", class_ids)
log("bbox", bbox)
log("mask", mask)
display_images([image] + [mask[:, :, i] for i in range(min(mask.shape[-1], 7))])
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# Generate Anchors
backbone_shapes = modellib.compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                         config.RPN_ANCHOR_RATIOS,
                                         backbone_shapes,
                                         config.BACKBONE_STRIDES,
                                         config.RPN_ANCHOR_STRIDE)

# Print summary of anchors
num_levels = len(backbone_shapes)
anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
print("Count: ", anchors.shape[0])
print("Scales: ", config.RPN_ANCHOR_SCALES)
print("ratios: ", config.RPN_ANCHOR_RATIOS)
print("Anchors per Cell: ", anchors_per_cell)
print("Levels: ", num_levels)
anchors_per_level = []
for l in range(num_levels):
    num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
    anchors_per_level.append(anchors_per_cell * num_cells
                             // config.RPN_ANCHOR_STRIDE**2)
    print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))

## Visualize anchors of one cell at the center of the feature map of a specific level

# Load and draw random image
image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, _, _, _ = modellib.load_image_gt(dataset, config, image_id)
fig, ax = plt.subplots(1, figsize=(10, 10))
ax.imshow(image)
levels = len(backbone_shapes)

for level in range(levels):
    colors = visualize.random_colors(levels)
    # Compute the index of the anchors at the center of the image
    level_start = sum(anchors_per_level[:level])  # sum of anchors of previous levels
    level_anchors = anchors[level_start:level_start + anchors_per_level[level]]
    print("Level {}. Anchors: {:6} Feature map Shape: {}".format(
        level, level_anchors.shape[0], backbone_shapes[level]))
    center_cell = backbone_shapes[level] // 2
    center_anchor = anchors_per_cell * (
        (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2)
        + center_cell[1] / config.RPN_ANCHOR_STRIDE)
    level_center = int(center_anchor)

    # Draw anchors. Brightness shows the order in the array, dark to bright.
    for i, rect in enumerate(level_anchors[level_center:level_center + anchors_per_cell]):
        y1, x1, y2, x2 = rect
        p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                              facecolor='none',
                              edgecolor=(i + 1) * np.array(colors[level]) / anchors_per_cell)
        ax.add_patch(p)

# Create data generator
random_rois = 4000
g = modellib.data_generator(dataset, config, shuffle=True,
                            random_rois=random_rois,
                            batch_size=4,
                            detection_targets=True)

# Get Next Image
if random_rois:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
     gt_boxes, gt_masks, rpn_rois, rois], \
        [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)
    log("rois", rois)
    log("mrcnn_class_ids", mrcnn_class_ids)
    log("mrcnn_bbox", mrcnn_bbox)
    log("mrcnn_mask", mrcnn_mask)
else:
    [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
     gt_boxes, gt_masks], _ = next(g)

log("gt_class_ids", gt_class_ids)
log("gt_boxes", gt_boxes)
log("gt_masks", gt_masks)
log("rpn_match", rpn_match)
log("rpn_bbox", rpn_bbox)
image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
print("image_id: ", image_id, dataset.image_reference(image_id))

# Remove the last dim in mrcnn_class_ids. It's only added
# to satisfy Keras restriction on target shape.
mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

b = 0

# Restore original image (reverse normalization)
sample_image = modellib.unmold_image(normalized_images[b], config)

# Compute anchor shifts.
indices = np.where(rpn_match[b] == 1)[0]
refined_anchors = utils.apply_box_deltas(
    anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
log("anchors", anchors)
log("refined_anchors", refined_anchors)

# Get list of positive anchors
positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
print("Positive anchors: {}".format(len(positive_anchor_ids)))
negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
print("Negative anchors: {}".format(len(negative_anchor_ids)))
neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
print("Neutral anchors: {}".format(len(neutral_anchor_ids)))

# ROI breakdown by class
for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
    if n:
        print("{:23}: {}".format(c[:20], n))

# Show positive anchors
visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                     refined_boxes=refined_anchors)

# Show negative anchors
visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])

# Show neutral anchors. They don't contribute to training.
visualize.draw_boxes(sample_image,
                     boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])

if random_rois:
    # Class aware bboxes
    bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]),
                               mrcnn_class_ids[b], :]

    # Refined ROIs
    refined_rois = utils.apply_box_deltas(
        rois[b].astype(np.float32),
        bbox_specific[:, :4] * config.BBOX_STD_DEV)

    # Class aware masks
    mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :,
                               mrcnn_class_ids[b]]

    visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific,
                        mrcnn_class_ids[b], dataset.class_names)

    # Any repeated ROIs?
    rows = np.ascontiguousarray(rois[b]).view(
        np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
    _, idx = np.unique(rows, return_index=True)
    print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))

if random_rois:
    # Display ROIs and corresponding masks and bounding boxes
    ids = random.sample(range(rois.shape[1]), 8)
    images = []
    titles = []
    for i in ids:
        image = visualize.draw_box(sample_image.copy(),
                                   rois[b, i, :4].astype(np.int32),
                                   [255, 0, 0])
        image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                   [0, 255, 0])
        images.append(image)
        titles.append("ROI {}".format(i))
        images.append(mask_specific[i] * 255)
        titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])
    display_images(images, titles, cols=4, cmap="Blues", interpolation="none")

# Check ratio of positive ROIs in a set of images.
if random_rois:
    limit = 10
    temp_g = modellib.data_generator(dataset, config, shuffle=True,
                                     random_rois=10000,
                                     batch_size=1,
                                     detection_targets=True)
    total = 0
    for i in range(limit):
        _, [ids, _, _] = next(temp_g)
        positive_rois = np.sum(ids[0] > 0)
        total += positive_rois
        print("{:5} {:5.2f}".format(positive_rois, positive_rois / ids.shape[1]))
    print("Average percent: {:.2f}".format(total / (limit * ids.shape[1])))

exit()
def all_steps(dataset, datacfg, dnncfg):
    '''
    ## Single entry point for all the steps for inspecting dataset
    '''
    ## Uncomment for debugging
    # inspectdata.load_and_display_dataset(dataset, datacfg)

    # In[7]:
    log.info("[7]. ---------------")
    log.info("Load and display random images and masks---------------")
    log.info("Bounding Boxes---------------")
    load_and_display_random_sample(dataset, datacfg)

    # In[9]:
    log.info("[9]. ---------------")
    log.info("Resize Images---------------")
    load_and_resize_images(dataset, datacfg, dnncfg)

    # In[10]:
    log.info("[10]. ---------------")
    log.info("Mini Masks---------------")
    image_id = load_mini_masks(dataset, datacfg, dnncfg)
    log.info("image_id: {}".format(image_id))

    # In[11]:
    log.info("[11]. ---------------")
    log.info("Add augmentation and mask resizing---------------")
    add_augmentation(dataset, datacfg, dnncfg, image_id)
    info = dataset.image_info[image_id]
    log.debug("info: {}".format(info))

    # In[12]:
    log.info("[12]. ---------------")
    log.info("Anchors---------------")
    backbone_shapes, anchors, anchors_per_level, anchors_per_cell = \
        generate_anchors(dnncfg)

    # In[13]:
    log.info("[13]. ---------------")
    log.info("Visualize anchors of one cell at the center of the feature map of a specific level---------------")
    visualize_anchors_at_center(dataset, datacfg, dnncfg, backbone_shapes,
                                anchors, anchors_per_level, anchors_per_cell)

    # In[14]:
    log.info("[14]. ---------------")
    log.info("info---------------")
    image_ids = dataset.image_ids
    log.info(image_ids)
    image_index = -1
    image_index = (image_index + 1) % len(image_ids)
    log.info("image_index:{}".format(image_index))

    # In[15]:
    log.info("[15]. ---------------")
    log.info("data_generator---------------")
    ## Data Generator
    # Create data generator
    random_rois = 2000
    g = modellib.data_generator(dataset, datacfg, dnncfg, shuffle=True,
                                random_rois=random_rois,
                                batch_size=4,
                                detection_targets=True)

    # Uncomment to run the generator through a lot of images
    # to catch rare errors
    # for i in range(1000):
    #     log.debug(i)
    #     _, _ = next(g)

    # Get Next Image
    if random_rois:
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
         gt_boxes, gt_masks, rpn_rois, rois], \
            [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)
        customlog("rois", rois)
        customlog("mrcnn_class_ids", mrcnn_class_ids)
        customlog("mrcnn_bbox", mrcnn_bbox)
        customlog("mrcnn_mask", mrcnn_mask)
    else:
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
         gt_boxes, gt_masks], _ = next(g)

    customlog("gt_class_ids", gt_class_ids)
    customlog("gt_boxes", gt_boxes)
    customlog("gt_masks", gt_masks)
    customlog("rpn_match", rpn_match)
    customlog("rpn_bbox", rpn_bbox)
    image_id = modellib.parse_image_meta(image_meta)["image_id"][0]

    # Remove the last dim in mrcnn_class_ids. It's only added
    # to satisfy Keras restriction on target shape.
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

    # In[16]:
    log.info("[16]. ---------------")
    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], dnncfg)

    # Compute anchor shifts.
    indices = np.where(rpn_match[b] == 1)[0]
    refined_anchors = utils.apply_box_deltas(
        anchors[indices], rpn_bbox[b, :len(indices)] * dnncfg.RPN_BBOX_STD_DEV)
    customlog("anchors", anchors)
    customlog("refined_anchors", refined_anchors)

    # Get list of positive anchors
    positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
    log.info("Positive anchors: {}".format(len(positive_anchor_ids)))
    negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
    log.info("Negative anchors: {}".format(len(negative_anchor_ids)))
    neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
    log.info("Neutral anchors: {}".format(len(neutral_anchor_ids)))

    log.info("ROI breakdown by class---------------")
    # ROI breakdown by class
    for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
        if n:
            log.info("{:23}: {}".format(c[:20], n))

    log.info("Show positive anchors---------------")
    # Show positive anchors
    fig, ax = plt.subplots(1, figsize=(16, 16))
    visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                         refined_boxes=refined_anchors, ax=ax)

    # In[17]:
    log.info("[17]. ---------------")
    log.info("Show negative anchors---------------")
    # Show negative anchors
    visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])

    # In[18]:
    log.info("[18]. ---------------")
    log.info("Show neutral anchors. They don't contribute to training---------------")
    # Show neutral anchors. They don't contribute to training.
    visualize.draw_boxes(sample_image,
                         boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])

    # In[19]:
    log.info("[19]. ---------------")
    log.info("ROIs---------------")
    ## ROIs
    if random_rois:
        # Class aware bboxes
        bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]),
                                   mrcnn_class_ids[b], :]

        # Refined ROIs
        refined_rois = utils.apply_box_deltas(
            rois[b].astype(np.float32),
            bbox_specific[:, :4] * dnncfg.BBOX_STD_DEV)

        # Class aware masks
        mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :,
                                   mrcnn_class_ids[b]]

        visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific,
                            mrcnn_class_ids[b], dataset.class_names)

        # Any repeated ROIs?
        rows = np.ascontiguousarray(rois[b]).view(
            np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
        _, idx = np.unique(rows, return_index=True)
        log.info("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))

    # In[20]:
    log.info("[20]. ---------------")
    log.info("Display ROIs and corresponding masks and bounding boxes---------------")
    if random_rois:
        # Display ROIs and corresponding masks and bounding boxes
        ids = random.sample(range(rois.shape[1]), 8)
        images = []
        titles = []
        for i in ids:
            image = visualize.draw_box(sample_image.copy(),
                                       rois[b, i, :4].astype(np.int32),
                                       [255, 0, 0])
            image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                       [0, 255, 0])
            images.append(image)
            titles.append("ROI {}".format(i))
            images.append(mask_specific[i] * 255)
            titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])
        visualize.display_images(images, titles, cols=4, cmap="Blues",
                                 interpolation="none")

    # In[21]:
    log.info("[21]. ---------------")
    log.info("Check ratio of positive ROIs in a set of images.---------------")
    # Check ratio of positive ROIs in a set of images.
    if random_rois:
        limit = 10
        temp_g = modellib.data_generator(dataset, datacfg, dnncfg, shuffle=True,
                                         random_rois=10000,
                                         batch_size=1,
                                         detection_targets=True)
        total = 0
        for i in range(limit):
            _, [ids, _, _] = next(temp_g)
            positive_rois = np.sum(ids[0] > 0)
            total += positive_rois
            log.info("{:5} {:5.2f}".format(positive_rois,
                                           positive_rois / ids.shape[1]))
        log.info("Average percent: {:.2f}".format(total / (limit * ids.shape[1])))
def inspect_data(dataset, config):
    print("Image Count: {}".format(len(dataset.image_ids)))
    print("Class Count: {}".format(dataset.num_classes))
    for i, info in enumerate(dataset.class_info):
        print("{:3}. {:50}".format(i, info['name']))

    # ## Display Samples
    #
    # Load and display images and masks.

    # In[4]:

    # Load and display random samples
    image_ids = np.random.choice(dataset.image_ids, 4)
    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids = dataset.load_mask(image_id)
        visualize.display_top_masks(image, mask, class_ids, dataset.class_names)

    # ## Bounding Boxes
    #
    # Rather than using bounding box coordinates provided by the source
    # datasets, we compute the bounding boxes from masks instead. This allows
    # us to handle bounding boxes consistently regardless of the source
    # dataset, and it also makes it easier to resize, rotate, or crop images:
    # we simply generate the bounding boxes from the updated masks rather than
    # computing a bounding box transformation for each type of image
    # transformation.

    # In[5]:

    # Load random image and mask.
    image_id = random.choice(dataset.image_ids)
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id ", image_id, dataset.image_reference(image_id))
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

    # ## Resize Images
    #
    # To support multiple images per batch, images are resized to one size
    # (1024x1024). Aspect ratio is preserved, though: if an image is not
    # square, then zero padding is added at the top/bottom or right/left.

    # In[6]:

    # Load random image and mask.
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    # Resize
    image, window, scale, padding, _ = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding)
    # Compute Bounding box
    bbox = utils.extract_bboxes(mask)

    # Display image and additional stats
    print("image_id: ", image_id, dataset.image_reference(image_id))
    print("Original shape: ", original_shape)
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)
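    # A minimal sketch of the mask-to-box computation that utils.extract_bboxes
    # performs above, for a single boolean mask (illustration only; assumes a
    # non-empty mask, and y2/x2 are exclusive, matching the repo's convention).
    def bbox_from_mask(m):
        ys = np.where(np.any(m, axis=1))[0]  # rows containing the object
        xs = np.where(np.any(m, axis=0))[0]  # columns containing the object
        return np.array([ys[0], xs[0], ys[-1] + 1, xs[-1] + 1])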
    # ## Mini Masks
    #
    # Instance binary masks can get large when training with high resolution
    # images. For example, if training with 1024x1024 images then the mask of
    # a single instance requires 1MB of memory (NumPy uses bytes for boolean
    # values). If an image has 100 instances then that's 100MB for the masks
    # alone.
    #
    # To improve training speed, we optimize masks in two ways:
    # * We store only the mask pixels that are inside the object bounding box,
    #   rather than a mask of the full image. Most objects are small compared
    #   to the image size, so we save space by not storing a lot of zeros
    #   around the object.
    # * We resize the mask to a smaller size (e.g. 56x56). For objects that
    #   are larger than the selected size we lose a bit of accuracy. But most
    #   object annotations are not very accurate to begin with, so this loss
    #   is negligible for most practical purposes. The size of the mini_mask
    #   can be set in the config class.
    #
    # To visualize the effect of mask resizing, and to verify the code
    # correctness, we visualize some examples.

    # In[7]:

    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, use_mini_mask=False)

    log("image", image)
    log("image_meta", image_meta)
    log("class_ids", class_ids)
    log("bbox", bbox)
    log("mask", mask)

    display_images([image] + [mask[:, :, i]
                              for i in range(min(mask.shape[-1], 7))])

    # In[8]:

    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

    # In[9]:

    # Add augmentation and mask resizing.
    image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
        dataset, config, image_id, augment=True, use_mini_mask=True)
    log("mask", mask)
    display_images([image] + [mask[:, :, i]
                              for i in range(min(mask.shape[-1], 7))])

    # In[10]:

    mask = utils.expand_mask(bbox, mask, image.shape)
    visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

    # ## Anchors
    #
    # The order of anchors is important. Use the same order in training and
    # prediction phases, and it must match the order of the convolution
    # execution.
    #
    # For an FPN network, the anchors must be ordered in a way that makes it
    # easy to match anchors to the output of the convolution layers that
    # predict anchor scores and shifts.
    # * Sort by pyramid level first. All anchors of the first level, then all
    #   of the second, and so on. This makes it easier to separate anchors by
    #   level.
    # * Within each level, sort anchors by feature map processing sequence.
    #   Typically, a convolution layer processes a feature map starting from
    #   the top-left and moving right row by row.
    # * For each feature map cell, pick any sorting order for the anchors of
    #   different ratios. Here we match the order of ratios passed to the
    #   function.
    #
    # **Anchor Stride:**
    # In the FPN architecture, feature maps at the first few layers are high
    # resolution. For example, if the input image is 1024x1024 then the
    # feature map of the first layer is 256x256, which generates about 200K
    # anchors (256*256*3). These anchors are 32x32 pixels and their stride
    # relative to image pixels is 4 pixels, so there is a lot of overlap. We
    # can reduce the load significantly if we generate anchors for every other
    # cell in the feature map. A stride of 2 will cut the number of anchors by
    # 4, for example.
    #
    # In this implementation we use an anchor stride of 2, which is different
    # from the paper.

    # In[11]:

    # Generate Anchors
    backbone_shapes = modellib.compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Print summary of anchors
    num_levels = len(backbone_shapes)
    anchors_per_cell = len(config.RPN_ANCHOR_RATIOS)
    print("Count: ", anchors.shape[0])
    print("Scales: ", config.RPN_ANCHOR_SCALES)
    print("ratios: ", config.RPN_ANCHOR_RATIOS)
    print("Anchors per Cell: ", anchors_per_cell)
    print("Levels: ", num_levels)
    anchors_per_level = []
    for l in range(num_levels):
        num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]
        anchors_per_level.append(anchors_per_cell * num_cells
                                 // config.RPN_ANCHOR_STRIDE**2)
        print("Anchors in Level {}: {}".format(l, anchors_per_level[l]))
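    # Quick arithmetic check of the anchor-count claim above (illustration
    # only): a 256x256 feature map with 3 ratios gives ~200K anchors at
    # stride 1, and an anchor stride of 2 cuts that by a factor of 4.
    assert 256 * 256 * 3 == 196608
    assert (256 // 2) * (256 // 2) * 3 == 196608 // 4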
    # Visualize anchors of one cell at the center of the feature map of a
    # specific level.

    # In[12]:

    ## Visualize anchors of one cell at the center of the feature map of a specific level

    # Load and draw random image
    image_id = np.random.choice(dataset.image_ids, 1)[0]
    image, image_meta, _, _, _ = modellib.load_image_gt(dataset, config, image_id)
    fig, ax = plt.subplots(1, figsize=(10, 10))
    ax.imshow(image)
    levels = len(backbone_shapes)

    for level in range(levels):
        colors = visualize.random_colors(levels)
        # Compute the index of the anchors at the center of the image
        level_start = sum(anchors_per_level[:level])  # sum of anchors of previous levels
        level_anchors = anchors[level_start:level_start + anchors_per_level[level]]
        print("Level {}. Anchors: {:6} Feature map Shape: {}".format(
            level, level_anchors.shape[0], backbone_shapes[level]))
        center_cell = backbone_shapes[level] // 2
        center_anchor = anchors_per_cell * (
            (center_cell[0] * backbone_shapes[level][1] / config.RPN_ANCHOR_STRIDE**2)
            + center_cell[1] / config.RPN_ANCHOR_STRIDE)
        level_center = int(center_anchor)

        # Draw anchors. Brightness shows the order in the array, dark to bright.
        for i, rect in enumerate(level_anchors[level_center:level_center + anchors_per_cell]):
            y1, x1, y2, x2 = rect
            p = patches.Rectangle(
                (x1, y1), x2 - x1, y2 - y1, linewidth=2, facecolor='none',
                edgecolor=(i + 1) * np.array(colors[level]) / anchors_per_cell)
            ax.add_patch(p)

    # ## Data Generator

    # In[13]:

    # Create data generator
    random_rois = 2000
    g = modellib.data_generator(dataset, config, shuffle=True,
                                random_rois=random_rois,
                                batch_size=4,
                                detection_targets=True)

    # In[14]:

    # Uncomment to run the generator through a lot of images
    # to catch rare errors
    # for i in range(1000):
    #     print(i)
    #     _, _ = next(g)

    # In[15]:

    # Get Next Image
    if random_rois:
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
         gt_boxes, gt_masks, rpn_rois, rois], \
            [mrcnn_class_ids, mrcnn_bbox, mrcnn_mask] = next(g)
        log("rois", rois)
        log("mrcnn_class_ids", mrcnn_class_ids)
        log("mrcnn_bbox", mrcnn_bbox)
        log("mrcnn_mask", mrcnn_mask)
    else:
        [normalized_images, image_meta, rpn_match, rpn_bbox, gt_class_ids,
         gt_boxes, gt_masks], _ = next(g)

    log("gt_class_ids", gt_class_ids)
    log("gt_boxes", gt_boxes)
    log("gt_masks", gt_masks)
    log("rpn_match", rpn_match)
    log("rpn_bbox", rpn_bbox)
    image_id = modellib.parse_image_meta(image_meta)["image_id"][0]
    print("image_id: ", image_id, dataset.image_reference(image_id))

    # Remove the last dim in mrcnn_class_ids. It's only added
    # to satisfy Keras restriction on target shape.
    mrcnn_class_ids = mrcnn_class_ids[:, :, 0]

    # In[16]:

    b = 0

    # Restore original image (reverse normalization)
    sample_image = modellib.unmold_image(normalized_images[b], config)

    # Compute anchor shifts.
    indices = np.where(rpn_match[b] == 1)[0]
    refined_anchors = utils.apply_box_deltas(
        anchors[indices], rpn_bbox[b, :len(indices)] * config.RPN_BBOX_STD_DEV)
    log("anchors", anchors)
    log("refined_anchors", refined_anchors)

    # Get list of positive anchors
    positive_anchor_ids = np.where(rpn_match[b] == 1)[0]
    print("Positive anchors: {}".format(len(positive_anchor_ids)))
    negative_anchor_ids = np.where(rpn_match[b] == -1)[0]
    print("Negative anchors: {}".format(len(negative_anchor_ids)))
    neutral_anchor_ids = np.where(rpn_match[b] == 0)[0]
    print("Neutral anchors: {}".format(len(neutral_anchor_ids)))

    # ROI breakdown by class
    for c, n in zip(dataset.class_names, np.bincount(mrcnn_class_ids[b].flatten())):
        if n:
            print("{:23}: {}".format(c[:20], n))

    # Show positive anchors
    fig, ax = plt.subplots(1, figsize=(16, 16))
    visualize.draw_boxes(sample_image, boxes=anchors[positive_anchor_ids],
                         refined_boxes=refined_anchors, ax=ax)

    # In[17]:

    # Show negative anchors
    visualize.draw_boxes(sample_image, boxes=anchors[negative_anchor_ids])

    # In[18]:

    # Show neutral anchors. They don't contribute to training.
    visualize.draw_boxes(sample_image,
                         boxes=anchors[np.random.choice(neutral_anchor_ids, 100)])

    # ## ROIs

    # In[19]:

    if random_rois:
        # Class aware bboxes
        bbox_specific = mrcnn_bbox[b, np.arange(mrcnn_bbox.shape[1]),
                                   mrcnn_class_ids[b], :]

        # Refined ROIs
        refined_rois = utils.apply_box_deltas(
            rois[b].astype(np.float32),
            bbox_specific[:, :4] * config.BBOX_STD_DEV)

        # Class aware masks
        mask_specific = mrcnn_mask[b, np.arange(mrcnn_mask.shape[1]), :, :,
                                   mrcnn_class_ids[b]]

        visualize.draw_rois(sample_image, rois[b], refined_rois, mask_specific,
                            mrcnn_class_ids[b], dataset.class_names)

        # Any repeated ROIs?
        rows = np.ascontiguousarray(rois[b]).view(
            np.dtype((np.void, rois.dtype.itemsize * rois.shape[-1])))
        _, idx = np.unique(rows, return_index=True)
        print("Unique ROIs: {} out of {}".format(len(idx), rois.shape[1]))

    # In[20]:

    if random_rois:
        # Display ROIs and corresponding masks and bounding boxes
        ids = random.sample(range(rois.shape[1]), 8)
        images = []
        titles = []
        for i in ids:
            image = visualize.draw_box(sample_image.copy(),
                                       rois[b, i, :4].astype(np.int32),
                                       [255, 0, 0])
            image = visualize.draw_box(image, refined_rois[i].astype(np.int64),
                                       [0, 255, 0])
            images.append(image)
            titles.append("ROI {}".format(i))
            images.append(mask_specific[i] * 255)
            titles.append(dataset.class_names[mrcnn_class_ids[b, i]][:20])
        display_images(images, titles, cols=4, cmap="Blues", interpolation="none")

    # In[21]:

    # Check ratio of positive ROIs in a set of images.
    if random_rois:
        limit = 10
        temp_g = modellib.data_generator(dataset, config, shuffle=True,
                                         random_rois=10000,
                                         batch_size=1,
                                         detection_targets=True)
        total = 0
        for i in range(limit):
            _, [ids, _, _] = next(temp_g)
            positive_rois = np.sum(ids[0] > 0)
            total += positive_rois
            print("{:5} {:5.2f}".format(positive_rois,
                                        positive_rois / ids.shape[1]))
        print("Average percent: {:.2f}".format(total / (limit * ids.shape[1])))
def examin_data(model, image_path=None, option=None):
    # Visualization function. Basic options provided:
    #   - activation visualization
    #   - pre- and post-mold mask visualization
    #   - augmentation visualization
    # Change as necessary.
    assert image_path or option

    if image_path:
        print("Running on {}".format(image_path))
        image = skimage.io.imread(image_path)

        # Run selected graphs and save outputs for each
        activations = model.run_graph([image], [
            ("input_image", tf.identity(model.keras_model.get_layer("input_image").output)),
            ("res2c_out", model.keras_model.get_layer("res2c_out").output),
            ("res3c_out", model.keras_model.get_layer("res3c_out").output),
            ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet101
            ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
            ("roi", model.keras_model.get_layer("ROI").output),
        ])
        _ = plt.imshow(modellib.unmold_image(activations["input_image"][0],
                                             SiliqueConfig()))
        display_images(np.transpose(activations["res3c_out"][0, :, :, :5], [2, 0, 1]),
                       cols=5, save=True,
                       name="activations_aa_" + image_path.split(".")[0],
                       savedir="Mask_RCNN/output")
    elif option:
        # Prepare dataset and augmentation config
        dataset = SiliqueDataset()
        dataset.load_silique("new_dataset/", "val", ["white"])
        dataset.prepare()
        image_ids = np.random.choice(dataset.image_ids, 10)
        augmentation = augs.Sometimes(0.7, augs.SomeOf((1, 3), [
            augs.Fliplr(0.5),
            augs.Flipud(0.5),
            augs.GaussianBlur(sigma=(0.0, 5.0)),
            augs.Affine(scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}),
            augs.Affine(rotate=(-90, 90))
        ], random_order=True))

        if option == "premold_masks":
            # View images and masks before mold
            for image_id in image_ids:
                print("extracting {}".format(image_id))
                image = dataset.load_image(image_id)
                mask, class_ids = dataset.load_mask(image_id)
                display_top_masks(image, mask, class_ids, dataset.class_names,
                                  limit=1, sve=True,
                                  nme="white_masks_{}".format(image_id),
                                  svedir="Mask_RCNN/output")
        elif option == "postmold_masks":
            print("Generating molded images...")
            for image_id in image_ids:
                print("extracting {}".format(image_id))
                # Load image after it has been processed by the model
                image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
                    dataset, SiliqueConfig(), image_id,
                    use_mini_mask=False, augmentation=augmentation)
                display_top_masks(image, mask, class_ids, dataset.class_names,
                                  limit=1, sve=True,
                                  nme="white_masks_{}".format(image_id),
                                  svedir="Mask_RCNN/output")