def detect(self, image):
    # Reserve the first GPU and let TensorFlow grow its memory usage as needed.
    cuda.select_device(0)
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

    ROOT_DIR = "/home/bernihoh/Bachelor/SMS/MaskRCNN/samples/SMSNetworks/face_feature_detection/"
    MODEL_DIR = os.path.join(ROOT_DIR, "logsFaceFeatureDetection")
    COCO_MODEL_PATH = "/home/bernihoh/Bachelor/SMS/MaskRCNN/samples/SMSNetworks/face_feature_detection/mask_rcnn_face_feature_detection_0029.h5"

    config = InferenceConfig()
    config.display()

    # Create the model object in inference mode.
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
    # Load the trained face-feature-detection weights.
    model.load_weights(COCO_MODEL_PATH, by_name=True)

    # Class names of the face-feature model; the index matches the class IDs in the result.
    class_names = ["bg", "iris_l", "inner_eye_l", "outer_eye_l", "eye_brow_l", "cheek_l",
                   "iris_r", "inner_eye_r", "outer_eye_r", "eye_brow_r", "cheek_r",
                   "nose_tip", "nose", "mouth", "chin", "face", "head", "distortion"]

    # Run detection on the single input image and return the first (only) result dict.
    results = model.detect([image], verbose=1)
    r = results[0]

    session.close()
    cuda.close()
    return r
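# Note: InferenceConfig is referenced above but not defined in this snippet.
# Below is a minimal sketch of what it could look like, assuming the standard
# Matterport Mask R-CNN pattern (a config subclass tuned for single-image
# inference). The class body, including the NAME value, is an assumption and
# not the original definition; in practice it would usually subclass the
# training config of the face-feature network instead of Config directly.
from mrcnn.config import Config

class InferenceConfig(Config):
    NAME = "face_feature_detection"  # assumed name, for illustration only
    # Run inference on one image at a time on a single GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Background + 17 face-feature classes, matching the class_names list above.
    NUM_CLASSES = 1 + 17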
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels

# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32

# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100

# Use small validation steps since the epoch is small
VALIDATION_STEPS = 5

config = ShapesConfig()
config.display()

# %% md
# ## Notebook Preferences
# %%

def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in all visualizations in the
    notebook. Provide a central point to control graph sizes.

    Change the default size attribute to control the size of rendered images.
    """
    # plt is matplotlib.pyplot, imported earlier in the notebook.
    _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return ax
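# A short, self-contained check of get_ax (matplotlib only, no Mask R-CNN
# objects required); the dummy data below is purely illustrative. In the
# notebook the returned axes are normally handed to the mrcnn.visualize
# helpers via their ax argument.
import numpy as np
import matplotlib.pyplot as plt

ax = get_ax(rows=1, cols=2, size=4)  # grid of two axes, 8x4 inches total
dummy = np.random.rand(64, 64)
ax[0].imshow(dummy, cmap="gray")
ax[0].set_title("left panel")
ax[1].imshow(1 - dummy, cmap="gray")
ax[1].set_title("right panel")
plt.show()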
def detect(self, image_path, background_color):
    # Reserve the first GPU and let TensorFlow grow its memory usage as needed.
    cuda.select_device(0)
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

    ROOT_DIR = os.path.abspath("/")
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    sys.path.append(ROOT_DIR)  # To find local version of the library
    sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version

    # Download the COCO weights if they are not already present.
    COCO_MODEL_PATH = "/home/bernihoh/Bachelor/MaskRCNN/mask_rcnn_coco.h5"
    if not os.path.exists(COCO_MODEL_PATH):
        utils.download_trained_weights(COCO_MODEL_PATH)

    config = InferenceConfig()
    config.display()

    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
    model.load_weights(COCO_MODEL_PATH, by_name=True)

    COCO_DIR = "/home/bernihoh/Bachelor/MaskRCNN/samples/coco"  # changed: line inserted
    dataset = coco.CocoDataset()
    dataset.load_coco(COCO_DIR, "train")
    dataset.prepare()

    class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
                   'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter',
                   'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                   'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase',
                   'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
                   'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle',
                   'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                   'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut',
                   'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
                   'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
                   'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
                   'scissors', 'teddy bear', 'hair drier', 'toothbrush']

    image = skimage.io.imread(image_path)
    print(dataset.class_names)
    plt.imshow(image)

    # Run detection
    results = model.detect([image], verbose=1)

    # Visualize results
    r = results[0]
    visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])

    used_class = r["class_ids"]
    print(used_class)
    mask = r["masks"]
    mask = mask.astype(np.ubyte)
    # maskimg = mask[:, :, 1] ^ mask[:, :, 1]

    # Start from an empty label image and a background image filled with background_color.
    maskimg = np.zeros((image.shape[0], image.shape[1]))
    maskimg = maskimg.astype(np.ubyte)
    background_mask = np.full((image.shape[0], image.shape[1]), background_color, dtype=np.uint8)
    skimage.io.imshow(background_mask)
    plt.show()

    # Write each instance mask into the label image, using (class_id - 1) as the pixel value.
    for i in range(mask.shape[2]):
        # skimage.io.imshow(mask[:, :, i])
        # plt.show()
        # maskimg = maskimg | mask[:, :, i]
        a = used_class[i] - 1
        if used_class[i] - 1 < 0:
            a = 0
        background_mask = background_mask - mask[:, :, i] * (a + 127)
        maskimg = np.maximum(maskimg, mask[:, :, i] * a)

    skimage.io.imshow(background_mask)
    plt.show()
    maskimg = np.maximum(maskimg, background_mask)
    # maskimg[maskimg == 0] = 124
    # maskimg = skimage.exposure.rescale_intensity(maskimg)
    skimage.io.imshow(maskimg)
    plt.show()
    # skimage.io.imsave("/home/bernihoh/Bachelor/MaskRCNN/ownimages/mask138-1.jpg", maskimg)

    session.close()
    cuda.close()
    return maskimg
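# A small, self-contained sketch of how the label image returned above could be
# decoded again. detect() writes (class_id - 1) into the pixels of each detected
# instance and leaves background_color elsewhere, so an equality test per pixel
# value recovers rough per-class regions (overlapping instances and the background
# subtraction above make this approximate). decode_label_image is a hypothetical
# helper, not part of the original code.
import numpy as np

def decode_label_image(maskimg, class_names, background_color):
    """Map each non-background pixel value back to its COCO class name."""
    regions = {}
    for value in np.unique(maskimg):
        if value == background_color:
            continue  # skip the background fill value
        class_id = int(value) + 1  # detect() stored (class_id - 1) per pixel
        if 0 < class_id < len(class_names):
            regions[class_names[class_id]] = (maskimg == value)
    return regions

# Tiny synthetic example: pixel value 0 encodes 'person' (class_id 1), 255 is background.
toy = np.full((4, 4), 255, dtype=np.uint8)
toy[1:3, 1:3] = 0
print(list(decode_label_image(toy, ['BG', 'person', 'bicycle'], background_color=255).keys()))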