Example #1
    def call(self, inputs):
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]

        # Get windows of images in normalized coordinates. Windows are the area
        # in the image that excludes the padding.
        # Use the shape of the first image in the batch to normalize the window
        # because we know that all images get resized to the same size.
        m = parse_image_meta_graph(image_meta)
        image_shape = m['image_shape'][0]
        window = norm_boxes_graph(m['window'], image_shape[:2])

        # Run detection refinement graph on each item in the batch
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
        # normalized coordinates
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
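
Example #1 converts the image window to normalized coordinates with norm_boxes_graph before refining detections. For context, here is a minimal sketch of such a helper, assuming (y1, x1, y2, x2) boxes in pixel coordinates and the shift/scale convention used in the Matterport Mask R-CNN codebase; the actual helper may differ:

import tensorflow as tf

def norm_boxes_graph(boxes, shape):
    # Sketch: map pixel-space boxes [..., (y1, x1, y2, x2)] into the [0, 1] range,
    # given shape = [height, width] of the image in pixels.
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)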
Example #2
    def call(self, inputs):
        print('    Detection Target Layer : call() ', type(inputs), len(inputs))
        print('     proposals.shape    :',  inputs[0].shape, inputs[0].get_shape(), KB.int_shape(inputs[0]) )
        print('     gt_class_ids.shape :',  inputs[1].shape, inputs[1].get_shape(), KB.int_shape(inputs[1]) ) 
        print('     gt_bboxes.shape    :',  inputs[2].shape, inputs[2].get_shape(), KB.int_shape(inputs[2]) )
        # print('     gt_masks.shape     :',  inputs[3].shape, inputs[3].get_shape(), KB.int_shape(inputs[3]) ) 
        proposals    = inputs[0]    # target_rois           --  proposals generated by the RPN (or artificially generated proposals)
        gt_class_ids = inputs[1]    # input_gt_class_ids 
        gt_boxes     = inputs[2]    # input_normlzd_gt_boxes
        # gt_masks     = inputs[3]    # input_gt_masks

        # Slice the batch and run a graph for each slice    
        # TODO: Rename target_bbox to target_deltas for clarity
        # detection_targets_graph_mod() returns:
        #         rois,    roi_gt_class_ids,  deltas,    roi_gt_boxes
        
        names = ["output_rois", "target_class_ids", "target_bbox_deltas", "roi_gt_boxes"]
        
        outputs = utils.batch_slice([proposals, gt_class_ids, gt_boxes],                               # inputs
                                    lambda w, x, y: detection_targets_graph_mod(w, x, y, self.config), # graph function
                                    self.config.IMAGES_PER_GPU,                                        # batch size
                                    names=names)
                   
        print('\n    Detection Target Layer : return ', type(outputs) , len(outputs))                    
        for i,out in enumerate(outputs):
            print('     output {}  shape {}  type {} '.format(i, out.shape, type(out)))    

        return outputs
Example #3
    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]

        # Slice the batch and run a graph for each slice
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs
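
Examples #2 and #3 (and the proposal layers below) all delegate per-image work to utils.batch_slice, because several of the graph ops involved only handle one image at a time. Here is a minimal sketch consistent with how it is called in these snippets; the repository's version may differ in details:

import tensorflow as tf

def batch_slice(inputs, graph_fn, batch_size, names=None):
    # Sketch: split each input along the batch dimension, apply graph_fn to
    # every per-image slice, then re-stack the slice outputs into batched tensors.
    if not isinstance(inputs, list):
        inputs = [inputs]
    outputs = []
    for i in range(batch_size):
        inputs_slice = [x[i] for x in inputs]
        output_slice = graph_fn(*inputs_slice)
        if not isinstance(output_slice, (tuple, list)):
            output_slice = [output_slice]
        outputs.append(output_slice)
    # Transpose from a list of per-image outputs to a list of output streams
    outputs = list(zip(*outputs))
    if names is None:
        names = [None] * len(outputs)
    result = [tf.stack(o, axis=0, name=n) for o, n in zip(outputs, names)]
    if len(result) == 1:
        result = result[0]
    return result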
Example #4
    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = tf.minimum(
            self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                            self.config.IMAGES_PER_GPU,
                                            names=["pre_nms_anchors"])

        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])

        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.

        # Non-max suppression
        def nms(boxes, scores):
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed
            padding = tf.maximum(self.proposal_count -
                                 tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals
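
Example #4 assumes two graph helpers, apply_box_deltas_graph and clip_boxes_graph. The sketch below follows the (y1, x1, y2, x2) box and (dy, dx, log(dh), log(dw)) delta conventions used above; treat it as an illustration, not the repository's exact code:

import tensorflow as tf

def apply_box_deltas_graph(boxes, deltas):
    # Sketch: apply refinement deltas to anchor boxes.
    # boxes:  [N, (y1, x1, y2, x2)], deltas: [N, (dy, dx, log(dh), log(dw))]
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Shift the center and rescale height/width
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= tf.exp(deltas[:, 2])
    width *= tf.exp(deltas[:, 3])
    # Convert back to corner coordinates
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    y2 = y1 + height
    x2 = x1 + width
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")

def clip_boxes_graph(boxes, window):
    # Sketch: clip boxes to a (y1, x1, y2, x2) window, e.g. [0, 0, 1, 1]
    # when working in normalized coordinates.
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    return tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")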
Example #5
    def call(self, inputs):
    
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        
        # Box deltas                    [batch, num_rois, 4]
        # RPN_BBOX_STD_DEV              [0.1 0.1 0.2 0.2]
        # Multiply bbox deltas [dy, dx, log(dh), log(dw)] by [0.1, 0.1, 0.2, 0.2]
        
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        
        # Base anchors
        anchors = self.anchors

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = min(6000, self.anchors.shape[0])
        
        # Return the indices of the top "pre_nms_limit" rpn_class scores
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True, name="top_anchors").indices
        
        # Gather scores, deltas, and anchors using the selected indices (ix)
        scores  = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU)
        deltas  = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU)
        anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x), self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        
        boxes = utils.batch_slice([anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
                                  
        print('     Scores : ' , scores.shape)
        print('     Deltas : ' , deltas.shape)
        print('     Anchors: ' , anchors.shape)
        print('     Boxes shape / type after processing: ', boxes.shape, type(boxes))

        # Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
        height, width = self.config.IMAGE_SHAPE[:2]
        window = np.array([0, 0, height, width]).astype(np.float32)
        
        boxes  = utils.batch_slice(boxes, 
                                   lambda x: clip_boxes_graph(x, window), self.config.IMAGES_PER_GPU,
                                   names=["refined_anchors_clipped"])

        #-------------------------------------------------------------------------
        # Filter out small boxes :
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.
        #-------------------------------------------------------------------------
        
        # Normalize dimensions to range of 0 to 1.
        normalized_boxes = boxes / np.array([[height, width, height, width]])

        #-------------------------------------------------------------------------
        # Define Non-max suppression operation
        #
        #  tf.image.non_max_suppression:
        #
        #  Prunes away boxes that have high intersection-over-union (IOU) overlap
        #  with previously selected boxes.
        #  Bounding boxes (normalized_boxes) are supplied as [y1, x1, y2, x2], where
        #  (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners
        #  and the coordinates can be provided as normalized (i.e., lying in the interval
        #  [0, 1]) or absolute. 
        # 
        #  The output of this operation is a set of integers indexing into the input
        #  collection of bounding boxes representing the selected boxes. The bounding box 
        #  coordinates corresponding to the selected indices can then be obtained using 
        #  the tf.gather operation. 
        #  For example: 
        #  selected_indices = tf.image.non_max_suppression(boxes, scores, max_output_size, iou_threshold)
        #  selected_boxes   = tf.gather(boxes, selected_indices)
        #-------------------------------------------------------------------------
        def nms(normalized_boxes, scores):
            indices = tf.image.non_max_suppression(normalized_boxes, 
                                                   scores, 
                                                   self.proposal_count,
                                                   self.nms_threshold, 
                                                   name="rpn_non_max_suppression")
                                                   
            proposals = tf.gather(normalized_boxes, indices)
            # Pad if needed
            padding   = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
            
        # Apply the nms operation on slices of normalized boxes
        proposals = utils.batch_slice([normalized_boxes, scores], nms, self.config.IMAGES_PER_GPU)
        print('     Output: Proposals shape : ', proposals.shape, KB.int_shape(proposals))

        return proposals
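
Unlike Example #4, which stays in normalized coordinates throughout, this variant clips boxes against the pixel-space window [0, 0, height, width] and only afterwards normalizes them by dividing by [height, width, height, width]. A quick numeric check of that step, using a hypothetical 1024x1024 IMAGE_SHAPE:

import numpy as np

height, width = 1024, 1024                    # hypothetical config.IMAGE_SHAPE[:2]
boxes = np.array([[256., 128., 512., 640.]])  # one clipped box (y1, x1, y2, x2) in pixels
normalized = boxes / np.array([[height, width, height, width]])
print(normalized)                             # [[0.25  0.125 0.5   0.625]]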
    def call(self, inputs):

        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]  # rpn_scores

        # Box deltas                    [batch, num_rois, 4]
        # RPN_BBOX_STD_DEV              [0.1 0.1 0.2 0.2]
        # Multiply bbox deltas [dy, dx, log(dh), log(dw)] by [0.1, 0.1, 0.2, 0.2]

        deltas = inputs[1]  # rpn bbox deltas
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])

        # Base anchors
        anchors = self.anchors

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = min(6000, self.anchors.shape[0])

        #------------------------------------------------------------------------------------------
        ## return the indices for the top "pre_nms_limit" rpn_class scores
        ## gather scores, deltas, and anchors using the selected indices (ix)
        #------------------------------------------------------------------------------------------
        ix = tf.nn.top_k(scores,
                         pre_nms_limit,
                         sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        anchors = utils.batch_slice(ix,
                                    lambda x: tf.gather(anchors, x),
                                    self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])

        #------------------------------------------------------------------------------------------
        ## Apply deltas to anchors to get refined anchors : [batch, N, (y1, x1, y2, x2)]
        #------------------------------------------------------------------------------------------
        boxes = utils.batch_slice([anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        if self.config.VERBOSE:
            print('     Scores : ', scores.shape)
            print('     Deltas : ', deltas.shape)
            print('     Anchors: ', anchors.shape)

        #------------------------------------------------------------------------------------------
        ## Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
        #------------------------------------------------------------------------------------------
        height, width = self.config.IMAGE_SHAPE[:2]
        window = np.array([0, 0, height, width]).astype(np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        #------------------------------------------------------------------------------------------
        ## Suppress proposal boxes (and corresponding scores) if the area is less than ROI_PROPOSAL_AREA_THRESHOLD
        # Filter out small boxes :
        #   According to Xinlei Chen's paper, this reduces detection accuracy
        #   for small objects, so we're skipping it.
        # 16-05-2018 : added this back as it was causing issues for heatmap score calculation
        #------------------------------------------------------------------------------------------
        boxes, scores = utils.batch_slice(
            [boxes, scores],
            lambda x, y: suppress_small_boxes_graph(
                x, y, self.config.ROI_PROPOSAL_AREA_THRESHOLD),
            self.config.IMAGES_PER_GPU,
            names=["boxes", "scores"])

        # print('     Boxes (after suppression of small proposal boxes) :', tf.shape(boxes).eval())
        # print('     Score (after suppression of small proposal boxes) :', tf.shape(scores).eval())

        #------------------------------------------------------------------------------------------
        ## Normalize dimensions to range of 0 to 1.
        #------------------------------------------------------------------------------------------
        normalized_boxes = boxes / np.array([[height, width, height, width]])

        #------------------------------------------------------------------------------------------
        ## Non-max suppression operation
        #
        #  tf.image.non_max_suppression:
        #
        #  Prunes away boxes that have high intersection-over-union (IOU) overlap
        #  with previously selected boxes.
        #  Bounding boxes (normalized_boxes) are supplied as [y1, x1, y2, x2], where (y1, x1) and
        #  (y2, x2) are the coordinates of any diagonal pair of box corners, and the coordinates
        #  can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute.
        #
        #  The output of this operation is a set of integers indexing into the input
        #  collection of bounding boxes representing the selected boxes. The bounding box
        #  coordinates corresponding to the selected indices can then be obtained using
        #  the tf.gather operation.
        #
        #  For example:
        #  selected_indices = tf.image.non_max_suppression(boxes, scores, max_output_size, iou_threshold)
        #  selected_boxes   = tf.gather(boxes, selected_indices)
        #
        #  These selected boxes are RPN_PROPOSAL_ROIS, which are passed on to further layers, namely,
        #  DETECTION_TARGET_LAYER and DETECTION_INFERENCE_LAYER
        #
        #  hyperparameters:
        #  ---------------
        #       proposal_count: if mode == "training":
        #                           config.POST_NMS_ROIS_TRAINING     1000
        #                        else
        #                           config.POST_NMS_ROIS_INFERENCE    2000
        #       nms_threshold : config.RPN_NMS_THRESHOLD              0.7
        #-------------------------------------------------------------------------
        def nms(normalized_boxes, scores):
            indices = tf.image.non_max_suppression(
                normalized_boxes,
                scores,
                self.proposal_count,
                self.nms_threshold,
                name="rpn_non_max_suppression")

            proposals = tf.gather(normalized_boxes, indices)
            # Pad if needed
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0],
                                 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals

        # Apply the nms operation on slices of normalized boxes
        proposals = utils.batch_slice([normalized_boxes, scores],
                                      nms,
                                      self.config.IMAGES_PER_GPU,
                                      names=["rpn_roi_proposals"])
        if self.config.VERBOSE:
            print('     Boxes shape / type after processing: ')
            print('     Output: Proposals shape : ', proposals.shape,
                  KB.int_shape(proposals))

        return proposals
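
The second variant additionally filters small proposals through suppress_small_boxes_graph, which is not shown on this page. A hypothetical sketch of such a helper, assuming it zeroes out boxes (and their scores) whose pixel area falls below the threshold, which keeps tensor shapes fixed as batch_slice expects; the actual function may work differently, e.g. by gathering the surviving boxes and padding:

import tensorflow as tf

def suppress_small_boxes_graph(boxes, scores, area_threshold):
    # Hypothetical sketch: zero out boxes whose area is below area_threshold.
    # boxes:  [N, (y1, x1, y2, x2)] in pixels, scores: [N]
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    keep = tf.cast(areas >= area_threshold, boxes.dtype)  # [N] mask of 1s and 0s
    boxes = boxes * tf.expand_dims(keep, axis=1)
    scores = scores * keep
    return boxes, scores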