def fastrcnn_training(self, image, rcnn_labels, fg_rcnn_boxes, gt_boxes_per_fg, rcnn_label_logits, fg_rcnn_box_logits):
    """
    Compute Fast R-CNN classification and box-regression losses for the
    sampled proposals, and emit an image summary of the foreground patches.

    Args:
        image (NCHW): input image, used only for the visualization summary
        rcnn_labels (n): labels for each sampled target
        fg_rcnn_boxes (fg x 4): proposal boxes for each sampled foreground target
        gt_boxes_per_fg (fg x 4): matching gt boxes for each sampled foreground target
        rcnn_label_logits (n): label logits for each sampled target
        fg_rcnn_box_logits (fg x #class x 4): box logits for each sampled foreground target

    Returns:
        (fastrcnn_label_loss, fastrcnn_box_loss): scalar loss tensors.
    """
    with tf.name_scope('fg_sample_patch_viz'):
        # All boxes belong to the single image in the batch, so box indices are all 0.
        # FIX: tf.zeros expects a list / 1-D tensor as shape; wrap the scalar
        # count in a list (consistent with the other copy of this function).
        fg_sampled_patches = crop_and_resize(
            image, fg_rcnn_boxes,
            tf.zeros([tf.shape(fg_rcnn_boxes)[0]], dtype=tf.int32), 300)
        fg_sampled_patches = tf.transpose(fg_sampled_patches, [0, 2, 3, 1])  # NCHW -> NHWC
        fg_sampled_patches = tf.reverse(fg_sampled_patches, axis=[-1])  # BGR->RGB
        tf.summary.image('viz', fg_sampled_patches, max_outputs=30)

    # Regression targets are encoded relative to the proposals and scaled by config weights.
    encoded_boxes = encode_bbox_target(
        gt_boxes_per_fg, fg_rcnn_boxes) * tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32)
    fastrcnn_label_loss, fastrcnn_box_loss = fastrcnn_losses(
        rcnn_labels, rcnn_label_logits,
        encoded_boxes,
        fg_rcnn_box_logits)
    return fastrcnn_label_loss, fastrcnn_box_loss
def fastrcnn_training(self, image, rcnn_labels, fg_rcnn_boxes, gt_boxes_per_fg, rcnn_label_logits, fg_rcnn_box_logits):
    """
    Build the Fast R-CNN training losses and a summary of sampled fg patches.

    Args:
        image (NCHW): input image, only used for the visualization summary
        rcnn_labels (n): labels for each sampled target
        fg_rcnn_boxes (fg x 4): proposal boxes for each sampled foreground target
        gt_boxes_per_fg (fg x 4): matching gt boxes for each sampled foreground target
        rcnn_label_logits (n): label logits for each sampled target
        fg_rcnn_box_logits (fg x #class x 4): box logits for each sampled foreground target

    Returns:
        (label_loss, box_loss): scalar loss tensors.
    """
    with tf.name_scope('fg_sample_patch_viz'):
        num_fg = tf.shape(fg_rcnn_boxes)[0]
        # Single-image batch: every box crops from image 0.
        box_indices = tf.zeros([num_fg], dtype=tf.int32)
        patches = crop_and_resize(image, fg_rcnn_boxes, box_indices, 300)
        patches = tf.transpose(patches, [0, 2, 3, 1])  # NCHW -> NHWC
        patches = tf.reverse(patches, axis=[-1])  # BGR->RGB
        tf.summary.image('viz', patches, max_outputs=30)

    # Encode gt boxes against their proposals; scale by the config regression weights.
    reg_weights = tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32)
    encoded_boxes = encode_bbox_target(gt_boxes_per_fg, fg_rcnn_boxes) * reg_weights
    label_loss, box_loss = fastrcnn_losses(
        rcnn_labels, rcnn_label_logits, encoded_boxes, fg_rcnn_box_logits)
    return label_loss, box_loss
def losses(self):
    """Return Fast R-CNN (label, box) losses for the sampled proposals."""
    # Ground-truth box matched to each foreground proposal.
    fg_gt_boxes = tf.gather(self.gt_boxes, self.proposals.fg_inds_wrt_gt)
    # Encode regression targets relative to the proposals, then apply weights.
    regression_targets = encode_bbox_target(fg_gt_boxes, self.proposals.fg_boxes())
    regression_targets = regression_targets * self.bbox_regression_weights
    return fastrcnn_losses(
        self.proposals.labels,
        self.label_logits,
        regression_targets,
        self.fg_box_logits())
def losses(self):
    """Return Fast R-CNN (label, box) losses for the sampled proposals."""
    # Encode each matched gt box against its foreground proposal box.
    fg_targets = encode_bbox_target(
        self.proposals.matched_gt_boxes(), self.proposals.fg_boxes())
    # Scale targets by the configured regression weights.
    fg_targets = fg_targets * self.bbox_regression_weights
    return fastrcnn_losses(
        self.proposals.labels,
        self.label_logits,
        fg_targets,
        self.fg_box_logits())
def losses(self):
    """Return Fast R-CNN losses (label, box, mask) for the foreground inputs."""
    # Box regression targets, encoded w.r.t. the foreground input boxes.
    box_targets = encode_bbox_target(
        self.matched_gt_boxes_per_fg, self.fg_input_boxes())
    box_targets = box_targets * self.bbox_regression_weights
    # Mask targets, encoded w.r.t. the foreground input masks.
    mask_targets = encode_mask_target(
        self.matched_gt_masks_per_fg, self.fg_input_masks())
    mask_targets = mask_targets * self.mask_regression_weights
    return fastrcnn_losses(
        self.labels, self.label_logits,
        box_targets, self.fg_box_logits(),
        mask_targets, self.fg_mask_logits())
def losses(self):
    """
    Return Fast R-CNN losses including an IoU-prediction branch.

    Targets for the IoU head are the max IoU between each decoded output box
    and any ground-truth box; gradients are stopped through the targets.
    """
    # Box-regression targets for foreground proposals, scaled by config weights.
    encoded_fg_gt_boxes = encode_bbox_target(
        self.proposals.matched_gt_boxes(),
        self.proposals.fg_boxes()) * self.bbox_regression_weights
    # IoU targets: compare every decoded output box against every gt box.
    decoded_boxes = self.decoded_output_boxes()
    decoded_boxes = tf.reshape(decoded_boxes, [-1, 4])
    gt_boxes = tf.reshape(self.proposals.gt_boxes, [-1, 4])
    iou = pairwise_iou(decoded_boxes, gt_boxes)
    max_iou = tf.reduce_max(iou, axis=1)
    # if only bg gt_boxes, all ious are 0.
    pos_mask = tf.stop_gradient(tf.not_equal(self.proposals.labels, 0))
    nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32))
    # Zero out the IoU targets when there is no positive sample at all.
    # NOTE(review): tf.where is given a scalar condition with tensor branches —
    # relies on TF1 broadcast behavior here; confirm against the TF version in use.
    max_iou = tf.where(tf.equal(nr_pos, 0), tf.zeros_like(max_iou), max_iou)
    # Targets must not propagate gradients back into the box head.
    max_iou = tf.stop_gradient(tf.reshape(max_iou, [-1]))
    return fastrcnn_losses_iou(
        self.proposals.labels, self.label_logits,
        max_iou, tf.reshape(self.iou_logits, [-1]),
        encoded_fg_gt_boxes, self.fg_box_logits())
def losses(self, batch_size_per_gpu, shortcut=False):
    """
    Compute per-GPU Fast R-CNN losses for a batched (multi-image) input.

    Args:
        batch_size_per_gpu (int): number of images handled by this tower.
        shortcut (bool): if True, return a single dummy "loss" built by
            reducing the stored tensors (used to exercise the graph without
            computing the real losses).

    Returns:
        The list of loss tensors produced by ``boxclass_losses`` (or the
        single-element dummy list when ``shortcut`` is set).
    """
    assert self.training_info_available, "In order to calculate losses, we need to know GT info, but " \
                                         "add_training_info was never called"
    if shortcut:
        # Dummy reductions over each stored tensor; summed into one scalar.
        proposal_label_loss = tf.cast(tf.reduce_mean(self.proposal_labels), dtype=tf.float32)
        proposal_boxes_loss = tf.cast(tf.reduce_mean(self.proposal_boxes), dtype=tf.float32)
        proposal_fg_boxes_loss = tf.cast(tf.reduce_mean(
            self.proposal_fg_boxes), dtype=tf.float32)
        gt_box_loss = tf.cast(tf.reduce_mean(self.gt_boxes), dtype=tf.float32)
        bbox_reg_loss = tf.cast(tf.reduce_mean(
            self.bbox_regression_weights), dtype=tf.float32)
        label_logit_loss = tf.cast(tf.reduce_mean(self.label_logits), dtype=tf.float32)
        total_loss = proposal_label_loss + proposal_boxes_loss + proposal_fg_boxes_loss + gt_box_loss \
            + bbox_reg_loss + label_logit_loss
        return [total_loss]
    # Accumulate per-image targets/logits, then concat and compute one loss.
    all_labels = []
    all_label_logits = []
    all_encoded_fg_gt_boxes = []
    all_fg_box_logits = []
    for i in range(batch_size_per_gpu):
        # GT index matched to each foreground proposal of image i.
        single_image_fg_inds_wrt_gt = self.proposal_gt_id_for_each_fg[i]
        # Strip the zero-padding from this image's gt boxes.
        single_image_gt_boxes = self.gt_boxes[
            i, :self.prepadding_gt_counts[i], :]  # NumGT x 4
        gt_for_each_fg = tf.gather(
            single_image_gt_boxes, single_image_fg_inds_wrt_gt)  # NumFG x 4
        # Rows of proposal_fg_boxes whose first column (presumably the image
        # index — matches the tf.equal test below) equals i.
        single_image_fg_boxes_indices = tf.where(
            tf.equal(self.proposal_fg_boxes[:, 0], i))
        single_image_fg_boxes_indices = tf.squeeze(
            single_image_fg_boxes_indices, axis=1)
        single_image_fg_boxes = tf.gather(
            self.proposal_fg_boxes, single_image_fg_boxes_indices)  # NumFG x 5
        # Drop the leading index column, keeping the 4 box coordinates.
        single_image_fg_boxes = single_image_fg_boxes[:, 1:]  # NumFG x 4
        encoded_fg_gt_boxes = encode_bbox_target(
            gt_for_each_fg, single_image_fg_boxes) * self.bbox_regression_weights
        # All (fg + bg) proposals of image i, for the classification loss.
        single_image_box_indices = tf.squeeze(tf.where(
            tf.equal(self.proposal_boxes[:, 0], i)), axis=1)
        single_image_labels = tf.gather(
            self.proposal_labels, single_image_box_indices)  # Vector len N
        single_image_label_logits = tf.gather(self.label_logits, single_image_box_indices)
        # Map the per-image fg rows back to indices into the box logits.
        single_image_fg_box_logits_indices = tf.gather(
            self.proposal_fg_inds, single_image_fg_boxes_indices)
        single_image_fg_box_logits = tf.gather(
            self.box_logits, single_image_fg_box_logits_indices)
        all_labels.append(single_image_labels)
        all_label_logits.append(single_image_label_logits)
        all_encoded_fg_gt_boxes.append(encoded_fg_gt_boxes)
        all_fg_box_logits.append(single_image_fg_box_logits)
    return boxclass_losses(tf.concat(all_labels, axis=0),
                           tf.concat(all_label_logits, axis=0),
                           tf.concat(all_encoded_fg_gt_boxes, axis=0),
                           tf.concat(all_fg_box_logits, axis=0))
def build_graph(self, *inputs):
    """
    Build the FPN Faster/Mask R-CNN graph for one image.

    inputs layout: [image,
                    (anchor_labels_lvl_k, anchor_boxes_lvl_k) for each FPN level,
                    gt_boxes, gt_labels, (gt_masks if cfg.MODE_MASK)].

    Returns:
        total training cost tensor when training; otherwise None (inference
        outputs are exposed as named tensors, e.g. 'final_masks').
    """
    num_fpn_level = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(cfg.RPN.ANCHOR_SIZES) == num_fpn_level
    is_training = get_current_tower_context().is_training
    image = inputs[0]
    input_anchors = inputs[1:1 + 2 * num_fpn_level]
    multilevel_anchor_labels = input_anchors[0::2]
    multilevel_anchor_boxes = input_anchors[1::2]
    # FIX: derive gt indices from num_fpn_level instead of hard-coding 11/12,
    # which was only valid for exactly 5 FPN levels (identical behavior then).
    gt_boxes, gt_labels = inputs[1 + 2 * num_fpn_level], inputs[2 + 2 * num_fpn_level]
    if cfg.MODE_MASK:
        gt_masks = inputs[-1]

    image = self.preprocess(image)  # 1CHW
    image_shape2d = tf.shape(image)[2:]  # h,w

    c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCK)
    p23456 = fpn_model('fpn', c2345)

    # Images are padded for p5, which are too large for p2-p4.
    # This seems to have no effect on mAP.
    for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES[:3]):
        pi = p23456[i]
        target_shape = tf.to_int32(
            tf.ceil(tf.to_float(image_shape2d) * (1.0 / stride)))
        p23456[i] = tf.slice(pi, [0, 0, 0, 0],
                             tf.concat([[-1, -1], target_shape], axis=0))
        p23456[i].set_shape([1, pi.shape[1], None, None])

    # Multi-Level RPN Proposals
    multilevel_proposals = []
    rpn_loss_collection = []
    for lvl in range(num_fpn_level):
        rpn_label_logits, rpn_box_logits = rpn_head(
            'rpn', p23456[lvl], cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
        with tf.name_scope('FPN_lvl{}'.format(lvl + 2)):
            anchors = tf.constant(get_all_anchors_fpn()[lvl],
                                  name='rpn_anchor_lvl{}'.format(lvl + 2))
            anchors, anchor_labels, anchor_boxes = \
                self.narrow_to_featuremap(p23456[lvl], anchors,
                                          multilevel_anchor_labels[lvl],
                                          multilevel_anchor_boxes[lvl])
            anchor_boxes_encoded = encode_bbox_target(anchor_boxes, anchors)
            pred_boxes_decoded = decode_bbox_target(rpn_box_logits, anchors)
            proposal_boxes, proposal_scores = generate_rpn_proposals(
                tf.reshape(pred_boxes_decoded, [-1, 4]),
                tf.reshape(rpn_label_logits, [-1]),
                image_shape2d,
                cfg.RPN.TRAIN_FPN_NMS_TOPK if is_training else cfg.RPN.TEST_FPN_NMS_TOPK)
            multilevel_proposals.append((proposal_boxes, proposal_scores))
            if is_training:
                label_loss, box_loss = rpn_losses(
                    anchor_labels, anchor_boxes_encoded,
                    rpn_label_logits, rpn_box_logits)
                rpn_loss_collection.extend([label_loss, box_loss])

    # Merge proposals from multi levels, pick top K
    proposal_boxes = tf.concat([x[0] for x in multilevel_proposals], axis=0)  # nx4
    proposal_scores = tf.concat([x[1] for x in multilevel_proposals], axis=0)  # n
    proposal_topk = tf.minimum(
        tf.size(proposal_scores),
        cfg.RPN.TRAIN_FPN_NMS_TOPK if is_training else cfg.RPN.TEST_FPN_NMS_TOPK)
    proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False)
    proposal_boxes = tf.gather(proposal_boxes, topk_indices)

    if is_training:
        rcnn_boxes, rcnn_labels, fg_inds_wrt_gt = sample_fast_rcnn_targets(
            proposal_boxes, gt_boxes, gt_labels)
    else:
        # The boxes to be used to crop RoIs.
        rcnn_boxes = proposal_boxes

    roi_feature_fastrcnn = multilevel_roi_align(p23456[:4], rcnn_boxes, 7)
    fastrcnn_head_func = getattr(model, cfg.FPN.FRCNN_HEAD_FUNC)
    fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_head_func(
        'fastrcnn', roi_feature_fastrcnn, cfg.DATA.NUM_CLASS)

    if is_training:
        # rpn loss is already defined above
        with tf.name_scope('rpn_losses'):
            rpn_total_label_loss = tf.add_n(rpn_loss_collection[::2], name='label_loss')
            rpn_total_box_loss = tf.add_n(rpn_loss_collection[1::2], name='box_loss')
            add_moving_summary(rpn_total_box_loss, rpn_total_label_loss)

        # fastrcnn loss:
        matched_gt_boxes = tf.gather(gt_boxes, fg_inds_wrt_gt)
        fg_inds_wrt_sample = tf.reshape(tf.where(rcnn_labels > 0), [-1])  # fg inds w.r.t all samples
        fg_sampled_boxes = tf.gather(rcnn_boxes, fg_inds_wrt_sample)
        fg_fastrcnn_box_logits = tf.gather(fastrcnn_box_logits, fg_inds_wrt_sample)
        fastrcnn_label_loss, fastrcnn_box_loss = self.fastrcnn_training(
            image, rcnn_labels, fg_sampled_boxes,
            matched_gt_boxes, fastrcnn_label_logits, fg_fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # maskrcnn loss
            fg_labels = tf.gather(rcnn_labels, fg_inds_wrt_sample)
            roi_feature_maskrcnn = multilevel_roi_align(
                p23456[:4], fg_sampled_boxes, 14)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 4)  # #fg x #cat x 28 x 28
            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                fg_sampled_boxes,
                fg_inds_wrt_gt, 28, pad_border=False)  # fg x 1x28x28
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            mrcnn_loss = maskrcnn_loss(mask_logits, fg_labels, target_masks_for_fg)
        else:
            mrcnn_loss = 0.0

        wd_cost = regularize_cost(
            '(?:group1|group2|group3|rpn|fpn|fastrcnn|maskrcnn)/.*W',
            l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')

        total_cost = tf.add_n(
            rpn_loss_collection + [fastrcnn_label_loss, fastrcnn_box_loss, mrcnn_loss, wd_cost],
            'total_cost')
        add_moving_summary(total_cost, wd_cost)
        return total_cost
    else:
        final_boxes, final_labels = self.fastrcnn_inference(
            image_shape2d, rcnn_boxes, fastrcnn_label_logits, fastrcnn_box_logits)
        if cfg.MODE_MASK:
            # Cascade inference needs roi transform with refined boxes.
            roi_feature_maskrcnn = multilevel_roi_align(
                p23456[:4], final_boxes, 14)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 4)  # #fg x #cat x 28 x 28
            # Pick the mask channel of each result's predicted class.
            indices = tf.stack([
                tf.range(tf.size(final_labels)),
                tf.to_int32(final_labels) - 1
            ], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)  # #resultx28x28
            tf.sigmoid(final_mask_logits, name='final_masks')
def build_graph(self, *inputs):
    """
    Build the C4-backbone Faster/Mask R-CNN graph for one image.

    inputs: (image, anchor_labels, anchor_boxes, gt_boxes, gt_labels
             [, gt_masks if cfg.MODE_MASK]).

    Returns:
        total training cost tensor when training; otherwise None (inference
        outputs are exposed as named tensors, e.g. 'final_masks').
    """
    is_training = get_current_tower_context().is_training
    if cfg.MODE_MASK:
        image, anchor_labels, anchor_boxes, gt_boxes, gt_labels, gt_masks = inputs
    else:
        image, anchor_labels, anchor_boxes, gt_boxes, gt_labels = inputs
    image = self.preprocess(image)  # 1CHW

    # Backbone up to C4, then the RPN head on the shared feature map.
    featuremap = resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCK[:3])
    rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, 1024, cfg.RPN.NUM_ANCHOR)

    # Clip anchors/labels/boxes to the actual feature-map extent.
    fm_anchors, anchor_labels, anchor_boxes = self.narrow_to_featuremap(
        featuremap, get_all_anchors(), anchor_labels, anchor_boxes)
    anchor_boxes_encoded = encode_bbox_target(anchor_boxes, fm_anchors)

    image_shape2d = tf.shape(image)[2:]  # h,w
    pred_boxes_decoded = decode_bbox_target(
        rpn_box_logits, fm_anchors)  # fHxfWxNAx4, floatbox
    proposal_boxes, proposal_scores = generate_rpn_proposals(
        tf.reshape(pred_boxes_decoded, [-1, 4]),
        tf.reshape(rpn_label_logits, [-1]),
        image_shape2d,
        cfg.RPN.TRAIN_PRE_NMS_TOPK if is_training else cfg.RPN.TEST_PRE_NMS_TOPK,
        cfg.RPN.TRAIN_POST_NMS_TOPK if is_training else cfg.RPN.TEST_POST_NMS_TOPK)

    if is_training:
        # sample proposal boxes in training
        rcnn_boxes, rcnn_labels, fg_inds_wrt_gt = sample_fast_rcnn_targets(
            proposal_boxes, gt_boxes, gt_labels)
    else:
        # The boxes to be used to crop RoIs.
        # Use all proposal boxes in inference
        rcnn_boxes = proposal_boxes

    # RoIAlign on the C4 feature map, then the C5 block as the box head.
    boxes_on_featuremap = rcnn_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
    roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
    feature_fastrcnn = resnet_conv5(
        roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCK[-1])  # nxcx7x7
    # Keep C5 feature to be shared with mask branch
    feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
    fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
        'fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)

    if is_training:
        # rpn loss
        rpn_label_loss, rpn_box_loss = rpn_losses(anchor_labels,
                                                  anchor_boxes_encoded,
                                                  rpn_label_logits,
                                                  rpn_box_logits)

        # fastrcnn loss
        matched_gt_boxes = tf.gather(gt_boxes, fg_inds_wrt_gt)
        fg_inds_wrt_sample = tf.reshape(tf.where(rcnn_labels > 0), [-1])  # fg inds w.r.t all samples
        fg_sampled_boxes = tf.gather(rcnn_boxes, fg_inds_wrt_sample)
        fg_fastrcnn_box_logits = tf.gather(fastrcnn_box_logits, fg_inds_wrt_sample)
        fastrcnn_label_loss, fastrcnn_box_loss = self.fastrcnn_training(
            image, rcnn_labels, fg_sampled_boxes,
            matched_gt_boxes, fastrcnn_label_logits, fg_fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # maskrcnn loss
            fg_labels = tf.gather(rcnn_labels, fg_inds_wrt_sample)
            # In training, mask branch shares the same C5 feature.
            fg_feature = tf.gather(feature_fastrcnn, fg_inds_wrt_sample)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)  # #fg x #cat x 14x14
            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                fg_sampled_boxes,
                fg_inds_wrt_gt, 14, pad_border=False)  # nfg x 1x14x14
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            mrcnn_loss = maskrcnn_loss(mask_logits, fg_labels, target_masks_for_fg)
        else:
            mrcnn_loss = 0.0

        wd_cost = regularize_cost(
            '(?:group1|group2|group3|rpn|fastrcnn|maskrcnn)/.*W',
            l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')

        total_cost = tf.add_n([
            rpn_label_loss, rpn_box_loss,
            fastrcnn_label_loss, fastrcnn_box_loss,
            mrcnn_loss, wd_cost
        ], 'total_cost')
        add_moving_summary(total_cost, wd_cost)
        return total_cost
    else:
        final_boxes, final_labels = self.fastrcnn_inference(
            image_shape2d, rcnn_boxes, fastrcnn_label_logits, fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # Mask inference re-runs C5 on the refined boxes.
            roi_resized = roi_align(
                featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
            feature_maskrcnn = resnet_conv5(
                roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCK[-1])
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)  # #result x #cat x 14x14
            # Pick the mask channel of each result's predicted class.
            indices = tf.stack([
                tf.range(tf.size(final_labels)),
                tf.to_int32(final_labels) - 1
            ], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)  # #resultx14x14
            tf.sigmoid(final_mask_logits, name='final_masks')
def build_graph(self, *inputs):
    """
    Build the FPN Faster/Mask R-CNN graph for one image.

    inputs layout: [image,
                    (anchor_labels_lvl_k, anchor_boxes_lvl_k) for each FPN level,
                    gt_boxes, gt_labels, (gt_masks if cfg.MODE_MASK)].
    NOTE(review): gt_boxes/gt_labels use hard-coded indices 11/12, which only
    match the generic anchor slice when there are exactly 5 FPN levels.

    Returns:
        total training cost tensor when training; otherwise None (inference
        outputs are exposed as named tensors, e.g. 'final_masks').
    """
    num_fpn_level = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(cfg.RPN.ANCHOR_SIZES) == num_fpn_level
    is_training = get_current_tower_context().is_training
    image = inputs[0]
    input_anchors = inputs[1: 1 + 2 * num_fpn_level]
    multilevel_anchor_labels = input_anchors[0::2]
    multilevel_anchor_boxes = input_anchors[1::2]
    gt_boxes, gt_labels = inputs[11], inputs[12]
    if cfg.MODE_MASK:
        gt_masks = inputs[-1]

    image = self.preprocess(image)  # 1CHW
    image_shape2d = tf.shape(image)[2:]  # h,w

    c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCK)
    p23456 = fpn_model('fpn', c2345)

    # Images are padded for p5, which are too large for p2-p4.
    # This seems to have no effect on mAP.
    for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES[:3]):
        pi = p23456[i]
        target_shape = tf.to_int32(tf.ceil(tf.to_float(image_shape2d) * (1.0 / stride)))
        p23456[i] = tf.slice(pi, [0, 0, 0, 0],
                             tf.concat([[-1, -1], target_shape], axis=0))
        p23456[i].set_shape([1, pi.shape[1], None, None])

    # Multi-Level RPN Proposals
    multilevel_proposals = []
    rpn_loss_collection = []
    for lvl in range(num_fpn_level):
        rpn_label_logits, rpn_box_logits = rpn_head(
            'rpn', p23456[lvl], cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
        with tf.name_scope('FPN_lvl{}'.format(lvl + 2)):
            anchors = tf.constant(get_all_anchors_fpn()[lvl],
                                  name='rpn_anchor_lvl{}'.format(lvl + 2))
            anchors, anchor_labels, anchor_boxes = \
                self.narrow_to_featuremap(p23456[lvl], anchors,
                                          multilevel_anchor_labels[lvl],
                                          multilevel_anchor_boxes[lvl])
            anchor_boxes_encoded = encode_bbox_target(anchor_boxes, anchors)
            pred_boxes_decoded = decode_bbox_target(rpn_box_logits, anchors)
            proposal_boxes, proposal_scores = generate_rpn_proposals(
                tf.reshape(pred_boxes_decoded, [-1, 4]),
                tf.reshape(rpn_label_logits, [-1]),
                image_shape2d,
                cfg.RPN.TRAIN_FPN_NMS_TOPK if is_training else cfg.RPN.TEST_FPN_NMS_TOPK)
            multilevel_proposals.append((proposal_boxes, proposal_scores))
            if is_training:
                label_loss, box_loss = rpn_losses(
                    anchor_labels, anchor_boxes_encoded,
                    rpn_label_logits, rpn_box_logits)
                rpn_loss_collection.extend([label_loss, box_loss])

    # Merge proposals from multi levels, pick top K
    proposal_boxes = tf.concat([x[0] for x in multilevel_proposals], axis=0)  # nx4
    proposal_scores = tf.concat([x[1] for x in multilevel_proposals], axis=0)  # n
    proposal_topk = tf.minimum(tf.size(proposal_scores),
                               cfg.RPN.TRAIN_FPN_NMS_TOPK if is_training
                               else cfg.RPN.TEST_FPN_NMS_TOPK)
    proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False)
    proposal_boxes = tf.gather(proposal_boxes, topk_indices)

    if is_training:
        rcnn_boxes, rcnn_labels, fg_inds_wrt_gt = sample_fast_rcnn_targets(
            proposal_boxes, gt_boxes, gt_labels)
    else:
        # The boxes to be used to crop RoIs.
        rcnn_boxes = proposal_boxes

    roi_feature_fastrcnn = multilevel_roi_align(p23456[:4], rcnn_boxes, 7)
    fastrcnn_head_func = getattr(model, cfg.FPN.FRCNN_HEAD_FUNC)
    fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_head_func(
        'fastrcnn', roi_feature_fastrcnn, cfg.DATA.NUM_CLASS)

    if is_training:
        # rpn loss is already defined above
        with tf.name_scope('rpn_losses'):
            rpn_total_label_loss = tf.add_n(rpn_loss_collection[::2], name='label_loss')
            rpn_total_box_loss = tf.add_n(rpn_loss_collection[1::2], name='box_loss')
            add_moving_summary(rpn_total_box_loss, rpn_total_label_loss)

        # fastrcnn loss:
        matched_gt_boxes = tf.gather(gt_boxes, fg_inds_wrt_gt)
        fg_inds_wrt_sample = tf.reshape(tf.where(rcnn_labels > 0), [-1])  # fg inds w.r.t all samples
        fg_sampled_boxes = tf.gather(rcnn_boxes, fg_inds_wrt_sample)
        fg_fastrcnn_box_logits = tf.gather(fastrcnn_box_logits, fg_inds_wrt_sample)
        fastrcnn_label_loss, fastrcnn_box_loss = self.fastrcnn_training(
            image, rcnn_labels, fg_sampled_boxes,
            matched_gt_boxes, fastrcnn_label_logits, fg_fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # maskrcnn loss
            fg_labels = tf.gather(rcnn_labels, fg_inds_wrt_sample)
            roi_feature_maskrcnn = multilevel_roi_align(
                p23456[:4], fg_sampled_boxes, 14)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 4)  # #fg x #cat x 28 x 28
            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                fg_sampled_boxes,
                fg_inds_wrt_gt, 28, pad_border=False)  # fg x 1x28x28
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            mrcnn_loss = maskrcnn_loss(mask_logits, fg_labels, target_masks_for_fg)
        else:
            mrcnn_loss = 0.0

        wd_cost = regularize_cost(
            '(?:group1|group2|group3|rpn|fpn|fastrcnn|maskrcnn)/.*W',
            l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')

        total_cost = tf.add_n(rpn_loss_collection + [
            fastrcnn_label_loss, fastrcnn_box_loss, mrcnn_loss, wd_cost], 'total_cost')
        add_moving_summary(total_cost, wd_cost)
        return total_cost
    else:
        final_boxes, final_labels = self.fastrcnn_inference(
            image_shape2d, rcnn_boxes, fastrcnn_label_logits, fastrcnn_box_logits)
        if cfg.MODE_MASK:
            # Cascade inference needs roi transform with refined boxes.
            roi_feature_maskrcnn = multilevel_roi_align(p23456[:4], final_boxes, 14)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 4)  # #fg x #cat x 28 x 28
            # Pick the mask channel of each result's predicted class.
            indices = tf.stack([tf.range(tf.size(final_labels)),
                                tf.to_int32(final_labels) - 1], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)  # #resultx28x28
            tf.sigmoid(final_mask_logits, name='final_masks')
def build_graph(self, *inputs):
    """
    Build the C4-backbone Faster/Mask R-CNN graph for one image.

    inputs: (image, anchor_labels, anchor_boxes, gt_boxes, gt_labels
             [, gt_masks if cfg.MODE_MASK]).

    Returns:
        total training cost tensor when training; otherwise None (inference
        outputs are exposed as named tensors, e.g. 'final_masks').
    """
    is_training = get_current_tower_context().is_training
    if cfg.MODE_MASK:
        image, anchor_labels, anchor_boxes, gt_boxes, gt_labels, gt_masks = inputs
    else:
        image, anchor_labels, anchor_boxes, gt_boxes, gt_labels = inputs
    image = self.preprocess(image)  # 1CHW

    # Backbone up to C4, then the RPN head on the shared feature map.
    featuremap = resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCK[:3])
    rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, 1024, cfg.RPN.NUM_ANCHOR)

    # Clip anchors/labels/boxes to the actual feature-map extent.
    fm_anchors, anchor_labels, anchor_boxes = self.narrow_to_featuremap(
        featuremap, get_all_anchors(), anchor_labels, anchor_boxes)
    anchor_boxes_encoded = encode_bbox_target(anchor_boxes, fm_anchors)

    image_shape2d = tf.shape(image)[2:]  # h,w
    pred_boxes_decoded = decode_bbox_target(rpn_box_logits, fm_anchors)  # fHxfWxNAx4, floatbox
    proposal_boxes, proposal_scores = generate_rpn_proposals(
        tf.reshape(pred_boxes_decoded, [-1, 4]),
        tf.reshape(rpn_label_logits, [-1]),
        image_shape2d,
        cfg.RPN.TRAIN_PRE_NMS_TOPK if is_training else cfg.RPN.TEST_PRE_NMS_TOPK,
        cfg.RPN.TRAIN_POST_NMS_TOPK if is_training else cfg.RPN.TEST_POST_NMS_TOPK)

    if is_training:
        # sample proposal boxes in training
        rcnn_boxes, rcnn_labels, fg_inds_wrt_gt = sample_fast_rcnn_targets(
            proposal_boxes, gt_boxes, gt_labels)
    else:
        # The boxes to be used to crop RoIs.
        # Use all proposal boxes in inference
        rcnn_boxes = proposal_boxes

    # RoIAlign on the C4 feature map, then the C5 block as the box head.
    boxes_on_featuremap = rcnn_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
    roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
    feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCK[-1])  # nxcx7x7
    # Keep C5 feature to be shared with mask branch
    feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
    fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)

    if is_training:
        # rpn loss
        rpn_label_loss, rpn_box_loss = rpn_losses(
            anchor_labels, anchor_boxes_encoded, rpn_label_logits, rpn_box_logits)

        # fastrcnn loss
        matched_gt_boxes = tf.gather(gt_boxes, fg_inds_wrt_gt)
        fg_inds_wrt_sample = tf.reshape(tf.where(rcnn_labels > 0), [-1])  # fg inds w.r.t all samples
        fg_sampled_boxes = tf.gather(rcnn_boxes, fg_inds_wrt_sample)
        fg_fastrcnn_box_logits = tf.gather(fastrcnn_box_logits, fg_inds_wrt_sample)
        fastrcnn_label_loss, fastrcnn_box_loss = self.fastrcnn_training(
            image, rcnn_labels, fg_sampled_boxes, matched_gt_boxes,
            fastrcnn_label_logits, fg_fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # maskrcnn loss
            fg_labels = tf.gather(rcnn_labels, fg_inds_wrt_sample)
            # In training, mask branch shares the same C5 feature.
            fg_feature = tf.gather(feature_fastrcnn, fg_inds_wrt_sample)
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)  # #fg x #cat x 14x14
            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                fg_sampled_boxes,
                fg_inds_wrt_gt, 14, pad_border=False)  # nfg x 1x14x14
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            mrcnn_loss = maskrcnn_loss(mask_logits, fg_labels, target_masks_for_fg)
        else:
            mrcnn_loss = 0.0

        wd_cost = regularize_cost(
            '(?:group1|group2|group3|rpn|fastrcnn|maskrcnn)/.*W',
            l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')

        total_cost = tf.add_n([
            rpn_label_loss, rpn_box_loss,
            fastrcnn_label_loss, fastrcnn_box_loss,
            mrcnn_loss, wd_cost], 'total_cost')
        add_moving_summary(total_cost, wd_cost)
        return total_cost
    else:
        final_boxes, final_labels = self.fastrcnn_inference(
            image_shape2d, rcnn_boxes, fastrcnn_label_logits, fastrcnn_box_logits)

        if cfg.MODE_MASK:
            # Mask inference re-runs C5 on the refined boxes.
            roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
            feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCK[-1])
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)  # #result x #cat x 14x14
            # Pick the mask channel of each result's predicted class.
            indices = tf.stack([tf.range(tf.size(final_labels)),
                                tf.to_int32(final_labels) - 1], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)  # #resultx14x14
            tf.sigmoid(final_mask_logits, name='final_masks')