Example #1
    def GenerateProposalLabels(self, blobs_in):
        """Op for generating training labels for RPN proposals. This is used
        when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
        Faster R-CNN training).

        blobs_in:
          - 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
          - 'roidb': roidb entries that will be labeled
          - 'im_info': See GenerateProposals doc.

        blobs_out:
          - (variable set of blobs): returns whatever blobs are required for
            training the model. It does this by querying the data loader for
            the list of blobs that are needed.
        """
        name = 'GenerateProposalLabelsOp:' + ','.join(
            [str(b) for b in blobs_in])

        # The list of blobs is not known before run-time because it depends on
        # the specific model being trained. Query the data loader to get the
        # list of output blob names.
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train)
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        self.net.Python(GenerateProposalLabelsOp().forward)(blobs_in,
                                                            blobs_out,
                                                            name=name)
        return blobs_out
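The output blob list returned by this data-loader query depends on the configuration; the comment in Example #18 shows a typical FPN training set ('rois', 'labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights', the per-level 'rois_fpn<i>' blobs and 'rois_idx_restore_int32'). A minimal stand-in illustrating the shape of such a list (hypothetical helper, not the real fast_rcnn_roi_data module):

def get_fast_rcnn_blob_names_sketch(is_training=True, fpn_on=True,
                                    roi_min_level=2, roi_max_level=5):
    # Illustrative only: rough shape of the blob-name list the data-loader
    # query returns; see the comment in Example #18 for an actual listing.
    blob_names = ['rois']
    if is_training:
        blob_names += ['labels_int32', 'bbox_targets',
                       'bbox_inside_weights', 'bbox_outside_weights']
    if fpn_on:
        # One per-level RoI blob plus the permutation that restores ordering
        blob_names += ['rois_fpn' + str(lvl)
                       for lvl in range(roi_min_level, roi_max_level + 1)]
        blob_names += ['rois_idx_restore_int32']
    return blob_names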
Example #2
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels
    blob_names = ['data']

    if cfg.REID.APM:
        blob_names += reid_apm_roi_data.get_reid_blob_names(
            is_training=is_training)
    else:
        blob_names += reid_roi_data.get_reid_blob_names(
            is_training=is_training)
    return blob_names

    # NOTE: the early return above makes everything below unreachable; the
    # original RPN / RetinaNet / Fast R-CNN branches are kept as written.
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training)
    return blob_names
Example #3
    def CollectAndDistributeFpnRpnProposals(self):

        k_max = cfg.FPN.RPN_MAX_LEVEL
        k_min = cfg.FPN.RPN_MIN_LEVEL

        # Prepare input blobs
        rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
        score_names = [
            'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
        ]
        blobs_in = rois_names + score_names
        if self.train:
            blobs_in += ['roidb', 'im_info']
        blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
        name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
            [str(b) for b in blobs_in])

        # Prepare output blobs
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train)
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        outputs = self.net.Python(
            CollectAndDistributeFpnRpnProposalsOp(self.train).forward)(
                blobs_in, blobs_out, name=name)

        return outputs
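For the default FPN RPN levels (k_min=2, k_max=6) the input blob list and the derived op name are easy to reproduce. A self-contained sketch using plain strings in place of core.ScopedBlobReference:

# Reconstruct blobs_in and the op name for the training case (illustrative).
k_min, k_max = 2, 6
rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
score_names = ['rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)]
blobs_in = rois_names + score_names + ['roidb', 'im_info']
name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(blobs_in)
print(name)  # the comma-joined list of all twelve input blob names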
Example #4
    def GenerateProposalLabels(self, blobs_in):
        """Op for generating training labels for RPN proposals. This is used
        when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
        Faster R-CNN training).

        blobs_in:
          - 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
          - 'roidb': roidb entries that will be labeled
          - 'im_info': See GenerateProposals doc.

        blobs_out:
          - (variable set of blobs): returns whatever blobs are required for
            training the model. It does this by querying the data loader for
            the list of blobs that are needed.
        """
        name = 'GenerateProposalLabelsOp:' + ','.join(
            [str(b) for b in blobs_in]
        )

        # The list of blobs is not known before run-time because it depends on
        # the specific model being trained. Query the data loader to get the
        # list of output blob names.
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train
        )
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        self.net.Python(GenerateProposalLabelsOp().forward)(
            blobs_in, blobs_out, name=name
        )
        return blobs_out
Example #5
 def forward(self, inputs, outputs):
     """See modeling.detector.GenerateProposalLabels for inputs/outputs
     documentation.
     """
     # During training we reuse the data loader code. We populate roidb
     # entries on the fly using the rois generated by RPN.
     # im_info: [[im_height, im_width, im_scale], ...]
     rois = inputs[0].data
     roidb = blob_utils.deserialize(inputs[1].data)
     im_info = inputs[2].data
     im_scales = im_info[:, 2]
     if cfg.POLYGON.POLYGON_ON:
         output_blob_names = polygon_rcnn_roi_data.get_polygon_rcnn_blob_names(
         )
     else:
         output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names()
     # For historical consistency with the original Faster R-CNN
     # implementation we are *not* filtering crowd proposals.
     # This choice should be investigated in the future (it likely does
     # not matter).
     json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
     if cfg.POLYGON.POLYGON_ON:
         roidb_utils.add_polygon_regression_targets(roidb)
     else:
         roidb_utils.add_bbox_regression_targets(roidb)
     blobs = {k: [] for k in output_blob_names}
     if cfg.POLYGON.POLYGON_ON:
         polygon_rcnn_roi_data.add_polygon_rcnn_blobs(
             blobs, im_scales, roidb)
     else:
         fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
     for i, k in enumerate(output_blob_names):
         blob_utils.py_op_copy_blob(blobs[k], outputs[i])
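The 'roidb' input arrives as a serialized blob and is recovered with blob_utils.deserialize. A plausible, self-contained sketch of pickle-based blob serialization, assuming a uint8-array encoding (illustrative; not necessarily the exact blob_utils implementation):

import pickle
import numpy as np

def serialize_sketch(obj):
    # Pickle the Python object and expose it as a numpy array so it can be
    # passed through the workspace like any other blob.
    return np.frombuffer(pickle.dumps(obj), dtype=np.uint8)

def deserialize_sketch(arr):
    # Inverse of serialize_sketch: recover the original Python object.
    return pickle.loads(arr.astype(np.uint8).tobytes())

# Round trip on a tiny roidb-like entry
roidb = [{'boxes': np.zeros((0, 4), dtype=np.float32), 'gt_classes': []}]
assert deserialize_sketch(serialize_sketch(roidb))[0]['gt_classes'] == []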
Example #6
def distribute_plus_pose(rois, label_blobs, inputs, outputs, train):

    lvl_min = cfg.FPN.ROI_MIN_LEVEL
    lvl_max = cfg.FPN.ROI_MAX_LEVEL
    lvls = fpn.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)

    im_info = inputs[-1].data
    #    print('inputs[-1].data shape:', im_info.shape)
    im_info = im_info[0]
    #    print('im_info shape:', im_info.shape)
    im_scale = im_info[2]
    im_h = im_info[0]
    im_w = im_info[1]
    output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names(train)
    blobs = {k: [] for k in output_blob_names}

    hg_rois = rois * 1. / im_scale * np.array(
        [1, 384.0 / im_w, 384.0 / im_h, 384.0 / im_w, 384.0 / im_h],
        dtype=np.float32)

    #    hg_rois = rois[:, 1:5] * 1. / im_scale * np.array([1, 255.0/im_w, 255.0/im_h, 255.0/im_w, 255.0/im_h], dtype=np.float32)

    blobs['rois'] = rois
    blobs['rois_hg'] = hg_rois

    fpn.add_multilevel_roi_blobs(blobs, 'rois', blobs['rois'], lvls, lvl_min,
                                 lvl_max)
    fpn.add_multilevel_roi_blobs(blobs, 'rois_hg', blobs['rois_hg'], lvls,
                                 lvl_min, lvl_max)
    for i, k in enumerate(output_blob_names):
        blob_utils.py_op_copy_blob(blobs[k], outputs[i])
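fpn.map_rois_to_fpn_levels assigns each RoI to a pyramid level based on its scale. A sketch of the standard FPN heuristic, k = floor(k0 + log2(sqrt(w*h) / 224)) clamped to [lvl_min, lvl_max]; the canonical scale and level constants below are the usual defaults and may differ from the actual implementation:

import numpy as np

def map_rois_to_fpn_levels_sketch(boxes, lvl_min, lvl_max,
                                  canonical_scale=224, canonical_level=4):
    # boxes: (N, 4) array of (x1, y1, x2, y2) in image coordinates
    w = boxes[:, 2] - boxes[:, 0] + 1
    h = boxes[:, 3] - boxes[:, 1] + 1
    areas = w * h
    # Larger RoIs map to coarser (higher) pyramid levels
    lvls = np.floor(canonical_level +
                    np.log2(np.sqrt(areas) / canonical_scale + 1e-6))
    return np.clip(lvls, lvl_min, lvl_max).astype(np.int32)

# A 224x224 box lands on the canonical level; a tiny box clamps to lvl_min
boxes = np.array([[0, 0, 223, 223], [0, 0, 31, 31]], dtype=np.float32)
print(map_rois_to_fpn_levels_sketch(boxes, lvl_min=2, lvl_max=5))  # [4 2]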
Example #7
 def forward(self, inputs, outputs):
     """See modeling.detector.CollectAndDistributeFpnRpnProposals for
     inputs/outputs documentation.
     """
     # inputs is
     # [rpn_rois_fpn2, ..., rpn_rois_fpn6,
     #  rpn_roi_probs_fpn2, ..., rpn_roi_probs_fpn6]
     # If training with Faster R-CNN, then inputs will additionally include
     #  + [roidb, im_info]
     rois = collect(inputs, self._train)
     if self._train:
         # During training we reuse the data loader code. We populate roidb
         # entries on the fly using the rois generated by RPN.
         # im_info: [[im_height, im_width, im_scale], ...]
         im_info = inputs[-1].data
         im_scales = im_info[:, 2]
         roidb = blob_utils.deserialize(inputs[-2].data)
         # For historical consistency with the original Faster R-CNN
         # implementation we are *not* filtering crowd proposals.
         # This choice should be investigated in the future (it likely does
         # not matter).
         json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
         roidb_utils.add_bbox_regression_targets(roidb)
         # Compute training labels for the RPN proposals; also handles
         # distributing the proposals over FPN levels
         output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names()
         blobs = {k: [] for k in output_blob_names}
         fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_info, roidb)
         for i, k in enumerate(output_blob_names):
             blob_utils.py_op_copy_blob(blobs[k], outputs[i])
     else:
         # For inference we have a special code path that avoids some data
         # loader overhead
         distribute(rois, None, outputs, self._train)
Example #8
 def forward(self, inputs, outputs):
     """See modeling.detector.CollectAndDistributeFpnRpnProposals for
     inputs/outputs documentation.
     """
     # inputs is
     # [rpn_rois_fpn2, ..., rpn_rois_fpn6,
     #  rpn_roi_probs_fpn2, ..., rpn_roi_probs_fpn6]
     # If training with Faster R-CNN, then inputs will additionally include
     #  + [roidb, im_info]
     rois = collect(inputs, self._train)
     if self._train:
         # During training we reuse the data loader code. We populate roidb
         # entries on the fly using the rois generated by RPN.
         # im_info: [[im_height, im_width, im_scale], ...]
         im_info = inputs[-1].data
         im_scales = im_info[:, 2]
         roidb = blob_utils.deserialize(inputs[-2].data)
         # For historical consistency with the original Faster R-CNN
         # implementation we are *not* filtering crowd proposals.
         # This choice should be investigated in the future (it likely does
         # not matter).
         json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
         # Compute training labels for the RPN proposals; also handles
         # distributing the proposals over FPN levels
         output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names()
         blobs = {k: [] for k in output_blob_names}
         fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
         for i, k in enumerate(output_blob_names):
             blob_utils.py_op_copy_blob(blobs[k], outputs[i])
     else:
         # For inference we have a special code path that avoids some data
         # loader overhead
         distribute(rois, None, outputs, self._train)
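Inside add_fast_rcnn_blobs the labeled proposals are subsampled into a fixed-size minibatch of foreground and background RoIs. A simplified sketch of that sampling policy, using the usual Fast R-CNN defaults (the real Detectron code also builds labels, regression targets and weights):

import numpy as np

def sample_rois_sketch(max_overlaps, rois_per_image=512, fg_fraction=0.25,
                       fg_thresh=0.5, bg_thresh_hi=0.5, bg_thresh_lo=0.0):
    # max_overlaps: (N,) IoU of each proposal with its best-matching GT box
    fg_inds = np.where(max_overlaps >= fg_thresh)[0]
    bg_inds = np.where((max_overlaps < bg_thresh_hi) &
                       (max_overlaps >= bg_thresh_lo))[0]
    # Cap the number of foreground RoIs; fill the rest with background RoIs
    fg_per_image = min(int(np.round(fg_fraction * rois_per_image)),
                       fg_inds.size)
    if fg_per_image > 0:
        fg_inds = np.random.choice(fg_inds, size=fg_per_image, replace=False)
    bg_per_image = min(rois_per_image - fg_per_image, bg_inds.size)
    if bg_per_image > 0:
        bg_inds = np.random.choice(bg_inds, size=bg_per_image, replace=False)
    return fg_inds, bg_inds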
Example #9
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels
    blob_names = ['data']
    blob_names += ['normalizer'] # focal loss at fast_rcnn_heads
#    blob_names += ['normalizer_fcn'] # focal loss at mask_res_top
#    blob_names += ['pose_pred']
    blob_names += ['pose_pred_4']
    blob_names += ['pose_pred_8']
    blob_names += ['pose_pred_16']
    blob_names += ['pose_pred_32']
    
    blob_names += ['pose_line_8']
    blob_names += ['pose_line_16']
    
    # seg_gt_label: add segmentation on top of fpn2-5
    blob_names += ['seg_gt_label']
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training
        )
    return blob_names
Example #10
    def CollectAndDistributeFpnRpnProposals(self):
        """Merge RPN proposals generated at multiple FPN levels and then
        distribute those proposals to their appropriate FPN levels. An anchor
        at one FPN level may predict an RoI that will map to another level,
        hence the need to redistribute the proposals.

        This function assumes standard blob names for input and output blobs.

        Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
                      rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
          - rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
            documentation from GenerateProposals.
          - rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
            level i; see rpn_roi_probs documentation from GenerateProposals.

        If used during training, then the input blobs will also include:
          [roidb, im_info] (see GenerateProposalLabels).

        Output blobs: [rois_fpn<min>, ..., rois_fpn<max>, rois,
                       rois_idx_restore]
          - rois_fpn<i> are the RPN proposals for FPN level i
          - rois_idx_restore is a permutation on the concatenation of all
            rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
            restored to their original order in the input blobs.

        If used during training, then the output blobs will also include:
          [labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
        """
        k_max = cfg.FPN.RPN_MAX_LEVEL
        k_min = cfg.FPN.RPN_MIN_LEVEL

        # Prepare input blobs
        rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
        score_names = [
            'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
        ]
        blobs_in = rois_names + score_names
        if self.train:
            blobs_in += ['roidb', 'im_info']
#            blobs_in += ['roidb']
#        blobs_in += ['im_info'] # if use rois_hg
        blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
        name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
            [str(b) for b in blobs_in])

        # Prepare output blobs
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train)
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        outputs = self.net.Python(
            CollectAndDistributeFpnRpnProposalsOp(self.train).forward)(
                blobs_in, blobs_out, name=name)

        return outputs
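The rois_idx_restore blob described in the docstring can be computed with a single argsort over the per-level regrouping. A self-contained sketch of the distribution step (the actual fpn.add_multilevel_roi_blobs additionally writes the per-level blobs into the blobs dict):

import numpy as np

def distribute_sketch(rois, lvls, lvl_min, lvl_max):
    # rois: (N, 5) [batch_idx, x1, y1, x2, y2]; lvls: (N,) target FPN level
    rois_by_level, idx_order = [], []
    for lvl in range(lvl_min, lvl_max + 1):
        idx = np.where(lvls == lvl)[0]
        rois_by_level.append(rois[idx])
        idx_order.append(idx)
    idx_order = np.concatenate(idx_order)
    # Applying this permutation to the concatenated per-level RoIs restores
    # the original input order.
    rois_idx_restore = np.argsort(idx_order).astype(np.int32)
    return rois_by_level, rois_idx_restore

rois = np.array([[0, 0, 0, 10, 10], [0, 0, 0, 300, 300]], dtype=np.float32)
lvls = np.array([2, 5])
per_level, restore = distribute_sketch(rois, lvls, lvl_min=2, lvl_max=5)
assert (np.concatenate(per_level)[restore] == rois).all()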
Example #11
    def CollectAndDistributeFpnRpnProposals(self):
        """Merge RPN proposals generated at multiple FPN levels and then
        distribute those proposals to their appropriate FPN levels. An anchor
        at one FPN level may predict an RoI that will map to another level,
        hence the need to redistribute the proposals.

        This function assumes standard blob names for input and output blobs.

        Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
                      rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
          - rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
            documentation from GenerateProposals.
          - rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
            level i; see rpn_roi_probs documentation from GenerateProposals.

        If used during training, then the input blobs will also include:
          [roidb, im_info] (see GenerateProposalLabels).

        Output blobs: [rois_fpn<min>, ..., rois_fpn<max>, rois,
                       rois_idx_restore]
          - rois_fpn<i> are the RPN proposals for FPN level i
          - rois_idx_restore is a permutation on the concatenation of all
            rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
            restored to their original order in the input blobs.

        If used during training, then the output blobs will also include:
          [labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
        """
        k_max = cfg.FPN.RPN_MAX_LEVEL
        k_min = cfg.FPN.RPN_MIN_LEVEL

        # Prepare input blobs
        rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
        score_names = [
            'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
        ]
        blobs_in = rois_names + score_names
        if self.train:
            blobs_in += ['roidb', 'im_info']
        blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
        name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
            [str(b) for b in blobs_in]
        )

        # Prepare output blobs
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train
        )
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        outputs = self.net.Python(
            CollectAndDistributeFpnRpnProposalsOp(self.train).forward
        )(blobs_in, blobs_out, name=name)

        return outputs
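The companion collect step (not shown in this example) merges the per-level RPN proposals and keeps only the highest-scoring ones before redistribution. A simplified, self-contained sketch of that merge (the real collect() unpacks caffe2 input blobs and reads the keep-count from the TRAIN/TEST config):

import numpy as np

def collect_sketch(rois_per_level, scores_per_level, post_nms_top_n=2000):
    # Concatenate proposals and objectness scores across FPN levels, then
    # keep the post_nms_top_n highest-scoring proposals overall.
    rois = np.concatenate(rois_per_level, axis=0)
    scores = np.concatenate(scores_per_level, axis=0).ravel()
    keep = np.argsort(-scores)[:post_nms_top_n]
    return rois[keep, :]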
Example #12
def get_minibatch_blob_names(is_training=True):
    # Return blob names in the order in which they are read by the data loader
    blob_names = ['data']
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN blob_names
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training)
    return blob_names
Example #13
    def GenerateProposalLabels(self, blobs_in):

        name = 'GenerateProposalLabelsOp:' + ','.join(
            [str(b) for b in blobs_in])

        # The list of blobs is not known before run-time because it depends on
        # the specific model being trained. Query the data loader to get the
        # list of output blob names.
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train)
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        self.net.Python(GenerateProposalLabelsOp().forward)(blobs_in,
                                                            blobs_out,
                                                            name=name)
        return blobs_out
Example #14
    def forward(self, inputs, outputs):

        rois, transfer_rois = collect(inputs, self._train, self._mc)

        im_info = inputs[-1].data
        im_scales = im_info[:, 2]
        roidb = blob_utils.deserialize(inputs[-2].data)

        json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
        roidb_utils.add_bbox_regression_targets(roidb)

        output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names()
        blobs = {k: [] for k in output_blob_names}
        fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb,
                                               transfer_rois)
        for i, k in enumerate(output_blob_names):
            blob_utils.py_op_copy_blob(blobs[k], outputs[i])
Example #15
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels
    blob_names = ['data']
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training
        )
    return blob_names
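The name list matters only because of its ordering: the data loader enqueues blobs in exactly this order, so a consumer can pair names with the loaded arrays positionally. A trivial illustration with made-up blob contents:

import numpy as np

blob_names = ['data', 'im_info']                   # ordering from the loader
loaded = [np.zeros((2, 3, 600, 800), dtype=np.float32),   # image batch
          np.array([[600, 800, 1.0], [600, 800, 1.0]], dtype=np.float32)]
minibatch = dict(zip(blob_names, loaded))
assert minibatch['im_info'].shape == (2, 3)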
Example #16
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels
    blob_names = ['data']
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training)
    # Include pre-calculated blobs
    blob_names += list(cfg.DATA_LOADER.EXTRA_BLOBS)
    if 'track_n_rois' in cfg.DATA_LOADER.EXTRA_BLOBS:
        blob_names += ['track_n_rois_one', 'track_n_rois_two']
    return blob_names
Example #17
 def forward(self, inputs, outputs):
     """See modeling.detector.GenerateProposalLabels for inputs/outputs
     documentation.
     """
     # During training we reuse the data loader code. We populate roidb
     # entries on the fly using the rois generated by RPN.
     # im_info: [[im_height, im_width, im_scale], ...]
     rois = inputs[0].data
     roidb = blob_utils.deserialize(inputs[1].data)
     im_info = inputs[2].data
     im_scales = im_info[:, 2]
     output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names()
     # For historical consistency with the original Faster R-CNN
     # implementation we are *not* filtering crowd proposals.
     # This choice should be investigated in the future (it likely does
     # not matter).
     json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
     blobs = {k: [] for k in output_blob_names}
     fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
     for i, k in enumerate(output_blob_names):
         blob_utils.py_op_copy_blob(blobs[k], outputs[i])
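Each of these forward passes ends by copying the assembled numpy arrays into the op's output tensors with blob_utils.py_op_copy_blob. A self-contained mock showing what that copy has to accomplish (the mock output object only imitates a reshape/data interface; this is not the actual blob_utils code):

import numpy as np

class FakeOutputBlob(object):
    # Stand-in for a caffe2 Python-op output tensor
    def __init__(self):
        self.data = np.zeros((0,), dtype=np.float32)

    def reshape(self, shape):
        self.data = np.zeros(shape, dtype=self.data.dtype)

def py_op_copy_blob_sketch(blob_in, blob_out):
    # Resize the output to match the input, then copy the values over
    blob_out.reshape(blob_in.shape)
    blob_out.data[...] = blob_in

out = FakeOutputBlob()
py_op_copy_blob_sketch(np.arange(6, dtype=np.float32).reshape(2, 3), out)
assert (out.data == np.arange(6, dtype=np.float32).reshape(2, 3)).all()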
Example #18
    def CollectAndDistributeFpnRpnProposals(self):
        """Merge RPN proposals generated at multiple FPN levels and then
        distribute those proposals to their appropriate FPN levels. An anchor
        at one FPN level may predict an RoI that will map to another level,
        hence the need to redistribute the proposals.

        (In other words: proposals from the different FPN levels are merged
        and then redistributed, because the RoI predicted by an anchor at one
        level may map to a different level.)

        This function assumes standard blob names for input and output blobs.

        Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
                      rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
          - rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
            documentation from GenerateProposals.
          - rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
            level i; see rpn_roi_probs documentation from GenerateProposals.

        If used during training, then the input blobs will also include:
          [roidb, im_info] (see GenerateProposalLabels).

        Output blobs: [rois_fpn<min>, ..., rois_fpn<max>, rois,
                       rois_idx_restore]
          - rois_fpn<i> are the RPN proposals for FPN level i
          - rois_idx_restore is a permutation on the concatenation of all
            rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
            restored to their original order in the input blobs.
            (That is, the indices of the rois belonging to each FPN level are
            gathered level by level and concatenated; that concatenation is
            what this array encodes.)

        If used during training, then the output blobs will also include:
          [labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
        """
        # e.g. k_max = 6, k_min = 2
        k_max = cfg.FPN.RPN_MAX_LEVEL
        k_min = cfg.FPN.RPN_MIN_LEVEL

        # Prepare input blobs
        # [u'rpn_rois_fpn2', u'rpn_rois_fpn3', u'rpn_rois_fpn4',
        #  u'rpn_rois_fpn5', u'rpn_rois_fpn6']
        rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]

        # [u'rpn_roi_probs_fpn2', u'rpn_roi_probs_fpn3', u'rpn_roi_probs_fpn4',
        #  u'rpn_roi_probs_fpn5', u'rpn_roi_probs_fpn6']
        score_names = [
            'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
        ]

        blobs_in = rois_names + score_names

        # Training requires additional inputs
        if self.train:
            blobs_in += ['roidb', 'im_info']
        # Convert to BlobReferences
        blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]

        name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
            [str(b) for b in blobs_in])

        # Prepare output blobs
        # [u'rois', u'labels_int32', u'bbox_targets', u'bbox_inside_weights',
        #  u'bbox_outside_weights', u'rois_fpn2', u'rois_fpn3', u'rois_fpn4',
        #  u'rois_fpn5', u'rois_idx_restore_int32']
        blobs_out = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=self.train)
        blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]

        outputs = self.net.Python(
            CollectAndDistributeFpnRpnProposalsOp(self.train).forward)(
                blobs_in, blobs_out, name=name)

        return outputs
Example #19
def train_model():
    """Model training loop."""
    logger = logging.getLogger(__name__)
    model, weights_file, start_iter, checkpoints, output_dir = create_model(
    )  #for create model
    if 'final' in checkpoints:
        # The final model was found in the output directory, so nothing to do
        return checkpoints
    if 0:
        output_dir = '/home/icubic/daily_work/code/Detectron/train/coco_2014_train_ET_PH_part/generalized_rcnn_multi/'
    #output_dir = output_dir + '_101'
    setup_model_for_training(model, weights_file, output_dir)
    training_stats = TrainingStats(model)
    uuuu = model.roi_data_loader._blobs_queue_name
    CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
    print('------------train.py')
    for cur_iter in range(start_iter, cfg.SOLVER.MAX_ITER):
        training_stats.IterTic()
        lr = model.UpdateWorkspaceLr(cur_iter,
                                     lr_policy.get_lr_at_iter(cur_iter))
        #aaa_debug = workspace.FetchBlob('gpu_0/data')
        #bbb_debug = workspace.FetchBlob('gpu_0/conv1_w')
        #ccc_debug = workspace.FetchBlob('gpu_0/'+uuuu)
        try:
            workspace.RunNet(model.net.Proto().name)

            if 0:
                #import detectron.utils.blob as blob_utils
                inputs = [workspace.FetchBlob("gpu_0/rpn_rois_fpn2"),workspace.FetchBlob("gpu_0/rpn_rois_fpn3"),workspace.FetchBlob("gpu_0/rpn_rois_fpn4"),workspace.FetchBlob("gpu_0/rpn_rois_fpn5"), \
                          workspace.FetchBlob("gpu_0/rpn_rois_fpn6"),workspace.FetchBlob("gpu_0/rpn_roi_probs_fpn2"),workspace.FetchBlob("gpu_0/rpn_roi_probs_fpn3"),workspace.FetchBlob("gpu_0/rpn_roi_probs_fpn4"), \
                          workspace.FetchBlob("gpu_0/rpn_roi_probs_fpn5"),workspace.FetchBlob("gpu_0/rpn_roi_probs_fpn6"),workspace.FetchBlob("gpu_0/roidb"),workspace.FetchBlob("gpu_0/im_info"),\
                          ]
                rois = collect(inputs, True)
                #inputs.append(workspace.FetchBlob("gpu_0/rpn_rois_fpn2"))
                im_info = inputs[-1]
                im_scales = im_info[:, 2]
                roidb = blob_utils.deserialize(inputs[-2])
                # For historical consistency with the original Faster R-CNN
                # implementation we are *not* filtering crowd proposals.
                # This choice should be investigated in the future (it likely does
                # not matter).
                json_dataset.add_proposals(roidb,
                                           rois,
                                           im_scales,
                                           crowd_thresh=0)
                roidb_utils.add_bbox_regression_targets(roidb)
                # Compute training labels for the RPN proposals; also handles
                # distributing the proposals over FPN levels
                output_blob_names = fast_rcnn_roi_data.get_fast_rcnn_blob_names(
                )
                blobs = {k: [] for k in output_blob_names}
                fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
                for i, k in enumerate(output_blob_names):
                    blob_utils.py_op_copy_blob(blobs[k], outputs[i])
            #if (np.sum(bb == 1))>0:
            #   print('cc')
        except:
            aa = workspace.FetchBlob("gpu_0/rpn_rois_fpn2")
            aaa_debug = workspace.FetchBlob('gpu_0/data')
            print('aaaaaerror')
        #print("blobs:\n{}".format(workspace.Blobs()))
        #print('train.py   aaaaaaaa_debug')
        if 1:

            aaa = workspace.FetchBlob("gpu_0/data")  # nchw
            #img = aaa[1].copy()
            # BGR HWC -> CHW  12
            #transform_img = img.swapaxes(0, 1).swapaxes(1, 2)

            #cv2.imshow("image0 ", transform_img[:, :, (2, 1, 0)])

            #cv2.waitKey(0)
            #cv2.destroyAllWindows()
            #cv2.imshow('/home/icubic/daily_work/code/Detectron/aaa.png', aaa[0])
            aaa_debug = workspace.FetchBlob('gpu_0/data')
            bbb_debug = workspace.FetchBlob('gpu_0/conv1_w')
            ccc_debug = workspace.FetchBlob('gpu_0/' + uuuu)
            ddd_debug = workspace.FetchBlob('gpu_0/roidb')
            eee_debug = workspace.FetchBlob('gpu_0/im_info')
            #print("Fetched data:\n{}".format(workspace.FetchBlob("gpu_0/data")))
        if cur_iter == start_iter:
            nu.print_net(model)
        training_stats.IterToc()
        training_stats.UpdateIterStats()
        training_stats.LogIterStats(cur_iter, lr)

        if (cur_iter + 1) % (
                CHECKPOINT_PERIOD / 4
        ) == 0 and cur_iter > start_iter:  #((cur_iter + 1) % (CHECKPOINT_PERIOD/1) == 0 and (cur_iter > start_iter and cur_iter < 50000)) or ((cur_iter + 1) % (CHECKPOINT_PERIOD/8) == 0 and cur_iter > 50000):
            checkpoints[cur_iter] = os.path.join(
                output_dir, 'model_iter_50_{}.pkl'.format(cur_iter))
            nu.save_model_to_weights_file(checkpoints[cur_iter], model)

        if cur_iter == start_iter + training_stats.LOG_PERIOD:
            # Reset the iteration timer to remove outliers from the first few
            # SGD iterations
            training_stats.ResetIterTimer()

        if np.isnan(training_stats.iter_total_loss):
            logger.critical('Loss is NaN, exiting...')
            model.roi_data_loader.shutdown()
            envu.exit_on_error()

    # Save the final model
    checkpoints['final'] = os.path.join(output_dir, 'model_final_50.pkl')
    nu.save_model_to_weights_file(checkpoints['final'], model)
    # Shutdown data loading threads
    model.roi_data_loader.shutdown()
    return checkpoints
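The checkpoint condition in the loop above saves four times per SNAPSHOT period (CHECKPOINT_PERIOD / 4). A small, self-contained illustration of that schedule, with assumed values for cfg.TRAIN.SNAPSHOT_ITERS and cfg.NUM_GPUS:

# Assumed configuration values, purely for illustration
SNAPSHOT_ITERS = 20000
NUM_GPUS = 2
start_iter = 0

CHECKPOINT_PERIOD = int(SNAPSHOT_ITERS / NUM_GPUS)   # 10000, as in the loop
save_every = CHECKPOINT_PERIOD // 4                  # the loop divides by 4
save_iters = [i for i in range(start_iter, 30000)
              if (i + 1) % save_every == 0 and i > start_iter]
print(save_iters[:4])  # [2499, 4999, 7499, 9999]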