def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
    """Append the ground-truth boxes of one image to its proposals.

    Each ground-truth box is added as a proposal whose objectness logit
    corresponds to a probability of roughly ``1 - 1e-10`` (i.e. near-certain
    objectness), so downstream sampling treats GT boxes as top proposals.

    Args:
        gt_boxes: ground-truth boxes for this image; must support ``len()``
            and be assignable to the ``proposal_boxes`` field.
        proposals: ``Instances`` carrying ``proposal_boxes`` and
            ``objectness_logits`` for this image.

    Returns:
        Instances: the original proposals followed by the ground-truth entries.
    """
    device = proposals.objectness_logits.device

    # logit(p) for p = 1 - 1e-10: an effectively "certain" objectness score.
    certain_prob = 1.0 - 1e-10
    gt_logit_value = math.log(certain_prob / (1 - certain_prob))
    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)

    gt_instances = Instances(proposals.image_size)
    gt_instances.proposal_boxes = gt_boxes
    gt_instances.objectness_logits = gt_logits

    return Instances.cat([proposals, gt_instances])
def inference(self, locations, box_cls, box_regression, centerness, image_sizes):
    """Decode detections from all feature levels and merge them per image.

    Runs single-level inference on each (locations, cls, regression,
    centerness) tuple, transposes the per-level / per-image nesting, then
    concatenates each image's detections across levels before final
    selection (NMS / top-k) over all levels.

    Args:
        locations: per-level anchor point locations.
        box_cls: per-level classification predictions.
        box_regression: per-level box regression predictions.
        centerness: per-level centerness predictions.
        image_sizes: sizes of the input images, passed to per-level inference.

    Returns:
        Per-image detection results after selection over all levels.
    """
    # Fix: the original iterated `enumerate(zip(...))` and discarded the
    # index; iterate the zip directly (also avoids the ambiguous name `l`).
    sampled_boxes = [
        self.inference_on_single_feature_map(loc, cls, reg, ctr, image_sizes)
        for loc, cls, reg, ctr in zip(locations, box_cls, box_regression, centerness)
    ]

    # Transpose: list[level][image] -> list[image][level], then concatenate
    # each image's detections from every level into a single Instances.
    per_image = [Instances.cat(levels) for levels in zip(*sampled_boxes)]
    return self.select_over_all_levels(per_image)