def __init__(self, cfg):
    super().__init__()
    self.in_features = cfg.MODEL.FCOS.IN_FEATURES

    # Loss parameters:
    # num_points_per_level is filled in later by <get_ground_truth>.
    self.num_points_per_level = None
    self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
    self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
    self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
    self.focal_loss_alpha = cfg.MODEL.FCOS.FOCAL_LOSS_ALPHA
    self.focal_loss_gamma = cfg.MODEL.FCOS.FOCAL_LOSS_GAMMA
    self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE

    # Inference parameters:
    self.score_thresh = 0.3
    self.pre_nms_thresh = cfg.MODEL.FCOS.INFERENCE_TH
    self.pre_nms_top_n = cfg.MODEL.FCOS.PRE_NMS_TOP_N
    self.nms_thresh = cfg.MODEL.FCOS.NMS_TH
    self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE
    self.min_size = 0
    self.num_classes = cfg.MODEL.FCOS.NUM_CLASSES

    # Build the backbone, then the FCOS head from the shapes of the
    # backbone features listed in IN_FEATURES.
    self.backbone = build_backbone(cfg)
    backbone_shape = self.backbone.output_shape()
    feature_shapes = [backbone_shape[f] for f in self.in_features]
    self.head = FCOSHead(cfg, feature_shapes)

    # Per-channel normalization constants, registered as buffers so they
    # follow the model across devices but are never trained.
    self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
    self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
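The pixel_mean / pixel_std buffers registered above are the (C, 1, 1)-shaped constants usually consumed by an input-normalization step before the backbone runs. A minimal sketch of that step, assuming the common Detectron2-style convention of (x - pixel_mean) / pixel_std per image; the helper name normalize_images and the example statistics are illustrative, not part of this code:

import torch

def normalize_images(images, pixel_mean, pixel_std):
    # Hypothetical helper: per-channel normalization with (C, 1, 1) buffers,
    # applied to each image tensor in a batch list before batching/padding.
    return [(img - pixel_mean) / pixel_std for img in images]

# Usage sketch with illustrative BGR statistics (not the repo's actual cfg values).
pixel_mean = torch.tensor([103.53, 116.28, 123.675]).view(-1, 1, 1)
pixel_std = torch.tensor([1.0, 1.0, 1.0]).view(-1, 1, 1)
imgs = [torch.rand(3, 512, 512) * 255, torch.rand(3, 480, 640) * 255]
normed = normalize_images(imgs, pixel_mean, pixel_std)
assert normed[0].shape == (3, 512, 512)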
def test_backbone():
    backbone = build_backbone(cfg).to(device)
    images = torch.empty((2, 3, 512, 512)).to(device)
    assert isinstance(backbone, nn.Module)
    outputs = backbone(images)
    # Drop into the debugger to inspect the per-level output feature maps.
    import ipdb
    ipdb.set_trace()
def test_backbone_conv_count():
    # Distinct name so pytest collects both backbone tests.
    backbone = build_backbone(cfg).to(device)
    images = torch.empty((2, 3, 512, 512)).to(device)
    assert isinstance(backbone, nn.Module)

    # Count the Conv2d layers in the backbone.
    num = 0
    for module in backbone.modules():
        if isinstance(module, nn.Conv2d):
            num += 1
    print(num)

    outputs = backbone(images)
    # Debugger break to inspect the backbone outputs interactively.
    import pdb
    pdb.set_trace()
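A companion check for the head could mirror the backbone tests above. The sketch below only exercises construction via the FCOSHead(cfg, feature_shapes) signature seen in __init__; it does not assume the head's forward signature. The test name is hypothetical, and it assumes the same module-level cfg/device fixtures plus an importable FCOSHead:

def test_fcos_head_build():
    # Hypothetical test: assumes cfg, device, build_backbone, nn, and FCOSHead
    # are available in this test module, as in the backbone tests above.
    backbone = build_backbone(cfg).to(device)
    backbone_shape = backbone.output_shape()
    feature_shapes = [backbone_shape[f] for f in cfg.MODEL.FCOS.IN_FEATURES]
    head = FCOSHead(cfg, feature_shapes).to(device)
    assert isinstance(head, nn.Module)
    # The head should contain at least one Conv2d layer in its tower/prediction branches.
    num_convs = sum(1 for m in head.modules() if isinstance(m, nn.Conv2d))
    assert num_convs > 0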