def __init__(self): super().__init__(model_name="RetinaNetEnsemble") self.m = retinanet_resnet50_fpn(False, trainable_backbone_layers=5, num_classes=len(Classifications) - 1) # TRANSFER BACKBONE self.a = RetinaNet() self.a.load(name=f'retinaFpnBackbone_realTestone@15000') self.m.backbone = self.a.m.backbone
def __init__(self): super().__init__(model_name="RetinaNet") self.m = retinanet_resnet50_fpn(True, trainable_backbone_layers=5) # Not my favorite code-- but it will get the job done and is nicer than the duck patch. It also allows # native transfer learning from the torchvision package. self.m.head = WholeImageRetinaHead(self.m.backbone, self.m.head) self.criterion = NLLLossOHE()
def __init__(self): super().__init__(model_name="RetinaNetFPN") self.m = retinanet_resnet50_fpn(True) # TRANSFER BACKBONE self.a = RetinaNet() self.a.load(name=f'*****@*****.**') self.m.backbone = self.a.m.backbone # TRANSFER # Not my favorite code-- but it will get the job done and is nicer than the duck patch. It also allows # native transfer learning from the torchvision package. self.m.head = TwoClassRetinaHead(self.m.backbone, self.m.head)
def __init__(
    self,
    load_model=None,
    backbone_timm_model='resnetv2_50x1_bitm',
    backbone_channel_size=32,
    trainable_backbone_layers=3,
    raise_errors=False,
    convs_for_head: int = 3,
    pretrain_retina_net=False,
    half=True,
):
    super().__init__("TimmRetinaNet")
    self.m = retinanet_resnet50_fpn(
        pretrained=pretrain_retina_net,
        trainable_backbone_layers=trainable_backbone_layers,
    )
    self.devices = config.devices
    # TODO: make this work for N GPUs, and maybe move it into the base model.
    # For now it is just an experiment.
    if len(self.devices) == 1:
        self.devices = [self.devices[0], self.devices[0]]

    if load_model:
        # Reuse the backbone from a previously trained TimmClassifier.
        model = TimmClassifier()
        model.load(load_model)
        model = model.model
        self.m.backbone = model
        self.m.backbone.out_channels = backbone_channel_size
    else:
        model = timm.create_model(backbone_timm_model, pretrained=True, features_only=True)
        self.m.backbone = model
        self.m.backbone.out_channels = backbone_channel_size

    self.m.head = MultiClassRetinaHead(
        self.m.backbone,
        self.m.head,
        [x['num_chs'] for x in model.feature_info.info],
        convs_for_head=convs_for_head,
    )
    self.raise_errors = raise_errors

    # Freeze all backbone stages except the last `trainable_backbone_layers`.
    layer_names = list(model.return_layers.keys())
    for i in range(max(0, len(model.return_layers) - trainable_backbone_layers)):
        layer = getattr(self.m.backbone, layer_names[i])
        for param in layer.parameters():
            param.requires_grad = False

    torch.cuda.empty_cache()
    self.__half = half
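The freezing loop depends on timm's `features_only` models exposing their stages through `return_layers` and their per-stage channel counts through `feature_info`, mirroring the attribute access in the constructor above. A standalone sketch of just that logic, outside the class:

import timm

backbone = timm.create_model('resnetv2_50x1_bitm', pretrained=False, features_only=True)
trainable_backbone_layers = 3

# Freeze every stage except the last `trainable_backbone_layers` ones.
stage_names = list(backbone.return_layers.keys())
for name in stage_names[: max(0, len(stage_names) - trainable_backbone_layers)]:
    for param in getattr(backbone, name).parameters():
        param.requires_grad = False

# Per-stage channel counts, as passed to the detection head above.
print([x['num_chs'] for x in backbone.feature_info.info])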
def create_model(self, backbone_name, num_classes=1, **kwargs):
    # +1 because torchvision's num_classes includes the background class.
    model = retinanet_resnet50_fpn(
        pretrained=False, num_classes=num_classes + 1, pretrained_backbone=True
    )

    # Hacked to avoid the model's builtin call to GeneralizedRCNNTransform.normalize(),
    # which is already done in augmentation.
    def noop_normalize(image):
        return image

    # Hacked to avoid the model's builtin call to GeneralizedRCNNTransform.resize(),
    # which is already done in augmentation.
    def noop_resize(image, target):
        return image, target

    # HACK!! IceVision does this too!
    model.transform.normalize = noop_normalize
    model.transform.resize = noop_resize
    return model
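A quick, illustrative way to confirm the patch behaves as intended: with the two assignments in place, `GeneralizedRCNNTransform` only pads images for batching and no longer rescales them to `min_size`/`max_size`. The check below recreates the patch inline rather than calling `create_model`:

import torch
from torchvision.models.detection import retinanet_resnet50_fpn

model = retinanet_resnet50_fpn(pretrained=False, num_classes=2)
model.transform.normalize = lambda image: image
model.transform.resize = lambda image, target: (image, target)
model.eval()

image = torch.rand(3, 600, 400)
image_list, _ = model.transform([image])
# Padded up to a multiple of 32 for the FPN, but not resized to min_size=800:
print(image_list.tensors.shape)  # expected torch.Size([1, 3, 608, 416])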
def __init__(self, dictionary=None):
    super(RetinaNet, self).__init__()
    self.dictionary = dictionary
    self.input_size = [512, 512]
    self.dummy_input = torch.zeros(1, 3, self.input_size[0], self.input_size[1])

    # `dictionary` is a list of single-entry dicts mapping class name -> weight.
    self.num_classes = len(self.dictionary)
    self.category = [v for d in self.dictionary for v in d.keys()]
    self.weight = [
        d[v] for d in self.dictionary for v in d.keys() if v in self.category
    ]

    self.model = retinanet_resnet50_fpn(
        pretrained=False,
        progress=False,
        num_classes=self.num_classes,
        pretrained_backbone=True,
        trainable_backbone_layers=None,
    )
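Instantiation then looks like the following; the class names and weights are made up for illustration, but the structure (a list of one-entry dicts) follows directly from the comprehensions above:

dictionary = [
    {'background': 1.0},
    {'person': 1.0},
    {'vehicle': 2.0},
]
model = RetinaNet(dictionary=dictionary)
print(model.num_classes)  # 3
print(model.category)     # ['background', 'person', 'vehicle']
print(model.weight)       # [1.0, 1.0, 2.0]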