def __init__(self, num_classes: int, D: int = 0, bidirectional: bool = True,
             freeze_backbone: bool = False, score_threshold: float = .1,
             weights: str = 'imagenet'):
    """Build an EfficientDet-D{D} detector.

    Parameters
    ----------
    num_classes: int
        Number of object classes predicted by the classification head.
    D: int, default 0
        Compound-scaling coefficient selecting the EfficientDet variant.
    bidirectional: bool, default True
        Use a BiFPN neck when True, a plain FPN otherwise.
    freeze_backbone: bool, default False
        If True, the EfficientNet backbone is not trainable.
    score_threshold: float, default 0.1
        Minimum confidence kept at inference time.
    weights: str, default 'imagenet'
        Weights passed through to the EfficientNet backbone builder.
    """
    super().__init__()

    self.config = config.EfficientDetCompudScaling(D=D)
    self.anchors_config = config.AnchorsConfig()
    self.num_classes = num_classes
    self.score_threshold = score_threshold

    # EfficientNet feature extractor, optionally frozen
    self.backbone = models.build_efficient_net_backbone(
        self.config.B, weights)
    self.backbone.trainable = not freeze_backbone

    # Feature-fusion neck
    if bidirectional:
        self.neck = models.BiFPN(self.config.Wbifpn, self.config.Dbifpn)
    else:
        self.neck = models.FPN(self.config.Wbifpn)

    # Prediction heads shared across pyramid levels
    self.class_head = models.RetinaNetClassifier(
        self.config.Wbifpn, self.config.Dclass, num_classes)
    self.bb_head = models.RetinaNetBBPredictor(
        self.config.Wbifpn, self.config.Dclass)

    # One anchor generator per pyramid level (P3 .. P7); the size/stride
    # tables are indexed from 0, hence the `level - 3` offset.
    self.anchors_gen = [
        utils.anchors.AnchorGenerator(
            size=self.anchors_config.sizes[level - 3],
            aspect_ratios=self.anchors_config.ratios,
            stride=self.anchors_config.strides[level - 3])
        for level in range(3, 8)
    ]
def from_pretrained(checkpoint_path: Union[Path, str],
                    num_classes: Optional[int] = None,
                    **kwargs) -> 'EfficientDet':
    """
    Instantiates an efficientdet model with pretrained weights.
    For transfer learning, the classifier head can be overwritten by a
    new randomly initialized one.

    Parameters
    ----------
    checkpoint_path: Union[Path, str]
        Checkpoint directory, or one of the published aliases
        ('D0-VOC', 'D0-VOC-FPN').
    num_classes: int, default None
        If left to None the model will have the checkpoint head,
        otherwise the head will be overwritten with a new randomly
        initialized classification head. Useful when training on
        your own dataset.

    Returns
    -------
    EfficientDet
    """
    AVAILABLE_MODELS = {
        'D0-VOC': 'gs://ml-generic-purpose-tf-models/D0-VOC',
        'D0-VOC-FPN': 'gs://ml-generic-purpose-tf-models/D0-VOC-FPN'
    }

    # TODO: Make checkpoint path also a reference to a path.
    # For example: EfficientDet.from_pretrained('voc')
    from efficientdet.utils.checkpoint import load

    # BUGFIX: index with str(checkpoint_path) — the membership test above
    # used str(), so a Path argument matching an alias used to KeyError here.
    if str(checkpoint_path) in AVAILABLE_MODELS:
        checkpoint_path = AVAILABLE_MODELS[str(checkpoint_path)]

    model, _ = load(checkpoint_path, **kwargs)

    if num_classes is not None:
        # Replace the restored head with a fresh, randomly initialized one.
        print('Loading a custom classification head...')
        model.num_classes = num_classes
        model.class_head = models.RetinaNetClassifier(
            model.config.Wbifpn, model.config.D, num_classes)

    return model
def from_pretrained(checkpoint_path: Union[Path, str],
                    num_classes: int = None,
                    **kwargs: Any) -> 'EfficientDet':
    """
    Instantiates an efficientdet model with pretrained weights.
    For transfer learning, the classifier head can be overwritten by a
    new randomly initialized one.

    Parameters
    ----------
    checkpoint_path: Union[Path, str]
        Checkpoint directory, or the name of a published weight set
        listed in `_AVAILABLE_WEIGHTS`.
    num_classes: int, default None
        If left to None the model will have the checkpoint head,
        otherwise the head will be overwritten with a new randomly
        initialized classification head. Useful when training on
        your own dataset.

    Returns
    -------
    EfficientDet
    """
    from efficientdet.utils.checkpoint import load

    # Either an existing directory or a known weight alias is accepted.
    alias = str(checkpoint_path)
    if not Path(checkpoint_path).is_dir() and alias not in _AVAILABLE_WEIGHTS:
        raise ValueError(f'Checkpoint {checkpoint_path} is not available')

    # Resolve a known alias to its real storage path.
    if alias in _AVAILABLE_WEIGHTS:
        checkpoint_path = _WEIGHTS_PATHS[alias]

    model, _ = load(checkpoint_path, **kwargs)

    if num_classes is None:
        return model

    # Swap in a fresh, randomly initialized classification head.
    print('Loading a custom classification head...')
    model.num_classes = num_classes
    model.class_head = models.RetinaNetClassifier(
        model.config.Wbifpn, model.config.D, num_classes)
    return model
def __init__(self,
             num_classes: Optional[int] = None,
             D: int = 0,
             bidirectional: bool = True,
             freeze_backbone: bool = False,
             score_threshold: float = .1,
             weights: Optional[str] = 'imagenet',
             custom_head_classifier: bool = False,
             training_mode: bool = False) -> None:
    """Build an EfficientDet-D{D} detector, optionally from a checkpoint.

    Parameters
    ----------
    num_classes: Optional[int], default None
        Number of object classes. May be left None only when loading a
        detector checkpoint, in which case the checkpoint's own class
        count is used.
    D: int, default 0
        Compound-scaling coefficient (overridden by checkpoint weights).
    bidirectional: bool, default True
        BiFPN neck when True, plain FPN otherwise (overridden by
        checkpoint weights).
    freeze_backbone: bool, default False
        Freeze every EfficientNet backbone layer.
    score_threshold: float, default 0.1
        Minimum confidence kept by the inference-time detection filter.
    weights: Optional[str], default 'imagenet'
        'imagenet', None (from scratch), or a named detector checkpoint
        present in `_AVAILABLE_WEIGHTS`.
    custom_head_classifier: bool, default False
        After restoring detector weights, replace the classification head
        with a randomly initialized one (requires num_classes).
    training_mode: bool, default False
        Stored on the instance; checkpoint restoring temporarily forces it
        to True while building.

    Raises
    ------
    ValueError
        On incoherent argument combinations or unknown weights.
    """
    super(EfficientDet, self).__init__()

    # ---- Argument coherency checks -------------------------------------
    if custom_head_classifier is True and num_classes is None:
        # BUGFIX: the message referenced a non-existent `include_top`
        # argument; the relevant flag is `custom_head_classifier`.
        raise ValueError('If custom_head_classifier is True, you must '
                         'specify the num_classes')

    if weights not in _AVAILABLE_WEIGHTS:
        raise ValueError(f'Weights {weights} not available.\n'
                         f'The available weights are '
                         f'{list(_AVAILABLE_WEIGHTS)}')

    # BUGFIX: `weights is 'imagenet'` compared object identity with a
    # string literal (unreliable, SyntaxWarning); use equality instead.
    if (weights == 'imagenet' or weights is None) and custom_head_classifier:
        raise ValueError('Custom Head does not make sense when '
                         'training the model from scratch. '
                         'Set custom_head_classifier to False or specify '
                         'other weights.')

    # If weights refer to an efficientdet checkpoint, override the
    # architecture hyperparameters with the ones stored alongside it.
    save_dir = None
    if weights != 'imagenet' and weights is not None:
        from efficientdet.utils.checkpoint import download_folder

        checkpoint_path = _WEIGHTS_PATHS[weights]
        save_dir = Path(download_folder(checkpoint_path))
        params = json.load((save_dir / 'hp.json').open())

        # If num_classes is specified it must be the same as in the
        # weights checkpoint, except if the custom head classifier is
        # set to true.
        # BUGFIX: spacing in the concatenated message ("differentfrom").
        if (num_classes is not None and not custom_head_classifier
                and num_classes != params['n_classes']):
            raise ValueError(f'Weights {weights} num classes are different '
                             'from num_classes argument, please leave it '
                             'as None or specify the correct classes')

        # BUGFIX: with num_classes left as None, construction used to fail
        # later with "You have to specify the number of classes." even
        # though the checkpoint knows its own head size — default to it.
        if num_classes is None:
            num_classes = params['n_classes']

        bidirectional = params['bidirectional']
        D = params['efficientdet']

    # ---- Model architecture --------------------------------------------
    self.config = config.EfficientDetCompudScaling(D=D)

    # ImageNet backbone weights only when explicitly requested; detector
    # checkpoints restore the backbone themselves below.
    backbone_weights = 'imagenet' if weights == 'imagenet' else None
    self.backbone = models.build_efficient_net_backbone(
        self.config.B, backbone_weights)
    for layer in self.backbone.layers:
        layer.trainable = not freeze_backbone
    self.backbone.trainable = not freeze_backbone

    # Feature-fusion neck
    if bidirectional:
        self.neck = models.BiFPN(self.config.Wbifpn,
                                 self.config.Dbifpn,
                                 prefix='bifpn/')
    else:
        self.neck = models.FPN(self.config.Wbifpn)

    # Prediction heads
    if num_classes is None:
        raise ValueError('You have to specify the number of classes.')

    self.num_classes = num_classes
    self.class_head = models.RetinaNetClassifier(
        self.config.Wbifpn,
        self.config.Dclass,
        num_classes=self.num_classes,
        prefix='class_head/')
    self.bb_head = models.RetinaNetBBPredictor(self.config.Wbifpn,
                                               self.config.Dclass,
                                               prefix='regress_head/')
    self.training_mode = training_mode

    # Inference variables, won't be used during training
    self.filter_detections = models.layers.FilterDetections(
        config.AnchorsConfig(), score_threshold)

    # ---- Checkpoint restore --------------------------------------------
    if weights is not None and weights != 'imagenet':
        # Temporarily force training mode so build() creates every weight.
        tmp = training_mode
        self.training_mode = True
        self.build([None, *self.config.input_size, 3])
        self.load_weights(str(save_dir / 'model.h5'),
                          by_name=True,
                          skip_mismatch=custom_head_classifier)
        # BUGFIX: this assignment was duplicated in the original.
        self.training_mode = tmp

        # Append a custom, randomly initialized classifier head.
        if custom_head_classifier:
            self.class_head = models.RetinaNetClassifier(
                self.config.Wbifpn,
                self.config.Dclass,
                num_classes=num_classes,
                prefix='class_head/')