def __init__(
    self,
    layout: List[Tuple[int, int]],
    num_classes: int = 80,
    in_channels: int = 3,
    stem_channels: int = 32,
    anchors: Optional[Tensor] = None,
    act_layer: Optional[nn.Module] = None,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    drop_layer: Optional[Callable[..., nn.Module]] = None,
    conv_layer: Optional[Callable[..., nn.Module]] = None,
    backbone_norm_layer: Optional[Callable[[int], nn.Module]] = None,
) -> None:
    super().__init__()

    if act_layer is None:
        act_layer = nn.LeakyReLU(0.1, inplace=True)
    if norm_layer is None:
        norm_layer = nn.BatchNorm2d
    if backbone_norm_layer is None:
        backbone_norm_layer = norm_layer
    if drop_layer is None:
        drop_layer = DropBlock2d

    # backbone (Mish activations, with its own normalization layer)
    self.backbone = DarknetBodyV4(layout, in_channels, stem_channels, 3, Mish(),
                                  backbone_norm_layer, drop_layer, conv_layer)
    # neck
    self.neck = Neck([1024, 512, 256], act_layer, norm_layer, drop_layer, conv_layer)
    # head
    self.head = Yolov4Head(num_classes, anchors, act_layer, norm_layer, drop_layer, conv_layer)

    init_module(self.neck, 'leaky_relu')
    init_module(self.head, 'leaky_relu')
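# Hedged usage sketch for the constructor above. The enclosing class is not shown
# in this snippet, so the `YOLOv4` name below is an assumption, as is the
# Darknet-53 style stage layout; adjust both to the actual definitions in this module.
import torch

layout = [(64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)]  # (out_channels, num_blocks) per stage
model = YOLOv4(layout, num_classes=80)
model.eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 608, 608))  # forward pass; output format depends on the detection head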
def cspdarknet53_mish(pretrained=False, progress=True, **kwargs):
    """Modified version of CSP-Darknet-53 from
    `"CSPNet: A New Backbone that can Enhance Learning Capability of CNN"
    <https://arxiv.org/pdf/1911.11929.pdf>`_
    with Mish as activation layer and DropBlock as regularization layer.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        torch.nn.Module: classification model
    """
    kwargs['act_layer'] = Mish()
    kwargs['drop_layer'] = DropBlock2d

    return _darknet('cspdarknet53_mish', pretrained, progress, **kwargs)
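# Hedged usage sketch: build the classifier without pretrained weights and run a
# dummy forward pass. `_darknet` above is assumed to be the module-level builder
# shared by the other constructors in this file.
import torch

model = cspdarknet53_mish(pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.rand(1, 3, 224, 224))  # expected shape (1, num_classes) for a classification head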
def __init__(self, layout, in_channels=3, stem_channels=32, num_features=1,
             act_layer=None, norm_layer=None, drop_layer=None, conv_layer=None):

    if act_layer is None:
        act_layer = Mish()
    if norm_layer is None:
        norm_layer = nn.BatchNorm2d
    if drop_layer is None:
        drop_layer = DropBlock2d

    # input channels of each CSP stage: the stem output, then each previous stage's output
    in_chans = [stem_channels] + [_layout[0] for _layout in layout[:-1]]

    super().__init__(OrderedDict([
        ('stem', nn.Sequential(*conv_sequence(in_channels, stem_channels, act_layer, norm_layer,
                                              drop_layer, conv_layer,
                                              kernel_size=3, padding=1, bias=False))),
        ('layers', nn.Sequential(*[
            CSPStage(_in_chans, out_chans, num_blocks, act_layer, norm_layer, drop_layer, conv_layer)
            for _in_chans, (out_chans, num_blocks) in zip(in_chans, layout)
        ])),
    ]))

    self.num_features = num_features
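# Hedged usage sketch for the backbone body above (assumed to be the
# `DarknetBodyV4` referenced by the detector), with an illustrative Darknet-53
# style layout of (out_channels, num_blocks) pairs.
import torch

layout = [(64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)]
body = DarknetBodyV4(layout, in_channels=3, stem_channels=32)
with torch.no_grad():
    feats = body(torch.rand(1, 3, 256, 256))  # feature map(s) produced by the CSP stages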