def __init__(self, num_classes: int, in_channels: int, n_dim: int = 2, pool_type: str = "Max"): super().__init__() self.features = torch.nn.Sequential( ConvNd(n_dim, in_channels, 64, kernel_size=11, stride=4, padding=2), torch.nn.ReLU(inplace=True), PoolingNd(pool_type, n_dim, kernel_size=3, stride=2), ConvNd(n_dim, 64, 192, kernel_size=5, padding=2), torch.nn.ReLU(inplace=True), PoolingNd(pool_type, n_dim, kernel_size=3, stride=2), ConvNd(n_dim, 192, 384, kernel_size=3, padding=1), torch.nn.ReLU(inplace=True), ConvNd(n_dim, 384, 256, kernel_size=3, padding=1), torch.nn.ReLU(inplace=True), ConvNd(n_dim, 256, 256, kernel_size=3, padding=1), torch.nn.ReLU(inplace=True), PoolingNd(pool_type, n_dim, kernel_size=3, stride=2), ) self.avgpool = PoolingNd("AdaptiveAvg", n_dim, 6) self.classifier = torch.nn.Sequential( torch.nn.Dropout(), torch.nn.Linear(256 * pow(6, n_dim), 4096), torch.nn.ReLU(inplace=True), torch.nn.Dropout(), torch.nn.Linear(4096, 4096), torch.nn.ReLU(inplace=True), torch.nn.Linear(4096, num_classes), )
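# A minimal usage sketch for the constructor above. It assumes this
# __init__ belongs to a class named ``AlexNet`` in this module and that its
# forward pass runs features -> avgpool -> flatten -> classifier; both the
# class name and the forward order are assumptions, not a confirmed API.
import torch

model = AlexNet(num_classes=10, in_channels=3, n_dim=2)
x = torch.rand(2, 3, 224, 224)  # (batch, channels, *spatial dims)
logits = model(x)               # expected shape: torch.Size([2, 10])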
def __init__(self, block: torch.nn.Module, layers: Sequence[int],
             num_classes: int, in_channels: int,
             zero_init_residual: bool = False, norm_layer: str = "Batch",
             n_dim: int = 2, start_filts: int = 64):
    """
    N-dimensional ResNet.

    Parameters
    ----------
    block : torch.nn.Module
        residual block used to build the network
    layers : Sequence[int]
        number of blocks per stage
    num_classes : int
        number of output classes
    in_channels : int
        number of input channels
    zero_init_residual : bool
        whether to zero-initialize the last normalization layer in each
        residual branch
    norm_layer : str
        type of normalization
    n_dim : int
        dimensionality of convolutions
    start_filts : int
        number of channels after the first convolution
    """
    super().__init__()
    self.start_filts = start_filts
    self.inplanes = start_filts  # int is immutable; no copy needed
    self.conv1 = ConvNd(n_dim, in_channels, self.inplanes, kernel_size=7,
                        stride=2, padding=3, bias=False)
    self.bn1 = NormNd(norm_layer, n_dim, self.inplanes)
    self.relu = torch.nn.ReLU(inplace=True)
    self.maxpool = PoolingNd("Max", n_dim=n_dim, kernel_size=3, stride=2,
                             padding=1)

    num_layers = 0
    for idx, _layers in enumerate(layers):
        stride = 1 if idx == 0 else 2
        # double the channels per stage, capped at 8 * start_filts
        planes = min(self.start_filts * pow(2, idx), self.start_filts * 8)
        _local_layer = self._make_layer(block, planes, _layers,
                                        stride=stride,
                                        norm_layer=norm_layer, n_dim=n_dim)
        setattr(self, "layer%d" % (idx + 1), _local_layer)
        num_layers += 1

    self.num_layers = num_layers
    self.avgpool = PoolingNd("AdaptiveAvg", n_dim, 1)
    self.fc = torch.nn.Linear(self.inplanes, num_classes)
    self.reset_weights(zero_init_residual=zero_init_residual)
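# A trace of the per-stage plane computation from the loop above, using the
# default start_filts=64 and a ResNet-50-style ``layers=(3, 4, 6, 3)``. The
# cap at ``start_filts * 8`` keeps the stage widths at the familiar
# 64, 128, 256, 512 even if more than four stages are requested.
start_filts = 64
for idx in range(4):
    stride = 1 if idx == 0 else 2
    planes = min(start_filts * pow(2, idx), start_filts * 8)
    print(idx + 1, planes, stride)  # -> (1, 64, 1), (2, 128, 2), (3, 256, 2), (4, 512, 2)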
def __init__(self, n_dim: int, channel: int, reduction: int = 16):
    """
    Squeeze-and-Excitation Layer
    https://arxiv.org/abs/1709.01507

    Parameters
    ----------
    n_dim : int
        dimensionality of convolutions
    channel : int
        number of input channels
    reduction : int
        channel reduction factor
    """
    super().__init__()
    self.pool = PoolingNd('AdaptiveAvg', n_dim, 1)
    # bottleneck MLP realized as two 1x1 convolutions
    self.fc = torch.nn.Sequential(
        ConvNd(n_dim, channel, channel // reduction, kernel_size=1,
               bias=False),
        torch.nn.ReLU(inplace=True),
        ConvNd(n_dim, channel // reduction, channel, kernel_size=1,
               bias=False),
        torch.nn.Sigmoid())
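# A hedged sketch of the forward pass such a layer typically implements per
# the SE paper linked above (the module's actual ``forward`` is not part of
# this excerpt): squeeze to one value per channel, excite through the
# bottleneck, then rescale the input channel-wise.
def _se_forward_sketch(layer, x):
    scale = layer.fc(layer.pool(x))  # (B, C, 1, ...) gates in (0, 1) via sigmoid
    return x * scale                 # broadcast-scale over the spatial dims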
def __init__(self, n_dim: int, in_channels: int, out_channels: int,
             pooling: bool = True, norm_layer: str = "Batch"):
    """
    Parameters
    ----------
    n_dim : int
        dimensionality of the input
    in_channels : int
        number of input channels
    out_channels : int
        number of output channels
    pooling : bool
        whether to apply pooling or not
    norm_layer : str
        the kind of normalization layer to use
    """
    super().__init__()
    self.n_dim = n_dim
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.pooling = pooling

    self.conv1 = _conv3x3(self.n_dim, self.in_channels, self.out_channels)
    self.norm1 = NormNd(norm_layer, n_dim, self.out_channels)
    self.conv2 = _conv3x3(self.n_dim, self.out_channels, self.out_channels)
    self.norm2 = NormNd(norm_layer, n_dim, self.out_channels)

    if self.pooling:
        self.pool = PoolingNd("Max", n_dim, 2)
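# A hedged sketch of how this block is typically applied (its actual
# ``forward`` is not part of this excerpt): two conv/norm/ReLU stages
# followed by the optional max pooling. F.relu stands in for the inplace
# ReLU modules used elsewhere in this file.
import torch.nn.functional as F

def _conv_block_forward_sketch(block, x):
    x = F.relu(block.norm1(block.conv1(x)))
    x = F.relu(block.norm2(block.conv2(x)))
    if block.pooling:
        x = block.pool(x)  # halves each spatial dimension
    return x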
def __init__(self, num_input_features: int, num_output_features: int,
             n_dim: int = 2, norm_type: str = "Batch"):
    """
    Transition layer between dense blocks: normalization, 1x1 convolution
    and pooling.

    Parameters
    ----------
    num_input_features : int
        number of input channels
    num_output_features : int
        number of output channels
    n_dim : int
        dimensionality of convolutions
    norm_type : str
        type of normalization
    """
    super().__init__()
    self.add_module('norm', NormNd(norm_type, n_dim, num_input_features))
    self.add_module('relu', torch.nn.ReLU(inplace=True))
    self.add_module('conv', ConvNd(n_dim, num_input_features,
                                   num_output_features, kernel_size=1,
                                   stride=1, bias=False))
    self.add_module('pool', PoolingNd("AdaptiveAvg", n_dim, output_size=2))
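# A small shape sketch, assuming this class derives from
# torch.nn.Sequential (as the torchvision transition layer does) and that
# ConvNd/NormNd/PoolingNd behave like their torch.nn 2D counterparts: the
# 1x1 conv changes the channel count, the adaptive pooling fixes the
# spatial size at 2 per dimension.
import torch

trans = _Transition(num_input_features=256, num_output_features=128, n_dim=2)
y = trans(torch.rand(1, 256, 16, 16))
# expected: torch.Size([1, 128, 2, 2])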
def make_layers(cfg: Sequence[Union[int, str]], in_channels: int,
                norm_type: str = None, n_dim: int = 2,
                pool_type: str = "Max") -> torch.nn.Sequential:
    """
    Build a VGG-style feature extractor from a layer configuration.

    Parameters
    ----------
    cfg : Sequence[Union[int, str]]
        output channels per convolution; the string 'P' inserts a
        pooling layer instead
    in_channels : int
        number of input channels
    norm_type : str
        type of normalization; ``None`` disables normalization layers
    n_dim : int
        dimensionality of convolutions
    pool_type : str
        type of pooling layer

    Returns
    -------
    torch.nn.Sequential
        the assembled feature extractor
    """
    layers = []
    for v in cfg:
        if v == 'P':
            layers += [PoolingNd(pool_type, n_dim, kernel_size=2, stride=2)]
        else:
            _layers = [ConvNd(n_dim, in_channels, v, kernel_size=3,
                              padding=1)]
            if norm_type is not None:
                _layers.append(NormNd(norm_type, n_dim, v))
            _layers.append(torch.nn.ReLU(inplace=True))
            layers += _layers
            in_channels = v
    return torch.nn.Sequential(*layers)
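# An example configuration mirroring the classic VGG-11 layout, with 'P'
# marking pooling. The cfg value is an illustration, not necessarily one
# shipped by this module, and the call assumes make_layers is reachable at
# module level (it may instead be a staticmethod on the VGG class below).
import torch

cfg_vgg11 = [64, 'P', 128, 'P', 256, 256, 'P', 512, 512, 'P', 512, 512, 'P']
features = make_layers(cfg_vgg11, in_channels=3, norm_type="Batch", n_dim=2)
out = features(torch.rand(1, 3, 224, 224))  # expected: (1, 512, 7, 7)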
def __init__(self, feature_cfg: Sequence[Union[int, str]],
             num_classes: int, in_channels: int, n_dim: int = 2,
             norm_type: str = "Batch", pool_type: str = "Max"):
    """
    N-dimensional VGG network.

    Parameters
    ----------
    feature_cfg : Sequence[Union[int, str]]
        layer configuration passed to :meth:`make_layers`
    num_classes : int
        number of output classes
    in_channels : int
        number of input channels
    n_dim : int
        dimensionality of convolutions
    norm_type : str
        type of normalization
    pool_type : str
        type of pooling layer
    """
    super().__init__()
    self.features = self.make_layers(feature_cfg,
                                     in_channels=in_channels,
                                     norm_type=norm_type, n_dim=n_dim,
                                     pool_type=pool_type)
    # pool to a fixed spatial size of 7 per dimension so the classifier
    # input size is independent of the input resolution
    self.avgpool = PoolingNd("AdaptiveAvg", n_dim, 7)
    self.classifier = torch.nn.Sequential(
        torch.nn.Linear(512 * pow(7, n_dim), 4096),
        torch.nn.ReLU(inplace=True),
        torch.nn.Dropout(),
        torch.nn.Linear(4096, 4096),
        torch.nn.ReLU(inplace=True),
        torch.nn.Dropout(),
        torch.nn.Linear(4096, num_classes),
    )
    self.reset_weights()
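# A minimal usage sketch; the class name ``VGG`` and the forward order
# (features -> avgpool -> flatten -> classifier) are assumptions based on
# this __init__, not a confirmed API. Note the classifier expects the last
# conv entry in feature_cfg to be 512.
import torch

model = VGG(feature_cfg=[64, 'P', 128, 'P', 256, 256, 'P',
                         512, 512, 'P', 512, 512, 'P'],
            num_classes=10, in_channels=3, n_dim=2)
logits = model(torch.rand(2, 3, 224, 224))  # expected: (2, 10)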
def __init__(self, num_classes: int, in_channels: int,
             growth_rate: int = 32,
             block_config: Sequence[int] = (6, 12, 24, 16),
             num_init_features: int = 64, bn_size: int = 4,
             drop_rate: float = 0, n_dim: int = 2,
             pool_type: str = "Max", norm_type: str = "Batch"):
    """
    N-dimensional DenseNet.

    Parameters
    ----------
    num_classes : int
        number of output classes
    in_channels : int
        number of input channels
    growth_rate : int
        number of channels each dense layer adds
    block_config : Sequence[int]
        number of dense layers per block
    num_init_features : int
        number of channels after the first convolution
    bn_size : int
        bottleneck size multiplier inside the dense layers
    drop_rate : float
        dropout rate after each dense layer
    n_dim : int
        dimensionality of convolutions
    pool_type : str
        type of pooling layer
    norm_type : str
        type of normalization
    """
    super().__init__()

    # First convolution
    self.features = torch.nn.Sequential(OrderedDict([
        ('conv0', ConvNd(n_dim, in_channels, num_init_features,
                         kernel_size=7, stride=2, padding=3, bias=False)),
        ('norm0', NormNd(norm_type, n_dim, num_init_features)),
        ('relu0', torch.nn.ReLU(inplace=True)),
        ('pool0', PoolingNd(pool_type, n_dim, kernel_size=3, stride=2,
                            padding=1)),
    ]))

    # Each denseblock
    num_features = num_init_features
    for i, num_layers in enumerate(block_config):
        block = _DenseBlock(num_layers=num_layers,
                            num_input_features=num_features,
                            bn_size=bn_size, growth_rate=growth_rate,
                            drop_rate=drop_rate, n_dim=n_dim,
                            norm_type=norm_type)
        self.features.add_module('denseblock%d' % (i + 1), block)
        num_features = num_features + num_layers * growth_rate
        if i != len(block_config) - 1:
            # halve the number of channels between dense blocks
            trans = _Transition(num_input_features=num_features,
                                num_output_features=num_features // 2,
                                n_dim=n_dim, norm_type=norm_type)
            self.features.add_module('transition%d' % (i + 1), trans)
            num_features = num_features // 2

    # Final norm
    self.features.add_module('norm5', NormNd(norm_type, n_dim,
                                             num_features))
    self.pool = PoolingNd("AdaptiveAvg", n_dim, 1)

    # Linear layer
    self.classifier = torch.nn.Linear(num_features, num_classes)

    # Official init from torch repo.
    for m in self.modules():
        if isinstance(m, ConvNd):
            torch.nn.init.kaiming_normal_(m.conv.weight)
        elif isinstance(m, NormNd):
            if hasattr(m.norm, "weight") and m.norm.weight is not None:
                torch.nn.init.constant_(m.norm.weight, 1)
            if hasattr(m.norm, "bias") and m.norm.bias is not None:
                torch.nn.init.constant_(m.norm.bias, 0)
        elif isinstance(m, torch.nn.Linear):
            torch.nn.init.constant_(m.bias, 0)
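# The channel bookkeeping from the loop above, traced for the defaults
# (growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64),
# i.e. the DenseNet-121 configuration:
num_features = 64
for i, num_layers in enumerate((6, 12, 24, 16)):
    num_features += num_layers * 32  # each dense layer adds growth_rate channels
    if i != 3:
        num_features //= 2           # transition halves the channels
print(num_features)  # -> 1024, the in_features of the final classifier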
def __init__(self, block: torch.nn.Module, layers: Sequence[int],
             num_classes: int, in_channels: int, cardinality: int,
             width: int = 4, start_filts: int = 64,
             start_mode: str = '7x7', n_dim: int = 2,
             norm_layer: str = 'Batch'):
    """
    Parameters
    ----------
    block : torch.nn.Module
        ResNeXt block used to build the network
    layers : Sequence[int]
        defines how many blocks should be used in each stage
    num_classes : int
        number of classes
    in_channels : int
        number of input channels
    cardinality : int
        cardinality (number of groups)
    width : int
        width of the ResNeXt block
    start_filts : int
        number of start filters (number of channels after the first
        convolution)
    start_mode : str
        either '7x7' for the default configuration (a single 7x7
        convolution) or '3x3' for three consecutive 3x3 convolutions as
        proposed in https://arxiv.org/abs/1812.01187
    n_dim : int
        dimensionality of convolutions
    norm_layer : str
        type of normalization
    """
    super().__init__()
    self._cardinality = cardinality
    self._width = width
    self._start_filts = start_filts
    self._num_classes = num_classes
    self._block = block
    self._block.start_filts = start_filts
    self._layers = layers

    self.inplanes = self._start_filts  # int is immutable; no copy needed
    self.conv1 = _StartConv(n_dim, norm_layer, in_channels, start_filts,
                            start_mode)
    self.maxpool = PoolingNd("Max", n_dim=n_dim, kernel_size=3, stride=2,
                             padding=1)

    for idx, _layers in enumerate(layers):
        stride = 1 if idx == 0 else 2
        planes = self._start_filts * pow(2, idx)
        _local_layer = self._make_layer(block, _layers, self.inplanes,
                                        planes, norm_layer=norm_layer,
                                        n_dim=n_dim, pool_stride=stride)
        setattr(self, "C%d" % (idx + 1), _local_layer)
        self.inplanes = planes * block.expansion

    self._num_layers = len(layers)
    self.avgpool = PoolingNd("AdaptiveAvg", n_dim, 1)
    self.fc = torch.nn.Linear(self.inplanes, num_classes)
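# A minimal usage sketch for a ResNeXt-50 32x4d-style configuration
# (layers=[3, 4, 6, 3], cardinality=32, width=4). The class and block names
# ``ResNeXt`` and ``ResNeXtBlock`` are assumptions; substitute whatever
# this module actually exports, and note the block must provide an
# ``expansion`` attribute as used in the loop above.
import torch

model = ResNeXt(block=ResNeXtBlock, layers=[3, 4, 6, 3], num_classes=10,
                in_channels=3, cardinality=32, width=4)
logits = model(torch.rand(2, 3, 224, 224))  # expected: (2, 10)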