def __init__(self, num_classes: int = 5) -> None:
    super(PoseNet, self).__init__()
    self.conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.MaxPool_3a_3x3 = nn.MaxPool2d(3, stride=2)
    self.conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.MaxPool_5a_3x3 = nn.MaxPool2d(kernel_size=3, stride=2)  # stem
    self.Mixed_5b = self._generate_inception_module(192, 320, 1, Mixed_5b)
    self.block35 = self._generate_inception_module(320, 320, 1, block35)
    self.conv_ls1 = BasicConv2d(320, 320, kernel_size=3, stride=2, padding=1)
    self.MaxPool_3x3_ls1 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.Mixed_6a = self._generate_inception_module(320, 1088, 1, Mixed_6a)
    self.block17 = self._generate_inception_module(1088, 1088, 1, block17)
    self.conv_ls2 = BasicConv2d(1088, 1088, kernel_size=3, stride=2)
    self.Mixed_7a = self._generate_inception_module(1088, 2080, 1, Mixed_7a)
    self.block8 = self._generate_inception_module(2080, 2080, 1, block8)
    self.conv_ls3 = BasicConv2d(3488, 2080, kernel_size=1)
    self.Conv2d_7b_1x1 = BasicConv2d(2080, 1536, kernel_size=1)
    self.AvgPool_1a_8x8 = nn.AvgPool2d(kernel_size=[8, 8])
    self.dense = nn.Linear(1536, num_classes)
    self.relu = nn.ReLU(inplace=True)
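# Hedged sketch, not taken from the source: conv_ls3's 3488 in-channels equal
# 320 + 1088 + 2080, which suggests the forward pass concatenates the
# (downsampled) block35, block17, and block8 feature maps before the 1x1
# fusion conv. Channel arithmetic only; the 8x8 spatial size is an assumption.
import torch

x35 = torch.randn(1, 320, 8, 8)
x17 = torch.randn(1, 1088, 8, 8)
x8 = torch.randn(1, 2080, 8, 8)
fused = torch.cat([x35, x17, x8], dim=1)  # -> (1, 3488, 8, 8), matching conv_ls3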
def __init__(
    self,
    in_channels: int,
    conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
    super(InceptionE, self).__init__()
    if conv_block is None:
        conv_block = BasicConv2d
    self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
    self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
    self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
    self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
    self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
    self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
    self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
    self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
    self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
    self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
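# Hedged forward sketch mirroring torchvision's InceptionE (assumes torch is
# imported): the 2a/2b and 3a/3b pairs apply 1x3 and 3x1 convs to the same
# input and concatenate them, the "expanded filter bank" trick from Inception v3.
def forward(self, x):
    branch1x1 = self.branch1x1(x)
    branch3x3 = self.branch3x3_1(x)
    branch3x3 = torch.cat([self.branch3x3_2a(branch3x3),
                           self.branch3x3_2b(branch3x3)], 1)
    branch3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
    branch3x3dbl = torch.cat([self.branch3x3dbl_3a(branch3x3dbl),
                              self.branch3x3dbl_3b(branch3x3dbl)], 1)
    branch_pool = self.branch_pool(self.avg_pool(x))
    # 320 + 768 + 768 + 192 = 2048 output channels
    return torch.cat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 1)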
def __init__(self, in_planes, out_planes, stride, groups):
    super(Bottleneck, self).__init__()
    self.stride = stride
    mid_planes = out_planes // 4
    # The first stage (in_planes == 24) uses an ungrouped 1x1 conv
    g = 1 if in_planes == 24 else groups
    self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
    self.bn1 = nn.BatchNorm2d(mid_planes)
    self.shuffle1 = ShuffleBlock(groups=g)
    # Depthwise 3x3 conv (groups == channels)
    self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride,
                           padding=1, groups=mid_planes, bias=False)
    self.bn2 = nn.BatchNorm2d(mid_planes)
    self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
    self.bn3 = nn.BatchNorm2d(out_planes)
    self.shortcut = nn.Sequential()
    if stride == 2:
        self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
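# Minimal self-contained sketch of what ShuffleBlock above is assumed to do:
# reshape to (N, g, C/g, H, W), swap the group axes, and flatten back, so the
# next grouped 1x1 conv sees channels from every group.
import torch

def channel_shuffle(x: torch.Tensor, groups: int) -> torch.Tensor:
    n, c, h, w = x.size()
    return x.view(n, groups, c // groups, h, w).transpose(1, 2).reshape(n, c, h, w)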
def __init__(self):
    super(Discriminator, self).__init__()
    self.d1 = Down2d(5, 32, (3, 9), (1, 1), (1, 4))
    self.d2 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
    self.d3 = Down2d(36, 32, (3, 8), (1, 2), (1, 3))
    self.d4 = Down2d(36, 32, (3, 6), (1, 2), (1, 2))
    self.conv = nn.Conv2d(36, 1, (36, 5), (36, 1), (0, 2))
    self.pool = nn.AvgPool2d((1, 64))
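# Speculative forward sketch (StarGAN-VC pattern, not confirmed by the source;
# assumes torch is imported): the in-channel jumps (5 = 1 + 4, 36 = 32 + 4)
# suggest a 4-channel broadcast domain-code map is re-concatenated before
# every layer.
def forward(self, x, c):
    c = c.view(c.size(0), c.size(1), 1, 1)
    for layer in (self.d1, self.d2, self.d3, self.d4, self.conv):
        c_map = c.repeat(1, 1, x.size(2), x.size(3))
        x = layer(torch.cat([x, c_map], dim=1))
    return self.pool(x)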
def __init__(self):
    super(DomainClassifier, self).__init__()
    self.main = nn.Sequential(
        Down2d(1, 8, (4, 4), (2, 2), (5, 1)),
        Down2d(8, 16, (4, 4), (2, 2), (1, 1)),
        Down2d(16, 32, (4, 4), (2, 2), (0, 1)),
        Down2d(32, 16, (3, 4), (1, 2), (1, 1)),
        nn.Conv2d(16, 4, (1, 4), (1, 2), (0, 1)),
        nn.AvgPool2d((1, 16)),
        nn.LogSoftmax(dim=1),  # explicit dim over the 4 class channels
    )
def __init__(self, spatial_type="avg", spatial_size=7): super(SimpleSpatialModule, self).__init__() assert spatial_type in ["avg"] self.spatial_type = spatial_type self.spatial_size = (spatial_size if not isinstance(spatial_size, int) else (spatial_size, spatial_size)) if self.spatial_type == "avg": self.op = nn.AvgPool2d(self.spatial_size, stride=1, padding=0)
def __init__(self, n_classes):
    super(LeNet5, self).__init__()
    self.feature_extractor = nn.Sequential(
        nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1),
        nn.Tanh(),
        nn.AvgPool2d(kernel_size=2),
        nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
        nn.Tanh(),
        nn.AvgPool2d(kernel_size=2),
        nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
        nn.Tanh(),
    )
    self.classifier = nn.Sequential(
        nn.Linear(in_features=120, out_features=84),
        nn.Tanh(),
        nn.Linear(in_features=84, out_features=n_classes),
    )
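# Hedged usage sketch: classic LeNet-5 sizing assumes 32x32 grayscale inputs;
# the 120-channel map must be flattened before the classifier (presumably done
# in the class's forward()).
import torch

model = LeNet5(n_classes=10)
feats = model.feature_extractor(torch.randn(1, 1, 32, 32))  # -> (1, 120, 1, 1)
logits = model.classifier(torch.flatten(feats, 1))          # -> (1, 10)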
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [
                nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                nn.BatchNorm2d(x),
                nn.ReLU(inplace=True),
            ]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)
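# Example cfg (an assumption, following the common VGG-16 layout for this
# helper): integers are conv output widths, 'M' inserts a 2x2 max pool.
cfg_vgg16 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']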
def __init__(self, num_input_features: int, num_output_features: int) -> None:
    super(_Transition, self).__init__()
    self.add_module("norm", nn.BatchNorm2d(num_input_features))
    self.add_module("relu", nn.ReLU(inplace=True))
    self.add_module(
        "conv",
        nn.Conv2d(num_input_features, num_output_features,
                  kernel_size=1, stride=1, bias=False),
    )
    self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
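# Hedged shape check, assuming _Transition subclasses nn.Sequential as in
# torchvision's DenseNet: the 1x1 conv cuts channels and the stride-2 average
# pool halves the spatial resolution.
import torch

trans = _Transition(num_input_features=256, num_output_features=128)
out = trans(torch.randn(1, 256, 56, 56))  # -> (1, 128, 28, 28)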
def __init__(
    self,
    in_channels: int,
    num_classes: int,
    conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
    super(InceptionAux, self).__init__()
    if conv_block is None:
        conv_block = BasicConv2d
    self.conv0 = conv_block(in_channels, 128, kernel_size=1)
    self.conv1 = conv_block(128, 768, kernel_size=5)
    self.conv1.stddev = 0.01  # type: ignore[assignment]
    self.fc = nn.Linear(768, num_classes)
    self.fc.stddev = 0.001  # type: ignore[assignment]
    self.avg_pool = nn.AvgPool2d(kernel_size=5, stride=3)
    self.adaptive_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
def __init__(
    self,
    in_channels: int,
    channels_7x7: int,
    conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
    super(InceptionC, self).__init__()
    if conv_block is None:
        conv_block = BasicConv2d
    self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
    c7 = channels_7x7
    self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
    self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
    self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
    self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
    self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
    self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
    self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
    self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
    self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
    self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
def __init__(self, input_channels):
    super().__init__()
    self.Branch_2 = nn.Sequential(
        BasicConv2d(input_channels, 64, kernel_size=1),
        BasicConv2d(64, 96, kernel_size=3, padding=1),
        BasicConv2d(96, 96, kernel_size=3, padding=1),
    )
    self.Branch_1 = nn.Sequential(
        BasicConv2d(input_channels, 48, kernel_size=1, padding=1),
        BasicConv2d(48, 64, kernel_size=5, padding=1),
    )
    self.Branch_0 = BasicConv2d(input_channels, 96, kernel_size=1)
    self.Branch_3 = nn.Sequential(
        nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
        BasicConv2d(input_channels, 64, kernel_size=1),
    )
def __init__(
    self,
    in_channels: int,
    pool_features: int,
    conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
    super(InceptionA, self).__init__()
    if conv_block is None:
        conv_block = BasicConv2d
    self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
    self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
    self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
    self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
    self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
    self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
    self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
    self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
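# Hedged forward sketch mirroring torchvision's InceptionA (assumes torch is
# imported): every branch preserves HxW, so the outputs concatenate along
# channels to 64 + 64 + 96 + pool_features.
def forward(self, x):
    branch1x1 = self.branch1x1(x)
    branch5x5 = self.branch5x5_2(self.branch5x5_1(x))
    branch3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
    branch_pool = self.branch_pool(self.avg_pool(x))
    return torch.cat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 1)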
def __init__(self):
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(True),
    )
    self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
    self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
    self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
    self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
    self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
    self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
    self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
    self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)
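# Hedged usage sketch: the stride-1 stem and final 8x8 average pool size this
# GoogLeNet variant for 32x32 inputs (e.g., CIFAR-10).
import torch

net = GoogLeNet()
logits = net(torch.randn(1, 3, 32, 32))  # -> (1, 10), assuming the usual forward()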
def __init__(
    self,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    num_classes: int = 1000,
    zero_init_residual: bool = False,
    groups: int = 1,
    width_per_group: int = 64,
    replace_stride_with_dilation: Optional[List[bool]] = None,
    norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
    super(ResNet, self).__init__()
    if norm_layer is None:
        norm_layer = nn.BatchNorm2d
    self._norm_layer = norm_layer
    self.inplanes = 64
    self.dilation = 1
    if replace_stride_with_dilation is None:
        # each element in the tuple indicates if we should replace
        # the 2x2 stride with a dilated convolution instead
        replace_stride_with_dilation = [False, False, False]
    if len(replace_stride_with_dilation) != 3:
        raise ValueError("replace_stride_with_dilation should be None "
                         "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
    self.groups = groups
    self.base_width = width_per_group
    self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = norm_layer(self.inplanes)
    self.relu = nn.ReLU()
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                   dilate=replace_stride_with_dilation[0])
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                   dilate=replace_stride_with_dilation[1])
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                   dilate=replace_stride_with_dilation[2])
    self.avgpool = nn.AvgPool2d((7, 7))
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    # Zero-initialize the last BN in each residual branch,
    # so that the residual branch starts with zeros, and each
    # residual block behaves like an identity.
    # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
    if zero_init_residual:
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
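# Hedged construction sketch using the standard per-stage block counts; note
# that the fixed AvgPool2d((7, 7)) ties this variant to 224x224 inputs
# (7x7 final feature maps).
resnet18 = ResNet(BasicBlock, [2, 2, 2, 2])
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3])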
def __init__(
    self,
    levels,
    channels,
    num_classes=1000,
    block=BasicBlock,
    residual_root=False,
    return_levels=False,
    pool_size=7,
    linear_root=False,
):
    super(DLA, self).__init__()
    self.channels = channels
    self.return_levels = return_levels
    self.num_classes = num_classes
    self.base_layer = nn.Sequential(
        nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
        BatchNorm(channels[0]),
        nn.ReLU(inplace=True),
    )
    self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
    self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)
    self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                       level_root=False, root_residual=residual_root)
    self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                       level_root=True, root_residual=residual_root)
    self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                       level_root=True, root_residual=residual_root)
    self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                       level_root=True, root_residual=residual_root)
    self.avgpool = nn.AvgPool2d(pool_size)
    self.fc = nn.Conv2d(channels[-1], num_classes,
                        kernel_size=1, stride=1, padding=0, bias=True)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / n))
        elif isinstance(m, BatchNorm):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
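# Hedged construction sketch with the DLA-34 configuration from the reference
# implementation (the per-stage levels and channels here are assumptions):
dla34 = DLA(levels=[1, 1, 1, 2, 2, 1],
            channels=[16, 32, 64, 128, 256, 512],
            block=BasicBlock)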