def __init__(self, name, in_channels, out_channels, stride, dilation):
    super(BasicBlock, self).__init__()
    self.g_name = name
    self.in_channels = in_channels
    self.stride = stride
    channels = out_channels // 2
    if stride == 1:
        assert in_channels == out_channels
        # Stride-1 unit: the input is split in half in forward(), so this
        # branch only processes `channels` features.
        self.conv = nn.Sequential(
            slim.conv_bn_relu(name + '/conv1', channels, channels, 1),
            slim.conv_bn(name + '/conv2', channels, channels, 3,
                         stride=stride, dilation=dilation, padding=dilation,
                         groups=channels),
            slim.conv_bn_relu(name + '/conv3', channels, channels, 1),
        )
    else:
        # Downsampling unit: the main branch reduces to half of out_channels.
        self.conv = nn.Sequential(
            slim.conv_bn_relu(name + '/conv1', in_channels, channels, 1),
            slim.conv_bn(name + '/conv2', channels, channels, 3,
                         stride=stride, dilation=dilation, padding=dilation,
                         groups=channels),
            slim.conv_bn_relu(name + '/conv3', channels, channels, 1),
        )
        # Shortcut branch: depthwise conv followed by a pointwise projection.
        self.conv0 = nn.Sequential(
            slim.conv_bn(name + '/conv4', in_channels, in_channels, 3,
                         stride=stride, dilation=dilation, padding=dilation,
                         groups=in_channels),
            slim.conv_bn_relu(name + '/conv5', in_channels, channels, 1),
        )
    self.shuffle = slim.channel_shuffle(name + '/shuffle', 2)
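# Sketch of the forward pass for this ShuffleNetV2-style block (the actual
# forward() is not part of this excerpt, so this is a reconstruction from the
# modules built above, not the repo's verbatim code). Assumes `torch` is
# imported at module top alongside `nn` and `slim`.
def forward(self, x):
    if self.stride == 1:
        # Split the input in half along channels: one half passes through
        # unchanged, the other through the bottleneck branch.
        half = x.shape[1] // 2
        x = torch.cat((x[:, :half], self.conv(x[:, half:])), dim=1)
    else:
        # Downsampling unit: both branches see the full input and each emits
        # out_channels // 2 features, so the concatenation has out_channels.
        x = torch.cat((self.conv0(x), self.conv(x)), dim=1)
    return self.shuffle(x)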
def __init__(self, num_classes, width_multiplier, image_h, image_w):
    super(Network, self).__init__()
    # Per-stage channel widths, keyed by width multiplier.
    # NOTE: the four-entry configs (0.02, 0.04, 0.05, 0.1) are incompatible
    # with the width_config[4] indexing below and will raise IndexError;
    # only the five-entry configs are usable as written.
    width_config = {
        0.02: (6, 12, 24, 128),
        0.04: (12, 24, 48, 256),
        0.05: (12, 24, 48, 512),
        0.1: (24, 48, 96, 512),
        0.125: (12, 24, 48, 96, 1024),
        0.25: (24, 48, 96, 192, 1024),
        0.041: (12, 24, 48, 48, 256),
        0.031: (8, 16, 32, 32, 256),
        0.021: (6, 12, 24, 24, 128),
        0.022: (6, 12, 24, 24, 128),
        0.015: (4, 8, 16, 16, 96),
    }
    width_config = width_config[width_multiplier]
    self.num_classes = num_classes
    # in_channels = 24
    in_channels = width_config[0]
    # outputs, stride, dilation, blocks, type
    self.network_config = [
        g_name('data/bn', nn.BatchNorm2d(3)),
        slim.conv_bn_relu('stage1/conv', 3, in_channels, 3, 2, 1),
        # g_name('stage1/pool', nn.MaxPool2d(3, 2, 1)),
        g_name('stage1/pool', nn.MaxPool2d(3, 2, 0, ceil_mode=True)),
        (width_config[1], 2, 1, 2, 'b'),
        (width_config[2], 2, 1, 2, 'b'),  # x16
        (width_config[3], 2, 1, 2, 'b'),  # x32
        slim.conv_bn_relu('conv5', width_config[3], width_config[4], 1),
        # g_name('pool', nn.AvgPool2d(7)),
        # g_name('pool', nn.AvgPool2d((2, 2))),
        # g_name('fc', nn.Conv2d(width_config[3], self.num_classes,
        #                        (int(image_h / 32), int(image_w / 32)))),
        g_name('fc', nn.Conv2d(width_config[4], self.num_classes, 1))
        # slim.flatten('flatten', 1),
        # g_name('fc', nn.Linear(width_config[4], self.num_classes, bias=False))
    ]
    self.network = []
    for i, config in enumerate(self.network_config):
        if isinstance(config, nn.Module):
            self.network.append(config)
            continue
        out_channels, stride, dilation, num_blocks, stage_type = config
        stage_prefix = 'stage_{}'.format(i - 1)
        blocks = [BasicBlock(stage_prefix + '_1', in_channels, out_channels,
                             stride, dilation)]
        for j in range(1, num_blocks):  # `j` avoids shadowing the outer index
            blocks.append(BasicBlock(stage_prefix + '_{}'.format(j + 1),
                                     out_channels, out_channels, 1, dilation))
        self.network += [nn.Sequential(*blocks)]
        in_channels = out_channels
    self.network = nn.Sequential(*self.network)
    # Kaiming-uniform init for every conv/linear layer.
    for name, m in self.named_modules():
        if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_uniform_(m.weight, mode='fan_in')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
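# Hypothetical usage sketch for this variant. Assumptions: the class is the
# `Network` above, its forward pass is simply `self.network(x)`, and torch is
# imported. Only the five-entry width configs work here; 0.25 is one of them.
# With no global pooling in the config, the 1x1 `fc` head keeps the 7x7 grid
# of a 224x224 input (224 -> 112 -> 56 -> 28 -> 14 -> 7).
#
#   net = Network(num_classes=1000, width_multiplier=0.25, image_h=224, image_w=224)
#   logits = net.network(torch.randn(1, 3, 224, 224))  # expected: (1, 1000, 7, 7)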
def __init__(self, in_channels, num_classes, width_multiplier):
    super(Network, self).__init__()
    width_config = {
        0.25: (24, 48, 96, 512, 512, 512),
        # 0.33: (32, 64, 128, 512),
        # 0.5: (48, 96, 192, 1024),
        # 1.0: (116, 232, 464, 1024),
        # 1.5: (176, 352, 704, 1024),
        # 2.0: (244, 488, 976, 2048),
    }
    width_config = width_config[width_multiplier]
    self.num_classes = num_classes
    first_channels = 24
    # outputs, stride, dilation, blocks
    self.network_config = [
        g_name('data/bn', nn.BatchNorm2d(in_channels)),
        slim.conv_bn_relu('stage1/conv', in_channels, first_channels, 3, 2, 1),
        g_name('stage1/pool', nn.MaxPool2d(3, 2, 0, ceil_mode=True)),
        (width_config[0], 2, 1, 4),
        (width_config[1], 2, 1, 8),
        (width_config[2], 2, 1, 4),
        (width_config[3], 2, 1, 4),
        (width_config[4], 2, 1, 4),
        (width_config[4], 2, 1, 4),
        slim.conv_bn_relu('conv5', width_config[-2], width_config[-1], 1),
        g_name('pool', nn.AdaptiveAvgPool2d(1)),
        g_name('fc', nn.Conv2d(width_config[-1], self.num_classes, 1)),
    ]
    self.network = []
    for i, config in enumerate(self.network_config):
        if isinstance(config, nn.Module):
            self.network.append(config)
            continue
        out_channels, stride, dilation, num_blocks = config
        stage_prefix = 'stage_{}'.format(i - 1)
        # `first_channels` doubles as the running input width across stages.
        blocks = [BasicBlock(stage_prefix + '_1', first_channels, out_channels,
                             stride, dilation)]
        for j in range(1, num_blocks):  # `j` avoids shadowing the outer index
            blocks.append(BasicBlock(stage_prefix + '_{}'.format(j + 1),
                                     out_channels, out_channels, 1, dilation))
        self.network += [nn.Sequential(*blocks)]
        first_channels = out_channels
    self.network = nn.Sequential(*self.network)
    for name, m in self.named_modules():
        if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_uniform_(m.weight, mode='fan_in')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
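# Rough shape check for this deeper variant (illustrative, assuming the
# forward pass is `self.network(x)`): the stem contributes stride 4 and the
# six stages stride 2 each, for an overall stride of 4 * 2**6 = 256;
# AdaptiveAvgPool2d(1) then collapses whatever grid remains.
#
#   net = Network(in_channels=3, num_classes=10, width_multiplier=0.25)
#   y = net.network(torch.randn(1, 3, 512, 512))  # expected: (1, 10, 1, 1)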
def __init__(self, num_classes, width_multiplier):
    super(Network, self).__init__()
    assert width_multiplier in (0.25, 0.5, 1.0, 1.5, 2.0)
    self.num_classes = num_classes
    in_channels = 24
    width_config = {
        0.25: (24, 48, 96, 512),
        0.5: (48, 96, 192, 1024),
        1.0: (116, 232, 464, 1024),
        1.5: (176, 352, 704, 1024),
        2.0: (244, 488, 976, 2048),
    }
    # outputs, stride, dilation, blocks, type
    self.network_config = [
        g_name('data/bn', nn.BatchNorm2d(3)),
        slim.conv_bn_relu('stage1/conv', 3, in_channels, 3, 2, 1),
        # g_name('stage1/pool', nn.MaxPool2d(3, 2, 1)),
        g_name('stage1/pool', nn.MaxPool2d(3, 2, 0, ceil_mode=True)),
        (width_config[width_multiplier][0], 2, 1, 4, 'b'),
        (width_config[width_multiplier][1], 2, 1, 8, 'b'),  # x16
        (width_config[width_multiplier][2], 2, 1, 4, 'b'),  # x32
        slim.conv_bn_relu('conv5', width_config[width_multiplier][2],
                          width_config[width_multiplier][3], 1),
        g_name('pool', nn.AvgPool2d(7, 1)),
        g_name('fc', nn.Conv2d(width_config[width_multiplier][3],
                               self.num_classes, 1)),
    ]
    self.network = []
    for i, config in enumerate(self.network_config):
        if isinstance(config, nn.Module):
            self.network.append(config)
            continue
        out_channels, stride, dilation, num_blocks, stage_type = config
        stage_prefix = 'stage_{}'.format(i - 1)
        blocks = [BasicBlock(stage_prefix + '_1', in_channels, out_channels,
                             stride, dilation)]
        for j in range(1, num_blocks):  # `j` avoids shadowing the outer index
            blocks.append(BasicBlock(stage_prefix + '_{}'.format(j + 1),
                                     out_channels, out_channels, 1, dilation))
        self.network += [nn.Sequential(*blocks)]
        in_channels = out_channels
    self.network = nn.Sequential(*self.network)
    # Kaiming-normal init for every conv/linear layer.
    for name, m in self.named_modules():
        if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
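# Hypothetical usage sketch. The fixed AvgPool2d(7, 1) head means the feature
# map entering the pool should be exactly 7x7, i.e. 224x224 inputs (five
# stride-2 reductions: 224 -> 112 -> 56 -> 28 -> 14 -> 7). Assumes the forward
# pass is `self.network(x)` and torch is imported.
#
#   net = Network(num_classes=1000, width_multiplier=1.0)
#   y = net.network(torch.randn(1, 3, 224, 224))  # expected: (1, 1000, 1, 1)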
def __init__(self, num_classes, width_multiplier):
    super(Network, self).__init__()
    assert width_multiplier <= 4.0
    self.num_classes = num_classes
    in_channels = 24
    # outputs, stride, dilation, blocks, type
    # (outputs are scaled by width_multiplier in the loop below)
    fc_channels = 1024 if width_multiplier < 2 else 2048
    self.network_config = [
        g_name('data/bn', nn.BatchNorm2d(3)),
        slim.conv_bn_relu('stage1/conv', 3, in_channels, 3, 2, 1),
        g_name('stage1/pool', nn.MaxPool2d(3, 2, 0, ceil_mode=True)),
        (48, 2, 1, 4, 'b'),
        (96, 2, 1, 8, 'b'),  # x16
        (192, 2, 1, 4, 'b'),  # x32
        slim.conv_bn_relu('conv5', int(192 * width_multiplier), fc_channels, 1),
        g_name('pool', nn.AvgPool2d(7, 1)),
        g_name('fc', nn.Conv2d(fc_channels, self.num_classes, 1)),
    ]
    self.network = []
    for i, config in enumerate(self.network_config):
        if isinstance(config, nn.Module):
            self.network.append(config)
            continue
        out_channels, stride, dilation, num_blocks, stage_type = config
        out_channels = int(out_channels * width_multiplier)
        stage_prefix = 'stage_{}'.format(i - 1)
        blocks = [BasicBlock(stage_prefix + '_1', in_channels, out_channels,
                             stride, dilation)]
        for j in range(1, num_blocks):  # `j` avoids shadowing the outer index
            blocks.append(BasicBlock(stage_prefix + '_{}'.format(j + 1),
                                     out_channels, out_channels, 1, dilation))
        self.network += [nn.Sequential(*blocks)]
        in_channels = out_channels
    self.network = nn.Sequential(*self.network)
    for name, m in self.named_modules():
        if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
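# Channel arithmetic at a glance (illustrative): the per-stage widths are
# scaled by width_multiplier, so conv5's input must be int(192 * multiplier).
# For example, width_multiplier=2.0 yields stages of 96/192/384 channels and,
# since 2.0 >= 2, a 2048-wide head:
#
#   net = Network(num_classes=1000, width_multiplier=2.0)  # fc_channels == 2048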
def __init__(self, in_c, out_c, group, stride=2, dilation=1):
    super(BasicUnitA, self).__init__()
    self.stride = stride
    # The shortcut (avg-pooled input) is concatenated with the main branch,
    # so the main branch only needs to produce out_c - in_c channels.
    out_c -= in_c
    bottleneck = out_c // 4
    assert bottleneck % group == 0
    assert out_c % group == 0
    assert stride == 2
    self.g_conv1 = slim.conv_bn_relu(in_c, bottleneck, 1, stride=1, pad=0, group=group)
    self.shuffle2 = slim.get_shuffle(group)
    self.dw_conv3 = slim.conv_bn(bottleneck, bottleneck, 3, stride=2, pad=1, group=bottleneck)
    self.g_conv4 = slim.conv_bn(bottleneck, out_c, 1, stride=1, pad=0, group=group)
    self.avg_pool1 = nn.AvgPool2d(3, 2, 1)
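# Sketch of the presumed forward pass for this ShuffleNet-v1 downsampling unit
# (reconstructed from the modules above; the excerpt does not include the real
# forward()). The avg-pooled shortcut is concatenated with the main branch,
# which is why __init__ subtracts in_c from out_c. Assumes `torch` and
# `torch.nn.functional as F` are imported at module top.
def forward(self, x):
    out = self.g_conv1(x)         # grouped 1x1: in_c -> bottleneck
    out = self.shuffle2(out)      # shuffle channels across groups
    out = self.dw_conv3(out)      # 3x3 depthwise, stride 2
    out = self.g_conv4(out)       # grouped 1x1: bottleneck -> out_c - in_c
    shortcut = self.avg_pool1(x)  # 3x3 avg pool, stride 2, keeps in_c
    return F.relu(torch.cat((shortcut, out), dim=1))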
def __init__(self, num_classes):
    super(LayoutNet, self).__init__()
    group = 2
    self.stage1 = nn.Sequential(
        slim.conv_bn_relu(c_in=3, c_out=24, k_size=3, stride=2, pad=0),
        nn.MaxPool2d(2, 2),
    )
    self.stage2 = nn.Sequential(
        BasicUnitA(24, 240, group),
        BasicUnitB(240, 240, group),
        BasicUnitB(240, 240, group),
    )
    self.stage3 = nn.Sequential(
        BasicUnitA(240, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
        BasicUnitB(480, 480, group),
    )
    self.stage4 = nn.Sequential(
        BasicUnitA(480, 960, group),
        BasicUnitB(960, 960, group),
        BasicUnitB(960, 960, group),
    )
    # One 1x1 classifier per feature scale.
    self.classifier1 = nn.Conv2d(240, num_classes, 1, 1)
    self.classifier2 = nn.Conv2d(480, num_classes, 1, 1)
    self.classifier3 = nn.Conv2d(960, num_classes, 1, 1)
    self.num_classes = num_classes
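# Illustrative forward sketch only: the three 1x1 classifiers suggest the
# network reads out predictions at three scales (after stages 2, 3 and 4).
# The file does not include the real forward(), so treat this as an assumption.
def forward(self, x):
    f2 = self.stage2(self.stage1(x))  # 240 channels
    f3 = self.stage3(f2)              # 480 channels
    f4 = self.stage4(f3)              # 960 channels
    return self.classifier1(f2), self.classifier2(f3), self.classifier3(f4)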
def __init__(self, in_c, out_c, group, stride=1, dilation=1):
    super(BasicUnitB, self).__init__()
    bottleneck = out_c // 4
    assert stride == 1
    # assert in_c == out_c
    assert bottleneck % group == 0
    self.g_conv1 = slim.conv_bn_relu(in_c, bottleneck, 1, stride=stride, pad=0, group=group)
    self.shuffle2 = slim.get_shuffle(group)
    self.dw_conv3 = slim.conv_bn(bottleneck, bottleneck, 3, stride=1, pad=1, group=bottleneck)
    self.g_conv4 = slim.conv_bn(bottleneck, out_c, 1, stride=1, pad=0, group=group)
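# Presumed forward pass for this residual unit (a sketch, not the repo's
# verbatim code): stride is fixed at 1 and in_c is expected to equal out_c,
# so the branch output can be added back to the input, ShuffleNet-v1 style.
# Assumes `torch.nn.functional as F` is imported.
def forward(self, x):
    out = self.g_conv1(x)
    out = self.shuffle2(out)
    out = self.dw_conv3(out)
    out = self.g_conv4(out)
    return F.relu(x + out)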
def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
    super(Fire, self).__init__()
    self.inplanes = inplanes
    # Squeeze layer: 1x1 conv that reduces the channel count.
    self.squeeze_activation = slim.conv_bn_relu(inplanes, squeeze_planes,
                                                k_size=1, stride=1, pad=0)
    # Expand layers: parallel 1x1 and 3x3 convs whose outputs are concatenated.
    self.expand1x1_activation = slim.conv_bn_relu(squeeze_planes, expand1x1_planes,
                                                  k_size=1, stride=1, pad=0)
    self.expand3x3_activation = slim.conv_bn_relu(squeeze_planes, expand3x3_planes,
                                                  k_size=3, stride=1, pad=1)
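# The standard SqueezeNet Fire forward, matching the modules above (shown as a
# sketch since the excerpt omits it): squeeze to a narrow representation, then
# concatenate the parallel 1x1 and 3x3 expand branches along the channel axis.
# Assumes `torch` is imported at module top.
def forward(self, x):
    x = self.squeeze_activation(x)
    return torch.cat([self.expand1x1_activation(x),
                      self.expand3x3_activation(x)], dim=1)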