def __init__(self, in_channels, out_channels, stride=1, dropout=0, use_se=False):
    super().__init__()
    self.use_se = use_se
    self._dropout = dropout
    self.norm1 = Norm(in_channels)
    self.act1 = Act()
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride)
    self.norm2 = Norm(out_channels)
    self.act2 = Act()
    if self._dropout:
        self.dropout = nn.Dropout(dropout)
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    if self.use_se:
        self.se = SEModule(out_channels, reduction=8)
    self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
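# Hedged sketch of the matching forward pass (not shown in the snippet above):
# a plausible pre-activation ordering, with SE applied before the residual add
# and the shortcut taken from the activated input.
def forward(self, x):
    x = self.act1(self.norm1(x))      # pre-activation on the block input
    out = self.conv1(x)
    out = self.act2(self.norm2(out))
    if self._dropout:
        out = self.dropout(out)
    out = self.conv2(out)
    if self.use_se:
        out = self.se(out)            # channelwise reweighting before the add
    return out + self.shortcut(x)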
def __init__(self, in_channels, channels, stride, group_channels, cardinality,
             start_block=False, end_block=False, exclude_bn0=False):
    super().__init__()
    out_channels = channels * self.expansion
    width = group_channels * cardinality
    if not start_block and not exclude_bn0:
        self.bn0 = Norm(in_channels)
    if not start_block:
        self.act0 = Act()
    self.conv1 = Conv2d(in_channels, width, kernel_size=1)
    self.bn1 = Norm(width)
    self.act1 = Act()
    self.conv2 = Conv2d(width, width, kernel_size=3, stride=stride,
                        groups=cardinality, norm='def', act='def')
    self.conv3 = Conv2d(width, out_channels, kernel_size=1)
    if start_block:
        self.bn3 = Norm(out_channels)
    if end_block:
        self.bn3 = Norm(out_channels)
        self.act3 = Act()
    if stride != 1 or in_channels != out_channels:
        shortcut = []
        if stride != 1:
            shortcut.append(Pool2d(2, 2, type='avg'))
        shortcut.append(
            Conv2d(in_channels, out_channels, kernel_size=1, norm='def'))
        self.shortcut = Sequential(shortcut)
    else:
        self.shortcut = Identity()
    self.start_block = start_block
    self.end_block = end_block
    self.exclude_bn0 = exclude_bn0
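# Hedged sketch of how start_block/end_block/exclude_bn0 are typically consumed
# in an iResNet-style forward (assumed; the snippet above only defines layers):
def forward(self, x):
    identity = self.shortcut(x)
    if self.start_block:
        out = x                          # stage entry: no pre-norm/act
    elif self.exclude_bn0:
        out = self.act0(x)
    else:
        out = self.act0(self.bn0(x))
    out = self.act1(self.bn1(self.conv1(out)))
    out = self.conv2(out)                # conv2 carries its own norm/act
    out = self.conv3(out)
    if self.start_block:
        out = self.bn3(out)              # extra BN only on the stage's first block
    out = out + identity
    if self.end_block:
        out = self.act3(self.bn3(out))   # stage exit: post-norm/act after the add
    return out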
def __init__(self, in_channels, out_channels, stride=1, depthwise=True):
    super().__init__()
    self.layers = nn.Sequential(
        Norm(in_channels),
        Act(),
        Conv2d(in_channels, out_channels, kernel_size=1),
        Norm(out_channels),
        Act(),
        Conv2d(out_channels, out_channels, kernel_size=3, stride=stride,
               groups=out_channels if depthwise else 1),
        Norm(out_channels),
        Act(),
        Conv2d(out_channels, out_channels, kernel_size=1),
    )
    if in_channels != out_channels or stride != 1:
        self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
    else:
        self.shortcut = nn.Identity()
def __init__(self, in_channels, channels, stride, dropout, drop_path,
             start_block=False, end_block=False, exclude_bn0=False):
    super().__init__()
    # For torch.jit.script
    self.bn0 = Identity()
    self.act0 = Identity()
    self.act2 = Identity()
    self.bn2 = Identity()
    out_channels = channels * self.expansion
    if not start_block and not exclude_bn0:
        self.bn0 = Norm(in_channels)
    if not start_block:
        self.act0 = Act()
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride)
    self.bn1 = Norm(out_channels)
    self.act1 = Act()
    self.dropout = Dropout(dropout) if dropout else Identity()
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    if start_block:
        self.bn2 = Norm(out_channels)
    self.drop_path = DropPath(drop_path) if drop_path else Identity()
    if end_block:
        self.bn2 = Norm(out_channels)
        self.act2 = Act()
    if stride != 1 or in_channels != out_channels:
        shortcut = []
        if stride != 1:
            shortcut.append(Pool2d(2, 2, type='avg'))
        shortcut.append(
            Conv2d(in_channels, out_channels, kernel_size=1, norm='def'))
        self.shortcut = Sequential(shortcut)
    else:
        self.shortcut = Identity()
    self.start_block = start_block
    self.end_block = end_block
    self.exclude_bn0 = exclude_bn0
def __init__(self, in_channels, out_channels, dropout, use_se, drop_path):
    super().__init__()
    self.norm1 = Norm(in_channels)
    self.act1 = Act()
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3)
    self.norm2 = Norm(out_channels)
    self.act2 = Act()
    if dropout:
        self.dropout = nn.Dropout(dropout)
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    if use_se:
        self.se = SEModule(out_channels, reduction=8)
    if drop_path:
        self.drop_path = DropPath(drop_path)
def __init__(self, depth, k, num_classes=10, depthwise=True):
    super().__init__()
    num_blocks = (depth - 4) // 6
    self.stem = Conv2d(3, self.stages[0], kernel_size=3)

    self.layer1 = self._make_layer(
        self.stages[0] * 1, self.stages[1] * k, num_blocks,
        stride=1, depthwise=depthwise)
    self.layer2 = self._make_layer(
        self.stages[1] * k, self.stages[2] * k, num_blocks,
        stride=2, depthwise=depthwise)
    self.layer3 = self._make_layer(
        self.stages[2] * k, self.stages[3] * k, num_blocks,
        stride=2, depthwise=depthwise)

    self.norm = Norm(self.stages[3] * k)
    self.act = Act()
    self.avgpool = GlobalAvgPool()
    self.fc = Linear(self.stages[3] * k, num_classes)
def __init__(self, in_channels, channels, stride, cardinality, base_width):
    super().__init__()
    out_channels = channels * self.expansion
    D = math.floor(channels * (base_width / 64))
    C = cardinality
    self.conv1 = Conv2d(in_channels, D * C, kernel_size=1,
                        norm='def', act='def')
    self.conv2 = Conv2d(D * C, D * C, kernel_size=3, stride=stride,
                        groups=cardinality, norm='def', act='def')
    self.conv3 = Conv2d(D * C, out_channels, kernel_size=1, norm='def')
    self.shortcut = Conv2d(
        in_channels, out_channels, kernel_size=1, stride=stride,
        norm='def') if in_channels != out_channels else Identity()
    self.act = Act()
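# Quick sanity check of the grouped width D * C (illustrative numbers only,
# using the common ResNeXt "32x4d" setting):
import math

base_width, cardinality, channels = 4, 32, 64
D = math.floor(channels * (base_width / 64))  # per-group width -> 4
print(D * cardinality)                        # conv2 width: 32 groups * 4 = 128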
def __init__(self, start_channels, num_classes, block, widening_fractor, depth):
    super().__init__()
    if block == 'basic':
        block = BasicBlock
        num_layers = [(depth - 2) // 6] * 3
    elif block == 'bottleneck':
        block = Bottleneck
        num_layers = [(depth - 2) // 9] * 3
    else:
        raise ValueError("invalid block type: %s" % block)

    strides = [1, 2, 2]

    self.add_channel = widening_fractor / sum(num_layers)
    self.in_channels = start_channels
    self.channels = start_channels

    layers = [Conv2d(3, start_channels, kernel_size=3, norm='default')]
    for n, s in zip(num_layers, strides):
        layers.append(self._make_layer(block, n, stride=s))
    self.features = nn.Sequential(*layers)

    assert (start_channels + widening_fractor) * block.expansion == self.in_channels

    self.post_activ = nn.Sequential(
        Norm(self.in_channels),
        Act('default'),
    )
    self.final_pool = nn.AdaptiveAvgPool2d(1)
    self.output = nn.Linear(self.in_channels, num_classes)
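# Hedged sketch of a _make_layer consistent with the additive widening above
# (assumed, not shown in the source): PyramidNet-style blocks grow
# self.channels by add_channel per block and round when instantiating, which
# is what makes the assert on self.in_channels hold after the last stage.
def _make_layer(self, block, num_layers, stride):
    layers = []
    for i in range(num_layers):
        self.channels = self.channels + self.add_channel
        layers.append(block(self.in_channels, int(round(self.channels)),
                            stride=stride if i == 0 else 1))
        self.in_channels = int(round(self.channels)) * block.expansion
    return nn.Sequential(*layers)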
def __init__(self, in_channels, channels, stride, erase_relu):
    super().__init__()
    out_channels = channels * self.expansion
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                        norm='def', act='def')
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3, norm='def')
    if stride != 1 or in_channels != out_channels:
        shortcut = []
        if stride != 1:
            shortcut.append(Pool2d(2, 2, type='avg'))
        shortcut.append(
            Conv2d(in_channels, out_channels, kernel_size=1, norm='def'))
        self.shortcut = Sequential(shortcut)
    else:
        self.shortcut = Identity()
    self.act = Act() if not erase_relu else Identity()
def __init__(self, in_channels, out_channels, stride, groups, use_se):
    super().__init__()
    self.use_se = use_se
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=1,
                        norm='default', act='default')
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3, stride=stride,
                        groups=groups, norm='default', act='default')
    if self.use_se:
        self.se = SE(out_channels, 4)
    self.conv3 = Conv2d(out_channels, out_channels, kernel_size=1, norm='default')
    if stride != 1 or in_channels != out_channels:
        layers = []
        if stride != 1:
            layers.append(nn.AvgPool2d(kernel_size=(2, 2), stride=2))
        layers.extend([
            Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            Norm(out_channels),
        ])
        self.shortcut = nn.Sequential(*layers)
    else:
        self.shortcut = nn.Identity()
    self.relu = Act('default')
def __init__(self, C_in, C_out):
    super().__init__()
    assert C_out % 2 == 0
    self.act = Act('relu', inplace=False)
    self.conv_1 = Conv2d(C_in, C_out // 2, 1, stride=2, bias=False)
    self.conv_2 = Conv2d(C_in, C_out // 2, 1, stride=2, bias=False)
    self.bn = Norm(C_out)
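# Hedged sketch of the usual FactorizedReduce forward (DARTS-style; assumes
# torch is imported at module level): two stride-2 1x1 convs, the second on an
# input shifted by one pixel, so together they cover both spatial phases.
def forward(self, x):
    x = self.act(x)
    out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
    return self.bn(out)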
def __init__(self, C_in, C_out, kernel_size, stride=1):
    super().__init__()
    self.op = nn.Sequential(
        Act('relu', inplace=False),
        Conv2d(C_in, C_out, kernel_size, bias=False, stride=stride),
        Norm(C_out),
    )
def __init__(self, channels, reduction):
    super().__init__()
    c = channels // reduction
    self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
    self.f_ex = nn.Sequential(
        Conv2d(channels, c, 1),
        Act(),
        Conv2d(c, channels, 1),
        nn.Sigmoid(),
    )
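# Hedged sketch of the standard squeeze-and-excitation forward for the module
# above: global-average "squeeze", bottlenecked "excitation", channelwise rescale.
def forward(self, x):
    return x * self.f_ex(self.avg_pool(x))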
def __init__(self, in_channels, channels, stride=1):
    super().__init__()
    out_channels = channels * self.expansion
    self.conv = nn.Sequential(
        Norm(in_channels),
        Conv2d(in_channels, channels, kernel_size=1, bias=False),
        Norm(channels),
        Act(),
        Conv2d(channels, channels, kernel_size=3, stride=stride, bias=False),
        Norm(channels),
        Act(),
        Conv2d(channels, out_channels, kernel_size=1, bias=False),
        Norm(out_channels),
    )
    self.shortcut = Shortcut(in_channels, out_channels, stride)
def __init__(self, C, num_classes):
    """Assuming input size 8x8."""
    super().__init__()
    self.features = nn.Sequential(
        Act('relu', inplace=True),
        Pool2d(5, stride=3, padding=0, type='avg'),
        Conv2d(C, 128, 1, norm='def', act='relu'),
        Conv2d(128, 768, 2, norm='def', act='relu', padding=0),
    )
    self.classifier = nn.Sequential(
        GlobalAvgPool(),
        Linear(768, num_classes),
    )
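# Hedged sketch of the matching forward (assumed): the 8x8 feature map is
# pooled 5x5/stride-3 down to 2x2, lifted to 768 channels by the 2x2 conv,
# then globally pooled and classified.
def forward(self, x):
    return self.classifier(self.features(x))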
def __init__(self, C_in, C_out, kernel_size, stride, dilation):
    super().__init__()
    self.op = nn.Sequential(
        Act('relu', inplace=False),
        Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
               dilation=dilation, groups=C_in, bias=False),
        Conv2d(C_in, C_out, kernel_size=1, bias=False),
        Norm(C_out),
    )
def __init__(self, depth, k, num_classes=10, dropout=0, use_se=False,
             drop_path=0, depthwise=True):
    super().__init__()
    num_blocks = (depth - 4) // 6
    self.stem = Conv2d(3, self.stages[0], kernel_size=3)

    self.layer1 = self._make_layer(
        self.stages[0] * 1, self.stages[1] * k, num_blocks, stride=1,
        dropout=dropout, use_se=use_se, drop_path=drop_path, depthwise=depthwise)
    self.layer2 = self._make_layer(
        self.stages[1] * k, self.stages[2] * k, num_blocks, stride=2,
        dropout=dropout, use_se=use_se, drop_path=drop_path, depthwise=depthwise)
    self.layer3 = self._make_layer(
        self.stages[2] * k, self.stages[3] * k, num_blocks, stride=2,
        dropout=dropout, use_se=use_se, drop_path=drop_path, depthwise=depthwise)

    self.post_activ = nn.Sequential(Norm(self.stages[3] * k), Act())

    self.classifier = nn.Sequential(
        GlobalAvgPool(),
        Linear(self.stages[3] * k, num_classes),
    )
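# Minimal smoke test (hypothetical names: the class is assumed to be a
# WideResNet-style net with a `stages` class attribute such as [16, 16, 32, 64];
# depth=28, k=10 gives the usual WRN-28-10 configuration):
# net = WideResNet(depth=28, k=10, num_classes=10, dropout=0.3, use_se=True)
# logits = net(torch.randn(2, 3, 32, 32))  # CIFAR-sized input -> (2, 10)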
def __init__(self, in_channels, channels, stride, erase_relu):
    super().__init__()
    out_channels = channels * self.expansion
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                        norm='def', act='def')
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3, norm='def')
    if stride != 1 or in_channels != out_channels:
        self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=stride, norm='def')
    else:
        self.shortcut = Identity()
    self.act = Act() if not erase_relu else Identity()
# NAS candidate operations, keyed by primitive name (registry name OPS
# assumed, following the DARTS convention).
OPS = {
    'sep_conv_3x3': lambda C, stride: SepConv(C, C, 3, stride),
    'sep_conv_5x5': lambda C, stride: SepConv(C, C, 5, stride),
    'sep_conv_7x7': lambda C, stride: SepConv(C, C, 7, stride),
    'nor_conv_1x1': lambda C, stride: ReLUConvBN(C, C, 1, stride),
    'nor_conv_3x3': lambda C, stride: ReLUConvBN(C, C, 3, stride),
    'dil_conv_3x3': lambda C, stride: DilConv(C, C, 3, stride, 2),
    'dil_conv_5x5': lambda C, stride: DilConv(C, C, 5, stride, 2),
    'conv_7x1_1x7': lambda C, stride: nn.Sequential(
        Act('relu', inplace=False),
        Conv2d(C, C, (1, 7), stride=(1, stride), bias=False),
        Conv2d(C, C, (7, 1), stride=(stride, 1), bias=False),
        Norm(C),
    ),
}
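# Usage sketch: NAS cell builders typically instantiate an edge's candidate op
# from the registry like this (C and stride values illustrative; output shape
# assumes the custom Conv2d pads to "same"):
# op = OPS['dil_conv_5x5'](C=16, stride=2)
# y = op(torch.randn(1, 16, 32, 32))  # -> (1, 16, 16, 16)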