def __init__(self, C_prev_prev, C_prev, C):
    """Reduction cell: preprocess the two inputs to C channels, then feed
    each through a factorized-conv branch and a max-pool branch, all of
    which halve the spatial resolution."""
    super().__init__()
    self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1)

    def conv_branch():
        # (1x3) then (3x1) grouped convs, each strided along one axis,
        # followed by a 1x1 projection.
        return nn.Sequential(
            get_activation(),
            Conv2d(C, C, (1, 3), stride=(1, 2), groups=8, bias=False),
            Conv2d(C, C, (3, 1), stride=(2, 1), groups=8, bias=False),
            get_norm_layer(C, affine=True),
            get_activation(),
            Conv2d(C, C, 1),
            get_norm_layer(C, affine=True),
        )

    def pool_branch():
        # Strided max-pool followed by normalization.
        return nn.Sequential(
            nn.MaxPool2d(3, stride=2, padding=1),
            get_norm_layer(C, affine=True),
        )

    # Construction order matches the original so parameter initialization
    # consumes the RNG stream identically.
    self.branch_a1 = conv_branch()
    self.branch_a2 = pool_branch()
    self.branch_b1 = conv_branch()
    self.branch_b2 = pool_branch()
def __init__(self, in_channels, out_channels, se=False):
    """Pre-activation basic block (BN -> act -> 3x3 conv, twice) with an
    optional squeeze-and-excitation stage at the end."""
    super().__init__()
    self.bn1 = nn.BatchNorm2d(in_channels)
    self.nl1 = get_activation("default")
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3)
    self.bn2 = nn.BatchNorm2d(out_channels)
    self.nl2 = get_activation("default")
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    # SE recalibration when requested; a pass-through module otherwise.
    if se:
        self.se = SEModule(out_channels, reduction=8)
    else:
        self.se = Identity()
def __init__(self, in_channels, out_channels, use_se, drop_path):
    """Pre-activation basic block with optional SE and stochastic depth."""
    super().__init__()
    self.bn1 = get_norm_layer(in_channels)
    self.nl1 = get_activation("default")
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3)
    self.bn2 = get_norm_layer(out_channels)
    self.nl2 = get_activation("default")
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    # NOTE(review): these submodules only exist when enabled — the forward
    # pass presumably guards on the same flags; confirm there.
    if use_se:
        self.se = SEModule(out_channels, reduction=8)
    if drop_path:
        self.drop_path = DropPath(drop_path)
def __init__(self, start_channels, num_classes, block, widening_fractor, depth):
    """Pyramid-style network: the channel count grows linearly by
    `add_channel` per block across three stages (strides 1, 2, 2).

    `_make_layer` is expected to advance `self.in_channels` /
    `self.channels` as it builds each stage; the assert checks the final
    width matches the arithmetic.
    """
    super().__init__()
    if block == 'basic':
        block, per_stage = BasicBlock, (depth - 2) // 6
    elif block == 'bottleneck':
        block, per_stage = Bottleneck, (depth - 2) // 9
    else:
        raise ValueError("invalid block type: %s" % block)
    num_layers = [per_stage] * 3

    self.add_channel = widening_fractor / sum(num_layers)
    self.in_channels = start_channels
    self.channels = start_channels

    stem = Conv2d(3, start_channels, kernel_size=3, norm_layer='default')
    stages = [stem]
    for blocks, stride in zip(num_layers, [1, 2, 2]):
        stages.append(self._make_layer(block, blocks, stride=stride))
    self.features = nn.Sequential(*stages)

    assert (start_channels + widening_fractor) * block.expansion == self.in_channels

    self.post_activ = nn.Sequential(
        get_norm_layer(self.in_channels),
        get_activation('default'),
    )
    self.final_pool = nn.AdaptiveAvgPool2d(1)
    self.output = nn.Linear(self.in_channels, num_classes)
def __init__(self, channels, init_block_channels, in_channels=3, num_classes=1000):
    """NASNet-style classifier: stem conv, one stage per entry of
    `channels` (strides 1, 2, 2), then activation, global pool and FC."""
    super().__init__()
    self.num_classes = num_classes
    self.features = nasnet_dual_path_sequential(
        return_two=False, first_ordinals=1, last_ordinals=2)
    self.features.add_module(
        "init_block",
        Conv2d(in_channels, init_block_channels, kernel_size=3, norm_layer='default'))
    # _make_layer tracks widths through these two attributes.
    self.prev_in_channels = None
    self.in_channels = init_block_channels
    for idx, (stage_channels, stride) in enumerate(zip(channels, [1, 2, 2])):
        self.features.add_module(
            "stage%d" % (idx + 1), self._make_layer(stage_channels, stride))
    self.features.add_module("activ", get_activation('default'))
    self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(1))
    self.output = nn.Sequential()
    self.output.add_module(
        'fc', nn.Linear(in_features=self.in_channels, out_features=num_classes))
def __init__(self, layers, k=4, num_classes=10, **kwargs):
    """Wide-ResNet-style network: stem conv, three widened stages, BN+act,
    global pool and FC. `k` multiplies the widths of stages 1-3 only."""
    super().__init__()
    self.block_kwargs = kwargs
    # Stage widths: the stem stays at its base width, the rest are widened.
    w0 = self.stages[0]
    w1, w2, w3 = (s * k for s in self.stages[1:])
    self.conv = Conv2d(3, w0, kernel_size=3)
    self.layer1 = self._make_layer(w0, w1, layers[0], stride=1, **kwargs)
    self.layer2 = self._make_layer(w1, w2, layers[1], stride=2, **kwargs)
    self.layer3 = self._make_layer(w2, w3, layers[2], stride=2, **kwargs)
    self.bn = nn.BatchNorm2d(w3)
    self.nl = get_activation('default')
    self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Linear(w3, num_classes)
def __init__(self, in_channels, out_channels, stride, groups, base_width):
    """ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped (strided) -> 1x1
    expand, with a 1x1 projection shortcut when the shape changes."""
    super().__init__()
    # Per-group width scaled from the 64-wide baseline.
    width = math.floor(out_channels // self.expansion * (base_width / 64))
    self.conv1 = Conv2d(in_channels, width * groups, kernel_size=1,
                        norm_layer='default', activation='default')
    self.conv2 = Conv2d(width * groups, width * groups, kernel_size=3,
                        stride=stride, groups=groups,
                        norm_layer='default', activation='default')
    self.conv3 = Conv2d(width * groups, out_channels, kernel_size=1,
                        norm_layer='default')
    if stride != 1 or in_channels != out_channels:
        self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=stride, norm_layer='default')
    else:
        self.shortcut = nn.Identity()
    self.relu = get_activation('default')
def __init__(self, primitives=PRIMITIVES, C=16, num_stacked=5, nodes=4, num_classes=10, tau=10.0):
    """Differentiable-NAS supernet: a stem, then three stages of
    `num_stacked` normal cells, with a channel-doubling reduction cell
    before stages 2 and 3. Architecture parameters come from
    `_initialize_alphas`."""
    super().__init__()
    self.primitives = primitives
    self.C = C
    self.num_classes = num_classes
    self.num_stacked = num_stacked
    self.nodes = nodes
    self.tau = tau
    self.stem = Conv2d(3, C, kernel_size=3, norm_layer='default')
    for stage_idx in range(3):
        if stage_idx != 0:
            # Reduction cell halves resolution and doubles channels.
            self.add_module("reduce%d" % stage_idx, ReductionCell(C, C * 2))
            C = C * 2
        cells = nn.ModuleList(
            [NormalCell(primitives, nodes, C) for _ in range(num_stacked)])
        self.add_module("stage%d" % (stage_idx + 1), cells)
    self.post_activ = nn.Sequential(
        get_norm_layer(C),
        get_activation(),
    )
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C, num_classes)
    self._initialize_alphas()
def __init__(self, in_channels, out_channels, stride=1, expansion=4):
    """Bottleneck residual block: 1x1 reduce -> 3x3 (strided) -> 1x1
    expand, with a 1x1 projection downsample when the shape changes."""
    super().__init__()
    self.stride = stride
    self.in_channels = in_channels
    self.out_channels = out_channels
    mid = out_channels // expansion
    self.conv1 = Conv2d(in_channels, mid, kernel_size=1,
                        norm_layer='default', activation='default')
    self.conv2 = Conv2d(mid, mid, kernel_size=3, stride=stride,
                        norm_layer='default', activation='default')
    self.conv3 = Conv2d(mid, out_channels, kernel_size=1,
                        norm_layer='default')
    self.relu3 = get_activation('default')
    needs_projection = stride != 1 or in_channels != out_channels
    self.downsample = (
        Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,
               norm_layer='default')
        if needs_projection else None)
def __init__(self, in_channels, out_channels, last=False):
    """Decoder-style block: two depthwise-separable 5x5 convs plus SE,
    then (unless this is the last block) a stride-2 transposed conv for
    upsampling, an activation, and one more separable conv."""
    super().__init__()
    self.last = last
    self.conv1 = nn.Sequential(
        Conv2d(in_channels, out_channels, kernel_size=5,
               norm_layer='default', activation='relu',
               depthwise_separable=True),
        Conv2d(out_channels, out_channels, kernel_size=5,
               norm_layer='default', depthwise_separable=True),
        SEModule(out_channels, reduction=4),
    )
    if not last:
        # 2x upsampling via a transposed separable conv.
        self.deconv1 = Conv2d(out_channels, out_channels, kernel_size=4,
                              stride=2, norm_layer='default',
                              depthwise_separable=True, transposed=True)
    self.nl1 = get_activation('default')
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=5,
                        norm_layer='default', activation='default',
                        depthwise_separable=True)
def __init__(self, in_channels, use_se=False):
    """ShuffleNetV2-style unit: half the channels pass through a
    1x1 -> depthwise 3x3 -> 1x1 branch (optionally followed by SE)."""
    super().__init__()
    assert in_channels % 2 == 0
    channels = in_channels // 2
    self.branch = nn.Sequential(
        Conv2d(channels, channels, kernel_size=1,
               activation='default', norm_layer='default'),
        Conv2d(channels, channels, kernel_size=3, groups=channels,
               activation=None, norm_layer='default'),
        Conv2d(channels, channels, kernel_size=1,
               activation=None, norm_layer='default'),
        # Optional squeeze-and-excitation tail.
        *([SELayer(channels, reduction=2)] if use_se else []),
    )
    self.relu = get_activation()
def __init__(self, in_channels, f_channels, last=False):
    """Two-stage conv block: a pair of 3x3 convs (second without
    activation), then an activation followed by one more 3x3 conv."""
    super().__init__()
    ks = 3
    self.last = last
    self.conv1 = nn.Sequential(
        Conv2d(in_channels, f_channels, kernel_size=ks,
               norm_layer='default', activation='default'),
        Conv2d(f_channels, f_channels, kernel_size=ks,
               norm_layer='default'),
    )
    self.conv2 = nn.Sequential(
        get_activation('default'),
        Conv2d(f_channels, f_channels, kernel_size=ks,
               norm_layer='default', activation='default'),
    )
def __init__(self, in_channels, out_channels, stride):
    """Bottleneck residual block: 1x1 reduce -> 3x3 (strided) -> 1x1
    expand, with a projection shortcut when the shape changes.

    Fix: the shortcut was a 1x1 projection only when ``stride != 1``; a
    stride-1 block with ``in_channels != out_channels`` got ``nn.Identity()``
    and the residual add would fail on the channel mismatch. The condition
    now also checks the channel counts, matching the other residual blocks
    in this file.
    """
    super().__init__()
    channels = out_channels // self.expansion
    self.conv1 = Conv2d(in_channels, channels, kernel_size=1,
                        norm_layer='default', activation='default')
    self.conv2 = Conv2d(channels, channels, kernel_size=3, stride=stride,
                        norm_layer='default', activation='default')
    self.conv3 = Conv2d(channels, out_channels, kernel_size=1,
                        norm_layer='default')
    if stride != 1 or in_channels != out_channels:
        self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=stride, norm_layer='default')
    else:
        self.shortcut = nn.Identity()
    self.relu = get_activation('default')
def __init__(self, in_channels, out_channels, stride, groups, use_se):
    """Grouped bottleneck block with optional SE; the shortcut uses
    avg-pool + 1x1 conv when downsampling (anti-aliased style)."""
    super().__init__()
    self.use_se = use_se
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=1,
                        norm_layer='default', activation='default')
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3,
                        stride=stride, groups=groups,
                        norm_layer='default', activation='default')
    if self.use_se:
        self.se = SE(out_channels, 4)
    self.conv3 = Conv2d(out_channels, out_channels, kernel_size=1,
                        norm_layer='default')
    if stride == 1 and in_channels == out_channels:
        self.shortcut = nn.Identity()
    else:
        # Pool first (when strided) so the 1x1 conv stays stride-1.
        shortcut_layers = []
        if stride != 1:
            shortcut_layers.append(nn.AvgPool2d(kernel_size=(2, 2), stride=2))
        shortcut_layers.append(
            Conv2d(in_channels, out_channels, kernel_size=1, bias=False))
        shortcut_layers.append(get_norm_layer(out_channels))
        self.shortcut = nn.Sequential(*shortcut_layers)
    self.relu = get_activation('default')
def __init__(self, in_channels, out_channels, shuffle_groups=2):
    """ShuffleNet-style unit: one half of the input goes through a
    1x1 -> depthwise 3x3 -> 1x1 branch; a 1x1 shortcut aligns channels
    when in/out widths differ; outputs are channel-shuffled."""
    super().__init__()
    # Branch width is whatever remains after passing half the input through.
    channels = out_channels - in_channels // 2
    self.conv1 = Conv2d(in_channels // 2, channels, kernel_size=1,
                        norm_layer='default', activation='default')
    self.conv2 = Conv2d(channels, channels, kernel_size=3, groups=channels,
                        norm_layer='default')
    self.conv3 = Conv2d(channels, channels, kernel_size=1,
                        norm_layer='default')
    if in_channels != out_channels:
        self.shortcut = Conv2d(in_channels // 2, channels, kernel_size=1,
                               norm_layer='default')
    else:
        self.shortcut = nn.Sequential()
    self.relu = get_activation('default')
    self.shuffle = ShuffleBlock(shuffle_groups)
def __init__(self, depth, k, num_classes=10, use_se=False, drop_path=0):
    """WRN-style network: stem conv, three widened stages of
    ``(depth - 4) // 6`` blocks each (strides 1, 2, 2), norm + act,
    global pool and FC. SE / drop-path flags are forwarded to blocks."""
    super().__init__()
    blocks_per_stage = (depth - 4) // 6
    w0 = self.stages[0]
    w1, w2, w3 = (s * k for s in self.stages[1:])
    self.conv = Conv2d(3, w0, kernel_size=3)
    self.layer1 = self._make_layer(w0, w1, blocks_per_stage, stride=1,
                                   use_se=use_se, drop_path=drop_path)
    self.layer2 = self._make_layer(w1, w2, blocks_per_stage, stride=2,
                                   use_se=use_se, drop_path=drop_path)
    self.layer3 = self._make_layer(w2, w3, blocks_per_stage, stride=2,
                                   use_se=use_se, drop_path=drop_path)
    self.bn = get_norm_layer(w3)
    self.nl = get_activation('default')
    self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Linear(w3, num_classes)
def __init__(self, C_in, C_out, kernel_size):
    """Activation -> conv -> norm, packaged as one sequential op."""
    super().__init__()
    layers = [
        get_activation(),
        Conv2d(C_in, C_out, kernel_size, bias=False),
        get_norm_layer(C_out),
    ]
    self.op = nn.Sequential(*layers)
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
    """NASNet conv unit: activation, bias-free conv, then norm,
    applied in that order by the forward pass."""
    super().__init__()
    self.activ = get_activation('default')
    self.conv = Conv2d(in_channels, out_channels, kernel_size, stride,
                       bias=False)
    # NOTE(review): get_norm_layer is called here as (type, channels),
    # elsewhere as (channels) — the helper presumably accepts both; confirm.
    self.bn = get_norm_layer('default', out_channels)
def __init__(self, channels, reduction):
    """Squeeze-and-excitation: global average pool, channel bottleneck
    (channels -> channels/reduction -> channels), sigmoid gate."""
    super().__init__()
    bottleneck = channels // reduction
    self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
    self.f_ex = nn.Sequential(
        Conv2d(channels, bottleneck, 1),
        get_activation(),
        Conv2d(bottleneck, channels, 1),
        nn.Sigmoid(),
    )
def __init__(self, in_channels, out_channels, kernel_size, stride):
    """Depthwise-separable conv unit: activation, bias-free separable
    conv, then batch norm."""
    super().__init__()
    self.activ = get_activation("default")
    self.conv = Conv2d(in_channels=in_channels,
                       out_channels=out_channels,
                       kernel_size=kernel_size,
                       stride=stride,
                       bias=False,
                       depthwise_separable=True)
    self.bn = get_norm_layer('bn', out_channels)
def __init__(self, in_channels, out_channels, stride=1, use_se=False):
    """Pre-activation basic block with optional SE.

    NOTE(review): the shortcut is always a 1x1 conv, even when stride is 1
    and channels match — unlike the identity-when-possible blocks elsewhere
    in this file. Presumably intentional; confirm against the forward pass.
    """
    super().__init__()
    self.use_se = use_se
    self.bn1 = get_norm_layer(in_channels)
    self.nl1 = get_activation("default")
    self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3,
                        stride=stride)
    self.bn2 = get_norm_layer(out_channels)
    self.nl2 = get_activation("default")
    self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
    if self.use_se:
        self.se = SEModule(out_channels, reduction=8)
    self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1,
                           stride=stride)
def DWConv2d(in_channels, out_channels, kernel_size, stride=1):
    """Return act -> depthwise-separable conv layers -> BN as a Sequential.

    NOTE(review): the Conv2d helper is unpacked with ``*``, so with
    ``depthwise_separable=True`` it presumably yields an iterable of
    layers — confirm against the helper's implementation.
    """
    layers = [get_activation('default')]
    layers.extend(Conv2d(in_channels, out_channels, kernel_size, stride,
                         depthwise_separable=True))
    layers.append(nn.BatchNorm2d(out_channels))
    return nn.Sequential(*layers)
def __init__(self, in_channels, channels, stride=1):
    """Pre-activation bottleneck: norm -> 1x1 -> norm/act -> 3x3 (strided)
    -> norm/act -> 1x1 expand -> norm, plus a Shortcut.

    NOTE(review): there is no activation between the first norm and conv1,
    unlike the later pairs — looks deliberate, but worth confirming.
    """
    super().__init__()
    out_channels = channels * self.expansion
    self.conv = nn.Sequential(
        get_norm_layer('default', in_channels),
        Conv2d(in_channels, channels, kernel_size=1, bias=False),
        get_norm_layer('default', channels),
        get_activation('default'),
        Conv2d(channels, channels, kernel_size=3, stride=stride, bias=False),
        get_norm_layer('default', channels),
        get_activation('default'),
        Conv2d(channels, out_channels, kernel_size=1, bias=False),
        get_norm_layer('default', out_channels),
    )
    self.shortcut = Shortcut(in_channels, out_channels, stride)
def __init__(self, in_channels, out_channels):
    """NASNet path block: two parallel half-width path branches (the
    second with extra padding) whose concatenation is normalized."""
    super().__init__()
    half = out_channels // 2
    self.activ = get_activation('default')
    self.path1 = NasPathBranch(in_channels=in_channels, out_channels=half)
    self.path2 = NasPathBranch(in_channels=in_channels, out_channels=half,
                               extra_padding=True)
    self.bn = get_norm_layer('default', out_channels)
def __init__(self, in_channels, out_channels, stride=1, lite=False):
    """Activation followed by a normalized 3x3 conv; `lite` switches the
    conv to depthwise-separable. The Conv2d helper is unpacked, so it
    presumably yields an iterable of layers — confirm against the helper."""
    super().__init__()
    layers = [get_activation("default")]
    layers.extend(Conv2d(in_channels, out_channels, kernel_size=3,
                         stride=stride, norm_layer='default',
                         depthwise_separable=lite))
    self.conv = nn.Sequential(*layers)
def __init__(self, stem_channels=64, mid_channels=(64, 80, 96, 112),
             out_channels=(128, 256, 384, 512), num_modules=(1, 1, 1, 1),
             num_classes=1000):
    """VoVNet-style classifier: a three-conv stem (stride 2), four stages
    of max-pool + OSA modules, post-activation, global pool and FC."""
    super().__init__()
    num_stages = 5
    assert len(mid_channels) == len(out_channels) == len(num_modules) == num_stages - 1

    self.features = nn.Sequential()
    self.features.add_module(
        "init_block",
        nn.Sequential(
            Conv2d(3, stem_channels, kernel_size=3, stride=2,
                   norm_layer='default', activation='default'),
            Conv2d(stem_channels, stem_channels, kernel_size=3,
                   norm_layer='default', activation='default'),
            Conv2d(stem_channels, stem_channels * 2, kernel_size=3,
                   norm_layer='default', activation='default'),
        ))
    in_channels = stem_channels * 2
    for stage_idx, (mid, out, units) in enumerate(
            zip(mid_channels, out_channels, num_modules)):
        stage = nn.Sequential()
        stage.add_module(
            "pool", nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        for unit_idx in range(units):
            stage.add_module("unit%d" % (unit_idx + 1),
                             OSA(in_channels, mid, out))
            in_channels = out
        self.features.add_module("stage%d" % (stage_idx + 1), stage)
    self.features.add_module(
        "post_activ",
        nn.Sequential(
            get_norm_layer("default", in_channels),
            get_activation("default"),
        ))
    self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(1))
    self.output = nn.Linear(in_features=in_channels,
                            out_features=num_classes)
def __init__(self, in_channels, channels, stride=1, with_se=True):
    """Residual block (two 3x3 convs) with optional SE and a 1x1
    projection downsample when the shape changes.

    Fix: the downsample Conv2d was built without a ``kernel_size``
    argument, which every other Conv2d call in this file supplies and
    which downsample paths elsewhere set to 1. Added ``kernel_size=1``
    to match.
    """
    super().__init__()
    self.with_se = with_se
    self.conv1 = Conv2d(in_channels, channels, kernel_size=3,
                        stride=stride, norm_layer='default',
                        activation='default')
    self.conv2 = Conv2d(channels, channels, kernel_size=3,
                        norm_layer='default')
    if self.with_se:
        self.se = SEModule(channels, reduction=8)
    self.nl = get_activation('default')
    self.downsample = None
    if stride != 1 or in_channels != channels:
        self.downsample = Conv2d(in_channels, channels, kernel_size=1,
                                 stride=stride, norm_layer='default')
def __init__(self, C_in, C_out, kernel_size, stride, padding):
    """Two stacked separable convs: the first (strided) pair uses the
    project Conv2d helper, the second pair uses raw nn.Conv2d with the
    explicit padding and projects to C_out."""
    super().__init__()
    layers = [
        get_activation(),
        # Strided depthwise + pointwise (stays at C_in channels).
        Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
               groups=C_in, bias=False),
        Conv2d(C_in, C_in, kernel_size=1, bias=False),
        get_norm_layer(C_in),
        get_activation(),
        # Stride-1 depthwise + pointwise projection to C_out.
        nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1,
                  padding=padding, groups=C_in, bias=False),
        nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
        get_norm_layer(C_out),
    ]
    self.op = nn.Sequential(*layers)
def __init__(self, C_in, C_out, kernel_size, stride, dilation):
    """Dilated separable conv op: activation, dilated depthwise conv,
    pointwise projection, then norm."""
    super().__init__()
    layers = [
        get_activation(),
        Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
               dilation=dilation, groups=C_in, bias=False),
        Conv2d(C_in, C_out, kernel_size=1, bias=False),
        get_norm_layer(C_out),
    ]
    self.op = nn.Sequential(*layers)
def __init__(self, in_channels, out_channels, stride=1):
    """Pre-activation basic block: norm -> 3x3 (strided) -> norm/act ->
    3x3 -> norm, plus a Shortcut.

    NOTE(review): no activation between the first norm and conv — same
    pattern as the bottleneck variant in this file; confirm intended.
    """
    super().__init__()
    layers = [
        get_norm_layer(in_channels),
        Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
               bias=False),
        get_norm_layer(out_channels),
        get_activation(),
        Conv2d(out_channels, out_channels, kernel_size=3, bias=False),
        get_norm_layer(out_channels),
    ]
    self.conv = nn.Sequential(*layers)
    self.shortcut = Shortcut(in_channels, out_channels, stride)