def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, expand_ratio=6, mid_channels=None):
    super(MBInvertedConvLayer, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.kernel_size = kernel_size
    self.stride = stride
    self.expand_ratio = expand_ratio
    self.mid_channels = mid_channels

    if self.mid_channels is None:
        feature_dim = round(self.in_channels * self.expand_ratio)
    else:
        feature_dim = self.mid_channels

    if self.expand_ratio == 1:
        self.inverted_bottleneck = nn.Sequential()
    else:
        self.inverted_bottleneck = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', nn.ReLU6(inplace=True)),
        ]))

    pad = get_same_padding(self.kernel_size)
    self.depth_conv = nn.Sequential(OrderedDict([
        ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
        ('bn', nn.BatchNorm2d(feature_dim)),
        ('act', nn.ReLU6(inplace=True)),
    ]))
    self.point_linear = nn.Sequential(OrderedDict([
        ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
        ('bn', nn.BatchNorm2d(out_channels)),
    ]))
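# The constructor above only wires three sub-modules; the forward pass is not shown in
# this snippet. A minimal sketch of how such an MBConv-style layer is typically applied,
# assuming the surrounding MBInvertedConvLayer class defines forward() roughly like this
# (hypothetical illustration, not necessarily this repository's exact method):
def forward(self, x):
    # expand channels with a 1x1 conv (a no-op nn.Sequential() when expand_ratio == 1)
    x = self.inverted_bottleneck(x)
    # spatial filtering with a depthwise conv
    x = self.depth_conv(x)
    # project back to out_channels with a linear 1x1 conv (no activation)
    return self.point_linear(x)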
def __init__(self, alpha, depths, convops, kernel_sizes, num_layers, skips, num_classes=1000, dropout=0.2):
    super().__init__()
    assert alpha > 0.0
    assert len(depths) == len(convops) == len(kernel_sizes) == len(num_layers) == len(skips) == 7
    self.alpha = alpha
    self.num_classes = num_classes
    depths = _get_depths([_FIRST_DEPTH] + depths, alpha)
    base_filter_sizes = [16, 24, 40, 80, 96, 192, 320]
    exp_ratios = [3, 3, 3, 6, 6, 6, 6]
    strides = [1, 2, 2, 2, 1, 2, 1]
    layers = [
        # First layer: regular conv.
        nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
        nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
        nn.ReLU(inplace=True),
    ]
    count = 0
    # for conv, prev_depth, depth, ks, skip, stride, repeat, exp_ratio in \
    #         zip(convops, depths[:-1], depths[1:], kernel_sizes, skips, strides, num_layers, exp_ratios):
    for filter_size, exp_ratio, stride in zip(base_filter_sizes, exp_ratios, strides):
        # TODO: restrict that "choose" can only be used within mutator
        ph = nn.Placeholder(label=f'mutable_{count}', **{
            'kernel_size_options': [1, 3, 5],
            'n_layer_options': [1, 2, 3, 4],
            'op_type_options': ['__mutated__.base_mnasnet.RegularConv',
                                '__mutated__.base_mnasnet.DepthwiseConv',
                                '__mutated__.base_mnasnet.MobileConv'],
            # 'se_ratio_options': [0, 0.25],
            'skip_options': ['identity', 'no'],
            'n_filter_options': [int(filter_size * x) for x in [0.75, 1.0, 1.25]],
            'exp_ratio': exp_ratio,
            'stride': stride,
            'in_ch': depths[0] if count == 0 else None
        })
        layers.append(ph)
        '''if conv == "mconv":
            # MNASNet blocks: stacks of inverted residuals.
            layers.append(_stack_inverted_residual(prev_depth, depth, ks, skip,
                                                   stride, exp_ratio, repeat, _BN_MOMENTUM))
        else:
            # Normal conv and depth-separated conv
            layers += _stack_normal_conv(prev_depth, depth, ks, skip, conv == "dconv",
                                         stride, repeat, _BN_MOMENTUM)'''
        count += 1
        if count >= 2:
            break
    layers += [
        # Final mapping to classifier input.
        nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
        nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
        nn.ReLU(inplace=True),
    ]
    self.layers = nn.Sequential(*layers)
    self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
                                    nn.Linear(1280, num_classes))
    self._initialize_weights()
def __init__(self, kernel_size, in_ch, out_ch, skip, exp_ratio, stride):
    super().__init__()
    self.kernel_size = kernel_size
    self.in_ch = in_ch
    self.out_ch = out_ch
    self.skip = skip
    self.exp_ratio = exp_ratio
    self.stride = stride
    mid_ch = in_ch * exp_ratio
    self.layers = nn.Sequential(
        # Pointwise
        nn.Conv2d(in_ch, mid_ch, 1, bias=False),
        nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=False),
        # Depthwise
        nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=(kernel_size - 1) // 2,
                  stride=stride, groups=mid_ch, bias=False),
        nn.BatchNorm2d(mid_ch, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=False),
        # Linear pointwise. Note that there's no activation.
        nn.Conv2d(mid_ch, out_ch, 1, bias=False),
        nn.BatchNorm2d(out_ch, momentum=BN_MOMENTUM))
def _stack_normal_conv(in_ch, out_ch, kernel_size, skip, dconv, stride, repeats, bn_momentum):
    assert repeats >= 1
    stack = []
    for i in range(repeats):
        s = stride if i == 0 else 1
        if dconv:
            modules = [
                nn.Conv2d(in_ch, in_ch, kernel_size, padding=kernel_size // 2,
                          stride=s, groups=in_ch, bias=False),
                nn.BatchNorm2d(in_ch, momentum=bn_momentum),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1, padding=0, stride=1, bias=False),
                nn.BatchNorm2d(out_ch, momentum=bn_momentum)
            ]
        else:
            modules = [
                nn.Conv2d(in_ch, out_ch, kernel_size, padding=kernel_size // 2, stride=s, bias=False),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(out_ch, momentum=bn_momentum)
            ]
        if skip and in_ch == out_ch and s == 1:
            # use different implementation for skip and noskip to align with pytorch
            stack.append(_ResidualBlock(nn.Sequential(*modules)))
        else:
            stack += modules
        in_ch = out_ch
    return stack
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super().__init__()
    self.net = nn.Sequential(
        nn.ReLU(),
        nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False),
        nn.BatchNorm2d(C_out, affine=affine)
    )
def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, skip, bn_momentum=0.1):
    super(_InvertedResidual, self).__init__()
    assert stride in [1, 2]
    assert kernel_size in [3, 5]
    mid_ch = in_ch * expansion_factor
    self.apply_residual = skip and in_ch == out_ch and stride == 1
    self.layers = nn.Sequential(
        # Pointwise
        nn.Conv2d(in_ch, mid_ch, 1, bias=False),
        nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
        nn.ReLU(inplace=False),
        # Depthwise
        nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
                  stride=stride, groups=mid_ch, bias=False),
        nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
        nn.ReLU(inplace=False),
        # Linear pointwise. Note that there's no activation.
        nn.Conv2d(mid_ch, out_ch, 1, bias=False),
        nn.BatchNorm2d(out_ch, momentum=bn_momentum))
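# For reference, a sketch of the forward pass that usually accompanies this block
# (following the torchvision MNASNet convention; the actual method may differ):
def forward(self, x):
    if self.apply_residual:
        # identity shortcut is only valid when the shape is preserved
        return self.layers(x) + x
    return self.layers(x)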
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            conv1x1(self.inplanes, planes * block.expansion, stride),
            nn.BatchNorm2d(planes * block.expansion),
        )

    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes))

    return nn.Sequential(*layers)
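# A hedged usage sketch: a torchvision-style ResNet constructor typically calls
# _make_layer once per stage. The block counts below are illustrative, not taken
# from this repository:
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=2)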
def __init__(self, input_size=224, first_conv_channels=16, last_conv_channels=1024, n_classes=1000, affine=False):
    super().__init__()

    assert input_size % 32 == 0
    self.stage_blocks = [4, 4, 8, 4]
    self.stage_channels = [64, 160, 320, 640]
    self._input_size = input_size
    self._feature_map_size = input_size
    self._first_conv_channels = first_conv_channels
    self._last_conv_channels = last_conv_channels
    self._n_classes = n_classes
    self._affine = affine
    self._layerchoice_count = 0

    # building first layer
    self.first_conv = nn.Sequential(
        nn.Conv2d(3, first_conv_channels, 3, 2, 1, bias=False),
        nn.BatchNorm2d(first_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    self._feature_map_size //= 2

    p_channels = first_conv_channels
    features = []
    for num_blocks, channels in zip(self.stage_blocks, self.stage_channels):
        features.extend(self._make_blocks(num_blocks, p_channels, channels))
        p_channels = channels
    self.features = nn.Sequential(*features)

    self.conv_last = nn.Sequential(
        nn.Conv2d(p_channels, last_conv_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(last_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    self.globalpool = nn.AvgPool2d(self._feature_map_size)
    self.dropout = nn.Dropout(0.1)
    self.classifier = nn.Sequential(
        nn.Linear(last_conv_channels, n_classes, bias=False),
    )
    self._initialize_weights()
def _stack_inverted_residual(in_ch, out_ch, kernel_size, skip, stride, exp_factor, repeats, bn_momentum):
    """ Creates a stack of inverted residuals. """
    assert repeats >= 1
    # First one has no skip, because feature map size changes.
    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, skip, bn_momentum=bn_momentum)
    remaining = []
    for _ in range(1, repeats):
        remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, skip, bn_momentum=bn_momentum))
    return nn.Sequential(first, *remaining)
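# Illustrative only: with MNASNet-style settings, one stage could be built as below.
# The argument values are assumptions for the sketch, not taken from this repository:
# stage = _stack_inverted_residual(in_ch=24, out_ch=40, kernel_size=5, skip=True,
#                                  stride=2, exp_factor=3, repeats=3, bn_momentum=0.1)
# The first block downsamples (stride=2); the remaining `repeats - 1` blocks keep
# stride 1 and can use the residual shortcut since in_ch == out_ch there.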
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
    super().__init__()
    self.net = nn.Sequential(
        nn.ReLU(),
        nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in, bias=False),
        nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(C_out, affine=affine)
    )
def __init__(self, in_channels: int, out_channels: int, mid_channels: int, *,
             kernel_size: int, stride: int, sequence: str = "pdp", affine: bool = True):
    super().__init__()
    assert stride in [1, 2]
    assert kernel_size in [3, 5, 7]
    self.channels = in_channels // 2 if stride == 1 else in_channels
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.mid_channels = mid_channels
    self.kernel_size = kernel_size
    self.stride = stride
    self.pad = kernel_size // 2
    self.oup_main = out_channels - self.channels
    self.affine = affine
    assert self.oup_main > 0

    self.branch_main = nn.Sequential(*self._decode_point_depth_conv(sequence))

    if stride == 2:
        self.branch_proj = nn.Sequential(
            # dw
            nn.Conv2d(self.channels, self.channels, kernel_size, stride, self.pad,
                      groups=self.channels, bias=False),
            nn.BatchNorm2d(self.channels, affine=affine),
            # pw-linear
            nn.Conv2d(self.channels, self.channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(self.channels, affine=affine),
            nn.ReLU(inplace=True))
    else:
        # empty block to be compatible with torchscript
        self.branch_proj = nn.Sequential()
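# The ShuffleNet block above relies on a channel-shuffle step in its forward pass,
# which is not shown in this snippet. A minimal, self-contained sketch of that
# operation, assuming two groups as in ShuffleNetV2 (hypothetical helper name):
import torch

def channel_shuffle_sketch(x: torch.Tensor, groups: int = 2) -> torch.Tensor:
    n, c, h, w = x.shape
    assert c % groups == 0
    # reshape into (N, groups, C // groups, H, W), swap the two channel axes,
    # then flatten back so channels from different groups are interleaved
    x = x.view(n, groups, c // groups, h, w)
    x = x.transpose(1, 2).contiguous()
    return x.view(n, c, h, w)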
def __init__(self):
    super(LargeModel, self).__init__()
    dim = 15
    n = 4 * 100
    self.emb = nn.Embedding(n, dim)
    self.lin1 = nn.Linear(dim, 1)
    self.seq = nn.Sequential(
        self.emb,
        self.lin1,
    )
def __init__(self, in_features, out_features, bias=True,
             use_bn=False, act_func=None, dropout_rate=0, ops_order='weight_bn_act'):
    super(LinearLayer, self).__init__()

    self.in_features = in_features
    self.out_features = out_features
    self.bias = bias

    self.use_bn = use_bn
    self.act_func = act_func
    self.dropout_rate = dropout_rate
    self.ops_order = ops_order

    """ modules """
    modules = {}
    # batch norm
    if self.use_bn:
        if self.bn_before_weight:
            modules['bn'] = nn.BatchNorm1d(in_features)
        else:
            modules['bn'] = nn.BatchNorm1d(out_features)
    else:
        modules['bn'] = None
    # activation
    modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act')
    # dropout
    if self.dropout_rate > 0:
        modules['dropout'] = nn.Dropout(self.dropout_rate, inplace=True)
    else:
        modules['dropout'] = None
    # linear
    modules['weight'] = {'linear': nn.Linear(self.in_features, self.out_features, self.bias)}

    # add modules
    for op in self.ops_list:
        if modules[op] is None:
            continue
        elif op == 'weight':
            if modules['dropout'] is not None:
                self.add_module('dropout', modules['dropout'])
            for key in modules['weight']:
                self.add_module(key, modules['weight'][key])
        else:
            self.add_module(op, modules[op])

    self.sequence = nn.Sequential(self._modules)
def __init__(self, channels: int, reduction: int = 4,
             activation_layer: Optional[Callable[..., nn.Module]] = None):
    super().__init__()
    if activation_layer is None:
        activation_layer = nn.Sigmoid
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Sequential(
        nn.Linear(channels, make_divisible(channels // reduction, 8)),
        nn.ReLU(inplace=True),
        nn.Linear(make_divisible(channels // reduction, 8), channels),
        activation_layer())
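# A sketch of the squeeze-and-excitation forward pass that typically goes with this
# module (the real forward in the repository may differ in details):
def forward(self, x):
    n, c, _, _ = x.shape
    scale = self.avg_pool(x).view(n, c)       # squeeze: global average pool to (N, C)
    scale = self.fc(scale).view(n, c, 1, 1)   # excite: two FC layers plus gating activation
    return x * scale                          # reweight each channel of the input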
def __init__(self, input_size, C, n_classes):
    """ assuming input size 7x7 or 8x8 """
    assert input_size in [7, 8]
    super().__init__()
    self.net = nn.Sequential(
        nn.ReLU(inplace=True),
        nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False),  # 2x2 out
        nn.Conv2d(C, 128, kernel_size=1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU(inplace=True),
        nn.Conv2d(128, 768, kernel_size=2, bias=False),  # 1x1 out
        nn.BatchNorm2d(768),
        nn.ReLU(inplace=True)
    )
    self.linear = nn.Linear(768, n_classes)
def __init__(self, in_channels, out_channels,
             use_bn=True, act_func='relu', dropout_rate=0, ops_order='weight_bn_act'):
    super(Base2DLayer, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels

    self.use_bn = use_bn
    self.act_func = act_func
    self.dropout_rate = dropout_rate
    self.ops_order = ops_order

    """ modules """
    modules = {}
    # batch norm
    if self.use_bn:
        if self.bn_before_weight:
            modules['bn'] = nn.BatchNorm2d(in_channels)
        else:
            modules['bn'] = nn.BatchNorm2d(out_channels)
    else:
        modules['bn'] = None
    # activation
    modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act')
    # dropout
    if self.dropout_rate > 0:
        modules['dropout'] = nn.Dropout2d(self.dropout_rate, inplace=True)
    else:
        modules['dropout'] = None
    # weight
    modules['weight'] = self.weight_op()

    # add modules
    for op in self.ops_list:
        if modules[op] is None:
            continue
        elif op == 'weight':
            if modules['dropout'] is not None:
                self.add_module('dropout', modules['dropout'])
            for key in modules['weight']:
                self.add_module(key, modules['weight'][key])
        else:
            self.add_module(op, modules[op])

    self.sequence = nn.Sequential(self._modules)
def __init__(self, num_labels: int = 1000,
             base_widths: Tuple[int, ...] = (32, 16, 32, 40, 80, 96, 192, 320, 1280),
             dropout_rate: float = 0.,
             width_mult: float = 1.0,
             bn_eps: float = 1e-3,
             bn_momentum: float = 0.1):
    super().__init__()

    assert len(base_widths) == 9
    # include the last stage info widths here
    widths = [make_divisible(width * width_mult, 8) for width in base_widths]
    downsamples = [True, False, True, True, True, False, True, False]

    self.num_labels = num_labels
    self.dropout_rate = dropout_rate
    self.bn_eps = bn_eps
    self.bn_momentum = bn_momentum

    self.stem = ConvBNReLU(3, widths[0], stride=2, norm_layer=nn.BatchNorm2d)

    blocks: List[nn.Module] = [
        # first stage is fixed
        DepthwiseSeparableConv(widths[0], widths[1], kernel_size=3, stride=1)
    ]

    # https://github.com/ultmaster/AceNAS/blob/46c8895fd8a05ffbc61a6b44f1e813f64b4f66b7/searchspace/proxylessnas/__init__.py#L21
    for stage in range(2, 8):
        # Rather than returning a fixed module here,
        # we return a builder that dynamically creates module for different `repeat_idx`.
        builder = inverted_residual_choice_builder(
            [3, 6], [3, 5, 7], downsamples[stage], widths[stage - 1], widths[stage], f's{stage}')
        if stage < 7:
            blocks.append(nn.Repeat(builder, (1, 4), label=f's{stage}_depth'))
        else:
            # No mutation for depth in the last stage.
            # Directly call builder to initiate one block
            blocks.append(builder(0))

    self.blocks = nn.Sequential(*blocks)

    # final layers
    self.feature_mix_layer = ConvBNReLU(widths[7], widths[8], kernel_size=1, norm_layer=nn.BatchNorm2d)
    self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
    self.dropout_layer = nn.Dropout(dropout_rate)
    self.classifier = nn.Linear(widths[-1], num_labels)

    reset_parameters(self, bn_momentum=bn_momentum, bn_eps=bn_eps)
def __init__(self, C: int, num_labels: int, dataset: Literal['imagenet', 'cifar']):
    super().__init__()
    if dataset == 'imagenet':
        # assuming input size 14x14
        stride = 2
    elif dataset == 'cifar':
        stride = 3

    self.features = nn.Sequential(
        nn.ReLU(inplace=True),
        nn.AvgPool2d(5, stride=stride, padding=0, count_include_pad=False),
        nn.Conv2d(C, 128, 1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU(inplace=True),
        nn.Conv2d(128, 768, 2, bias=False),
        nn.BatchNorm2d(768),
        nn.ReLU(inplace=True))
    self.classifier = nn.Linear(768, num_labels)
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super().__init__()
    self.net = nn.Sequential(
        DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine),
        DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine))
def __init__(self, input_size, in_channels, channels, n_classes, n_layers, n_nodes=4,
             stem_multiplier=3, auxiliary=False):
    super().__init__()
    self.in_channels = in_channels
    self.channels = channels
    self.n_classes = n_classes
    self.n_layers = n_layers
    self.aux_pos = 2 * n_layers // 3 if auxiliary else -1

    c_cur = stem_multiplier * self.channels
    self.stem = nn.Sequential(
        nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False),
        nn.BatchNorm2d(c_cur)
    )

    # for the first cell, stem is used for both s0 and s1
    # [!] channels_pp and channels_p is output channel size, but c_cur is input channel size.
    channels_pp, channels_p, c_cur = c_cur, c_cur, channels

    self.cells = nn.ModuleList()
    reduction_p, reduction = False, False
    for i in range(n_layers):
        reduction_p, reduction = reduction, False
        # Reduce featuremap size and double channels in 1/3 and 2/3 layer.
        if i in [n_layers // 3, 2 * n_layers // 3]:
            c_cur *= 2
            reduction = True

        cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction)
        self.cells.append(cell)
        c_cur_out = c_cur * n_nodes
        channels_pp, channels_p = channels_p, c_cur_out

        # if i == self.aux_pos:
        #     self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes)

    self.gap = nn.AdaptiveAvgPool2d(1)
    self.linear = nn.Linear(channels_p, n_classes)
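# A hedged sketch of the forward pass commonly paired with this DARTS-style network:
# the stem feeds both cell inputs, and each cell consumes the two previous feature maps.
# The auxiliary head is omitted here, matching the commented-out lines above.
def forward(self, x):
    s0 = s1 = self.stem(x)
    for cell in self.cells:
        s0, s1 = s1, cell(s0, s1)
    out = self.gap(s1)
    out = out.view(out.size(0), -1)  # flatten before the classifier
    return self.linear(out)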
def __init__(self, config):
    super(SNLIClassifier, self).__init__()
    self.config = config
    self.embed = nn.Embedding(config.n_embed, config.d_embed)
    self.projection = Linear(config.d_embed, config.d_proj)
    self.encoder = Encoder(config)
    self.dropout = nn.Dropout(p=config.dp_ratio)
    self.relu = nn.ReLU()
    seq_in_size = 2 * config.d_hidden
    if self.config.birnn:
        seq_in_size *= 2
    lin_config = [seq_in_size] * 2
    self.out = nn.Sequential(
        Linear(*lin_config), self.relu, self.dropout,
        Linear(*lin_config), self.relu, self.dropout,
        Linear(*lin_config), self.relu, self.dropout,
        Linear(seq_in_size, config.d_out))
def __init__(self, config):
    super(SNLIClassifier, self).__init__()
    self.embed = nn.Embedding(config["n_embed"], config["d_embed"])
    self.projection = Linear(config["d_embed"], config["d_proj"])
    self.encoder = Encoder(config)
    self.dropout = nn.Dropout(p=config["dp_ratio"])
    self.relu = nn.ReLU()
    seq_in_size = 2 * config["d_hidden"]
    if config["birnn"]:
        seq_in_size *= 2
    lin_config = [seq_in_size] * 2
    self.out = nn.Sequential(
        Linear(*lin_config), self.relu, self.dropout,
        Linear(*lin_config), self.relu, self.dropout,
        Linear(*lin_config), self.relu, self.dropout,
        Linear(seq_in_size, config["d_out"]))
    self.fix_emb = config["fix_emb"]
    self.project = config["projection"]
def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
    super(FacConv, self).__init__()
    self.net = nn.Sequential(
        nn.ReLU(),
        nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
        nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
        nn.BatchNorm2d(C_out, affine=affine))
def __init__(self, nc, ndf):
    super(DCGANDiscriminator, self).__init__()
    self.main = nn.Sequential(
        # input is (nc) x 64 x 64
        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf) x 32 x 32
        nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 2),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*2) x 16 x 16
        nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 4),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*4) x 8 x 8
        nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 8),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*8) x 4 x 4
        nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
        nn.Sigmoid())
def __init__(self, nz, ngf, nc):
    super(DCGANGenerator, self).__init__()
    self.main = nn.Sequential(
        # input is Z, going into a convolution
        nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
        nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        # state size. (ngf) x 32 x 32
        nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
        nn.Tanh()
        # state size. (nc) x 64 x 64
    )
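# A usage sketch for the two DCGAN modules above, assuming each class defines a forward()
# that simply runs its input through self.main (standard in DCGAN examples). The sizes
# below are the usual DCGAN defaults, assumed here purely for illustration:
import torch

nz, ngf, ndf, nc = 100, 64, 64, 3
netG = DCGANGenerator(nz, ngf, nc)
netD = DCGANDiscriminator(nc, ndf)

noise = torch.randn(16, nz, 1, 1)
fake = netG(noise)    # expected shape: (16, 3, 64, 64)
score = netD(fake)    # expected shape: (16, 1, 1, 1), sigmoid probabilities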
def __init__(self,
             op_candidates: List[str],
             merge_op: Literal['all', 'loose_end'] = 'all',
             num_nodes_per_cell: int = 4,
             width: Union[Tuple[int, ...], int] = 16,
             num_cells: Union[Tuple[int, ...], int] = 20,
             dataset: Literal['cifar', 'imagenet'] = 'imagenet',
             auxiliary_loss: bool = False):
    super().__init__()

    self.dataset = dataset
    self.num_labels = 10 if dataset == 'cifar' else 1000
    self.auxiliary_loss = auxiliary_loss

    # preprocess the specified width and depth
    if isinstance(width, Iterable):
        C = nn.ValueChoice(list(width), label='width')
    else:
        C = width

    self.num_cells: nn.MaybeChoice[int] = cast(int, num_cells)
    if isinstance(num_cells, Iterable):
        self.num_cells = nn.ValueChoice(list(num_cells), label='depth')
    num_cells_per_stage = [(i + 1) * self.num_cells // 3 - i * self.num_cells // 3 for i in range(3)]

    # auxiliary head is different for network targetted at different datasets
    if dataset == 'imagenet':
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, cast(int, C // 2), kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(cast(int, C // 2)),
            nn.ReLU(inplace=True),
            nn.Conv2d(cast(int, C // 2), cast(int, C), 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(cast(int, C), cast(int, C), 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_pprev = C_prev = C_curr = C
        last_cell_reduce = True
    elif dataset == 'cifar':
        self.stem = nn.Sequential(
            nn.Conv2d(3, cast(int, 3 * C), 3, padding=1, bias=False),
            nn.BatchNorm2d(cast(int, 3 * C))
        )
        C_pprev = C_prev = 3 * C
        C_curr = C
        last_cell_reduce = False
    else:
        raise ValueError(f'Unsupported dataset: {dataset}')

    self.stages = nn.ModuleList()
    for stage_idx in range(3):
        if stage_idx > 0:
            C_curr *= 2
        # For a stage, we get C_in, C_curr, and C_out.
        # C_in is only used in the first cell.
        # C_curr is number of channels for each operator in current stage.
        # C_out is usually `C * num_nodes_per_cell` because of concat operator.
        cell_builder = CellBuilder(op_candidates, C_pprev, C_prev, C_curr, num_nodes_per_cell,
                                   merge_op, stage_idx > 0, last_cell_reduce)
        stage: Union[NDSStage, nn.Sequential] = NDSStage(cell_builder, num_cells_per_stage[stage_idx])

        if isinstance(stage, NDSStage):
            stage.estimated_out_channels_prev = cast(int, C_prev)
            stage.estimated_out_channels = cast(int, C_curr * num_nodes_per_cell)
            stage.downsampling = stage_idx > 0

        self.stages.append(stage)

        # NOTE: output_node_indices will be computed on-the-fly in trial code.
        # When constructing model space, it's just all the nodes in the cell,
        # which happens to be the case of one-shot supernet.

        # C_pprev is output channel number of last second cell among all the cells already built.
        if len(stage) > 1:
            # Contains more than one cell
            C_pprev = len(cast(nn.Cell, stage[-2]).output_node_indices) * C_curr
        else:
            # Look up in the out channels of last stage.
            C_pprev = C_prev

        # This was originally,
        # C_prev = num_nodes_per_cell * C_curr.
        # but due to loose end, it becomes,
        C_prev = len(cast(nn.Cell, stage[-1]).output_node_indices) * C_curr

        # Useful in aligning the pprev and prev cell.
        last_cell_reduce = cell_builder.last_cell_reduce

        if stage_idx == 2:
            C_to_auxiliary = C_prev

    if auxiliary_loss:
        assert isinstance(self.stages[2], nn.Sequential), 'Auxiliary loss can only be enabled in retrain mode.'
        self.stages[2] = SequentialBreakdown(cast(nn.Sequential, self.stages[2]))
        self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, self.num_labels, dataset=self.dataset)  # type: ignore

    self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
    self.classifier = nn.Linear(cast(int, C_prev), self.num_labels)
def __init__(self,
             op_candidates: List[str],
             merge_op: Literal['all', 'loose_end'] = 'all',
             num_nodes_per_cell: int = 4,
             width: Union[Tuple[int], int] = 16,
             num_cells: Union[Tuple[int], int] = 20,
             dataset: Literal['cifar', 'imagenet'] = 'imagenet',
             auxiliary_loss: bool = False):
    super().__init__()

    self.dataset = dataset
    self.num_labels = 10 if dataset == 'cifar' else 1000
    self.auxiliary_loss = auxiliary_loss

    # preprocess the specified width and depth
    if isinstance(width, Iterable):
        C = nn.ValueChoice(list(width), label='width')
    else:
        C = width

    if isinstance(num_cells, Iterable):
        num_cells = nn.ValueChoice(list(num_cells), label='depth')
    num_cells_per_stage = [i * num_cells // 3 - (i - 1) * num_cells // 3 for i in range(3)]

    # auxiliary head is different for network targetted at different datasets
    if dataset == 'imagenet':
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_pprev = C_prev = C_curr = C
        last_cell_reduce = True
    elif dataset == 'cifar':
        self.stem = nn.Sequential(
            nn.Conv2d(3, 3 * C, 3, padding=1, bias=False),
            nn.BatchNorm2d(3 * C)
        )
        C_pprev = C_prev = 3 * C
        C_curr = C
        last_cell_reduce = False

    self.stages = nn.ModuleList()
    for stage_idx in range(3):
        if stage_idx > 0:
            C_curr *= 2
        # For a stage, we get C_in, C_curr, and C_out.
        # C_in is only used in the first cell.
        # C_curr is number of channels for each operator in current stage.
        # C_out is usually `C * num_nodes_per_cell` because of concat operator.
        cell_builder = CellBuilder(op_candidates, C_pprev, C_prev, C_curr, num_nodes_per_cell,
                                   merge_op, stage_idx > 0, last_cell_reduce)
        stage = nn.Repeat(cell_builder, num_cells_per_stage[stage_idx])
        self.stages.append(stage)

        # C_pprev is output channel number of last second cell among all the cells already built.
        if len(stage) > 1:
            # Contains more than one cell
            C_pprev = len(stage[-2].output_node_indices) * C_curr
        else:
            # Look up in the out channels of last stage.
            C_pprev = C_prev

        # This was originally,
        # C_prev = num_nodes_per_cell * C_curr.
        # but due to loose end, it becomes,
        C_prev = len(stage[-1].output_node_indices) * C_curr

        # Useful in aligning the pprev and prev cell.
        last_cell_reduce = cell_builder.last_cell_reduce

        if stage_idx == 2:
            C_to_auxiliary = C_prev

    if auxiliary_loss:
        assert isinstance(self.stages[2], nn.Sequential), 'Auxiliary loss can only be enabled in retrain mode.'
        self.stages[2] = SequentialBreakdown(self.stages[2])
        self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, self.num_labels, dataset=self.dataset)

    self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
    self.classifier = nn.Linear(C_prev, self.num_labels)
    lambda C, stride, affine: nn.AvgPool2d(5, stride=stride, padding=2, count_include_pad=False),
'max_pool_2x2': lambda C, stride, affine: nn.MaxPool2d(2, stride=stride, padding=0),
'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
'max_pool_5x5': lambda C, stride, affine: nn.MaxPool2d(5, stride=stride, padding=2),
'max_pool_7x7': lambda C, stride, affine: nn.MaxPool2d(7, stride=stride, padding=3),
'skip_connect': lambda C, stride, affine: nn.Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
'conv_1x1': lambda C, stride, affine: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C, C, 1, stride=stride, padding=0, bias=False),
    nn.BatchNorm2d(C, affine=affine)),
'conv_3x3': lambda C, stride, affine: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C, C, 3, stride=stride, padding=1, bias=False),
    nn.BatchNorm2d(C, affine=affine)),
'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
'dil_conv_5x5':
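# A sketch of how such a candidate-operation dictionary is usually consumed when a cell
# is built: each name maps to a factory taking (channels, stride, affine). `OPS` is the
# assumed name of the dict this fragment belongs to; the call below is illustrative only.
# op = OPS['sep_conv_3x3'](C=16, stride=1, affine=True)
# out = op(torch.randn(2, 16, 32, 32))   # same spatial size, 16 channels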
def __init__(self, num_labels: int = 1000,
             base_widths: Tuple[int, ...] = (16, 16, 32, 64, 128, 256, 512, 1024),
             width_multipliers: Tuple[float, ...] = (0.5, 0.625, 0.75, 1.0, 1.25, 1.5, 2.0),
             expand_ratios: Tuple[int, ...] = (1, 2, 3, 4, 5, 6),
             dropout_rate: float = 0.2,
             bn_eps: float = 1e-3,
             bn_momentum: float = 0.1):
    super().__init__()

    self.widths = [
        nn.ValueChoice([make_divisible(base_width * mult, 8) for mult in width_multipliers], label=f'width_{i}')
        for i, base_width in enumerate(base_widths)
    ]
    self.expand_ratios = expand_ratios

    blocks = [
        # Stem
        ConvBNReLU(3, self.widths[0], nn.ValueChoice([3, 5], label='ks_0'), stride=2, activation_layer=h_swish),
        SeparableConv(self.widths[0], self.widths[0], activation_layer=nn.ReLU),
    ]

    # counting for kernel sizes and expand ratios
    self.layer_count = 2

    blocks += [
        # Body
        self._make_stage(1, self.widths[0], self.widths[1], False, 2, nn.ReLU),
        self._make_stage(2, self.widths[1], self.widths[2], True, 2, nn.ReLU),
        self._make_stage(1, self.widths[2], self.widths[3], False, 2, h_swish),
        self._make_stage(1, self.widths[3], self.widths[4], True, 1, h_swish),
        self._make_stage(1, self.widths[4], self.widths[5], True, 2, h_swish),
    ]

    # Head
    blocks += [
        ConvBNReLU(self.widths[5], self.widths[6], 1, 1, activation_layer=h_swish),
        nn.AdaptiveAvgPool2d(1),
        ConvBNReLU(self.widths[6], self.widths[7], 1, 1, norm_layer=nn.Identity, activation_layer=h_swish),
    ]

    self.blocks = nn.Sequential(*blocks)

    self.classifier = nn.Sequential(
        nn.Dropout(dropout_rate),
        nn.Linear(self.widths[7], num_labels),
    )

    reset_parameters(self, bn_momentum=bn_momentum, bn_eps=bn_eps)
def __init__(self, num_labels: int = 1000, channel_search: bool = False, affine: bool = False):
    super().__init__()

    self.num_labels = num_labels
    self.channel_search = channel_search
    self.affine = affine

    # the block number in each stage. 4 stages in total. 20 blocks in total.
    self.stage_repeats = [4, 4, 8, 4]

    # output channels for all stages, including the very first layer and the very last layer
    self.stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]

    # building first layer
    out_channels = self.stage_out_channels[1]
    self.first_conv = nn.Sequential(
        nn.Conv2d(3, out_channels, 3, 2, 1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )

    self.features = []

    global_block_idx = 0
    for stage_idx, num_repeat in enumerate(self.stage_repeats):
        for block_idx in range(num_repeat):
            # count global index to give names to choices
            global_block_idx += 1

            # get ready for input and output
            in_channels = out_channels
            out_channels = self.stage_out_channels[stage_idx + 2]
            stride = 2 if block_idx == 0 else 1

            # mid channels can be searched
            base_mid_channels = out_channels // 2
            if self.channel_search:
                k_choice_list = [int(base_mid_channels * (.2 * k)) for k in range(1, 9)]
                mid_channels = nn.ValueChoice(k_choice_list, label=f'channel_{global_block_idx}')
            else:
                mid_channels = int(base_mid_channels)

            choice_block = nn.LayerChoice([
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=3, stride=stride, affine=affine),
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=5, stride=stride, affine=affine),
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=7, stride=stride, affine=affine),
                ShuffleXceptionBlock(in_channels, out_channels, mid_channels=mid_channels,
                                     stride=stride, affine=affine)
            ], label=f'layer_{global_block_idx}')
            self.features.append(choice_block)

    self.features = nn.Sequential(*self.features)

    # final layers
    last_conv_channels = self.stage_out_channels[-1]
    self.conv_last = nn.Sequential(
        nn.Conv2d(out_channels, last_conv_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(last_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    self.globalpool = nn.AdaptiveAvgPool2d((1, 1))
    self.dropout = nn.Dropout(0.1)
    self.classifier = nn.Sequential(
        nn.Linear(last_conv_channels, num_labels, bias=False),
    )
    self._initialize_weights()