def __init__(self, in_channels):
    super().__init__()
    self.conv1 = nn.Conv2d(in_channels, 10, 3)
    # Searchable choice between a 3x3 conv and a max-pooling layer.
    self.conv2 = nn.LayerChoice([nn.Conv2d(10, 10, 3), nn.MaxPool2d(3)])
    # Searchable choice between a skip connection and a 1x1 conv.
    self.conv3 = nn.LayerChoice([nn.Identity(), nn.Conv2d(10, 10, 1)])
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(10, 1)
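
# A possible forward pass for the space above (a sketch, not part of the original
# snippet). Both `conv2` candidates keep 10 channels, and the trailing
# AdaptiveAvgPool2d absorbs their differing spatial sizes, so any sampled
# architecture type-checks:
import torch.nn.functional as F

def forward(self, x):
    x = F.relu(self.conv1(x))
    x = F.relu(self.conv2(x))
    x = self.conv3(x)
    x = self.avgpool(x).flatten(1)
    return self.fc(x)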
def __init__(self, block, layers, num_classes=1000):
    super(ResNet, self).__init__()
    self.inplanes = 64
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    # Standard He initialization for convs; BatchNorm starts at identity (scale 1, shift 0).
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            torch.nn.init.constant_(m.weight, 1)
            torch.nn.init.constant_(m.bias, 0)
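
# The (block, layers) pair selects the ResNet variant. A hedged usage sketch, assuming
# the usual torchvision-style BasicBlock/Bottleneck residual blocks are in scope:
resnet18 = ResNet(BasicBlock, [2, 2, 2, 2])   # block.expansion == 1
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3])   # block.expansion == 4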
def __init__(self, channels: int, reduction: int = 4,
             activation_layer: Optional[Callable[..., nn.Module]] = None):
    super().__init__()
    if activation_layer is None:
        activation_layer = nn.Sigmoid
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    # Squeeze-and-excitation bottleneck: reduce, activate, expand, gate.
    self.fc = nn.Sequential(
        nn.Linear(channels, make_divisible(channels // reduction, 8)),
        nn.ReLU(inplace=True),
        nn.Linear(make_divisible(channels // reduction, 8), channels),
        activation_layer(),
    )
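
# A typical forward for this squeeze-and-excitation module (a sketch; the actual
# forward method is not shown above): pooled per-channel statistics pass through the
# bottleneck MLP, and the gated output rescales the input feature map channel-wise.
def forward(self, x):
    b, c, _, _ = x.size()
    scale = self.fc(self.avg_pool(x).view(b, c))
    return x * scale.view(b, c, 1, 1)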
def __init__(self, num_labels: int = 1000,
             base_widths: Tuple[int, ...] = (32, 16, 32, 40, 80, 96, 192, 320, 1280),
             dropout_rate: float = 0.,
             width_mult: float = 1.0,
             bn_eps: float = 1e-3,
             bn_momentum: float = 0.1):
    super().__init__()

    assert len(base_widths) == 9  # include the last stage's width here
    widths = [make_divisible(width * width_mult, 8) for width in base_widths]
    downsamples = [True, False, True, True, True, False, True, False]

    self.num_labels = num_labels
    self.dropout_rate = dropout_rate
    self.bn_eps = bn_eps
    self.bn_momentum = bn_momentum

    self.stem = ConvBNReLU(3, widths[0], stride=2, norm_layer=nn.BatchNorm2d)

    blocks: List[nn.Module] = [
        # first stage is fixed
        DepthwiseSeparableConv(widths[0], widths[1], kernel_size=3, stride=1)
    ]

    # https://github.com/ultmaster/AceNAS/blob/46c8895fd8a05ffbc61a6b44f1e813f64b4f66b7/searchspace/proxylessnas/__init__.py#L21
    for stage in range(2, 8):
        # Rather than returning a fixed module here,
        # we return a builder that dynamically creates a module for each `repeat_idx`.
        builder = inverted_residual_choice_builder(
            [3, 6], [3, 5, 7], downsamples[stage], widths[stage - 1], widths[stage], f's{stage}')
        if stage < 7:
            blocks.append(nn.Repeat(builder, (1, 4), label=f's{stage}_depth'))
        else:
            # No mutation for depth in the last stage.
            # Directly call the builder to instantiate one block.
            blocks.append(builder(0))

    self.blocks = nn.Sequential(*blocks)

    # final layers
    self.feature_mix_layer = ConvBNReLU(widths[7], widths[8], kernel_size=1, norm_layer=nn.BatchNorm2d)
    self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
    self.dropout_layer = nn.Dropout(dropout_rate)
    self.classifier = nn.Linear(widths[-1], num_labels)

    reset_parameters(self, bn_momentum=bn_momentum, bn_eps=bn_eps)
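
# `nn.Repeat(builder, (1, 4), ...)` makes stage depth itself searchable: the chosen
# depth is drawn from 1..4, and the builder is called once per position, so blocks can
# differ by `repeat_idx`. A toy sketch of the same pattern (names are illustrative):
def toy_builder(repeat_idx):
    stride = 2 if repeat_idx == 0 else 1  # only the first block in a stage downsamples
    return nn.Conv2d(16, 16, 3, stride=stride, padding=1)

toy_stage = nn.Repeat(toy_builder, (1, 4), label='toy_depth')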
def __init__(self, input_size, in_channels, channels, n_classes, n_layers,
             n_nodes=4, stem_multiplier=3, auxiliary=False):
    super().__init__()
    self.in_channels = in_channels
    self.channels = channels
    self.n_classes = n_classes
    self.n_layers = n_layers
    self.aux_pos = 2 * n_layers // 3 if auxiliary else -1

    c_cur = stem_multiplier * self.channels
    self.stem = nn.Sequential(
        nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False),
        nn.BatchNorm2d(c_cur)
    )

    # For the first cell, the stem is used for both s0 and s1.
    # [!] channels_pp and channels_p are output channel sizes, but c_cur is the input channel size.
    channels_pp, channels_p, c_cur = c_cur, c_cur, channels

    self.cells = nn.ModuleList()
    reduction_p, reduction = False, False
    for i in range(n_layers):
        reduction_p, reduction = reduction, False
        # Reduce feature map size and double channels at the 1/3 and 2/3 layers.
        if i in [n_layers // 3, 2 * n_layers // 3]:
            c_cur *= 2
            reduction = True

        cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction)
        self.cells.append(cell)
        c_cur_out = c_cur * n_nodes
        channels_pp, channels_p = channels_p, c_cur_out

        # if i == self.aux_pos:
        #     self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes)

    self.gap = nn.AdaptiveAvgPool2d(1)
    self.linear = nn.Linear(channels_p, n_classes)
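
# A typical DARTS CIFAR-10 configuration (a sketch; the enclosing class name `CNN` is
# an assumption, not shown above): 8 cells starting from 16 channels, with reduction
# cells placed at depths n_layers // 3 and 2 * n_layers // 3 as coded above.
model = CNN(input_size=32, in_channels=3, channels=16, n_classes=10, n_layers=8)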
def __init__(self,
             op_candidates: List[str],
             merge_op: Literal['all', 'loose_end'] = 'all',
             num_nodes_per_cell: int = 4,
             width: Union[Tuple[int, ...], int] = 16,
             num_cells: Union[Tuple[int, ...], int] = 20,
             dataset: Literal['cifar', 'imagenet'] = 'imagenet',
             auxiliary_loss: bool = False):
    super().__init__()

    self.dataset = dataset
    self.num_labels = 10 if dataset == 'cifar' else 1000
    self.auxiliary_loss = auxiliary_loss

    # preprocess the specified width and depth
    if isinstance(width, Iterable):
        C = nn.ValueChoice(list(width), label='width')
    else:
        C = width

    self.num_cells: nn.MaybeChoice[int] = cast(int, num_cells)
    if isinstance(num_cells, Iterable):
        self.num_cells = nn.ValueChoice(list(num_cells), label='depth')
    num_cells_per_stage = [(i + 1) * self.num_cells // 3 - i * self.num_cells // 3 for i in range(3)]

    # the auxiliary head differs for networks targeted at different datasets
    if dataset == 'imagenet':
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, cast(int, C // 2), kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(cast(int, C // 2)),
            nn.ReLU(inplace=True),
            nn.Conv2d(cast(int, C // 2), cast(int, C), 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(cast(int, C), cast(int, C), 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_pprev = C_prev = C_curr = C
        last_cell_reduce = True
    elif dataset == 'cifar':
        self.stem = nn.Sequential(
            nn.Conv2d(3, cast(int, 3 * C), 3, padding=1, bias=False),
            nn.BatchNorm2d(cast(int, 3 * C))
        )
        C_pprev = C_prev = 3 * C
        C_curr = C
        last_cell_reduce = False
    else:
        raise ValueError(f'Unsupported dataset: {dataset}')

    self.stages = nn.ModuleList()
    for stage_idx in range(3):
        if stage_idx > 0:
            C_curr *= 2
        # For a stage, we get C_in, C_curr, and C_out.
        # C_in is only used in the first cell.
        # C_curr is the number of channels for each operator in the current stage.
        # C_out is usually `C * num_nodes_per_cell` because of the concat operator.
        cell_builder = CellBuilder(op_candidates, C_pprev, C_prev, C_curr,
                                   num_nodes_per_cell, merge_op, stage_idx > 0, last_cell_reduce)
        stage: Union[NDSStage, nn.Sequential] = NDSStage(cell_builder, num_cells_per_stage[stage_idx])

        if isinstance(stage, NDSStage):
            stage.estimated_out_channels_prev = cast(int, C_prev)
            stage.estimated_out_channels = cast(int, C_curr * num_nodes_per_cell)
            stage.downsampling = stage_idx > 0

        self.stages.append(stage)

        # NOTE: output_node_indices will be computed on-the-fly in trial code.
        # When constructing the model space, it's just all the nodes in the cell,
        # which happens to be the case for the one-shot supernet.

        # C_pprev is the output channel number of the second-to-last cell among all the cells already built.
        if len(stage) > 1:
            # Contains more than one cell
            C_pprev = len(cast(nn.Cell, stage[-2]).output_node_indices) * C_curr
        else:
            # Look up the out channels of the last stage.
            C_pprev = C_prev

        # This was originally
        #   C_prev = num_nodes_per_cell * C_curr
        # but due to loose ends it becomes:
        C_prev = len(cast(nn.Cell, stage[-1]).output_node_indices) * C_curr

        # Useful in aligning the pprev and prev cells.
        last_cell_reduce = cell_builder.last_cell_reduce

        if stage_idx == 2:
            C_to_auxiliary = C_prev

    if auxiliary_loss:
        assert isinstance(self.stages[2], nn.Sequential), 'Auxiliary loss can only be enabled in retrain mode.'
        self.stages[2] = SequentialBreakdown(cast(nn.Sequential, self.stages[2]))
        self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, self.num_labels, dataset=self.dataset)  # type: ignore

    self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
    self.classifier = nn.Linear(cast(int, C_prev), self.num_labels)
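
# When `width` is a tuple, `C` above is an nn.ValueChoice, and arithmetic such as
# `C // 2` or `3 * C` builds derived expressions that are resolved only once a width
# is sampled; the `cast(int, ...)` calls merely appease static type checkers.
# A small illustration (label and values are illustrative):
C = nn.ValueChoice([16, 24, 32], label='toy_width')
half = C // 2  # resolves to 8, 12, or 16, tied to the same 'toy_width' decision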
def __init__(self, num_labels: int = 1000, channel_search: bool = False, affine: bool = False):
    super().__init__()

    self.num_labels = num_labels
    self.channel_search = channel_search
    self.affine = affine

    # number of blocks in each stage; 4 stages and 20 blocks in total
    self.stage_repeats = [4, 4, 8, 4]

    # output channels for all stages, including the very first layer and the very last layer
    self.stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]

    # building the first layer
    out_channels = self.stage_out_channels[1]
    self.first_conv = nn.Sequential(
        nn.Conv2d(3, out_channels, 3, 2, 1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )

    self.features = []

    global_block_idx = 0
    for stage_idx, num_repeat in enumerate(self.stage_repeats):
        for block_idx in range(num_repeat):
            # count the global index to give names to choices
            global_block_idx += 1

            # get ready for input and output
            in_channels = out_channels
            out_channels = self.stage_out_channels[stage_idx + 2]
            stride = 2 if block_idx == 0 else 1

            # mid channels can be searched
            base_mid_channels = out_channels // 2
            if self.channel_search:
                k_choice_list = [int(base_mid_channels * (.2 * k)) for k in range(1, 9)]
                mid_channels = nn.ValueChoice(k_choice_list, label=f'channel_{global_block_idx}')
            else:
                mid_channels = int(base_mid_channels)

            choice_block = nn.LayerChoice([
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=3, stride=stride, affine=affine),
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=5, stride=stride, affine=affine),
                ShuffleNetBlock(in_channels, out_channels, mid_channels=mid_channels,
                                kernel_size=7, stride=stride, affine=affine),
                ShuffleXceptionBlock(in_channels, out_channels, mid_channels=mid_channels,
                                     stride=stride, affine=affine)
            ], label=f'layer_{global_block_idx}')
            self.features.append(choice_block)

    self.features = nn.Sequential(*self.features)

    # final layers
    last_conv_channels = self.stage_out_channels[-1]
    self.conv_last = nn.Sequential(
        nn.Conv2d(out_channels, last_conv_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(last_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    self.globalpool = nn.AdaptiveAvgPool2d((1, 1))
    self.dropout = nn.Dropout(0.1)
    self.classifier = nn.Sequential(
        nn.Linear(last_conv_channels, num_labels, bias=False),
    )

    self._initialize_weights()
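
# Each of the 20 blocks is an independent 4-way LayerChoice (three ShuffleNetBlock
# kernel sizes plus one Xception-style block), so the operator search space alone
# already contains 4 ** 20 candidate architectures; enabling `channel_search` adds an
# 8-way mid-channel choice per block:
num_blocks = sum([4, 4, 8, 4])           # 20 choice blocks in total
op_space_size = 4 ** num_blocks          # 1_099_511_627_776 (~1.1e12)
full_space_size = (4 * 8) ** num_blocks  # with channel_search=True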
def __init__(self, num_labels: int = 1000,
             base_widths: Tuple[int, ...] = (16, 16, 32, 64, 128, 256, 512, 1024),
             width_multipliers: Tuple[float, ...] = (0.5, 0.625, 0.75, 1.0, 1.25, 1.5, 2.0),
             expand_ratios: Tuple[int, ...] = (1, 2, 3, 4, 5, 6),
             dropout_rate: float = 0.2,
             bn_eps: float = 1e-3,
             bn_momentum: float = 0.1):
    super().__init__()

    # Each stage's width is a searchable choice over the base width scaled by every multiplier.
    self.widths = [
        nn.ValueChoice([make_divisible(base_width * mult, 8) for mult in width_multipliers],
                       label=f'width_{i}')
        for i, base_width in enumerate(base_widths)
    ]
    self.expand_ratios = expand_ratios

    blocks = [
        # Stem
        ConvBNReLU(3, self.widths[0], nn.ValueChoice([3, 5], label='ks_0'),
                   stride=2, activation_layer=h_swish),
        SeparableConv(self.widths[0], self.widths[0], activation_layer=nn.ReLU),
    ]

    # counter for kernel sizes and expand ratios
    self.layer_count = 2

    blocks += [
        # Body
        self._make_stage(1, self.widths[0], self.widths[1], False, 2, nn.ReLU),
        self._make_stage(2, self.widths[1], self.widths[2], True, 2, nn.ReLU),
        self._make_stage(1, self.widths[2], self.widths[3], False, 2, h_swish),
        self._make_stage(1, self.widths[3], self.widths[4], True, 1, h_swish),
        self._make_stage(1, self.widths[4], self.widths[5], True, 2, h_swish),
    ]

    # Head
    blocks += [
        ConvBNReLU(self.widths[5], self.widths[6], 1, 1, activation_layer=h_swish),
        nn.AdaptiveAvgPool2d(1),
        ConvBNReLU(self.widths[6], self.widths[7], 1, 1,
                   norm_layer=nn.Identity, activation_layer=h_swish),
    ]

    self.blocks = nn.Sequential(*blocks)

    self.classifier = nn.Sequential(
        nn.Dropout(dropout_rate),
        nn.Linear(self.widths[7], num_labels),
    )

    reset_parameters(self, bn_momentum=bn_momentum, bn_eps=bn_eps)
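
# Each `width_{i}` ValueChoice is created once and then shared wherever it appears
# (e.g. `self.widths[0]` feeds the stem conv, the separable conv, and the first stage),
# so the sampled widths of adjacent blocks stay consistent. Instantiation is plain
# (the class name `MobileNetV3Space` is an assumption; it is not shown in the snippet):
space = MobileNetV3Space(num_labels=1000, dropout_rate=0.2)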
def __init__(self,
             op_candidates: List[str],
             merge_op: Literal['all', 'loose_end'] = 'all',
             num_nodes_per_cell: int = 4,
             width: Union[Tuple[int], int] = 16,
             num_cells: Union[Tuple[int], int] = 20,
             dataset: Literal['cifar', 'imagenet'] = 'imagenet',
             auxiliary_loss: bool = False):
    super().__init__()

    self.dataset = dataset
    self.num_labels = 10 if dataset == 'cifar' else 1000
    self.auxiliary_loss = auxiliary_loss

    # preprocess the specified width and depth
    if isinstance(width, Iterable):
        C = nn.ValueChoice(list(width), label='width')
    else:
        C = width

    if isinstance(num_cells, Iterable):
        num_cells = nn.ValueChoice(list(num_cells), label='depth')
    # split the cells evenly across the three stages
    num_cells_per_stage = [(i + 1) * num_cells // 3 - i * num_cells // 3 for i in range(3)]

    # the auxiliary head differs for networks targeted at different datasets
    if dataset == 'imagenet':
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_pprev = C_prev = C_curr = C
        last_cell_reduce = True
    elif dataset == 'cifar':
        self.stem = nn.Sequential(
            nn.Conv2d(3, 3 * C, 3, padding=1, bias=False),
            nn.BatchNorm2d(3 * C)
        )
        C_pprev = C_prev = 3 * C
        C_curr = C
        last_cell_reduce = False
    else:
        raise ValueError(f'Unsupported dataset: {dataset}')

    self.stages = nn.ModuleList()
    for stage_idx in range(3):
        if stage_idx > 0:
            C_curr *= 2
        # For a stage, we get C_in, C_curr, and C_out.
        # C_in is only used in the first cell.
        # C_curr is the number of channels for each operator in the current stage.
        # C_out is usually `C * num_nodes_per_cell` because of the concat operator.
        cell_builder = CellBuilder(op_candidates, C_pprev, C_prev, C_curr,
                                   num_nodes_per_cell, merge_op, stage_idx > 0, last_cell_reduce)
        stage = nn.Repeat(cell_builder, num_cells_per_stage[stage_idx])
        self.stages.append(stage)

        # C_pprev is the output channel number of the second-to-last cell among all the cells already built.
        if len(stage) > 1:
            # Contains more than one cell
            C_pprev = len(stage[-2].output_node_indices) * C_curr
        else:
            # Look up the out channels of the last stage.
            C_pprev = C_prev

        # This was originally
        #   C_prev = num_nodes_per_cell * C_curr
        # but due to loose ends it becomes:
        C_prev = len(stage[-1].output_node_indices) * C_curr

        # Useful in aligning the pprev and prev cells.
        last_cell_reduce = cell_builder.last_cell_reduce

        if stage_idx == 2:
            C_to_auxiliary = C_prev

    if auxiliary_loss:
        assert isinstance(self.stages[2], nn.Sequential), 'Auxiliary loss can only be enabled in retrain mode.'
        self.stages[2] = SequentialBreakdown(self.stages[2])
        self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, self.num_labels, dataset=self.dataset)

    self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
    self.classifier = nn.Linear(C_prev, self.num_labels)
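
# The bookkeeping above relies on `nn.Repeat` behaving like a sequential container:
# it supports len() and indexing, which is how `stage[-2]` / `stage[-1]` read the
# `output_node_indices` of the second-to-last and last instantiated cells. A sketch
# of the same access pattern (reusing the builder variable from above):
toy_stage = nn.Repeat(cell_builder, 3)
assert len(toy_stage) == 3
last_cell = toy_stage[-1]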
def __init__(self, width_stages=[24, 40, 80, 96, 192, 320],
             n_cell_stages=[4, 4, 4, 4, 4, 1],
             stride_stages=[2, 2, 2, 1, 2, 1],
             width_mult=1, n_classes=1000, dropout_rate=0, bn_param=(0.1, 1e-3)):
    """
    Parameters
    ----------
    width_stages : list of int
        width (output channels) of each cell stage in the block
    n_cell_stages : list of int
        number of cells in each cell stage
    stride_stages : list of int
        stride of each cell stage in the block
    width_mult : int
        the scale factor of width
    """
    super(SearchMobileNet, self).__init__()

    input_channel = putils.make_divisible(32 * width_mult, 8)
    first_cell_width = putils.make_divisible(16 * width_mult, 8)
    # Rebuild the list instead of mutating the (shared) default argument in place.
    width_stages = [putils.make_divisible(width * width_mult, 8) for width in width_stages]

    # first conv
    first_conv = ops.ConvLayer(3, input_channel, kernel_size=3, stride=2,
                               use_bn=True, act_func='relu6', ops_order='weight_bn_act')
    # first block
    first_block_conv = ops.OPS['3x3_MBConv1'](input_channel, first_cell_width, 1)
    first_block = first_block_conv

    input_channel = first_cell_width

    blocks = [first_block]

    stage_cnt = 0
    for width, n_cell, s in zip(width_stages, n_cell_stages, stride_stages):
        for i in range(n_cell):
            stride = s if i == 0 else 1
            op_candidates = [ops.OPS['3x3_MBConv3'](input_channel, width, stride),
                             ops.OPS['3x3_MBConv6'](input_channel, width, stride),
                             ops.OPS['5x5_MBConv3'](input_channel, width, stride),
                             ops.OPS['5x5_MBConv6'](input_channel, width, stride),
                             ops.OPS['7x7_MBConv3'](input_channel, width, stride),
                             ops.OPS['7x7_MBConv6'](input_channel, width, stride)]
            if stride == 1 and input_channel == width:
                # not the first cell of a stage: a Zero op lets the block be skipped entirely
                op_candidates += [ops.OPS['Zero'](input_channel, width, stride)]
            conv_op = LayerChoice(op_candidates, label="s{}_c{}".format(stage_cnt, i))
            # shortcut
            if stride == 1 and input_channel == width:
                # if not the first cell
                shortcut = ops.IdentityLayer(input_channel, input_channel)
            else:
                shortcut = None
            inverted_residual_block = ops.MobileInvertedResidualBlock(conv_op, shortcut, op_candidates)
            blocks.append(inverted_residual_block)
            input_channel = width
        stage_cnt += 1

    # feature mix layer
    last_channel = putils.make_divisible(1280 * width_mult, 8) if width_mult > 1.0 else 1280
    feature_mix_layer = ops.ConvLayer(input_channel, last_channel, kernel_size=1,
                                      use_bn=True, act_func='relu6', ops_order='weight_bn_act')
    classifier = ops.LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)

    self.first_conv = first_conv
    self.blocks = nn.ModuleList(blocks)
    self.feature_mix_layer = feature_mix_layer
    self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = classifier

    # set bn param
    self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
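
# Typical instantiation with the defaults above (values are illustrative). Each cell
# becomes a 6-way LayerChoice over MBConv variants, widened to 7 candidates (with the
# Zero op) when the block is skippable, labelled "s{stage}_c{cell}" for the trainer:
model = SearchMobileNet(width_mult=1.0, n_classes=1000, dropout_rate=0.1)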