def mixnet_s(depth_multiplier=1, depth_divisor=8, min_depth=None):
    """Build the MixNet-S model.

    Args:
        depth_multiplier: scaling factor applied to per-layer filter counts.
        depth_divisor: filter counts are rounded to multiples of this value.
        min_depth: optional lower bound on rounded filter counts.

    Returns:
        A MixNet instance configured with the MixNet-S block layout.
    """
    # Encoded block specs: r=repeats, k=kernel sizes, a/p=expand/project
    # activations, s=stride, e=expand ratio, i/o=in/out filters,
    # se=squeeze-excite ratio, sw=swish.
    encoded_blocks = [
        'r1_k3_a1_p1_s11_e1_i16_o16',
        'r1_k3_a1.1_p1.1_s22_e6_i16_o24',
        'r1_k3_a1.1_p1.1_s11_e3_i24_o24',
        'r1_k3.5.7_a1_p1_s22_e6_i24_o40_se0.5_sw',
        'r3_k3.5_a1.1_p1.1_s11_e6_i40_o40_se0.5_sw',
        'r1_k3.5.7_a1_p1.1_s22_e6_i40_o80_se0.25_sw',
        'r2_k3.5_a1_p1.1_s11_e6_i80_o80_se0.25_sw',
        'r1_k3.5.7_a1.1_p1.1_s11_e6_i80_o120_se0.5_sw',
        'r2_k3.5.7.9_a1.1_p1.1_s11_e3_i120_o120_se0.5_sw',
        'r1_k3.5.7.9.11_a1_p1_s22_e6_i120_o200_se0.5_sw',
        'r2_k3.5.7.9_a1_p1.1_s11_e6_i200_o200_se0.5_sw',
    ]
    decoded_blocks = MixnetDecoder.decode(
        encoded_blocks, depth_multiplier, depth_divisor, min_depth)

    # Debug dump of the decoded block configuration.
    print("-----------")
    print("Mixnet S")
    for block in decoded_blocks:
        print(block)
    print("-----------")

    stem_filters = round_filters(16, depth_multiplier, depth_divisor, min_depth)
    head_filters = round_filters(1536, depth_multiplier, depth_divisor, min_depth)
    dropout_rate = 0.2
    return MixNet(stem_filters, decoded_blocks, head_filters, dropout_rate)
def __init__(self, blocks_args=None, global_params=None):
    """Build the network: stem conv -> stack of MBConv blocks -> head conv -> pool/FC.

    Args:
        blocks_args: list of per-stage block-argument namedtuples
            (must be a non-empty list).
        global_params: namedtuple of network-wide settings (image_size,
            batch-norm params, dropout_rate, num_classes, ...).
    """
    super().__init__()
    assert isinstance(blocks_args, list), 'blocks_args should be a list'
    assert len(blocks_args) > 0, 'block args must be greater than 0'
    self._global_params = global_params
    # Bug fix: shallow-copy the list. The original stored the caller's list
    # object and rewrote its entries in the rescaling loop below, mutating
    # the argument the caller passed in.
    self._blocks_args = list(blocks_args)

    # Get static or dynamic convolution depending on image size
    Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

    # Batch norm parameters (PyTorch momentum is 1 - TF-style momentum)
    bn_mom = 1 - self._global_params.batch_norm_momentum
    bn_eps = self._global_params.batch_norm_epsilon

    # Stem
    in_channels = 3  # rgb
    out_channels = round_filters(32, self._global_params)  # number of output channels
    self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
    self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

    # Build blocks
    self._blocks = nn.ModuleList([])
    for i in range(len(self._blocks_args)):
        # Update block input and output filters based on depth multiplier.
        self._blocks_args[i] = self._blocks_args[i]._replace(
            input_filters=round_filters(self._blocks_args[i].input_filters, self._global_params),
            output_filters=round_filters(self._blocks_args[i].output_filters, self._global_params),
            num_repeat=round_repeats(self._blocks_args[i].num_repeat, self._global_params)
        )
        # The first block of a stage takes care of stride and filter-size increase;
        # the remaining repeats run at stride 1 with equal in/out filters.
        self._blocks.append(MBConvBlock(self._blocks_args[i], self._global_params))
        if self._blocks_args[i].num_repeat > 1:
            self._blocks_args[i] = self._blocks_args[i]._replace(
                input_filters=self._blocks_args[i].output_filters, stride=1)
        for _ in range(self._blocks_args[i].num_repeat - 1):
            self._blocks.append(MBConvBlock(self._blocks_args[i], self._global_params))

    # Head
    in_channels = self._blocks_args[len(self._blocks_args) - 1].output_filters  # output of final block
    out_channels = round_filters(1280, self._global_params)
    self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
    self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

    # Final linear layer
    self._avg_pooling = nn.AdaptiveAvgPool2d(1)
    self._dropout = nn.Dropout(self._global_params.dropout_rate)
    self._fc = nn.Linear(out_channels, self._global_params.num_classes)
    self._swish = MemoryEfficientSwish()
def get_stem_ops(self, input_size, kernel, stride, out_channel, activation, use_bias):
    """Return ([stem conv op], spatial size after the stem).

    The requested output channel count is rescaled via round_filters;
    a stride-2 stem halves the spatial size (floor division).
    """
    scaled_channels = round_filters(out_channel, self.global_params)
    stem_ops = [
        self.get_conv_stem(input_size, kernel, stride, scaled_channels, activation, use_bias)
    ]
    next_size = math.floor(input_size / 2) if stride == 2 else input_size
    return stem_ops, next_size
def from_pretrained(cls, model_name, num_classes=1000, in_channels=3):
    """Create a model by name and load its pretrained weights.

    The final FC layer is only loaded when num_classes matches the
    1000-class pretrained head. For non-RGB input the stem conv is
    rebuilt for the requested channel count (those weights start fresh).
    """
    model = cls.from_name(model_name, override_params={'num_classes': num_classes})
    load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))
    if in_channels == 3:
        return model
    # Rebuild the stem for a different number of input channels.
    Conv2d = get_same_padding_conv2d(image_size=model._global_params.image_size)
    stem_channels = round_filters(32, model._global_params)
    model._conv_stem = Conv2d(in_channels, stem_channels, kernel_size=3, stride=2, bias=False)
    return model
def get_last_ops(self, all_ops, input_size, in_channel, out_channel, num_classes, activation, use_bias):
    """Append the head ops (1x1 conv, global average pool, classifier FC) to all_ops.

    Returns the same all_ops list for chaining.
    """
    head_channels = round_filters(out_channel, self.global_params)
    head_conv = Conv2D(input_size=input_size,
                       kernel_shape=[1, 1, in_channel, head_channels],
                       strides=(1, 1),
                       padding='same',
                       use_bias=use_bias,
                       activation=activation)
    all_ops.extend([
        ('_conv_head', head_conv),
        ('_avg_pooling', GlobalAvg(input_size=input_size, n_channels=head_channels)),
        ('_fc', FullyConnected(kernel_shape=[head_channels, num_classes],
                               use_bias=True, activation=None)),
    ])
    return all_ops
def get_block_ops(self, all_ops, blocks_args, global_params, input_size, activation, use_bias):
    """Append one ('block_N', [ops...]) entry per block (repeats included) to all_ops.

    Each stage's args are first rescaled by the global depth settings. The
    very first block overall skips the expand conv; every later block gets
    one prepended. Repeats run with stride 1 and input_filters set to the
    stage's output_filters.

    Returns:
        (all_ops, final spatial size, output filter count of the last stage).

    NOTE(review): the `global_params` parameter is unused — the code reads
    `self.global_params` instead; confirm that is intended.
    """
    start = True  # True only for the very first block, which has no expand conv
    block_ops = []
    block_num = 0
    for block_args in blocks_args:
        # Rescale filters and repeat count by the global depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters, self.global_params),
            output_filters=round_filters(block_args.output_filters, self.global_params),
            num_repeat=round_repeats(block_args.num_repeat, self.global_params)
        )
        if not start:
            # Pointwise expansion before the main block ops; the expanded
            # channel count feeds the efficientnet ops below.
            block_ops.append(self.get_expand_conv(input_size, block_args.input_filters, block_args.input_filters * block_args.expand_ratio, activation, use_bias))
            in_channel = block_args.input_filters * block_args.expand_ratio
        else:
            in_channel = block_args.input_filters
            start = False
        # _get_efficientnet_ops extends block_ops and updates input_size
        # (e.g. after strided convs).
        block_ops, input_size = self._get_efficientnet_ops(block_ops, block_args, input_size, in_channel, activation, use_bias)
        all_ops.append(('block_%d' % block_num, block_ops))
        block_ops = []
        block_num += 1
        if block_args.num_repeat > 1:
            # Repeats keep the stage's output width and run at stride 1.
            block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            in_channel = block_args.input_filters * block_args.expand_ratio
        for _ in range(block_args.num_repeat - 1):
            block_ops.append(self.get_expand_conv(input_size, block_args.input_filters, block_args.input_filters * block_args.expand_ratio, activation, use_bias))
            block_ops, input_size = self._get_efficientnet_ops(block_ops, block_args, input_size, in_channel, activation, use_bias)
            all_ops.append(('block_%d' % block_num, block_ops))
            block_ops = []
            block_num += 1
    return all_ops, input_size, block_args.output_filters
def get_network_from_name(cls, model_name, num_classes=1000, in_channels=3):
    """Create a model by name without loading pretrained weights.

    For non-RGB input the stem conv is rebuilt for the requested
    channel count.
    """
    model = cls.from_name(model_name, override_params={'num_classes': num_classes})
    if in_channels == 3:
        return model
    # Rebuild the stem for a different number of input channels.
    Conv2d = get_same_padding_conv2d(image_size=model._global_params.image_size)
    stem_channels = round_filters(32, model._global_params)
    model._conv_stem = Conv2d(in_channels, stem_channels, kernel_size=3, stride=2, bias=False)
    return model