def __init__(self, blocks_args=None, global_params=None):
    """Build an EfficientNet-style backbone: stem conv, MBConv blocks, head, classifier.

    Args:
        blocks_args (list): per-stage block-args namedtuples (must be non-empty);
            each is expected to support ``_replace`` with ``input_filters``,
            ``output_filters``, ``num_repeat`` and ``stride`` fields.
        global_params: namedtuple of network-wide settings (image_size,
            batch_norm_momentum/epsilon, dropout_rate, num_classes, ...).
    """
    super().__init__()
    assert isinstance(blocks_args, list), 'blocks_args should be a list'
    assert len(blocks_args) > 0, 'block args must be greater than 0'
    self._global_params = global_params
    self._blocks_args = blocks_args

    # Get static or dynamic convolution depending on image size
    Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

    # Batch norm parameters (TF-style momentum, hence the 1 - m conversion)
    bn_mom = 1 - self._global_params.batch_norm_momentum
    bn_eps = self._global_params.batch_norm_epsilon

    # Stem
    in_channels = 3  # rgb
    out_channels = round_filters(32, self._global_params)  # number of output channels
    self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
    self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

    # Build blocks
    self._blocks = nn.ModuleList([])
    for i, block_args in enumerate(self._blocks_args):
        # Scale filters/repeats by the width/depth multipliers, and write the
        # scaled args back so self._blocks_args reflects the realized network.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters, self._global_params),
            output_filters=round_filters(block_args.output_filters, self._global_params),
            num_repeat=round_repeats(block_args.num_repeat, self._global_params),
        )
        self._blocks_args[i] = block_args

        # The first block needs to take care of stride and filter size increase.
        self._blocks.append(MBConvBlock(block_args, self._global_params))
        if block_args.num_repeat > 1:
            # Remaining repeats keep resolution and channel count.
            block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            self._blocks_args[i] = block_args
        for _ in range(block_args.num_repeat - 1):
            self._blocks.append(MBConvBlock(block_args, self._global_params))

    # Head (comment previously contained pasted scraping junk)
    in_channels = self._blocks_args[-1].output_filters  # output of final block
    out_channels = round_filters(1280, self._global_params)
    self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
    self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

    # Final linear layer
    self._avg_pooling = nn.AdaptiveAvgPool2d(1)
    self._dropout = nn.Dropout(self._global_params.dropout_rate)
    self._fc = nn.Linear(out_channels, self._global_params.num_classes)
    self._swish = MemoryEfficientSwish()
# NOTE(review): removed scraping artifacts ("예제 #2" and a stray "0") that
# were pasted into the file and broke parsing.
    def get_block_ops(self, all_ops, blocks_args, global_params, input_size, activation, use_bias):
        """Expand blocks_args into per-repeat op lists and append them to all_ops.

        For each block (and each of its repeats) builds the op sequence via
        ``self.get_expand_conv`` / ``self._get_efficientnet_ops`` and appends
        ``('block_<n>', ops)`` entries to ``all_ops``.

        Args:
            all_ops (list): accumulator of (name, ops) pairs; mutated and returned.
            blocks_args: iterable of block-args namedtuples (must be non-empty,
                otherwise the final ``block_args`` reference raises NameError).
            global_params: scaling parameters for round_filters/round_repeats.
            input_size: spatial size fed to the first block; updated as blocks
                downsample.
            activation, use_bias: forwarded to the conv builders.

        Returns:
            (all_ops, final input_size, output_filters of the last block).
        """
        is_first = True
        block_ops = []
        block_num = 0

        for block_args in blocks_args:
            # Scale filters/repeats by the multipliers in global_params.
            # BUGFIX: was `self.global_params`, an attribute that does not exist
            # (the class stores `self._global_params`); the unused
            # `global_params` parameter is clearly what was intended.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, global_params),
                output_filters=round_filters(block_args.output_filters, global_params),
                num_repeat=round_repeats(block_args.num_repeat, global_params),
            )

            if not is_first:
                # Every block after the first starts with its expand convolution.
                block_ops.append(self.get_expand_conv(input_size, block_args.input_filters, block_args.input_filters * block_args.expand_ratio, activation, use_bias))
                in_channel = block_args.input_filters * block_args.expand_ratio
            else:
                in_channel = block_args.input_filters
                is_first = False

            block_ops, input_size = self._get_efficientnet_ops(block_ops, block_args, input_size, in_channel, activation, use_bias)

            all_ops.append(('block_%d' % block_num, block_ops))
            block_ops = []
            block_num += 1

            if block_args.num_repeat > 1:
                # Repeats keep resolution (stride=1) and chain output->input filters.
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
                in_channel = block_args.input_filters * block_args.expand_ratio

            for _ in range(block_args.num_repeat - 1):
                block_ops.append(self.get_expand_conv(input_size, block_args.input_filters, block_args.input_filters * block_args.expand_ratio, activation, use_bias))
                block_ops, input_size = self._get_efficientnet_ops(block_ops, block_args, input_size, in_channel, activation, use_bias)

                all_ops.append(('block_%d' % block_num, block_ops))
                block_ops = []
                block_num += 1

        return all_ops, input_size, block_args.output_filters