Code Example #1
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Build blocks
        self._blocks = nn.ModuleList([])
        self._array = []  # per-stage groups of blocks, each wrapped in nn.Sequential
        for block_args in self._blocks_args:

            self.temp = []  # collects the blocks that make up the current stage
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            self._global_params),
                output_filters=round_filters(block_args.output_filters,
                                             self._global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         self._global_params))

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            self.temp.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(
                    MBConvBlock(block_args, self._global_params))
                self.temp.append(MBConvBlock(block_args, self._global_params))
            self._array.append(nn.Sequential(*self.temp))
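
Every example on this page scales channel counts with round_filters and repeat counts with round_repeats. For reference, here is a self-contained sketch of those two helpers as they appear in common EfficientNet implementations; the GlobalParams namedtuple below is a simplified stand-in, and the exact field names used by the projects above are an assumption.

import math
from collections import namedtuple

# Simplified stand-in for the library's GlobalParams (assumed field names).
GlobalParams = namedtuple(
    'GlobalParams',
    ['width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth'])

def round_filters(filters, global_params):
    """Scale `filters` by the width coefficient, rounding to a multiple of depth_divisor."""
    multiplier = global_params.width_coefficient
    if not multiplier:
        return filters
    divisor = global_params.depth_divisor
    min_depth = global_params.min_depth or divisor
    filters *= multiplier
    new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # never round down by more than 10%
        new_filters += divisor
    return int(new_filters)

def round_repeats(repeats, global_params):
    """Scale the number of block repeats by the depth coefficient, rounding up."""
    multiplier = global_params.depth_coefficient
    if not multiplier:
        return repeats
    return int(math.ceil(multiplier * repeats))

# Example: with EfficientNet-B4-style coefficients (width 1.4, depth 1.8),
# the 32-channel stem becomes 48 channels and a 2-repeat stage becomes 4 repeats.
params = GlobalParams(width_coefficient=1.4, depth_coefficient=1.8,
                      depth_divisor=8, min_depth=None)
print(round_filters(32, params))   # 48
print(round_repeats(2, params))    # 4
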
Code Example #2
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(
            32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=3,
                                 stride=2,
                                 bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels,
                                   momentum=bn_mom,
                                   eps=bn_eps)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            self._global_params),
                output_filters=round_filters(block_args.output_filters,
                                             self._global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         self._global_params))

            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(
                    MBConvBlock(block_args, self._global_params))

        # Head
        in_channels = block_args.output_filters  # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=1,
                                 bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels,
                                   momentum=bn_mom,
                                   eps=bn_eps)

        # Dropout rate and final linear layer
        self._dropout = self._global_params.dropout_rate
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)
Code Example #3
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), "blocks_args should be a list"
        assert len(blocks_args) > 0, "block args must be greater than 0"
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Build blocks
        self._blocks = nn.ModuleList([])
        for i, block_args in enumerate(self._blocks_args):
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            self._global_params),
                output_filters=round_filters(block_args.output_filters,
                                             self._global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         self._global_params),
            )
            print(i, block_args)  # debug output: the resolved arguments for this stage

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(
                    MBConvBlock(block_args, self._global_params))

        # Head
        in_channels = block_args.output_filters  # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=1,
                                 bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels,
                                   momentum=bn_mom,
                                   eps=bn_eps)

        # Define the activation function
        self._swish = MemoryEfficientSwish()
Code Example #4
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params))

        # Head
        in_channels = block_args.output_filters  # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Final linear layer
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        self._dropout = nn.Dropout(self._global_params.dropout_rate)
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        self._swish = MemoryEfficientSwish()
Code Example #5
    def __init__(
        self,
        model_name: str = "efficientnet-b7",
        num_classes: int = 4,
    ) -> None:
        super().__init__()
        # Make separate models so we don't share weights
        # self.dct_y_efficientnet = DCTEfficientNet.from_pretrained(
        #     model_name=model_name, num_classes=num_classes
        # )
        # self.dct_cb_efficientnet = DCTEfficientNet.from_pretrained(
        #     model_name=model_name, num_classes=num_classes
        # )
        # self.dct_cr_efficientnet = DCTEfficientNet.from_pretrained(
        #     model_name=model_name, num_classes=num_classes
        # )
        self.dct_y_efficientnet = DCTEfficientNet.from_name(
            model_name=model_name,
            override_params={"num_classes": num_classes})
        self.dct_cb_efficientnet = DCTEfficientNet.from_name(
            model_name=model_name,
            override_params={"num_classes": num_classes})
        self.dct_cr_efficientnet = DCTEfficientNet.from_name(
            model_name=model_name,
            override_params={"num_classes": num_classes})

        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        self._dropout = nn.Dropout(0.5)
        out_channels = round_filters(1280,
                                     self.dct_y_efficientnet._global_params)
        self._fc = nn.Linear(int(out_channels * 3), num_classes)

        self.relu = nn.ReLU(inplace=True)
        self.l1 = nn.Linear(int(out_channels * 3), 1024)
        self.l2 = nn.Linear(1024, num_classes)
Code Example #6
 def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
     model = cls.from_name(model_name, override_params={'num_classes': num_classes})
     load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
     if in_channels != 3:
         Conv2d = get_same_padding_conv2d(image_size=model._global_params.image_size)
         out_channels = round_filters(32, model._global_params)
         model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
     return model
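
A minimal usage sketch for the method above, assuming it is exposed as EfficientNet.from_pretrained in the efficientnet_pytorch package (the import path is an assumption; the keyword names follow the signature shown above):

import torch
from efficientnet_pytorch import EfficientNet

# Grayscale input: since in_channels != 3, the stem convolution is rebuilt for
# one channel, so its pretrained RGB weights are discarded while the remaining
# pretrained weights are kept (num_classes != 1000 also skips loading the fc layer).
model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=10, in_channels=1)
x = torch.randn(2, 1, 224, 224)
logits = model(x)  # expected shape: (2, 10)
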
Code Example #7
File: efficientnet.py  Project: aLeX1443/alaska2
 def __init__(self, *args, **kwargs):
     super(StegoQFactorEfficientNet, self).__init__(*args, **kwargs)
     dropout_rate = self._global_params.dropout_rate
     print("Dropout rate:", dropout_rate)
     self._dropout = nn.Dropout(dropout_rate)
     out_channels = round_filters(1280, self._global_params)
     self._concatenated_fc = nn.Linear(
         out_channels + 3, self._global_params.num_classes
     )
Code Example #8
File: encoder.py  Project: chengxiaoy/PBAD
    def __init__(self, skip_connections, model_name):
        blocks_args, global_params = get_model_params(model_name,
                                                      override_params=None)

        super().__init__(blocks_args, global_params)
        self._skip_connections = list(skip_connections)
        self._skip_connections.append(len(self._blocks))
        self.channels = [round_filters(1280, self._global_params)]

        del self._fc
Code Example #9
 def _change_in_channels(model, in_channels):
     if in_channels != 3:
         Conv2d = get_same_padding_conv2d(
             image_size=model._global_params.image_size)
         out_channels = round_filters(32, model._global_params)
         model._conv_stem = Conv2d(in_channels,
                                   out_channels,
                                   kernel_size=3,
                                   stride=2,
                                   bias=False)
Code Example #10
File: efficientnet.py  Project: jlingohr/panda
 def _change_in_channels(self, in_channels):
     """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
     Args:
         in_channels (int): Input data's channel number.
     """
     if in_channels != 3:
         Conv2d = get_same_padding_conv2d(
             image_size=self._global_params.image_size)
         out_channels = round_filters(32, self._global_params)
         self._conv_stem = Conv2d(in_channels,
                                  out_channels,
                                  kernel_size=3,
                                  stride=2,
                                  bias=False)
Code Example #11
 def __init__(self,
              model_name='efficientnet-b0',
              use_pretrained=False,
              in_channels=3):
     super(EfficientNetWrapper, self).__init__()
     if use_pretrained:
         self.model = EfficientNet.from_pretrained(model_name=model_name,
                                                   in_channels=in_channels)
     else:
         model = EfficientNet.from_name(model_name, num_classes=1000)
         if in_channels != 3:
             Conv2d = get_same_padding_conv2d(
                 image_size=model._global_params.image_size)
             out_channels = round_filters(32, model._global_params)
             model._conv_stem = Conv2d(in_channels,
                                       out_channels,
                                       kernel_size=3,
                                       stride=2,
                                       bias=False)
         self.model = model
Code Example #12
import torch