Example #1
    def __init__(self, channels, reduction_channels, norm_act="relu"):
        super(SEModule, self).__init__()

        self.pool = FastGlobalAvgPool2d()
        # authors of original paper DO use bias
        self.fc1 = conv1x1(channels, reduction_channels, bias=True)
        self.act1 = activation_from_name(norm_act)
        self.fc2 = conv1x1(reduction_channels, channels, bias=True)
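The snippet stops after the layer definitions. For reference, a minimal forward pass for this kind of squeeze-and-excitation module, assuming the standard formulation (the method below is a sketch, not part of the original example):

    def forward(self, x):
        # squeeze: per-channel global context, shape B x C x 1 x 1
        x_se = self.pool(x)
        x_se = self.fc2(self.act1(self.fc1(x_se)))
        # excite: rescale the input channels with a sigmoid gate
        return x * x_se.sigmoid()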
Example #2
    def __init__(self, channels, reduction_channels, norm_act="relu"):

        super().__init__()

        self.pool = FastGlobalAvgPool2d()
        # authors of original paper DO use bias
        self.fc = nn.Sequential(
            conv1x1(channels, reduction_channels, bias=True),
            activation_from_name(norm_act),
            conv1x1(reduction_channels, channels, bias=True),
            nn.Sigmoid(),
        )
        # dummy shape; will be overwritten later. Intentionally not registered as a buffer.
        self.pos_encoding = torch.ones(1, 1, 1, 1)
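Since the gate here already ends in nn.Sigmoid(), the forward pass reduces to a single multiplication. How self.pos_encoding is consumed is not visible in this snippet, so this sketch (an assumption, not the original code) omits it:

    def forward(self, x):
        # self.fc already applies the sigmoid, so the pooled gate can be used directly
        return x * self.fc(self.pool(x))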
Example #3
    def __init__(self, in_ch, reduced_ch, norm_layer=ABN, norm_act="relu"):  # parse additional args for compatibility
        super().__init__()
        self.global_attn = nn.Sequential(
            FastGlobalAvgPool2d(),
            conv1x1(in_ch, reduced_ch),
            norm_layer(reduced_ch, activation=norm_act),
            conv1x1(reduced_ch, in_ch),
            norm_layer(in_ch, activation="identity"),  # no last activation
        )
        self.local_attn = nn.Sequential(
            conv1x1(in_ch, reduced_ch),
            norm_layer(reduced_ch, activation=norm_act),
            conv1x1(reduced_ch, in_ch),
            norm_layer(in_ch, activation="identity"),  # no last activation
        )
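The global branch outputs a B x C x 1 x 1 map while the local branch keeps full resolution; a common way to combine such branches is to sum them (broadcasting the global map) and gate the input with a sigmoid. The forward below is a hedged sketch of that combination, not code from the source:

    def forward(self, x):
        # global gate broadcasts over H x W; local gate is per-position
        attn = self.global_attn(x) + self.local_attn(x)
        return x * attn.sigmoid()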
Example #4
    def __init__(
        self,
        layers=None,
        pretrained=None,  # not used. here for proper signature
        num_classes=1000,
        in_channels=3,
        width_factor=1.0,
        output_stride=32,
        norm_layer="inplaceabn",
        norm_act="leaky_relu",
        encoder=False,
        drop_rate=0.0,
        drop_connect_rate=0.0,
    ):
        nn.Module.__init__(self)
        stem_width = int(64 * width_factor)
        norm_layer = bn_from_name(norm_layer)
        self.inplanes = stem_width
        self.num_classes = num_classes
        self.groups = 1  # not really used but needed inside _make_layer
        self.base_width = 64  # used inside _make_layer
        self.norm_act = norm_act
        self.block_idx = 0
        self.num_blocks = sum(layers)
        self.drop_connect_rate = drop_connect_rate

        self._make_stem("space2depth", stem_width, in_channels, norm_layer,
                        norm_act)

        if output_stride not in [8, 16, 32]:
            raise ValueError("Output stride should be in [8, 16, 32]")
        # TODO add OS later
        # if output_stride == 8:
        # stride_3, stride_4, dilation_3, dilation_4 = 1, 1, 2, 4
        # elif output_stride == 16:
        # stride_3, stride_4, dilation_3, dilation_4 = 2, 1, 1, 2
        # elif output_stride == 32:
        stride_3, stride_4, dilation_3, dilation_4 = 2, 2, 1, 1

        largs = dict(attn_type="se",
                     norm_layer=norm_layer,
                     norm_act=norm_act,
                     antialias=True)
        self.block = TBasicBlock
        self.expansion = TBasicBlock.expansion
        self.layer1 = self._make_layer(stem_width,
                                       layers[0],
                                       stride=1,
                                       **largs)
        self.layer2 = self._make_layer(stem_width * 2,
                                       layers[1],
                                       stride=2,
                                       **largs)

        self.block = TBottleneck  # first 2 - Basic, last 2 - Bottleneck
        self.expansion = TBottleneck.expansion
        self.layer3 = self._make_layer(stem_width * 4,
                                       layers[2],
                                       stride=stride_3,
                                       dilation=dilation_3,
                                       **largs)
        largs.update(attn_type=None)  # no se in last layer
        self.layer4 = self._make_layer(stem_width * 8,
                                       layers[3],
                                       stride=stride_4,
                                       dilation=dilation_4,
                                       **largs)
        self.global_pool = FastGlobalAvgPool2d(flatten=True)
        self.num_features = stem_width * 8 * self.expansion
        self.encoder = encoder
        if not encoder:
            self.dropout = nn.Dropout(p=drop_rate, inplace=True)
            self.last_linear = nn.Linear(self.num_features, num_classes)
        else:
            self.forward = self.encoder_features

        self._initialize_weights(init_bn0=True)
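The constructor tracks block_idx, num_blocks, and drop_connect_rate, which is the usual bookkeeping for a linearly increasing drop-connect (stochastic depth) schedule: early blocks are dropped rarely, later blocks more often. A hypothetical helper illustrating that schedule (the real _make_layer may wire it differently):

    def _current_drop_connect_rate(self):
        # scale the maximum rate by how deep into the network this block sits
        return self.drop_connect_rate * self.block_idx / self.num_blocks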
Example #5
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.pool = FastGlobalAvgPool2d()
        self.conv = nn.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
Example #6

"""
* input stem
* input deep

* GAP
* SE
* ECA
"""

# represents a tensor from the middle of a ResNet-like model
inp1 = torch.randn(hparams.bs, 32, 224, 224).cuda()
# represents a tensor from the end of a ResNet-like model
inp2 = torch.randn(hparams.bs, 256, 14, 14).cuda()

num_threads = torch.get_num_threads()

gap = FastGlobalAvgPool2d().cuda()
se = SEModule(32, 16).cuda()
se2 = SEModule(256, 64).cuda()
eca = ECAModule().cuda()
eca9 = ECAModule(kernel_size=9).cuda()

if hparams.half:
    inp1 = inp1.half()
    inp2 = inp2.half()
    gap = gap.half()
    se = se.half()
    se2 = se2.half()
    eca = eca.half()
    eca9 = eca9.half()

all_res = []
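The setup stops right after collecting the modules and inputs; a minimal way to fill all_res is a CUDA-synchronized timing loop like the one below (an illustrative sketch, not the original benchmark code):

import time

def bench(module, inp, n_iters=100):
    with torch.no_grad():
        for _ in range(10):  # warmup so the first kernels do not skew the result
            module(inp)
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(n_iters):
            module(inp)
        torch.cuda.synchronize()
    return (time.time() - start) / n_iters * 1e3  # ms per forward

for name, module, inp in [("GAP", gap, inp1), ("SE", se, inp1), ("SE-2", se2, inp2), ("ECA", eca, inp1), ("ECA-9", eca9, inp1)]:
    all_res.append((name, bench(module, inp)))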
Example #7
    def __init__(self, channels, *args):
        super().__init__()

        self.pool = FastGlobalAvgPool2d()
        # authors of original paper DO use bias
        self.fc1 = conv1x1(channels, channels, bias=True)
Example #8
    def __init__(self, *args, kernel_size=3, **kwargs):
        super().__init__()
        self.pool = FastGlobalAvgPool2d()
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
        # dummy shape; will be overwritten later. Intentionally not registered as a buffer.
        self.pos_encoding = torch.ones(1, 1, 1, 1)
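For this ECA-style module the 1d convolution slides across the channel dimension of the pooled descriptor. A minimal forward sketch under that assumption (ignoring pos_encoding, whose use is not shown here):

    def forward(self, x):
        # B x C x 1 x 1 -> B x 1 x C so the conv runs across channels
        y = self.pool(x).view(x.size(0), 1, x.size(1))
        y = self.conv(y)
        # back to B x C x 1 x 1 and gate the input
        y = y.view(x.size(0), x.size(1), 1, 1).sigmoid()
        return x * y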