Code example #1
    def __init__(self, channels, reduction_channels):
        super(SEModule, self).__init__()
        self.pool = GlobalPool2d("avg")
        # authors of original paper DO use bias
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, stride=1, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, stride=1, bias=True)
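
This snippet defines the layers of a squeeze-and-excitation (SE) block but omits the forward pass. Below is a minimal, self-contained sketch of how such a block is typically wired, assuming nn.AdaptiveAvgPool2d(1) as a stand-in for the repo's GlobalPool2d("avg"); SEModuleSketch is a hypothetical name, not the repo's class.

import torch
import torch.nn as nn

class SEModuleSketch(nn.Module):
    # Hypothetical stand-in for the SEModule above; GlobalPool2d("avg")
    # is replaced by nn.AdaptiveAvgPool2d(1), which is an assumption.
    def __init__(self, channels, reduction_channels):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, bias=True)

    def forward(self, x):
        s = self.pool(x)                      # squeeze: (N, C, H, W) -> (N, C, 1, 1)
        s = self.fc2(self.relu(self.fc1(s)))  # excitation: bottleneck MLP as 1x1 convs
        return x * s.sigmoid()                # channel-wise gating of the input

x = torch.randn(2, 64, 32, 32)
print(SEModuleSketch(64, 16)(x).shape)  # torch.Size([2, 64, 32, 32])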
Code example #2
File: residual.py  Project: kuan-li/pytorch-tools
    def __init__(self, channels, reduction_channels, norm_act="relu"):
        super(SEModule, self).__init__()

        self.pool = GlobalPool2d("avg")
        # authors of original paper DO use bias
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, stride=1, bias=True)
        self.act1 = activation_from_name(norm_act)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, stride=1, bias=True)
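
The only difference from example #1 is that the activation is looked up by name via activation_from_name, so the block can be built with activations other than ReLU. A hypothetical minimal version of such a lookup, for illustration only (the real pytorch-tools helper supports more names than this):

import torch.nn as nn

def activation_from_name(name):
    # Hypothetical sketch of a name-to-activation lookup.
    return {"relu": nn.ReLU(inplace=True),
            "leaky_relu": nn.LeakyReLU(0.01, inplace=True),
            "elu": nn.ELU(inplace=True)}[name.lower()]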
Code example #3
File: densenet.py  Project: yuv4r4j/pytorch-tools
    def __init__(
            self,
            growth_rate=None,
            block_config=None,
            pretrained=None,  # not used. here for proper signature
            num_classes=1000,
            drop_rate=0.0,
            in_channels=3,
            norm_layer='abn',
            norm_act='relu',
            deep_stem=False,
            stem_width=64,
            encoder=False,
            global_pool='avg',
            memory_efficient=True):

        super(DenseNet, self).__init__()
        norm_layer = bn_from_name(norm_layer)
        self.num_classes = num_classes
        if deep_stem:
            self.conv0 = nn.Sequential(
                conv3x3(in_channels, stem_width // 2, 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width // 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width, 2))
        else:
            self.conv0 = nn.Conv2d(in_channels,
                                   stem_width,
                                   kernel_size=7,
                                   stride=2,
                                   padding=3,
                                   bias=False)

        self.norm0 = norm_layer(stem_width, activation=norm_act)
        self.pool0 = nn.MaxPool2d(kernel_size=3,
                                  stride=2,
                                  padding=1,
                                  ceil_mode=False)

        largs = dict(growth_rate=growth_rate,
                     drop_rate=drop_rate,
                     memory_efficient=memory_efficient,
                     norm_layer=norm_layer,
                     norm_act=norm_act)
        in_planes = stem_width
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers, in_planes, **largs)
            setattr(self, 'denseblock{}'.format(i + 1), block)
            in_planes += num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(in_planes=in_planes,
                                    out_planes=in_planes // 2)
                setattr(self, 'transition{}'.format(i + 1), trans)
                in_planes //= 2

        # Final normalization
        self.norm5 = nn.BatchNorm2d(in_planes)

        # Linear layer
        self.encoder = encoder
        if not encoder:
            self.global_pool = GlobalPool2d(global_pool)
            self.classifier = nn.Linear(in_planes, num_classes)
        else:
            assert len(block_config) == 4, 'Need 4 blocks to use as encoder'
            self.forward = self.encoder_features
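
A hedged usage sketch: the standard DenseNet-121 configuration (growth rate 32, blocks of 6/12/24/16 layers; values from the DenseNet paper, not from this file) maps onto the constructor above as follows. Note that with encoder=True, forward is rebound to encoder_features, so the model returns intermediate feature maps instead of class logits.

# Assumes the DenseNet class above is importable.
model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16), num_classes=1000)

# Encoder variant: requires exactly 4 blocks and skips the classifier head.
backbone = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16), encoder=True)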
Code example #4
File: resnet.py  Project: yuv4r4j/pytorch-tools
    def __init__(
            self,
            block=None,
            layers=None,
            pretrained=None,  # not used. here for proper signature
            num_classes=1000,
            in_channels=3,
            use_se=False,
            groups=1,
            base_width=64,
            deep_stem=False,
            dilated=False,
            norm_layer='abn',
            norm_act='relu',
            antialias=False,
            encoder=False,
            drop_rate=0.0,
            global_pool='avg',
            init_bn0=True):

        stem_width = 64
        if norm_layer.lower() == 'abn':
            norm_act = 'relu'

        norm_layer = bn_from_name(norm_layer)
        self.inplanes = stem_width
        self.num_classes = num_classes
        self.groups = groups
        self.base_width = base_width
        self.drop_rate = drop_rate
        self.block = block
        self.expansion = block.expansion
        self.dilated = dilated
        self.norm_act = norm_act
        super(ResNet, self).__init__()

        if deep_stem:
            self.conv1 = nn.Sequential(
                conv3x3(in_channels, stem_width // 2, 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width // 2, 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width))
        else:
            self.conv1 = nn.Conv2d(in_channels,
                                   stem_width,
                                   kernel_size=7,
                                   stride=2,
                                   padding=3,
                                   bias=False)
        self.bn1 = norm_layer(stem_width, activation=norm_act)
        if deep_stem:
            self.maxpool = nn.Sequential()  # don't need it
        elif antialias:
            self.maxpool = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1), BlurPool())
        else:
            # for SE-ResNets the first maxpool is slightly different
            self.maxpool = nn.MaxPool2d(kernel_size=3,
                                        stride=2,
                                        padding=0 if use_se else 1,
                                        ceil_mode=True if use_se else False)
        # Output stride is 8 with dilated and 32 without
        stride_3_4 = 1 if self.dilated else 2
        dilation_3 = 2 if self.dilated else 1
        dilation_4 = 4 if self.dilated else 1
        largs = dict(use_se=use_se,
                     norm_layer=norm_layer,
                     norm_act=norm_act,
                     antialias=antialias)
        self.layer1 = self._make_layer(64, layers[0], stride=1, **largs)
        self.layer2 = self._make_layer(128, layers[1], stride=2, **largs)
        self.layer3 = self._make_layer(256,
                                       layers[2],
                                       stride=stride_3_4,
                                       dilation=dilation_3,
                                       **largs)
        self.layer4 = self._make_layer(512,
                                       layers[3],
                                       stride=stride_3_4,
                                       dilation=dilation_4,
                                       **largs)
        self.global_pool = GlobalPool2d(global_pool)
        self.num_features = 512 * self.expansion
        self.encoder = encoder
        if not encoder:
            self.last_linear = nn.Linear(
                self.num_features * self.global_pool.feat_mult(), num_classes)
        else:
            self.forward = self.encoder_features

        self._initialize_weights(init_bn0)
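
A hedged usage note: with a bottleneck block and layers=[3, 4, 6, 3] this signature corresponds to a standard ResNet-50. Setting dilated=True keeps stride 1 in layer3 and layer4 and compensates with dilations 2 and 4, so the output stride drops from 32 to 8, which is what dense-prediction heads usually want. A sketch, assuming Bottleneck is the repo's bottleneck block:

# Standard ResNet-50 configuration; Bottleneck is assumed to be
# importable from the same repo.
model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3])

# Dilated encoder: a 224x224 input yields 28x28 feature maps (stride 8)
# instead of 7x7 (stride 32) before global pooling.
backbone = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], dilated=True, encoder=True)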
Code example #5
    def __init__(
        self,
        block=None,
        layers=None,
        pretrained=None,  # not used. here for proper signature
        num_classes=1000,
        in_channels=3,
        use_se=False,
        groups=1,
        base_width=64,
        deep_stem=False,
        output_stride=32,
        norm_layer="abn",
        norm_act="relu",
        antialias=False,
        encoder=False,
        drop_rate=0.0,
        drop_connect_rate=0.0,
        global_pool="avg",
        init_bn0=True,
    ):

        stem_width = 64
        norm_layer = bn_from_name(norm_layer)
        self.inplanes = stem_width
        self.num_classes = num_classes
        self.groups = groups
        self.base_width = base_width
        self.block = block
        self.expansion = block.expansion
        self.norm_act = norm_act
        self.block_idx = 0
        self.num_blocks = sum(layers)
        self.drop_connect_rate = drop_connect_rate
        super(ResNet, self).__init__()

        if deep_stem:
            self.conv1 = nn.Sequential(
                conv3x3(in_channels, stem_width // 2, 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width // 2),
                norm_layer(stem_width // 2, activation=norm_act),
                conv3x3(stem_width // 2, stem_width),
            )
        else:
            self.conv1 = nn.Conv2d(in_channels,
                                   stem_width,
                                   kernel_size=7,
                                   stride=2,
                                   padding=3,
                                   bias=False)
        self.bn1 = norm_layer(stem_width, activation=norm_act)
        self.maxpool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=0 if use_se else 1,
            ceil_mode=True if use_se else False,
        )
        if output_stride not in [8, 16, 32]:
            raise ValueError("Output stride should be in [8, 16, 32]")
        if output_stride == 8:
            stride_3, stride_4, dilation_3, dilation_4 = 1, 1, 2, 4
        elif output_stride == 16:
            stride_3, stride_4, dilation_3, dilation_4 = 2, 1, 1, 2
        elif output_stride == 32:
            stride_3, stride_4, dilation_3, dilation_4 = 2, 2, 1, 1
        largs = dict(use_se=use_se,
                     norm_layer=norm_layer,
                     norm_act=norm_act,
                     antialias=antialias)
        self.layer1 = self._make_layer(64, layers[0], stride=1, **largs)
        self.layer2 = self._make_layer(128, layers[1], stride=2, **largs)
        self.layer3 = self._make_layer(256,
                                       layers[2],
                                       stride=stride_3,
                                       dilation=dilation_3,
                                       **largs)
        self.layer4 = self._make_layer(512,
                                       layers[3],
                                       stride=stride_4,
                                       dilation=dilation_4,
                                       **largs)
        self.global_pool = GlobalPool2d(global_pool)
        self.num_features = 512 * self.expansion
        self.encoder = encoder
        if not encoder:
            self.dropout = nn.Dropout(p=drop_rate, inplace=True)
            self.last_linear = nn.Linear(
                self.num_features * self.global_pool.feat_mult(), num_classes)
        else:
            self.forward = self.encoder_features

        self._initialize_weights(init_bn0)
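
This later revision replaces the boolean dilated flag with an explicit output_stride and threads drop_connect_rate through the blocks, tracking block_idx against num_blocks. Presumably the per-block drop rate is scaled linearly with depth, as in stochastic-depth training; a hedged sketch of that scaling (the helper below is hypothetical, not the repo's implementation):

import torch

def drop_connect(x, rate, training):
    # Hypothetical stochastic-depth helper: drops the residual branch for a
    # whole example with probability `rate`, rescaling survivors by 1/keep_prob.
    if not training or rate == 0.0:
        return x
    keep_prob = 1.0 - rate
    mask = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < keep_prob
    return x / keep_prob * mask.to(x.dtype)

# Inside block i of n, the call would presumably look like:
#   out = drop_connect(out, drop_connect_rate * i / n, self.training)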