Example #1
    def __init__(self, num_class=NUM_CLASS, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super().__init__()
        self.use_softmax = use_softmax

        # Pyramid Pooling Module: one pooling branch per scale
        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                BatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
                      kernel_size=3, padding=1, bias=False),
            BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
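
For context, a minimal sketch of how such a pyramid-pooling head is typically applied (the forward method is not part of the snippet; it assumes conv_out is the list of encoder feature maps, and omits the use_softmax and deep-supervision branches):

    def forward(self, conv_out):
        conv5 = conv_out[-1]  # deepest encoder feature map
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_branch in self.ppm:
            # pool to a fixed grid, then upsample back to conv5's spatial size
            ppm_out.append(nn.functional.interpolate(
                pool_branch(conv5), (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)  # concatenate along channels
        return self.conv_last(ppm_out)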
Example #2
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He (Kaiming) initialization for convolutions; batch norm starts
        # as the identity (weight 1, bias 0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #3
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              dilation=1,
              stride=1,
              padding=1,
              activation='PReLU',
              bias=False,
              asymmetric=False,
              dropout_prob=0):
     super(RegularBottleNeck, self).__init__()
     internal_channels = in_channels // 4  # bottleneck channel reduction
     self.conv_down = Sequential(
         Conv2d(in_channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     if not asymmetric:
         # single square convolution on the main branch
         self.conv_main = Sequential(
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    stride=stride,
                    padding=padding,
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU())
     else:
         # factorize the k x k conv into a (k x 1) + (1 x k) asymmetric pair
         self.conv_main = Sequential(
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=(kernel_size, 1),
                    dilation=dilation,
                    stride=stride,
                    padding=(padding, 0),
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU(),
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=(1, kernel_size),
                    dilation=dilation,
                    stride=stride,
                    padding=(0, padding),
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU())
     self.conv_up = Sequential(
         Conv2d(internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.regularizer = Dropout2d(p=dropout_prob)
     self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
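
A minimal sketch of the forward pass such an ENet-style bottleneck typically implements (not shown in the snippet; the identity skip assumes in_channels == out_channels and matching spatial size):

 def forward(self, x):
     residual = x                    # identity skip path
     main = self.conv_down(x)        # 1x1 channel reduction
     main = self.conv_main(main)     # main (possibly asymmetric) convolution
     main = self.conv_up(main)       # 1x1 channel expansion
     main = self.regularizer(main)   # spatial dropout
     return self.out_activation(main + residual)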
Example #4
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = BatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
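
The matching forward pass follows the standard residual pattern; a sketch, since it is not included in the snippet:

 def forward(self, x):
     residual = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         residual = self.downsample(x)  # reshape the skip to match out
     return self.relu(out + residual)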
Example #5
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              dilation=1,
              stride=2,
              padding=1,
              output_padding=1,
              activation='PReLU',
              bias=False,
              dropout_prob=0.1):
     super(UpsamplingBottleNeck, self).__init__()
     internal_channels = in_channels // 4
     self.conv_down = Sequential(
         Conv2d(in_channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.conv_main = Sequential(
         ConvTranspose2d(internal_channels,
                         internal_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=padding,
                         output_padding=output_padding,
                         dilation=dilation,
                         bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.conv_up = Sequential(
         Conv2d(internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     # parallel branch: 1x1 channel projection, paired with the
     # max-unpooling module below
     self.main_conv = Sequential(
         Conv2d(in_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels))
     self.mainmaxunpool = MaxUnpool2d(kernel_size=2, stride=2, padding=0)
     self.regularizer = Dropout2d(p=dropout_prob)
     self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
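
A sketch of the forward pass this kind of upsampling bottleneck typically implements; the indices argument (an assumption, since forward is not shown) would be the pooling indices saved by the matching downsampling stage:

 def forward(self, x, indices):
     # conv branch: 1x1 reduce, transposed conv upsamples, 1x1 expand
     main = self.conv_up(self.conv_main(self.conv_down(x)))
     main = self.regularizer(main)
     # parallel branch: 1x1 projection, then unpool with the stored indices
     side = self.mainmaxunpool(self.main_conv(x), indices)
     return self.out_activation(main + side)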
Example #6
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #7
def add_bn(model):
    seq = list(model)
    # (insertion index, channel count): one BatchNorm2d goes in front of each
    # ReLU of the VGG-style stack; descending order keeps the remaining
    # (smaller) indices valid as the list grows
    relu_positions = [(59, 64), (57, 64), (54, 128), (52, 128), (49, 256),
                      (47, 256), (45, 256), (42, 512), (40, 512), (38, 512),
                      (35, 512), (33, 512), (31, 512), (29, 512), (27, 512),
                      (25, 512), (22, 512), (20, 512), (18, 512), (15, 256),
                      (13, 256), (11, 256), (8, 128), (6, 128), (3, 64),
                      (1, 64)]
    for pos, channels in relu_positions:
        seq.insert(pos, BatchNorm2d(channels))
    return torch.nn.Sequential(*seq)
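
A hypothetical usage sketch: the 26 positions above line up with the ReLUs of a VGG16-style encoder-decoder Sequential built without batch norm (compare the create_model network in Example #15, which has exactly 26 convolutions), so plain_model below stands in for such a stack:

# plain_model: a SegNet-style torch.nn.Sequential without batch norm,
# whose ReLUs sit exactly at the 26 indices listed above (hypothetical)
model_bn = add_bn(plain_model)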
Example #8
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
    return nn.Sequential(
        nn.Conv2d(in_planes,
                  out_planes,
                  kernel_size=3,
                  stride=stride,
                  padding=1,
                  bias=False),
        BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    )
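
A quick usage sketch of the helper:

import torch

block = conv3x3_bn_relu(256, 128, stride=2)   # 3x3 conv, BN, ReLU
y = block(torch.randn(1, 256, 32, 32))        # -> shape (1, 128, 16, 16)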
Example #9
 def __init__(self, in_channels: int,
              out_channels: int,
              batchnorm: bool = True,
              bottleneck: bool = False):
     super(Downsampling, self).__init__()
     self.batchnorm = BatchNorm2d(out_channels) if batchnorm else None
     self.bottleneck = bottleneck
      self.kernel_size = (2, 2) if self.bottleneck else (4, 4)
      # explicit padding tuple, presumably applied in forward (not shown here)
      self.pad = (1, 1, 1, 1) if self.bottleneck else (2, 2, 2, 2)
     self.conv2d = nn.Conv2d(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=self.kernel_size,
                             stride=(1, 1))
Example #10
    def __init__(
        self,
        in_shape: Tuple[int, int, int],
        outdims: int,
        bias: bool,
        init_noise: float = 1e-3,
        attention_kernel: int = 1,
        attention_layers: int = 1,
        mean_activity: Optional[Mapping[str, float]] = None,
        feature_reg_weight: float = 1.0,
        gamma_readout: Optional[
            float] = None,  # deprecated, use feature_reg_weight instead
        **kwargs: Any,
    ) -> None:
        super().__init__()
        self.in_shape = in_shape
        self.outdims = outdims
        self.feature_reg_weight = self.resolve_deprecated_gamma_readout(
            feature_reg_weight, gamma_readout)  # type: ignore[no-untyped-call]
        self.mean_activity = mean_activity
        c, w, h = in_shape
        self.features = Parameter(torch.Tensor(self.outdims, c))

        attention = Sequential()
        for i in range(attention_layers - 1):
            attention.add_module(
                f"conv{i}",
                Conv2d(c, c, attention_kernel, padding=attention_kernel > 1),
            )
            attention.add_module(
                f"norm{i}", BatchNorm2d(c))  # type: ignore[no-untyped-call]
            attention.add_module(f"nonlin{i}", ELU())
        # final conv maps features to one attention map per output unit
        attention.add_module(
            f"conv{attention_layers}",
            Conv2d(c,
                   outdims,
                   attention_kernel,
                   padding=attention_kernel > 1),
        )
        self.attention = attention
        self.attention = attention

        self.init_noise = init_noise
        if bias:
            bias_param = Parameter(torch.Tensor(self.outdims))
            self.register_parameter("bias", bias_param)
        else:
            self.register_parameter("bias", None)
        self.initialize(mean_activity)
Example #11
    def __init__(self,
                 num_class=150,
                 fc_dim=4096,
                 use_softmax=False,
                 pool_scales=(1, 2, 3, 6)):
        super(PPM, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(scale),
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    BatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm = nn.ModuleList(self.ppm)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False), BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Dropout2d(0.1), nn.Conv2d(512, num_class, kernel_size=1))
Example #12
 def __init__(self,
              in_channels,
              out_channels,
              activation='PReLU',
              bias=False):
     super(InitialBlock, self).__init__()
     self.conv = Conv2d(in_channels=in_channels,
                        out_channels=out_channels - 3,
                        kernel_size=3,
                        stride=2,
                        padding=1)
     self.maxpooling = MaxPool2d(kernel_size=2, stride=2, padding=0)
     self.bnActivate = Sequential(
         BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
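
The ENet-style initial block concatenates a strided convolution with a max-pooled copy of the input: the conv contributes out_channels - 3 feature maps and the pooled 3-channel (RGB) input supplies the rest, so the concatenation has exactly out_channels channels. A sketch of the omitted forward pass:

 def forward(self, x):
     # both branches halve the spatial resolution; channels concatenate
     out = torch.cat([self.conv(x), self.maxpooling(x)], dim=1)
     return self.bnActivate(out)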
Example #13
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
Example #14
 def __init__(self, features):
     super(bna, self).__init__()
     # a small batch-norm + activation pair
     self.batchnorm = BatchNorm2d(features)
     self.activate = ReLU(inplace=True)
Example #15
def create_model(input_channels):
    # Create encoder based on VGG16 architecture
    # original_vgg16 = vgg16()
    #
    # # select only convolutional layers
    # encoder = torch.nn.Sequential(*list(original_vgg16.features)[:30])

    # new encoder
    encoder = [
        Conv2d(input_channels,
               64,
               kernel_size=(3, 3),
               stride=(1, 1),
               padding=(1, 1)),
        BatchNorm2d(64),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(64),
        ReLU(),
        MaxPool2d(kernel_size=2,
                  stride=2,
                  padding=0,
                  dilation=1,
                  ceil_mode=False),
        Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(128),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(128),
        ReLU(),
        MaxPool2d(kernel_size=2,
                  stride=2,
                  padding=0,
                  dilation=1,
                  ceil_mode=False),
        Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        MaxPool2d(kernel_size=2,
                  stride=2,
                  padding=0,
                  dilation=1,
                  ceil_mode=False),
        Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        MaxPool2d(kernel_size=2,
                  stride=2,
                  padding=0,
                  dilation=1,
                  ceil_mode=False),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU()
    ]

    # decoder based on VGG16 (layers in reverse order, with Upsampling stages)
    decoder_list = [
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Upsample(scale_factor=2, mode='nearest'),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(512),
        ReLU(),
        Upsample(scale_factor=2, mode='nearest'),
        Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(256),
        ReLU(),
        Upsample(scale_factor=2, mode='nearest'),
        Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(128),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(128),
        ReLU(),
        Upsample(scale_factor=2, mode='nearest'),
        Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(64),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        BatchNorm2d(64),
        ReLU(),
        Conv2d(64, 1, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),
        Sigmoid(),
    ]
    encoder = torch.nn.Sequential(*encoder)
    decoder = torch.nn.Sequential(*decoder_list)

    # assemble the full encoder-decoder architecture
    model = torch.nn.Sequential(*(list(encoder.children()) +
                                  list(decoder.children())))

    return model
Example #16
 def __init__(self, c_in, c_out, filter_size, stride=1, padding=0, **kwargs):
     super(Conv2dBN, self).__init__()
     # convolution followed by batch norm, with a ReLU applied afterwards
     self.conv = Conv2d(c_in, c_out, filter_size,
                        stride=stride, padding=padding, **kwargs)
     self.bn = BatchNorm2d(c_out)
     self.relu = ReLU()
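
The snippet stops at __init__; a minimal sketch of the matching forward, plus a hypothetical instantiation:

 def forward(self, x):
     return self.relu(self.bn(self.conv(x)))

# e.g. a 3x3 same-padding conv from 64 to 128 channels
layer = Conv2dBN(64, 128, 3, padding=1)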