Example #1
    def __init__(self,
                 opt,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64):
        super(Bottleneck, self).__init__()
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2block and self.downsample downsample the input when stride != 1
        self.conv1block = ConvBlock(opt=opt,
                                    in_channels=inplanes,
                                    out_channels=width,
                                    kernel_size=1)
        self.conv2block = ConvBlock(opt=opt,
                                    in_channels=width,
                                    out_channels=width,
                                    kernel_size=3,
                                    stride=stride,
                                    groups=groups,
                                    padding=1)
        # self.expansion is expected to be a class attribute (4 in the standard ResNet Bottleneck)
        self.conv3block = ConvBlock(opt=opt,
                                    in_channels=width,
                                    out_channels=planes * self.expansion,
                                    kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
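Only the constructor is shown above. A minimal sketch of the forward pass such a bottleneck usually pairs with, assuming ConvBlock bundles convolution, normalization and activation (this is not part of the original example):

    def forward(self, x):
        identity = x
        out = self.conv1block(x)
        out = self.conv2block(out)
        out = self.conv3block(out)
        if self.downsample is not None:
            # bring the identity branch to the new channel count / spatial size
            identity = self.downsample(x)
        return self.relu(out + identity)  # residual add, then the final activation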
Example #2
    def __init__(self,
                 opt,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64):
        super(BasicBlock, self).__init__()
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')

        # Both self.conv1block and self.downsample downsample the input when stride != 1
        self.conv1block = ConvBlock(opt=opt,
                                    in_channels=inplanes,
                                    out_channels=planes,
                                    kernel_size=3,
                                    stride=stride,
                                    padding=1)
        self.conv2block = ConvBlock(opt=opt,
                                    in_channels=planes,
                                    out_channels=planes,
                                    kernel_size=3,
                                    padding=1)
        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)
Example #3
    def __init__(self,
                 opt,
                 inChannels,
                 outChannels,
                 stride=1,
                 downsample=None):
        super(BottleneckBlock, self).__init__()
        expansion = 4
        self.conv1 = ConvBlock(opt=opt,
                               in_channels=inChannels,
                               out_channels=outChannels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.conv2 = ConvBlock(opt=opt,
                               in_channels=outChannels,
                               out_channels=outChannels,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.conv3 = ConvBlock(opt=opt,
                               in_channels=outChannels,
                               out_channels=outChannels * expansion,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.downsample = downsample
Example #4
    def __init__(self, pretr_emb, pad_idx, n_conv_units=1024, p_dropout=0.6, filter_size=3,
                 pool_size=3, emb_dim=100, n_classes=12, dense_layer_units=256):
        super(BaseCNN, self).__init__()

        # input_size: (batch, seq)
        weights = torch.FloatTensor(pretr_emb.vectors)
        # from_pretrained freezes the embedding weights by default (freeze=True),
        # so the commented-out requires_grad loop below is redundant unless freeze=False is passed
        self.embedder = nn.Embedding.from_pretrained(weights)
        self.embedder.padding_idx = pad_idx
#         for param in self.embedder.parameters():
#             param.requires_grad = False
        # embedded_input_size: (batch, seq, 100)
        self.model = nn.Sequential(
            # (batch, 800, 100)
            TransposeChannels(),
            # (batch, 100, 800)
            ConvBlock(emb_dim, n_conv_units, p_dropout, filter_size, pool_size),
            # (batch, 1024, 268)
            ConvBlock(n_conv_units, n_conv_units, p_dropout, filter_size, pool_size),
            # (batch, 1024, ?????)
            nn.Conv1d(n_conv_units, n_conv_units, filter_size),
            # (batch, 1024, ?????)
            nn.ReLU(),
            GlobalMaxPool(dim=2),
            # (batch, 1024)
            nn.Linear(in_features=n_conv_units, out_features=dense_layer_units),
            nn.ReLU(),
            # (batch, 256)
            nn.Linear(in_features=dense_layer_units, out_features=n_classes),
            nn.Sigmoid()
            # (batch, 12)
        )
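The example only defines the layers; a forward pass consistent with the shape comments would embed the token ids and run the sequential stack (a sketch, not part of the original snippet):

    def forward(self, token_ids):
        # token_ids: (batch, seq) -> embedded: (batch, seq, emb_dim)
        embedded = self.embedder(token_ids)
        # the Sequential stack transposes to (batch, emb_dim, seq) and ends at (batch, n_classes)
        return self.model(embedded)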
Example #5
    def _make_layer(self, opt, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = ConvBlock(opt=opt,
                                   in_channels=self.inplanes,
                                   out_channels=planes * block.expansion,
                                   kernel_size=1,
                                   stride=stride)

        layers = []
        layers.append(
            block(opt=opt,
                  inplanes=self.inplanes,
                  planes=planes,
                  stride=stride,
                  downsample=downsample,
                  groups=self.groups,
                  base_width=self.base_width))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(opt=opt,
                      inplanes=self.inplanes,
                      planes=planes,
                      groups=self.groups,
                      base_width=self.base_width))

        return nn.Sequential(*layers)
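For context, _make_layer is typically called once per stage from the network constructor; a hedged sketch of that call pattern, following the usual torchvision ResNet layout (stage widths and attribute names are assumptions):

        # inside the network's __init__, after self.inplanes, self.groups and self.base_width are set
        self.layer1 = self._make_layer(opt, block, 64, layers[0])
        self.layer2 = self._make_layer(opt, block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(opt, block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(opt, block, 512, layers[3], stride=2)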
Example #6
import torch

# bn and pool are expected to come from the test runner (e.g. pytest parametrization)
def test_conv_block(bn, pool):
    in_channels = 64
    out_channels = 128
    size = 8
    batch_size = 10
    conv = ConvBlock(in_channels, out_channels, bn=bn, pool=pool)
    fake_input = torch.randn(batch_size, in_channels, size, size)
    out = conv(fake_input)
    out_size = size if pool is None else size//2
    assert out.shape == (batch_size, out_channels, out_size, out_size)
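The test pins down the behaviour of this ConvBlock variant: no opt argument, spatial size preserved unless pool is set, halved when it is. A minimal implementation that would satisfy the assertions might look like the following; the internals are an assumption, not the project's actual code:

import torch.nn as nn

class ConvBlock(nn.Module):
    # hypothetical minimal version: a 3x3 convolution that keeps the spatial size,
    # optional BatchNorm, ReLU, and an optional pooling step that halves the size
    def __init__(self, in_channels, out_channels, bn=True, pool=None):
        super().__init__()
        layers = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        layers.append(nn.ReLU(inplace=True))
        if pool is not None:
            # the real project may expect a flag or a module here; only truthiness is used in this sketch
            layers.append(nn.MaxPool2d(kernel_size=2))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)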
Example #7
    def _createFeatures(self, opt):
        layers = [
            InitialBlock(opt=opt,
                         out_channels=128,
                         kernel_size=5,
                         stride=1,
                         padding=2)
        ]
        layers += [
            ConvBlock(opt=opt, in_channels=128, out_channels=96, kernel_size=1)
        ]
        layers += [
            ConvBlock(opt=opt, in_channels=96, out_channels=48, kernel_size=1)
        ]
        layers += [getattr(nn, opt.pooltype)(kernel_size=3, stride=2)]
        #layers += [nn.Dropout(opt.drop_rate)]
        layers += [
            ConvBlock(opt=opt,
                      in_channels=48,
                      out_channels=128,
                      kernel_size=5,
                      stride=1,
                      padding=2)
        ]
        layers += [
            ConvBlock(opt=opt, in_channels=128, out_channels=96, kernel_size=1)
        ]
        layers += [
            ConvBlock(opt=opt, in_channels=96, out_channels=48, kernel_size=1)
        ]
        layers += [getattr(nn, opt.pooltype)(kernel_size=3, stride=2)]
        #layers += [nn.Dropout(opt.drop_rate)]
        layers += [
            ConvBlock(opt=opt,
                      in_channels=48,
                      out_channels=128,
                      kernel_size=3,
                      stride=1,
                      padding=1)
        ]
        layers += [
            ConvBlock(opt=opt, in_channels=128, out_channels=96, kernel_size=1)
        ]
        layers += [
            ConvBlock(opt=opt,
                      in_channels=96,
                      out_channels=opt.num_classes,
                      kernel_size=1)
        ]
        layers += [nn.AdaptiveAvgPool2d(1)]
        return layers
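_createFeatures only returns a list of modules; a plausible forward pass for the surrounding class, assuming the list is wrapped as self.features = nn.Sequential(*self._createFeatures(opt)) in __init__:

    def forward(self, x):
        out = self.features(x)    # (batch, num_classes, 1, 1) after the final AdaptiveAvgPool2d(1)
        return out.flatten(1)     # (batch, num_classes) logits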
Example #8
    def __init__(self, opt, block, inChannels, outChannels, depth, stride=1):
        super(ResidualBlock, self).__init__()
        if stride != 1 or inChannels != outChannels * block.expansion:
            downsample = ConvBlock(opt=opt, in_channels=inChannels, out_channels=outChannels * block.expansion,
                                   kernel_size=1, stride=stride, padding=0, bias=False)
        else:
            downsample = None
        self.blocks = nn.Sequential()
        self.blocks.add_module('block0', block(opt, inChannels, outChannels, stride, downsample))
        inChannels = outChannels * block.expansion
        for i in range(1, depth):
            self.blocks.add_module('block{}'.format(i), block(opt, inChannels, outChannels))
Example #9
def block(opt, in_channels, out_channels, pool):
    return nn.Sequential(
        ConvBlock(in_channels, out_channels, bn=opt.bn),
        ConvBlock(out_channels, out_channels, bn=opt.bn, pool=pool)
    )
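A hedged usage sketch for block, assuming opt only needs a boolean bn flag here and that pool accepts whatever the project's ConvBlock expects (a pooling module is passed below purely as a placeholder):

from types import SimpleNamespace
import torch
import torch.nn as nn

opt = SimpleNamespace(bn=True)          # stand-in: only the bn flag is needed by block()
stem = nn.Sequential(
    block(opt, in_channels=3, out_channels=64, pool=nn.MaxPool2d(2)),
    block(opt, in_channels=64, out_channels=128, pool=nn.MaxPool2d(2)),
)
out = stem(torch.randn(1, 3, 32, 32))   # (1, 128, 8, 8) if each pool halves the spatial size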