Example #1
    def __init__(self, in_channel, out_channel, kernel_size, padding, downsample=False, fused=False):
        super(ConvBlock, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),
            nn.LeakyReLU(0.2)
        )

        if downsample:
            if fused:
                self.conv2 = nn.Sequential(
                    nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                    nn.Pool(2),
                    nn.LeakyReLU(0.2)
                )
            else:
                self.conv2 = nn.Sequential(
                    nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                    nn.Pool(2),
                    nn.LeakyReLU(0.2)
                )
        else:
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                nn.LeakyReLU(0.2)
            )
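As written, the `fused` and non-fused branches above build identical layers, and Jittor's `nn.Pool` defaults to max pooling when `op` is not given. A minimal sketch of the `op` parameter (an illustration, assuming the `jittor.nn` API used throughout these examples):

import jittor as jt
from jittor import nn

x = jt.random((1, 16, 8, 8))
print(nn.Pool(2)(x).shape)             # default op='maximum' -> 4x4 spatial
print(nn.Pool(2, op='mean')(x).shape)  # average pooling, same output shape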
Example #2
 def __init__(self, num_classes=1000, aux_logits=True, init_weights=True, blocks=None):
     super(GoogLeNet, self).__init__()
     if (blocks is None):
         blocks = [BasicConv2d, Inception, InceptionAux]
     assert (len(blocks) == 3)
     conv_block = blocks[0]
     inception_block = blocks[1]
     inception_aux_block = blocks[2]
     self.aux_logits = aux_logits
     self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
     self.maxpool1 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.conv2 = conv_block(64, 64, kernel_size=1)
     self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
     self.maxpool2 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
     self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
     self.maxpool3 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
     self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
     self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
     self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
     self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
     self.maxpool4 = nn.Pool(2, stride=2, ceil_mode=True, op='maximum')
     self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
     self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
     if aux_logits:
         self.aux1 = inception_aux_block(512, num_classes)
         self.aux2 = inception_aux_block(528, num_classes)
     else:
         self.aux1 = None
         self.aux2 = None
     self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
     self.dropout = nn.Dropout(0.2)
     self.fc = nn.Linear(1024, num_classes)
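Each inception block's first argument is the channel count produced by the previous block; assuming the torchvision-style Inception wiring (the four branch outputs are concatenated, as in Example #12 below), a quick hedged check of the bookkeeping:

def inception_out(ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
    # the reduce (1x1) channels are internal; only the four branch outputs concat
    return ch1x1 + ch3x3 + ch5x5 + pool_proj

assert inception_out(64, 96, 128, 16, 32, 32) == 256       # inception3a -> 3b
assert inception_out(128, 128, 192, 32, 96, 64) == 480     # inception3b -> 4a
assert inception_out(384, 192, 384, 48, 128, 128) == 1024  # inception5b -> fc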
Example #3
 def __init__(self):
     super(VGGBase, self).__init__()
     self.conv1_1 = nn.Conv(3, 64, kernel_size=3, padding=1)
     self.conv1_2 = nn.Conv(64, 64, kernel_size=3, padding=1)
     self.pool1 = nn.Pool(kernel_size=2, stride=2, op='maximum')
     self.conv2_1 = nn.Conv(64, 128, kernel_size=3, padding=1)
     self.conv2_2 = nn.Conv(128, 128, kernel_size=3, padding=1)
     self.pool2 = nn.Pool(kernel_size=2, stride=2, op='maximum')
     self.conv3_1 = nn.Conv(128, 256, kernel_size=3, padding=1)
     self.conv3_2 = nn.Conv(256, 256, kernel_size=3, padding=1)
     self.conv3_3 = nn.Conv(256, 256, kernel_size=3, padding=1)
     self.pool3 = nn.Pool(kernel_size=2,
                          stride=2,
                          ceil_mode=True,
                          op='maximum')
     self.conv4_1 = nn.Conv(256, 512, kernel_size=3, padding=1)
     self.conv4_2 = nn.Conv(512, 512, kernel_size=3, padding=1)
     self.conv4_3 = nn.Conv(512, 512, kernel_size=3, padding=1)
     self.pool4 = nn.Pool(kernel_size=2, stride=2, op='maximum')
     self.conv5_1 = nn.Conv(512, 512, kernel_size=3, padding=1)
     self.conv5_2 = nn.Conv(512, 512, kernel_size=3, padding=1)
     self.conv5_3 = nn.Conv(512, 512, kernel_size=3, padding=1)
     self.pool5 = nn.Pool(kernel_size=3, stride=1, padding=1, op='maximum')
     self.conv6 = nn.Conv(512, 1024, kernel_size=3, padding=6, dilation=6)
     self.conv7 = nn.Conv(1024, 1024, kernel_size=1)
Example #4
 def _make_layers(self, cfg):
     layers = []
     in_channels = 3
     for x in cfg:
         if x == 'M':
             layers += [nn.Pool(kernel_size=2, stride=2, op="maximum")]
         else:
             layers += [
                 nn.Conv(in_channels, x, kernel_size=3, padding=1),
                 nn.BatchNorm(x),
                 nn.ReLU()
             ]
             in_channels = x
     layers += [nn.Pool(kernel_size=1, stride=1, op="mean")]
     return nn.Sequential(*layers)
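For reference, a self-contained version of the `cfg` convention used by `_make_layers` ('M' marks a max pool, integers are conv output channels); the cfg values below are illustrative, not from the source:

import jittor as jt
from jittor import nn

def make_features(cfg, in_channels=3):
    layers = []
    for x in cfg:
        if x == 'M':
            layers.append(nn.Pool(kernel_size=2, stride=2, op="maximum"))
        else:
            layers.append(nn.Conv(in_channels, x, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm(x))
            layers.append(nn.ReLU())
            in_channels = x
    return nn.Sequential(*layers)

feats = make_features([64, 'M', 128, 'M'])
print(feats(jt.random((1, 3, 32, 32))).shape)  # two pools: 32 -> 16 -> 8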
Example #5
    def __init__(self, block, layers, output_stride, baseWidth = 26, scale = 4):
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.inplanes = 64
        blocks = [1, 2, 4]
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            # strides = [2, 1, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError

        # Modules
        self.conv1 = nn.Sequential(
            nn.Conv(3, 32, 3, 2, 1, bias=False),
            nn.BatchNorm(32),
            nn.ReLU(),
            nn.Conv(32, 32, 3, 1, 1, bias=False),
            nn.BatchNorm(32),
            nn.ReLU(),
            nn.Conv(32, 64, 3, 1, 1, bias=False)
        )
        self.bn1 = nn.BatchNorm(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3])
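The stem (one stride-2 conv plus the stride-2 max pool) contributes a factor of 4, so the chosen `strides` lists yield the requested output stride; a hedged check:

import numpy as np
assert 4 * np.prod([1, 2, 2, 1]) == 16  # output_stride == 16
assert 4 * np.prod([1, 2, 1, 1]) == 8   # output_stride == 8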
Example #6
    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_layer=nn.InstanceNorm2d,
                 use_sigmoid=False,
                 num_D=3,
                 getIntermFeat=False):
        super(MultiscaleDiscriminator, self).__init__()
        self.num_D = num_D
        self.n_layers = n_layers
        self.getIntermFeat = getIntermFeat

        for i in range(num_D):
            netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer,
                                       use_sigmoid, getIntermFeat)
            if getIntermFeat:
                for j in range(n_layers + 2):
                    setattr(self, 'scale' + str(i) + '_layer' + str(j),
                            getattr(netD, 'model' + str(j)))
            else:
                setattr(self, 'layer' + str(i), netD.model)

        self.downsample = nn.Pool(3,
                                  stride=2,
                                  padding=1,
                                  count_include_pad=False,
                                  op='mean')
Example #7
 def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
     self.inplanes = 64
     super(Res2Net, self).__init__()
     self.baseWidth = baseWidth
     self.scale = scale
     self.conv1 = nn.Sequential(
         nn.Conv(3, 32, 3, stride=2, padding=1, bias=False),
         nn.BatchNorm(32), nn.ReLU(),
         nn.Conv(32, 32, 3, stride=1, padding=1, bias=False),
         nn.BatchNorm(32), nn.ReLU(),
         nn.Conv(32, 64, 3, stride=1, padding=1, bias=False))
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.Pool(3, stride=2, padding=1, op='maximum')
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear((512 * block.expansion), num_classes)
     for m in self.modules():
         if isinstance(m, nn.Conv):
             nn.init.kaiming_normal_(m.weight, mode='fan_out')
         elif isinstance(m, nn.BatchNorm):
             init.constant_(m.weight, value=1)
             init.constant_(m.bias, value=0)
Example #8
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              baseWidth=26,
              scale=4,
              stype='normal'):
     super(Bottle2neck, self).__init__()
     width = int(math.floor((planes * (baseWidth / 64.0))))
     self.conv1 = nn.Conv(inplanes, (width * scale), 1, bias=False)
     self.bn1 = nn.BatchNorm((width * scale))
     if (scale == 1):
         self.nums = 1
     else:
         self.nums = (scale - 1)
     if (stype == 'stage'):
         self.pool = nn.Pool(3, stride=stride, padding=1, op='mean')
     convs = []
     bns = []
     for i in range(self.nums):
         convs.append(
             nn.Conv(width, width, 3, stride=stride, padding=1, bias=False))
         bns.append(nn.BatchNorm(width))
     self.convs = nn.ModuleList(convs)
     self.bns = nn.ModuleList(bns)
     self.conv3 = nn.Conv((width * scale), (planes * self.expansion),
                          1,
                          bias=False)
     self.bn3 = nn.BatchNorm((planes * self.expansion))
     self.relu = nn.ReLU()
     self.downsample = downsample
     self.stype = stype
     self.scale = scale
     self.width = width
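The `width` formula scales the per-group channel count with `baseWidth`; a hedged check for the default Res2Net 26w x 4s setting:

import math
planes, baseWidth, scale = 64, 26, 4
width = int(math.floor(planes * (baseWidth / 64.0)))
assert width == 26
assert width * scale == 104  # channels after conv1, split into `scale` groups
assert scale - 1 == 3        # self.nums: the last split is pooled or passed through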
Example #9
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None

        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(
                nn.Pool(stride, stride=stride, ceil_mode=True, op='mean'),
                nn.Conv(self.inplanes, (planes * block.expansion),
                        1,
                        stride=1,
                        bias=False), nn.BatchNorm((planes * block.expansion)))

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  stype='stage',
                  baseWidth=self.baseWidth,
                  scale=self.scale))

        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      baseWidth=self.baseWidth,
                      scale=self.scale))

        return nn.Sequential(*layers)
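The downsample path here average-pools by the stride and then applies a stride-1 1x1 conv (ResNet-D style downsampling), rather than the classic strided 1x1 conv; both give the same output shape, but the pool aggregates all activations instead of discarding three quarters of them. A hedged illustration:

import jittor as jt
from jittor import nn

x = jt.random((1, 64, 56, 56))
a = nn.Sequential(
    nn.Pool(2, stride=2, ceil_mode=True, op='mean'),
    nn.Conv(64, 256, 1, stride=1, bias=False))(x)
b = nn.Conv(64, 256, 1, stride=2, bias=False)(x)
assert tuple(a.shape) == tuple(b.shape) == (1, 256, 28, 28)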
Example #10
 def __init__(self, in_ch=3, n_classes=2):
     super(NestedUNet, self).__init__()
     n1 = 64
     filters = [n1, (n1 * 2), (n1 * 4), (n1 * 8), (n1 * 16)]
     self.pool = nn.Pool(2, stride=2, op='maximum')
     self.Up = nn.Upsample(scale_factor=2, mode='bilinear')
     self.conv0_0 = DoubleConv(in_ch, filters[0], filters[0])
     self.conv1_0 = DoubleConv(filters[0], filters[1], filters[1])
     self.conv2_0 = DoubleConv(filters[1], filters[2], filters[2])
     self.conv3_0 = DoubleConv(filters[2], filters[3], filters[3])
     self.conv4_0 = DoubleConv(filters[3], filters[4], filters[4])
     self.conv0_1 = DoubleConv((filters[0] + filters[1]), filters[0],
                               filters[0])
     self.conv1_1 = DoubleConv((filters[1] + filters[2]), filters[1],
                               filters[1])
     self.conv2_1 = DoubleConv((filters[2] + filters[3]), filters[2],
                               filters[2])
     self.conv3_1 = DoubleConv((filters[3] + filters[4]), filters[3],
                               filters[3])
     self.conv0_2 = DoubleConv(((filters[0] * 2) + filters[1]), filters[0],
                               filters[0])
     self.conv1_2 = DoubleConv(((filters[1] * 2) + filters[2]), filters[1],
                               filters[1])
     self.conv2_2 = DoubleConv(((filters[2] * 2) + filters[3]), filters[2],
                               filters[2])
     self.conv0_3 = DoubleConv(((filters[0] * 3) + filters[1]), filters[0],
                               filters[0])
     self.conv1_3 = DoubleConv(((filters[1] * 3) + filters[2]), filters[1],
                               filters[1])
     self.conv0_4 = DoubleConv(((filters[0] * 4) + filters[1]), filters[0],
                               filters[0])
     self.final = nn.Conv(filters[0], n_classes, 1)
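In the nested decoder, node `convI_J` concatenates J same-level feature maps (`filters[I]` channels each) with one upsampled map from level I+1 (`filters[I+1]` channels); a hedged check of two of the input sizes above:

filters = [64, 128, 256, 512, 1024]
assert filters[0] * 2 + filters[1] == 256  # conv0_2 input channels
assert filters[0] * 4 + filters[1] == 384  # conv0_4 input channels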
Example #11
 def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
     super(DenseNet, self).__init__()
     self.features = nn.Sequential(OrderedDict([
         ('conv0', nn.Conv(3, num_init_features, 7, stride=2, padding=3, bias=False)),
         ('norm0', nn.BatchNorm(num_init_features)),
         ('relu0', nn.ReLU()),
         ('pool0', nn.Pool(3, stride=2, padding=1, op='maximum')),
     ]))
     num_features = num_init_features
     for (i, num_layers) in enumerate(block_config):
         block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
         self.features.add_module('denseblock%d' % (i + 1), block)
         num_features = (num_features + (num_layers * growth_rate))
         if (i != (len(block_config) - 1)):
             trans = _Transition(num_input_features=num_features, num_output_features=(num_features // 2))
             self.features.add_module('transition%d' % (i + 1), trans)
             num_features = (num_features // 2)
     self.features.add_module('norm5', nn.BatchNorm(num_features))
     self.classifier = nn.Linear(num_features, num_classes)
     for m in self.modules():
         if isinstance(m, nn.Conv):
             nn.init.invariant_uniform_(m.weight)
         elif isinstance(m, nn.BatchNorm):
             nn.init.constant_(m.weight, 1)
             nn.init.constant_(m.bias, 0)
         elif isinstance(m, nn.Linear):
             nn.init.constant_(m.bias, 0)
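For the default DenseNet-121 configuration, the channel bookkeeping above ends at 1024 features, matching the classifier; a hedged re-computation:

num_features = 64                    # num_init_features
for i, num_layers in enumerate((6, 12, 24, 16)):
    num_features += num_layers * 32  # each dense block adds num_layers * growth_rate
    if i != 3:
        num_features //= 2           # each transition halves the channels
assert num_features == 1024          # input size of self.classifier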
Example #12
 def __init__(self,
              in_channels,
              ch1x1,
              ch3x3red,
              ch3x3,
              ch5x5red,
              ch5x5,
              pool_proj,
              conv_block=None):
     super(Inception, self).__init__()
     if (conv_block is None):
         conv_block = BasicConv2d
     self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
     self.branch2 = nn.Sequential(
         conv_block(in_channels, ch3x3red, kernel_size=1),
         conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1))
     self.branch3 = nn.Sequential(
         conv_block(in_channels, ch5x5red, kernel_size=1),
         conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1))
     self.branch4 = nn.Sequential(
         nn.Pool(kernel_size=3,
                 stride=1,
                 padding=1,
                 ceil_mode=True,
                 op='maximum'),
         conv_block(in_channels, pool_proj, kernel_size=1))
Example #13
 def __call__(self, im1, im2, mask=None, conf_sigma=None):
     im = jt.contrib.concat([im1, im2], dim=0)
     im = self.normalize(im)
     feats = []
     f = self.slice1(im)
     feats += [jt.chunk(f, 2, dim=0)]
     f = self.slice2(f)
     feats += [jt.chunk(f, 2, dim=0)]
     f = self.slice3(f)
     feats += [jt.chunk(f, 2, dim=0)]
     f = self.slice4(f)
     feats += [jt.chunk(f, 2, dim=0)]
     losses = []
     for (f1, f2) in feats[2:3]:
         loss = ((f1 - f2).sqr())
         if (conf_sigma is not None):
             loss = ((loss / ((2 * (conf_sigma.sqr())) + EPS)) +
                     (conf_sigma + EPS).log())
         if (mask is not None):
             (b, c, h, w) = loss.shape
             (_, _, hm, wm) = mask.shape
             (sh, sw) = ((hm // h), (wm // w))
             assert sh == sw
             mask0 = nn.Pool(kernel_size=sh, stride=sw,
                             op="mean")(mask).broadcast(loss)
             loss = ((loss * mask0).sum() / mask0.sum())
         else:
             loss = loss.mean()
         losses += [loss]
     return sum(losses)
Example #14
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError(
                'replace_stride_with_dilation should be None or a 3-element tuple, got {}'
                .format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv(3,
                             self.inplanes,
                             kernel_size=7,
                             stride=2,
                             padding=3,
                             bias=False)
        jt.init.relu_invariant_gauss_(self.conv1.weight, mode="fan_out")
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.GELU()
        self.maxpool = nn.Pool(kernel_size=3,
                               stride=2,
                               padding=1,
                               op='maximum')
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        # self.conv2 = conv1x1((512 * block.expansion), 1024)
        # self.at = Attention(1024, num_heads=1, kdim=1024,
        #                     vdim=1024, self_attention=True)
        # self.conv3 = conv1x1(1024, (512 * block.expansion))

        self.fc = nn.Linear((512 * block.expansion), num_classes)
Example #15
    def __init__(self, block, layers, output_stride):
        super(ResNet, self).__init__()
        self.inplanes = 128
        blocks = [1, 2, 4]
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError

        # Modules
        self.conv1 = nn.Sequential(
            nn.Conv(3, 64, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv(64, 128, kernel_size=3, stride=1, padding=1, bias=False))


        self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3])
Example #16
 def __init__(self, block, layers, num_classes=1000):
     self.inplanes = 64
     super(ResNet, self).__init__()
     self.conv1 = nn.Conv(3,
                          64,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False)
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.Relu()
     self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = nn.Pool(7, stride=1, op="mean")
     self.fc = nn.Linear(512 * block.expansion, num_classes)
Example #17
 def __init__(self, num_classes=1000):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv(3, 64, kernel_size=11, stride=4, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(64, 192, kernel_size=5, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(192, 384, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(384, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(256, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'))
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(nn.Dropout(),
                                     nn.Linear(((256 * 6) * 6), 4096),
                                     nn.Relu(), nn.Dropout(),
                                     nn.Linear(4096, 4096), nn.Relu(),
                                     nn.Linear(4096, num_classes))
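Since the adaptive pool fixes the spatial size at 6x6, the classifier input is 256 * 6 * 6 = 9216 regardless of input resolution; a hedged smoke test (assumes the `AlexNet` class above is in scope):

import jittor as jt
model = AlexNet()
y = model.avgpool(model.features(jt.random((1, 3, 224, 224))))
assert tuple(y.shape) == (1, 256, 6, 6)  # flattens to 256*6*6 = 9216 features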
Example #18
 def __init__(self, latent_dim, input_shape):
     super(Encoder, self).__init__()
     resnet18_model = resnet.Resnet18()
     self.feature_extractor = nn.Sequential(*list(resnet18_model.children())[:-3])
     self.pooling = nn.Pool(kernel_size=8, stride=8, padding=0, op='mean')
     self.fc_mu = nn.Linear(256, latent_dim)
     self.fc_logvar = nn.Linear(256, latent_dim)
     for m in self.modules():
         weights_init_normal(m)
Example #19
def resnet_fake():
    from jittor import nn
    net = nn.Sequential(
        nn.Conv(3, 64, 7, 2, 3),
        nn.BatchNorm(64),
        nn.ReLU(),
        nn.Pool(3, 2, 1)
    )
    return net
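A minimal smoke test of `resnet_fake` (hedged; the stem has overall stride 4, so a 224x224 input comes out at 56x56):

import jittor as jt
net = resnet_fake()
y = net(jt.random((1, 3, 224, 224)))
assert tuple(y.shape) == (1, 64, 56, 56)  # conv stride 2, then pool stride 2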
Example #20
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv(3, 32, 3, 1)  # no padding
        self.conv2 = nn.Conv(32, 64, 3, 1)
        self.bn = nn.BatchNorm(64)

        self.max_pool = nn.Pool(2, 2)
        self.relu = nn.Relu()
        self.fc1 = nn.Linear(64 * 12 * 12, 256)
        self.fc2 = nn.Linear(256, 10)
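The `fc1` input size follows from the two unpadded convolutions and the pool; a hedged check assuming 28x28 inputs:

# conv1 (k3, s1, no padding): 28 -> 26
# conv2 (k3, s1, no padding): 26 -> 24
# max_pool (k2, s2):          24 -> 12
assert 64 * 12 * 12 == 9216  # matches nn.Linear(64 * 12 * 12, 256)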
Example #21
 def __init__(self, num_input_features, num_output_features):
     super(_Transition, self).__init__()
     self.add_module('norm', nn.BatchNorm(num_input_features))
     self.add_module('relu', nn.ReLU())
     self.add_module(
         'conv',
         nn.Conv(num_input_features,
                 num_output_features,
                 1,
                 stride=1,
                 bias=False))
     self.add_module('pool', nn.Pool(2, stride=2, op='mean'))
Example #22
 def __init__(self, version='1_0', num_classes=1000):
     super(SqueezeNet, self).__init__()
     self.num_classes = num_classes
     if (version == '1_0'):
         self.features = nn.Sequential(
             nn.Conv(3, 96, kernel_size=7, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(96, 16, 64, 64), Fire(128, 16, 64, 64),
             Fire(128, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 32, 128, 128), Fire(256, 48, 192, 192),
             Fire(384, 48, 192, 192), Fire(384, 64, 256, 256),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(512, 64, 256, 256))
     elif (version == '1_1'):
         self.features = nn.Sequential(
             nn.Conv(3, 64, kernel_size=3, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(64, 16, 64, 64), Fire(128, 16, 64, 64),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(128, 32, 128, 128), Fire(256, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 48, 192, 192), Fire(384, 48, 192, 192),
             Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
     else:
         raise ValueError(
             'Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected'.
             format(version=version))
     final_conv = nn.Conv(512, self.num_classes, kernel_size=1)
     self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                     final_conv, nn.Relu(),
                                     nn.AdaptiveAvgPool2d((1, 1)))
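Each `Fire` module concatenates its 1x1 and 3x3 expand branches, so (assuming the standard `Fire(in, squeeze, expand1x1, expand3x3)` signature) its output has `expand1x1 + expand3x3` channels; a hedged check of the 1_0 chain:

def fire_out(inplanes, squeeze, expand1x1, expand3x3):
    return expand1x1 + expand3x3

assert fire_out(96, 16, 64, 64) == 128     # feeds Fire(128, 16, 64, 64)
assert fire_out(128, 32, 128, 128) == 256  # feeds Fire(256, 32, 128, 128)
assert fire_out(384, 64, 256, 256) == 512  # feeds final_conv (512 channels in)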
Example #23
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        self.feat_stride = 16
        self.out_channels = 1024

        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError(
                'replace_stride_with_dilation should be None or a 3-element tuple, got {}'
                .format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        stride = 2
        self.conv1 = nn.Conv(3,
                             self.inplanes,
                             kernel_size=7,
                             stride=stride,
                             padding=3,
                             bias=False)
        jt.init.relu_invariant_gauss_(self.conv1.weight, mode="fan_out")
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.Relu()

        self.maxpool = nn.Pool(kernel_size=3,
                               stride=2,
                               padding=1,
                               op='maximum')
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=stride,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=stride,
                                       dilate=replace_stride_with_dilation[1])
Example #24
    def __progressive_down_sampling(self, real_batch, depth, alpha):
        """
        private helper for down_sampling the original images in order to facilitate the
        progressive growing of the layers.

        :param real_batch: batch of real samples
        :param depth: depth at which training is going on
        :param alpha: current value of the fade-in alpha
        :return: real_samples => modified real batch of samples
        """

        # from torch.nn import AvgPool2d
        # from torch.nn.functional import interpolate

        if self.structure == 'fixed':
            return real_batch

        # down_sample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        prior_down_sample_factor = max(int(np.power(2, self.depth - depth)), 0)

        # ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
        ds_real_samples = nn.Pool(down_sample_factor, op='mean')(real_batch)

        if depth > 0:
            # prior_ds_real_samples = interpolate(AvgPool2d(prior_down_sample_factor)(real_batch), scale_factor=2)
            prior_ds_real_samples = nn.interpolate(
                nn.Pool(prior_down_sample_factor, op='mean')(real_batch),
                scale_factor=2,
                mode='nearest')
        else:
            prior_ds_real_samples = ds_real_samples

        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        real_samples = (alpha * ds_real_samples) + (
            (1 - alpha) * prior_ds_real_samples)

        # return the so computed real_samples
        return real_samples
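The returned batch is a plain convex combination of the two resolutions, so `alpha=1` reproduces the current-depth downsampling exactly; a hedged toy check:

import jittor as jt

alpha = 0.3
ds = jt.ones((1, 3, 4, 4))      # stands in for ds_real_samples
prior = jt.zeros((1, 3, 4, 4))  # stands in for prior_ds_real_samples
blended = (alpha * ds) + ((1 - alpha) * prior)
assert abs(blended.mean().item() - alpha) < 1e-6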
Example #25
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.Pool(kernel_size=2, stride=2, op="maximum")]
        else:
            conv2d = nn.Conv(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
Example #26
    def __init__(self,
                 cardinality,
                 depth,
                 nlabels,
                 base_width,
                 widen_factor=4):
        """ Constructor

        Args:
            cardinality: number of convolution groups.
            depth: number of layers.
            nlabels: number of classes
            base_width: base number of channels in each group.
            widen_factor: factor to adjust the channel dimensionality
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        self.block_depth = (self.depth - 2) // 9
        self.base_width = base_width
        self.widen_factor = widen_factor
        self.nlabels = nlabels
        self.output_size = 64
        self.stages = [
            64, 64 * self.widen_factor, 128 * self.widen_factor,
            256 * self.widen_factor
        ]

        self.conv_1_3x3 = nn.Conv(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.classifier = nn.Linear(self.stages[3], nlabels)

        self.pool = nn.Pool(8, 1, op="mean")

        self.relu = nn.Relu()
        init.relu_invariant_gauss_(self.classifier.weight)

        for param in self.parameters():
            key = param.name()
            if key.split('.')[-1] == 'weight':
                if 'Conv' in key:
                    init.relu_invariant_gauss_(param, mode='fan_out')
                if 'BatchNorm' in key:
                    init.constant_(param, value=1.0)
            elif key.split('.')[-1] == 'bias':
                init.constant_(param, value=0.0)
Example #27
    def __init__(self, num_classes=10):
        super(ResNet18, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            nn.Conv(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(64),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)

        self.pool = nn.Pool(4)
        self.fc = nn.Linear(512, num_classes)
Example #28
    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv(3,
                             64,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        self.pool = nn.Pool(4)
Example #29
    def __init__(
        self,
        in_ch,
        stage_ch,
        concat_ch,
        block_per_stage,
        layer_per_block,
        stage_num,
        SE=False,
        dcn_config={},
    ):
        super(_OSA_stage, self).__init__()

        if stage_num != 2:
            self.add_module(
                'Pooling',
                nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'))

        if block_per_stage != 1:
            SE = False
        module_name = f'OSA{stage_num}_1'
        self.add_module(
            module_name,
            _OSA_module(in_ch,
                        stage_ch,
                        concat_ch,
                        layer_per_block,
                        module_name,
                        SE=SE,
                        dcn_config=dcn_config))
        for i in range(block_per_stage - 1):
            if i != block_per_stage - 2:  # last block
                SE = False
            module_name = f'OSA{stage_num}_{i + 2}'
            self.add_module(
                module_name,
                _OSA_module(concat_ch,
                            stage_ch,
                            concat_ch,
                            layer_per_block,
                            module_name,
                            SE=SE,
                            identity=True,
                            dcn_config=dcn_config))
Example #30
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Pool(kernel_size=stride, stride=stride, 
                    ceil_mode=True, op='mean'),
                nn.Conv(self.inplanes, planes * block.expansion, 
                    kernel_size=1, stride=1, bias=False),
                nn.BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, downsample,
                        stype='stage', baseWidth = self.baseWidth, scale=self.scale))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, baseWidth = self.baseWidth, scale=self.scale))

        return nn.Sequential(*layers)