# --- Example 1 ---
 def __init__(self,
              num_input_features,
              growth_rate,
              bn_size,
              drop_rate,
              norm_type='Unknown'):
     """Build one dense layer: norm1 -> relu1 -> 1x1 conv -> norm2 -> relu2 -> 3x3 conv.

     Args:
         num_input_features: channels entering the layer.
         growth_rate: channels produced by the final 3x3 conv.
         bn_size: bottleneck multiplier; the 1x1 conv widens to
             ``bn_size * growth_rate`` channels.
         drop_rate: stored on the instance for use by the forward pass.
         norm_type: normalization flavour forwarded to ``get_norm``.
     """
     super(_DenseLayer, self).__init__()
     # NOTE: the original code had a stray trailing comma after each
     # add_module(...) call, turning every statement into a useless
     # one-element tuple expression; the commas are removed here.
     self.add_module('norm1', get_norm(norm_type, num_input_features))
     self.add_module('relu1', nn.ReLU(inplace=True))
     # Bottleneck 1x1 conv: expand to bn_size * growth_rate channels.
     self.add_module(
         'conv1',
         nn.Conv2d(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('norm2', get_norm(norm_type, bn_size * growth_rate))
     self.add_module('relu2', nn.ReLU(inplace=True))
     # 3x3 conv: reduce to growth_rate output channels, padding keeps size.
     self.add_module(
         'conv2',
         nn.Conv2d(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
     self.drop_rate = drop_rate
# --- Example 2 ---
 def __init__(self,
              num_input_features,
              num_output_features,
              norm_type='Unknown'):
     """Transition between dense blocks: norm -> relu -> 1x1 conv -> 2x2 avg-pool.

     Remaps the channel count via the 1x1 conv and halves the spatial
     resolution via the average pool.
     """
     super(_Transition, self).__init__()
     # Register the sub-modules in order from a name/layer table.
     pointwise = nn.Conv2d(
         num_input_features,
         num_output_features,  # noqa
         kernel_size=1,
         stride=1,
         bias=False)
     stages = (
         ('norm', get_norm(norm_type, num_input_features)),
         ('relu', nn.ReLU(inplace=True)),
         ('conv', pointwise),
         ('pool', nn.AvgPool2d(kernel_size=2, stride=2)),
     )
     for tag, layer in stages:
         self.add_module(tag, layer)
# --- Example 3 ---
def make_layers(cfg, batch_norm=False,
                norm_type='Unknown'):
    """Assemble a VGG-style feature extractor from a layer spec.

    Args:
        cfg: sequence mixing ints (conv output channels, 3x3 kernel,
            padding 1) and the string 'M' (2x2 max-pool, stride 2).
        batch_norm: if True, insert a ``get_norm`` layer after each conv.
        norm_type: normalization flavour forwarded to ``get_norm``.

    Returns:
        ``nn.Sequential`` holding the assembled layers; the first conv
        always takes 3 input channels.
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(channels, spec, kernel_size=3, padding=1)
        if batch_norm:
            modules.extend([conv, get_norm(norm_type, spec),
                            nn.ReLU(inplace=True)])
        else:
            modules.extend([conv, nn.ReLU(inplace=True)])
        channels = spec
    return nn.Sequential(*modules)
# --- Example 4 ---
 def __init__(self,
              in_channels,
              out_channels,
              norm_type='Unknown',
              bilinear=True):
     """Decoder block: 3x3 conv -> norm -> relu (bilinear path only).

     Args:
         in_channels: channels entering the 3x3 conv.
         out_channels: channels produced by the conv / normalized.
         norm_type: normalization flavour forwarded to ``get_norm``.
         bilinear: only True is supported; the transposed-conv
             alternative was never implemented.

     Raises:
         NotImplementedError: if ``bilinear`` is False.
     """
     super(_UpBlock, self).__init__()
     if bilinear:
         self.add_module(
             'conv1',
             nn.Conv2d(in_channels,
                       out_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False))
         self.add_module('norm1', get_norm(norm_type, out_channels))
         self.add_module('relu1', nn.ReLU(inplace=True))
     else:
         # The original code only did print('TODO') here, silently
         # constructing an empty module that breaks later in forward();
         # fail fast instead so the misuse is caught at build time.
         raise NotImplementedError(
             'bilinear=False (transposed-conv upsampling) is not implemented')
 def __init__(self, in_channels, out_channels, norm_type='Unknown',
              **kwargs):
     """Bias-free conv followed by a normalization layer.

     Any extra keyword arguments (kernel_size, stride, padding, ...)
     are forwarded verbatim to ``nn.Conv2d``.
     """
     super(BasicConv2d, self).__init__()
     conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
     self.conv = conv
     # eps=0.001 matches the Inception-style BasicConv2d convention.
     self.norm = get_norm(norm_type, out_channels, eps=0.001)
# --- Example 6 ---
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 norm_type='Unknown',
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):  # noqa
        """Build a DenseNet backbone (stem + 4 dense blocks) and classifier.

        Args:
            growth_rate: channels each dense layer adds to the running total.
            block_config: number of layers in each successive dense block.
            norm_type: normalization flavour forwarded to ``get_norm``.
            num_init_features: channels produced by the stem convolution.
            bn_size: bottleneck multiplier used inside each dense layer.
            drop_rate: dropout rate forwarded to each dense block.
            num_classes: output size of the final linear classifier.
        """

        super(DenseNet, self).__init__()

        # First convolution: 7x7 stride-2 stem, then 3x3 stride-2 max-pool
        # (overall 4x spatial downsampling before the first dense block).
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),  # noqa
                ('norm0', get_norm(norm_type, num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock; a transition (channel halving + 2x pooling)
        # follows every block except the last.
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                norm_type=norm_type,  # noqa
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate)  # noqa
            self.features.add_module('denseblock%d' % (i + 1), block)
            # Each of the block's layers concatenates growth_rate channels.
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2,
                                    norm_type=norm_type)  # noqa
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', get_norm(norm_type, num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
        # Expose the backbone's output width for downstream consumers.
        self.num_features = num_features

        # Official init from torch repo.
        # NOTE(review): nn.Linear weights keep PyTorch's default init;
        # only the bias is zeroed here.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.InstanceNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def __init__(self,
                 n_channels,
                 n_classes,
                 num_classes,
                 t_classes,
                 pool,
                 bilinear=True):
        """Build the dual-branch Dense-UNet.

        Constructs a source-domain DenseNet encoder/U-Net decoder
        (``s_*`` modules, segmentation head ``s_outc``) and a
        target-domain DenseNet classifier branch (``t_*`` modules).
        The wiring between branches happens in the forward pass, which
        is not visible here.

        Args:
            n_channels: input channels of the source-branch stem.
            n_classes: output channels of the source segmentation head.
            num_classes: stored on the instance; use not visible here.
            t_classes: output channels of the target 1x1-conv classifier.
            pool: pooling spec forwarded to ``GlobalPool``.
            bilinear: forwarded to every ``_UpBlock``.
        """
        super(DenseUNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.num_classes = num_classes
        self.t_classes = t_classes
        self.bilinear = bilinear
        self.pool = pool
        self.global_pool = GlobalPool(pool)

        # DenseNet Arch Hyperparameters (DenseNet-121 configuration)
        growth_rate = 32
        block_config = [6, 12, 24, 16]
        num_init_features = 64
        norm_type = 'BatchNorm'
        bn_size = 4
        drop_rate = 0

        # Source Domain Architecture
        # First convolution
        self.s_inc = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(self.n_channels,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),  # noqa
                ('norm0', get_norm(norm_type, num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # S - Denseblock 1 - 128 x 128
        num_features = num_init_features  # 64
        self.s_denseblock_1 = _DenseBlock(num_layers=block_config[0],
                                          num_input_features=num_features,
                                          norm_type=norm_type,
                                          bn_size=bn_size,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
        num_features = num_features + block_config[0] * growth_rate  # 256
        self.s_transition_1 = _Transition(num_input_features=num_features,
                                          num_output_features=num_features //
                                          2,
                                          norm_type=norm_type)
        num_features = num_features // 2  # 128

        # S - Denseblock 2 - 64 x 64
        self.s_denseblock_2 = _DenseBlock(num_layers=block_config[1],
                                          num_input_features=num_features,
                                          norm_type=norm_type,
                                          bn_size=bn_size,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
        num_features = num_features + block_config[1] * growth_rate  # 512
        self.s_transition_2 = _Transition(num_input_features=num_features,
                                          num_output_features=num_features //
                                          2,
                                          norm_type=norm_type)
        num_features = num_features // 2  # 256

        # S - Denseblock 3 - 32 x 32
        self.s_denseblock_3 = _DenseBlock(num_layers=block_config[2],
                                          num_input_features=num_features,
                                          norm_type=norm_type,
                                          bn_size=bn_size,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
        num_features = num_features + block_config[2] * growth_rate  # 1024
        self.s_transition_3 = _Transition(num_input_features=num_features,
                                          num_output_features=num_features //
                                          2,
                                          norm_type=norm_type)
        num_features = num_features // 2  # 512

        # S - Denseblock 4 - 16 x 16
        self.s_denseblock_4 = _DenseBlock(num_layers=block_config[3],
                                          num_input_features=num_features,
                                          norm_type=norm_type,
                                          bn_size=bn_size,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
        num_features = num_features + block_config[3] * growth_rate  # 1024
        self.s_transition_4 = _Transition(num_input_features=num_features,
                                          num_output_features=num_features //
                                          2,
                                          norm_type=norm_type)
        num_features = num_features // 2  # 512

        # Decoder: each _UpBlock's in_channels presumably accounts for a
        # skip-connection concatenation done in forward() — TODO confirm.
        # S - UpBlock 4 - 512 x 8 x 8
        in_channels = num_features * 2  # 512 + 512 = 1024
        out_channels = num_features  # 512
        self.s_upblock_4 = _UpBlock(in_channels,
                                    out_channels,
                                    norm_type=norm_type,
                                    bilinear=self.bilinear)

        # S - UpBlock 3 - 512 x 16 x 16
        in_channels = out_channels // 2 + out_channels  # 256 + 512 = 768
        out_channels = out_channels // 2  # 256
        self.s_upblock_3 = _UpBlock(in_channels,
                                    out_channels,
                                    norm_type=norm_type,
                                    bilinear=self.bilinear)

        # S - UpBlock 2 - 256 x 32 x 32
        in_channels = out_channels // 2 + out_channels  # 128 + 256 = 384
        out_channels = out_channels // 2  # 128
        self.s_upblock_2 = _UpBlock(in_channels,
                                    out_channels,
                                    norm_type=norm_type,
                                    bilinear=self.bilinear)

        # S - UpBlock 1 - 128 x 64 x 64
        in_channels = out_channels // 2 + out_channels  # 64 + 128 = 192
        out_channels = out_channels // 2  # 64
        self.s_upblock_1 = _UpBlock(in_channels,
                                    out_channels,
                                    norm_type=norm_type,
                                    bilinear=self.bilinear)

        # S - OutConv - 64 x 128 x 128
        in_channels = out_channels
        out_channels = self.n_classes
        self.s_outc = _OutConv(in_channels,
                               out_channels)  # num_classes x 512 x 512

        # Target Domain Architecture
        # T - First convolution
        # NOTE(review): the target stem hard-codes 3 input channels,
        # unlike s_inc which uses self.n_channels — verify intentional.
        self.t_inc = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),  # noqa
                ('norm0', get_norm(norm_type, num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Target-branch feature widths are doubled relative to a plain
        # DenseNet, presumably because source features are concatenated
        # in at each stage during forward() — TODO confirm.
        # T - Denseblock 1 - 128 x 128
        num_features = num_init_features * 2  # 64 + 64 = 128
        self.t_denseblock_1 = nn.Sequential(
            OrderedDict([('denseblock1',
                          _DenseBlock(num_layers=block_config[0],
                                      num_input_features=num_features,
                                      norm_type=norm_type,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))
        num_features = num_features + block_config[
            0] * growth_rate  # 128 + 192 = 320
        self.t_transition_1 = nn.Sequential(
            OrderedDict([
                ('transition1',
                 _Transition(num_input_features=num_features,
                             num_output_features=num_init_features * 2,
                             norm_type=norm_type))
            ]))
        num_features = num_init_features * 4  # 128 + 128 = 256

        # T - Denseblock 2 - 64 x 64
        self.t_denseblock_2 = nn.Sequential(
            OrderedDict([('denseblock2',
                          _DenseBlock(num_layers=block_config[1],
                                      num_input_features=num_features,
                                      norm_type=norm_type,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))
        num_features = num_features + block_config[
            1] * growth_rate  # 256 + 384 = 640
        self.t_transition_2 = nn.Sequential(
            OrderedDict([
                ('transition2',
                 _Transition(num_input_features=num_features,
                             num_output_features=num_init_features * 4,
                             norm_type=norm_type))
            ]))
        num_features = num_init_features * 8  # 256 + 256 = 512

        # T - Denseblock 3 - 32 x 32
        self.t_denseblock_3 = nn.Sequential(
            OrderedDict([('denseblock3',
                          _DenseBlock(num_layers=block_config[2],
                                      num_input_features=num_features,
                                      norm_type=norm_type,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))
        num_features = num_features + block_config[
            2] * growth_rate  # 512 + 768 = 1280
        self.t_transition_3 = nn.Sequential(
            OrderedDict([
                ('transition3',
                 _Transition(num_input_features=num_features,
                             num_output_features=num_init_features * 8,
                             norm_type=norm_type))
            ]))
        num_features = num_init_features * 16  # 512 + 512 = 1024

        # T - Denseblock 4 - 16 x 16
        self.t_denseblock_4 = nn.Sequential(
            OrderedDict([('denseblock4',
                          _DenseBlock(num_layers=block_config[3],
                                      num_input_features=num_features,
                                      norm_type=norm_type,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))
        num_features = num_features + block_config[
            3] * growth_rate  # 1024 + 512 = 1536
        # NOTE: last transition divides by 3 (1536 // 3 = 512), unlike the
        # usual halving elsewhere in this file.
        self.t_transition_4 = _Transition(num_input_features=num_features,
                                          num_output_features=num_features //
                                          3,
                                          norm_type=norm_type)
        num_features = num_features // 3  # 512

        # T - Final batch norm - (512 + 512) x 8 x 8
        self.t_norm_5 = nn.Sequential(
            OrderedDict([('norm5', get_norm(norm_type, num_features * 2))]))

        # T - Classifier: global average pool then 1x1 conv to t_classes.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.t_classifier = nn.Conv2d(num_features * 2,
                                      self.t_classes,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0,
                                      bias=True)

        # Official init from torch repo.
        # NOTE(review): nn.Linear weights keep PyTorch's default init;
        # only the bias is zeroed here.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.InstanceNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)