Code Example #1
    def __init__(self, config, anchors, num_cls, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0):
        nn.Module.__init__(self)

        # First convolution
        self.layers = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.layers.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.layers.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.layers.add_module('norm5', nn.BatchNorm2d(num_features))

        self.layers.add_module('conv', nn.Conv2d(num_features, model.output_channels(len(anchors), num_cls), 1))
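
For reference, the running num_features arithmetic above reproduces the usual DenseNet-121 channel counts; a standalone sketch of that bookkeeping, assuming the default arguments shown:

# Standalone check of the channel bookkeeping above (defaults: 64 init features, growth rate 32).
num_features = 64
for i, num_layers in enumerate((6, 12, 24, 16)):
    num_features += num_layers * 32        # after denseblock 1-4: 256, 512, 1024, 1024
    if i != 3:
        num_features //= 2                 # after transition 1-3: 128, 256, 512
print(num_features)                        # 1024 channels feed norm5 and the final 1x1 conv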
Code Example #2
    def __init__(self, argparse):
        """
        :param argparse: parsed hyperparameters
        """
        super(PolicyValueFn, self).__init__()
        self.args = argparse
        self.conv = nn.Sequential(
            nn.BatchNorm2d(self.args.channels),
            nn.Conv2d(self.args.channels, 32, kernel_size=(1, 1)),
            nn.ReLU(inplace=True))
        self.dense = nn.Sequential(
            _DenseBlock(num_layers=7,
                        num_input_features=32,
                        bn_size=4,
                        growth_rate=12,
                        drop_rate=self.args.drop_rate),
            _Transition(num_input_features=32 + 7 * 12,
                        num_output_features=12),
            _DenseBlock(num_layers=7,
                        num_input_features=12,
                        bn_size=2,
                        growth_rate=9,
                        drop_rate=self.args.drop_rate))
        size = (self.args.size // 2)**2 * (12 + 7 * 9)
        self.policyFc = nn.Linear(size, self.args.size**2)
        self.valueFc = nn.Linear(size, 1)
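
The flattened size computed above, (size // 2)**2 * (12 + 7 * 9), matches the 75-channel output of the second dense block after the single transition halves the spatial resolution. A hedged sketch of a matching forward pass (not shown in the source; the log-softmax/tanh heads and the torch / torch.nn.functional as F imports are assumptions):

    # Hypothetical forward for the module above; the activation choices are assumptions.
    def forward(self, x):
        x = self.dense(self.conv(x))           # (N, 12 + 7 * 9, size // 2, size // 2)
        x = torch.flatten(x, start_dim=1)      # (N, (size // 2)**2 * 75)
        policy = F.log_softmax(self.policyFc(x), dim=1)
        value = torch.tanh(self.valueFc(x))
        return policy, value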
Code Example #3
File: densenet.py  Project: codealphago/yolo2-pytorch
    def __init__(self, config_channels, anchors, num_cls, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0):
        nn.Module.__init__(self)

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.features.add_module('conv', nn.Conv2d(num_features, model.output_channels(len(anchors), num_cls), 1))

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)  # in-place init; assigning the return value back to m.weight is unnecessary
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Code Example #4
    def __init__(self, 
                out_dim=256,
                in_channels=3,
                fpn_finest_layer=1):
        super().__init__()
        self.depth = 121
        self.feature_upsample = True
        self.fpn_finest_layer = fpn_finest_layer
        self.out_dim = out_dim
        self.in_channel = in_channels
        assert self.depth in [121]
        if self.depth == 121:
            num_init_features = 64
            growth_rate = 32
            block_config = (6, 12, 24)
            self.in_dim = [64, 256, 512, 1024]
        bn_size = 4
        drop_rate = 0

        # First convolution
        self.conv0 = nn.Conv2d(self.in_channel, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)
        self.norm0 = nn.BatchNorm2d(num_init_features)
        self.relu0 = nn.ReLU(inplace=True)
        self.pool0 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate,
                                memory_efficient=True)
            self.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        # self.add_module('norm5', nn.BatchNorm2d(num_features))

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

        if self.feature_upsample:
            for p in range(4, self.fpn_finest_layer - 1, -1):
                layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1)
                name = 'lateral%d' % p
                self.add_module(name, layer)

                nn.init.kaiming_uniform_(layer.weight, a=1)
                nn.init.constant_(layer.bias, 0)
        self.init_weights()
Code Example #5
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 6, 6),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):

        super(DenseNet, self).__init__()
        self.block_config = block_config
        # First convolution
        self.first_conv = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        self.denseblock = []
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)

            self.denseblock.append(
                nn.Sequential(OrderedDict([
                    (f'dblock{i}', block),
                ])))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.denseblock[i].add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.final_bn = nn.BatchNorm2d(num_features)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

        self.denseblock = nn.ModuleList(self.denseblock)
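
The source does not show how these pieces are chained; a minimal forward sketch, assuming the blocks are simply applied in order:

    # Hypothetical forward (assumption): first conv, then each dblock{i} (with its transition, except the last), then the final BN.
    def forward(self, x):
        x = self.first_conv(x)
        for block in self.denseblock:
            x = block(x)
        return self.final_bn(x)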
Code Example #6
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 u_depth=[5, 4, 3],
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):

        super(DenseNet, self).__init__()
        # First convolution
        self.features = OrderedDict([
            ('conv0',
             nn.Conv2d(48,
                       num_init_features,
                       kernel_size=7,
                       stride=2,
                       padding=3,
                       bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ])

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features['denseblock%d' % (i + 1)] = block
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features['transition%d' % (i + 1)] = trans
                num_features = num_features // 2
                fmf = FMF(num_features, num_features, u_depth[i])
                self.features['fmf%d' % (i + 1)] = fmf
        # Final batch norm
        self.features['norm5'] = nn.BatchNorm2d(num_features)

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)  # use the in-place initializer; the non-underscore form is deprecated
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
        # for k, v in self.features.items():
        #     self.__setattr__(k, v)
        self.features = nn.ModuleDict(self.features)
Code Example #7
File: DenseNet_LOC_HM.py  Project: jo5e7/Thesis_DTU
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000,
                 all_in_1_reduction=False):

        super(DenseNet_LOC_HM, self).__init__()

        self.all_in_1_reduction = all_in_1_reduction

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Convolutions to reduce dimension
        self.conv_reducer_1 = nn.Conv2d(1664, 64, 1)
        self.con_relu_1 = nn.ReLU()
        self.conv_reducer_2 = nn.Conv2d(64, 32, 1)
        self.con_relu_2 = nn.ReLU()
        self.conv_reducer_3 = nn.Conv2d(32, 16, 1)
        self.con_relu_3 = nn.ReLU()
        self.conv_reducer_4 = nn.Conv2d(16, 8, 1)
        self.con_relu_4 = nn.ReLU()

        self.conv_reducer_all_in_1 = nn.Conv2d(1664, 8, 1)

        # Linear layer for radiographic finding
        self.classifier = nn.Linear(num_features, 1)

        # Linear layer for localization
        self.classifier_locations = nn.Linear(2048, 7)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
Code Example #8
    def __init__(self,
                 block_config=None,
                 depth=100,
                 growth_rate=12,
                 reduction=0.5,
                 num_classes=10,
                 bottleneck_size=4,
                 avg_pool_size=8):
        super(DenseNetCIFAR, self).__init__()

        # Compute per-block layer count from depth: (depth - 4) // 6 (3 blocks, 2 convs per bottleneck layer)
        if block_config is None:
            layers = (depth - 4) // 6
            block_config = (layers,) * 3

        # First convolution
        num_features = growth_rate * 2
        self.add_module("conv", nn.Conv2d(in_channels=3,
                                          out_channels=num_features,
                                          kernel_size=3,
                                          padding=1,
                                          bias=False))

        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bottleneck_size,
                                growth_rate=growth_rate,
                                drop_rate=0)
            self.add_module("block{0}".format(i + 1), block)

            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                out_features = math.floor(num_features * reduction)
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=out_features)
                self.add_module("transition{0}".format(i + 1), trans)
                num_features = out_features

        # Final batch norm
        self.add_module("norm", nn.BatchNorm2d(num_features))
        self.add_module("relu", nn.ReLU(inplace=True))
        self.add_module("avg_pool", nn.AvgPool2d(kernel_size=avg_pool_size))

        # classifier layer
        outputs = int(num_features * 16 / (avg_pool_size * avg_pool_size))
        self.add_module("flatten", Flatten())
        self.add_module("classifier", nn.Linear(outputs, num_classes))

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
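
A quick standalone check of the defaults above: depth=100 gives (100 - 4) // 6 = 16 layers per block, and the channel arithmetic with growth rate 12 and reduction 0.5 works out as follows (a sketch, not part of the source):

import math

num_features = 12 * 2                                    # growth_rate * 2 = 24 initial channels
for i in range(3):
    num_features += 16 * 12                              # after each block: 216, 300, 342
    if i != 2:
        num_features = math.floor(num_features * 0.5)    # after transitions: 108, 150
print(num_features)                                      # 342 channels into the final norm/relu/avg_pool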
Code Example #9
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 transitionBlock=False,
                 transitionDense=True,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 memory_efficient=False):
        super(Csp_DenseNet, self).__init__()
        self.growth_down_rate = 2 if transitionBlock else 1
        self.features = torch.nn.Sequential(
            OrderedDict([
                ('conv0',
                 torch.nn.Conv2d(3,
                                 num_init_features,
                                 kernel_size=7,
                                 stride=2,
                                 padding=3,
                                 bias=False)),
                ('norm0', torch.nn.BatchNorm2d(num_init_features)),
                ('relu0', torch.nn.ReLU(inplace=True)),
                ('pool0', torch.nn.MaxPool2d(kernel_size=3,
                                             stride=2,
                                             padding=1)),
            ]))

        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _Csp_DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=bn_size,
                                    growth_rate=growth_rate,
                                    drop_rate=drop_rate,
                                    memory_efficient=memory_efficient,
                                    transition=transitionBlock)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features // 2 + num_layers * growth_rate // 2
            if (i != len(block_config) - 1) and transitionDense:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        self.features.add_module('norm5', torch.nn.BatchNorm2d(num_features))
        self.classifier = torch.nn.Linear(num_features, num_classes)

        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.constant_(m.bias, 0)
Code Example #10
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 memory_efficient=False):

        super(Encoder, self).__init__()

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(1,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = dsn._DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=bn_size,
                                    growth_rate=growth_rate,
                                    drop_rate=drop_rate,
                                    memory_efficient=memory_efficient)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = dsn._Transition(num_input_features=num_features,
                                        num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        self.num_features = num_features

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Code Example #11
    def __init__(self,
                 input_n_channels,
                 n_features=10,
                 n_classes=1,
                 num_init_features=64,
                 growth_rate=32,
                 block_config=(6, 12, 48, 32)):
        super(IcebergDenseNet4, self).__init__()

        bn_size = 4
        drop_rate = 0

        # First convolution
        self.features = Sequential(
            OrderedDict([
                ('conv0',
                 Conv2d(input_n_channels,
                        num_init_features,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias=False)),
                ('norm0', BatchNorm2d(num_init_features)),
                ('relu0', ReLU(inplace=True)),
                ('pool0', MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', BatchNorm2d(num_features))
        self.features.add_module('relu5', ReLU(inplace=True))

        # Linear layer
        self.classifier = Sequential(AdaptiveAvgPool2d(1), Flatten(),
                                     Linear(num_features, n_features),
                                     ReLU(inplace=True))
        self.final_classifier = Linear(n_features + 1, n_classes)
        self._initialize_weights()
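
The n_features + 1 input to final_classifier suggests one extra scalar (for example, the incidence angle in the iceberg data) is concatenated with the image features; a purely hypothetical forward sketch under that assumption:

    # Hypothetical forward; the extra scalar input of shape (N, 1) is an assumption based on n_features + 1.
    def forward(self, x, extra):
        f = self.classifier(self.features(x))                       # (N, n_features)
        return self.final_classifier(torch.cat([f, extra], dim=1))  # (N, n_classes)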
Code Example #12
    def __init__(self,
                 growth_rate: int = 32,
                 block_config: Tuple[int, int, int] = (12, 12, 12),
                 num_init_features: int = 64,
                 bn_size: int = 4,
                 drop_rate: float = 0,
                 num_classes: int = 10,
                 memory_efficient: bool = False) -> None:

        super().__init__()

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([('conv0',
                          nn.Conv2d(3,
                                    num_init_features,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    bias=False))]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate,
                                memory_efficient=memory_efficient)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
Code Example #13
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 32, 32),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 all_in_1_reduction=False):

        super(DenseNet_MH, self).__init__()

        self.all_in_1_reduction = all_in_1_reduction

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # get the classifier of the densenet
        self.classifier = nn.Linear(num_features, 1)

        # placeholder for the gradients
        self.gradients = None
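
The self.gradients placeholder is the usual Grad-CAM pattern; a minimal sketch of the companion helpers it implies (names and bodies are assumptions, not from the source):

    # Hypothetical Grad-CAM helpers matching the placeholder above (assumed).
    def activations_hook(self, grad):
        self.gradients = grad

    def get_activations(self, x):
        return self.features(x)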
Code Example #14
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, dropout_rate=0, num_classes=1000,
                 small_inputs=True):

        super(DenseNet, self).__init__()

        # First convolution
        if small_inputs:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_features, kernel_size=3,
                                    stride=1, padding=1, bias=False)),
            ]))
        else:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7,
                                    stride=2, padding=3, bias=False)),
            ]))
            self.features.add_module('norm0', nn.BatchNorm2d(num_init_features))
            self.features.add_module('relu0', nn.ReLU(inplace=True))
            self.features.add_module('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                                                           ceil_mode=False))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate,
                                drop_rate=dropout_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
Code Example #15
    def __init__(self,
                 growth_rate=12,
                 block_config=(16, 16, 16),
                 compression=0.5,
                 num_init_features=24,
                 bn_size=4,
                 drop_rate=0,
                 avgpool_size=8,
                 num_classes=10):

        super(DenseNet, self).__init__()
        assert 0 < compression <= 1, 'compression of densenet should be between 0 and 1'
        self.avgpool_size = avgpool_size

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=int(num_features *
                                                            compression))
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = int(num_features * compression)

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
Code Example #16
    def __init__(self,
                 model_path,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 **kwargs):

        super(DenseNet_Alignment, self).__init__()

        # First convolution
        self.firstconvolution = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        self.features = nn.Sequential()
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
Code Example #17
    def __init__(self,
                 hid_size=100,
                 growth_rate=32,
                 block_config=(2, 4, 4),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0.2,
                 num_directions=1):
        super(DNet, self).__init__()

        self.features = nn.Sequential(
            OrderedDict([('conv0',
                          nn.Conv2d(hid_size * num_directions,
                                    num_init_features, 1))]))

        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        self.features.add_module('norm4', nn.BatchNorm2d(num_features))

        #self.classifier = nn.Linear(num_features, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)  # use the in-place initializer; the non-underscore form is deprecated
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
Code Example #18
    def __init__(self,
                 branches,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):
        super(MTLDenseNet,
              self).__init__(growth_rate, block_config, num_init_features,
                             bn_size, drop_rate, num_classes)
        self.classifier = None

        self.fcs = ListModule(self, 'fc_')
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        for num_classes in branches:
            self.fcs.append(nn.Linear(num_features, num_classes))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
Code Example #19
def create_densenet_features(input_n_channels,
                             num_init_features=64,
                             growth_rate=32,
                             block_config=(6, 12, 48, 32)):
    bn_size = 4
    drop_rate = 0
    # First convolution
    features = Sequential(
        Conv2d(input_n_channels,
               num_init_features,
               kernel_size=3,
               stride=2,
               padding=1,
               bias=False),
        BatchNorm2d(num_init_features),
        ReLU(inplace=True),
        MaxPool2d(kernel_size=3, stride=2, padding=1),
    )
    # Each denseblock
    num_features = num_init_features
    for i, num_layers in enumerate(block_config):
        block = _DenseBlock(num_layers=num_layers,
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        features.add_module('denseblock%d' % (i + 1), block)
        num_features = num_features + num_layers * growth_rate
        if i != len(block_config) - 1:
            trans = _Transition(num_input_features=num_features,
                                num_output_features=num_features // 2)
            features.add_module('transition%d' % (i + 1), trans)
            num_features = num_features // 2

    # Final batch norm
    features.add_module('norm5', BatchNorm2d(num_features))
    features.add_module('relu5', ReLU(inplace=True))
    return features, num_features
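
A hedged usage sketch for this factory (the 2-channel input and the pooled linear head are assumptions, not part of the source):

import torch
from torch.nn import Linear

features, num_features = create_densenet_features(input_n_channels=2)
x = torch.randn(1, 2, 75, 75)
feat = features(x)                                         # (1, num_features, H', W'), roughly 32x downsampled
logits = Linear(num_features, 1)(feat.mean(dim=(2, 3)))    # global average pool + linear head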
Code Example #20
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 18, 12),
                 bn_size=4,
                 drop_rate=0.,
                 pretrained_encoder=2,
                 simple=False,
                 debug=False):
        """
        bn_size = bottleneck size, the factor by which the first conv in a _DenseLayer
            is larger than the second conv.
        :pretrained_encoder: int in [0,1,2] designating level of pretrained layers to use.
            0 is none, 1 is just first convolutions, 2 is the first dense block.
        """

        super(MammogramDenseNet, self).__init__()

        self.debug = debug
        self.pretrained = pretrained_encoder
        self.nb_dense_blocks = len(block_config)
        num_classes = 1  # indicator score of whether it is malignant

        include_denseblock = self.pretrained == 2
        pretrained_layers = get_pretrained_layers(
            include_denseblock=include_denseblock)
        self.features = nn.Sequential(pretrained_layers)

        if self.pretrained == 0:  # Re-initialize layers; don't use pretrained weights inside
            print("self.pretrained = 0. Re-initializing weights")
            for m in self.features.modules():

                if isinstance(m, nn.Conv2d):
                    old_m = deepcopy(m)
                    nn.init.kaiming_normal_(m.weight)
                    # Conv layers have no bias when used together with BatchNorm
                elif isinstance(m, nn.BatchNorm2d):
                    old_m = deepcopy(m)
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                else:
                    continue

                print("old m is", old_m.weight)
                print("new m is", m.weight)

        # Display shapes for debugging
        if debug:
            print("pretrained_encoder = %d" % pretrained_encoder)
            print(
                "Output shape after the pretrained modules (batch, channels, H, W):"
            )

            test_input = torch.rand(1, 1, 1024, 1024)
            test_output = self.features(test_input)
            print(test_output.size())
            del test_input
            del test_output

        # A counter to track what input shape our final nn.Linear layer should expect
        #  Just num_channels is fine, because global avg pool at end
        num_features = 256 if self.pretrained == 2 else 64

        # Add the rest of the architecture (Dense blocks, transition layers)
        for i, num_layers in enumerate(block_config):
            if simple:
                block = _SimpleDenseBlock(num_layers=num_layers,
                                          num_input_features=num_features,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
            else:
                block = _DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=bn_size,
                                    growth_rate=growth_rate,
                                    drop_rate=drop_rate)

            # Initialize the weights of block
            for m in block.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight)
                    # Conv layers have no bias when used together with BatchNorm
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

            block_name = 'simpledenseblock%d' % (
                i + 1) if simple else 'denseblock%d' % (i + 1)
            self.features.add_module(block_name, block)

            num_features = num_features + num_layers * growth_rate
            if debug:
                print("num features after denseblock %d:" % (i + 1),
                      num_features)

            # Add a transition layer if not the last dense block:
            #  Norm, 1x1 Conv (unless simple), activation, AvgPool
            if i != self.nb_dense_blocks - 1:
                if simple:
                    trans = _SimpleTransition(num_input_features=num_features)
                else:
                    trans = _Transition(num_input_features=num_features,
                                        num_output_features=num_features)

                transition_name = 'simpletransition%d' % (
                    i + 1) if simple else 'transition%d' % (i + 1)
                self.features.add_module(transition_name, trans)

                if debug:
                    print("num features after transition %d:" % (i + 1),
                          num_features)

        if debug: print("final num features:", num_features)

        # Put the classifier here separately
        #  will apply it manually in forward(x), after global avg pool and reshape
        self.classifier = nn.Linear(num_features, num_classes)
        nn.init.constant_(self.classifier.bias, 0)

        if debug:
            summary(self.features)
Code Example #21
    def __init__(self, features, global_features, v2=False):
        super(DualDenseNetUNetRDB, self).__init__()
        self.features = features
        self.global_features = global_features
        self.name = 'DualDenseUnetRDB'
        self.v2 = v2

        channels = [64, 256, 512, 1024, 1024]
        self.base_channel_size = channels[0]
        if not v2:
            self.smooth5 = nn.Sequential(
                nn.ReLU(),
                nn.Conv2d(channels[4] * 2, channels[4], kernel_size=1),
                nn.ReLU(),
                RDB(n_channels=channels[4],
                    nDenselayer=3,
                    growthRate=channels[4] // 4))
            self.smooth4 = nn.Sequential(
                nn.ReLU(), nn.Conv2d(channels[3], channels[3], kernel_size=1),
                nn.ReLU())
            self.smooth3 = nn.Sequential(
                nn.ReLU(), nn.Conv2d(channels[2], channels[2], kernel_size=1),
                nn.ReLU())
            self.smooth2 = nn.Sequential(
                nn.ReLU(), nn.Conv2d(channels[1], channels[1], kernel_size=1),
                nn.ReLU())
            self.smooth1 = nn.Sequential(
                nn.ReLU(), nn.Conv2d(channels[0], channels[0], kernel_size=1),
                nn.ReLU())
            self.up1 = RDBUp(channels[4], channels[3],
                             channels[3])  # 1024 + 1024, 1024
            self.up2 = RDBUp(channels[3], channels[2],
                             channels[2])  # 1024 + 512, 512
            self.up3 = RDBUp(channels[2], channels[1],
                             channels[1])  # 512 + 256, 256
            self.up4 = RDBUp(channels[1], channels[0],
                             channels[0])  # 256 + 64, 64
            self.last_up = UpPad()
            self.conv_3x3 = nn.Conv2d(channels[0],
                                      channels[0],
                                      kernel_size=3,
                                      padding=1)
        else:
            self.down5 = nn.Sequential(
                _Transition(channels[4] * 2, channels[4]),
                _DenseBlock(num_layers=16,
                            num_input_features=channels[4],
                            bn_size=4,
                            growth_rate=32,
                            drop_rate=0,
                            memory_efficient=False))
            self.up0 = ConvUp(channels[4] + 512, channels[4], channels[4])
            self.smooth4 = nn.Sequential(nn.BatchNorm2d(channels[3]),
                                         nn.LeakyReLU())
            self.smooth3 = nn.Sequential(nn.BatchNorm2d(channels[2]),
                                         nn.LeakyReLU())
            self.smooth2 = nn.Sequential(nn.BatchNorm2d(channels[1]),
                                         nn.LeakyReLU())
            self.smooth1 = nn.Sequential(nn.BatchNorm2d(channels[0]),
                                         nn.LeakyReLU())

            #                                                                       up + skip, out
            self.up1 = ConvUp(channels[4], channels[3],
                              channels[3])  # 1024 + 1024, 1024
            self.up2 = ConvUp(channels[3], channels[2],
                              channels[2])  # 1024 + 512, 512
            self.up3 = ConvUp(channels[2], channels[1],
                              channels[1])  # 512 + 256, 256
            self.up4 = ConvUp(channels[1], channels[0],
                              channels[0])  # 256 + 64, 64
        self.pad_to = PadToX(32)
Code Example #22
    def __init__(self, config):
        '''
        There is a cleaner way of implementing this. TODO clean-up

        Arguments:
            config: as specified in .utils.Dense_U_Net_lidar_helper 
        '''

        super().__init__()

        self.config = config

        # original densenet attributes
        self.growth_rate = config.model.growth_rate
        self.block_config = config.model.block_config
        self.num_init_features = config.model.num_init_features
        self.bn_size = config.model.bn_size
        self.drop_rate = config.model.drop_rate
        self.memory_efficient = config.model.memory_efficient
        self.num_classes = config.model.num_classes

        # param assignment
        self.concat_before_block_num = config.model.concat_before_block_num
        self.num_layers_before_blocks = config.model.num_layers_before_blocks
        self.concat_after_module_idx = self.num_layers_before_blocks - 1 + 2 * (
            self.concat_before_block_num - 1)
        self.stream_1_in_channels = config.model.stream_1_in_channels
        self.stream_2_in_channels = config.model.stream_2_in_channels
        self.network_input_channels = self.stream_1_in_channels  # Allowing for rgb input or torch.cat((rgb,lidar),1) | added
        if self.concat_before_block_num == 1 and self.stream_2_in_channels == 0:
            self.fusion = 'no'
        elif self.concat_before_block_num == 1 and self.stream_2_in_channels > 0:
            self.fusion = 'early'
            self.network_input_channels += self.stream_2_in_channels
        elif self.concat_before_block_num > 1 and self.concat_before_block_num <= len(
                self.block_config):
            self.fusion = 'mid'
        else:
            raise AttributeError

        ### core structure

        ## Encoder | same as densenet without norm5 and classifier

        # First convolution | original densenet
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(self.network_input_channels,
                           self.num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(self.num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock | original densenet + stack comprising layer sizes for the decoder
        feature_size_stack = deque()
        feature_size_stack.append(self.num_init_features +
                                  2 * self.growth_rate)
        num_features = self.num_init_features
        for i, num_layers in enumerate(self.block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=self.bn_size,
                                growth_rate=self.growth_rate,
                                drop_rate=self.drop_rate,
                                memory_efficient=self.memory_efficient)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * self.growth_rate
            feature_size_stack.append(num_features)
            if i != len(self.block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        ## Decoder
        # U net like | ugly: should have own class for whole sequence
        self.decoder = nn.Sequential()
        num_in_features = feature_size_stack.pop()
        for i in range(len(self.block_config)):
            num_features = feature_size_stack.pop()
            transp_conv_seq = nn.Sequential(
                OrderedDict(
                    [  # denselayer like struct; reduce channels with 1x1 convs
                        ('norm0', nn.BatchNorm2d(num_in_features)),
                        ('relu0', nn.ReLU(inplace=True)),
                        ('conv_reduce',
                         nn.Conv2d(num_in_features,
                                   num_features,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0,
                                   bias=False)),
                        ('norm1', nn.BatchNorm2d(num_features)),
                        ('relu1', nn.ReLU(inplace=True))
                    ]))
            self.decoder.add_module(
                'Transposed_Convolution_Sequence_%d' % (i + 1),
                transp_conv_seq)
            self.decoder.add_module(
                'Transposed_Convolution_%d' % (i + 1),
                nn.ConvTranspose2d(num_features,
                                   num_features,
                                   3,
                                   stride=2,
                                   padding=1,
                                   bias=False))
            num_in_features = num_features * 2
        self.decoder.add_module('Upsampling', nn.Upsample(scale_factor=2))

        # final refinement: concat orig rgb & lidar before passing
        self.dec_out_to_heat_maps = nn.Sequential(
            OrderedDict([
                ('norm0',
                 nn.BatchNorm2d(num_features + self.stream_1_in_channels +
                                self.stream_2_in_channels)),
                ('relu0', nn.ReLU(inplace=True)),
                ('refine0',
                 nn.Conv2d(num_features + self.stream_1_in_channels +
                           self.stream_2_in_channels,
                           num_features // 2,
                           3,
                           stride=1,
                           padding=1,
                           bias=False)),
                ('norm1', nn.BatchNorm2d(num_features // 2)),
                ('relu1', nn.ReLU(inplace=True)),
                ('refine1',
                 nn.Conv2d(num_features // 2,
                           self.num_classes,
                           5,
                           stride=1,
                           padding=2,
                           bias=False))
            ]))

        ### additional structure depending on fusion mechanism
        if self.fusion == 'no':
            # i.e. one stream only

            pass

        elif self.fusion == 'early':
            # i.e. concat rgb and lidar before network

            pass

        elif self.fusion == 'mid':
            # add all the same processing for the lidar data as for rgb data
            # add concat layer
            '''
            # weirdly gives slower iteration times
            # Stream_2 mirrors Stream_1 up to concat level
            self.stream_2_features = copy.deepcopy(self.features[:self.concat_after_module_idx+1])
            self.stream_2_features.conv0 = nn.Conv2d(self.stream_2_in_channels, 
                self.num_init_features, kernel_size=7, stride=2, padding=3, bias=False)
            '''
            # First convolution | original densenet | for lidar block
            self.stream_2_features = nn.Sequential(
                OrderedDict([
                    ('conv0',
                     nn.Conv2d(self.stream_2_in_channels,
                               self.num_init_features,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)),
                    ('norm0', nn.BatchNorm2d(self.num_init_features)),
                    ('relu0', nn.ReLU(inplace=True)),
                    ('pool0', nn.MaxPool2d(kernel_size=3, stride=2,
                                           padding=1)),
                ]))

            # Each denseblock | original densenet + break before concat layer
            num_features = self.num_init_features
            for i, num_layers in enumerate(self.block_config):
                if i == self.concat_before_block_num - 1:
                    break
                block = _DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=self.bn_size,
                                    growth_rate=self.growth_rate,
                                    drop_rate=self.drop_rate,
                                    memory_efficient=self.memory_efficient)
                self.stream_2_features.add_module('denseblock%d' % (i + 1),
                                                  block)
                num_features = num_features + num_layers * self.growth_rate
                if i != len(self.block_config) - 1:
                    trans = _Transition(num_input_features=num_features,
                                        num_output_features=num_features // 2)
                    self.stream_2_features.add_module('transition%d' % (i + 1),
                                                      trans)
                    num_features = num_features // 2

            # concat layer | rgb + lidar | 1x1 conv
            num_features = self.features[self.concat_after_module_idx +
                                         1].denselayer1.norm1.num_features
            self.concat_module = nn.Sequential(
                OrderedDict([('norm', nn.BatchNorm2d(num_features * 2)),
                             ('relu', nn.ReLU(inplace=True)),
                             ('conv',
                              nn.Conv2d(num_features * 2,
                                        num_features,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0,
                                        bias=False))]))

        else:
            raise ValueError('unknown fusion mode: %s' % self.fusion)

        # Official init from torch repo
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

        # get number of parameters of model
        self.num_params = sum(p.numel() for p in self.parameters())
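
Note: the forward pass of this two-stream model is not included in the snippet above. As an illustration only, the 'mid' fusion path suggested by the constructor might be wired roughly as below; slicing self.features at concat_after_module_idx and the way concat_module is applied are assumptions drawn from the constructor, not the repository's actual code.

import torch

def mid_fusion_features(model, rgb, lidar):
    # Sketch only: run the rgb stream up to (and including) the concat point,
    # run the mirrored lidar stream to the same depth, fuse, then continue.
    f_rgb = model.features[:model.concat_after_module_idx + 1](rgb)
    f_lidar = model.stream_2_features(lidar)
    fused = model.concat_module(torch.cat([f_rgb, f_lidar], dim=1))
    return model.features[model.concat_after_module_idx + 1:](fused)
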
コード例 #23
0
    def __init__(
        self,
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        num_init_features=64,
        bn_size=4,
        drop_rate=0,
        memory_efficient=False,
        outputs=[],
        url=None,
    ):
        super(DenseNet, self).__init__()
        self.url = url
        self.outputs = outputs
        self.block_config = block_config

        # First convolution
        self.conv1 = nn.Sequential(
            OrderedDict([
                (
                    "conv",
                    nn.Conv2d(
                        3,
                        num_init_features,
                        kernel_size=7,
                        stride=2,
                        padding=3,
                        bias=False,
                    ),
                ),
                ("norm", nn.BatchNorm2d(num_init_features)),
                ("relu", nn.ReLU(inplace=True)),
                ("pool", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = densenet._DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = densenet._Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2,
                )
                self.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
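
Note: this backbone registers its dense blocks and transitions as direct children and keeps an `outputs` list, which suggests intermediate feature maps are read out by name. The class's forward() is not part of this excerpt; the following is a minimal sketch of one way such a readout could look, under that assumption.

def extract_features(model, x):
    # Sketch only: run the registered children in order (conv1, denseblock1,
    # transition1, ...) and keep the maps whose names appear in model.outputs.
    taps = {}
    for name, module in model.named_children():
        x = module(x)
        if name in model.outputs:
            taps[name] = x
    return taps
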
コード例 #24
0
    def __init__(
        self,
        img_channels,
        out_channels,
        growth_rate=16,
        block_config=(2, 6, 4, 12, 8),
        num_init_features=8,
        bn_size=4,
        drop_rate=0.0,
    ):
        super(DenseYOLO, self).__init__()

        self.features = nn.Sequential(
            OrderedDict(
                [
                    (
                        "conv0",
                        nn.Conv2d(
                            in_channels=img_channels,
                            out_channels=num_init_features,
                            kernel_size=5,
                            padding=2,
                            bias=False,
                        ),
                    ),
                    ("norm0", nn.BatchNorm2d(num_features=num_init_features)),
                    ("relu0", nn.ReLU(inplace=True)),
                    ("pool0", nn.MaxPool2d(kernel_size=2, stride=2)),
                ]
            )
        )

        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2,
                )
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2

        self.features.add_module("norm1", nn.BatchNorm2d(num_features))

        self.features.add_module(
            "conv1",
            nn.Conv2d(
                in_channels=num_features,
                out_channels=out_channels,
                kernel_size=3,
                stride=3,
                bias=False,
            ),
        )

        # initialization
        p = 1.0 / 77.0  # prior for output assumes 1 box per grid of size 11x7
        b = -1.0 * np.log10((1.0 - p) / p)  # bias for output layer based on focal loss paper
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, nonlinearity="relu")
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                if name == "features.norm1":
                    nn.init.constant_(module.bias, b)
                else:
                    nn.init.constant_(module.bias, 0)
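
Note: the initialization above sets a bias b derived from a prior p = 1/77, following the prior-initialization idea from the focal loss (RetinaNet) paper. Two details worth flagging: the snippet computes b with np.log10 rather than the natural log used in the paper, and it assigns b to the bias of features.norm1 (the BatchNorm just before the output conv) rather than to an output-layer bias. The numbers below therefore only check the underlying idea, not this exact network.

import numpy as np

# With the paper's natural-log formula, sigmoid(b) recovers the prior p,
# i.e. every cell initially predicts roughly p "objectness".
p = 1.0 / 77.0
b_ln = -np.log((1.0 - p) / p)    # natural-log version from the paper
b_10 = -np.log10((1.0 - p) / p)  # log10 version used in the snippet above
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
print(round(sigmoid(b_ln), 4))   # 0.013  (== p)
print(round(sigmoid(b_10), 4))   # ~0.132 (log10 scales the bias by 1/ln(10))
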
コード例 #25
0
    def __init__(self,
                 model_path,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 **kwargs):

        super(MyDenseNet_stn, self).__init__()

        #Branch1 First convolution
        self.firstconvolution = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Branch1 denseblock 1
        DBnum_features = num_init_features
        TRnum_features = DBnum_features + block_config[0] * growth_rate
        self.block1 = nn.Sequential(
            OrderedDict([('denseblock1',
                          _DenseBlock(num_layers=block_config[0],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition1',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 2
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[1] * growth_rate
        self.block2 = nn.Sequential(
            OrderedDict([('denseblock2',
                          _DenseBlock(num_layers=block_config[1],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition2',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 3
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[2] * growth_rate
        self.highnum_fea = DBnum_features
        self.trannum_fea = TRnum_features
        self.block3 = nn.Sequential(
            OrderedDict([('denseblock3',
                          _DenseBlock(num_layers=block_config[2],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition3',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 4
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[3] * growth_rate
        self.block4 = nn.Sequential(
            OrderedDict([('denseblock4',
                          _DenseBlock(num_layers=block_config[3],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))

        #Branch1 Final batch norm
        self.norm5 = nn.BatchNorm2d(TRnum_features)

        #Branch1 Linear layer
        self.classifier = nn.Linear(TRnum_features, num_classes)

        #Spatial transformer localization network
        self.localization = nn.Sequential(
            OrderedDict([
                ('denseblock3',
                 _DenseBlock(num_layers=24,
                             num_input_features=self.highnum_fea,
                             bn_size=bn_size,
                             growth_rate=growth_rate,
                             drop_rate=drop_rate)),
                ('transition3',
                 _Transition(num_input_features=self.trannum_fea,
                             num_output_features=self.trannum_fea // 2))
            ]))

        self.downsample = nn.Sequential(
            OrderedDict([('bn', nn.BatchNorm2d(512)),
                         ('relu', nn.ReLU(inplace=True)),
                         ('conv',
                          nn.Conv2d(512,
                                    128,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=True))]))
        #Regressor
        self.fc_loc = nn.Sequential(nn.Linear(128 * 8 * 8, 128 * 8),
                                    nn.ReLU(True), nn.Linear(1024, 128),
                                    nn.ReLU(True), nn.Linear(128, 32),
                                    nn.ReLU(True), nn.Linear(32, 3 * 2))

        self.fc_loc[6].weight.data.fill_(0)
        self.fc_loc[6].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])

        alignmodel = DenseNet_Alignment()
        self.alignbase = alignmodel.features
        self.alignclassifier = nn.Linear(TRnum_features, num_classes)

        self.load_param()
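
Note: the spatial-transformer pieces above (localization branch, downsample, fc_loc initialized to the identity transform) are normally used with affine_grid / grid_sample. The forward pass is not shown in this excerpt; the sketch below assumes the downsampled localization features are 128x8x8 so they match fc_loc's input size.

import torch.nn.functional as F

def stn_warp(model, x, loc_feat):
    # Sketch only: predict a 2x3 affine matrix per sample and warp the input.
    theta = model.fc_loc(loc_feat.view(loc_feat.size(0), -1))  # (N, 6)
    theta = theta.view(-1, 2, 3)
    grid = F.affine_grid(theta, x.size(), align_corners=False)
    return F.grid_sample(x, grid, align_corners=False)
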
コード例 #26
0
    def __init__(self):
        super(DenseNetCustomTrunc, self).__init__()
        name = cfg.MODEL.BACKBONE.CONV_BODY
        self.depth = int(name.split('-')[1])
        self.feature_upsample = cfg.MODEL.BACKBONE.FEATURE_UPSAMPLE

        assert self.depth in [121]
        if self.depth == 121:
            num_init_features = 64
            growth_rate = 32
            block_config = (6, 12, 24)
            self.in_dim = [64, 256, 512, 1024]
        bn_size = 4
        drop_rate = 0

        # First convolution
        self.conv0 = nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)
        self.norm0 = nn.BatchNorm2d(num_init_features)
        self.relu0 = nn.ReLU(inplace=True)
        self.pool0 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        cfg.runtime_info.backbone_ft_dim = self.in_dim[-1]

        # Final batch norm
        # self.add_module('norm5', nn.BatchNorm2d(num_features))

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

        if self.feature_upsample:
            self.out_dim = cfg.MODEL.BACKBONE.OUT_CHANNELS
            self.fpn_finest_layer = cfg.MODEL.BACKBONE.FEATURE_UPSAMPLE_LEVEL-1
            for p in range(4, self.fpn_finest_layer - 1, -1):
                layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1)
                name = 'lateral%d' % p
                self.add_module(name, layer)

                nn.init.kaiming_uniform_(layer.weight, a=1)
                nn.init.constant_(layer.bias, 0)
            cfg.runtime_info.backbone_ft_dim = self.out_dim

        self.indim_ilf = [64, 128, 256]
        self.num_image = cfg.INPUT.NUM_IMAGES_3DCE
        self.feature_fusion_level_list = cfg.MODEL.BACKBONE.FEATURE_FUSION_LEVELS
        for p in range(len(self.feature_fusion_level_list)):
            if self.feature_fusion_level_list[p]:
                layer = nn.Conv2d(self.num_image * self.indim_ilf[p], self.indim_ilf[p], 1)
                self.add_module('conv_ilf%d'%p, layer)
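
Note: when feature_upsample is enabled, the 1x1 lateral%d convolutions registered above are the usual ingredients of an FPN-style top-down pass, while the conv_ilf%d layers fuse features across the NUM_IMAGES_3DCE neighbouring slices with a 1x1 conv. The actual forward() is not part of this excerpt; the sketch below covers only the top-down combination and assumes fpn_finest_layer == 2 (so lateral2..lateral4 exist) and that c2..c4 are the dense-block outputs whose channel counts match in_dim[1..3].

import torch.nn.functional as F

def fpn_topdown(model, c2, c3, c4):
    # Sketch only: project each level to out_dim and add the upsampled
    # coarser level, finishing at the finest requested level.
    p4 = model.lateral4(c4)
    p3 = model.lateral3(c3) + F.interpolate(p4, scale_factor=2, mode='nearest')
    p2 = model.lateral2(c2) + F.interpolate(p3, scale_factor=2, mode='nearest')
    return p2
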
コード例 #27
0
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000,
                 bp_position=False, hidden_layers_att=1, Heads=4, dq=16, dv=16, kernel_att=3, stride_att=1,
                 non_linearity_att='softmax', self_att=False):

        super(DenseNet_att_QKV_HM, self).__init__()

        self.bp_position = bp_position
        self.hidden_layers_att = hidden_layers_att
        self.Nh = Heads
        self.dq = dq
        self.dv = dv
        self.kernel_att = kernel_att
        self.stride_att = stride_att
        self.padding_att = (self.kernel_att - 1) // 2
        self.non_linearity_att = non_linearity_att
        self.self_att = self_att


        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Convolutions to reduce dimension
        if self.self_att is True:
            self.conv_reducer_1 = nn.Conv2d(1664, 64, 1)
        else:
            self.conv_reducer_1 = nn.Conv2d(self.dq, 64, 1)

        self.con_relu_1 = nn.ReLU()
        self.conv_reducer_2 = nn.Conv2d(64, 32, 1)
        self.con_relu_2 = nn.ReLU()
        self.conv_reducer_3 = nn.Conv2d(32, 16, 1)
        self.con_relu_3 = nn.ReLU()
        self.conv_reducer_4 = nn.Conv2d(16, 8, 1)
        self.con_relu_4 = nn.ReLU()




        #Guided attention

        self.conv_embedding = AttentionNet(1664, self.dq, self.hidden_layers_att, self.kernel_att, self.stride_att)

        if self.self_att is True:
            self.conv_get_K = nn.Conv2d(1664, self.dq, self.kernel_att, self.stride_att, self.padding_att)
        else:
            self.conv_get_K = nn.Conv2d(self.dq, self.dq, self.kernel_att, self.stride_att, self.padding_att)

        self.conv_get_QV = nn.Conv2d(1664, self.dq + self.dv, self.kernel_att, self.stride_att, self.padding_att)

        self.conv_reducer_att_output = nn.Conv2d(self.dv, 4 * self.Nh, 1)
        self.softmax_att = nn.Softmax()
        self.sigmoid_att = nn.Sigmoid()


        # Linear layer for localization
        self.classifier_locations = nn.Linear(2048, 7)

        # Linear layer for radiographic finding
        self.classifier = nn.Linear(4 * self.Nh * 16 * 16, 1)
        #self.classifier = nn.Linear(832, 1)

        if self.non_linearity_att == 'sigmoid':
            self.bn = torch.nn.BatchNorm2d(self.Nh)


        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
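
Note: the attention modules registered above (conv_get_QV, conv_get_K, conv_reducer_att_output) point to a QKV attention over the spatial positions of the 1664-channel DenseNet feature map, with K coming either from the features themselves (self_att=True) or from the dq-channel guide embedding. The model's real forward() is not in this excerpt; the sketch below is a generic single-map version of that computation (head splitting and the reducer stack are omitted) and assumes stride_att == 1.

import torch

def qkv_spatial_attention(model, feat, guide):
    # Sketch only. feat: (N, 1664, H, W); guide: whatever conv_get_K expects
    # (the features themselves when self_att=True, else the guide embedding).
    n, _, h, w = feat.shape
    q, v = torch.split(model.conv_get_QV(feat), [model.dq, model.dv], dim=1)
    k = model.conv_get_K(guide)

    q = q.flatten(2).transpose(1, 2)                       # (N, HW, dq)
    k = k.flatten(2)                                       # (N, dq, HW)
    v = v.flatten(2).transpose(1, 2)                       # (N, HW, dv)

    attn = torch.softmax(q @ k / model.dq ** 0.5, dim=-1)  # (N, HW, HW)
    out = (attn @ v).transpose(1, 2).reshape(n, model.dv, h, w)
    return model.conv_reducer_att_output(out)              # (N, 4 * Nh, H, W)
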
コード例 #28
0
    def __init__(self,
                 model_path,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 gh=False,
                 ah=False,
                 **kwargs):
        super(MyDenseNet_stn_local, self).__init__()
        self.gh = gh
        self.ah = ah
        self.num_classes = num_classes
        #Branch1 First convolution
        self.firstconvolution = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Branch1 denseblock 1
        DBnum_features = num_init_features
        TRnum_features = DBnum_features + block_config[0] * growth_rate
        self.block1 = nn.Sequential(
            OrderedDict([('denseblock1',
                          _DenseBlock(num_layers=block_config[0],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition1',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 2
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[1] * growth_rate
        self.block2 = nn.Sequential(
            OrderedDict([('denseblock2',
                          _DenseBlock(num_layers=block_config[1],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition2',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 3
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[2] * growth_rate
        self.highnum_fea = DBnum_features
        self.trannum_fea = TRnum_features
        self.block3 = nn.Sequential(
            OrderedDict([('denseblock3',
                          _DenseBlock(num_layers=block_config[2],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate)),
                         ('transition3',
                          _Transition(num_input_features=TRnum_features,
                                      num_output_features=TRnum_features // 2))
                         ]))

        #Branch1 denseblock 4
        DBnum_features = TRnum_features // 2
        TRnum_features = DBnum_features + block_config[3] * growth_rate
        self.block4 = nn.Sequential(
            OrderedDict([('denseblock4',
                          _DenseBlock(num_layers=block_config[3],
                                      num_input_features=DBnum_features,
                                      bn_size=bn_size,
                                      growth_rate=growth_rate,
                                      drop_rate=drop_rate))]))

        #Branch1 Final batch norm
        self.norm5 = nn.BatchNorm2d(TRnum_features)

        #Branch1 Linear layer
        self.classifier = nn.Linear(TRnum_features, num_classes)

        if self.gh:
            # =====================append conv for Horizontal======================= #
            self.localgh_conv = nn.Conv2d(1024,
                                          256,
                                          kernel_size=1,
                                          padding=0,
                                          bias=False)
            init.kaiming_normal_(self.localgh_conv.weight, mode='fan_out')
            self.ghfeat_bn2d = nn.BatchNorm2d(256)
            init.constant_(self.ghfeat_bn2d.weight, 1)
            init.constant_(self.ghfeat_bn2d.bias, 0)

            ##------------------------------stripe----------------------------------------##
            self.ghinstance0 = nn.Linear(256, self.num_classes)
            init.normal_(self.ghinstance0.weight, std=0.001)
            init.constant_(self.ghinstance0.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ghinstance1 = nn.Linear(256, self.num_classes)
            init.normal_(self.ghinstance1.weight, std=0.001)
            init.constant_(self.ghinstance1.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ghinstance2 = nn.Linear(256, self.num_classes)
            init.normal_(self.ghinstance2.weight, std=0.001)
            init.constant_(self.ghinstance2.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ghinstance3 = nn.Linear(256, self.num_classes)
            init.normal_(self.ghinstance3.weight, std=0.001)
            init.constant_(self.ghinstance3.bias, 0)
            ##------------------------------stripe----------------------------------------##

            self.ghdrop = nn.Dropout(0.5)

        #Spatial transformer localization network
        self.localization = nn.Sequential(
            OrderedDict([
                ('denseblock3',
                 _DenseBlock(num_layers=24,
                             num_input_features=self.highnum_fea,
                             bn_size=bn_size,
                             growth_rate=growth_rate,
                             drop_rate=drop_rate)),
                ('transition3',
                 _Transition(num_input_features=self.trannum_fea,
                             num_output_features=self.trannum_fea // 2))
            ]))

        self.downsample = nn.Sequential(
            OrderedDict([('bn', nn.BatchNorm2d(512)),
                         ('relu', nn.ReLU(inplace=True)),
                         ('conv',
                          nn.Conv2d(512,
                                    128,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=True))]))
        #Regressor
        self.fc_loc = nn.Sequential(nn.Linear(128 * 8 * 8, 128 * 8),
                                    nn.ReLU(True), nn.Linear(1024, 128),
                                    nn.ReLU(True), nn.Linear(128, 32),
                                    nn.ReLU(True), nn.Linear(32, 3 * 2))

        self.fc_loc[6].weight.data.fill_(0)
        self.fc_loc[6].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])

        alignmodel = DenseNet_Alignment()
        self.alignbase = alignmodel.features
        self.alignclassifier = nn.Linear(TRnum_features, num_classes)

        if self.ah:
            # =====================append conv for Horizontal======================= #
            self.localah_conv = nn.Conv2d(1024,
                                          256,
                                          kernel_size=1,
                                          padding=0,
                                          bias=False)
            init.kaiming_normal_(self.localah_conv.weight, mode='fan_out')
            self.ahfeat_bn2d = nn.BatchNorm2d(256)
            init.constant_(self.ahfeat_bn2d.weight, 1)
            init.constant_(self.ahfeat_bn2d.bias, 0)

            ##------------------------------stripe----------------------------------------##
            self.ahinstance0 = nn.Linear(256, self.num_classes)
            init.normal_(self.ahinstance0.weight, std=0.001)
            init.constant_(self.ahinstance0.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ahinstance1 = nn.Linear(256, self.num_classes)
            init.normal_(self.ahinstance1.weight, std=0.001)
            init.constant_(self.ahinstance1.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ahinstance2 = nn.Linear(256, self.num_classes)
            init.normal_(self.ahinstance2.weight, std=0.001)
            init.constant_(self.ahinstance2.bias, 0)
            ##------------------------------stripe----------------------------------------##
            ##------------------------------stripe----------------------------------------##
            self.ahinstance3 = nn.Linear(256, self.num_classes)
            init.normal_(self.ahinstance3.weight, std=0.001)
            init.constant_(self.ahinstance3.bias, 0)
            ##------------------------------stripe----------------------------------------##

            self.ahdrop = nn.Dropout(0.5)

        self.load_param()
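
Note: the gh/ah branches above each register a 1x1 reduction conv, a batch norm, dropout and four per-stripe linear heads, which is the usual horizontal-stripe (part-based) classification layout. Their forward logic is not shown here; the sketch below assumes the feature map's height is split into four stripes that are pooled and classified independently, and it is illustrative only.

import torch
import torch.nn.functional as F

def gh_stripe_logits(model, feat):
    # Sketch only. feat: (N, 1024, H, W) backbone features, H divisible by 4.
    x = model.ghdrop(F.relu(model.ghfeat_bn2d(model.localgh_conv(feat))))
    logits = []
    for i, stripe in enumerate(torch.chunk(x, 4, dim=2)):
        pooled = F.adaptive_avg_pool2d(stripe, 1).flatten(1)   # (N, 256)
        logits.append(getattr(model, 'ghinstance%d' % i)(pooled))
    return logits
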
コード例 #29
0
    def __init__(self, num_input_features=16, num_output_features=32):
        super(TransitionBlock, self).__init__()
        self.block = _Transition(num_input_features=num_input_features,
                                 num_output_features=num_output_features)
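
Note: assuming _Transition is torchvision's implementation (BatchNorm -> ReLU -> 1x1 conv -> 2x2 average pooling), a quick shape check of this wrapper might look like the following.

import torch

block = TransitionBlock(num_input_features=16, num_output_features=32)
x = torch.randn(2, 16, 64, 64)
y = block.block(x)      # _Transition is an nn.Sequential, so it is callable
print(y.shape)          # torch.Size([2, 32, 32, 32]): channels remapped, H/W halved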