def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
    super(_DenseLayer, self).__init__()
    self.layers = []
    # Bottleneck half: BN-ReLU-Conv(1x1) compresses the accumulated input
    # channels down to bn_size * growth_rate feature maps.
    self.add_module('1', BatchNorm2d(num_input_features))
    self.add_module('2', ReLU(inplace=False))
    self.add_module('3', Conv2d(num_input_features, bn_size * growth_rate,
                                kernel_size=1, stride=1, bias=False))  # L*k -> 4*k
    # Second half: BN-ReLU-Conv(3x3) produces this layer's k new feature maps.
    self.add_module('4', BatchNorm2d(bn_size * growth_rate))
    self.add_module('5', ReLU(inplace=False))
    self.add_module('6', Conv2d(bn_size * growth_rate, growth_rate,
                                kernel_size=3, stride=1, padding=1, bias=False))  # 4*k -> k
    self.drop_rate = drop_rate
    self.growth_rate = growth_rate
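
# The constructor above only registers the submodules. For context, a minimal
# sketch of the matching forward pass, modeled on torchvision's reference
# _DenseLayer (the dropout use of drop_rate and the channel concatenation are
# assumptions based on that reference, not taken from this project):
import torch
import torch.nn.functional as F

def _dense_layer_forward(layer, x):
    # Run the registered submodules in order: BN-ReLU-Conv1x1, BN-ReLU-Conv3x3.
    new_features = x
    for name in ['1', '2', '3', '4', '5', '6']:
        new_features = getattr(layer, name)(new_features)
    if layer.drop_rate > 0:
        new_features = F.dropout(new_features, p=layer.drop_rate, training=layer.training)
    # Dense connectivity: the k new maps are concatenated onto the input.
    return torch.cat([x, new_features], dim=1)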
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return Conv2d(in_planes,
                  out_planes,
                  kernel_size=1,
                  stride=stride,
                  bias=False)
Example #3
File: resnet.py, Project: Juna2/RSR-LRP_tmp
    def __init__(self, block, layers, num_classes=2, zero_init_residual=False, input_depth=None):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = Conv2d(input_depth, 64, kernel_size=7, stride=2, padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = ReLU(inplace=True)
        self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = AdaptiveAvgPool2d((1, 1))
        self.fc = Linear(512 * block.expansion, num_classes, whichScore=args.whichScore, lastLayer=True)

        for m in self.modules():
            if isinstance(m, Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
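
# A hypothetical instantiation for context (assumes BasicBlock is defined in
# this resnet.py with a torchvision-style interface; layer counts [2, 2, 2, 2]
# give a ResNet-18 variant with a 2-class head):
# model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=2, input_depth=3)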
def __init__(self, num_input_features, num_output_features):
    super(_Transition, self).__init__()
    self.layers = []
    # Transition between dense blocks: a 1x1 conv compresses the channel count,
    # then 2x2 average pooling halves the spatial resolution.
    self.add_module('1', BatchNorm2d(num_input_features))
    self.add_module('2', ReLU(inplace=False))
    self.add_module('3', Conv2d(num_input_features, num_output_features,
                                kernel_size=1, stride=1, bias=False))
    self.add_module('4', AvgPool2d(kernel_size=2, stride=2))
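
# Shape check for the transition, using plain torch.nn layers as stand-ins for
# the project's wrappers (an assumption; the wrappers may add LRP bookkeeping):
import torch
import torch.nn as nn
t = nn.Sequential(nn.BatchNorm2d(256), nn.ReLU(),
                  nn.Conv2d(256, 128, kernel_size=1, bias=False),
                  nn.AvgPool2d(kernel_size=2, stride=2))
print(t(torch.randn(1, 256, 28, 28)).shape)  # torch.Size([1, 128, 14, 14])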
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):

        super(DenseNet, self).__init__()

        # First convolution
        self.layers = []
        self.layers.append(Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False))
        self.layers.append(BatchNorm2d(num_init_features))
        self.layers.append(ReLU(inplace=False))
        self.layers.append(MaxPool2d(kernel_size=3, stride=2, padding=1))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            # _DenseBlock is an iterable Sequential-style container, so this
            # extends the flat layer list with the block's sublayers.
            self.layers += block
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.layers.append(trans)
                num_features = num_features // 2

        # Final batch norm
        self.layers.append(BatchNorm2d(num_features))

        # Linear layer
        self.layers.append(ReLU(inplace=False))
        self.layers.append(AdaptiveAvgPool2d((1, 1)))
        self.layers.append(Linear(num_features, num_classes, whichScore=args.whichScore, lastLayer=True))
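
# The constructor only fills self.layers; a minimal sketch of a forward pass
# over such a flat list is shown below. This is an assumption about the
# project's design: the real framework presumably threads LRP state through
# each wrapper, and the flatten step before the classifier is inferred, not
# taken from this file.
def _densenet_forward(self, x):
    for layer in self.layers:
        if isinstance(layer, Linear):
            x = x.view(x.size(0), -1)  # flatten pooled features for the classifier
        x = layer(x)
    return x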
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return Conv2d(in_planes,
                  out_planes,
                  kernel_size=3,
                  stride=stride,
                  padding=1,
                  bias=False)
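
# Quick shape check for the two helpers, using torch.nn.Conv2d semantics as a
# stand-in for the project's Conv2d wrapper (assumed to share its signature):
import torch
x = torch.randn(1, 64, 32, 32)
print(conv3x3(64, 128)(x).shape)  # padding=1 preserves 32x32: torch.Size([1, 128, 32, 32])
print(conv1x1(64, 128)(x).shape)  # 1x1 conv also preserves 32x32: torch.Size([1, 128, 32, 32])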
def make_layers(self, cfg, batch_norm=args.vgg_bn):
    # Build a VGG-style feature extractor from a config list: integers are
    # 3x3 conv output widths, 'M' inserts a 2x2 max-pool.
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, BatchNorm2d(v), ReLU()]
            else:
                layers += [conv2d, ReLU()]
            in_channels = v
    return layers
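
# An illustrative config, matching torchvision's VGG-11 ('A') layout; the cfg
# actually used by this project is not shown in the snippet:
cfg_vgg11 = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
# features = Sequential(*self.make_layers(cfg_vgg11))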
Example #8
File: model.py, Project: Juna2/RSR-LRP_tmp
def forward(self):
    return Sequential(
        Conv2d(1, 5, 3), ReLU(), Conv2d(5, 5, 3), ReLU(),
        Linear(24 * 24 * 5, 10, lastLayer=True,
               whichScore=args.whichScore))
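
# Shape arithmetic behind the 24 * 24 * 5 input size, assuming 1x28x28 inputs
# (MNIST-like; the actual input size is not stated in this snippet):
# 28x28 -> Conv2d(1, 5, 3) -> 26x26 -> Conv2d(5, 5, 3) -> 24x24 with 5 channels,
# so the flattened feature vector has 24 * 24 * 5 = 2880 elements.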