コード例 #1
0
ファイル: lenet_mnist.py プロジェクト: wangshuai9517/KervNets
 def __init__(self):
     """Build the LeNet-style kervolutional network (LeKervNet)."""
     super(LeKervNet, self).__init__()
     # Stage 1: kervolution -> ReLU -> 2x2 max-pool.
     # Input (1, 28, 28); padding=2 keeps 28x28, pooling yields (6, 14, 14).
     self.kerv1 = nn.Sequential(
         nn.Kerv2d(
             in_channels=1,
             out_channels=6,
             kernel_size=5,
             stride=1,
             padding=2,  # (kernel_size - 1) / 2 preserves spatial size at stride 1
             kernel_type=args.kernel_type,
             learnable_kernel=args.learnable_kernel,
         ),
         nn.ReLU(),
         nn.MaxPool2d(2),
     )
     # Stage 2: (6, 14, 14) -> kerv (16, 10, 10) -> pool (16, 5, 5).
     self.kerv2 = nn.Sequential(
         nn.Kerv2d(
             in_channels=6,
             out_channels=16,
             kernel_size=5,
             stride=1,
             padding=0,
             mapping='translation',
             kernel_type='linear',
         ),
         nn.ReLU(),
         nn.MaxPool2d(2),
     )
     # Classifier head: 16*5*5 -> 120 -> 84 -> 10 logits.
     self.fc1 = nn.Linear(16 * 5 * 5, 120)
     self.fc2 = nn.Linear(120, 84)
     self.fc3 = nn.Linear(84, 10)
コード例 #2
0
 def __init__(self,
              num_input_features,
              growth_rate,
              bn_size,
              drop_rate,
              memory_efficient=False):
     """Dense layer: BN -> ReLU -> 1x1 kerv (bottleneck) -> BN -> ReLU -> 3x3 kerv.

     Args:
         num_input_features: channels entering the layer.
         growth_rate: channels produced by the layer's 3x3 kervolution.
         bn_size: bottleneck multiplier (the 1x1 kerv emits bn_size * growth_rate).
         drop_rate: dropout probability stored for use by the forward pass.
         memory_efficient: flag stored for a checkpointed forward, if supported.
     """
     super(_DenseLayer, self).__init__()
     # BUG FIX (idiom): every add_module(...) statement below originally ended
     # with a stray trailing comma, turning each statement into a throwaway
     # 1-tuple `(None,)`. Harmless at runtime but misleading; commas removed.
     self.add_module('norm1', nn.BatchNorm2d(num_input_features))
     self.add_module('relu1', nn.ReLU(inplace=True))
     self.add_module(
         'Kerv1',
         nn.Kerv2d(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
     self.add_module('relu2', nn.ReLU(inplace=True))
     self.add_module(
         'Kerv2',
         nn.Kerv2d(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
     self.drop_rate = float(drop_rate)
     self.memory_efficient = memory_efficient
コード例 #3
0
ファイル: kresnet.py プロジェクト: wangshuai9517/KervNets
    def __init__(self, in_planes, planes, stride=1):
        """Residual basic block built from two 3x3 kervolutions."""
        super(BasicBlock, self).__init__()
        # Main path: kerv-BN, kerv-BN (activations are applied in forward).
        self.kerv1 = nn.Kerv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.kerv2 = nn.Kerv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut by default; 1x1 projection when shape changes.
        self.shortcut = nn.Sequential()
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Kerv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
コード例 #4
0
    def __init__(self, block, layers, num_classes=1000):
        """ImageNet-style ResNet whose 7x7 stem is a polynomial kervolution.

        Args:
            block: residual block class (must define `expansion`).
            layers: number of blocks in each of the four stages.
            num_classes: output size of the final fully-connected layer.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 kervolution with a fixed (non-learnable) kernel.
        self.conv1 = nn.Kerv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False,
                               kernel_type='polynomial',
                               learnable_kernel=False,
                               kernel_regularizer=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He (fan-out) initialisation for convolutions, constants for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # BUG FIX: the original special-cased `m.kernel_size == 7`, but
                # Conv2d.kernel_size is a tuple (e.g. (7, 7)), so that test was
                # always False — and had it ever matched, multiplying tuples
                # would raise TypeError. The tuple-indexed form below is
                # correct for every Conv2d, so the dead branch is removed.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
コード例 #5
0
ファイル: kgooglenet.py プロジェクト: wangshuai9517/KervNets
    def __init__(self, num_classes=10):
        """GoogLeNet variant whose stem convolution is a kervolution."""
        super(KGoogLeNet, self).__init__()
        # Stem: 3x3 polynomial kervolution -> BN -> ReLU, 3 -> 192 channels.
        self.pre_layers = nn.Sequential(
            nn.Kerv2d(3, 192,
                      kernel_size=3,
                      padding=1,
                      kernel_type='polynomial',
                      learnable_kernel=True),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Inception stages; arguments are
        # (in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes).
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        # Shared down-sampling pool used between stages in forward.
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # Global average pooling followed by the linear classifier.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, num_classes)
コード例 #6
0
ファイル: lenet_mnist.py プロジェクト: wangshuai9517/KervNets
 def __init__(self):
     """VGG-style kervolutional network for 28x28 single-channel input."""
     super(KervNet, self).__init__()

     # Feature extractor: two kerv/BN/ReLU pairs per stage with a 2x2 max-pool
     # after each stage; 28x28 -> 14x14 -> 7x7, ending with 64 channels.
     self.features = nn.Sequential(
         nn.Kerv2d(1, 32, kernel_size=3, stride=1, padding=1, kernel_type=args.kernel_type, learnable_kernel=args.learnable_kernel),
         nn.BatchNorm2d(32),
         nn.ReLU(inplace=True),
         nn.Kerv2d(32, 32, kernel_size=3, stride=1, padding=1),
         nn.BatchNorm2d(32),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2, stride=2),
         nn.Kerv2d(32, 64, kernel_size=3, padding=1),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
         nn.Kerv2d(64, 64, kernel_size=3, padding=1),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2, stride=2)
     )

     # Classifier: 64*7*7 -> 512 -> 512 -> 10 with dropout and BatchNorm1d.
     self.classifier = nn.Sequential(
         nn.Dropout(p=0.5),
         nn.Linear(64 * 7 * 7, 512),
         nn.BatchNorm1d(512),
         nn.ReLU(inplace=True),
         nn.Dropout(p=0.5),
         nn.Linear(512, 512),
         nn.BatchNorm1d(512),
         nn.ReLU(inplace=True),
         nn.Dropout(p=0.5),
         nn.Linear(512, 10),
     )

     # He initialisation for conv-like layers, constant init for BatchNorm.
     # NOTE(review): this matches isinstance(m, nn.Conv2d); it only reaches
     # the Kerv2d layers if Kerv2d subclasses nn.Conv2d — confirm in
     # kervolution.py.
     for m in self.features.children():
         if isinstance(m, nn.Conv2d):
             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
             m.weight.data.normal_(0, math.sqrt(2. / n))
         elif isinstance(m, nn.BatchNorm2d):
             m.weight.data.fill_(1)
             m.bias.data.zero_()

     for m in self.classifier.children():
         if isinstance(m, nn.Linear):
             # BUG FIX: nn.init.xavier_uniform is deprecated (and removed in
             # recent PyTorch releases); use the in-place xavier_uniform_.
             nn.init.xavier_uniform_(m.weight)
         elif isinstance(m, nn.BatchNorm1d):
             m.weight.data.fill_(1)
             m.bias.data.zero_()
コード例 #7
0
def kerv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 kervolution layer with padding 1 and no bias."""
    return nn.Kerv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
コード例 #8
0
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 memory_efficient=False):
        """DenseNet whose stem convolution is replaced by a kervolution.

        Args:
            growth_rate: channels added by every dense layer.
            block_config: number of layers in each dense block.
            num_init_features: channels produced by the stem kervolution.
            bn_size: bottleneck multiplier inside the dense layers.
            drop_rate: dropout rate passed to the dense layers.
            num_classes: size of the final linear classifier.
            memory_efficient: enable checkpointed dense layers, if supported.
        """
        super(DenseNet, self).__init__()

        # Stem: 7x7 stride-2 kervolution, BN, ReLU, then 3x3 stride-2 max-pool.
        self.features = nn.Sequential(
            OrderedDict([
                ('Kerv0',
                 nn.Kerv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Dense blocks, each followed by a channel-halving transition except
        # the last one.
        num_features = num_init_features
        last_index = len(block_config) - 1
        for i, num_layers in enumerate(block_config):
            self.features.add_module(
                'denseblock%d' % (i + 1),
                _DenseBlock(num_layers=num_layers,
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            memory_efficient=memory_efficient))
            num_features += num_layers * growth_rate
            if i != last_index:
                self.features.add_module(
                    'transition%d' % (i + 1),
                    _Transition(num_input_features=num_features,
                                num_output_features=num_features // 2))
                num_features //= 2

        # Final batch norm before the classifier.
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear classifier head.
        self.classifier = nn.Linear(num_features, num_classes)

        # Initialisation mirroring the official torchvision DenseNet.
        for m in self.modules():
            if isinstance(m, nn.Kerv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
コード例 #9
0
    def __init__(self, din):
        """Region Proposal Network head built on a kervolution layer.

        Args:
            din: channel depth of the incoming feature map (e.g. 512).
        """
        super(_RPN, self).__init__()

        self.din = din  # depth of the input feature map
        self.anchor_scales = cfg.ANCHOR_SCALES
        self.anchor_ratios = cfg.ANCHOR_RATIOS
        self.feat_stride = cfg.FEAT_STRIDE[0]

        num_anchors = len(self.anchor_scales) * len(self.anchor_ratios)

        # 3x3 kervolution replacing the usual conv layer of the RPN head.
        self.RPN_Conv = nn.Kerv2d(self.din, 512, 3, 1, 1,
                                  mapping='translation',
                                  kernel_type='polynomial',
                                  learnable_kernel=True,
                                  kernel_regularizer=False,
                                  bias=True)

        # Per-anchor background/foreground scores: 2 * num_anchors channels.
        self.nc_score_out = num_anchors * 2
        self.RPN_cls_score = nn.Kerv2d(512, self.nc_score_out, 1, 1, 0)

        # Per-anchor box regression deltas: 4 * num_anchors channels.
        self.nc_bbox_out = num_anchors * 4
        self.RPN_bbox_pred = nn.Kerv2d(512, self.nc_bbox_out, 1, 1, 0)

        # Turns scores and deltas into region proposals.
        self.RPN_proposal = _ProposalLayer(self.feat_stride,
                                           self.anchor_scales,
                                           self.anchor_ratios)

        # Assigns classification/regression targets to anchors for training.
        self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride,
                                                    self.anchor_scales,
                                                    self.anchor_ratios)

        self.rpn_loss_cls = 0
        self.rpn_loss_box = 0
コード例 #10
0
 def __init__(self, num_input_features, num_output_features):
     """Transition: BN -> ReLU -> 1x1 kerv (channel change) -> 2x2 avg-pool."""
     super(_Transition, self).__init__()
     self.add_module('norm', nn.BatchNorm2d(num_input_features))
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module('Kerv', nn.Kerv2d(num_input_features,
                                       num_output_features,
                                       kernel_size=1,
                                       stride=1,
                                       bias=False))
     self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
コード例 #11
0
ファイル: kresnet.py プロジェクト: wangshuai9517/KervNets
    def __init__(self, block, num_blocks, num_classes=10):
        """ResNet for CIFAR-sized input with a kervolutional stem.

        Args:
            block: residual block class (must define `expansion`).
            num_blocks: blocks per stage, one entry for each of four stages.
            num_classes: output classes of the final linear layer.
        """
        super(KResNet, self).__init__()
        self.in_planes = 64

        # Stem: 3x3 kervolution with a learnable polynomial kernel, 3 -> 64.
        self.kerv1 = nn.Kerv2d(3, 64, 3, 1,
                        kernel_type='polynomial',
                        learnable_kernel=True,
                        kernel_regularizer=False)
        self.bn1 = nn.BatchNorm2d(64)

        # Four residual stages; the first keeps resolution, the rest halve it.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Classifier head sized by the block's channel expansion factor.
        self.linear = nn.Linear(512 * block.expansion, num_classes)
コード例 #12
0
ファイル: kgooglenet.py プロジェクト: wangshuai9517/KervNets
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5,
                 pool_planes):
        """Inception module whose convolutions are kervolution layers."""
        super(Inception, self).__init__()

        def kerv_bn_relu(cin, cout, ksize, pad=0):
            # One kerv -> BN -> ReLU unit; the building block of every branch.
            return [
                nn.Kerv2d(cin, cout, kernel_size=ksize, padding=pad),
                nn.BatchNorm2d(cout),
                nn.ReLU(True),
            ]

        # Branch 1: a single 1x1 kervolution.
        self.b1 = nn.Sequential(*kerv_bn_relu(in_planes, n1x1, 1))

        # Branch 2: 1x1 reduction followed by a 3x3 kervolution.
        self.b2 = nn.Sequential(
            *(kerv_bn_relu(in_planes, n3x3red, 1) +
              kerv_bn_relu(n3x3red, n3x3, 3, pad=1)))

        # Branch 3: 1x1 reduction then two stacked 3x3 kervolutions
        # (the classic 5x5 path factorised into two 3x3s).
        self.b3 = nn.Sequential(
            *(kerv_bn_relu(in_planes, n5x5red, 1) +
              kerv_bn_relu(n5x5red, n5x5, 3, pad=1) +
              kerv_bn_relu(n5x5, n5x5, 3, pad=1)))

        # Branch 4: 3x3 max-pool then a 1x1 kervolution.
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            *kerv_bn_relu(in_planes, pool_planes, 1))
コード例 #13
0
ファイル: kervolution.py プロジェクト: wang-chen/kervolution
            outputs = 1 / (1 + (weight_norm + input_norm - 2 * y) /
                           (self.sigma**2))
        else:
            return NotImplementedError()

        if self.kernel_regularizer:
            outputs = outputs + self.alpha * self.weights.abs().mean()

        return outputs

    def parameters(self):
        # NOTE(review): this method shadows nn.Module.parameters() (presumably —
        # the class header is outside this view) and implicitly returns None
        # after printing, which would break any caller expecting an iterator of
        # parameters (e.g. optimizer construction) — confirm this is intentional
        # or rename it to something like print_kernel_parameters().
        # When the kernel is learnable, log the current kernel hyper-parameters.
        if self.learnable_kernel:
            print(
                'alpha: %.3f, power: %.2f, balance: %.2f, sigma: %.2f, gamma: %.2f'
                % (self.alpha.data[0], self.power, self.balance.data[0],
                   self.sigma.data[0], self.gamma.data[0]))


# Expose Kerv2d under torch.nn so Conv2d-style code can refer to nn.Kerv2d.
nn.Kerv2d = Kerv2d

if __name__ == '__main__':
    # Smoke test: build one kervolution layer with a learnable polynomial kernel.
    kerv = nn.Kerv2d(in_channels=2,
                     out_channels=3,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     mapping='random',
                     kernel_type='polynomial',
                     learnable_kernel=True)