Code example #1
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = P4MConvP4M(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = P4MConvP4M(planes,
                                planes,
                                kernel_size=3,
                                stride=stride,
                                padding=1,
                                bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = P4MConvP4M(planes,
                                self.expansion * planes,
                                kernel_size=1,
                                bias=False)
        self.bn3 = nn.BatchNorm3d(self.expansion * planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                P4MConvP4M(in_planes,
                           self.expansion * planes,
                           kernel_size=1,
                           stride=stride,
                           bias=False),
                nn.BatchNorm3d(self.expansion * planes))
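These residual blocks keep the p4m group axis as an explicit fifth tensor dimension, which is why they use nn.BatchNorm3d rather than nn.BatchNorm2d. A minimal shape check, assuming GrouPy's pytorch_gconv package is available (the same import used in examples #4 and #14):

import torch
from groupy.gconv.pytorch_gconv import P4MConvZ2, P4MConvP4M

x = torch.randn(2, 3, 32, 32)   # ordinary image batch: (N, C, H, W)
lift = P4MConvZ2(in_channels=3, out_channels=8, kernel_size=3, padding=1)
gconv = P4MConvP4M(in_channels=8, out_channels=16, kernel_size=3, padding=1)

y = lift(x)    # (2, 8, 8, 32, 32): one copy of each feature map per p4m transformation
z = gconv(y)   # (2, 16, 8, 32, 32); nn.BatchNorm3d(16) normalizes each channel across the 8 copies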
Code example #2
    def __init__(self, no_dp=False, p=0.5):
        super(Net, self).__init__()
        self.no_dp = no_dp
        self.p = p
        self.conv1 = P4MConvZ2(in_channels=3, out_channels=8, kernel_size=5)
        self.bn1 = nn.BatchNorm2d(64)  # 64 = 8 out_channels x 8 p4m orientations; the forward presumably folds the group axis into channels
        self.conv2 = P4MConvP4M(in_channels=8, out_channels=16, kernel_size=5)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = P4MConvP4M(in_channels=16, out_channels=32, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = P4MConvP4M(in_channels=32, out_channels=32, kernel_size=3)
        self.bn4 = nn.BatchNorm2d(256)
        self.conv5 = P4MConvP4M(in_channels=32, out_channels=64, kernel_size=3)
        self.bn5 = nn.BatchNorm2d(512)
        self.conv6 = P4MConvP4M(in_channels=64, out_channels=64, kernel_size=3)
        self.bn6 = nn.BatchNorm2d(512)

        #self.fc1 = nn.Linear(8192, 2048)
        #self.fc2 = nn.Linear(2048, 2048)
        #self.fc3 = nn.Linear(2048, nclasses)
        self.fc1 = nn.Linear(8192, 512)
        self.fc2 = nn.Linear(512, nclasses)

        # Initialize the parameters
        '''
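The 2-D batch-norm sizes above (64, 128, 256, 256, 512, 512) are exactly 8x the corresponding out_channels, which suggests the forward pass (not shown here) folds the 8 p4m orientations into the channel axis before each BatchNorm2d. A rough sketch of that reshape in plain torch; the folding itself is an assumption:

import torch

y = torch.randn(4, 8, 8, 96, 96)  # P4MConvZ2 output: (N, out_channels=8, |p4m|=8, H, W)
y2d = y.view(y.size(0), -1, y.size(-2), y.size(-1))  # -> (N, 64, H, W), the shape nn.BatchNorm2d(64) expects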
Code example #3
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              channels=8,
              downsample=None):
     super(BasicBlock, self).__init__()
     self.to_groupy = shaper(to_groupy=True)
     self.to_normal = shaper(to_groupy=False)
     self.channels = channels  # default 8, the p4m group size; channel counts below are divided by it
     self.conv1 = P4MConvP4M(inplanes // self.channels,
                             planes // self.channels,
                             kernel_size=3,
                             stride=stride,
                             padding=1)
     #self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = groupy_bn(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = P4MConvP4M(planes // channels,
                             planes // channels,
                             kernel_size=3,
                             stride=1,
                             padding=1)
     #self.conv2 = conv3x3(planes, planes)
     #self.bn2 = nn.BatchNorm2d(planes)
     self.bn2 = groupy_bn(planes)
     self.downsample = downsample
     self.stride = stride
Code example #4
File: networks.py Project: 5l1v3r1/cycle-gan
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        from groupy.gconv.pytorch_gconv import P4MConvP4M, P4MConvZ2

        conv_block = [EqPad(padding_type)]
        conv_block += [P4MConvP4M(dim, dim, kernel_size=3, padding=0, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        conv_block += [EqPad(padding_type), P4MConvP4M(dim, dim, kernel_size=3, padding=0, bias=use_bias), norm_layer(dim)]

        return nn.Sequential(*conv_block)
Code example #5
 def __init__(self,num_out_capsules,in_capsule_dim,out_capsule_dim,stride=1):
     super(CapsuleP4MResidualBlock,self).__init__()
     self.conv1 = P4MConvP4M(in_capsule_dim,out_capsule_dim,3,stride,1)
     self.bn1 = nn.BatchNorm3d(out_capsule_dim)
     self.conv2 = P4MConvP4M(out_capsule_dim,num_out_capsules*out_capsule_dim,3,1,1)
     self.bn2 = nn.BatchNorm3d(num_out_capsules*out_capsule_dim)
     self.shortcut = nn.Sequential()
     self.shortcut = nn.Sequential(
                                      P4MConvP4M(in_capsule_dim,num_out_capsules*out_capsule_dim, kernel_size=1, stride=stride),
                                      nn.BatchNorm3d(num_out_capsules*out_capsule_dim)
                                  )
Code example #6
    def __init__(self, finetune_feature_extraction=False, use_cuda=False):
        super(DenseFeatureExtractionModuleRotInv, self).__init__()

        model = models.vgg16(pretrained=True)
        vgg16_layers = [
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
            'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
            'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
            'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
            'pool5'
        ]
        conv3_3_idx = vgg16_layers.index('conv3_3')

        geometric_conv_channels = 512 // 8  # 64 per orientation; 64 channels x 8 p4m orientations = 512 features, matching self.num_channels below
        rot_inv_layers = [
            P4MConvZ2(256,
                      geometric_conv_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.ReLU(inplace=True),
            P4MConvP4M(geometric_conv_channels,
                       geometric_conv_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
            nn.ReLU(inplace=True),
            P4MConvP4M(geometric_conv_channels,
                       geometric_conv_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
            nn.ReLU(inplace=True),
        ]

        self.model = nn.Sequential(
            *list(model.features.children())[:conv3_3_idx + 2],
            *rot_inv_layers)

        self.num_channels = 512

        # Freeze (fix) the feature-extraction parameters
        for param in self.model.parameters():
            param.requires_grad = False
        if finetune_feature_extraction:
            # Unlock the last two P4M conv layers
            for param in list(self.model.parameters())[-2:]:
                param.requires_grad = True
Code example #7
 def __init__(self,
              num_in_capsules,
              in_capsule_dim,
              num_out_capsules,
              out_capsule_dim,
              kernel_size=7,
              stride=1,
              padding=0,
              class_=False):
     super(ConvolutionalCapsules, self).__init__()
     self.num_in_capsules = num_in_capsules
     self.in_capsule_dim = in_capsule_dim
     self.num_out_capsules = num_out_capsules
     self.out_capsule_dim = out_capsule_dim
     self.kernel_size = kernel_size
     self.stride = stride
     self.padding = padding
     #self.projection_networks = nn.ModuleList([nn.Sequential(
     #                            P4ResidualBlock(in_capsule_dim,out_capsule_dim,kernel_size,stride,padding)) for i in range(num_out_capsules)
     #])
     #self.projection_network = nn.Sequential(P4ResidualBlock(in_capsule_dim,out_capsule_dim*num_out_capsules,kernel_size,stride,padding))
     #self.projection_network = nn.Sequential(P4ConvP4(in_capsule_dim,out_capsule_dim*num_out_capsules,kernel_size,stride,padding),
     #                                        nn.BatchNorm3d(out_capsule_dim*num_out_capsules))
     if class_ == False:
         self.projection_network = CapsuleP4MResidualBlock(
             num_out_capsules, in_capsule_dim, out_capsule_dim, stride)
     else:
         self.projection_network = nn.Sequential(
             P4MConvP4M(in_capsule_dim, out_capsule_dim * num_out_capsules,
                        kernel_size, stride, padding))
Code example #8
File: models.py Project: kiranvad/introrl
    def __init__(self, env, num_channels, res):
        self.num_actions = env.action_space.n
        super(Gconvnet, self).__init__()
        num_actions = env.action_space.n
        self.num_channels = num_channels
        outchannels, kernelsize = 4, 5
        outchannels, kernelsize, stride, padding = 32, 7, 4, 0
        self.layer1 = nn.Sequential(
            P4MConvZ2(self.num_channels,
                      outchannels,
                      kernelsize,
                      stride=stride,
                      padding=padding), nn.BatchNorm3d(outchannels),
            gMaxPool2D(2, 2), nn.ReLU())
        outchannels, kernelsize, stride, padding = 64, 5, 2, 0
        self.layer2 = nn.Sequential(
            P4MConvP4M(32,
                       outchannels,
                       kernelsize,
                       stride=stride,
                       padding=padding), nn.BatchNorm3d(outchannels),
            gMaxPool2D(2, 2), nn.ReLU())

        outshape = [64, 8, 1, 1]
        self.out = np.prod(outshape)
        self.fc = nn.Linear(self.out, self.num_actions)
Code example #9
 def __init__(self,in_channels,out_channels):
     super(InitialP4MResidualBlock,self).__init__()
     self.conv1 = P4MConvZ2(in_channels,out_channels,3,1,1)
     self.bn1 = nn.BatchNorm3d(out_channels)
     self.conv2 = P4MConvP4M(out_channels,out_channels,3,1,1)
     self.bn2 = nn.BatchNorm3d(out_channels)
     #self.shortcut = nn.Sequential()
     self.shortcut = nn.Sequential(P4MConvZ2(in_channels,out_channels,1),nn.BatchNorm3d(out_channels))
Code example #10
def P4MConvP4M_1x1(in_planes,
                   planes,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=False):
    return P4MConvP4M(in_planes,
                      planes,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding,
                      bias=bias)
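A usage sketch for the helper above, assuming GrouPy is installed and P4MConvP4M is imported as in example #4; 1x1 p4m convolutions of this kind appear as projection shortcuts in examples #1 and #5:

import torch

proj = P4MConvP4M_1x1(in_planes=64, planes=256, stride=2)   # uses the helper defined above
x = torch.randn(2, 64, 8, 16, 16)   # (N, in_planes, |p4m|, H, W)
out = proj(x)                       # (2, 256, 8, 8, 8): channels projected, spatial dims halved by the stride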
Code example #11
    def __init__(self, block_expansion, sn=False, shift_invariance=False, rot_invariance=False,
                 flip_invariance=False, **kwargs):
        super(DCDiscriminator, self).__init__()

        self.rot_invariance = rot_invariance
        self.flip_invariance = flip_invariance
        self.shift_invariance = shift_invariance
        self.sn = sn

        assert self.rot_invariance or not self.flip_invariance

        if self.shift_invariance and self.rot_invariance and self.flip_invariance:
            block_expansion //= int(8 ** 0.5)
            self.conv0 = P4MConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4MConvP4M(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4MConvP4M(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = P4MConvP4M(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.shift_invariance and self.rot_invariance:
            block_expansion //= 2
            self.conv0 = P4ConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4ConvP4(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4ConvP4(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = P4ConvP4(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.shift_invariance:
            self.conv0 = nn.Conv2d(3, block_expansion, kernel_size=3)
            self.conv1 = nn.Conv2d(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = nn.Conv2d(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = nn.Conv2d(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.rot_invariance and self.flip_invariance:
            block_expansion //= int(8 ** 0.5)
            self.conv0 = P4MConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4MConvP4M(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4MConvP4M(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = P4MConvP4M(block_expansion * 4, block_expansion * 8, kernel_size=3)
        elif self.rot_invariance:
            block_expansion //= 2
            self.conv0 = P4ConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4ConvP4(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4ConvP4(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = P4ConvP4(block_expansion * 4, block_expansion * 8, kernel_size=3)
        else:
            self.conv0 = nn.Conv2d(3, block_expansion, kernel_size=3)
            self.conv1 = nn.Conv2d(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = nn.Conv2d(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = nn.Conv2d(block_expansion * 4, block_expansion * 8, kernel_size=3)

        self.fc = nn.Linear(block_expansion * 8, 1)

        if self.sn:
            self.conv0 = nn.utils.spectral_norm(self.conv0)
            self.conv1 = nn.utils.spectral_norm(self.conv1)
            self.conv2 = nn.utils.spectral_norm(self.conv2)
            self.conv3 = nn.utils.spectral_norm(self.conv3)
            self.fc = nn.utils.spectral_norm(self.fc)
Code example #12
 def __init__(self, input_size, n_classes=10, **kwargs):
     super(AllConvNet2, self).__init__()
     self.conv1 = P4MConvZ2(input_size, 96, 3, padding=1)
     self.bn1 = nn.BatchNorm3d(96) 
     self.conv2 = P4MConvP4M(96, 96, 3, padding=1)
     self.bn2 = nn.BatchNorm3d(96)
     self.conv3 = P4MConvP4M(96, 96, 3, padding=1, stride=2)
     self.bn3 = nn.BatchNorm3d(96)
     self.conv4 = P4MConvP4M(96, 192, 3, padding=1)
     self.bn4 = nn.BatchNorm3d(192)
     self.conv5 = P4MConvP4M(192, 192, 3, padding=1)
     self.bn5 = nn.BatchNorm3d(192)
     self.conv6 = P4MConvP4M(192, 192, 3, padding=1, stride=2)
     self.bn6 = nn.BatchNorm3d(192)
     self.conv7 = P4MConvP4M(192, 192, 3, padding=1)
     self.bn7 = nn.BatchNorm3d(192)
     self.conv8 = P4MConvP4M(192, 192, 1)
     self.bn8 = nn.BatchNorm3d(192)
     self.hidden_fc = nn.Linear(192*8*8*8,128)  # 192 channels x 8 p4m orientations x 8x8 spatial (e.g. a 32x32 input after the two stride-2 convs)
     self.class_fc = nn.Linear(128, n_classes)
Code example #13
 def __init__(self, num_capsules=10, in_channels=32, out_channels=32):
     super(PrimaryCapsules, self).__init__()
     self.num_capsules = num_capsules
     self.out_channels = out_channels
     '''self.capsules = nn.ModuleList([
       nn.Sequential(
                     nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),
                     nn.BatchNorm2d(out_channels),  
                     nn.SELU(),                         
                     nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),
                     nn.BatchNorm2d(out_channels), 
                     nn.SELU(),
                     nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),
                     nn.BatchNorm2d(out_channels),    
                     nn.SELU(),
        )
        for i in range(num_capsules)
     ])'''
     #self.capsules = nn.ModuleList([
     #                               nn.Sequential(
     #                                             P4ResidualBlock(in_channels,out_channels,3,1,1),
     #                                             P4ResidualBlock(out_channels,out_channels,3,1,1),
     #                                             P4ResidualBlock(out_channels,out_channels,3,1,1)
     #                               ) for i in range(num_capsules)])
     #self.capsules = nn.Sequential(
     #                              P4ConvP4(in_channels,out_channels*num_capsules,kernel_size=5),
     #                              nn.BatchNorm3d(out_channels*num_capsules),
     #                             )
     self.capsules = nn.Sequential(
         P4MConvP4M(in_channels, out_channels, 3, 1, 1),
         nn.SELU(),
         nn.BatchNorm3d(out_channels),
         #P4ConvP4(out_channels,out_channels,3,1,1),
         #nn.SELU(),
         #nn.BatchNorm3d(out_channels),
         CapsuleP4MResidualBlock(num_capsules,
                                 out_channels,
                                 out_channels,
                                 stride=1))
Code example #14
File: main.py Project: sanghun3819/Groupy_revision
import torch
from torch.autograd import Variable
from groupy.gconv.pytorch_gconv import P4ConvZ2, P4ConvP4, P4MConvZ2, P4MConvP4M, P4MConvP4M_SC, P4MConvP4M_SCC, P4MConvP4M_SF

# Construct G-Conv layers
#C1 = P4ConvZ2(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1)
#C2 = P4ConvP4(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)

P1 = P4MConvZ2(in_channels=3,
               out_channels=64,
               kernel_size=3,
               stride=1,
               padding=1)
P2 = P4MConvP4M(in_channels=64,
                out_channels=64,
                kernel_size=3,
                stride=1,
                padding=1)
P3 = P4MConvP4M_SF(in_channels=64,
                   out_channels=64,
                   kernel_size=3,
                   stride=1,
                   padding=1)

C1 = P4MConvZ2(in_channels=3,
               out_channels=64,
               kernel_size=3,
               stride=1,
               padding=1)
C2 = P4MConvP4M(in_channels=64,
                out_channels=64,