Example #1
 def __init__(self,in_channels,out_channels):
     super(InitialP4MResidualBlock,self).__init__()
     self.conv1 = P4MConvZ2(in_channels,out_channels,3,1,1)
     self.bn1 = nn.BatchNorm3d(out_channels)
     self.conv2 = P4MConvP4M(out_channels,out_channels,3,1,1)
     self.bn2 = nn.BatchNorm3d(out_channels)
     #self.shortcut = nn.Sequential()
     self.shortcut = nn.Sequential(P4MConvZ2(in_channels,out_channels,1),nn.BatchNorm3d(out_channels))
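Note on shapes: P4MConvZ2 lifts a plain (N, C, H, W) tensor onto the p4m group (4 rotations x 2 mirrorings), so every later feature map carries an extra group axis of size 8, which is why the block normalizes with nn.BatchNorm3d. A minimal shape check, assuming GrouPy's PyTorch g-conv layers are importable as in Example #12 below:
import torch
from groupy.gconv.pytorch_gconv import P4MConvZ2, P4MConvP4M

x = torch.randn(2, 3, 32, 32)                          # plain Z2 input
lift = P4MConvZ2(3, 16, kernel_size=3, padding=1)      # Z2 -> p4m lifting convolution
gconv = P4MConvP4M(16, 16, kernel_size=3, padding=1)   # p4m -> p4m group convolution
y = gconv(lift(x))
print(y.shape)  # expected: torch.Size([2, 16, 8, 32, 32])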
Example #2
    def __init__(self, block_expansion, sn=False, shift_invariance=False, rot_invariance=False,
                 flip_invariance=False, **kwargs):
        super(DCDiscriminator, self).__init__()

        self.rot_invariance = rot_invariance
        self.flip_invariance = flip_invariance
        self.shift_invariance = shift_invariance
        self.sn = sn

        assert self.rot_invariance or not self.flip_invariance  # flip invariance is only supported together with rotation invariance (p4m)

        if self.shift_invariance and self.rot_invariance and self.flip_invariance:
            block_expansion //= int(8 ** 0.5)
            self.conv0 = P4MConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4MConvP4M(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4MConvP4M(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = P4MConvP4M(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.shift_invariance and self.rot_invariance:
            block_expansion //= 2
            self.conv0 = P4ConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4ConvP4(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4ConvP4(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = P4ConvP4(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.shift_invariance:
            self.conv0 = nn.Conv2d(3, block_expansion, kernel_size=3)
            self.conv1 = nn.Conv2d(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = nn.Conv2d(block_expansion * 2, block_expansion * 4, kernel_size=3, dilation=2)
            self.conv3 = nn.Conv2d(block_expansion * 4, block_expansion * 8, kernel_size=3, dilation=4)
        elif self.rot_invariance and self.flip_invariance:
            block_expansion //= int(8 ** 0.5)
            self.conv0 = P4MConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4MConvP4M(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4MConvP4M(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = P4MConvP4M(block_expansion * 4, block_expansion * 8, kernel_size=3)
        elif self.rot_invariance:
            block_expansion //= 2
            self.conv0 = P4ConvZ2(3, block_expansion, kernel_size=3)
            self.conv1 = P4ConvP4(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = P4ConvP4(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = P4ConvP4(block_expansion * 4, block_expansion * 8, kernel_size=3)
        else:
            self.conv0 = nn.Conv2d(3, block_expansion, kernel_size=3)
            self.conv1 = nn.Conv2d(block_expansion, block_expansion * 2, kernel_size=3)
            self.conv2 = nn.Conv2d(block_expansion * 2, block_expansion * 4, kernel_size=3)
            self.conv3 = nn.Conv2d(block_expansion * 4, block_expansion * 8, kernel_size=3)

        self.fc = nn.Linear(block_expansion * 8, 1)

        if self.sn:
            self.conv0 = nn.utils.spectral_norm(self.conv0)
            self.conv1 = nn.utils.spectral_norm(self.conv1)
            self.conv2 = nn.utils.spectral_norm(self.conv2)
            self.conv3 = nn.utils.spectral_norm(self.conv3)
            self.fc = nn.utils.spectral_norm(self.fc)
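The equivariant branches divide block_expansion by 2 for p4 and by int(8 ** 0.5) for p4m so that the parameter count stays roughly comparable to the plain nn.Conv2d branch, since each group filter is applied at 4 or 8 orientations. The forward pass is not shown; to turn the equivariant features into the invariance the flags promise, the group axis of the last g-conv output is typically pooled away before self.fc. A minimal sketch of that reduction, offered as an assumption about the missing forward rather than code from this repository:
import torch
import torch.nn.functional as F

def invariant_features(x):
    # x: (N, C, 8, H, W), the output of the last P4MConvP4M above.
    x = x.max(dim=2).values                      # pool the group axis    -> (N, C, H, W)
    x = F.adaptive_avg_pool2d(x, 1).flatten(1)   # global spatial pooling -> (N, C)
    return x                                     # ready for self.fc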
Example #3
    def __init__(self, no_dp=False, p=0.5):
        super(Net, self).__init__()
        self.no_dp = no_dp
        self.p = p
        self.conv1 = P4MConvZ2(in_channels=3, out_channels=8, kernel_size=5)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = P4MConvP4M(in_channels=8, out_channels=16, kernel_size=5)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = P4MConvP4M(in_channels=16, out_channels=32, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = P4MConvP4M(in_channels=32, out_channels=32, kernel_size=3)
        self.bn4 = nn.BatchNorm2d(256)
        self.conv5 = P4MConvP4M(in_channels=32, out_channels=64, kernel_size=3)
        self.bn5 = nn.BatchNorm2d(512)
        self.conv6 = P4MConvP4M(in_channels=64, out_channels=64, kernel_size=3)
        self.bn6 = nn.BatchNorm2d(512)

        #self.fc1 = nn.Linear(8192, 2048)
        #self.fc2 = nn.Linear(2048, 2048)
        #self.fc3 = nn.Linear(2048, nclasses)
        self.fc1 = nn.Linear(8192, 512)
        self.fc2 = nn.Linear(512, nclasses)

        # Initialize the parameters
Example #4
    def __init__(self, env, num_channels, res):
        super(Gconvnet, self).__init__()
        self.num_actions = env.action_space.n
        self.num_channels = num_channels
        outchannels, kernelsize, stride, padding = 32, 7, 4, 0
        self.layer1 = nn.Sequential(
            P4MConvZ2(self.num_channels,
                      outchannels,
                      kernelsize,
                      stride=stride,
                      padding=padding), nn.BatchNorm3d(outchannels),
            gMaxPool2D(2, 2), nn.ReLU())
        outchannels, kernelsize, stride, padding = 64, 5, 2, 0
        self.layer2 = nn.Sequential(
            P4MConvP4M(32,
                       outchannels,
                       kernelsize,
                       stride=stride,
                       padding=padding), nn.BatchNorm3d(outchannels),
            gMaxPool2D(2, 2), nn.ReLU())

        outshape = [64, 8, 1, 1]
        self.out = np.prod(outshape)
        self.fc = nn.Linear(self.out, self.num_actions)
Example #5
    def __init__(self, block, num_blocks, num_f1, num_f2, num_f3, num_f4, num_classes=10):
        super(ResNet, self).__init__()

        self.in_planes = num_f1
        self.conv1 = P4MConvZ2(3, self.in_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(self.in_planes)
        self.layer1 = self._make_layer(block, self.in_planes, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, num_f2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, num_f3, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, num_f4, num_blocks[3], stride=2)
        self.linear = nn.Linear(num_f4*8*block.expansion, num_classes)
Example #6
def P4MConvZ2_5x5(in_planes,
                  planes,
                  kernel_size=5,
                  stride=1,
                  padding=2,
                  bias=False):
    return P4MConvZ2(in_planes,
                     planes,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     bias=bias)
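A hypothetical usage of the helper above (channel counts chosen for illustration): with padding=2 the 5x5 lifting convolution preserves spatial size, mapping (N, 3, H, W) to (N, planes, 8, H, W).
import torch

conv5x5 = P4MConvZ2_5x5(3, 16)
y = conv5x5(torch.randn(1, 3, 64, 64))
print(y.shape)  # expected: torch.Size([1, 16, 8, 64, 64])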
Example #7
    def __init__(self, finetune_feature_extraction=False, use_cuda=False):
        super(DenseFeatureExtractionModuleRotInv, self).__init__()

        model = models.vgg16(pretrained=True)
        vgg16_layers = [
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
            'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
            'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
            'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
            'pool5'
        ]
        conv3_3_idx = vgg16_layers.index('conv3_3')

        geometric_conv_channels = 512 // 8
        rot_inv_layers = [
            P4MConvZ2(256,
                      geometric_conv_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.ReLU(inplace=True),
            P4MConvP4M(geometric_conv_channels,
                       geometric_conv_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
            nn.ReLU(inplace=True),
            P4MConvP4M(geometric_conv_channels,
                       geometric_conv_channels,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
            nn.ReLU(inplace=True),
        ]

        self.model = nn.Sequential(
            *list(model.features.children())[:conv3_3_idx + 2],
            *rot_inv_layers)

        self.num_channels = 512

        # Freeze the pretrained feature-extraction parameters
        for param in self.model.parameters():
            param.requires_grad = False
        if finetune_feature_extraction:
            # Unfreeze the last two equivariant conv layers
            for param in list(self.model.parameters())[-2:]:
                param.requires_grad = True
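Here geometric_conv_channels = 512 // 8 = 64 because each p4m channel produces 8 orientation maps; folding the group axis back into the channel axis recovers the 512 channels advertised by self.num_channels. A sketch of that flattening, as an assumption about the forward pass, which is not part of the snippet:
import torch

def flatten_group_axis(x):
    # x: (N, 64, 8, H, W) from the last P4MConvP4M above.
    n, c, g, h, w = x.shape
    return x.view(n, c * g, h, w)  # -> (N, 512, H, W)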
Example #8
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', group_pool='avg'):
        assert(n_blocks >= 0)
        assert (group_pool in ['avg', 'cat'])
        super(EqResnetGenerator, self).__init__()
        from groupy.gconv.pytorch_gconv import P4MConvP4M, P4MConvZ2


        if norm_layer == nn.BatchNorm2d:
            eq_norm_layer = nn.BatchNorm3d
        else:
            eq_norm_layer = nn.InstanceNorm3d

        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]

        eq_ngf = int(ngf / np.sqrt(8))
        mult = 2 ** n_downsampling
        model += [P4MConvZ2(ngf * mult, eq_ngf * mult, kernel_size=1)]
 
        for i in range(n_blocks):       # add ResNet blocks
            model += [EqResnetBlock(eq_ngf * mult, padding_type=padding_type, norm_layer=eq_norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        model += [GroupPool(group_pool), nn.Conv2d(eq_ngf * mult if group_pool != 'cat' else 8 * eq_ngf * mult, ngf * mult, kernel_size=1)]
 
        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
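GroupPool is defined elsewhere in that repository; the 1x1 nn.Conv2d that follows it expects eq_ngf * mult channels for 'avg' and 8 * eq_ngf * mult for 'cat', so it evidently either averages the p4m group axis away or folds it into the channel axis. A minimal stand-in written under that assumption, not the repository's implementation:
import torch
import torch.nn as nn

class GroupPoolSketch(nn.Module):
    def __init__(self, mode='avg'):
        super().__init__()
        assert mode in ('avg', 'cat')
        self.mode = mode

    def forward(self, x):                  # x: (N, C, 8, H, W)
        if self.mode == 'avg':
            return x.mean(dim=2)           # average over the group axis -> (N, C, H, W)
        n, c, g, h, w = x.shape
        return x.view(n, c * g, h, w)      # concatenate orientations    -> (N, 8*C, H, W)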
Example #9
    def __init__(self, block, num_blocks, low_dim=128, multitask=False):
        super(ResNet, self).__init__()
        self.multitask = multitask

        self.in_planes = 23
        self.conv1 = P4MConvZ2(3, 23, kernel_size=7, stride=2, padding=3, bias=False, batch_norm=True, max_pool=True)

        self.layer1 = self._make_layer(block, 23, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 45, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 91, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 181, num_blocks[3], stride=2)
        self.linear = nn.Linear(181*8*block.expansion, low_dim)

        self.l2norm = Normalize(2)
Example #10
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 23

        self.conv1 = P4MConvZ2(3,
                               23,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(23)
        self.layer1 = self._make_layer(block, 23, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 45, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 91, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 181, num_blocks[3], stride=2)
        self.linear = nn.Linear(181 * 8 * block.expansion, num_classes)
Example #11
 def __init__(self, input_size, n_classes=10, **kwargs):
     super(AllConvNet2, self).__init__()
     self.conv1 = P4MConvZ2(input_size, 96, 3, padding=1)
     self.bn1 = nn.BatchNorm3d(96) 
     self.conv2 = P4MConvP4M(96, 96, 3, padding=1)
     self.bn2 = nn.BatchNorm3d(96)
     self.conv3 = P4MConvP4M(96, 96, 3, padding=1, stride=2)
     self.bn3 = nn.BatchNorm3d(96)
     self.conv4 = P4MConvP4M(96, 192, 3, padding=1)
     self.bn4 = nn.BatchNorm3d(192)
     self.conv5 = P4MConvP4M(192, 192, 3, padding=1)
     self.bn5 = nn.BatchNorm3d(192)
     self.conv6 = P4MConvP4M(192, 192, 3, padding=1, stride=2)
     self.bn6 = nn.BatchNorm3d(192)
     self.conv7 = P4MConvP4M(192, 192, 3, padding=1)
     self.bn7 = nn.BatchNorm3d(192)
     self.conv8 = P4MConvP4M(192, 192, 1)
     self.bn8 = nn.BatchNorm3d(192)
     self.hidden_fc = nn.Linear(192*8*8*8,128)
     self.class_fc = nn.Linear(128, n_classes)
Example #12
import torch
from torch.autograd import Variable
from groupy.gconv.pytorch_gconv import P4ConvZ2, P4ConvP4, P4MConvZ2, P4MConvP4M, P4MConvP4M_SC, P4MConvP4M_SCC, P4MConvP4M_SF

# Construct G-Conv layers
#C1 = P4ConvZ2(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1)
#C2 = P4ConvP4(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)

P1 = P4MConvZ2(in_channels=3,
               out_channels=64,
               kernel_size=3,
               stride=1,
               padding=1)
P2 = P4MConvP4M(in_channels=64,
                out_channels=64,
                kernel_size=3,
                stride=1,
                padding=1)
P3 = P4MConvP4M_SF(in_channels=64,
                   out_channels=64,
                   kernel_size=3,
                   stride=1,
                   padding=1)

C1 = P4MConvZ2(in_channels=3,
               out_channels=64,
               kernel_size=3,
               stride=1,
               padding=1)
C2 = P4MConvP4M(in_channels=64,
                out_channels=64,