Example #1
    def __init__(self, n_classes=40):
        super(DGCNN, self).__init__()
        self.k = 20
        self.knn = KNN(self.k)
        self.bn1 = nn.BatchNorm(64)
        self.bn2 = nn.BatchNorm(64)
        self.bn3 = nn.BatchNorm(128)
        self.bn4 = nn.BatchNorm(256)
        self.bn5 = nn.BatchNorm1d(1024)

        self.conv1 = nn.Sequential(nn.Conv(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(scale=0.2))
        self.conv2 = nn.Sequential(nn.Conv(64*2, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(scale=0.2))
        self.conv3 = nn.Sequential(nn.Conv(64*2, 128, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(scale=0.2))
        self.conv4 = nn.Sequential(nn.Conv(128*2, 256, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(scale=0.2))
        self.conv5 = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(scale=0.2))
        self.linear1 = nn.Linear(1024*2, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.5)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.5)
        self.linear3 = nn.Linear(256, n_classes)
Example #2
 def __init__(self, n_classes=40):
     super(PointConvDensityClsSsg, self).__init__()
     self.sa1 = PointConvDensitySetAbstraction(npoint=512,
                                               nsample=32,
                                               in_channel=3,
                                               mlp=[64, 64, 128],
                                               bandwidth=0.1,
                                               group_all=False)
     self.sa2 = PointConvDensitySetAbstraction(npoint=128,
                                               nsample=64,
                                               in_channel=128 + 3,
                                               mlp=[128, 128, 256],
                                               bandwidth=0.2,
                                               group_all=False)
     self.sa3 = PointConvDensitySetAbstraction(npoint=1,
                                               nsample=None,
                                               in_channel=256 + 3,
                                               mlp=[256, 512, 1024],
                                               bandwidth=0.4,
                                               group_all=True)
     self.fc1 = nn.Linear(1024, 512)
     self.bn1 = nn.BatchNorm1d(512)
     self.drop1 = nn.Dropout(0.4)
     self.fc2 = nn.Linear(512, 256)
     self.bn2 = nn.BatchNorm1d(256)
     self.drop2 = nn.Dropout(0.4)
     self.fc3 = nn.Linear(256, n_classes)
     self.relu = nn.ReLU()
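For context, a minimal sketch of how these set-abstraction layers are usually chained in a PointConv classifier. The (xyz, features) call signature of PointConvDensitySetAbstraction is an assumption, as are the usual imports (import jittor as jt, from jittor import nn); none of this is taken from the snippet itself:

 def execute(self, xyz):
     # hypothetical forward pass: three set-abstraction stages, then an MLP head with dropout
     B = xyz.shape[0]
     l1_xyz, l1_points = self.sa1(xyz, None)
     l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
     l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
     x = jt.reshape(l3_points, (B, 1024))
     x = self.drop1(self.relu(self.bn1(self.fc1(x))))
     x = self.drop2(self.relu(self.bn2(self.fc2(x))))
     return self.fc3(x)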
Example #3
    def __init__(self, output_channels=40):
        super(Point_Transformer, self).__init__()

        self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)

        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(128)

        self.sa1 = SA_Layer(128)
        self.sa2 = SA_Layer(128)
        self.sa3 = SA_Layer(128)
        self.sa4 = SA_Layer(128)

        self.conv_fuse = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024), nn.LeakyReLU(scale=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.5)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.5)
        self.linear3 = nn.Linear(256, output_channels)

        self.relu = nn.ReLU()
Example #4
 def __init__(self):
     super(Discriminator, self).__init__()
     self.label_embedding = nn.Embedding(n_classes, n_classes)
     self.model = nn.Sequential(
         nn.Linear((n_classes + int(np.prod(img_shape))), 512),
         nn.LeakyReLU(0.2), nn.Linear(512, 512), nn.Dropout(0.4),
         nn.LeakyReLU(0.2), nn.Linear(512, 512), nn.Dropout(0.4),
         nn.LeakyReLU(0.2), nn.Linear(512, 1))
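A sketch of how a conditional-GAN discriminator like this is usually called. The concatenation of the flattened image with the label embedding is inferred from the first Linear layer's input size (n_classes + prod(img_shape)), not taken from the snippet:

 def execute(self, img, labels):
     # hypothetical forward: flatten the image, append the label embedding, score with the MLP
     d_in = jt.concat((jt.reshape(img, (img.shape[0], -1)), self.label_embedding(labels)), dim=1)
     return self.model(d_in)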
Example #5
 def __init__(self, features, num_classes=1000, init_weights=True):
     super(VGG, self).__init__()
     self.features = features
     self.classifier = nn.Sequential(
         nn.Linear(512 * 7 * 7, 4096),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(4096, num_classes),
     )
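The matching forward pass is the standard VGG one; a minimal sketch, assuming 224x224 inputs so the feature map flattens to 512*7*7:

 def execute(self, x):
     x = self.features(x)                    # conv backbone passed in as the features argument
     x = jt.reshape(x, (x.shape[0], -1))     # flatten to (N, 512*7*7)
     return self.classifier(x)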
Example #6
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_inplanes = 256  # resnet / res2net: 256, xception: 128, mobilenet: 24

        self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(), nn.Dropout(0.5),
            nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(), nn.Dropout(0.1),
            nn.Conv(256, num_classes, kernel_size=1, stride=1))
Example #7
    def __init__(self, part_num):
        super(DGCNN_partseg, self).__init__()
        self.seg_num_all = part_num
        self.k = 40
        self.knn = KNN(self.k)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.bn5 = nn.BatchNorm2d(64)
        self.bn6 = nn.BatchNorm1d(1024)
        self.bn7 = nn.BatchNorm1d(64)
        self.bn8 = nn.BatchNorm1d(256)
        self.bn9 = nn.BatchNorm1d(256)
        self.bn10 = nn.BatchNorm1d(128)

        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(scale=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(scale=0.2))
        self.conv3 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(scale=0.2))
        self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(scale=0.2))
        self.conv5 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(scale=0.2))
        self.conv6 = nn.Sequential(nn.Conv1d(192, 1024, kernel_size=1, bias=False),
                                   self.bn6,
                                   nn.LeakyReLU(scale=0.2))
        self.conv7 = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False),
                                   self.bn7,
                                   nn.LeakyReLU(scale=0.2))
        self.conv8 = nn.Sequential(nn.Conv1d(1280, 256, kernel_size=1, bias=False),
                                   self.bn8,
                                   nn.LeakyReLU(scale=0.2))
        self.dp1 = nn.Dropout(p=0.5)
        self.conv9 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=False),
                                   self.bn9,
                                   nn.LeakyReLU(scale=0.2))
        self.dp2 = nn.Dropout(p=0.5)
        self.conv10 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False),
                                   self.bn10,
                                   nn.LeakyReLU(scale=0.2))
        self.conv11 = nn.Conv1d(128, self.seg_num_all, kernel_size=1, bias=False)
Example #8
File: pct.py, Project: xiaoxTM/jittor-pcl
 def __init__(self, args, margs):
     super(PointCloudTransformer, self).__init__(args, margs)
     self.input_embeds = nn.Sequential(
         Permute(0, 2, 1), nn.Conv1d(3, 64, kernel_size=1, bias=False),
         nn.BatchNorm1d(64), nn.ReLU(),
         nn.Conv1d(64, 64, kernel_size=1, bias=False), nn.BatchNorm1d(64),
         nn.ReLU(), Permute(0, 2, 1))
     self.knn_embeds = nn.Sequential(KNNEmbed(128, 128, 512, 32),
                                     KNNEmbed(256, 256, 256, 32))
     self.transformer = PointTransformer()
     self.classifier = nn.Sequential(nn.Linear(1024, 512),
                                     nn.BatchNorm1d(512), nn.ReLU(),
                                     nn.Dropout(p=0.5), nn.Linear(512, 256),
                                     nn.BatchNorm1d(256), nn.Dropout(p=0.5),
                                     nn.Linear(256, 40))
Example #9
 def _forward(self, x):
     x = self.Conv2d_1a_3x3(x)
     x = self.Conv2d_2a_3x3(x)
     x = self.Conv2d_2b_3x3(x)
     x = nn.pool(x, 3, "maximum", stride=2)
     x = self.Conv2d_3b_1x1(x)
     x = self.Conv2d_4a_3x3(x)
     x = nn.pool(x, 3, "maximum", stride=2)
     x = self.Mixed_5b(x)
     x = self.Mixed_5c(x)
     x = self.Mixed_5d(x)
     x = self.Mixed_6a(x)
     x = self.Mixed_6b(x)
     x = self.Mixed_6c(x)
     x = self.Mixed_6d(x)
     x = self.Mixed_6e(x)
     aux_defined = self.aux_logits
     if aux_defined:
         aux = self.AuxLogits(x)
     else:
         aux = None
     x = self.Mixed_7a(x)
     x = self.Mixed_7b(x)
     x = self.Mixed_7c(x)
     x = nn.AdaptiveAvgPool2d(1)(x)
     x = nn.Dropout()(x)
     x = jt.reshape(x, (x.shape[0], -1))
     x = self.fc(x)
     return (x, aux)
Example #10
 def __init__(self, version='1_0', num_classes=1000):
     super(SqueezeNet, self).__init__()
     self.num_classes = num_classes
     if (version == '1_0'):
         self.features = nn.Sequential(
             nn.Conv(3, 96, kernel_size=7, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(96, 16, 64, 64), Fire(128, 16, 64, 64),
             Fire(128, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 32, 128, 128), Fire(256, 48, 192, 192),
             Fire(384, 48, 192, 192), Fire(384, 64, 256, 256),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(512, 64, 256, 256))
     elif (version == '1_1'):
         self.features = nn.Sequential(
             nn.Conv(3, 64, kernel_size=3, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(64, 16, 64, 64), Fire(128, 16, 64, 64),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(128, 32, 128, 128), Fire(256, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 48, 192, 192), Fire(384, 48, 192, 192),
             Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
     else:
         raise ValueError(
             'Unsupported SqueezeNet version {version}:1_0 or 1_1 expected'.
             format(version=version))
     final_conv = nn.Conv(512, self.num_classes, kernel_size=1)
     self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                     final_conv, nn.Relu(),
                                     nn.AdaptiveAvgPool2d((1, 1)))
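For reference, the usual SqueezeNet forward just chains the two Sequentials and flattens the pooled class map; a minimal sketch:

 def execute(self, x):
     x = self.features(x)
     x = self.classifier(x)                   # ends in AdaptiveAvgPool2d((1, 1))
     return jt.reshape(x, (x.shape[0], -1))   # (N, num_classes)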
Example #11
 def __init__(self, num_classes=1000, aux_logits=True, init_weights=True, blocks=None):
     super(GoogLeNet, self).__init__()
     if (blocks is None):
         blocks = [BasicConv2d, Inception, InceptionAux]
     assert (len(blocks) == 3)
     conv_block = blocks[0]
     inception_block = blocks[1]
     inception_aux_block = blocks[2]
     self.aux_logits = aux_logits
     self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
     self.maxpool1 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.conv2 = conv_block(64, 64, kernel_size=1)
     self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
     self.maxpool2 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
     self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
     self.maxpool3 = nn.Pool(3, stride=2, ceil_mode=True, op='maximum')
     self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
     self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
     self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
     self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
     self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
     self.maxpool4 = nn.Pool(2, stride=2, ceil_mode=True, op='maximum')
     self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
     self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
     if aux_logits:
         self.aux1 = inception_aux_block(512, num_classes)
         self.aux2 = inception_aux_block(528, num_classes)
     else:
         self.aux1 = None
         self.aux2 = None
     self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
     self.dropout = nn.Dropout(0.2)
     self.fc = nn.Linear(1024, num_classes)
Example #12
    def build_conv_block(self, dim, padding_type, norm_layer, activation,
                         use_dropout):
        conv_block = []
        p = 0
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d(1)]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d(1)]
        elif (padding_type == 'zero'):
            p = 1
        else:
            raise NotImplementedError(
                ('padding [%s] is not implemented' % padding_type))
        conv_block += [
            nn.Conv(dim, dim, 3, padding=p),
            norm_layer(dim), activation
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d(1)]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d(1)]
        elif (padding_type == 'zero'):
            p = 1
        else:
            raise NotImplementedError(
                ('padding [%s] is not implemented' % padding_type))
        conv_block += [nn.Conv(dim, dim, 3, padding=p), norm_layer(dim)]
        return nn.Sequential(*conv_block)
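This helper is normally wrapped in a residual block whose forward adds the input back; a minimal sketch, assuming the returned Sequential is stored as self.conv_block in __init__:

    def execute(self, x):
        # hypothetical residual connection around the conv block built above
        return x + self.conv_block(x)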
Example #13
    def __init__(self, dim, heads=8, dropout=0.):
        super(Attention, self).__init__()
        self.heads = heads
        self.scale = dim**-0.5

        self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
        self.to_out = nn.Sequential(nn.Linear(dim, dim), nn.Dropout(dropout))
Example #14
 def __init__(self):
     super(SingleInputNet, self).__init__()
     self.conv1 = nn.Conv(1, 10, 5)
     self.conv2 = nn.Conv(10, 20, 5)
     self.conv2_drop = nn.Dropout(p=0.3)
     self.fc1 = nn.Linear(320, 50)
     self.fc2 = nn.Linear(50, 10)
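A sketch of the LeNet-style forward these layers imply; the 320 input to fc1 assumes 1x28x28 inputs (20 channels * 4 * 4 after two conv/pool stages):

 def execute(self, x):
     x = nn.relu(nn.pool(self.conv1(x), 2, "maximum", stride=2))
     x = nn.relu(nn.pool(self.conv2_drop(self.conv2(x)), 2, "maximum", stride=2))
     x = jt.reshape(x, (x.shape[0], -1))   # (N, 320) for 28x28 inputs
     x = nn.relu(self.fc1(x))
     return self.fc2(x)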
Example #15
    def __init__(self, part_num=50):
        super(Point_Transformer_partseg, self).__init__()
        self.part_num = part_num
        self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)

        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(128)

        self.sa1 = SA_Layer(128)
        self.sa2 = SA_Layer(128)
        self.sa3 = SA_Layer(128)
        self.sa4 = SA_Layer(128)

        self.conv_fuse = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024), nn.LeakyReLU(scale=0.2))

        self.label_conv = nn.Sequential(
            nn.Conv1d(16, 64, kernel_size=1, bias=False), nn.BatchNorm1d(64),
            nn.LeakyReLU(scale=0.2))

        self.convs1 = nn.Conv1d(1024 * 3 + 64, 512, 1)
        self.dp1 = nn.Dropout(0.5)
        self.convs2 = nn.Conv1d(512, 256, 1)
        self.convs3 = nn.Conv1d(256, self.part_num, 1)
        self.bns1 = nn.BatchNorm1d(512)
        self.bns2 = nn.BatchNorm1d(256)

        self.relu = nn.ReLU()
Example #16
 def __init__(self, alpha, num_classes=1000, dropout=0.2):
     super(MNASNet, self).__init__()
     assert (alpha > 0.0)
     self.alpha = alpha
     self.num_classes = num_classes
     depths = _get_depths(alpha)
     layers = [
         nn.Conv(3, 32, 3, padding=1, stride=2, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 16, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(16, momentum=_BN_MOMENTUM),
         _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
         _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
         _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
         _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
         nn.Conv(depths[5], 1280, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(1280, momentum=_BN_MOMENTUM),
         nn.Relu()
     ]
     self.layers = nn.Sequential(*layers)
     self.classifier = nn.Sequential(nn.Dropout(p=dropout),
                                     nn.Linear(1280, num_classes))
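The classifier above expects globally pooled 1280-d features; a sketch of the corresponding forward, reusing the AdaptiveAvgPool2d idiom seen in the other examples here:

 def execute(self, x):
     x = self.layers(x)
     x = nn.AdaptiveAvgPool2d(1)(x)          # global average pooling
     x = jt.reshape(x, (x.shape[0], -1))     # (N, 1280)
     return self.classifier(x)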
Example #17
    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        inter_channels = in_channels // 4
        self.conv5a = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.conv5c = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.sa = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.conv51 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())
        self.conv52 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        #        self.conv6 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))
        #        self.conv7 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))

        self.conv8 = nn.Sequential(nn.Dropout(0.1, False),
                                   nn.Conv(inter_channels, out_channels, 1))
Example #18
    def __init__(self, part_num=50):
        super(PointConvDensity_partseg, self).__init__()
        self.part_num = part_num 

        self.sa0 = PointConvDensitySetAbstraction(npoint=1024, nsample=32, in_channel=3, mlp=[32, 32, 64], bandwidth=0.1, group_all=False)
        self.sa1 = PointConvDensitySetAbstraction(npoint=256, nsample=32, in_channel=64 + 3, mlp=[64, 64, 128], bandwidth=0.2, group_all=False)
        self.sa2 = PointConvDensitySetAbstraction(npoint=64, nsample=32, in_channel=128 + 3, mlp=[128, 128, 256], bandwidth=0.4, group_all=False)
        self.sa3 = PointConvDensitySetAbstraction(npoint=36, nsample=32, in_channel=256 + 3, mlp=[256, 256, 512], bandwidth=0.8, group_all=False)
        

        # TODO upsample  
        # upsampling 
        # def __init__(self, nsample, in_channel, mlp, bandwidth):

        self.in0 = PointConvDensitySetInterpolation(nsample=16, in_channel=512 + 3, mlp=[512, 512], bandwidth=0.8)
        self.in1 = PointConvDensitySetInterpolation(nsample=16, in_channel=512 + 3, mlp=[256, 256], bandwidth=0.4)
        self.in2 = PointConvDensitySetInterpolation(nsample=16, in_channel=256 + 3, mlp=[128, 128], bandwidth=0.2)
        self.in3 = PointConvDensitySetInterpolation(nsample=16, in_channel=128 + 3, mlp=[128, 128, 128], bandwidth=0.1)
        
        # self.fp0 = PointConvDensitySetAbstraction(npoint=1024, nsample=32, in_channel=3, mlp=[32,32,64], bandwidth = 0.1, group_all=False)
        # self.fp1 = PointConvDensitySetAbstraction(npoint=256, nsample=32, in_channel=64 + 3, mlp=[64,64,128], bandwidth = 0.2, group_all=False)
        # self.fp2 = PointConvDensitySetAbstraction(npoint=64, nsample=32, in_channel=128 + 3, mlp=[128,128,256], bandwidth = 0.4, group_all=False)
        # self.fp3 = PointConvDensitySetAbstraction(npoint=36, nsample=32, in_channel=256 + 3, mlp=[256,256,512], bandwidth = 0.8, group_all=False)
        
        self.fc1 = nn.Conv1d(128, 128, 1)
        self.bn1 = nn.BatchNorm1d(128)
        self.drop1 = nn.Dropout(0.4)
        self.fc3 = nn.Conv1d(128, self.part_num, 1)
        self.relu = nn.ReLU() 
Example #19
    def __init__(self, in_size, out_size, inner_nc, dropout=0.0, innermost=False, outermost=False, submodule=None):
        super(UnetBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
        downnorm = nn.BatchNorm2d(inner_nc)
        downrelu = nn.LeakyReLU(0.2)
        upnorm = nn.BatchNorm2d(out_size)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if dropout:
                model = down + [submodule] + up + [nn.Dropout(dropout)]
            else:
                model = down + [submodule] + up
        
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #20
 def __init__(self, in_features, dropout=0.5):
     super(ResidualBlock, self).__init__()
     model = [nn.ReflectionPad2d(1), nn.Conv(in_features, in_features, 3, bias=False), nn.BatchNorm2d(in_features), nn.ReLU()]
     if dropout:
         model += [nn.Dropout(dropout)]
     model += [nn.ReflectionPad2d(1), nn.Conv(in_features, in_features, 3, bias=False), nn.BatchNorm2d(in_features)]
     self.conv_block = nn.Sequential(*model)
Example #21
    def build_model(self):
        self.pointnet_modules = nn.ModuleList()
        self.pointnet_modules.append(
            PointnetModule(
                n_points=512,
                radius=0.2,
                n_samples=64,
                mlp=[3, 64, 64, 128],
                use_xyz=self.use_xyz,
            ))

        self.pointnet_modules.append(
            PointnetModule(
                n_points=128,
                radius=0.4,
                n_samples=64,
                mlp=[128, 128, 128, 256],
                use_xyz=self.use_xyz,
            ))

        self.pointnet_modules.append(
            PointnetModule(
                mlp=[256, 256, 512, 1024],
                use_xyz=self.use_xyz,
            ))

        self.fp3 = PointNetFeaturePropagation(in_channel=1280, mlp=[256, 256])
        self.fp2 = PointNetFeaturePropagation(in_channel=384, mlp=[256, 128])
        self.fp1 = PointNetFeaturePropagation(in_channel=128 + 16 + 6,
                                              mlp=[128, 128, 128])

        self.fc_layer = nn.Sequential(nn.Conv1d(128, 128, 1),
                                      nn.BatchNorm1d(128), nn.Dropout(0.5),
                                      nn.Conv1d(128, self.part_num, 1))
Example #22
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 num_classes=1000,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 hybrid_backbone=None,
                 norm_layer=nn.LayerNorm):
        super(VisionTransformer, self).__init__()
        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(hybrid_backbone,
                                           img_size=img_size,
                                           in_chans=in_chans,
                                           embed_dim=embed_dim)
        else:
            self.patch_embed = PatchEmbed(img_size=img_size,
                                          patch_size=patch_size,
                                          in_chans=in_chans,
                                          embed_dim=embed_dim)

        num_patches = self.patch_embed.num_patches

        self.cls_token = jt.zeros((1, 1, embed_dim))
        self.pos_embed = jt.zeros((1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(drop_rate)

        dpr = [x.item() for x in np.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(dim=embed_dim,
                  num_heads=num_heads,
                  mlp_ratio=mlp_ratio,
                  qkv_bias=qkv_bias,
                  qk_scale=qk_scale,
                  drop=drop_rate,
                  attn_drop=attn_drop_rate,
                  drop_path=dpr[i],
                  norm_layer=norm_layer) for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)

        # NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here
        #self.repr = nn.Linear(embed_dim, representation_size)
        #self.repr_act = nn.Tanh()

        # Classifier head
        self.head = nn.Linear(embed_dim, num_classes)

        self.pos_embed = trunc_normal(self.pos_embed, std=.02)
        self.cls_token = trunc_normal(self.cls_token, std=.02)
        self.apply(self._init_weights)
Example #23
    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
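A sketch of the standard ViT-style attention forward these layers support. The (B, N, C) token layout and the reshape/permute bookkeeping are assumptions, not taken from the snippet:

    def execute(self, x):
        # hypothetical scaled dot-product attention over (B, N, C) token features
        B, N, C = x.shape
        qkv = self.qkv(x).reshape((B, N, 3, self.num_heads, C // self.num_heads)).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]                   # each (B, heads, N, C // heads)
        attn = nn.softmax((q @ k.transpose(0, 1, 3, 2)) * self.scale, dim=3)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(0, 2, 1, 3).reshape((B, N, C))
        x = self.proj(x)
        return self.proj_drop(x)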
Example #24
 def execute(self, x):
     x = nn.AdaptiveAvgPool2d(4)(x)
     x = self.conv(x)
     x = jt.reshape(x, (x.shape[0], -1))
     x = nn.relu(self.fc1(x))
     x = nn.Dropout(0.7)(x)
     x = self.fc2(x)
     return x
Example #25
 def __init__(self, num_classes=1000):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv(3, 64, kernel_size=11, stride=4, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(64, 192, kernel_size=5, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(192, 384, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(384, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(256, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'))
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(nn.Dropout(),
                                     nn.Linear(((256 * 6) * 6), 4096),
                                     nn.Relu(), nn.Dropout(),
                                     nn.Linear(4096, 4096), nn.Relu(),
                                     nn.Linear(4096, num_classes))
Example #26
File: lsgan.py, Project: whuyyc/gan-jittor
 def discriminator_block(in_filters, out_filters, bn=True):
     block = [
         nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
         nn.LeakyReLU(scale=0.2),
         nn.Dropout(p=0.25)
     ]
     if bn:
         block.append(nn.BatchNorm(out_filters, eps=0.8))
     return block
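In an LSGAN discriminator this helper is typically unpacked into one Sequential; a hypothetical composition (opt.channels and the channel widths here are illustrative only):

 self.model = nn.Sequential(
     *discriminator_block(opt.channels, 16, bn=False),   # first block without BatchNorm
     *discriminator_block(16, 32),
     *discriminator_block(32, 64),
     *discriminator_block(64, 128),
 )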
Example #27
 def __init__(self, num_classes=21, output_stride=16):
     super(EANet, self).__init__()
     self.backbone = resnet50(output_stride)
     self.fc0 = ConvBNReLU(2048, 512, 3, 1, 1, 1)
     self.head = External_attention(512)
     self.fc1 = nn.Sequential(
         ConvBNReLU(512, 256, 3, 1, 1, 1),
         nn.Dropout(p=0.1))
     self.fc2 = nn.Conv2d(256, num_classes, 1)
Example #28
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
     super(_DenseLayer, self).__init__()
     self.add_module('norm1', nn.BatchNorm(num_input_features))
     self.add_module('relu1', nn.ReLU())
     self.add_module('conv1', nn.Conv(num_input_features, (bn_size * growth_rate), 1, stride=1, bias=False))
     self.add_module('norm2', nn.BatchNorm((bn_size * growth_rate)))
     self.add_module('relu2', nn.ReLU())
     self.add_module('conv2', nn.Conv((bn_size * growth_rate), growth_rate, 3, stride=1, padding=1, bias=False))
     self.drop_rate = drop_rate
     self.drop = nn.Dropout(self.drop_rate)
Example #29
 def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
     super(UNetDown, self).__init__()
     layers = [
         nn.Conv(in_size, out_size, 4, stride=2, padding=1, bias=False)
     ]
     if normalize:
         layers.append(nn.BatchNorm2d(out_size))
     layers.append(nn.LeakyReLU(scale=0.2))
     if dropout:
         layers.append(nn.Dropout(dropout))
     self.model = nn.Sequential(*layers)
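Blocks like this are usually stacked to form the encoder half of a pix2pix-style U-Net; a hypothetical composition (channel widths are illustrative only):

 self.down1 = UNetDown(3, 64, normalize=False)   # no BatchNorm on the first block
 self.down2 = UNetDown(64, 128)
 self.down3 = UNetDown(128, 256)
 self.down4 = UNetDown(256, 512, dropout=0.5)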
Example #30
 def __init__(self,
              in_features,
              hidden_features=None,
              out_features=None,
              act_layer=nn.GELU,
              drop=0.):
     super(MLP, self).__init__()
     out_features = out_features or in_features
     hidden_features = hidden_features or in_features
     self.fc1 = nn.Linear(in_features, hidden_features)
     self.act = act_layer()
     self.fc2 = nn.Linear(hidden_features, out_features)
     self.drop = nn.Dropout(drop)
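The usual transformer-MLP forward applies the shared Dropout after each linear layer; a minimal sketch:

 def execute(self, x):
     x = self.drop(self.act(self.fc1(x)))
     return self.drop(self.fc2(x))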