Example #1
def __init__(self, **kwargs):
    super(Decoder_hybrid, self).__init__(**kwargs)
    # Four 4x4x4 stride-2 transposed convolutions, each doubling the
    # spatial resolution, followed by a 1x1x1 projection to a single
    # sigmoid-activated output channel.
    self.layer1 = nn.HybridSequential()
    self.layer1.add(nn.Conv3DTranspose(512, kernel_size=4, strides=2,
                                       padding=1, use_bias=False),
                    nn.BatchNorm(in_channels=512),
                    nn.Activation('relu'))
    self.layer2 = nn.HybridSequential()
    self.layer2.add(nn.Conv3DTranspose(128, kernel_size=4, strides=2,
                                       padding=1, use_bias=False),
                    nn.BatchNorm(in_channels=128),
                    nn.Activation('relu'))
    self.layer3 = nn.HybridSequential()
    self.layer3.add(nn.Conv3DTranspose(32, kernel_size=4, strides=2,
                                       padding=1, use_bias=False),
                    nn.BatchNorm(in_channels=32),
                    nn.Activation('relu'))
    self.layer4 = nn.HybridSequential()
    self.layer4.add(nn.Conv3DTranspose(8, kernel_size=4, strides=2,
                                       padding=1, use_bias=False),
                    nn.BatchNorm(in_channels=8),
                    nn.Activation('relu'))
    self.layer5 = nn.HybridSequential()
    self.layer5.add(nn.Conv3DTranspose(1, kernel_size=1,
                                       activation='sigmoid', use_bias=False))
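The 4-2-1 (kernel, stride, padding) combination used by layer1 through layer4 doubles every spatial dimension, since a transposed convolution produces out = (in - 1) * stride - 2 * padding + kernel. A minimal standalone shape check (not taken from this example's repository):

from mxnet import nd
from mxnet.gluon import nn

layer = nn.Conv3DTranspose(8, kernel_size=4, strides=2, padding=1, use_bias=False)
layer.initialize()                             # in_channels is inferred on first call
x = nd.random.uniform(shape=(1, 3, 4, 4, 4))   # NCDHW layout
print(layer(x).shape)                          # (1, 8, 8, 8, 8): 4 -> 8 in each dim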
Example #2
def __init__(self, **kwargs):
    super(Refiner_hybrid, self).__init__(**kwargs)
    # Encoder: three Conv3D + BatchNorm + LeakyReLU + MaxPool3D stages,
    # each halving the spatial resolution.
    self.layer1 = nn.HybridSequential()
    self.layer1.add(
        nn.Conv3D(32, kernel_size=4, padding=2),
        nn.BatchNorm(in_channels=32),
        nn.LeakyReLU(0.2),
        nn.MaxPool3D(pool_size=2)
    )
    self.layer2 = nn.HybridSequential()
    self.layer2.add(
        nn.Conv3D(64, kernel_size=4, padding=2),
        nn.BatchNorm(in_channels=64),
        nn.LeakyReLU(0.2),
        nn.MaxPool3D(pool_size=2)
    )
    self.layer3 = nn.HybridSequential()
    self.layer3.add(
        nn.Conv3D(128, kernel_size=4, padding=2),
        nn.BatchNorm(in_channels=128),
        nn.LeakyReLU(0.2),
        nn.MaxPool3D(pool_size=2)
    )
    # Fully connected bottleneck.
    self.layer4 = nn.HybridSequential()
    self.layer4.add(
        nn.Dense(2048, activation='relu')
    )
    self.layer5 = nn.HybridSequential()
    self.layer5.add(
        nn.Dense(8192, activation='relu')
    )
    # Decoder: transposed convolutions mirror the encoder, doubling the
    # spatial resolution at each stage; the last stage emits a single
    # sigmoid-activated channel.
    self.layer6 = nn.HybridSequential()
    self.layer6.add(
        nn.Conv3DTranspose(64, kernel_size=4, strides=2, padding=1,
                           use_bias=False),
        nn.BatchNorm(in_channels=64),
        nn.Activation('relu')
    )
    self.layer7 = nn.HybridSequential()
    self.layer7.add(
        nn.Conv3DTranspose(32, kernel_size=4, strides=2, padding=1,
                           use_bias=False),
        nn.BatchNorm(in_channels=32),
        nn.Activation('relu')
    )
    self.layer8 = nn.HybridSequential()
    self.layer8.add(
        nn.Conv3DTranspose(1, kernel_size=4, strides=2, padding=1,
                           use_bias=False),
        nn.Activation('sigmoid')
    )
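Because every stage is an nn.HybridSequential, the whole refiner can be compiled with hybridize(). A minimal sketch of one such stage (standalone, not from this example's project):

from mxnet import nd
from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(nn.Conv3D(32, kernel_size=4, padding=2),
        nn.BatchNorm(in_channels=32),
        nn.LeakyReLU(0.2),
        nn.MaxPool3D(pool_size=2))
net.initialize()
net.hybridize()                          # compile into a static graph
y = net(nd.zeros((1, 1, 32, 32, 32)))
print(y.shape)                           # (1, 32, 16, 16, 16)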
Example #3
File: d3.py Project: chenjunweii/danmaku
def add(self, layers, kernel, stride, dilation, channel):
    # Builds mirrored encoder/decoder stacks of temporal-only
    # (kernel, 1, 1) convolutions; decoder layers are prepended so the
    # decoder runs in the reverse order of the encoder.
    for l in range(layers):
        self.encoder.append(nn.Conv3D(channel,
            kernel_size=[kernel, 1, 1],
            strides=[stride, 1, 1],
            padding=[dilation, 0, 0],
            dilation=[dilation, 1, 1]))
        conv = nn.Conv3D if 'bottleneck' in self.arch else nn.Conv3DTranspose
        self.decoder.insert(0, conv(channel,
            kernel_size=[kernel, 1, 1],
            strides=[stride, 1, 1],
            padding=[dilation, 0, 0],
            dilation=[dilation, 1, 1]))
        # Layers live in plain Python lists, so they must be registered
        # explicitly for their parameters to be collected.
        self.register_child(self.encoder[-1])
        self.register_child(self.decoder[0])
        if self.reconstruct:
            assert 'bottleneck' not in self.arch
            assert 'encoder' in self.arch and 'decoder' in self.arch
            # The last reconstructor layer maps back to the feature size.
            channel = channel if l != layers - 1 else self.feature
            self.reconstructor.append(nn.Conv3DTranspose(channel,
                kernel_size=[kernel, 1, 1],
                strides=[stride, 1, 1],
                padding=[dilation, 0, 0],
                dilation=[dilation, 1, 1]))
            self.register_child(self.reconstructor[-1])
        if self.norm:
            self.enorm.append(self.block['norm'](axis=2))
            self.dnorm.append(self.block['norm'](axis=2))
            self.register_child(self.enorm[-1])
            self.register_child(self.dnorm[-1])
            if self.reconstruct:
                self.rnorm.append(self.block['norm'](axis=2))
                self.register_child(self.rnorm[-1])
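The explicit register_child calls are needed because Gluon only auto-registers blocks assigned as attributes; blocks appended to plain Python lists stay invisible to collect_params() otherwise. A minimal illustration of the pattern (hypothetical ListBlock, not from this project):

from mxnet.gluon import nn

class ListBlock(nn.Block):
    def __init__(self, **kwargs):
        super(ListBlock, self).__init__(**kwargs)
        self.layers = []                           # plain list: no auto-registration
        for _ in range(3):
            self.layers.append(nn.Dense(4))
            self.register_child(self.layers[-1])   # expose the parameters

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x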
Example #4
def __init__(self, opt):
    super(RenderNet, self).__init__()

    # Program LSTM parameters.
    self.vocab_size = opt.program_size
    self.max_param = opt.max_param
    self.input_encoding_size = opt.input_encoding_size
    self.rnn_size = opt.rnn_size
    self.num_layers = opt.num_layers
    self.drop_prob_lm = opt.drop_prob_lm
    self.seq_length = opt.seq_length
    self.program_vector_size = opt.program_vector_size
    self.nc = opt.nc

    # Program tokens and their parameters are embedded separately and
    # concatenated to input_encoding_size.
    self.pgm_embed = nn.Dense(int(self.input_encoding_size / 2))
    self.param_embed = nn.Dense(self.input_encoding_size -
                                int(self.input_encoding_size / 2))
    self.ctx = d2l.try_gpu()
    self.lstm = rnn.LSTM(hidden_size=self.rnn_size,
                         num_layers=self.num_layers,
                         dropout=self.drop_prob_lm,
                         input_size=self.input_encoding_size)

    self.pgm_param_feat = nn.Dense(self.program_vector_size)

    # The decoder alternates transposed convolutions (upsampling) with
    # ordinary convolutions (refinement); each tuple is
    # (channels, kernel, stride, padding).
    self.decoder = nn.Sequential()
    params = [(64, 4, 1, 0), (64, 3, 1, 1), (16, 4, 2, 1), (16, 3, 1, 1),
              (4, 4, 2, 1), (4, 3, 1, 1)]
    for idx, param in enumerate(params):
        if idx % 2 == 0:
            self.decoder.add(
                nn.Conv3DTranspose(channels=param[0],
                                   kernel_size=param[1],
                                   strides=param[2],
                                   padding=param[3],
                                   use_bias=False), nn.BatchNorm(),
                nn.Activation('relu'))
        else:
            self.decoder.add(
                nn.Conv3D(channels=param[0],
                          kernel_size=param[1],
                          strides=param[2],
                          padding=param[3],
                          use_bias=False), nn.BatchNorm(),
                nn.Activation('relu'))

    # Final upsampling stage to nc output channels (positional arguments:
    # kernel_size=4, strides=2, padding=1).
    self.decoder.add(nn.Conv3DTranspose(self.nc, 4, 2, 1, use_bias=False))
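Under the assumption that the decoder starts from a 1x1x1 program-feature volume, tracing the (channels, kernel, stride, padding) tuples through the usual shape formulas shows the volume growing to 32^3 after the final stage; a quick arithmetic sketch:

def tconv_out(n, k, s, p):               # transposed convolution
    return (n - 1) * s - 2 * p + k

def conv_out(n, k, s, p):                # ordinary convolution
    return (n + 2 * p - k) // s + 1

n = 1                                    # assumed 1x1x1 starting volume
stages = [(64, 4, 1, 0), (64, 3, 1, 1), (16, 4, 2, 1),
          (16, 3, 1, 1), (4, 4, 2, 1), (4, 3, 1, 1)]
for i, (c, k, s, p) in enumerate(stages):
    n = tconv_out(n, k, s, p) if i % 2 == 0 else conv_out(n, k, s, p)
print(n)                                 # 16
print(tconv_out(n, 4, 2, 1))             # 32, after the final Conv3DTranspose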
Example #5
def __init__(self, out_channels, **kwargs):
    super(TransitionBlockUp, self).__init__(**kwargs)
    # Pre-activation (BN -> ReLU -> conv) transition in the DenseNet style;
    # the stride-2 transposed convolution doubles each spatial dimension.
    self.ops = nn.HybridSequential()
    self.ops.add(
        nn.BatchNorm(),
        nn.Activation(activation='relu'),
        nn.Conv3DTranspose(channels=out_channels,
                           kernel_size=4,
                           strides=2,
                           padding=1),
    )
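Assuming the class's hybrid_forward simply applies self.ops (not shown in this snippet), usage looks like this, with illustrative shapes:

from mxnet import nd

block = TransitionBlockUp(out_channels=32)
block.initialize()
y = block(nd.zeros((2, 64, 4, 4, 4)))    # NCDHW input
print(y.shape)                           # (2, 32, 8, 8, 8): spatial dims doubled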
Example #6
def __init__(self, dropout_ratio=0.5, expansion=1,
             num_segments=1, num_crop=1, feat_ext=False,
             init_std=0.001, ctx=None, partial_bn=False,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(R2Plus1D_TranConv, self).__init__()
    self.partial_bn = partial_bn
    self.dropout_ratio = dropout_ratio
    self.init_std = init_std
    self.num_segments = num_segments
    self.num_crop = num_crop
    self.feat_ext = feat_ext
    self.inplanes = 64
    self.feat_dim = 512 * expansion
    with self.name_scope():
        self.bn1 = norm_layer(in_channels=45, **({} if norm_kwargs is None else norm_kwargs))
        self.relu = nn.Activation('relu')
        self.bn2 = norm_layer(in_channels=64, **({} if norm_kwargs is None else norm_kwargs))
        self.sigmoid = nn.Activation('sigmoid')
    if self.partial_bn:
        # Freeze batch-norm statistics for layers built after this point.
        if norm_kwargs is not None:
            norm_kwargs['use_global_stats'] = True
        else:
            norm_kwargs = {'use_global_stats': True}

    # Decoder: 3x3x3 transposed convolutions refine features, while the
    # stride-2 up_sample* layers (bilinear-initialized, bias-free) double
    # the resolution, going from 512 channels back to 3-channel video.
    self.up_layer4 = nn.Conv3DTranspose(in_channels=512, channels=256,
                                        kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.up_sample4 = nn.Conv3DTranspose(in_channels=256, channels=256,
                                         kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=0,
                                         use_bias=False, weight_initializer=init.Bilinear())
    self.up_layer3 = nn.Conv3DTranspose(in_channels=256, channels=128,
                                        kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.up_sample3 = nn.Conv3DTranspose(in_channels=128, channels=128,
                                         kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=0,
                                         use_bias=False, weight_initializer=init.Bilinear())
    self.up_layer2 = nn.Conv3DTranspose(in_channels=128, channels=64,
                                        kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.up_sample2 = nn.Conv3DTranspose(in_channels=64, channels=64,
                                         kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=0,
                                         use_bias=False, weight_initializer=init.Bilinear())
    self.up_layer1 = nn.Conv3DTranspose(in_channels=64, channels=64,
                                        kernel_size=(3, 3, 3), padding=(0, 1, 1))
    self.up_sample1 = nn.Conv3DTranspose(in_channels=64, channels=64,
                                         kernel_size=(1, 2, 2), strides=(1, 2, 2), padding=(1, 0, 0),
                                         use_bias=False, weight_initializer=init.Bilinear())
    self.up_conv2 = nn.Conv3DTranspose(in_channels=64, channels=45,
                                       kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.up_conv1 = nn.Conv3DTranspose(in_channels=45, channels=3,
                                       kernel_size=(3, 3, 3), padding=(1, 1, 1))
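The up_sample* layers follow a common learned-upsampling recipe: a bias-free stride-2 transposed convolution whose weights start from init.Bilinear(). Note that Bilinear() was written with 2-D deconvolution filters in mind, so its reuse on 3-D weights here reflects this repository's choice rather than a guaranteed trilinear kernel. A standalone sketch of one such layer:

from mxnet import init, nd
from mxnet.gluon import nn

up = nn.Conv3DTranspose(in_channels=64, channels=64, kernel_size=(2, 2, 2),
                        strides=(2, 2, 2), use_bias=False,
                        weight_initializer=init.Bilinear())
up.initialize()
x = nd.zeros((1, 64, 4, 8, 8))
print(up(x).shape)                       # (1, 64, 8, 16, 16)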
Example #7
def __init__(self, c1, c2):
    super(Expansion, self).__init__()
    '''
        Upconvolution, concatenation, then 3 convolutions;
        channels = c1 -> c2
    '''
    # Fixed bilinear upsampling: the transposed convolution is initialized
    # with bilinear weights and excluded from training.
    self.upconv = nn.Conv3DTranspose(c1, kernel_size=2, strides=1, padding=0,
                                     use_bias=False,
                                     weight_initializer=init.Bilinear())
    self.upconv.collect_params().setattr('grad_req', 'null')
    # self.upBN = nn.BatchNorm()

    self.conv1 = BasicConv(c1)
    self.conv2 = BasicConv(c2)
    self.conv3 = BasicConv(c2)
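Setting grad_req to 'null' through collect_params() freezes the bilinear weights, so upconv acts as fixed interpolation. A standalone check of the pattern:

from mxnet import init
from mxnet.gluon import nn

up = nn.Conv3DTranspose(16, kernel_size=2, use_bias=False,
                        weight_initializer=init.Bilinear())
up.collect_params().setattr('grad_req', 'null')   # exclude from updates
for name, p in up.collect_params().items():
    print(name, p.grad_req)                       # -> 'null'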
Example #8
def __init__(self,
             kernel,
             n_layers,
             feature_size,
             device=None,
             last=True,
             connection='dense'):
    # kernel: the 2-D kernel size.
    super(D3, self).__init__()

    self.n_layers = n_layers
    self.connection = connection
    self.d3 = []
    self.c = feature_size
    self.k = 1
    c = self.c
    k = self.k
    self.kd2 = kernel

    with self.name_scope():
        self.activation = nn.Activation('relu')
        self.tanh = nn.Activation('tanh')
        self.sigmoid = nn.Activation('sigmoid')
        self.relu = nn.Activation('relu')
        # self.norm = nn.BatchNorm(axis=1)
        self.fc = nn.Dense(1, flatten=False)
        self.dropout = nn.Dropout(0.5)

        self.encoder = []
        self.decoder = []
        self.enorm = []
        self.dnorm = []

        for l in range(n_layers):
            c2 = int(c / 2)
            c4 = int(c / 4)
            stride = 1       # if l > 0 else 15
            stride_de = 1    # if l > 0 else 15
            dilation = 1     # 2 ** l
            dilation_de = 1  # 2 ** (l + n_layers)
            g = 1
            ks = 3           # if l != 0 else stride
            ks_de = 3

            # Temporal-only (ks, 1, 1) encoder convolution.
            self.encoder.append(
                nn.Conv3D(c2,
                          kernel_size=[ks, 1, 1],
                          strides=[stride, 1, 1],
                          padding=[dilation, 0, 0],
                          dilation=[dilation, 1, 1],
                          groups=g))

            # The first decoder layer (prepended last) outputs c4 channels.
            channel = c4 if l == 0 else c2
            self.decoder.insert(
                0,
                nn.Conv3DTranspose(channel,
                                   kernel_size=[ks_de, 1, 1],
                                   strides=[stride_de, 1, 1],
                                   padding=[dilation_de, 0, 0],
                                   dilation=[dilation_de, 1, 1],
                                   groups=g))

            # Layers held in plain lists must be registered explicitly.
            self.register_child(self.encoder[-1])
            self.register_child(self.decoder[0])