Example 1
def batch_relu_conv3d(in_channels,
                      out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bn3d=True,
                      conv_param_attr=nn.initializer.KaimingNormal(),
                      conv_bias_attr=False,
                      bn_param_attr=None,
                      bn_bias_attr=None):
    if bn3d:
        # 3D batchnorm + relu + convolutional layer
        return nn.Sequential(
            nn.BatchNorm3D(num_features=in_channels), nn.ReLU(),
            nn.Conv3D(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      weight_attr=conv_param_attr,
                      bias_attr=conv_bias_attr))
    else:
        # 3D relu + convolutional layer
        return nn.Sequential(
            nn.ReLU(),
            nn.Conv3D(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      weight_attr=conv_param_attr,
                      bias_attr=conv_bias_attr))
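
A minimal usage sketch of the block above, assuming `paddle` is installed and `nn` is `paddle.nn`; the channel counts and input shape are chosen purely for illustration:

import paddle
import paddle.nn as nn

# BN3D -> ReLU -> Conv3D block with hypothetical channel counts.
block = batch_relu_conv3d(in_channels=4, out_channels=8)

x = paddle.randn([1, 4, 8, 32, 32])   # [N, C, D, H, W]
y = block(x)
print(y.shape)  # [1, 8, 8, 32, 32]: kernel 3, stride 1, padding 1 keeps D/H/W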
Example 2
 def __init__(self, name_scope='VoxNet_', num_classes=10):
     super(VoxNet, self).__init__()
     self.backbone = nn.Sequential(nn.Conv3D(1, 32, 5, 2), nn.BatchNorm(32),
                                   nn.LeakyReLU(), nn.Conv3D(32, 32, 3, 1),
                                   nn.MaxPool3D(2, 2, 0))
     self.head = nn.Sequential(nn.Linear(32 * 6 * 6 * 6, 128),
                               nn.LeakyReLU(), nn.Dropout(0.2),
                               nn.Linear(128, num_classes))
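
Only the constructor is shown; a forward pass would presumably flatten the backbone output before the classification head. A minimal sketch, assuming 32x32x32 single-channel voxel grids (the size at which 32*6*6*6 matches the first Linear layer) and that `paddle` is imported:

def forward(self, x):
    # x: [N, 1, 32, 32, 32] voxel grid (assumed input size)
    feat = self.backbone(x)                    # -> [N, 32, 6, 6, 6]
    feat = paddle.flatten(feat, start_axis=1)  # -> [N, 32 * 6 * 6 * 6]
    return self.head(feat)                     # -> [N, num_classes] logits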
Example 3
    def __init__(self):
        super(NetworkR, self).__init__()

        self.layers = nn.Sequential(
            nn.Pad3D((1, 1, 1, 1, 1, 1), mode='replicate'),
            TempConv(1,
                     64,
                     kernel_size=(3, 3, 3),
                     stride=(1, 2, 2),
                     padding=(0, 0, 0)),
            TempConv(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(128, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(128,
                     256,
                     kernel_size=(3, 3, 3),
                     stride=(1, 2, 2),
                     padding=(1, 1, 1)),
            TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            Upsample(256, 128),
            TempConv(128, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            TempConv(64, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            Upsample(64, 16),
            nn.Conv3D(16,
                      1,
                      kernel_size=(3, 3, 3),
                      stride=(1, 1, 1),
                      padding=(1, 1, 1)))
Example 4
 def __init__(self, in_planes, out_planes, scale_factor=(1, 2, 2)):
     super(Upsample, self).__init__()
     self.scale_factor = scale_factor
     self.conv3d = nn.Conv3D(in_planes,
                             out_planes,
                             kernel_size=(3, 3, 3),
                             stride=(1, 1, 1),
                             padding=(1, 1, 1))
     self.bn = nn.BatchNorm(out_planes)
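
Only the constructor appears in the excerpt; a plausible `forward` interpolates by `self.scale_factor` and then applies the convolution and batch norm. The sketch below assumes trilinear interpolation and an ELU activation, neither of which is confirmed by the snippet:

def forward(self, x):
    # Upsample along (D, H, W) by self.scale_factor, then Conv3D -> BN -> activation.
    x = paddle.nn.functional.interpolate(x,
                                         scale_factor=self.scale_factor,
                                         mode='trilinear',
                                         align_corners=False,
                                         data_format='NCDHW')
    return paddle.nn.functional.elu(self.bn(self.conv3d(x)))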
Example 5
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=(1, 3, 3),
              stride=(1, 1, 1),
              padding=(0, 1, 1)):
     super(TempConv, self).__init__()
     self.conv3d = nn.Conv3D(in_planes,
                             out_planes,
                             kernel_size=kernel_size,
                             stride=stride,
                             padding=padding)
     self.bn = nn.BatchNorm(out_planes)
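
As with `Upsample`, the forward pass is not shown; presumably it is just convolution, batch norm, and a nonlinearity, e.g. (the ELU is an assumption):

def forward(self, x):
    # Conv3D -> BatchNorm -> activation; the choice of ELU is a guess.
    return paddle.nn.functional.elu(self.bn(self.conv3d(x)))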
Example 6
 def __init__(self, in_planes_s, in_planes_r):
     """
     Parameters
     ----------
         in_planes_s: int
             Number of input source feature vector channels.
         in_planes_r: int
             Number of input reference feature vector channels.
     """
     super(SourceReferenceAttention, self).__init__()
     self.query_conv = nn.Conv3D(in_channels=in_planes_s,
                                 out_channels=in_planes_s // 8,
                                 kernel_size=1)
     self.key_conv = nn.Conv3D(in_channels=in_planes_r,
                               out_channels=in_planes_r // 8,
                               kernel_size=1)
     self.value_conv = nn.Conv3D(in_channels=in_planes_r,
                                 out_channels=in_planes_r,
                                 kernel_size=1)
     self.gamma = self.create_parameter(
         shape=[1],
         dtype=self.query_conv.weight.dtype,
         default_initializer=nn.initializer.Constant(0.0))
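
The constructor sets up 1x1x1 query/key/value convolutions and a learnable `gamma` initialized to zero, the usual self-attention recipe. The forward pass is not part of the excerpt; the sketch below shows how such a module is typically wired, assuming `in_planes_s // 8 == in_planes_r // 8` and matching channel counts for the residual add (both hold in Example 9, where every instance uses 512 and 512):

def forward(self, source, reference):
    # source: [B, C_s, T, H, W], reference: [B, C_r, T', H', W']  (assumed layout)
    B, _, T, H, W = source.shape
    q = paddle.flatten(self.query_conv(source), start_axis=2)     # [B, C_s//8, T*H*W]
    k = paddle.flatten(self.key_conv(reference), start_axis=2)    # [B, C_r//8, T'*H'*W']
    v = paddle.flatten(self.value_conv(reference), start_axis=2)  # [B, C_r,    T'*H'*W']
    attn = paddle.nn.functional.softmax(
        paddle.bmm(paddle.transpose(q, [0, 2, 1]), k), axis=-1)   # [B, THW, T'H'W']
    out = paddle.bmm(v, paddle.transpose(attn, [0, 2, 1]))        # [B, C_r, THW]
    out = paddle.reshape(out, [B, -1, T, H, W])
    # gamma starts at 0, so the block is initially an identity on `source`.
    return self.gamma * out + source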
Example 7
 def paddle_nn_layer(self):
     # `dg` is presumably the legacy dygraph module (paddle.fluid.dygraph);
     # paddle.to_tensor, used in the next example, is the current equivalent.
     x_var = dg.to_variable(self.input)
     conv = nn.Conv3D(self.num_channels,
                      self.num_filters,
                      self.filter_size,
                      padding=self.padding,
                      stride=self.stride,
                      dilation=self.dilation,
                      groups=self.groups,
                      data_format=self.data_format)
     conv.weight.set_value(self.weight)
     if not self.no_bias:
         conv.bias.set_value(self.bias)
     y_var = conv(x_var)
     y_np = y_var.numpy()
     return y_np
Example 8
 def paddle_nn_layer(self):
     x_var = paddle.to_tensor(self.input)
     x_var.stop_gradient = False
     conv = nn.Conv3D(self.num_channels,
                      self.num_filters,
                      self.filter_size,
                      padding=self.padding,
                      stride=self.stride,
                      dilation=self.dilation,
                      groups=self.groups,
                      data_format=self.data_format)
     conv.weight.set_value(self.weight)
     if not self.no_bias:
         conv.bias.set_value(self.bias)
     y_var = conv(x_var)
     y_var.backward()
     y_np = y_var.numpy()
     t1 = x_var.gradient()
     return y_np, t1
Example 9
    def __init__(self):
        super(NetworkC, self).__init__()

        self.down1 = nn.Sequential(
            nn.Pad3D((1, 1, 1, 1, 0, 0), mode='replicate'),
            TempConv(1, 64, stride=(1, 2, 2), padding=(0, 0, 0)),
            TempConv(64, 128), TempConv(128, 128),
            TempConv(128, 256, stride=(1, 2, 2)), TempConv(256, 256),
            TempConv(256, 256), TempConv(256, 512, stride=(1, 2, 2)),
            TempConv(512, 512), TempConv(512, 512))
        self.flat = nn.Sequential(TempConv(512, 512), TempConv(512, 512))
        self.down2 = nn.Sequential(
            TempConv(512, 512, stride=(1, 2, 2)),
            TempConv(512, 512),
        )
        self.stattn1 = SourceReferenceAttention(
            512, 512)  # Source-Reference Attention
        self.stattn2 = SourceReferenceAttention(
            512, 512)  # Source-Reference Attention
        self.selfattn1 = SourceReferenceAttention(512, 512)  # Self Attention
        self.conv1 = TempConv(512, 512)
        self.up1 = UpsampleConcat(512, 512, 512)  # 1/8
        self.selfattn2 = SourceReferenceAttention(512, 512)  # Self Attention
        self.conv2 = TempConv(512,
                              256,
                              kernel_size=(3, 3, 3),
                              stride=(1, 1, 1),
                              padding=(1, 1, 1))
        self.up2 = nn.Sequential(
            Upsample(256, 128),  # 1/4
            TempConv(128,
                     64,
                     kernel_size=(3, 3, 3),
                     stride=(1, 1, 1),
                     padding=(1, 1, 1)))
        self.up3 = nn.Sequential(
            Upsample(64, 32),  # 1/2
            TempConv(32,
                     16,
                     kernel_size=(3, 3, 3),
                     stride=(1, 1, 1),
                     padding=(1, 1, 1)))
        self.up4 = nn.Sequential(
            Upsample(16, 8),  # 1/1
            nn.Conv3D(8,
                      2,
                      kernel_size=(3, 3, 3),
                      stride=(1, 1, 1),
                      padding=(1, 1, 1)))
        self.reffeatnet1 = nn.Sequential(
            TempConv(3, 64, stride=(1, 2, 2)),
            TempConv(64, 128),
            TempConv(128, 128),
            TempConv(128, 256, stride=(1, 2, 2)),
            TempConv(256, 256),
            TempConv(256, 256),
            TempConv(256, 512, stride=(1, 2, 2)),
            TempConv(512, 512),
            TempConv(512, 512),
        )
        self.reffeatnet2 = nn.Sequential(
            TempConv(512, 512, stride=(1, 2, 2)),
            TempConv(512, 512),
            TempConv(512, 512),
        )
Example 10
    def func_test_layer_str(self):
        module = nn.ELU(0.2)
        self.assertEqual(str(module), 'ELU(alpha=0.2)')

        module = nn.CELU(0.2)
        self.assertEqual(str(module), 'CELU(alpha=0.2)')

        module = nn.GELU(True)
        self.assertEqual(str(module), 'GELU(approximate=True)')

        module = nn.Hardshrink()
        self.assertEqual(str(module), 'Hardshrink(threshold=0.5)')

        module = nn.Hardswish(name="Hardswish")
        self.assertEqual(str(module), 'Hardswish(name=Hardswish)')

        module = nn.Tanh(name="Tanh")
        self.assertEqual(str(module), 'Tanh(name=Tanh)')

        module = nn.Hardtanh(name="Hardtanh")
        self.assertEqual(str(module),
                         'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)')

        module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW")
        self.assertEqual(
            str(module),
            'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)'
        )

        module = nn.ReLU()
        self.assertEqual(str(module), 'ReLU()')

        module = nn.ReLU6()
        self.assertEqual(str(module), 'ReLU6()')

        module = nn.SELU()
        self.assertEqual(
            str(module),
            'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)')

        module = nn.LeakyReLU()
        self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)')

        module = nn.Sigmoid()
        self.assertEqual(str(module), 'Sigmoid()')

        module = nn.Hardsigmoid()
        self.assertEqual(str(module), 'Hardsigmoid()')

        module = nn.Softplus()
        self.assertEqual(str(module), 'Softplus(beta=1, threshold=20)')

        module = nn.Softshrink()
        self.assertEqual(str(module), 'Softshrink(threshold=0.5)')

        module = nn.Softsign()
        self.assertEqual(str(module), 'Softsign()')

        module = nn.Swish()
        self.assertEqual(str(module), 'Swish()')

        module = nn.Tanhshrink()
        self.assertEqual(str(module), 'Tanhshrink()')

        module = nn.ThresholdedReLU()
        self.assertEqual(str(module), 'ThresholdedReLU(threshold=1.0)')

        module = nn.LogSigmoid()
        self.assertEqual(str(module), 'LogSigmoid()')

        module = nn.Softmax()
        self.assertEqual(str(module), 'Softmax(axis=-1)')

        module = nn.LogSoftmax()
        self.assertEqual(str(module), 'LogSoftmax(axis=-1)')

        module = nn.Maxout(groups=2)
        self.assertEqual(str(module), 'Maxout(groups=2, axis=1)')

        module = nn.Linear(2, 4, name='linear')
        self.assertEqual(
            str(module),
            'Linear(in_features=2, out_features=4, dtype=float32, name=linear)'
        )

        module = nn.Upsample(size=[12, 12])
        self.assertEqual(
            str(module),
            'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)'
        )

        module = nn.UpsamplingNearest2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)')

        module = nn.UpsamplingBilinear2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)')

        module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000)
        self.assertEqual(
            str(module),
            'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)'
        )

        module = nn.Dropout(p=0.5)
        self.assertEqual(str(module),
                         'Dropout(p=0.5, axis=None, mode=upscale_in_train)')

        module = nn.Dropout2D(p=0.5)
        self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)')

        module = nn.Dropout3D(p=0.5)
        self.assertEqual(str(module), 'Dropout3D(p=0.5, data_format=NCDHW)')

        module = nn.AlphaDropout(p=0.5)
        self.assertEqual(str(module), 'AlphaDropout(p=0.5)')

        module = nn.Pad1D(padding=[1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)')

        module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)'
        )

        module = nn.ZeroPad2D(padding=[1, 0, 1, 2])
        self.assertEqual(str(module),
                         'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)')

        module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant')
        self.assertEqual(
            str(module),
            'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)'
        )

        module = nn.CosineSimilarity(axis=0)
        self.assertEqual(str(module), 'CosineSimilarity(axis=0, eps=1e-08)')

        module = nn.Embedding(10, 3, sparse=True)
        self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)')

        module = nn.Conv1D(3, 2, 3)
        self.assertEqual(str(module),
                         'Conv1D(3, 2, kernel_size=[3], data_format=NCL)')

        module = nn.Conv1DTranspose(2, 1, 2)
        self.assertEqual(
            str(module),
            'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)')

        module = nn.Conv2D(4, 6, (3, 3))
        self.assertEqual(str(module),
                         'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv2DTranspose(4, 6, (3, 3))
        self.assertEqual(
            str(module),
            'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv3D(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.Conv3DTranspose(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.PairwiseDistance()
        self.assertEqual(str(module), 'PairwiseDistance(p=2.0)')

        module = nn.InstanceNorm1D(2)
        self.assertEqual(str(module),
                         'InstanceNorm1D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm2D(2)
        self.assertEqual(str(module),
                         'InstanceNorm2D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm3D(2)
        self.assertEqual(str(module),
                         'InstanceNorm3D(num_features=2, epsilon=1e-05)')

        module = nn.GroupNorm(num_channels=6, num_groups=6)
        self.assertEqual(
            str(module),
            'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)')

        module = nn.LayerNorm([2, 2, 3])
        self.assertEqual(
            str(module),
            'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)')

        module = nn.BatchNorm1D(1)
        self.assertEqual(
            str(module),
            'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)'
        )

        module = nn.BatchNorm2D(1)
        self.assertEqual(
            str(module),
            'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)')

        module = nn.BatchNorm3D(1)
        self.assertEqual(
            str(module),
            'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)'
        )

        module = nn.SyncBatchNorm(2)
        self.assertEqual(
            str(module),
            'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)')

        module = nn.LocalResponseNorm(size=5)
        self.assertEqual(
            str(module),
            'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)')

        module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.AdaptiveAvgPool1D(output_size=16)
        self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)')

        module = nn.AdaptiveAvgPool2D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool2D(output_size=3)')

        module = nn.AdaptiveAvgPool3D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)')

        module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True)
        self.assertEqual(
            str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)')

        module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool2D(output_size=3, return_mask=True)')

        module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool3D(output_size=3, return_mask=True)')

        module = nn.SimpleRNNCell(16, 32)
        self.assertEqual(str(module), 'SimpleRNNCell(16, 32)')

        module = nn.LSTMCell(16, 32)
        self.assertEqual(str(module), 'LSTMCell(16, 32)')

        module = nn.GRUCell(16, 32)
        self.assertEqual(str(module), 'GRUCell(16, 32)')

        module = nn.PixelShuffle(3)
        self.assertEqual(str(module), 'PixelShuffle(upscale_factor=3)')

        module = nn.SimpleRNN(16, 32, 2)
        self.assertEqual(
            str(module),
            'SimpleRNN(16, 32, num_layers=2\n  (0): RNN(\n    (cell): SimpleRNNCell(16, 32)\n  )\n  (1): RNN(\n    (cell): SimpleRNNCell(32, 32)\n  )\n)'
        )

        module = nn.LSTM(16, 32, 2)
        self.assertEqual(
            str(module),
            'LSTM(16, 32, num_layers=2\n  (0): RNN(\n    (cell): LSTMCell(16, 32)\n  )\n  (1): RNN(\n    (cell): LSTMCell(32, 32)\n  )\n)'
        )

        module = nn.GRU(16, 32, 2)
        self.assertEqual(
            str(module),
            'GRU(16, 32, num_layers=2\n  (0): RNN(\n    (cell): GRUCell(16, 32)\n  )\n  (1): RNN(\n    (cell): GRUCell(32, 32)\n  )\n)'
        )

        module1 = nn.Sequential(
            ('conv1', nn.Conv2D(1, 20, 5)), ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2D(20, 64, 5)), ('relu2', nn.ReLU()))
        self.assertEqual(
            str(module1),
            'Sequential(\n  '\
            '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu1): ReLU()\n  '\
            '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu2): ReLU()\n)'
        )

        module2 = nn.Sequential(
            nn.Conv3DTranspose(4, 6, (3, 3, 3)),
            nn.AvgPool3D(kernel_size=2, stride=2, padding=0),
            nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)),
            nn.MaxPool3D(kernel_size=2, stride=2, padding=0), nn.GELU(True))
        self.assertEqual(
            str(module2),
            'Sequential(\n  '\
            '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(2): Tanh(name=Tanh)\n  '\
            '(3): Sequential(\n    (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n    (relu1): ReLU()\n'\
            '    (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n    (relu2): ReLU()\n  )\n  '\
            '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(6): GELU(approximate=True)\n)'
        )