Example #1
 def __init__(self, in_channels, out_channels, kernel_size=1, bias=True):
     self.kernel_size = kernel_size
     in_channels = in_channels
     out_channels = _single(out_channels)
     modules = []
     for channels in out_channels:
         modules.append(nn.Conv1d(in_channels, channels, 1, bias=bias))
         modules.append(nn.BatchNorm1d(channels))
         modules.append(nn.ReLU(True))
         in_channels = channels
     super(FeaturePropagation, self).__init__(*modules)
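A minimal shape sketch of what Example #1 builds, assuming _single behaves like torch.nn.modules.utils._single (a scalar becomes a 1-tuple, a sequence passes through) and the class subclasses nn.Sequential; the concrete channel widths below are placeholders:

 import torch
 import torch.nn as nn

 # Equivalent layer stack for in_channels=256, out_channels=(256, 128): point-wise
 # Conv1d layers acting as a shared MLP over per-point feature columns.
 fp = nn.Sequential(
     nn.Conv1d(256, 256, 1), nn.BatchNorm1d(256), nn.ReLU(True),
     nn.Conv1d(256, 128, 1), nn.BatchNorm1d(128), nn.ReLU(True),
 )
 x = torch.randn(8, 256, 1024)   # (batch, feature channels, points)
 print(fp(x).shape)              # torch.Size([8, 128, 1024])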
Example #2
 def __init__(self, in_channels, out_channels, bias=True):
     self.in_channels = in_channels
     self.out_channels = _single(out_channels)
     self.bias = bias
     in_channels = self.in_channels
     modules = []
     for channels in self.out_channels:
         modules.append(nn.Conv1d(in_channels, channels, 1, bias=self.bias))
         modules.append(nn.ReLU(True))
         in_channels = channels
     modules.append(nn.Conv1d(in_channels, 3, 1, bias=self.bias))
     super(Folding3d, self).__init__(*modules)
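The same pattern, sketched for Example #2 with placeholder widths; the trailing Conv1d maps to 3 channels, which in a folding-style decoder is commonly read as reconstructed xyz coordinates (an interpretation, not confirmed by the snippet):

 import torch
 import torch.nn as nn

 # Equivalent stack for in_channels=512, out_channels=(256, 128): hidden point-wise
 # layers with ReLU, then a final Conv1d down to 3 output channels.
 fold = nn.Sequential(
     nn.Conv1d(512, 256, 1), nn.ReLU(True),
     nn.Conv1d(256, 128, 1), nn.ReLU(True),
     nn.Conv1d(128, 3, 1),
 )
 x = torch.randn(8, 512, 2048)   # (batch, feature channels, points)
 print(fold(x).shape)            # torch.Size([8, 3, 2048])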
Example #3
 def __init__(self, in_channels, out_channels, kernel_size=1, bias=True):
     self.in_channels = in_channels
     self.kernel_size = kernel_size
     in_channels = in_channels * 2
     out_channels = _single(out_channels)
     modules = []
     for channels in out_channels:
         modules.append(nn.Conv2d(in_channels, channels, 1, bias=bias))
         modules.append(nn.BatchNorm2d(channels))
         modules.append(nn.LeakyReLU(0.2, True))
         in_channels = channels
     modules.append(nn.MaxPool2d([kernel_size, 1]))
     super(EdgeConv, self).__init__(*modules)
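A shape sketch for Example #3, assuming the forward input carries per-edge features laid out as (batch, 2*in_channels, kernel_size, points), e.g. each neighbor feature concatenated with its center feature as in DGCNN-style edge convolution, so that the doubled input width and the final MaxPool2d([kernel_size, 1]) over the neighbor axis line up:

 import torch
 import torch.nn as nn

 B, C, k, N = 4, 64, 20, 1024                 # batch, input channels, neighbors, points
 edge_conv = nn.Sequential(
     nn.Conv2d(2 * C, 128, 1), nn.BatchNorm2d(128), nn.LeakyReLU(0.2, True),
     nn.MaxPool2d([k, 1]),                    # max over the k neighbors of each point
 )
 edge_features = torch.randn(B, 2 * C, k, N)  # assumed layout: (batch, 2*C, neighbors, points)
 print(edge_conv(edge_features).shape)        # torch.Size([4, 128, 1, 1024])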
Example #4
 def __init__(self, in_channels, out_channels, kernel_size=1, bias=True):
     self.in_channels = in_channels
     self.out_channels = _single(out_channels)
     self.kernel_size = kernel_size
     self.bias = bias
     in_channels = self.in_channels
     modules = []
     for channels in self.out_channels:
         modules.append(nn.Conv1d(in_channels, channels, 1, bias=self.bias))
         modules.append(nn.BatchNorm1d(channels))
         modules.append(nn.ReLU(True))
         in_channels = channels
     super(SetDeconv, self).__init__(*modules)
Example #5
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=1,
              bandwidth=1.0,
              bias=True):
     super(PointDeconv, self).__init__()
     self.in_channels = in_channels
     self.out_channels = _single(out_channels)
     self.kernel_size = kernel_size
     self.bandwidth = bandwidth
     self.bias = bias
     self.scale = nn.Sequential(
         nn.Conv1d(1, 8, 1, bias=self.bias),
         nn.BatchNorm1d(8),
         nn.ReLU(True),
         nn.Conv1d(8, 8, 1, bias=self.bias),
         nn.BatchNorm1d(8),
         nn.ReLU(True),
         nn.Conv1d(8, 1, 1, bias=self.bias),
         nn.Sigmoid(),
     )
     self.weight = nn.Sequential(
         nn.Conv2d(3, 8, 1, bias=self.bias),
         nn.BatchNorm2d(8),
         nn.ReLU(True),
         nn.Conv2d(8, 8, 1, bias=self.bias),
         nn.BatchNorm2d(8),
         nn.ReLU(True),
         nn.Conv2d(8, 16, 1, bias=self.bias),
     )
     in_channels = self.in_channels
     modules = []
     for channels in self.out_channels[:-1]:
         modules.append(nn.Conv2d(in_channels, channels, 1, bias=self.bias))
         modules.append(nn.BatchNorm2d(channels))
         modules.append(nn.ReLU(True))
         in_channels = channels
     self.mlp = nn.Sequential(*modules)
     self.lin = nn.Sequential(
         nn.Conv2d(in_channels,
                   self.out_channels[-1], [16, 1],
                   bias=self.bias),
         nn.BatchNorm2d(self.out_channels[-1]),
         nn.ReLU(True),
     )
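A shape sketch for the submodules built in Example #5, assuming the scale branch consumes a per-point scalar (e.g. a density estimate) laid out as (batch, 1, points) and the weight branch consumes xyz offsets laid out as (batch, 3, neighbors, points); the forward pass is not shown, so how self.mlp, the 16 weight channels, and the [16, 1] kernel of self.lin are combined is an assumption:

 import torch
 import torch.nn as nn

 B, k, N = 4, 8, 1024                             # batch, neighbors, points
 # scale branch: 1 -> 8 -> 8 -> 1 channels, squashed to (0, 1) by the Sigmoid.
 scale = nn.Sequential(
     nn.Conv1d(1, 8, 1), nn.BatchNorm1d(8), nn.ReLU(True),
     nn.Conv1d(8, 8, 1), nn.BatchNorm1d(8), nn.ReLU(True),
     nn.Conv1d(8, 1, 1), nn.Sigmoid(),
 )
 # weight branch: 3 -> 8 -> 8 -> 16 channels, one 16-vector per neighbor per point.
 weight = nn.Sequential(
     nn.Conv2d(3, 8, 1), nn.BatchNorm2d(8), nn.ReLU(True),
     nn.Conv2d(8, 8, 1), nn.BatchNorm2d(8), nn.ReLU(True),
     nn.Conv2d(8, 16, 1),
 )
 print(scale(torch.randn(B, 1, N)).shape)         # torch.Size([4, 1, 1024])
 print(weight(torch.randn(B, 3, k, N)).shape)     # torch.Size([4, 16, 8, 1024])
 # self.lin uses a [16, 1] kernel, so it collapses a height-16 axis to 1, matching
 # the 16 weight channels above (placeholder widths: 64 in, 128 out).
 lin = nn.Conv2d(64, 128, [16, 1])
 print(lin(torch.randn(B, 64, 16, N)).shape)      # torch.Size([4, 128, 1, 1024])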
Example #6
 def __init__(
     self,
     in_channels,
     out_channels,
     num_samples=1,
     kernel_size=1,
     radius=1.0,
     bias=True,
 ):
     self.kernel_size = kernel_size
     self.num_samples = num_samples
     self.radius = radius
     out_channels = _single(out_channels)
     modules = []
     for channels in out_channels:
         modules.append(nn.Conv2d(in_channels, channels, 1, bias=bias))
         modules.append(nn.BatchNorm2d(channels))
         modules.append(nn.ReLU(True))
         in_channels = channels
     modules.append(nn.AdaptiveMaxPool2d([1, None]))
     super(SetAbstraction, self).__init__(*modules)
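A shape sketch for Example #6, assuming grouped neighborhood features laid out as (batch, in_channels, samples per group, groups); the AdaptiveMaxPool2d([1, None]) collapses the sample axis while leaving the group axis untouched:

 import torch
 import torch.nn as nn

 B, C, k, M = 4, 64, 32, 512                   # batch, channels, samples per group, groups
 sa = nn.Sequential(
     nn.Conv2d(C, 128, 1), nn.BatchNorm2d(128), nn.ReLU(True),
     nn.AdaptiveMaxPool2d([1, None]),          # pool each group's samples down to one row
 )
 grouped = torch.randn(B, C, k, M)             # assumed layout: (batch, channels, samples, groups)
 print(sa(grouped).shape)                      # torch.Size([4, 128, 1, 512])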
Example #7
 def __init__(
     self,
     in_channels,
     out_channels,
     kernel_size=1,
     stride=1,
     radius=None,
     bias=True,
 ):
     self.in_channels = in_channels
     self.out_channels = _single(out_channels)
     self.kernel_size = kernel_size
     self.stride = stride
     self.radius = radius
     self.bias = bias
     in_channels = self.in_channels
     modules = []
     for channels in self.out_channels:
         modules.append(nn.Conv2d(in_channels, channels, 1, bias=self.bias))
         modules.append(nn.BatchNorm2d(channels))
         modules.append(nn.ReLU(True))
         in_channels = channels
     modules.append(nn.AdaptiveMaxPool2d([1, None]))
     super(SetConv, self).__init__(*modules)