def __init__(self, bn, channels, out_channels=None):
    """Build a pre-activation residual unit of two 1x1 convolutions.

    Args:
        bn: norm selector forwarded to ``get_norm_layer``.
        channels: number of input channels.
        out_channels: number of output channels; falls back to ``channels``
            when falsy (None/0).
    """
    nn.Module.__init__(self)
    out_channels = out_channels or channels
    # A 1x1 projection shortcut is only required when the channel count changes.
    self.shortcut = (
        nn.Conv2d(channels, out_channels, kernel_size=1)
        if out_channels != channels
        else None
    )
    stages = [
        nn.InstanceNorm2d(channels, eps=1e-3),
        get_norm_layer(bn, 2, in_size=channels),
        nn.ReLU(),
        nn.Conv2d(channels, out_channels, kernel_size=1),
        nn.InstanceNorm2d(out_channels, eps=1e-3),
        get_norm_layer(bn, 2, in_size=out_channels),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
    ]
    self.conv = nn.Sequential(*stages)
def __init__(self, bn, in_channel, output_points):
    """Norm -> ReLU -> 1x1 conv head mapping features to ``output_points`` channels.

    Args:
        bn: norm selector forwarded to ``get_norm_layer``.
        in_channel: number of input feature channels.
        output_points: number of output channels (stored for later use).
    """
    nn.Module.__init__(self)
    self.output_points = output_points
    head = [
        nn.InstanceNorm2d(in_channel, eps=1e-3),
        get_norm_layer(bn, 2, in_size=in_channel),
        nn.ReLU(),
        nn.Conv2d(in_channel, output_points, kernel_size=1),
    ]
    self.conv = nn.Sequential(*head)
def __init__(self, in_channel, out_channel, hidden_unit=(8, 8), bn=None):
    """Build the WeightNet MLP as a stack of 1x1 convolutions.

    Fix: the default for ``hidden_unit`` was a mutable list ``[8, 8]``;
    replaced with an immutable tuple. Behavior is identical (only ``len``
    and indexing are used), and callers passing lists are unaffected.

    Args:
        in_channel: number of input channels.
        out_channel: number of output channels.
        hidden_unit: widths of the hidden 1x1-conv layers; ``None`` or an
            empty sequence collapses the net to a single conv layer.
        bn: norm selector forwarded to ``get_norm_layer``; ``self.bn``
            records whether one was supplied.
    """
    super(WeightNet, self).__init__()
    self.bn = bn is not None
    self.mlp_convs = nn.ModuleList()
    self.mlp_bns = nn.ModuleList()
    if not hidden_unit:  # None or empty -> direct in->out mapping
        self.mlp_convs.append(nn.Conv2d(in_channel, out_channel, 1))
        self.mlp_bns.append(get_norm_layer(bn, 2, in_size=out_channel))
    else:
        # Input layer, hidden-to-hidden layers, then the output layer;
        # each conv is paired with a norm layer at the same index.
        self.mlp_convs.append(nn.Conv2d(in_channel, hidden_unit[0], 1))
        self.mlp_bns.append(get_norm_layer(bn, 2, in_size=hidden_unit[0]))
        for i in range(1, len(hidden_unit)):
            self.mlp_convs.append(nn.Conv2d(hidden_unit[i - 1], hidden_unit[i], 1))
            self.mlp_bns.append(get_norm_layer(bn, 2, in_size=hidden_unit[i]))
        self.mlp_convs.append(nn.Conv2d(hidden_unit[-1], out_channel, 1))
        self.mlp_bns.append(get_norm_layer(bn, 2, in_size=out_channel))
def __init__(self, nsample, in_channel, out_channel, weightnet=16, bn=None, use_leaky=True, input_tp=True):
    """Set up a PointConv layer: a WeightNet over 3-D offsets plus a linear projection.

    Args:
        nsample: number of neighbors sampled per point.
        in_channel: number of input feature channels.
        out_channel: number of output feature channels.
        weightnet: output width of the internal WeightNet.
        bn: norm selector; when given, a norm layer follows the linear projection.
        use_leaky: choose LeakyReLU(0.1) over plain ReLU.
        input_tp: flag stored for use by the forward pass.
    """
    super(PointConv, self).__init__()
    self.bn = bn is not None
    self.nsample = nsample
    self.weightnet = WeightNet(3, weightnet)
    self.linear = nn.Linear(weightnet * in_channel, out_channel)
    if self.bn:
        self.bn_linear = get_norm_layer(bn, 1, in_size=out_channel)
    self.input_tp = input_tp
    if use_leaky:
        self.relu = nn.LeakyReLU(0.1, inplace=True)
    else:
        self.relu = nn.ReLU(inplace=True)
def __init__(self, bn, channels, points, out_channels=None):
    """Build a three-stage block with a spatial-correlation layer in the middle.

    Args:
        bn: norm selector forwarded to ``get_norm_layer``.
        channels: number of input channels.
        points: number of points (the axis mixed by the correlation stage).
        out_channels: number of output channels; falls back to ``channels``
            when falsy (None/0).
    """
    nn.Module.__init__(self)
    out_channels = out_channels or channels
    # A 1x1 projection shortcut only when the channel count changes.
    self.short_cut = (
        nn.Conv2d(channels, out_channels, kernel_size=1)
        if out_channels != channels
        else None
    )
    # Stage 1: channel mixing, then swap axes 1 and 2 (b*c*n*1 layout).
    stage1 = [
        nn.InstanceNorm2d(channels, eps=1e-3),
        get_norm_layer(bn, 2, in_size=channels),
        nn.ReLU(),
        nn.Conv2d(channels, out_channels, kernel_size=1),
        Transpose(1, 2),
    ]
    self.conv1 = nn.Sequential(*stage1)
    # Stage 2: spatial correlation layer — mixes across the point dimension.
    stage2 = [
        get_norm_layer(bn, 2, in_size=points),
        nn.ReLU(),
        nn.Conv2d(points, points, kernel_size=1),
    ]
    self.conv2 = nn.Sequential(*stage2)
    # Stage 3: swap axes back, then mix channels again.
    stage3 = [
        Transpose(1, 2),
        nn.InstanceNorm2d(out_channels, eps=1e-3),
        get_norm_layer(bn, 2, in_size=out_channels),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
    ]
    self.conv3 = nn.Sequential(*stage3)
def __init__(self, nsample, in_channel, mlp, bn=None, use_leaky=True):
    """Set up the PointConvFlow MLP: a chain of 1x1 convs with optional norms.

    Args:
        nsample: number of neighbors sampled per point.
        in_channel: number of input feature channels.
        mlp: iterable of output widths, one per conv layer.
        bn: norm selector; when given, each conv gets a paired norm layer.
        use_leaky: choose LeakyReLU(0.1) over plain ReLU.
    """
    super(PointConvFlow, self).__init__()
    self.nsample = nsample
    self.bn = bn is not None
    self.mlp_convs = nn.ModuleList()
    if self.bn:
        self.mlp_bns = nn.ModuleList()
    prev_width = in_channel
    for width in mlp:
        self.mlp_convs.append(nn.Conv2d(prev_width, width, 1))
        if self.bn:
            self.mlp_bns.append(get_norm_layer(bn, 2, in_size=width))
        prev_width = width
    self.relu = nn.LeakyReLU(0.1, inplace=True) if use_leaky else nn.ReLU(inplace=True)