Example #1
    def __init__(self,
                 dim=3,
                 z_dim=128,
                 c_dim=128,
                 hidden_size=256,
                 leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim

        # Submodules
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)

        if self.c_dim != 0:
            self.fc_c = nn.Linear(c_dim, hidden_size)
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        self.block0 = ResnetBlockConv1d(hidden_size)
        self.block1 = ResnetBlockConv1d(hidden_size)
        self.block2 = ResnetBlockConv1d(hidden_size)
        self.block3 = ResnetBlockConv1d(hidden_size)
        self.block4 = ResnetBlockConv1d(hidden_size)

        self.bn = nn.BatchNorm1d(hidden_size)

        self.fc_out = nn.Conv1d(hidden_size, 1, 1)

        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
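
All of these snippets build on a ResnetBlockConv1d module that is not shown on this page. A minimal sketch of such a block, assuming the pre-activation Conv1d residual design of the Occupancy Networks layers and the signature ResnetBlockConv1d(size_in, size_h=None, size_out=None) implied by the calls in Examples #2 and #5, could look like the following (the actual implementation behind these examples may differ):

import torch.nn as nn

class ResnetBlockConv1d(nn.Module):
    # Sketch of a 1D pre-activation residual block (assumed interface).
    def __init__(self, size_in, size_h=None, size_out=None):
        super().__init__()
        size_h = size_h if size_h is not None else size_in
        size_out = size_out if size_out is not None else size_in

        self.bn_0 = nn.BatchNorm1d(size_in)
        self.bn_1 = nn.BatchNorm1d(size_h)
        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()
        # Learned 1x1 shortcut only when the channel count changes.
        self.shortcut = (nn.Conv1d(size_in, size_out, 1, bias=False)
                         if size_in != size_out else None)

    def forward(self, x):
        dx = self.fc_0(self.actvn(self.bn_0(x)))
        dx = self.fc_1(self.actvn(self.bn_1(dx)))
        x_s = self.shortcut(x) if self.shortcut is not None else x
        return x_s + dx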
Example #2
    def __init__(self, dim=3, z_dim=0, c_dim=64, hidden_size=256, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim

        # Submodules
        if z_dim != 0:
            self.fc_z = nn.Linear(z_dim, hidden_size)

        assert c_dim != 0
        assert hidden_size % 2 == 0
        self.fc_c = nn.Conv1d(c_dim, hidden_size // 2, 1)
        self.fc_p = nn.Conv1d(dim, hidden_size // 2, 1)
        self.block0 = ResnetBlockConv1d(hidden_size, size_out=hidden_size)
        self.block1 = ResnetBlockConv1d(hidden_size)
        self.block2 = ResnetBlockConv1d(hidden_size)

        self.bn = nn.BatchNorm1d(hidden_size)

        self.fc_out = nn.Conv1d(hidden_size, 1, 1)

        if not leaky:
            self.actvn = nn.ReLU(inplace=True)
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2, True)
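
The forward pass is not part of this snippet. A plausible sketch, assuming p holds query points of shape (B, T, dim), c is a per-point feature map of shape (B, c_dim, T), and torch is imported, concatenates the two half-width feature maps before the residual blocks (names and shapes here are assumptions, not taken from the source):

    def forward(self, p, c):
        # p: (B, T, dim) query points; c: (B, c_dim, T) conditioning features (assumed shapes)
        net_p = self.fc_p(p.transpose(1, 2))    # (B, hidden_size // 2, T)
        net_c = self.fc_c(c)                    # (B, hidden_size // 2, T)
        net = torch.cat([net_p, net_c], dim=1)  # (B, hidden_size, T)
        net = self.block0(net)
        net = self.block1(net)
        net = self.block2(net)
        out = self.fc_out(self.actvn(self.bn(net)))
        return out.squeeze(1)                   # (B, T) raw occupancy logits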
Example #3
    def __init__(self, z_dim=128, c_dim=128, hidden_size=256, leaky=False):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim

        # Submodules
        if z_dim != 0:
            self.fc_z = tf.keras.layers.Dense(hidden_size)

        if self.c_dim != 0:
            self.fc_c = tf.keras.layers.Dense(hidden_size)

        self.fc_p = tf.keras.layers.Conv1D(hidden_size, 1)
        self.block0 = ResnetBlockConv1d(hidden_size)
        self.block1 = ResnetBlockConv1d(hidden_size)
        self.block2 = ResnetBlockConv1d(hidden_size)
        self.block3 = ResnetBlockConv1d(hidden_size)
        self.block4 = ResnetBlockConv1d(hidden_size)

        self.bn = tf.keras.layers.BatchNormalization(momentum=0.1,
                                                     epsilon=1e-05)
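        # Note: Keras BatchNormalization's 'momentum' is the decay of the moving
        # statistics, the inverse of PyTorch's convention; momentum=0.9 here would
        # correspond to PyTorch's BatchNorm1d default momentum=0.1.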

        self.fc_out = tf.keras.layers.Conv1D(1, 1)

        if not leaky:
            self.actvn = tf.keras.layers.ReLU()
        else:
            self.actvn = tf.keras.layers.LeakyReLU(0.2)
Example #4
    def __init__(self,
                 n_primitives,
                 in_channel,
                 param_dim,
                 hidden_size=128,
                 **kwargs):
        super().__init__()
        self.n_primitives = n_primitives
        self.param_dim = param_dim
        self.conv1d = nn.Conv1d(in_channel, hidden_size, 1)
        self.act = nn.LeakyReLU(0.2, True)
        self.out_conv1d = nn.Conv1d(hidden_size, n_primitives * param_dim, 1)
        self.bn = nn.BatchNorm1d(hidden_size)

        self.block0 = ResnetBlockConv1d(hidden_size)
        self.block1 = ResnetBlockConv1d(hidden_size)
        self.block2 = ResnetBlockConv1d(hidden_size)
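
This snippet also omits the forward pass. A sketch under the assumption that x is a per-point feature map of shape (B, in_channel, N) and that the head max-pools over points before predicting one parameter vector per primitive (the pooling step is an assumption, not taken from the source):

    def forward(self, x):
        # x: (B, in_channel, N) per-point features (assumed shape)
        net = self.act(self.conv1d(x))         # (B, hidden_size, N)
        net = self.block0(net)
        net = self.block1(net)
        net = self.block2(net)
        net = self.act(self.bn(net))
        net = net.max(dim=2, keepdim=True)[0]  # global max-pool over points
        out = self.out_conv1d(net)             # (B, n_primitives * param_dim, 1)
        return out.view(-1, self.n_primitives, self.param_dim)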
Example #5
    def __init__(self, c_dim=1024, global_feat=True, feature_transform=True, channel=3, only_point_feature=False, model_pretrained=None):
        super(PointNetResEncoder, self).__init__()
        self.stn = STN3d(channel)
        self.block1 = ResnetBlockConv1d(channel, 64, 64)
        self.block2 = ResnetBlockConv1d(64, 128, 128)
        self.block3 = ResnetBlockConv1d(128, c_dim, c_dim)

        self.c_dim = c_dim
        self.global_feat = global_feat
        self.feature_transform = feature_transform

        self.only_point_feature = only_point_feature
        if self.feature_transform:
            self.fstn = STNkd(k=64)

        if model_pretrained is not None:
            print('Loading depth encoder from ', model_pretrained)
            state_dict = torch.load(model_pretrained, map_location='cpu')
            self.load_state_dict(state_dict)