def __init__(self):
    super(Net_D, self).__init__()
    # Two set-abstraction levels: sample 50% of the points at each level,
    # grouping within radii 0.1 and 0.5.  The first MLP takes pos (3) plus
    # 3 input feature channels.
    self.sa1_module = SAModule(0.5, 0.1, MLP([3 + 3, 64, 64, 128]))
    self.sa2_module = SAModule(0.5, 0.5, MLP([128 + 3, 128, 128, 256]))
    # Single-logit output, consistent with a discriminator head
    # (as the name Net_D suggests).
    self.lin1 = torch.nn.Linear(256, 128)
    self.lin3 = torch.nn.Linear(128, 1)
def __init__(self, feature_num=1024, k=20, aggr='max'):
    super().__init__()
    # Four stacked EdgeConv layers over dynamically recomputed k-NN graphs.
    # Each MLP sees concatenated [x_i, x_j - x_i] pairs, hence the 2 * factor.
    self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64]), k, aggr)
    self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64]), k, aggr)
    self.conv3 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
    self.conv4 = DynamicEdgeConv(MLP([2 * 128, 256]), k, aggr)
    # Fuse the multi-scale features (64 + 64 + 128 + 256) into feature_num channels.
    self.lin1 = MLP([64 + 64 + 128 + 256, feature_num])
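# A minimal forward sketch for the encoder above, an assumption since the
# original snippet only shows __init__.  It follows the usual DynamicEdgeConv
# pattern; the global_max_pool readout (from torch_geometric.nn) is a guess
# at how one feature_num-dim descriptor per cloud would be produced.
def forward(self, pos, batch):
    x1 = self.conv1(pos, batch)        # [N, 64]
    x2 = self.conv2(x1, batch)         # [N, 64]
    x3 = self.conv3(x2, batch)         # [N, 128]
    x4 = self.conv4(x3, batch)         # [N, 256]
    # Fuse all scales, then pool each cloud down to a single descriptor.
    out = self.lin1(torch.cat([x1, x2, x3, x4], dim=1))
    return global_max_pool(out, batch)  # [num_graphs, feature_num]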
def __init__(self, out_channels, k=20, aggr='max'):
    super().__init__()
    self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), k, aggr)
    self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
    self.lin1 = MLP([128 + 64, 1024])
    # Classification head with dropout between the fully connected stages.
    self.mlp = Seq(
        MLP([1024, 512]), Dropout(0.5),
        MLP([512, 256]), Dropout(0.5),
        Lin(256, out_channels),
    )
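# The definition above matches PyTorch Geometric's dgcnn_classification.py
# example, so the corresponding forward pass would plausibly look like the
# sketch below (it assumes import torch.nn.functional as F and
# from torch_geometric.nn import global_max_pool):
def forward(self, data):
    pos, batch = data.pos, data.batch
    x1 = self.conv1(pos, batch)
    x2 = self.conv2(x1, batch)
    # Fuse both EdgeConv scales per point, then take a global readout.
    out = self.lin1(torch.cat([x1, x2], dim=1))
    out = global_max_pool(out, batch)
    out = self.mlp(out)
    return F.log_softmax(out, dim=1)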
def __init__(self, out_channels, k=10, aggr='max'):
    super(Net, self).__init__()
    # Learned 3x3 input alignment, as in PointNet's spatial transformer.
    self.transform_net = STN3d()
    self.k = k
    # DiffGCN blocks; the pool=True blocks coarsen the point set by 2.
    self.conv0 = DiffGCNBlock(3, 64, 20, 1)
    self.conv1 = DiffGCNBlock(64, 64, 5, 2, pool=True)
    self.conv2 = DiffGCNBlock(64, 64, 5, 2, pool=True)
    self.conv3 = DiffGCNBlock(64, 128, 5, 2, pool=True)
    self.lin1 = MLP([64 * 3 + 128, 2048])
    # The head also re-consumes the block outputs plus 16 extra channels,
    # presumably a one-hot object-category encoding as in ShapeNet part
    # segmentation.
    self.mlp = Seq(
        MLP([2048 + 64 * 3 + 128 + 16, 512]), Dropout(0.5),
        MLP([512, 256]), Dropout(0.5),
        MLP([256, 128]), Dropout(0.5),
        Lin(128, out_channels),
    )
def __init__(self, out_channels, k=30, aggr='max'):
    super(Net, self).__init__()
    self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64]), k, aggr)
    self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
    self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
    # Concatenate the three per-point feature maps before the shared head.
    self.lin1 = MLP([3 * 64, 1024])
    self.mlp = Seq(
        MLP([1024, 256]), Dropout(0.5),
        MLP([256, 128]), Dropout(0.5),
        Lin(128, out_channels),
    )
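# A per-point forward sketch for the head above (an assumption: there is no
# global pooling stage, so self.mlp would be applied to every point and
# out_channels read as the number of per-point labels):
def forward(self, pos, batch):
    x1 = self.conv1(pos, batch)
    x2 = self.conv2(x1, batch)
    x3 = self.conv3(x2, batch)
    out = self.lin1(torch.cat([x1, x2, x3], dim=1))  # [N, 1024]
    return F.log_softmax(self.mlp(out), dim=1)       # [N, out_channels]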
def __init__(self, num_classes):
    super(Net, self).__init__()
    # Apparently a position-only variant: the first set-abstraction MLP takes
    # just the 3-dim coordinates, and the last FP module has no input
    # features to skip-connect.
    self.sa1_module = SAModule(0.2, 0.2, MLP([3, 64, 64, 128]))
    self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))
    self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))
    self.fp3_module = FPModule(1, MLP([1024 + 256, 256, 256]))
    self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128]))
    self.fp1_module = FPModule(3, MLP([128, 128, 128, 128]))
    self.lin1 = torch.nn.Linear(128, 128)
    self.lin2 = torch.nn.Linear(128, 128)
    self.lin3 = torch.nn.Linear(128, num_classes)
def __init__(self, num_classes):
    super(Net, self).__init__()
    # Input channels account for both `pos` and node features.
    self.sa1_module = SAModule(0.2, 0.2, MLP([3 + 3, 64, 64, 128]))
    self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))
    self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))
    # Feature-propagation modules upsample back to the original resolution,
    # fusing the skip connection from the matching set-abstraction level.
    self.fp3_module = FPModule(1, MLP([1024 + 256, 256, 256]))
    self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128]))
    self.fp1_module = FPModule(3, MLP([128 + 3, 128, 128, 128]))
    self.lin1 = torch.nn.Linear(128, 128)
    self.lin2 = torch.nn.Linear(128, 128)
    self.lin3 = torch.nn.Linear(128, num_classes)
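# Both PointNet++ variants above follow torch_geometric's
# pointnet2_segmentation.py example, where SAModule / GlobalSAModule /
# FPModule each return an (x, pos, batch) tuple.  Under that assumption the
# forward pass is:
def forward(self, data):
    sa0_out = (data.x, data.pos, data.batch)
    sa1_out = self.sa1_module(*sa0_out)
    sa2_out = self.sa2_module(*sa1_out)
    sa3_out = self.sa3_module(*sa2_out)

    # Propagate coarse features back down, fusing each skip connection.
    fp3_out = self.fp3_module(*sa3_out, *sa2_out)
    fp2_out = self.fp2_module(*fp3_out, *sa1_out)
    x, _, _ = self.fp1_module(*fp2_out, *sa0_out)

    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.lin2(x)
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin3(x), dim=-1)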
def __init__(self, feature_num=1024, out_channels=10, aggr='max'):
    super().__init__()
    # Pairwise head: takes two concatenated global descriptors
    # (2 * feature_num channels) and classifies the pair.  Note that `aggr`
    # is accepted for interface symmetry but unused here.
    self.mlp = Seq(
        MLP([2 * feature_num, 512]), Dropout(0.5),
        MLP([512, 256]), Dropout(0.5),
        Lin(256, out_channels),
    )
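# Hypothetical usage pairing this head with the feature_num-dim
# DynamicEdgeConv encoder defined earlier.  The siamese pairing and the
# `encoder` / `head` names are illustrative assumptions, not part of the
# original snippets:
f_a = encoder(pos_a, batch_a)                    # [num_graphs, feature_num]
f_b = encoder(pos_b, batch_b)                    # [num_graphs, feature_num]
logits = head.mlp(torch.cat([f_a, f_b], dim=1))  # [num_graphs, out_channels]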