Example #1
    def __init__(self, n_feats_fc, in_feats_g, Dropout):
        super(Net, self).__init__()
        # build an 8-nearest-neighbour graph over each input point cloud
        self.knn = dgl.nn.pytorch.factory.KNNGraph(8)
        self.edge1 = EdgeConv(50, 100)
        self.edge2 = EdgeConv(100, 200)
        self.edge3 = EdgeConv(200, 600)
        self.Dropout = nn.Dropout(Dropout)
        self.pooling = MaxPooling()
        # classifier head: 600 pooled features down to 9 classes
        self.fc1 = nn.Linear(600, 300)
        self.fc2 = nn.Linear(300, 300)
        self.fc3 = nn.Linear(300, 100)
        self.fc4 = nn.Linear(100, 50)
        self.fc_out = nn.Linear(50, 9)
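The snippet shows only the constructor. A minimal forward-pass sketch consistent with these modules might look as follows; it is an assumption, since the original forward method is not part of the example, and it follows DGL's convention of calling EdgeConv and MaxPooling as layer(graph, features):

# Hypothetical forward pass for the Net above (not part of the original snippet).
# Assumes x is a batch of point clouds shaped (batch, n_points, 50).
import torch.nn.functional as F

def forward(self, x):
    g = self.knn(x)                       # 8-nearest-neighbour graph per cloud
    h = x.view(-1, x.shape[-1])           # (batch * n_points, 50) node features
    h = F.relu(self.edge1(g, h))
    h = F.relu(self.edge2(g, h))
    h = F.relu(self.edge3(g, h))
    h = self.pooling(g, h)                # graph-level max pooling -> (batch, 600)
    h = F.relu(self.fc1(h))
    h = self.Dropout(F.relu(self.fc2(h)))
    h = F.relu(self.fc3(h))
    h = F.relu(self.fc4(h))
    return self.fc_out(h)                 # 9 output classes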
Example #2

    def __init__(self, k, feature_dims, emb_dims, output_classes, input_dims=3,
                 dropout_prob=0.5):
        super(DgcnnModel, self).__init__()

        self.nng = KNNGraph(k)
        self.conv = nn.ModuleList()

        self.num_layers = len(feature_dims)
        for i in range(self.num_layers):
            self.conv.append(EdgeConv(
                feature_dims[i - 1] if i > 0 else input_dims,
                feature_dims[i],
                batch_norm=True))

        self.proj = nn.Linear(sum(feature_dims), emb_dims[0])

        self.embs = nn.ModuleList()
        self.bn_embs = nn.ModuleList()
        self.dropouts = nn.ModuleList()

        self.num_embs = len(emb_dims) - 1
        for i in range(1, self.num_embs + 1):
            self.embs.append(nn.Linear(
                # * 2 because of concatenation of max- and mean-pooling
                emb_dims[i - 1] if i > 1 else (emb_dims[i - 1] * 2),
                emb_dims[i]))
            # self.bn_embs.append(nn.BatchNorm1d(emb_dims[i]))
            self.dropouts.append(nn.Dropout(dropout_prob))

        self.proj_output = nn.Linear(emb_dims[-1], output_classes)
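For reference, this constructor can be exercised with dimension lists in the style of the original DGCNN; the concrete values below are illustrative assumptions, not taken from the snippet:

# Illustrative instantiation; k, the dimension lists and the class count are assumptions.
model = DgcnnModel(k=20,
                   feature_dims=[64, 64, 128, 256],
                   emb_dims=[1024, 512, 256],
                   output_classes=40,   # e.g. ModelNet40
                   input_dims=3,
                   dropout_prob=0.5)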
Example #3
    def __init__(self, k=10, feature_dims=[64, 64, 128, 256], emb_dims=[512, 512, 256],
                 input_dims=3, output_dims=1024):
        super(GNN, self).__init__()
        self.nng = KNNGraph(k)
        self.conv = nn.ModuleList()
        self.num_layers = len(feature_dims)
        for i in range(self.num_layers):
            self.conv.append(EdgeConv(
                feature_dims[i - 1] if i > 0 else input_dims,
                feature_dims[i],
                batch_norm=True))
        self.proj = nn.Linear(sum(feature_dims), emb_dims[0])
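Since self.proj expects sum(feature_dims) = 512 input features, the forward pass presumably concatenates the output of every EdgeConv layer before projecting. A sketch under that assumption (the original forward method is not shown):

# Hypothetical forward sketch for GNN; x is (batch, n_points, 3).
import torch

def forward(self, x):
    g = self.nng(x)                    # k-NN graph, k=10 by default
    h = x.view(-1, x.shape[-1])
    hs = []
    for conv in self.conv:
        h = conv(g, h)
        hs.append(h)
    h = torch.cat(hs, dim=-1)          # 64 + 64 + 128 + 256 = 512 features per node
    return self.proj(h)                # per-node embedding of size emb_dims[0]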
Example #4

    def __init__(self, k, in_dim: int, emb_dims: list, out_dim: int):
        super(DescripNet, self).__init__()

        self.knng = KNNGraph(k)
        self.conv = nn.ModuleList()

        self.feat_nn = nn.Sequential(nn.Linear(emb_dims[-2], emb_dims[-1]),
                                     nn.ReLU())
        self.gate_nn = nn.Sequential(nn.Linear(emb_dims[-2], 1), nn.ReLU())
        self.global_attention_pooling = GlobalAttentionPooling(
            gate_nn=self.gate_nn, feat_nn=self.feat_nn)
        self.last_layer = nn.Linear(emb_dims[-1], out_dim)
        for i in range(len(emb_dims) - 1):
            self.conv.append(
                EdgeConv(emb_dims[i - 1] if i > 0 else in_dim,
                         emb_dims[i],
                         batch_norm=True))
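Here the EdgeConv stack ends at emb_dims[-2] features, which is exactly what gate_nn and feat_nn consume, so a plausible forward pass (an assumption; it is not included in the example) would be:

# Hypothetical forward sketch for DescripNet.
def forward(self, x):
    g = self.knng(x)
    h = x.view(-1, x.shape[-1])
    for conv in self.conv:
        h = conv(g, h)                           # ends with emb_dims[-2] features
    # gate_nn scores each node, feat_nn lifts features to emb_dims[-1]
    h = self.global_attention_pooling(g, h)      # -> (batch, emb_dims[-1])
    return self.last_layer(h)                    # -> (batch, out_dim)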
Example #5
    def __init__(self, n_feats_fc, in_feats_g, Dropout):
        super(Net, self).__init__()
        # first stack: eight EdgeConv blocks at a constant width of 54
        self.b1 = EdgeConv(54, 54)
        self.b2 = EdgeConv(54, 54)
        self.b3 = EdgeConv(54, 54)
        self.b4 = EdgeConv(54, 54)
        self.b5 = EdgeConv(54, 54)
        self.b6 = EdgeConv(54, 54)
        self.b7 = EdgeConv(54, 54)
        self.b8 = EdgeConv(54, 54)

        # second stack: tapers from 54 to 25 channels and finally to 1
        self.a1 = EdgeConv(54, 54)
        self.a2 = EdgeConv(54, 54)
        self.a3 = EdgeConv(54, 54)
        self.a4 = EdgeConv(54, 54)
        self.a5 = EdgeConv(54, 25)
        self.a6 = EdgeConv(25, 25)
        self.a7 = EdgeConv(25, 25)
        self.a8 = EdgeConv(25, 25)
        self.a9 = EdgeConv(25, 25)
        self.a10 = EdgeConv(25, 25)
        self.a11 = EdgeConv(25, 25)
        self.a12 = EdgeConv(25, 25)
        self.a13 = EdgeConv(25, 25)
        self.a14 = EdgeConv(25, 25)
        self.a15 = EdgeConv(25, 1)

        self.drop = nn.Dropout(0.5)
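The 23 numbered attributes can be built more compactly with nn.ModuleList. The sketch below is equivalent only under the assumption that the layers are applied in numeric order, as the names suggest (note that it also changes the parameter names in state_dict):

# Compact equivalent of the b1..b8 and a1..a15 definitions above.
self.b_convs = nn.ModuleList([EdgeConv(54, 54) for _ in range(8)])
a_dims = [54, 54, 54, 54, 54, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 1]
self.a_convs = nn.ModuleList(
    EdgeConv(a_dims[i], a_dims[i + 1]) for i in range(len(a_dims) - 1))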
Example #6
    def __init__(self, k, feature_dims, emb_dims, output_classes, init_points=512, input_dims=3,
                 dropout_prob=0.5, npart=1, id_skip=False, drop_connect_rate=0, res_scale=1.0,
                 light=False, bias=False, cluster='xyz', conv='EdgeConv', use_xyz=True,
                 use_se=True, graph_jitter=False):
        super(Model, self).__init__()

        self.npart = npart
        self.graph_jitter = graph_jitter
        self.res_scale = res_scale
        self.id_skip = id_skip
        self.drop_connect_rate = drop_connect_rate
        self.nng = KNNGraphE(k)  # with random neighbor
        self.conv = nn.ModuleList()
        self.conv_s1 = nn.ModuleList()
        self.conv_s2 = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.sa = nn.ModuleList()
        self.cluster = cluster
        self.feature_dims = feature_dims
        self.conv_type = conv
        self.init_points = init_points
        self.k = k
        #self.proj_in = nn.Linear(input_dims, input_dims)

        self.num_layers = len(feature_dims)
        npoint = init_points
        for i in range(self.num_layers):
            if k == 1:
                self.conv.append(nn.Linear(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i]))
            elif conv == 'EdgeConv':
                if light:
                    self.conv.append(EdgeConv_Light(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
                else:
                    self.conv.append(EdgeConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
            elif conv == 'GATConv':
                self.conv.append(GATConv(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i],
                    feat_drop=0.2, attn_drop=0.2,
                    residual=True,
                    num_heads=1))
            elif conv == 'GraphConv':
                self.conv.append(GraphConv(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i]))
            elif conv == 'SAGEConv':
                self.conv.append(SAGEConv(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i],
                    feat_drop=0.2,
                    aggregator_type='mean',
                    norm=nn.BatchNorm1d(feature_dims[i])))
            elif conv == 'SGConv':
                self.conv.append(SGConv(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i]))
            elif conv == 'GatedGCN':  # missing etypes
                self.conv.append(GatedGCNLayer(
                    feature_dims[i - 1] if i > 0 else input_dims,
                    feature_dims[i],
                    dropout=0.0,
                    graph_norm=True, batch_norm=True, residual=True))

            # halve the point budget whenever the channel width grows
            if i > 0 and feature_dims[i] > feature_dims[i - 1]:
                npoint = npoint // 2
                if id_skip and npoint <= self.init_points // 4:  # only at high-level (downsampled) layers
                    self.conv_s2.append(nn.Linear(feature_dims[i - 1], feature_dims[i]))

            self.sa.append(PointnetSAModule(
                npoint=npoint,
                radius=0.2,
                nsample=64,
                mlp=[feature_dims[i], feature_dims[i], feature_dims[i]],
                fuse='add',
                norml='bn',
                activation='relu',
                use_se=use_se,
                use_xyz=use_xyz,
                use_neighbor=False,
                light=light
            ))
            #if id_skip:
            #    self.conv_s1.append( nn.Linear(feature_dims[i], feature_dims[i] ))

        self.embs = nn.ModuleList()
        self.bn_embs = nn.ModuleList()
        self.dropouts = nn.ModuleList()

        self.partpool = nn.AdaptiveAvgPool1d(self.npart)
        if self.npart == 1:
            self.embs.append(nn.Linear(
                # * 2 because of concatenation of max- and mean-pooling
                feature_dims[-1]*2, emb_dims[0], bias=bias))
            self.bn_embs.append(nn.BatchNorm1d(emb_dims[0]))
            self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
            self.proj_output = nn.Linear(emb_dims[0], output_classes)
            self.proj_output.apply(weights_init_classifier)
        else:
            self.proj_outputs = nn.ModuleList()
            for i in range(0, self.npart):
                self.embs.append(nn.Linear(512, 512, bias=bias))
                self.bn_embs.append(nn.BatchNorm1d(512))
                self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
                self.proj_outputs.append(nn.Linear(512, output_classes))
            self.proj_outputs.apply(weights_init_classifier)

        # initial
        #self.proj_in.apply(weights_init_kaiming)
        self.conv.apply(weights_init_kaiming)
        self.conv_s1.apply(weights_init_kaiming)
        self.conv_s2.apply(weights_init_kaiming)
        weights_init_kaiming2 = lambda x: weights_init_kaiming(x, L=self.num_layers)
        self.sa.apply(weights_init_kaiming2)
        #self.proj.apply(weights_init_kaiming)
        self.embs.apply(weights_init_kaiming)
        self.bn.apply(weights_init_kaiming)
        self.bn_embs.apply(weights_init_kaiming)
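A plausible instantiation of this constructor follows; every value below is an illustrative assumption, and the helper symbols (EdgeConv_Light, PointnetSAModule, GatedGCNLayer, weights_init_kaiming, ...) must already be importable:

# Illustrative instantiation of Model; all values are assumptions.
model = Model(k=20,
              feature_dims=[64, 128, 256, 512],
              emb_dims=[512],
              output_classes=751,    # e.g. the identity count of a re-ID dataset
              init_points=512,
              conv='EdgeConv',
              npart=1)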
Example #7
    def __init__(self, k, feature_dims, emb_dims):
        super(Model, self).__init__()

        self.nng = KNNGraph(k)
        self.conv = EdgeConv(feature_dims, emb_dims, batch_norm=False)
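With only a graph builder and a single EdgeConv, the forward pass is presumably two lines; a sketch (assumed, not shown in the snippet):

# Hypothetical forward for this minimal Model; x is (batch, n_points, feature_dims).
def forward(self, x):
    g = self.nng(x)                              # k-NN graph over the point cloud
    return self.conv(g, x.view(-1, x.shape[-1]))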