Example 1
    def forward(self, inputs: paddle.Tensor):
        x = inputs
        if self.expand_ratio != 1:
            # Expansion (pointwise) convolution followed by swish.
            x = self._ecn(x)
            x = F.swish(x)
        x = self._dcn(x)
        x = F.swish(x)
        if self.has_se:
            x = self._se(x)
        x = self._pcn(x)
        # Identity skip connection, only when stride and channel counts match.
        if self.id_skip and \
                self.block_args.stride == 1 and \
                self.block_args.input_filters == self.block_args.output_filters:
            if self.drop_connect_rate:
                x = _drop_connect(x, self.drop_connect_rate, self.is_test)
            # Legacy API name; equivalent to paddle.add in Paddle 2.x.
            x = paddle.elementwise_add(x, inputs)
        return x
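Every snippet in this listing relies on F.swish, i.e. paddle.nn.functional.swish. A minimal standalone check of what it computes, assuming Paddle 2.x (the tensor values are chosen only for illustration):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.0, 2.0])
y = F.swish(x)            # swish(x) = x * sigmoid(x)
y_ref = x * F.sigmoid(x)  # same result, written out explicitly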
Example 2
    def forward(self, inputs: paddle.Tensor):
        x = self._conv(inputs)
        if self.act == "swish":
            x = F.swish(x)
        elif self.act == "sigmoid":
            x = F.sigmoid(x)

        if self.need_crop:
            x = x[:, :, 1:, 1:]
        return x
Example 3
    def forward(self, inputs):
        x = inputs
        if self.expand_ratio != 1:
            x = self._ecn(x)
            x = F.swish(x)

        x = self._dcn(x)
        x = F.swish(x)
        if self.has_se:
            x = self._se(x)
        x = self._pcn(x)

        if self.id_skip and \
                self.block_args.stride == 1 and \
                self.block_args.input_filters == self.block_args.output_filters:
            if self.drop_connect_rate:
                x = _drop_connect(x, self.drop_connect_rate, not self.training)
            x = paddle.add(x, inputs)
        return x
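Examples 1 and 3 call a module-level _drop_connect helper that is not shown in this listing. A minimal sketch of the usual EfficientNet-style drop connect (stochastic depth), assuming an NCHW input and Paddle 2.x; the exact implementation in those projects may differ:

def _drop_connect(inputs, prob, is_test):
    # At inference time the layer is the identity.
    if is_test:
        return inputs
    keep_prob = 1.0 - prob
    # One Bernoulli draw per sample: floor(keep_prob + U[0,1)) is 0 or 1.
    random_tensor = keep_prob + paddle.rand(
        [inputs.shape[0], 1, 1, 1], dtype=inputs.dtype)
    binary_mask = paddle.floor(random_tensor)
    # Rescale kept samples so the expectation matches inference.
    return inputs / keep_prob * binary_mask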
Example 4
File: conv.py Project: WenjinW/PGL
    def forward(self, feed_dict):
        g = feed_dict["graph"]
        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]

        h = self.atom_encoder(x)
        if self.config.exfeat:
            h += self.atom_encoder_float(g.node_feat["feat_float"])
        #  print("atom_encoder: ", np.sum(h.numpy()))

        if self.virtual_node:
            virtualnode_embedding = self.virtualnode_embedding.expand(
                    [g.num_graph, self.virtualnode_embedding.shape[-1]])
            h = h + paddle.gather(virtualnode_embedding, g.graph_node_id)
            #  print("virt0: ", np.sum(h.numpy()))

        if self.with_efeat:
            edge_emb = self.bond_encoder(edge_feat)
        else:
            edge_emb = edge_feat

        h = self.gnns[0](g, h, edge_emb)
        if self.config.graphnorm:
            h = self.gn(g, h)

        #  print("h0: ", np.sum(h.numpy()))
        for layer in range(1, self.num_layers):
            h1 = self.norms[layer-1](h)
            h2 = F.swish(h1)
            h2 = F.dropout(h2, p=self.drop_ratio, training=self.training)

            if self.virtual_node:
                virtualnode_embedding_temp = self.pool(g, h2) + virtualnode_embedding
                virtualnode_embedding = self.mlp_virtualnode_list[layer-1](virtualnode_embedding_temp)
                virtualnode_embedding = F.dropout(
                        virtualnode_embedding,
                        self.drop_ratio,
                        training=self.training)

                h2 = h2 + paddle.gather(virtualnode_embedding, g.graph_node_id)
                #  print("virt_h%s: " % (layer), np.sum(h2.numpy()))

            h = self.gnns[layer](g, h2, edge_emb) + h
            if self.config.graphnorm:
                h = self.gn(g, h)
            #  print("h%s: " % (layer), np.sum(h.numpy()))

        h = self.norms[self.num_layers-1](h)
        h = F.dropout(h, p=self.drop_ratio, training=self.training)

        if self.config.appnp_k is not None:
            h = self.appnp(g, h)
        #  print("node_repr: ", np.sum(h.numpy()))
        node_representation = h
        return node_representation
Example 5
    def send_func(self, src_feat, dst_feat, edge_feat):
        if self.with_efeat:
            if self.concat:
                h = paddle.concat(
                    [dst_feat['h'], src_feat['h'], edge_feat['e']], axis=1)
                h = self.fc_concat(h)
            else:
                h = src_feat["h"] + edge_feat["e"]
        else:
            h = src_feat["h"]
        msg = {"h": F.swish(h) + self.eps}
        return msg
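Example 5 defines only the message function. A sketch of how such a send_func is typically driven by PGL's message-passing interface, assuming the pgl.Graph send/recv API of PGL 2.x; the reducer and the surrounding forward are illustrative and not taken from the project above:

    def recv_func(self, msg):
        # Sum incoming messages per destination node.
        return msg.reduce_sum(msg["h"])

    def forward(self, graph, node_feat, edge_emb):
        msg = graph.send(self.send_func,
                         src_feat={"h": node_feat},
                         dst_feat={"h": node_feat},
                         edge_feat={"e": edge_emb})
        return graph.recv(reduce_func=self.recv_func, msg=msg)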
Example 6
File: conv.py Project: WenjinW/PGL
    def forward(self, feed_dict):
        g = feed_dict["graph"]
        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        
        h_list = [self.atom_encoder(x)]

        virtualnode_embedding = self.virtualnode_embedding.expand(
                [g.num_graph, self.virtualnode_embedding.shape[-1]])

        for layer in range(self.config.num_layers):
            h_list[layer] = h_list[layer] + \
                    paddle.gather(virtualnode_embedding, g.graph_node_id)

            ### Message passing among graph nodes
            h = self.convs[layer](g, h_list[layer], edge_feat)
            h = self.batch_norms[layer](h)
            if layer == self.config.num_layers - 1:
                # no activation after the last layer (swish is used elsewhere)
                h = F.dropout(h, self.config.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.swish(h), self.config.drop_ratio, training = self.training)

            if self.config.residual:
                h = h + h_list[layer]

            h_list.append(h)

            ### update the virtual nodes
            if layer < self.config.num_layers - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = self.pool(g, h_list[layer]) + virtualnode_embedding
                ### transform virtual nodes using MLP

                if self.config.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),
                        self.config.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),
                        self.config.drop_ratio, training = self.training)

        ### Different implementations of Jk-concat
        if self.config.JK == "last":
            node_representation = h_list[-1]
        elif self.config.JK == "sum":
            node_representation = 0
            for layer in range(self.config.num_layers):
                node_representation += h_list[layer]
        
        return node_representation
Example 7
    def forward(self, g):
        """tbd"""
        h = self.atom_embedding(g.node_feat)
        h += self.atom_float_embedding(g.node_feat)

        if self.virtual_node:
            virtualnode_embedding = self.virtualnode_embedding.expand(
                [g.num_graph, self.virtualnode_embedding.shape[-1]])
            h = h + paddle.gather(virtualnode_embedding, g.graph_node_id)
            #  print("virt0: ", np.sum(h.numpy()))

        if self.with_efeat:
            edge_emb = self.init_bond_embedding(g.edge_feat)
        else:
            edge_emb = g.edge_feat

        h = self.gnns[0](g, h, edge_emb)
        if self.config["graphnorm"]:
            h = self.gn(g, h)

        #  print("h0: ", np.sum(h.numpy()))
        for layer in range(1, self.num_layers):
            h1 = self.norms[layer - 1](h)
            h2 = F.swish(h1)
            h2 = F.dropout(h2, p=self.drop_ratio, training=self.training)

            if self.virtual_node:
                virtualnode_embedding_temp = self.pool(
                    g, h2) + virtualnode_embedding
                virtualnode_embedding = self.mlp_virtualnode_list[layer - 1](
                    virtualnode_embedding_temp)
                virtualnode_embedding = F.dropout(virtualnode_embedding,
                                                  self.drop_ratio,
                                                  training=self.training)

                h2 = h2 + paddle.gather(virtualnode_embedding, g.graph_node_id)
                #  print("virt_h%s: " % (layer), np.sum(h2.numpy()))

            h = self.gnns[layer](g, h2, edge_emb) + h
            if self.config["graphnorm"]:
                h = self.gn(g, h)
            #  print("h%s: " % (layer), np.sum(h.numpy()))

        h = self.norms[self.num_layers - 1](h)
        h = F.dropout(h, p=self.drop_ratio, training=self.training)

        h_graph = self.pool(g, h)
        # return graph, node, edge representation
        return h_graph, h, edge_emb
Example 8
    def forward(self, inputs: paddle.Tensor):
        x = self._conv_stem(inputs)
        x = F.swish(x)
        for _mc_block in self.conv_seq:
            x = _mc_block(x)
        return x
Example 9
File: conv.py Project: WenjinW/PGL
    def forward(self, feed_dict):
        g = feed_dict['graph']

        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        h_list = [self.atom_encoder(x)]

        ### virtual node embeddings for graphs
        virtualnode_embedding = self.virtualnode_embedding.expand(
                [g.num_graph, self.virtualnode_embedding.shape[-1]])

        junc_feat = self.junc_embed(feed_dict['junc_graph'].node_feat['feat'])
        junc_feat = paddle.squeeze(junc_feat, axis=1)
        for layer in range(self.num_layers):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + paddle.gather(virtualnode_embedding, g.graph_node_id)

            ### Message passing among graph nodes
            h = self.convs[layer](g, h_list[layer], edge_feat)

            h = self.batch_norms[layer](h)
            if layer == self.num_layers - 1:
                # no activation after the last layer (swish is used elsewhere)
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.swish(h), self.drop_ratio, training = self.training)

            if self.residual:
                h = h + h_list[layer]

            # junction tree aggr
            atom_index = feed_dict['mol2junc'][:, 0]
            junc_index = feed_dict['mol2junc'][:, 1]
            gather_h = paddle.gather(h, atom_index)
            out_dim = gather_h.shape[-1]
            num = feed_dict['junc_graph'].num_nodes
            init_h = paddle.zeros(shape=[num, out_dim], dtype=gather_h.dtype)
            junc_h = paddle.scatter(init_h, junc_index, gather_h, overwrite=False)
            # node feature of junction tree
            junc_h = junc_feat + junc_h

            junc_h = self.junc_convs[layer](feed_dict['junc_graph'], junc_h)

            junc_h = paddle.gather(junc_h, junc_index)
            init_h = paddle.zeros(shape=[feed_dict['graph'].num_nodes, out_dim], dtype=h.dtype)
            sct_h = paddle.scatter(init_h, atom_index, junc_h, overwrite=False)
            h = h + sct_h

            h_list.append(h)

            ### update the virtual nodes
            if layer < self.num_layers - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = self.pool(g, h_list[layer]) + virtualnode_embedding
                ### transform virtual nodes using MLP

                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)

        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layers):
                node_representation += h_list[layer]
        
        return node_representation
Example 10
    def forward(self, inputs: paddle.Tensor, if_act: bool = True):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        # Activation is governed by self._if_act set in __init__, not the argument.
        if self._if_act:
            y = F.relu(y) if self._act == 'relu' else F.swish(y)
        return y