Ejemplo n.º 1
0
    def __init__(self,
                 n_features,
                 n_classes,
                 num_layers,
                 hidden_gcn,
                 hidden_fc,
                 edge_index,
                 n_genes=15135,
                 mode='cat'):
        """GCN stack with TopK pooling over a fixed gene graph.

        Args:
            n_features: number of input features per node (gene).
            n_classes: number of output classes.
            num_layers: total number of GCN layers (the first included).
            hidden_gcn: hidden width of every GCN layer.
            hidden_fc: hidden width of the final MLP.
            edge_index: fixed graph connectivity, stored for the forward pass.
            n_genes: number of nodes (genes) in the graph.
            mode: 'cat' to pool over the concatenation of all per-layer
                outputs; any other value pools the last layer's output only.
        """
        super().__init__()
        self.edge_index = edge_index
        self.conv1 = GCNConv(n_features, hidden_gcn)
        self.convs = torch.nn.ModuleList()
        self.mode = mode

        # Remaining (num_layers - 1) hidden GCN layers; the index is unused.
        for _ in range(num_layers - 1):
            self.convs.append(GCNConv(hidden_gcn, hidden_gcn))

        if mode == 'cat':
            # Concatenated per-layer features are num_layers * hidden_gcn wide.
            self.topkpooling = TopKPooling(num_layers * hidden_gcn)
            self.fc = torch.nn.Linear(
                num_layers * hidden_gcn,
                1)  # FC layer to reduce dim of gene features to 1
        else:
            self.topkpooling = TopKPooling(hidden_gcn)
            self.fc = torch.nn.Linear(
                hidden_gcn,
                1)  # FC layer to reduce dim of pathway features to 1

        # TopKPooling defaults to keeping half the nodes (ratio 0.5).
        n_nodes_after_topk = int(np.ceil(0.5 * n_genes))
        self.lin1 = Linear(n_nodes_after_topk, hidden_fc)
        self.lin2 = Linear(hidden_fc, n_classes)
Ejemplo n.º 2
0
    def __init__(self, indim, ratio, nclass, k=8, R=200):
        """Two-stage MyNNConv network with TopK pooling and an MLP head.

        :param indim: (int) node feature dimension
        :param ratio: (float) pooling ratio in (0,1)
        :param nclass: (int) number of classes
        :param k: (int) number of communities
        :param R: (int) number of ROIs
        """
        super(Network, self).__init__()

        self.indim = indim
        self.dim1 = 32
        self.dim2 = 32
        self.dim3 = 512
        self.dim4 = 256
        self.dim5 = 8
        self.k = k
        self.R = R

        # Each stage i owns a weight-generating MLP n{i}: R -> k -> (out*in),
        # a MyNNConv conv{i}, and a TopKPooling pool{i}.
        stage_dims = ((self.indim, self.dim1), (self.dim1, self.dim2))
        for i, (d_in, d_out) in enumerate(stage_dims, start=1):
            mlp = nn.Sequential(nn.Linear(self.R, self.k, bias=False),
                                nn.ReLU(),
                                nn.Linear(self.k, d_out * d_in))
            setattr(self, 'n%d' % i, mlp)
            setattr(self, 'conv%d' % i,
                    MyNNConv(d_in, d_out, mlp, normalize=False))
            setattr(self, 'pool%d' % i,
                    TopKPooling(d_out, ratio=ratio, multiplier=1,
                                nonlinearity=torch.sigmoid))

        # Classifier head over a (dim1 + dim2) * 2 feature vector.
        self.fc1 = torch.nn.Linear((self.dim1 + self.dim2) * 2, self.dim2)
        self.bn1 = torch.nn.BatchNorm1d(self.dim2)
        self.fc2 = torch.nn.Linear(self.dim2, self.dim3)
        self.bn2 = torch.nn.BatchNorm1d(self.dim3)
        self.fc3 = torch.nn.Linear(self.dim3, nclass)
Ejemplo n.º 3
0
 def __init__(self):  # Model layers: three TopK pooling stages plus two linear layers.
     super(Net, self).__init__()
     self.pool1 = TopKPooling(5, ratio=0.01)  # keep top 1% of the 5-feature nodes
     self.nn1 = torch.nn.Linear(5, 64)  # lift node features 5 -> 64
     self.pool2 = TopKPooling(64, ratio=0.1)  # keep top 10% of nodes
     self.pool3 = TopKPooling(64, ratio=0.1)  # keep top 10% again
     self.nn2 = torch.nn.Linear(64, 8)  # 8-dimensional output head
    def __init__(self,
                 dataset,
                 embedding_layer,
                 hidden_dim=cmd_args.hidden_dim):
        """Four-stage graph encoder alternating GraphConvE and NNConv.

        The ``dataset`` argument is accepted but not stored here.  Note the
        default for ``hidden_dim`` is read from the global ``cmd_args`` at
        class-definition time.
        """
        super().__init__()

        self.embedding_layer = embedding_layer

        # Edge MLPs for the two NNConv stages: each maps a hidden_dim input
        # to a flattened (hidden_dim x hidden_dim) weight matrix.
        self.nn2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                                 nn.Linear(hidden_dim, hidden_dim**2))
        self.nn4 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                                 nn.Linear(hidden_dim, hidden_dim**2))

        # conv1..conv4 alternate GraphConvE / NNConv; every stage is
        # followed by a TopKPooling with ratio=1.0 (keeps all nodes).
        stage_convs = (GraphConvE(hidden_dim, hidden_dim),
                       NNConv(hidden_dim, hidden_dim, self.nn2),
                       GraphConvE(hidden_dim, hidden_dim),
                       NNConv(hidden_dim, hidden_dim, self.nn4))
        for idx, conv in enumerate(stage_convs, start=1):
            setattr(self, 'conv%d' % idx, conv)
            setattr(self, 'pool%d' % idx, TopKPooling(hidden_dim, ratio=1.0))

        # Square linear layers lin1..lin3 and l1..l4.
        for idx in (1, 2, 3):
            setattr(self, 'lin%d' % idx,
                    torch.nn.Linear(hidden_dim, hidden_dim))
        for idx in (1, 2, 3, 4):
            setattr(self, 'l%d' % idx,
                    torch.nn.Linear(hidden_dim, hidden_dim))
Ejemplo n.º 5
0
    def __init__(self, num_classes):
        """Three GATConv stages (first two TopK-pooled) with a global
        attention readout and a three-layer classifier head."""
        super(PoolNetv2, self).__init__()

        self.conv1 = GATConv(3, 32, heads=2)  # 3 -> 32 per head, 2 heads = 64
        self.norm1 = InstanceNorm(64, affine=True)
        self.pool1 = TopKPooling(64, ratio=0.3, nonlinearity=torch.sigmoid)

        self.conv2 = GATConv(64, 128, heads=2)  # 64 -> 128 per head = 256
        self.norm2 = InstanceNorm(256, affine=True)
        self.pool2 = TopKPooling(256, ratio=0.3, nonlinearity=torch.sigmoid)

        # concat=False averages the two heads instead of concatenating.
        self.conv3 = GATConv(256, 512, heads=2, concat=False)
        self.norm3 = InstanceNorm(512, affine=True)

        # Gate network scoring each node (512 -> 1) for the attention readout.
        self.att_w = nn.Sequential(nn.Linear(512, 256), nn.ELU(),
                                   nn.Linear(256, 128), nn.ELU(),
                                   nn.Linear(128, 1))
        # Value network applied to node features before aggregation.
        self.att_net = nn.Sequential(nn.Linear(512, 512), nn.ELU())
        self.att = GlobalAttention(gate_nn=self.att_w, nn=self.att_net)

        self.lin1 = Linear(512, 512)
        self.lin2 = Linear(512, 256)
        self.lin3 = Linear(256, num_classes)
Ejemplo n.º 6
0
    def __init__(self):
        """Five identical GCNConv + TopKPooling stages with an MLP head and
        an alternative sequential readout head."""
        super(Net, self).__init__()

        # conv1..conv5 / pool1..pool5: 128-wide GCN stages, each keeping
        # the top 80% of nodes.
        for i in range(1, 6):
            setattr(self, 'conv%d' % i, GCNConv(128, 128))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        # Binary classifier head over a 256-dim graph readout.
        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, 2)

        # Alternative readout head (128 -> 64 -> 2).
        self.readout = Seq(Linear(128, 64), ReLU(), Linear(64, 2))
Ejemplo n.º 7
0
    def __init__(self, args):
        """TopK-pooling graph classifier configured from an ``args`` mapping.

        Args:
            args: mapping that must contain the keys ``features_num``,
                ``num_class``, ``num_graph_features``, ``ratio``,
                ``dropout`` and ``act``.

        Raises:
            ValueError: if any required key is missing from ``args``.
        """
        super(Topkpool, self).__init__()
        self.args = args

        required = {"features_num", "num_class", "num_graph_features",
                    "ratio", "dropout", "act"}
        # sorted() makes the error message deterministic (set order is not).
        missing_keys = sorted(required - set(self.args.keys()))
        if missing_keys:
            raise ValueError("Missing keys: %s." % ','.join(missing_keys))

        self.num_features = self.args["features_num"]
        self.num_classes = self.args["num_class"]
        self.ratio = self.args["ratio"]
        self.dropout = self.args["dropout"]
        self.num_graph_features = self.args["num_graph_features"]

        self.conv1 = GraphConv(self.num_features, 128)
        self.pool1 = TopKPooling(128, ratio=self.ratio)
        self.conv2 = GraphConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=self.ratio)
        self.conv3 = GraphConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=self.ratio)

        # Graph-level features are concatenated onto the 256-dim readout.
        self.lin1 = torch.nn.Linear(256 + self.num_graph_features, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, self.num_classes)
Ejemplo n.º 8
0
    def __init__(self,
                 num_features,
                 output_channels,
                 nb_neurons=128,
                 **kwargs):
        """Three GCNConv + TopKPooling stages and a two-layer classifier.

        Parameters
        ----------
        num_features: int
            number of node features
        output_channels: int
            number of classes
        nb_neurons: int, optional
            hidden width of every GCN layer (default 128)
        """
        super(GCNConv3TPK, self).__init__()

        # Only the first convolution consumes the raw node features.
        widths = [num_features] + [nb_neurons] * 3
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GCNConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(nb_neurons, ratio=0.8))

        self.lin1 = torch.nn.Linear(nb_neurons, 64)
        self.lin2 = torch.nn.Linear(64, output_channels)
Ejemplo n.º 9
0
    def __init__(self):
        """Graph encoder/decoder built from SGConv layers with TopK pooling.

        All hyper-parameters (depth, pooling rates, widths) are hard-coded.
        """
        super(Net, self).__init__()
        out = 5  # channel width of the final decoder output
        size = 3  # first argument to SGConv -- kernel/hop count, TODO confirm
        self.depth = 3
        rate = [0.8, 0.8, 0.8]  # TopK pooling ratio per encoder level
        shape = [64, 48, 32]  # channel width per encoder level
        self.direction = 1
        self.down_list = torch.nn.ModuleList()
        self.up_list = torch.nn.ModuleList()
        self.pool_list = torch.nn.ModuleList()
        # encoder
        # NOTE(review): the first conv is SGConv(size, out, shape[0]), i.e.
        # it treats `out` (=5) as the input feature count -- verify the input
        # graphs really carry 5 node features.
        conv = SGConv(size, out, shape[0])
        self.down_list.append(conv)
        for i in range(self.depth - 1):
            pool = TopKPooling(shape[i], rate[i])
            self.pool_list.append(pool)
            conv = SGConv(size, shape[i], shape[i + 1])
            self.down_list.append(conv)
        pool = TopKPooling(shape[-1], rate[-1])
        self.pool_list.append(pool)

        # decoder: mirrors the encoder widths back to shape[0], then a final
        # SGConv (first argument 1 rather than `size`) down to `out` channels.
        for i in range(self.depth - 1):
            conv = SGConv(size, shape[self.depth - i - 1],
                          shape[self.depth - i - 2])
            self.up_list.append(conv)
        conv = SGConv(1, shape[0], out)
        self.up_list.append(conv)

        # Mutable scratch state, presumably filled during forward -- confirm.
        self.x_num_nodes = 0
        self.x = 0
        self.r_graph = 0
Ejemplo n.º 10
0
    def __init__(self):
        """Three GCN/TopK stages plus a 1-D conv bottleneck over the readout
        and a three-layer classifier head."""
        super(Net, self).__init__()

        # Graph feature extractor: three identical 128-wide stages.
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GCNConv(128, 128))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        # 1-D conv bottleneck 256 -> 64 -> 16 -> 2 -> 16 -> 64 -> 256;
        # every conv is paired with a stride-2 max-pool.
        channels = (256, 64, 16, 2, 16, 64, 256)
        for i, (c_in, c_out) in enumerate(zip(channels, channels[1:]),
                                          start=1):
            setattr(self, 'convAtt%d' % i,
                    torch.nn.Conv1d(c_in, c_out, kernel_size=1, stride=2))
            setattr(self, 'poolAtt%d' % i,
                    torch.nn.MaxPool1d(kernel_size=1, stride=2))

        # Binary classifier head.
        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, 2)
Ejemplo n.º 11
0
    def __init__(self, input_size, kernels, depth, rate, shapes, device):
        """Graph encoder/decoder: SGAT blocks down, SAGEConv blocks up.

        Args:
            input_size: number of input node features.
            kernels: passed as the first argument of every SGAT block --
                TODO confirm its exact semantics (kernel count/size).
            depth: number of encoder levels.
            rate: per-level TopK pooling ratios (len >= depth).
            shapes: per-level channel widths (len >= depth).
            device: stored on the instance; modules are NOT moved to it here.
        """
        super(Net, self).__init__()
        size = kernels
        self.device = device
        self.depth = depth
        self.direction = 1
        self.down_list = torch.nn.ModuleList()
        self.up_list = torch.nn.ModuleList()
        self.pool_list = torch.nn.ModuleList()
        # encoder: SGAT(size, in, out) per level, TopK pooling between levels
        # and one extra pooling after the last conv.
        conv = SGAT(size, input_size, shapes[0])
        self.down_list.append(conv)
        for i in range(self.depth - 1):
            pool = TopKPooling(shapes[i], rate[i])
            self.pool_list.append(pool)
            conv = SGAT(size, shapes[i], shapes[i + 1])
            self.down_list.append(conv)
        pool = TopKPooling(shapes[-1], rate[-1])
        self.pool_list.append(pool)

        # decoder: SAGEConv layers mirror the encoder widths back down to
        # input_size.
        for i in range(self.depth - 1):
            conv = SAGEConv(shapes[self.depth - i - 1],
                            shapes[self.depth - i - 2])
            self.up_list.append(conv)
        conv = SAGEConv(shapes[0], input_size)
        self.up_list.append(conv)
Ejemplo n.º 12
0
    def __init__(self, num_features, num_classes):
        """GAT-based classifier: input batch-norm, three GATConv/BatchNorm/
        TopKPooling stages, and a batch-normalised MLP head."""
        super(TopKNet, self).__init__()
        self.name = "topknet"
        self.version = "v1"
        self.num_features = num_features
        self.num_classes = num_classes

        # Normalisation over the raw node features.
        self.bn1 = torch.nn.BatchNorm1d(num_features=num_features)

        # Stages 1..3: conv{i} -> bn{i+1} -> pool{i}; only the first conv
        # consumes the raw feature width.
        for i in (1, 2, 3):
            in_width = num_features if i == 1 else 256
            setattr(self, 'conv%d' % i, GATConv(in_width, 256))
            setattr(self, 'bn%d' % (i + 1),
                    torch.nn.BatchNorm1d(num_features=256))
            setattr(self, 'pool%d' % i, TopKPooling(256, ratio=0.8))

        # MLP head over a 512-dim graph readout.
        self.lin1 = torch.nn.Linear(512, 256)
        self.bn5 = torch.nn.BatchNorm1d(num_features=256)
        self.lin2 = torch.nn.Linear(256, 128)
        self.bn6 = torch.nn.BatchNorm1d(num_features=128)
        self.lin3 = torch.nn.Linear(128, num_classes)
Ejemplo n.º 13
0
 def __init__(self):  # Four GCNConv layers with TopK pooling, ending in one linear layer.
     super(Net, self).__init__()
     self.conv1 = GCNConv(5, 20)  # lift 5 node features to 20
     self.pool = TopKPooling(20, ratio=0.01)  # model layers (orig. comment: "LAG I MODELLEN", Norwegian)
     self.conv2 = GCNConv(20, 15)
     self.pool2 = TopKPooling(15, ratio=0.1)
     self.conv3 = GCNConv(15, 12)
     self.pool3 = TopKPooling(12, ratio=0.1)
     self.conv4 = GCNConv(12, 10)
     self.nn1 = torch.nn.Linear(10, 8)  # 8-dimensional output head
Ejemplo n.º 14
0
    def __init__(self):  # Pooling-only model: three TopK pooling stages and two linear layers.
        super(Net, self).__init__()
        # self.graph_unet = GraphUNet(5, 20, 124, 2, pool_ratios=0.01)  # disabled alternative
        self.pool1 = TopKPooling(5, ratio=0.01)
        self.nn1 = torch.nn.Linear(5, 64)  # model layers (orig. comment: "LAG I MODELLEN", Norwegian)
        self.pool2 = TopKPooling(64, ratio=0.1)
        self.pool3 = TopKPooling(64, ratio=0.1)
        # self.drop = Dropout(0.0001)  # disabled

        self.nn2 = torch.nn.Linear(64, 1)  # single-output head
    def __init__(self):
        """Two-stage GraphConv + TopKPooling feature extractor (14 -> 128)."""
        super(FeatureExtractor, self).__init__()

        self.num_features = 14
        self.nhid = 128

        # Two conv/pool stages; the first lifts the raw feature width.
        widths = (self.num_features, self.nhid, self.nhid)
        for i in (1, 2):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(self.nhid, ratio=0.8))
    def __init__(self):
        """Two-layer GCN with two linear heads, TopK pooling and batch norm.

        NOTE(review): the widths look inconsistent -- conv1 outputs 256 but
        conv2 expects 126 and pool1 expects 512; linear1 takes 256 while
        linear2 takes 126.  Whether forward() reshapes/concatenates to make
        these match cannot be determined here -- verify against forward().
        """
        super(Net7, self).__init__()

        self.conv1 = GCNConv(dataset.num_node_features, 256)
        self.conv2 = GCNConv(126, 62)
        self.linear1 = torch.nn.Linear(256, 11)
        self.linear2 = torch.nn.Linear(126, 11)
        self.pool1 = TopKPooling(512, ratio=0.5)
        self.pool2 = TopKPooling(62, ratio=0.5)
        self.bn1 = torch.nn.BatchNorm1d(256)
        self.bn2 = torch.nn.BatchNorm1d(62)
Ejemplo n.º 17
0
    def __init__(self):  # Pooling-only model: three TopK pooling stages and two linear layers.
        super(Net,
              self).__init__()
        self.pool1 = TopKPooling(5, ratio=0.01)
        self.nn1 = torch.nn.Linear(5, 64)  # model layers (orig. comment: "LAG I MODELLEN", Norwegian)
        self.pool2 = TopKPooling(64, ratio=0.1)
        self.pool3 = TopKPooling(
            64, ratio=0.1
        )

        self.nn2 = torch.nn.Linear(64, 3)  # 3-dimensional output head
Ejemplo n.º 18
0
    def __init__(self):
        """Two-layer GCN (dataset features -> 126 -> 62) with two linear
        heads and two TopK pooling layers.

        NOTE(review): linear1 expects 768 input features while the conv
        outputs are 126/62 wide -- presumably forward() flattens or
        concatenates node features; confirm against forward().
        """
        super(Net4, self).__init__()

        self.conv1 = GCNConv(
            dataset.num_node_features, 126
        )  # parameters - in_channel, out_channel, the other parameters are booleans
        self.conv2 = GCNConv(126, 62)
        self.linear1 = torch.nn.Linear(768, 11)
        self.linear2 = torch.nn.Linear(126, 11)
        self.pool1 = TopKPooling(126, ratio=0.5)
        self.pool2 = TopKPooling(62, ratio=0.5)
    def build_model(self):
        """Create three GraphConv/TopKPooling stages and the MLP head, sized
        from ``self.hparams``."""
        widths = (self.hparams.num_features, 128, 128, 128)
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        # Classifier head over a 256-dim graph readout.
        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, self.hparams.num_classes)
Ejemplo n.º 20
0
    def __init__(self, dim):
        """Three width-preserving GraphConv stages with 50% TopK pooling and
        a two-layer head over a 2*dim readout."""
        super().__init__()

        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GraphConv(dim, dim))
            setattr(self, 'pool%d' % i, TopKPooling(dim, ratio=0.5))

        self.lin1 = nn.Linear(2 * dim, dim)
        self.lin2 = nn.Linear(dim, dim // 2)
Ejemplo n.º 21
0
 def __init__(self):  # Recurrent graph model: TopK pooling interleaved with an RNN cell and linear layers.
     #        self.nn1   = torch.nn.RNN(5,64)  # disabled alternative layer
     super(Net, self).__init__()
     l1, l2, l3, l4 = 5, 64, 32, 8  # layer widths
     self.relu = torch.nn.ReLU(inplace=True)  # model layers (orig. comment: "LAG I MODELLEN", Norwegian)
     self.pool1 = TopKPooling(l1, ratio=0.01)
     self.nn1 = torch.nn.RNNCell(l1, l2)  # recurrent cell in place of a plain linear layer
     self.pool2 = TopKPooling(l2, ratio=0.1)
     self.nn2 = torch.nn.Linear(l2, l3)
     self.pool3 = TopKPooling(l3, ratio=0.1)
     self.nn3 = torch.nn.Linear(l3, l4)  # 8-dimensional output head
Ejemplo n.º 22
0
    def __init__(self, num_features, hidden, dropout, num_classes):
        """Two GraphConv/TopKPooling stages and a two-layer classifier.

        Args:
            num_features: input node feature width.
            hidden: hidden width of both conv stages.
            dropout: dropout probability, stored for use in forward().
            num_classes: number of output classes.
        """
        super(Net, self).__init__()

        self.dropout = dropout

        widths = (num_features, hidden, hidden)
        for i in (1, 2):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(hidden, ratio=0.8))

        # Head over a 2*hidden graph readout.
        self.lin1 = nn.Linear(hidden * 2, hidden)
        self.lin2 = nn.Linear(hidden, num_classes)
Ejemplo n.º 23
0
    def __init__(self, num_node_features, num_of_classes=2):
        """Three SAGEConv/TopKPooling stages with a three-layer MLP head."""
        super(Net_1, self).__init__()

        widths = (num_node_features, 128, 128, 128)
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, SAGEConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.5))

        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, num_of_classes)
Ejemplo n.º 24
0
    def __init__(self, config):
        """Multi-resolution RGCN graph classifier.

        Builds ``num_layers`` RGCN convolutions (widths either uniformly
        ``hidden_dim`` or taken from the comma-separated ``layer_spec``),
        optional per-layer pooling ("sagpool" or "topk"), an FC layer over
        the concatenated per-layer readouts, an optional LSTM + attention
        temporal head, and the final classifier.

        Args:
            config: object providing num_features, num_relations, nclass,
                num_layers, hidden_dim, layer_spec, lstm_input_dim,
                lstm_output_dim, conv_type, activation, pooling_type,
                pooling_ratio, readout_type, temporal_type, dropout, device.
        """
        super(MRGIN, self).__init__()
        self.num_features = config.num_features
        self.num_relations = config.num_relations
        self.num_classes = config.nclass
        self.num_layers = config.num_layers  # number of RGCN conv layers
        self.hidden_dim = config.hidden_dim
        # "a,b,c" -> [a, b, c]; overrides hidden_dim when given.
        # ("is None" instead of "== None": identity check, PEP 8.)
        self.layer_spec = None if config.layer_spec is None else list(
            map(int, config.layer_spec.split(',')))
        self.lstm_dim1 = config.lstm_input_dim
        self.lstm_dim2 = config.lstm_output_dim
        self.rgcn_func = FastRGCNConv if config.conv_type == "FastRGCNConv" else RGCNConv
        self.activation = F.relu if config.activation == 'relu' else F.leaky_relu
        self.pooling_type = config.pooling_type
        self.readout_type = config.readout_type
        self.temporal_type = config.temporal_type
        self.dropout = config.dropout
        # NOTE(review): plain Python lists do NOT register these modules'
        # parameters with this Module (torch.nn.ModuleList would); layers
        # are only moved to config.device manually via .to() below.
        self.conv = []
        self.pool = []
        total_dim = 0

        if self.layer_spec is None:
            # Uniform width: hidden_dim for every layer.
            for i in range(self.num_layers):
                if i == 0:
                    self.conv.append(self.rgcn_func(self.num_features, self.hidden_dim, self.num_relations).to(config.device))
                else:
                    self.conv.append(self.rgcn_func(self.hidden_dim, self.hidden_dim, self.num_relations).to(config.device))
                if self.pooling_type == "sagpool":
                    self.pool.append(RGCNSAGPooling(self.hidden_dim, self.num_relations, ratio=config.pooling_ratio, rgcn_func=config.conv_type).to(config.device))
                elif self.pooling_type == "topk":
                    self.pool.append(TopKPooling(self.hidden_dim, ratio=config.pooling_ratio).to(config.device))
                total_dim += self.hidden_dim
        else:
            print("using layer specification and ignoring hidden_dim parameter.")
            print("layer_spec: " + str(self.layer_spec))
            for i in range(self.num_layers):
                if i == 0:
                    self.conv.append(self.rgcn_func(self.num_features, self.layer_spec[0], self.num_relations).to(config.device))
                else:
                    self.conv.append(self.rgcn_func(self.layer_spec[i-1], self.layer_spec[i], self.num_relations).to(config.device))
                if self.pooling_type == "sagpool":
                    self.pool.append(RGCNSAGPooling(self.layer_spec[i], self.num_relations, ratio=config.pooling_ratio, rgcn_func=config.conv_type).to(config.device))
                elif self.pooling_type == "topk":
                    self.pool.append(TopKPooling(self.layer_spec[i], ratio=config.pooling_ratio).to(config.device))
                total_dim += self.layer_spec[i]

        # FC layer over the concatenation of all per-layer readouts.
        self.fc1 = Linear(total_dim, self.lstm_dim1)

        if "lstm" in self.temporal_type:
            self.lstm = LSTM(self.lstm_dim1, self.lstm_dim2, batch_first=True)
            self.attn = Attention(self.lstm_dim2)

        self.fc2 = Linear(self.lstm_dim2, self.num_classes)
Ejemplo n.º 25
0
    def __init__(self):
        """Three GraphConv/TopKPooling stages sized from the module-level
        ``dataset``, with a three-layer MLP head."""
        super(TopKPool, self).__init__()

        widths = (dataset.num_features, 128, 128, 128)
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, dataset.num_classes)
Ejemplo n.º 26
0
    def __init__(self, graph_w=64, lin_w1=32, lin_w2=8):
        """Three GraphConv stages (2 -> graph_w) with TopK pooling and a
        three-layer head ending in 3 outputs."""
        super(Net, self).__init__()

        # Per-stage (input width, pooling ratio); stage 1 pools harder.
        stages = ((2, 0.5), (graph_w, 0.8), (graph_w, 0.8))
        for i, (w_in, keep) in enumerate(stages, start=1):
            setattr(self, 'conv%d' % i, GraphConv(w_in, graph_w))
            setattr(self, 'pool%d' % i, TopKPooling(graph_w, ratio=keep))

        self.lin1 = torch.nn.Linear(2 * graph_w, lin_w1)
        self.lin2 = torch.nn.Linear(lin_w1, lin_w2)
        self.lin3 = torch.nn.Linear(lin_w2, 3)
Ejemplo n.º 27
0
    def __init__(self, num_features, num_classes):
        """Three GraphConv/TopKPooling stages and an MLP head; the first
        conv's weights are re-initialised from a standard normal."""
        super(Net, self).__init__()

        self.conv1 = GraphConv(num_features, 128)
        self.conv1.weight.data.normal_()  # overwrite the default init
        self.pool1 = TopKPooling(128, ratio=0.8)
        for i in (2, 3):
            setattr(self, 'conv%d' % i, GraphConv(128, 128))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, num_classes)
Ejemplo n.º 28
0
    def __init__(self, datasetroot, width):
        """Three GraphConv/TopKPooling stages with an MLP head whose hidden
        widths come from ``width[1]`` and ``width[2]``."""
        super(topk_pool_Net, self).__init__()

        widths = (datasetroot.num_features, 128, 128, 128)
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(128, ratio=0.8))

        self.lin1 = torch.nn.Linear(256, width[1])
        self.lin2 = torch.nn.Linear(width[1], width[2])
        self.lin3 = torch.nn.Linear(width[2], datasetroot.num_classes)
Ejemplo n.º 29
0
    def __init__(self):
        """Shrinking GCN pyramid (1 -> 30 -> 20 -> 10 features) with
        progressively harsher TopK pooling and a single-output head.

        NOTE(review): lin1 expects 1380 input features -- presumably a
        flattened fixed node count after pooling; confirm against forward().
        """
        super(Net, self).__init__()

        self.conv1 = GCNConv(1, 30)
        self.pool1 = TopKPooling(30, ratio=0.4)
        self.conv2 = GCNConv(30, 20)
        self.pool2 = TopKPooling(20, ratio=0.2)
        self.conv3 = GCNConv(20, 10)
        self.pool3 = TopKPooling(10, ratio=0.1)

        self.lin1 = torch.nn.Linear(1380, 80)
        self.lin2 = torch.nn.Linear(80, 64)
        self.lin3 = torch.nn.Linear(64, 1)  # single output (a "2" variant was left commented in the original)
Ejemplo n.º 30
0
    def __init__(self):
        """Three 32-wide GraphConv stages with aggressive (30%) TopK pooling
        and a two-layer head (no lin2 is defined)."""
        super().__init__()

        widths = (dataset.num_features, 32, 32, 32)
        for i in (1, 2, 3):
            setattr(self, 'conv%d' % i, GraphConv(widths[i - 1], widths[i]))
            setattr(self, 'pool%d' % i, TopKPooling(32, ratio=0.3))

        self.lin1 = torch.nn.Linear(64, 32)
        self.lin3 = torch.nn.Linear(32, dataset.num_classes)