Example #1
    def __init__(self,
                 raw_feature_size,
                 gcn_hidden_layer_sizes=[16, 8],
                 nn_hidden_layer_sizes=[128, 8]):
        super(GCN, self).__init__()

        r0 = raw_feature_size
        r1, r2 = gcn_hidden_layer_sizes
        n1, n2 = nn_hidden_layer_sizes

        # Define the GCN layers
        self.gcn1 = Conv(r0, r1, aggr=aggregation_function)
        self.gcn2 = Conv(r1, r2, aggr=aggregation_function)
        # self.gcn3 = Conv(r2, r3, aggr=aggregation_function)
        # self.gcn4 = Conv(r3, r4, aggr=aggregation_function)

        self.batchnorm1 = BatchNorm(r1)
        self.batchnorm2 = BatchNorm(r2)
        # self.batchnorm3 = BatchNorm(r3)
        # self.batchnorm4 = BatchNorm(r4)

        # Define the NN layers that predict the attractiveness value for every node
        self.nn_linear = nn.Sequential(
            linear_block(r2, n1),
            linear_block(n1, n2),
            # linear_block(n2, n3),
            linear_block(n2, 1, activation=None),
        )

        # self.activation = nn.Softplus()
        # self.activation = F.relu
        self.activation = nn.LeakyReLU()
        # self.activation = nn.Sigmoid()

        self.dropout = F.dropout
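
This snippet relies on module-level names (Conv, aggregation_function) and a linear_block helper that are not shown. A minimal sketch of what such a helper typically looks like, assuming it pairs a Linear layer with an optional activation; the signature is taken from the calls above, the body and the default activation are assumptions:

import torch.nn as nn

def linear_block(in_features, out_features, activation=nn.LeakyReLU):
    # Hypothetical helper: a Linear layer plus an optional activation,
    # matching calls like linear_block(r2, n1) and
    # linear_block(n2, 1, activation=None) above.
    layers = [nn.Linear(in_features, out_features)]
    if activation is not None:
        layers.append(activation())
    return nn.Sequential(*layers)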
Example #2
    def __init__(self, in_channels, number_hidden_layers, aggr, hidden_out_channel, out_channel, pool_layer, k=1):
        super(GCN_Net, self).__init__()
        self.in_channels = in_channels
        self.number_hidden_layers = number_hidden_layers #number of hidden GraphConv layers
        self.aggr = aggr # "add", "mean" or "max"
        self.pool_layer = pool_layer # 'add', 'max', 'mean' or 'sort'
        self.hidden_out_channel = hidden_out_channel
        self.out_channel = out_channel
        self.atom_encoder = AtomEncoder(emb_dim=self.in_channels)
        self.k = k

        
        self.graph_conv_list = nn.ModuleList()
        self.graph_conv_list.append(GraphConv(in_channels=self.in_channels, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.batchnorm = BatchNorm(in_channels=self.hidden_out_channel)

        # range() is already empty when number_hidden_layers == 0, so no guard is needed.
        for _ in range(self.number_hidden_layers):
            self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.out_channel, aggr=self.aggr))

        self.linear1 = nn.Linear(self.k * self.out_channel, 16)
        self.linear2 = nn.Linear(16, 1)
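
The k * out_channel input to linear1 suggests a sort-pooling readout. A hedged sketch, not part of the source, of how the forward pass might end, assuming pool_layer='sort' maps to global_sort_pool:

import torch.nn.functional as F
from torch_geometric.nn import global_sort_pool

def readout(self, x, batch):
    # x: [num_nodes, out_channel] after the final GraphConv;
    # batch: graph-assignment vector from a torch_geometric Batch.
    x = global_sort_pool(x, batch, k=self.k)  # -> [num_graphs, k * out_channel]
    x = F.relu(self.linear1(x))
    return self.linear2(x)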
Example #3
        def __init__(self):
            super(Discriminator1, self).__init__()
            nn = Sequential(Linear(2, (nbr_of_regions * nbr_of_regions)),
                            ReLU())
            self.conv1 = NNConv(nbr_of_regions,
                                nbr_of_regions,
                                nn,
                                aggr='mean',
                                root_weight=True,
                                bias=True)
            self.conv11 = BatchNorm(nbr_of_regions,
                                    eps=1e-03,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)

            nn = Sequential(Linear(2, nbr_of_regions), ReLU())
            self.conv2 = NNConv(nbr_of_regions,
                                1,
                                nn,
                                aggr='mean',
                                root_weight=True,
                                bias=True)
            self.conv22 = BatchNorm(1,
                                    eps=1e-03,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)
Example #4
    def __init__(self,
                 dim_input,
                 dim_embedding,
                 dim_values,
                 dim_hidden,
                 n_heads,
                 n_att_layers,
                 n_pool,
                 K,
                 alpha,
                 p,
                 weighted=False):
        super(DQN, self).__init__()

        self.weighted = weighted
        self.node_encoder = NodeEncoder(dim_input, n_heads, n_att_layers,
                                        dim_embedding, dim_values, dim_hidden,
                                        K, alpha, weighted)
        self.context_encoder = ContextEncoder(n_pool, dim_embedding,
                                              dim_hidden, weighted)
        # Score for each node
        if weighted:
            dim_context = dim_embedding * n_pool + 8
            first_dim = dim_context + dim_embedding + 2
        else:
            dim_context = dim_embedding * n_pool + 7
            first_dim = dim_context + dim_embedding + 1
        self.lin1 = nn.Linear(first_dim, dim_hidden)
        self.BN1 = BatchNorm(dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, dim_embedding)
        self.BN2 = BatchNorm(dim_embedding)
        self.lin3 = nn.Linear(dim_embedding, 1)
        # dropout
        self.dropout = nn.Dropout(p=p)
Example #5
 def __init__(self, num_in_feature, num_hidden_feature):
     super(GraphResidualBlock, self).__init__()
     self.AL = AdjacencyLearning(num_in_feature, num_hidden_feature)
     self.GNN_0 = GNN(num_in_feature, num_hidden_feature)
     self.GNN_1 = GNN(num_hidden_feature, num_in_feature)
     self.batch_norm_0 = BatchNorm(num_hidden_feature)
     self.batch_norm_1 = BatchNorm(num_in_feature)
Example #6
    def __init__(self):
        super(Discriminator, self).__init__()
        lin = Sequential(Linear(2, 1225), ReLU())
        self.conv1 = NNConv(35,
                            35,
                            lin,
                            aggr='mean',
                            root_weight=True,
                            bias=True)
        self.conv11 = BatchNorm(35,
                                eps=1e-03,
                                momentum=0.1,
                                affine=True,
                                track_running_stats=True)

        lin = Sequential(Linear(2, 35), ReLU())
        self.conv2 = NNConv(35,
                            1,
                            lin,
                            aggr='mean',
                            root_weight=True,
                            bias=True)
        self.conv22 = BatchNorm(1,
                                eps=1e-03,
                                momentum=0.1,
                                affine=True,
                                track_running_stats=True)
Example #7
def test_batch_norm(conf):
    x = torch.randn(100, 16)

    norm = BatchNorm(16, affine=conf, track_running_stats=conf)
    assert norm.__repr__() == 'BatchNorm(16)'
    torch.jit.script(norm)
    out = norm(x)
    assert out.size() == (100, 16)
Example #8
 def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
     super(Net, self).__init__()
     self.convs = torch.nn.ModuleList()
     self.batch_norms = torch.nn.ModuleList()
     self.convs.append(SAGEConv(in_channels, hidden_channels))
     self.batch_norms.append(BatchNorm(hidden_channels))
     for _ in range(num_layers - 2):
         self.convs.append(SAGEConv(hidden_channels, hidden_channels))
         self.batch_norms.append(BatchNorm(hidden_channels))
     self.convs.append(SAGEConv(hidden_channels, out_channels))
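
The convs/batch_norms lists in this pattern are usually consumed in lockstep. A minimal sketch of the matching forward pass, which is an assumption and not shown in the source:

import torch.nn.functional as F

def forward(self, x, edge_index):
    # convs holds num_layers SAGEConvs but batch_norms holds num_layers - 1
    # BatchNorms, so the final conv is applied without normalization.
    for conv, batch_norm in zip(self.convs[:-1], self.batch_norms):
        x = F.relu(batch_norm(conv(x, edge_index)))
    return self.convs[-1](x, edge_index)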
Example #9
def test_batch_norm():
    norm = BatchNorm(16)
    assert norm.__repr__() == (
        'BatchNorm(16, eps=1e-05, momentum=0.1, affine=True, '
        'track_running_stats=True)')
    out = norm(torch.randn(100, 16))
    assert out.size() == (100, 16)

    norm = BatchNorm(16, affine=False, track_running_stats=False)
    out = norm(torch.randn(100, 16))
    assert out.size() == (100, 16)
Example #10
 def __init__(self, hidden_channels):
     super(GCN2, self).__init__()
     self.batchn1 = BatchNorm(dataset.num_node_features)
     self.conv1 = GraphConv(dataset.num_node_features, hidden_channels)
     self.batchn2 = BatchNorm(hidden_channels)
     self.conv2 = GraphConv(hidden_channels, hidden_channels)
     self.batchn3 = BatchNorm(hidden_channels)
     self.conv3 = GraphConv(hidden_channels, hidden_channels)
     self.batchn4 = BatchNorm(hidden_channels)
     self.conv4 = GraphConv(hidden_channels, hidden_channels)
     self.lin = Linear(hidden_channels, dataset.num_classes)
Example #11
    def __init__(self, pretrained=False, in_channel=256, out_channel=10):
        super(MRF_GCN, self).__init__()
        if pretrained:
            warnings.warn("Pretrained model is not available")

        self.atrr = GGL()
        self.conv1 = MultiChev(in_channel)
        self.bn1 = BatchNorm(1200)
        self.conv2 = MultiChev_B(400 * 3)
        self.bn2 = BatchNorm(300)
        self.layer5 = nn.Sequential(nn.Linear(300, 256), nn.ReLU(inplace=True),
                                    nn.Dropout())
Example #12
    def __init__(self, feature, out_channel):
        super(ChebyNet, self).__init__()

        self.GConv1 = ChebConv(feature,1024,K=1)
        self.bn1 = BatchNorm(1024)

        self.GConv2 = ChebConv(1024,1024,K=1)
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #13
    def __init__(self, feature, out_channel):
        super(GIN, self).__init__()

        self.GConv1 = GINConv(Seq(Lin(feature, 1024), ReLU(), Lin(1024, 1024)))
        self.bn1 = BatchNorm(1024)

        self.GConv2 = GINConv(Seq(Lin(1024, 1024), ReLU(), Lin(1024, 1024)))
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #14
    def __init__(self, feature, out_channel, pooltype):
        super(GAT, self).__init__()

        self.pool1, self.pool2 = self.poollayer(pooltype)
        self.GConv1 = GATConv(feature, 1024)
        self.bn1 = BatchNorm(1024)

        self.GConv2 = GATConv(1024, 1024)
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #15
    def __init__(self, in_feats):
        super(Net, self).__init__()

        hs_1 = in_feats * 2
        self.conv1 = SAGEConv(in_feats, hs_1)
        self.bn1 = BatchNorm(hs_1)
        self.pool1 = SAGPooling(hs_1, ratio=0.5)

        hs_2 = int(hs_1 * 2)
        self.conv2 = SAGEConv(hs_1, hs_2)
        self.bn2 = BatchNorm(hs_2)
        self.pool2 = SAGPooling(hs_2, ratio=0.5)

        num_classes = 2
        self.lin1 = Linear(hs_2, num_classes).cuda()
Example #16
    def __init__(self):
        super().__init__()

        self.node_emb = Embedding(21, 75)
        self.edge_emb = Embedding(4, 50)

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(4):
            conv = PNAConv(in_channels=75,
                           out_channels=75,
                           aggregators=aggregators,
                           scalers=scalers,
                           deg=deg,
                           edge_dim=50,
                           towers=5,
                           pre_layers=1,
                           post_layers=1,
                           divide_input=False)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(75))

        self.mlp = Sequential(Linear(75, 50), ReLU(), Linear(50, 25), ReLU(),
                              Linear(25, 1))
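
PNAConv requires the deg tensor referenced above: an in-degree histogram of the training graphs. A sketch of the usual way to build it, where train_dataset is an assumed placeholder for the dataset at hand:

import torch
from torch_geometric.utils import degree

# Scan once for the maximum in-degree, then accumulate the histogram.
max_degree = -1
for data in train_dataset:
    d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)
    max_degree = max(max_degree, int(d.max()))

deg = torch.zeros(max_degree + 1, dtype=torch.long)
for data in train_dataset:
    d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)
    deg += torch.bincount(d, minlength=deg.numel())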
Example #17
	def __init__(self, in_channels, out_channels, args, aggr="add"):
		super(GraphConvolution, self).__init__(aggr=aggr)
		self.args = args
		self.lin_node = torch.nn.Linear(in_channels, out_channels)
		self.lin_message = torch.nn.Linear(out_channels * 2, out_channels)
		self.lin_passing = torch.nn.Linear(out_channels + in_channels, out_channels)
		self.batch_norm = BatchNorm(out_channels)
Example #18
    def __init__(self,
                 emb_dim,
                 hidden_dim,
                 rank_dim,
                 n_layers,
                 dropout):
        super(Net, self).__init__()

        self.node_emb = Embedding(21, emb_dim)
        self.edge_emb = Embedding(4, emb_dim)
        self.n_layers = n_layers
        self.dropout = dropout

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']

        self.convs = ModuleList()
        self.pool = graph_cp_pooling(hidden_dim)
        self.batch_norms = ModuleList()
        for _ in range(n_layers):
            #conv = PNAConv(in_channels=75, out_channels=75,
            #               aggregators=aggregators, scalers=scalers, deg=deg,
            #               edge_dim=50, towers=5, pre_layers=1, post_layers=1,
            #               divide_input=False)
            conv = GCNConv(emb_dim=emb_dim, hidden_dim=hidden_dim, rank_dim=rank_dim)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(hidden_dim))

        self.mlp = Sequential(Linear(hidden_dim, 50), ReLU(), Linear(50, 25), ReLU(),
                              Linear(25, 1))
Example #19
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.save_hyperparameters()

        kwargs = self.sanetize_kwargs(kwargs)

        self.node_emb = Embedding(kwargs["node_vocab"], kwargs["node_dim"])
        self.edge_emb = Embedding(kwargs["edge_vocab"], kwargs["edge_dim"])

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(kwargs["num_layers"]):
            conv = PNAConv(
                in_channels=kwargs["node_dim"],
                out_channels=kwargs["node_dim"],
                aggregators=kwargs["aggregators"],
                scalers=kwargs["scalers"],
                deg=torch.tensor(kwargs["deg"]),
                edge_dim=kwargs["edge_dim"],
                towers=kwargs["towers"],
                pre_layers=kwargs["pre_layers"],
                post_layers=kwargs["post_layers"],
                divide_input=kwargs["divide_input"],
            )
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(kwargs["node_dim"]))

        self.mlp = Sequential(
            Linear(kwargs["node_dim"], kwargs["edge_dim"]),
            ReLU(),
            Linear(kwargs["edge_dim"], kwargs["hidden_channels"]),
            ReLU(),
            Linear(kwargs["hidden_channels"], kwargs["num_classes"]),
        )
Example #20
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(126755)

        self.bnorm = BatchNorm(4)
        self.conv0 = GCNConv(4, hidden_channels)
        self.convs = torch.nn.ModuleList([
            GCNConv(hidden_channels, hidden_channels)
            for _ in range(1, hyperp['gcn_layers'])
        ])
        self.bnorm1 = BatchNorm(hidden_channels)
        self.lins = torch.nn.ModuleList([
            Linear(hidden_channels, hidden_channels)
            for _ in range(hyperp['hidden_d_layers'])
        ])
        self.out = Linear(hidden_channels, 1)
Example #21
    def __init__(self, n_heads, dim_embedding, dim_values, dim_hidden):
        super(AttentionLayer, self).__init__()

        self.n_heads = n_heads
        self.dim_values = dim_values

        self.GAT = GATConv(dim_embedding,
                           dim_values,
                           heads=n_heads,
                           concat=True,
                           bias=False)
        self.lin1 = nn.Linear(dim_values, dim_embedding, bias=False)
        self.BN1 = BatchNorm(dim_embedding)
        self.lin2 = nn.Linear(dim_embedding, dim_hidden)
        self.lin3 = nn.Linear(dim_hidden, dim_embedding)
        self.BN2 = BatchNorm(dim_embedding)
Example #22
    def __init__(self,
                 in_channels,
                 number_hidden_layers,
                 aggr,
                 hidden_out_channel,
                 out_channel,
                 pool_layer,
                 k=1,
                 device=None):
        super(InceptionNet, self).__init__()
        self.pool_layer = pool_layer  # 'add', 'max', 'mean' or 'sort'
        self.device = device
        self.k = k
        self.atom_encoder = AtomEncoder(emb_dim=in_channels)
        self.batchnorm = BatchNorm(in_channels=2 * hidden_out_channel)

        self.rgcn_list = torch.nn.ModuleList()
        self.graphconv_list = torch.nn.ModuleList()
        self.rgcn_list.append(
            FastRGCNConv(in_channels=in_channels,
                         out_channels=hidden_out_channel,
                         num_relations=NUM_RELATIONS))
        self.graphconv_list.append(
            GraphConv(in_channels=in_channels,
                      out_channels=hidden_out_channel))

        # range() is already empty when number_hidden_layers == 0, so no guard is needed.
        for _ in range(number_hidden_layers):
            self.rgcn_list.append(
                FastRGCNConv(in_channels=2 * hidden_out_channel,
                             out_channels=hidden_out_channel,
                             num_relations=NUM_RELATIONS))
            self.graphconv_list.append(
                GraphConv(in_channels=2 * hidden_out_channel,
                          out_channels=hidden_out_channel))

        self.rgcn_list.append(
            FastRGCNConv(in_channels=2 * hidden_out_channel,
                         out_channels=out_channel,
                         num_relations=NUM_RELATIONS))
        self.graphconv_list.append(
            GraphConv(in_channels=2 * hidden_out_channel,
                      out_channels=out_channel))

        self.linear1 = nn.Linear(2 * k * out_channel, 16)
        self.linear2 = nn.Linear(16, 1)
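
The 2 * hidden_out_channel input widths imply that each level concatenates the FastRGCNConv and GraphConv branches. A hedged one-step sketch of that inception-style forward, assumed rather than taken from the source:

import torch

def inception_step(self, x, edge_index, edge_type, i):
    # Run both branches on the same input and concatenate along the feature
    # dimension, producing the 2 * hidden_out_channel width the next level expects.
    x_rgcn = self.rgcn_list[i](x, edge_index, edge_type)
    x_conv = self.graphconv_list[i](x, edge_index)
    return torch.cat([x_rgcn, x_conv], dim=-1)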
Example #23
 def __init__(self, in_features, out_features, heads=4):
     super(FourConvPoolBlock, self).__init__()
     self.conv1 = FeaStConv(in_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, out_features, heads=heads)
     self.batch = BatchNorm(out_features)
     self.pool = TopKPooling(16)
Example #24
 def __init__(self, paths, n_features=4, lin2=4, heads=4):
     super(PretrainedBlocks, self).__init__()
     self.blocks = torch.nn.ModuleList(
         [ThreeConvBlock(n_features, lin2, heads) for _ in paths])
     self.batches = torch.nn.ModuleList(
         [BatchNorm(lin2) for _ in range(len(paths) - 1)])
     self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     for block, path in zip(self.blocks, paths):
         # load_state_dict mutates the module in place and returns a
         # key-mismatch report, so its result must not be assigned back.
         block.load_state_dict(torch.load(path, map_location=self.device))
Example #25
 def __init__(self, norm_type, in_channels):
     super(NormLayer, self).__init__()
     if norm_type == 'bn':
         self.norm = BatchNorm(in_channels)
     elif norm_type == 'ln':
         self.norm = LayerNorm(in_channels)
     elif norm_type == 'in':
         self.norm = InstanceNorm(in_channels)
     else:
         self.norm = NoNorm(in_channels)
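
NoNorm is not a torch_geometric layer; it is presumably a pass-through used when no normalization is wanted. A minimal identity stand-in, offered as an assumption:

import torch

class NoNorm(torch.nn.Module):
    # Identity stand-in with the same constructor signature as the real norms.
    def __init__(self, in_channels):
        super().__init__()

    def forward(self, x):
        return x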
Example #26
 def __init__(self, in_feats, hidden_size, hidden_size1, hidden_size2, hidden_size3, num_classes, conv):
     super(GCN, self).__init__()
     self.conv1 = conv(in_feats, hidden_size)
     self.bn1 = BatchNorm(hidden_size)
     self.conv2 = conv(hidden_size, hidden_size1)
     self.bn2 = BatchNorm(hidden_size1)
     self.conv3 = conv(hidden_size1, hidden_size2)
     self.bn3 = BatchNorm(hidden_size2)
     self.conv4 = conv(hidden_size2, hidden_size3)
     self.bn4 = BatchNorm(hidden_size3)
     self.conv5 = conv(hidden_size3, num_classes)
     self.bn5 = BatchNorm(num_classes)
     x = 60
     self.encoder = nn.Sequential(
         nn.Conv2d(1, x, (3, 5)),
         nn.LeakyReLU(),
         nn.Dropout2d(),
         nn.Conv2d(x, 1, (3, 1))
     )
Example #27
 def __init__(self, n_features, heads=4, dropout=True):
     super(ThreeConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 32, heads=heads)
     self.conv3 = FeaStConv(32, 64, heads=heads)
     self.batch = BatchNorm(64)
     self.lin1 = Linear(64, 32)
     self.lin2 = Linear(32, 16)
     self.lin3 = Linear(16, 8)
     self.lin4 = Linear(8, 4)
     self.out = Linear(4, 1)
Example #28
 def forward_single(self, data):
     x = self.gc1(data['x'],
                  edge_index=data['edge_index'],
                  edge_weight=data['edge_attr'])
     # The original constructed BatchNorm(16) here and discarded it; a norm
     # layer must be registered in __init__ and applied to x to have any effect.
     x = F.relu(x)
     x = F.dropout(x, self.dropout, training=self.training)
     x = self.gc4(x,
                  edge_index=data['edge_index'],
                  edge_weight=data['edge_attr'])
     return x
Example #29
 def __init__(self, in_features, out_features, heads=4):
     super(FourConvBlock, self).__init__()
     self.conv1 = FeaStConv(in_features, 4, heads=heads)
     # kaiming_normal_ expects a tensor, not a module; initialize each conv's
     # weight parameter instead (assuming FeaStConv exposes a .weight tensor).
     torch.nn.init.kaiming_normal_(self.conv1.weight, nonlinearity='relu')
     self.conv2 = FeaStConv(4, 4, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv2.weight, nonlinearity='relu')
     self.conv3 = FeaStConv(4, 4, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv3.weight, nonlinearity='relu')
     self.conv4 = FeaStConv(4, out_features, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv4.weight, nonlinearity='relu')
     self.batch = BatchNorm(out_features)
Example #30
        def __init__(self):
            super(Generator, self).__init__()

            nn = Sequential(Linear(1, 1225), ReLU())
            self.conv1 = NNConv(35,
                                35,
                                nn,
                                aggr='mean',
                                root_weight=True,
                                bias=True)
            self.conv11 = BatchNorm(35,
                                    eps=1e-03,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)

            nn = Sequential(Linear(1, 35), ReLU())
            self.conv2 = NNConv(35,
                                1,
                                nn,
                                aggr='mean',
                                root_weight=True,
                                bias=True)
            self.conv22 = BatchNorm(1,
                                    eps=1e-03,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)

            nn = Sequential(Linear(1, 35), ReLU())
            self.conv3 = NNConv(1,
                                35,
                                nn,
                                aggr='mean',
                                root_weight=True,
                                bias=True)
            self.conv33 = BatchNorm(35,
                                    eps=1e-03,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)