Example #1
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
        super(ChebGCN2, self).__init__()
        chebgcn_para1 = 2  # Chebyshev filter size K
        self.conv1 = ChebConv(nfeat, nhid, chebgcn_para1)
        self.conv2 = ChebConv(nhid, nclass, chebgcn_para1)

        self.dropout_p = dropout
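
Only the constructor is shown above; a matching forward pass for this two-layer pattern would plausibly look like the sketch below. The ReLU/dropout/log-softmax ordering is an assumption (the standard two-layer GCN recipe), not part of the original snippet, and it relies on torch.nn.functional imported as F.

    def forward(self, x, edge_index):
        # first Chebyshev convolution, then a non-linearity
        x = F.relu(self.conv1(x, edge_index))
        # dropout with the probability stored in __init__, active only in training
        x = F.dropout(x, p=self.dropout_p, training=self.training)
        # second convolution maps hidden features to class scores
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)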
Example #2
    def __init__(self,
                 n_features,
                 n_labels,
                 classification=False,
                 width=128,
                 conv_depth=3,
                 point_depth=3,
                 lin_depth=5):
        super(ChebConvNet, self).__init__()
        self.classification = classification
        self.n_features = n_features
        self.n_labels = n_labels
        self.lin_depth = lin_depth
        self.conv_depth = conv_depth
        self.width = width
        n_intermediate = self.width
        n_intermediate2 = 2 * self.conv_depth * n_intermediate

        self.conv1 = ChebConv(self.n_features, n_intermediate, 2)
        self.convfkt = torch.nn.ModuleList([
            ChebConv(n_intermediate, n_intermediate, 2)
            for i in range(self.conv_depth - 1)
        ])

        self.batchnorm1 = BatchNorm1d(n_intermediate2)
        self.linearfkt = torch.nn.ModuleList([
            torch.nn.Linear(n_intermediate2, n_intermediate2)
            for i in range(self.lin_depth)
        ])
        self.drop = torch.nn.ModuleList(
            [torch.nn.Dropout(.3) for i in range(self.lin_depth)])
        self.out = torch.nn.Linear(n_intermediate2, self.n_labels)
Example #3
    def __init__(self,
                 nfeat,
                 nhid,
                 nclass,
                 num_hops=3,
                 dropout=0.5,
                 lr=0.01,
                 weight_decay=5e-4,
                 with_bias=True,
                 device=None):

        super(ChebNet, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device

        self.conv1 = ChebConv(nfeat, nhid, K=num_hops, bias=with_bias)

        self.conv2 = ChebConv(nhid, nclass, K=num_hops, bias=with_bias)

        self.dropout = dropout
        self.weight_decay = weight_decay
        self.lr = lr
        self.output = None
        self.best_model = None
        self.best_output = None
Example #4
    def __init__(self, num_features, num_classes):
        super(ChebNet, self).__init__()
        self.conv1 = ChebConv(num_features, 16, K=2)
        self.conv2 = ChebConv(16, num_classes, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()
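
The reg_params/non_reg_params split is typically consumed by an optimizer with two parameter groups, so that L2 weight decay is applied only to the first layer, as in the official PyTorch Geometric examples. A minimal usage sketch (the lr and weight_decay values are assumptions):

model = ChebNet(dataset.num_features, dataset.num_classes)
optimizer = torch.optim.Adam([
    # regularize only the first convolution's parameters
    dict(params=model.reg_params, weight_decay=5e-4),
    # leave the output layer undecayed
    dict(params=model.non_reg_params, weight_decay=0),
], lr=0.01)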
Example #5
    def __init__(self,
                 num_nodes,
                 learn_edge_weight,
                 edge_weight,
                 num_features,
                 num_hiddens,
                 num_classes,
                 K,
                 dropout=0.5):
        super(EEGNet, self).__init__()
        self.num_nodes = num_nodes
        self.num_features = num_features
        self.num_hiddens = num_hiddens
        self.xs, self.ys = torch.tril_indices(self.num_nodes,
                                              self.num_nodes,
                                              offset=0)
        edge_weight = edge_weight.reshape(
            self.num_nodes,
            self.num_nodes)[self.xs, self.ys]  # lower-triangular values (offset=0 keeps the diagonal)
        # edge_weight_gconv = torch.zeros(self.num_hiddens,1)
        # self.edge_weight_gconv = nn.Parameter(edge_weight_gconv, requires_grad=True)
        # nn.init.xavier_uniform_(self.edge_weight_gconv)
        self.edge_weight = nn.Parameter(edge_weight,
                                        requires_grad=learn_edge_weight)
        self.dropout = dropout
        self.chebconv_single = ChebConv(num_features, 1, K, node_dim=0)
        self.chebconv0 = ChebConv(num_features, num_hiddens[0], K, node_dim=0)
        self.chebconv1 = ChebConv(num_hiddens[0], 1, K, node_dim=0)

        # self.fc1 = nn.Linear(num_nodes, num_hiddens)
        self.fc2 = nn.Linear(num_nodes, num_classes)
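
Because only the lower-triangular entries are stored as a parameter, the forward pass has to rebuild the full symmetric weight matrix before it can be passed to ChebConv. A sketch of that reconstruction under the same xs/ys indexing (this helper is an assumption; it does not appear in the snippet above):

    def get_dense_edge_weight(self):
        # scatter the learned triangular values back into a dense matrix
        w = torch.zeros(self.num_nodes, self.num_nodes,
                        device=self.edge_weight.device)
        w[self.xs, self.ys] = self.edge_weight
        # symmetrize; subtract the diagonal once so it is not counted twice
        return w + w.t() - torch.diag(w.diagonal())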
Example #6
File: A3C.py Project: link-kut/or
    def __init__(self, chev_conv_state_dim, action_dim):
        super(A3C_Model, self).__init__()
        self.substrate_state = 0
        self.substrate_edge_index = 0
        self.v_cpu_demand_t = 0
        self.v_bw_demand_t = 0
        self.num_pending_v_nodes_t = 0

        self.actor_conv = ChebConv(in_channels=chev_conv_state_dim,
                                   out_channels=3,
                                   K=3)
        self.critic_conv = ChebConv(in_channels=chev_conv_state_dim,
                                    out_channels=3,
                                    K=3)

        self.actor_vnr_1_fc = nn.Linear(1, 3)
        self.actor_vnr_2_fc = nn.Linear(1, 3)
        self.actor_vnr_3_fc = nn.Linear(1, 3)

        self.critic_vnr_1_fc = nn.Linear(1, 3)
        self.critic_vnr_2_fc = nn.Linear(1, 3)
        self.critic_vnr_3_fc = nn.Linear(1, 3)

        self.actor_fc = nn.Linear((config.SUBSTRATE_NODES + 3) * 3, action_dim)
        self.critic_fc = nn.Linear((config.SUBSTRATE_NODES + 3) * 3, 1)

        set_init([
            self.actor_conv, self.critic_conv, self.actor_vnr_1_fc,
            self.actor_vnr_2_fc, self.actor_vnr_3_fc, self.critic_vnr_1_fc,
            self.critic_vnr_2_fc, self.critic_vnr_3_fc, self.actor_fc,
            self.critic_fc
        ])
        self.distribution = torch.distributions.Categorical
Example #7
    def __init__(self, name='GCNConv'):
        super(Net, self).__init__()
        self.name = name
        if name == 'GCNConv':
            self.conv1 = GCNConv(dataset.num_features, 128)
            self.conv2 = GCNConv(128, 64)
        elif name == 'ChebConv':
            self.conv1 = ChebConv(dataset.num_features, 128, K=2)
            self.conv2 = ChebConv(128, 64, K=2)
        elif name == 'GATConv':
            self.conv1 = GATConv(dataset.num_features, 128)
            self.conv2 = GATConv(128, 64)
        elif name == 'GINConv':
            nn1 = Sequential(Linear(dataset.num_features, 128), ReLU(),
                             Linear(128, 64))
            self.conv1 = GINConv(nn1)
            self.bn1 = torch.nn.BatchNorm1d(64)
            nn2 = Sequential(Linear(64, 64), ReLU(), Linear(64, 64))
            self.conv2 = GINConv(nn2)
            self.bn2 = torch.nn.BatchNorm1d(64)

        self.attr = GCNConv(64,
                            dataset.num_classes,
                            cached=True,
                            normalize=not args.use_gdc)

        self.attack = GCNConv(64,
                              dataset.num_classes,
                              cached=True,
                              normalize=not args.use_gdc)
        self.reverse = GradientReversalLayer()
Example #8
    def __init__(self, num_features, num_classes, nh=38, K=6, K_mix=2,
                    inout_skipconn=True, depth=3, p=0.5, bn=False):
        super(Graph_resnet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.depth = depth

        self.Kipfblock_list = nn.ModuleList()
        self.skipproject_list = nn.ModuleList()

        if isinstance(nh, list):
            # if you give every layer a different number of channels,
            # you need one entry for every layer
            assert len(nh) == depth
        else:
            nh = [nh] * depth

        for i in range(depth):
            if i == 0:
                self.Kipfblock_list.append(Kipfblock(n_input=num_features, 
                    n_hidden=nh[0], K=K, p=p, bn=bn))
                self.skipproject_list.append(ChebConv(num_features, nh[0], K=1))
            else:
                self.Kipfblock_list.append(Kipfblock(n_input=nh[i-1],
                                             n_hidden=nh[i], K=K, p=p, bn=bn))
                self.skipproject_list.append(ChebConv(nh[i-1], nh[i], K=1))

        if inout_skipconn:
            self.conv_mix = ChebConv(nh[-1] + num_features, num_classes, K=K_mix)
        else:
            self.conv_mix = ChebConv(nh[-1], num_classes, K=K_mix)
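
Only the constructor is shown; given the paired Kipfblock_list/skipproject_list, the forward pass presumably adds each block's output to a K=1 projection of its input (a residual step), and the inout_skipconn branch concatenates the raw input features before conv_mix. A hedged sketch of that logic, not taken from the source project:

    def forward(self, x, edge_index):
        x_in = x
        for kipf, proj in zip(self.Kipfblock_list, self.skipproject_list):
            # residual step: block output plus a linear (K=1) shortcut projection
            x = kipf(x, edge_index) + proj(x, edge_index)
        if self.inout_skipconn:
            # skip connection from input to output: concatenate raw features
            x = torch.cat([x, x_in], dim=1)
        return self.conv_mix(x, edge_index)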
Example #9
 def __init__(self,
              d1=90,
              d2=80,
              d3=50,
              num_features=1,
              num_classes=1,
              num_layers=4,
              dK=10,
              **kwargs):
     super(Net8, self).__init__()
     self.conv1 = ChebConv(num_features, d1, K=dK, bias=False)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(ChebConv(d1, d1, K=dK))
     self.bn1 = nn.BatchNorm1d(d1)
     self.fc1 = nn.Linear(d1, d2)
     self.bn2 = nn.BatchNorm1d(d2)
     self.fc2 = nn.Linear(d2, d3)
     self.bn3 = nn.BatchNorm1d(d3)
     self.fc3 = nn.Linear(d3, 2)
     self.bn4 = nn.BatchNorm1d(2)
     self.fc4 = nn.Linear(2, 1)  # single output for regression
     self.num_layers = num_layers
     self.d1 = d1
     self.d2 = d2
     self.d3 = d3
     self.dK = dK
Example #10
 def __init__(self, nfeat, nhid, nclass, dropout, nlayer=3):
     super(ChebGCNX, self).__init__()
     chebgcn_para1 = 2  # Chebyshev filter size K
     self.conv1 = ChebConv(nfeat, nhid, chebgcn_para1)
     self.conv2 = ChebConv(nhid, nclass, chebgcn_para1)
     self.convx = nn.ModuleList(
         [ChebConv(nhid, nhid, chebgcn_para1) for _ in range(nlayer - 2)])
     self.dropout_p = dropout
Example #11
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = ChebConv(80, 16, K=2)
     self.conv2 = ChebConv(16, 32, K=2)
     self.conv3 = ChebConv(32, 64, K=2)
     self.fc1 = nn.Linear(64, 100)
     self.fc2 = nn.Linear(100, 20)
     self.fc3 = nn.Linear(20, 5)
Example #12
File: MRF_GCN.py Project: HazeDT/DAGCN
 def __init__(
     self,
     in_channels,
 ):
     super(MultiChev_B, self).__init__()
     self.scale_1 = ChebConv(in_channels, 100, K=1)
     self.scale_2 = ChebConv(in_channels, 100, K=2)
     self.scale_3 = ChebConv(in_channels, 100, K=3)
Example #13
    def __init__(self, numFeatures, numClasses):

        super().__init__()

        self.conv1 = ChebConv(numFeatures, 8, 3)
        self.conv2 = ChebConv(8, 8, 3)
        self.fc1 = torch.nn.Linear(192, 64)
        self.fc2 = torch.nn.Linear(64, numClasses)
Example #14
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = ChebConv(8, 16, K=4)
     self.conv2 = ChebConv(16, 32, K=4)
     self.conv3 = ChebConv(32, 64, K=4)
     self.fc1 = nn.Linear(512, 256)
     self.fc2 = nn.Linear(256, 128)
     self.fc3 = nn.Linear(128, 5)
Example #15
    def _create_candidate_state_parameters_and_layers(self):

        self.conv_x_h = ChebConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.conv_h_h = ChebConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)
Example #16
    def _create_reset_gate_parameters_and_layers(self):

        self.conv_x_r = ChebConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.conv_h_r = ChebConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)
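
These _create_* helpers follow the GConvGRU cell from pytorch_geometric_temporal: each gate combines one convolution of the input with one convolution of the hidden state, exactly as in a standard GRU. For the reset gate the computation is essentially:

    def _calculate_reset_gate(self, X, edge_index, edge_weight, H):
        # R = sigmoid(ChebConv(X) + ChebConv(H))
        R = self.conv_x_r(X, edge_index, edge_weight)
        R = R + self.conv_h_r(H, edge_index, edge_weight)
        return torch.sigmoid(R)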
Example #17
    def __init__(self, numFeatures, numClasses):

        super().__init__()

        self.conv1 = ChebConv(numFeatures, 8, 3)
        self.conv2 = ChebConv(8, 16, 3)
        self.conv3 = ChebConv(16, 32, 5)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)
Example #18
 def __init__(self, inChannels, hiddenChannels, numNodes, numClasses):
     super(VanillaGCN, self).__init__()
     self.hiddenChannels = hiddenChannels
     self.numNodes = numNodes
     # self.conv1 = GCNConv(inChannels, 2*hiddenChannels)
     # self.conv2 = GCNConv(2*hiddenChannels, hiddenChannels)
     self.conv1 = ChebConv(inChannels, 2 * hiddenChannels, K=4)
     self.conv2 = ChebConv(2 * hiddenChannels, hiddenChannels, K=4)
     self.fc = nn.Linear(hiddenChannels * numNodes, numClasses)
Example #19
    def __init__(self):
        super(Net, self).__init__()
        # self.conv1 = GCNConv(dataset.num_features, 16, cached=True)
        # self.conv2 = GCNConv(16, dataset.num_classes, cached=True)
        self.conv1 = ChebConv(data.num_features, 16, K=2)
        self.conv2 = ChebConv(16, data.num_classes, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()
Example #20
 def __init__(self, datasetroot, width):
     super(ChebConvNet, self).__init__()
     self.NumLayers = len(width)
     self.layers = nn.ModuleList()
     self.layers.append(ChebConv(datasetroot.num_features, width[0], K=1))
     for i in range(self.NumLayers - 1):
         layer = ChebConv(width[i], width[i + 1], K=1)
         nn.init.xavier_uniform_(layer.weight)
         self.layers.append(layer)
     self.layers.append(ChebConv(width[-1], datasetroot.num_classes, K=1))
Example #21
    def __init__(self, num_features, num_classes, nh1=64, K=8, K_mix=2,
                     cached=True, inout_skipconn=False):
        super(KipfNet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.Kipfblock1 = Kipfblock(n_input=num_features, n_hidden=nh1, K=K)

        if inout_skipconn:
            self.conv_mix = ChebConv(nh1 + num_features, num_classes, K=K_mix)
        else:
            self.conv_mix = ChebConv(nh1, num_classes, K=K_mix)
Example #22
    def __init__(self, num_features, num_classes, training_method='dfa'):
        super(DFAChebNet, self).__init__()
        self.conv1 = ChebConv(num_features, 16, K=2)
        self.dfa_1 = DFALayer()
        self.conv2 = ChebConv(16, num_classes, K=2)

        self.dfa = DFA(dfa_layers=[self.dfa_1], no_training=training_method != 'dfa')

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()
Example #23
    def _create_cell_state_parameters_and_layers(self):

        self.conv_x_c = ChebConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.conv_h_c = ChebConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.b_c = Parameter(torch.Tensor(1, self.out_channels))
Example #24
    def __init__(self, feature, out_channel):
        super(ChebyNet, self).__init__()

        self.GConv1 = ChebConv(feature, 1024, K=1)
        self.bn1 = BatchNorm(1024)

        self.GConv2 = ChebConv(1024, 1024, K=1)
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #25
    def _create_output_gate_parameters_and_layers(self):

        self.conv_x_o = ChebConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.conv_h_o = ChebConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels,
                                 K=self.K)

        self.w_c_o = Parameter(torch.Tensor(1, self.out_channels))
        self.b_o = Parameter(torch.Tensor(1, self.out_channels))
Example #26
 def __init__(self, in_channels, out_channels):
     super(Net, self).__init__()
     dim = 512
     self.gcn1 = ChebConv(in_channels, dim, K=1)
     self.lin1 = nn.Linear(in_channels, dim)
     self.gcn2 = ChebConv(dim, dim, K=1)
     self.lin2 = nn.Linear(dim, dim)
     self.gcn3 = ChebConv(dim, dim, K=1)
     self.lin3 = nn.Linear(dim, dim)
     self.gcn4 = ChebConv(dim, dim, K=1)
     self.lin4 = nn.Linear(dim, dim)
     self.gcn5 = ChebConv(dim, out_channels, K=1)
     self.lin5 = nn.Linear(dim, out_channels)
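
The parallel gcnN/linN pairs suggest a forward pass in which every graph convolution is summed with a node-wise linear transform of the same input, letting each layer mix neighborhood information with the node's own features. A minimal sketch, assuming ReLU activations between layers (not shown in the original):

    def forward(self, x, edge_index):
        x = F.relu(self.gcn1(x, edge_index) + self.lin1(x))
        x = F.relu(self.gcn2(x, edge_index) + self.lin2(x))
        x = F.relu(self.gcn3(x, edge_index) + self.lin3(x))
        x = F.relu(self.gcn4(x, edge_index) + self.lin4(x))
        # final layer produces raw output scores, no non-linearity
        return self.gcn5(x, edge_index) + self.lin5(x)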
Example #27
    def __init__(self, embedding_dim, conv_layers, polynomial_degree,
                 hidden_dim, hidden_layers, nonlinearity, output_type):
        super(ChebNet, self).__init__(embedding_dim, hidden_dim, hidden_layers,
                                      nonlinearity, output_type)
        self.num_conv_layers = conv_layers
        self.polynomial_degree = polynomial_degree

        self.conv_layers = ModuleList(
            [ChebConv(1, self.embedding_dim, self.polynomial_degree)] + [
                ChebConv(self.embedding_dim, self.embedding_dim,
                         self.polynomial_degree)
                for _ in range(self.num_conv_layers - 1)
            ])
Example #28
    def _create_candidate_state_parameters_and_layers(self):

        self.conv_x_h = ChebConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels,
                                 K=self.K,
                                 normalization=self.normalization,
                                 bias=self.bias)

        self.conv_h_h = ChebConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels,
                                 K=self.K,
                                 normalization=self.normalization,
                                 bias=self.bias)
Example #29
 def __init__(self):
     super(Net, self).__init__()
     if args.model == 'GCN':
         # cached=True enables caching for transductive learning
         self.conv1 = GCNConv(data.x.shape[1], 16, cached=True)
         self.conv2 = GCNConv(16, 32, cached=True)
         self.conv3 = GCNConv(32, 64, cached=True)
         self.conv4 = GCNConv(64, 6, cached=True)
     elif args.model == 'ChebConv':
         self.conv1 = ChebConv(data.x.shape[1], 16, K=3)
         self.conv2 = ChebConv(16, 32, K=3)
         self.conv3 = ChebConv(32, 64, K=3)
         self.conv4 = ChebConv(64, 6, K=3)
Example #30
    def __init__(self, in_channels=8, out_channels=2):
        super(MultiScaleChebConv, self).__init__()

        out_channels *= 144
        in_channels *= 144

        self.out_channels = out_channels
        self.in_channels = in_channels

        self.chebconv_k3 = ChebConv(in_channels, out_channels, K=4).cuda()
        self.chebconv_k5 = ChebConv(in_channels, out_channels, K=5).cuda()
        self.chebconv_k7 = ChebConv(in_channels, out_channels, K=6).cuda()
        self.chebconv_k9 = ChebConv(in_channels, out_channels, K=7).cuda()