def __init__(self, nfeat, nhid, nclass, nrel, dropout):
    super(GCN, self).__init__()
    # One layer pair per relation type. Wrapped in nn.ModuleList so the
    # sub-layers are registered with the module (a plain Python list would
    # hide their parameters from .parameters(), .to(), and state_dict()).
    self.gc1 = nn.ModuleList([GraphConvolution(nfeat, nhid) for r in range(nrel)])
    self.gc2 = nn.ModuleList([GraphConvolution(nhid, nclass) for r in range(nrel)])
    self.dropout = dropout
    self.nrel = nrel

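# A minimal forward sketch for the multi-relational GCN above. It assumes
# GraphConvolution.forward takes (x, adj), that `adjs` holds one adjacency
# matrix per relation, and that per-relation outputs are summed; none of
# this is taken from the original code, and F is torch.nn.functional.
def forward(self, x, adjs):
    h = sum(F.relu(self.gc1[r](x, adjs[r])) for r in range(self.nrel))
    h = F.dropout(h, self.dropout, training=self.training)
    out = sum(self.gc2[r](h, adjs[r]) for r in range(self.nrel))
    return F.log_softmax(out, dim=1)
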
def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    # note: dim=0 normalizes along the first dimension; for per-node class
    # probabilities on (N, C) inputs, dim=1 would be the usual choice
    self.softmax = nn.Softmax(dim=0)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, dropout):
    super(ImprovedGCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
    self.dc = InnerProductDecoder(dropout)

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, 16)
    self.gc2 = GraphConvolution(16, nhid)
    self.gc3 = GraphConvolution(nhid, nclass)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    # The GCN is built from two GraphConvolution layers.
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout

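# A minimal forward sketch for the two-layer GCN above, following the usual
# Kipf & Welling pattern. It assumes GraphConvolution.forward takes (x, adj)
# and that F is torch.nn.functional; both are assumptions about this repo.
def forward(self, x, adj):
    x = F.relu(self.gc1(x, adj))
    x = F.dropout(x, self.dropout, training=self.training)
    x = self.gc2(x, adj)
    return F.log_softmax(x, dim=1)
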
def __init__(self, steps, device, edge_hid, nfeat, gcn_hid, dropout,
             normalize=False, op_type='FULLY_CONCAT_PRIMITIVES',
             transform_type_more=True):
    """
    :param nfeat: feature dimension of each node in the graph
    :param gcn_hid: hidden dimension of the GCN layers
    :param dropout: dropout rate for the GCN
    """
    super(ArchTransformer, self).__init__()
    self.steps = steps
    self.device = device
    self.normalize = normalize
    self.op_type = op_type
    if transform_type_more:
        if op_type == 'LOOSE_END_PRIMITIVES':
            num_ops = len(genotypes.LOOSE_END_PRIMITIVES)
        else:
            num_ops = len(genotypes.FULLY_CONCAT_PRIMITIVES)
    else:
        num_ops = len(genotypes.TRANSFORM_PRIMITIVES)
    self.gc1 = GraphConvolution(nfeat, gcn_hid)
    self.gc2 = GraphConvolution(gcn_hid, gcn_hid)
    self.dropout = dropout
    self.fc = nn.Linear(gcn_hid, num_ops * 2)
    # getattr is safer than eval for looking up the primitive set by name,
    # and raising ValueError survives python -O (assert does not).
    try:
        COMPACT_PRIMITIVES = getattr(genotypes, op_type)
    except AttributeError:
        raise ValueError('not supported op type %s' % op_type)
    # embeddings for the first two nodes and for the ops [op0, op1]
    self.node_hidden = nn.Embedding(2, 2 * edge_hid)
    self.op_hidden = nn.Embedding(len(COMPACT_PRIMITIVES), edge_hid)
    self.emb_attn = nn.Linear(2 * edge_hid, nfeat)

def __init__(self, nfeat, nhid, nclass, dropout, sample):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
    self.sample = sample

def __init__(self, nfeat, nhid, nout, dropout, nlayers):
    super(GCNDeep, self).__init__()
    self.gcstart = GraphConvolution(nfeat, nhid)
    # nn.ModuleList (not a plain list) so the middle layers are registered.
    self.gcn_middle = nn.ModuleList()
    for i in range(nlayers - 2):
        self.gcn_middle.append(GraphConvolution(nhid, nhid))
    self.gcend = GraphConvolution(nhid, nout)
    self.dropout = dropout

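# A minimal forward sketch for GCNDeep, assuming GraphConvolution.forward
# takes (x, adj) and that ReLU plus dropout sit between layers; both details
# are assumptions, not taken from the original code.
def forward(self, x, adj):
    x = F.relu(self.gcstart(x, adj))
    x = F.dropout(x, self.dropout, training=self.training)
    for gc in self.gcn_middle:
        x = F.relu(gc(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
    return self.gcend(x, adj)
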
def __init__(self, noise_dim=300, embed_dim=300, hidden_size=256, feature_dim=256):
    super().__init__()
    self.gcn1 = GraphConvolution(noise_dim + embed_dim, hidden_size)
    self.relu = nn.LeakyReLU(0.2)
    self.dropout = nn.Dropout(p=0.5)
    self.gcn2 = GraphConvolution(hidden_size, feature_dim)
    # Xavier-initialize the GCN weights and set biases to a small constant.
    for m in self.modules():
        if isinstance(m, GraphConvolution):
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid, 8)
    self.gc3 = GraphConvolution(nhid, 512, 8)
    self.gc1_pose = GraphConvolution(90, 512, 34)
    self.gc3_pose = GraphConvolution(512, 256, 34)
    self.fc = nn.Linear(512 + 256, nclass)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nout, dropout, nlayers, K):
    super(GCNLinear, self).__init__()
    self.gcstart = GraphConvolution(nfeat, nhid)
    # nn.ModuleList (not a plain list) so the middle layers are registered.
    self.gcn_middle = nn.ModuleList()
    for i in range(nlayers - 2):
        self.gcn_middle.append(GraphConvolution(nhid, nhid))
    self.gcend = GraphConvolution(nhid, nout)
    self.dropout = dropout
    self.classifier = nn.Linear(nout, K)

def __init__(self, nfeat, nhid, nclass, dropout, train_idx):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.gc3 = GraphConvolution(2 * nhid, nclass)
    self.bi1 = BI_Intereaction(nfeat, nhid, train_idx)
    self.dropout = dropout
    self.bn1 = nn.BatchNorm1d(nhid)

def __init__(self, nfeat, nhid1, nhid2, nhid3, nclass, dropout1, dropout2):
    super(GCN, self).__init__()
    self.nhid1 = nhid1
    self.nhid2 = nhid2
    self.nhid3 = nhid3
    self.gc1 = GraphConvolution(nfeat, self.nhid1)
    self.gc2 = GraphConvolution(self.nhid1, self.nhid2)
    self.gc3 = GraphConvolution(self.nhid2, self.nhid3)
    self.gc4 = GraphConvolution(self.nhid3, nclass)
    self.dropout1 = dropout1
    self.dropout2 = dropout2

def __init__(self, nfeat, embed_size, nhid, nclass, dropout):
    super(GCN, self).__init__()
    # (300,16) (16,300)
    self.fc = nn.Linear(nfeat, embed_size)
    self.init_weights()
    self.gc1 = GraphConvolution(embed_size, embed_size)
    self.gc2 = GraphConvolution(embed_size, embed_size)
    self.word_rnn = nn.GRU(embed_size, embed_size, 1, batch_first=True)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    # super() calls a method of the parent (super) class; calling
    # super().__init__() lets the subclass override __init__() while still
    # running the parent's initializer.
    # https://www.runoob.com/w3cnote/python-extends-init.html
    self.gc1 = GraphConvolution(nfeat, nhid)   # gc1: input size nfeat, output size nhid
    self.gc2 = GraphConvolution(nhid, nclass)  # gc2: input size nhid, output size nclass
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nhid)
    self.gc3 = GraphConvolution(nhid, nhid)
    self.gc4 = GraphConvolution(nhid, nclass)
    # cache for the per-layer node embeddings
    self.embedding_dict = {
        'h0': None,
        'h1': None,
        'h2': None,
        'h3': None,
        'h4': None
    }
    self.dropout = dropout

def __init__(self, nfeat, nhid, nclass, input_droprate, hidden_droprate, use_bn=False):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.input_droprate = input_droprate
    self.hidden_droprate = hidden_droprate
    self.bn1 = nn.BatchNorm1d(nfeat)
    self.bn2 = nn.BatchNorm1d(nhid)
    self.use_bn = use_bn

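# A minimal forward sketch for the variant above, showing one plausible
# placement of the optional BatchNorm and of the separate input/hidden
# dropout rates; the exact ordering is an assumption, not taken from the
# original code.
def forward(self, x, adj):
    if self.use_bn:
        x = self.bn1(x)
    x = F.dropout(x, self.input_droprate, training=self.training)
    x = F.relu(self.gc1(x, adj))
    if self.use_bn:
        x = self.bn2(x)
    x = F.dropout(x, self.hidden_droprate, training=self.training)
    return self.gc2(x, adj)
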
def __init__(self, x_dim, h_dim, z_dim, n_layers_gcn, n_out, dropout, alpha, P):
    super(TNDconf, self).__init__()
    self.x_dim = x_dim  # feature dim
    # self.eps = eps  # ?
    self.h_dim = h_dim
    self.z_dim = z_dim
    self.n_layers_gcn = n_layers_gcn
    self.n_out = n_out
    self.dropout = dropout
    self.alpha = alpha
    self.P = P

    self.phi_x = nn.Sequential(
        nn.Linear(x_dim, h_dim).to(device),
        nn.ReLU().to(device))  # nn.BatchNorm1d(h_dim)

    # GCN stack; nn.ModuleList (not a plain list) registers the layers.
    self.gc = nn.ModuleList([GraphConvolution(h_dim, h_dim).to(device)])
    for i in range(n_layers_gcn - 1):
        self.gc.append(GraphConvolution(h_dim, h_dim).to(device))

    # phi1, z2, z3 => z
    self.fuse = nn.Sequential(
        nn.Linear(h_dim + h_dim, z_dim).to(device),
        nn.ReLU().to(device))

    # potential-outcome prediction heads; again ModuleList, not a plain list
    self.out_t00 = nn.ModuleList(
        [nn.Linear(z_dim, z_dim).to(device) for i in range(n_out)])
    self.out_t10 = nn.ModuleList(
        [nn.Linear(z_dim, z_dim).to(device) for i in range(n_out)])
    self.out_t01 = nn.Linear(z_dim, 1).to(device)
    self.out_t11 = nn.Linear(z_dim, 1).to(device)

    # propensity score
    self.ps_predictor = nn.Sequential()
    self.ps_predictor.add_module('d_fc1', nn.Linear(z_dim, 100).to(device))
    self.ps_predictor.add_module('d_bn1', nn.BatchNorm1d(100).to(device))
    # self.ps_predictor.add_module('d_relu1', nn.ReLU(True).to(device))
    self.ps_predictor.add_module('d_sigmoid1', nn.Sigmoid().to(device))
    self.ps_predictor.add_module('d_fc2', nn.Linear(100, 2).to(device))
    self.ps_predictor.add_module('d_softmax', nn.Softmax(dim=1).to(device))

    # memory unit: c_t, z_t, h_{t-1} => h_t
    self.rnn = nn.GRUCell(z_dim + 1, h_dim).to(device)

def __init__(self, nfeat, nhid, nclass, dropout, gma, learnable,
             normalization=True, renormalization=False):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
    self.gma = nn.Parameter(gma)
    self.normalization = normalization
    self.renormalization = renormalization
    # freeze gamma if it should not be learned
    if not learnable:
        self.gma.requires_grad = False

def __init__(self, nfeat, nhid, nlayers, nclass, dropout):
    super(GCN, self).__init__()
    assert nlayers >= 2
    self.params = {
        'nfeat': nfeat,
        'nhid': nhid,
        'nlayers': nlayers,
        'nclass': nclass,
        'dropout': dropout
    }
    self.nfeat = nfeat
    self.nhid = nhid
    self.nclass = nclass
    self.nlayers = nlayers
    self.layers = []
    self.layers.append(GraphConvolution(nfeat, nhid))
    for _ in range(self.nlayers - 2):
        self.layers.append(GraphConvolution(nhid, nhid))
    self.layers.append(GraphConvolution(nhid, nclass))
    self.dropout = dropout
    self.layers = nn.ModuleList(self.layers)

def __init__(self, nfeat, nhid, dropout, n_in=1, n_out=1, cuda=False):
    super(GCN_DECONF, self).__init__()
    # self.gc2 = GraphConvolution(nhid, nclass)
    # representation layers; nn.ModuleList so they are registered properly
    if cuda:
        self.gc = nn.ModuleList([GraphConvolution(nfeat, nhid).cuda()])
        for i in range(n_in - 1):
            self.gc.append(GraphConvolution(nhid, nhid).cuda())
    else:
        self.gc = nn.ModuleList([GraphConvolution(nfeat, nhid)])
        for i in range(n_in - 1):
            self.gc.append(GraphConvolution(nhid, nhid))
    self.n_in = n_in
    self.n_out = n_out
    # potential-outcome heads; again ModuleList, not plain Python lists
    if cuda:
        self.out_t00 = nn.ModuleList([nn.Linear(nhid, nhid).cuda() for i in range(n_out)])
        self.out_t10 = nn.ModuleList([nn.Linear(nhid, nhid).cuda() for i in range(n_out)])
        self.out_t01 = nn.Linear(nhid, 1).cuda()
        self.out_t11 = nn.Linear(nhid, 1).cuda()
    else:
        self.out_t00 = nn.ModuleList([nn.Linear(nhid, nhid) for i in range(n_out)])
        self.out_t10 = nn.ModuleList([nn.Linear(nhid, nhid) for i in range(n_out)])
        self.out_t01 = nn.Linear(nhid, 1)
        self.out_t11 = nn.Linear(nhid, 1)
    self.dropout = dropout
    # a linear layer for propensity prediction
    self.pp = nn.Linear(nhid, 1)
    if cuda:
        self.pp = self.pp.cuda()
    self.pp_act = nn.Sigmoid()

def __init__(self, nfeat, nhid, nclass, dropout, nlayer):
    super(GCN, self).__init__()
    self.nlayer = nlayer
    # shrink the width linearly from nfeat towards nhid across the layers
    gap = (nfeat - nhid) // self.nlayer
    # nn.ModuleList (not plain lists) so all per-layer modules are registered
    self.gn = nn.ModuleList()
    self.nn = nn.ModuleList()
    self.gc = nn.ModuleList()
    self.gn0 = GraphNormalization(nhid + gap * (nlayer - 1), nhid + gap * (nlayer - 1))
    self.nn0 = NodeNormalization(nhid + gap * (nlayer - 1), nhid + gap * (nlayer - 1))
    self.gc0 = GraphConvolution(nfeat, nhid + gap * (nlayer - 1))
    self.gn.append(self.gn0)
    self.nn.append(self.nn0)
    self.gc.append(self.gc0)
    for i in range(1, self.nlayer):
        gn_i = GraphNormalization(nhid + gap * (nlayer - i - 1), nhid + gap * (nlayer - i - 1))
        nn_i = NodeNormalization(nhid + gap * (nlayer - i - 1), nhid + gap * (nlayer - i - 1))
        gc_i = GraphConvolution(nhid + gap * (nlayer - i), nhid + gap * (nlayer - i - 1))
        self.gn.append(gn_i)
        self.nn.append(nn_i)
        self.gc.append(gc_i)
    self.gcfin = GraphConvolution(nhid, nclass)
    self.gnfin = GraphNormalization(nclass, nclass)
    self.nnfin = NodeNormalization(nclass, nclass)
    self.dropout = dropout

def __init__(self, feat_x_n, topo_x_n, n_output, h_layers, dropout=0., rnn_type="RNN_RELU"):
    super(RNNModel, self).__init__()
    self._comb = "topo"
    n_input = {"feat": feat_x_n, "topo": topo_x_n, "comb": feat_x_n + topo_x_n}[self._comb]
    all_layers = [n_input] + h_layers + [n_output]
    self._rnn_type = rnn_type
    self._dropout = dropout
    self.drop = nn.Dropout(self._dropout)
    if self._rnn_type not in self.RNN_TYPES:
        raise ValueError("An invalid rnn_type, options are %s" % (self.RNN_TYPES,))
    self._rnn_in, self._rnn_out, self._rnn_layers = all_layers[0], all_layers[0], 1
    self.rnn = nn.RNNBase(self._rnn_type, self._rnn_in, self._rnn_out,
                          self._rnn_layers, dropout=self._dropout)
    self.gcn_layers = nn.ModuleList([GraphConvolution(first, second)
                                     for first, second in zip(all_layers[:-1], all_layers[1:])])
    self._activation_func = functional.relu

def __init__(self, nfeat, nhid, nclass, dropout, data_type: str = None):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid, True, data_type)
    self.gc2 = GraphConvolution(nhid, nclass, True, data_type)
    self.dropout = dropout

def gcn_sequential_model(nfeat, nhid, nclass, adj):
    model = nn.Sequential(
        GraphConvolution(nfeat, nhid, adj, activation=F.relu),
        GraphConvolution(nhid, nclass, adj, activation=None))
    return model

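# A usage sketch for the factory above. The sizes and the identity adjacency
# are placeholder assumptions, as is the premise that this GraphConvolution
# variant stores adj at construction time, so the nn.Sequential model can be
# called on the node features alone.
adj = torch.eye(2708)  # placeholder adjacency (self-loops only)
model = gcn_sequential_model(nfeat=1433, nhid=16, nclass=7, adj=adj)
logits = model(torch.randn(2708, 1433))
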
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, nclass, dropout):
    super(GCNModelVAE, self).__init__()
    self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.gc4 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.gc5 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.dc = InnerProductDecoder(dropout, act=lambda x: x)
    self.gc2_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc3_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc4_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc5_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc6_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc7_1 = GraphConvolution(hidden_dim1 + input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.node_regen = GraphConvolution(hidden_dim1, input_feat_dim, dropout, act=F.relu)
    self.gc_class = GraphConvolution(hidden_dim1 + input_feat_dim, nclass)

def __init__(self, nfeat, nhid, nclass, dropout, init_scheme="xavier"):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid, init_scheme=init_scheme)
    self.gc2 = GraphConvolution(nhid, nclass, init_scheme=init_scheme)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nout, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nout)
    self.dropout = dropout

def __init__(self, nfeat, nhid, nout, nlin1, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nout)
    self.fc1 = nn.Linear(nout, nlin1)
    self.dropout = dropout