def __init__(self, in_channels, fnc=F.relu):
    super(ComplexNonLin, self).__init__()
    self.fnc = fnc
    self.bias = nn.Parameter(torch.Tensor(in_channels))
    zeros(self.bias)
def reset_parameters(self):
    # original initialization
    # uniform(self.weight.size(0), self.weight)
    # uniform(self.weight.size(0), self.bias)
    # change to new initialization
    glorot(self.weight)
    zeros(self.bias)
def reset_parameters(self):
    glorot(self.att_msg)
    if self.task_channels is not None:
        glorot(self.att_task)
    zeros(self.bias)
    self.cached_result = None
    self.cached_num_edges = None
def reset_parameters(self):
    for i in range(self.num_node_type):
        glorot(getattr(self, "node_weight_%d" % i))
        # torch.nn.init.uniform(getattr(self, "node_weight_%d" % i))
        zeros(getattr(self, "node_bias_%d" % i))
    for i in range(self.num_edge_type):
        glorot(getattr(self, "edge_weight_%d" % i))
def reset_parameters(self):
    glorot(self.weight)
    if cfg.gnn.self_msg == 'concat':
        glorot(self.weight_self)
    zeros(self.bias)
    self.cached_result = None
    self.cached_num_edges = None
def reset_parameters(self):
    self.lin_l.reset_parameters()
    self.lin_r.reset_parameters()
    if self.lin_edge is not None:
        self.lin_edge.reset_parameters()
    glorot(self.att)
    zeros(self.bias)
def __init__(self, emb_dim, aggr="add", input_layer=False, heads=3,
             negative_slope=0.2):
    super(GSANConv, self).__init__()
    self.aggr = aggr
    self.emb_dim = emb_dim
    self.heads = heads
    self.negative_slope = negative_slope
    # multi-layer perceptron
    self.mlp = torch.nn.Sequential(
        torch.nn.Linear(emb_dim, emb_dim),
        torch.nn.BatchNorm1d(emb_dim),
        torch.nn.ReLU(),
        torch.nn.Linear(emb_dim, emb_dim))
    ### Mapping 0/1 edge features to embedding
    self.edge_encoder = torch.nn.Linear(9, heads * emb_dim)
    ### Mapping uniform input features to embedding.
    self.input_layer = input_layer
    if self.input_layer:
        self.input_node_embeddings = torch.nn.Embedding(2, emb_dim)
        torch.nn.init.xavier_uniform_(self.input_node_embeddings.weight.data)
    self.att = torch.nn.Parameter(torch.Tensor(1, heads, 2 * emb_dim))
    self.bias = torch.nn.Parameter(torch.Tensor(emb_dim))
    glorot(self.att)
    zeros(self.bias)
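The attention parameter above has shape [1, heads, 2 * emb_dim], the usual GAT-style layout. A minimal sketch of how such a parameter is typically applied inside a message function (an assumption for illustration; GSANConv's forward pass is not shown in the snippet):

import torch
import torch.nn.functional as F

E, heads, emb_dim = 100, 3, 64
x_i = torch.randn(E, heads, emb_dim)  # target-node features, per edge and head
x_j = torch.randn(E, heads, emb_dim)  # source-node features, per edge and head
att = torch.randn(1, heads, 2 * emb_dim)

# Concatenate source/target per head, dot with `att`, then LeakyReLU
# (negative_slope=0.2 matches the constructor default above).
alpha = (torch.cat([x_i, x_j], dim=-1) * att).sum(dim=-1)  # [E, heads]
alpha = F.leaky_relu(alpha, negative_slope=0.2)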
def reset_parameters(self):
    # glorot(self.weight)
    self.weight.data = nn.init.xavier_uniform_(
        self.weight.data, gain=nn.init.calculate_gain('relu'))
    zeros(self.bias)
    self.cached_result = None
    self.cached_num_edges = None
def reset_parameters(self):
    if self.in_channels <= 0:
        pass
    elif self.weight_initializer == 'glorot':
        inits.glorot(self.weight)
    elif self.weight_initializer == 'uniform':
        bound = 1.0 / math.sqrt(self.weight.size(-1))
        torch.nn.init.uniform_(self.weight.data, -bound, bound)
    elif self.weight_initializer == 'kaiming_uniform':
        inits.kaiming_uniform(self.weight, fan=self.in_channels,
                              a=math.sqrt(5))
    elif self.weight_initializer is None:
        inits.kaiming_uniform(self.weight, fan=self.in_channels,
                              a=math.sqrt(5))
    else:
        raise RuntimeError(f"Linear layer weight initializer "
                           f"'{self.weight_initializer}' is not supported")

    if self.bias is None or self.in_channels <= 0:
        pass
    elif self.bias_initializer == 'zeros':
        inits.zeros(self.bias)
    elif self.bias_initializer is None:
        inits.uniform(self.in_channels, self.bias)
    else:
        raise RuntimeError(f"Linear layer bias initializer "
                           f"'{self.bias_initializer}' is not supported")
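For reference, a minimal sketch of the Glorot (Xavier) uniform rule that the 'glorot' branch dispatches to, assuming a 2-D weight; the helper name glorot_uniform_ is made up here for illustration:

import math
import torch

def glorot_uniform_(tensor: torch.Tensor) -> None:
    # Bound = sqrt(6 / (fan_in + fan_out)); values drawn uniformly
    # from [-bound, bound].
    fan_in, fan_out = tensor.size(-2), tensor.size(-1)
    bound = math.sqrt(6.0 / (fan_in + fan_out))
    with torch.no_grad():
        tensor.uniform_(-bound, bound)

w = torch.empty(16, 32)
glorot_uniform_(w)
assert w.abs().max() <= math.sqrt(6.0 / (16 + 32))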
def reset_parameters(self):
    # uniform(self.weight.size(0), self.weight)
    # uniform(self.weight.size(0), self.weight_id)
    # uniform(self.weight.size(0), self.bias)
    glorot(self.weight)
    glorot(self.weight_id)
    zeros(self.bias)
def __init__(self, in_channels, out_channels, improved=False, cached=False,
             bias=True, **kwargs):
    super(GCNConv, self).__init__()
    # super(GCNConv, self).__init__(aggr='add', **kwargs)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.improved = improved
    self.cached = cached
    self.weight = Parameter(torch.Tensor(in_channels, out_channels))
    if bias:
        self.bias = Parameter(torch.Tensor(out_channels))
    else:
        self.register_parameter('bias', None)
    glorot(self.weight)
    zeros(self.bias)
    self.cached_result = None
    self.cached_num_edges = None
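Because glorot/zeros are called directly in __init__ here, the parameters are usable immediately after construction. A short usage sketch (names taken from the snippet above):

conv = GCNConv(in_channels=16, out_channels=32)
assert conv.weight.shape == (16, 32)
assert conv.bias.abs().sum().item() == 0.0  # bias starts at zero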
def reset_parameters(self):
    glorot(self.lin_l.weight)
    glorot(self.lin_r.weight)
    glorot(self.lin_e.weight)  # for edge feature
    glorot(self.att_l)
    glorot(self.att_r)
    glorot(self.att_e)  # for edge feature
    zeros(self.bias)
def reset_parameters(self):
    glorot(self.weight)
    glorot(self.weight1)
    glorot(self.weight2)
    glorot(self.att)
    glorot(self.att2)
    zeros(self.bias)
    zeros(self.bias2)
def reset_parameters(self):
    glorot(self.lin_l)
    glorot(self.lin_r)
    glorot(self.att_l)
    glorot(self.att_r)
    zeros(self.bias)
    if self.encoding in ("relational", "relative", "multi"):
        glorot(self.encoding_layer_weight)
        glorot(self.encoding_layer_bias)
def reset_parameters(self):
    # glorot(self.weight)
    # glorot(self.att)
    # zeros(self.bias)
    # Gaussian initialization according to the paper
    torch.nn.init.normal_(self.weight, 0, 0.1)
    torch.nn.init.normal_(self.att, 0, 0.1)
    zeros(self.bias)
def reset_parameters(self):
    tgi.glorot(self.weight)
    tgi.zeros(self.bias)
    for name, param in self.named_parameters():
        if name.startswith("att_scaling"):
            tgi.ones(param)
        elif name.startswith("att_bias"):
            tgi.zeros(param)
        elif name.startswith("att_mh"):
            tgi.glorot(param)
def __init__(self, num_features, num_classes):
    super(MLP, self).__init__()
    self.fc1 = nn.Linear(num_features, 16)
    self.fc2 = nn.Linear(16, num_classes)
    # torch.nn.init.xavier_uniform_(self.fc1.weight)
    # torch.nn.init.xavier_uniform_(self.fc2.weight)
    glorot(self.fc1.weight)
    zeros(self.fc1.bias)
    glorot(self.fc2.weight)
    zeros(self.fc2.bias)
def __init__(self, in_channels: int, out_channels: int, K: int,
             embedding_dimensions: int):
    super(AVWGCN, self).__init__()
    self.K = K
    self.weights_pool = torch.nn.Parameter(
        torch.Tensor(embedding_dimensions, K, in_channels, out_channels))
    self.bias_pool = torch.nn.Parameter(
        torch.Tensor(embedding_dimensions, out_channels))
    glorot(self.weights_pool)
    zeros(self.bias_pool)
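A sketch of how such parameter pools are typically consumed in the AGCRN-style forward pass (an assumption, since the forward pass is not part of the snippet): per-node weights and biases are generated from learned node embeddings.

import torch

num_nodes, emb_dim, K, c_in, c_out = 207, 10, 2, 1, 64
node_embeddings = torch.randn(num_nodes, emb_dim)
weights_pool = torch.randn(emb_dim, K, c_in, c_out)
bias_pool = torch.randn(emb_dim, c_out)

# Each node gets its own weight tensor, mixed from the pool by its embedding.
weights = torch.einsum('nd,dkio->nkio', node_embeddings, weights_pool)  # [N, K, c_in, c_out]
bias = node_embeddings @ bias_pool                                      # [N, c_out]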
def reset_parameters(self):
    ones(self.beta)
    zeros(self.alpha)
    for layer in self.MLP_co:
        layer[0].reset_parameters()
    for layer in self.MLP:
        layer[0].reset_parameters()
    for layer in self.MLP1:
        layer[0].reset_parameters()
    for layer in self.MLP2:
        layer[0].reset_parameters()
def reset_parameters(self):
    glorot(self.lin_l.weight)
    glorot(self.lin_r.weight)
    # glorot(self.lin_e.weight)  # for edge feature
    # glorot(self.att_l)
    # glorot(self.att_r)
    glorot(self.proj_cmd.weight)
    glorot(self.cal_cmd.weight)
    glorot(self.cal_x.weight)
    # glorot(self.att_e)  # for edge feature
    zeros(self.bias)
def __init__(self, in_dim, hidden_dim, x_num_day):
    super().__init__()
    self.fc_weight = Parameter(torch.Tensor(in_dim, 1, hidden_dim))
    self.fc_bias = Parameter(torch.Tensor(in_dim, hidden_dim))
    glorot(self.fc_weight)
    zeros(self.fc_bias)
    # Use ModuleList rather than a plain Python list so the per-day
    # convolutions are registered as submodules and their parameters
    # are visible to .parameters(), .to(), and state_dict().
    self.conv1s = torch.nn.ModuleList(
        [GCNTensorInteraction(in_dim, 1, hidden_dim,
                              interaction_ftr_dim=3).cuda()
         for _ in range(x_num_day)])
    self.conv2s = torch.nn.ModuleList(
        [GCNTensorInteraction(in_dim, hidden_dim, hidden_dim,
                              interaction_ftr_dim=3).cuda()
         for _ in range(x_num_day)])
    # add more if necessary
    self.rnn = IMVTensorLSTM(in_dim, 1, hidden_dim, hidden_dim * 2).cuda()
    self.x_num_day = x_num_day
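Why the ModuleList above matters: modules held in a plain Python list are invisible to the parent module, so their weights would never be trained or moved to the GPU with it. A self-contained sketch (class names here are illustrative only):

import torch.nn as nn

class Bad(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = [nn.Linear(4, 4)]  # NOT registered as a submodule

class Good(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(4, 4)])  # registered

assert len(list(Bad().parameters())) == 0
assert len(list(Good().parameters())) == 2  # weight + bias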
def reset_parameters(self):
    glorot(self.weight)
    glorot(self.att)
    zeros(self.bias)
    if self.att_type in ["generalized_linear"]:
        glorot(self.general_att_layer.weight)
    if self.pool_dim != 0:
        for layer in self.pool_layer:
            glorot(layer.weight)
            zeros(layer.bias)
def reset_parameters(self):
    glorot(self.lin_l.weight)
    glorot(self.lin_r.weight)
    glorot(self.lin_e.weight)
    glorot(self.att)
    if self.lin_att is not None:
        glorot(self.lin_att.weight)
    if self.concat:
        glorot(self.node_lin_out.weight)
        glorot(self.edge_lin_out.weight)
    zeros(self.node_bias)
    zeros(self.edge_bias)
def _set_parameters(self):
    glorot(self.w_c_i)
    glorot(self.w_c_f)
    glorot(self.w_c_o)
    zeros(self.b_i)
    zeros(self.b_f)
    zeros(self.b_c)
    zeros(self.b_o)
def test_inits():
    x = torch.empty(1, 4)

    uniform(size=4, tensor=x)
    assert x.min() >= -0.5
    assert x.max() <= 0.5

    glorot(x)
    assert x.min() >= -1.25
    assert x.max() <= 1.25

    zeros(x)
    assert x.tolist() == [[0, 0, 0, 0]]

    ones(x)
    assert x.tolist() == [[1, 1, 1, 1]]
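The bounds this test checks follow directly from the initializers' formulas (assuming the PyG-style helpers): uniform draws from [-1/sqrt(size), 1/sqrt(size)], and glorot from [-sqrt(6/(fan_in+fan_out)), sqrt(6/(fan_in+fan_out))].

import math

uniform_bound = 1.0 / math.sqrt(4)       # size=4  -> +/-0.5, as asserted
glorot_bound = math.sqrt(6.0 / (1 + 4))  # (1, 4) tensor -> ~1.095 < 1.25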
def _set_parameters(self):
    glorot(self.W_i)
    glorot(self.W_f)
    glorot(self.W_c)
    glorot(self.W_o)
    zeros(self.b_i)
    zeros(self.b_f)
    zeros(self.b_c)
    zeros(self.b_o)
def __init__(self, in_channels, out_channels, edge_in_channels, **kwargs):
    super(MetaGINConv, self).__init__(aggr='add', **kwargs)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.edge_in_channels = edge_in_channels  # 9 for bio
    self.w1 = Parameter(torch.Tensor(2 * in_channels, 2 * out_channels))
    self.b1 = Parameter(torch.Tensor(2 * out_channels))
    # batch norm.
    self.bn = BatchNorm1d(2 * out_channels)
    # linear 2
    self.w2 = Parameter(torch.Tensor(out_channels, 2 * out_channels))
    self.b2 = Parameter(torch.Tensor(out_channels))
    self.edge_w = Parameter(torch.Tensor(out_channels, edge_in_channels))
    self.edge_b = Parameter(torch.Tensor(out_channels))
    glorot(self.edge_w)
    zeros(self.edge_b)
    self.reset_parameters()
def reset_parameters(self):
    # Use Glorot (a.k.a. Xavier uniform) initialization
    glorot(self.W_h.weight)
    glorot(self.W_t.weight)
    glorot(self.W_r.weight)
    glorot(self.W_o.weight)
    glorot(self.att_h)
    glorot(self.att_t)
    glorot(self.att_r)
    zeros(self.bias_h)
    zeros(self.bias_t)
    zeros(self.bias_r)
    zeros(self.bias_o)
def __init__(self, in_channels, dropout):
    super(SAGE_Re, self).__init__()
    self.channels = [in_channels, in_channels, 256, 256, 256, 112]
    self.num_layers = 5
    self.alpha = Parameter(torch.Tensor(self.num_layers))
    zeros(self.alpha)
    self.zero = l_GCN(in_channels, in_channels)
    self.rezero = [1, 0, 0, 1, 0]
    self.relu_list = [1, 2]
    self.re_list = [0, 3]
    self.convs = torch.nn.ModuleList()
    for i in range(self.num_layers):
        if i in self.re_list:
            self.convs.append(l_GCN(self.channels[i], self.channels[i + 1]))
        else:
            self.convs.append(l_SAGE(self.channels[i], self.channels[i + 1]))
    self.dropout = dropout
def __init__(self, in_channels, out_channels, weight=None, bias=None,
             improved=False, use_bias=True, **kwargs):
    super().__init__(aggr='add', **kwargs)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.improved = improved
    self.cache_dict = {}
    # self.weight = Parameter(torch.Tensor(in_channels, out_channels))
    #
    # if bias:
    #     self.bias = Parameter(torch.Tensor(out_channels))
    # else:
    #     self.register_parameter('bias', None)
    if weight is None:
        self.weight = Parameter(
            torch.Tensor(in_channels, out_channels).to(torch.float32))
        glorot(self.weight)
    else:
        self.weight = weight
        print("use shared weight")
    if bias is None:
        if use_bias:
            self.bias = Parameter(
                torch.Tensor(out_channels).to(torch.float32))
        else:
            self.register_parameter('bias', None)
        zeros(self.bias)
    else:
        self.bias = bias
        print("use shared bias")
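A hypothetical usage sketch of the weight-sharing path above (the class name SharedGCNConv is assumed; the snippet does not show it): two layers reuse a single Parameter, so an optimizer sees and updates that tensor exactly once.

shared_w = Parameter(torch.empty(16, 32, dtype=torch.float32))
glorot(shared_w)
layer_a = SharedGCNConv(16, 32, weight=shared_w)
layer_b = SharedGCNConv(16, 32, weight=shared_w)
assert layer_a.weight is layer_b.weight  # one tensor, shared gradients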