def reset_parameters(self):
    """Reinitialize every learnable tensor of this attention module."""
    # The linear sub-modules each know how to reset themselves.
    for lin in (self.k_lin, self.q_lin, self.v_lin, self.a_lin):
        reset(lin)
    # Scaling-style parameters start at one.
    for scale in (self.skip, self.p_rel):
        ones(scale)
    # Relation weight matrices get Glorot (Xavier) initialization.
    for rel in (self.a_rel, self.m_rel):
        glorot(rel)
def reset_parameters(self):
    """Reset the base weight/bias, then all attention-related parameters.

    Attention parameters are recognized by their name prefix and mapped
    to the matching initializer.
    """
    tgi.glorot(self.weight)
    tgi.zeros(self.bias)
    # First matching prefix wins, mirroring an if/elif chain.
    prefix_init = (
        ("att_scaling", tgi.ones),
        ("att_bias", tgi.zeros),
        ("att_mh", tgi.glorot),
    )
    for name, param in self.named_parameters():
        for prefix, init in prefix_init:
            if name.startswith(prefix):
                init(param)
                break
def reset_parameters(self):
    """Reset the scalar gates and the first linear layer of every MLP."""
    ones(self.beta)
    zeros(self.alpha)
    # Each layer container stores its Linear module at index 0.
    # NOTE(review): assumes layer[0] exposes reset_parameters() — holds for
    # nn.Linear; confirm for any custom layer types.
    for mlp in (self.MLP_co, self.MLP, self.MLP1, self.MLP2):
        for layer in mlp:
            layer[0].reset_parameters()
def reset_parameters(self):
    """Reinitialize all learnable parameters of the layer.

    Weights use Glorot initialization, biases are zeroed, the first
    scale ``l1`` starts at one and the second scale ``l2`` starts at
    ``1 / out_channels``.
    """
    if self.num_bases is not None:
        # Basis-decomposition mode: init the shared bases and the
        # per-relation combination coefficients.
        glorot(self.basis)
        glorot(self.att)
    else:
        glorot(self.weight)
    # NOTE(review): q and k are initialized in both modes here — the
    # original flattened source is ambiguous about whether they belonged
    # to the else-branch only; confirm against the class definition.
    glorot(self.q)
    glorot(self.k)
    zeros(self.bias)
    ones(self.l1)
    zeros(self.b1)
    # BUG FIX: the original called `torch.full(self.l2.size(), ...)`,
    # which allocates a brand-new tensor and discards it, leaving `l2`
    # untouched. Fill `l2` in place instead (cf. the sibling
    # reset_parameters that uses `const(self.l2, 1 / nhid)`).
    torch.nn.init.constant_(self.l2, 1 / self.out_channels)
    zeros(self.b2)
    if self.lin_edge is not None:
        glorot(self.lin_edge)
        glorot(self.e)
def test_inits():
    """Smoke-test the basic initializer helpers on a 1x4 tensor."""
    t = torch.empty(1, 4)

    uniform(size=4, tensor=t)
    assert t.min() >= -0.5
    assert t.max() <= 0.5

    glorot(t)
    assert t.min() >= -1.25
    assert t.max() <= 1.25

    zeros(t)
    assert t.tolist() == [[0, 0, 0, 0]]

    ones(t)
    assert t.tolist() == [[1, 1, 1, 1]]
def reset_parameters(self):
    """Glorot-init the attention weights and reset both scale/bias pairs."""
    glorot(self.att)
    ones(self.l1)
    zeros(self.b1)
    # The second scale starts at 1/nhid rather than 1.
    const(self.l2, 1 / self.nhid)
    zeros(self.b2)
def reset_parameters(self):
    """Reinitialize the relation-specific scaling and bias parameters."""
    # First matching prefix wins, mirroring an if/elif chain.
    prefix_init = (
        ("r_scaling", tgi.ones),
        ("r_bias", tgi.zeros),
    )
    for name, param in self.named_parameters():
        for prefix, init in prefix_init:
            if name.startswith(prefix):
                init(param)
                break
def reset_parameters(self):
    """Set the weight to all ones.

    (A uniform init over ``in_channels`` was present in an earlier,
    commented-out version and was abandoned in favor of a constant start.)
    """
    ones(self.weight)
def reset_parameters(self):
    """Glorot-initialize the weight; set the bias (if present) to ones.

    (Earlier commented-out variants tried a normal and an all-ones
    weight init before settling on Glorot.)
    """
    glorot(self.weight)
    if self.bias is not None:
        ones(self.bias)
def reset_parameters(self):
    """Reset the weight to ones and invalidate any cached state."""
    ones(self.weight)
    # Drop memoized results so the next pass recomputes them from scratch.
    self.cached_result = None
    self.cached_num_edges = None
def reset_parameters(self):
    """Identity-like start: unit weight, zero bias."""
    ones(self.weight)
    zeros(self.bias)