def init(self):
    # embeddings
    initialization.init_embeddings(self.embs)
    # rnn
    initialization.init_rnn(self.rnn, scheme=self.init_rnn)
    # linear
    initialization.init_linear(self.proj)
def init(self):
    # linear
    initialization.init_linear(self.decoder)
def init(self):
    for layer in self.layers:
        initialization.init_linear(layer)
        # bias the gate so that information passes through untouched
        nn.init.constant_(layer.bias[self.in_features:], 1.)
def init(self):
    initialization.init_linear(self.linear_in)
    initialization.init_linear(self.linear_out)
def init(self):
    # scoring vector gets a small uniform init; projections use the shared linear init
    torch.nn.init.uniform_(self.v_a, -0.05, 0.05)
    initialization.init_linear(self.W_s)
    initialization.init_linear(self.W_t)
def init(self):
    initialization.init_linear(self.W_a)
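# Hedged sketch (not part of the original module): one plausible shape for the
# `initialization` helpers assumed by the init() methods above. The actual
# helpers may use different schemes; this only illustrates the assumed contract
# of in-place parameter initialization.
import torch.nn as nn


def init_linear(linear):
    # assumption: Xavier-uniform weights, zero bias
    nn.init.xavier_uniform_(linear.weight)
    if linear.bias is not None:
        nn.init.constant_(linear.bias, 0.)


def init_embeddings(emb):
    # assumption: small uniform range for the embedding table
    nn.init.uniform_(emb.weight, -0.05, 0.05)


def init_rnn(rnn, scheme='default'):
    # assumption: orthogonal recurrent weights, Xavier input weights, zero biases;
    # the real helper presumably dispatches on `scheme`, which is ignored here
    for name, param in rnn.named_parameters():
        if 'weight_hh' in name:
            nn.init.orthogonal_(param)
        elif 'weight_ih' in name:
            nn.init.xavier_uniform_(param)
        elif 'bias' in name:
            nn.init.constant_(param, 0.)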