def __init__(self, n_hidden=100, init='glorot_uniform'):
    """Create the GRU gate parameters.

    Parameters
    ----------
    n_hidden: int, optional
      Hidden-state width; every weight matrix is (n_hidden, n_hidden)
      and every bias vector is (n_hidden,).
    init: str, optional
      Weight-initialization scheme name, resolved via
      ``initializations.get``.
    """
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    square = [n_hidden, n_hidden]
    # Input-side (W*) and recurrent (U*) matrices for the update (z),
    # reset (r) and candidate (h) gates, plus one bias per gate.
    Wz, Wr, Wh = (self.init(square) for _ in range(3))
    Uz, Ur, Uh = (self.init(square) for _ in range(3))
    bz, br, bh = (model_ops.zeros(shape=(n_hidden,)) for _ in range(3))
    self.trainable_weights = [Wz, Wr, Wh, Uz, Ur, Uh, bz, br, bh]
def build(self):
    """Allocate this layer's trainable parameters.

    Creates the embedding->hidden (W_cf), distance->hidden (W_df) and
    hidden->embedding (W_fc) matrices with their two bias vectors, and
    registers everything in ``self.trainable_weights``.
    """
    init = self.init
    zeros = model_ops.zeros
    self.W_cf = init([self.n_embedding, self.n_hidden])
    self.W_df = init([self.n_distance, self.n_hidden])
    self.W_fc = init([self.n_hidden, self.n_embedding])
    self.b_cf = zeros(shape=[self.n_hidden])
    self.b_df = zeros(shape=[self.n_hidden])
    # Note: W_fc intentionally has no bias in the original design.
    self.trainable_weights = [
        self.W_cf, self.W_df, self.W_fc, self.b_cf, self.b_df
    ]
def build(self):
    """Construct the dense stack's trainable weights.

    Builds one (in, out) weight matrix and one bias vector per entry in
    ``self.layer_sizes``, starting from ``self.n_embedding`` input
    features, followed by a final projection to ``self.n_outputs``.
    All parameters are collected into ``self.trainable_weights``.
    """
    self.W_list = []
    self.b_list = []
    prev_layer_size = self.n_embedding
    for layer_size in self.layer_sizes:
        self.W_list.append(self.init([prev_layer_size, layer_size]))
        self.b_list.append(model_ops.zeros(shape=[layer_size]))
        prev_layer_size = layer_size
    # Final output projection.
    self.W_list.append(self.init([prev_layer_size, self.n_outputs]))
    self.b_list.append(model_ops.zeros(shape=[self.n_outputs]))
    # Removed the original dead `prev_layer_size = self.n_outputs`
    # (assigned after the last use) and the unused enumerate index.
    self.trainable_weights = self.W_list + self.b_list
def build(self):
    """Create trainable weights for the optional expanded-feature dense layer.

    When ``self.gaussian_expand`` is set, a dense layer (W, b) maps the
    expanded features (width ``n_input * 11`` — presumably 11 gaussian
    membership bins per feature; confirm against the caller) back down
    to ``n_input``. Otherwise the layer has no parameters.
    """
    if self.gaussian_expand:
        self.W = self.init([self.n_input * 11, self.n_input])
        self.b = model_ops.zeros(shape=[self.n_input])
        # Bug fix: the original used `self.W + self.b`, which performs
        # broadcast tensor *addition* rather than building the list of
        # weights every sibling layer stores here.
        self.trainable_weights = [self.W, self.b]
    else:
        # NOTE(review): sibling layers use a list here; kept None to
        # preserve the existing contract — confirm callers handle it.
        self.trainable_weights = None
def build(self):
    """Construct internal trainable weights.

    Stacks dense layers from ``self.n_graph_feat`` inputs through each
    width in ``self.layer_sizes`` and finally to ``self.n_outputs``,
    storing matrices in ``self.W_list`` and biases in ``self.b_list``.
    """
    w_list, b_list = [], []
    in_size = self.n_graph_feat
    # Hidden layers followed by the output projection, in one pass.
    for out_size in list(self.layer_sizes) + [self.n_outputs]:
        w_list.append(self.init([in_size, out_size]))
        b_list.append(model_ops.zeros(shape=[out_size]))
        in_size = out_size
    self.W_list = w_list
    self.b_list = b_list
    self.trainable_weights = self.W_list + self.b_list
def __init__(self,
             pair_features,
             n_pair_features=8,
             n_hidden=100,
             init='glorot_uniform'):
    """Precompute per-pair message matrices from pair features.

    Projects ``pair_features`` through a learned
    (n_pair_features, n_hidden * n_hidden) matrix plus bias, then
    reshapes the result into ``self.A`` with one (n_hidden, n_hidden)
    matrix per pair.

    Parameters
    ----------
    pair_features: tensor
      Pair feature input; last dimension must equal ``n_pair_features``.
    n_pair_features: int, optional
      Width of each pair's feature vector.
    n_hidden: int, optional
      Hidden width of the per-pair message matrices.
    init: str, optional
      Weight-initialization scheme name.
    """
    self.n_pair_features = n_pair_features
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    W = self.init([n_pair_features, n_hidden * n_hidden])
    b = model_ops.zeros(shape=(n_hidden * n_hidden,))
    flat = torch.matmul(pair_features, W) + b
    self.A = torch.reshape(flat, (-1, n_hidden, n_hidden))
    self.trainable_weights = [W, b]
def build(self):
    """Construct internal trainable weights.

    Builds the atom-update parameters (atom->atom, pair->atom, output
    projection) and, when ``self.update_pair`` is set, the matching
    pair-update parameters.

    TODO(rbharath): Need to make this not set instance variables to
    follow style in other layers.
    """
    init = initializations.get(self.init)  # resolve initializer once

    def dense_params(n_in, n_out):
        # One (weight, bias) pair for a dense transform n_in -> n_out.
        return init([n_in, n_out]), model_ops.zeros(shape=[n_out])

    # Atom-update path.
    self.W_AA, self.b_AA = dense_params(self.n_atom_input_feat,
                                        self.n_hidden_AA)
    self.W_PA, self.b_PA = dense_params(self.n_pair_input_feat,
                                        self.n_hidden_PA)
    self.W_A, self.b_A = dense_params(self.n_hidden_A,
                                      self.n_atom_output_feat)
    self.trainable_weights = [
        self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A
    ]
    if self.update_pair:
        # Pair-update path: concatenated atom pair -> pair, pair -> pair,
        # and the pair output projection.
        self.W_AP, self.b_AP = dense_params(self.n_atom_input_feat * 2,
                                            self.n_hidden_AP)
        self.W_PP, self.b_PP = dense_params(self.n_pair_input_feat,
                                            self.n_hidden_PP)
        self.W_P, self.b_P = dense_params(self.n_hidden_P,
                                          self.n_pair_output_feat)
        self.trainable_weights.extend([
            self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P
        ])