def __init__(self,
             n_embedding=30,
             n_outputs=100,
             layer_sizes=[100],
             output_activation=True,
             init='glorot_uniform',
             activation='tanh',
             **kwargs):
  """
  Parameters
  ----------
  n_embedding: int, optional
    Number of features for each atom
  n_outputs: int, optional
    Number of features for each molecule (output)
  layer_sizes: list of int, optional (default=[100])
    Structure of hidden layer(s)
  output_activation: bool, optional
    Whether to apply the activation function to the output layer
  init: str, optional
    Weight initialization for filters.
  activation: str, optional
    Activation function applied
  """
  self.n_embedding = n_embedding
  self.n_outputs = n_outputs
  self.layer_sizes = layer_sizes
  self.output_activation = output_activation
  self.init = initializations.get(init)  # Set weight initialization
  self.activation = activations.get(activation)  # Get activations
  super(DTNNGather, self).__init__(**kwargs)
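# Usage sketch (illustrative only; the variable name is hypothetical):
# gather 30-dim atom embeddings into 100 molecule-level output features
# through a single 100-wide hidden layer.
#
#   >>> gather = DTNNGather(n_embedding=30, n_outputs=100,
#   ...                     layer_sizes=[100], activation='tanh')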
def __init__(self,
             n_embedding=30,
             n_distance=100,
             n_hidden=60,
             init='glorot_uniform',
             activation='tanh',
             **kwargs):
  """
  Parameters
  ----------
  n_embedding: int, optional
    Number of features for each atom
  n_distance: int, optional
    Granularity of distance matrix
  n_hidden: int, optional
    Number of nodes in hidden layer
  init: str, optional
    Weight initialization for filters.
  activation: str, optional
    Activation function applied
  """
  self.n_embedding = n_embedding
  self.n_distance = n_distance
  self.n_hidden = n_hidden
  self.init = initializations.get(init)  # Set weight initialization
  self.activation = activations.get(activation)  # Get activations
  super(DTNNStep, self).__init__(**kwargs)
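# Usage sketch (illustrative only): per the docstring, n_distance sets the
# granularity of the distance matrix, so a larger value gives the step a
# finer-grained distance representation at the cost of more weights.
#
#   >>> step = DTNNStep(n_embedding=30, n_distance=100, n_hidden=60)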
def __init__(self,
             batch_size,
             n_input=128,
             gaussian_expand=False,
             init='glorot_uniform',
             activation='tanh',
             eps=1e-3,
             momentum=0.99,
             **kwargs):
  """
  Parameters
  ----------
  batch_size: int
    Number of molecules in a batch
  n_input: int, optional
    Number of features for each input molecule
  gaussian_expand: boolean, optional
    Whether to expand each dimension of atomic features by gaussian histogram
  init: str, optional
    Weight initialization for filters.
  activation: str, optional
    Activation function applied
  eps: float, optional
    Small constant added for numerical stability
  momentum: float, optional
    Momentum for moving-average statistics
  """
  self.n_input = n_input
  self.batch_size = batch_size
  self.gaussian_expand = gaussian_expand
  self.init = initializations.get(init)  # Set weight initialization
  self.activation = activations.get(activation)  # Get activations
  self.eps = eps
  self.momentum = momentum
  self.W, self.b = None, None
  super(WeaveGather, self).__init__(**kwargs)
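# Usage sketch (illustrative only): a WeaveGather for batches of 64
# molecules with 128-dimensional molecule features. gaussian_expand is
# left at its default (False), so atomic features are gathered without
# the gaussian-histogram expansion.
#
#   >>> gather = WeaveGather(batch_size=64, n_input=128)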
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
  """Creates weave tensors.

  parent layers: [atom_features, pair_features], pair_split, atom_to_pair
  """
  activation = self.activation  # already resolved to a callable in __init__
  if in_layers is None:
    in_layers = self.in_layers
  in_layers = convert_to_layers(in_layers)
  self.build()
  atom_features = in_layers[0].out_tensor
  pair_features = in_layers[1].out_tensor
  pair_split = in_layers[2].out_tensor
  atom_to_pair = in_layers[3].out_tensor

  AA = torch.matmul(atom_features, self.W_AA) + self.b_AA
  AA = activation(AA)
  PA = torch.matmul(pair_features, self.W_PA) + self.b_PA
  PA = activation(PA)
  # Segment-sum pair contributions onto atoms: pair_split[i] is the atom
  # index that pair i contributes to.
  PA = PA.new_zeros(atom_features.shape[0],
                    PA.shape[1]).index_add_(0, pair_split, PA)

  A = torch.matmul(torch.cat([AA, PA], 1), self.W_A) + self.b_A
  A = activation(A)

  if self.update_pair:
    # atom_to_pair holds the (i, j) atom indices of each pair; advanced
    # indexing gathers both atoms' features, flattened to one row per pair.
    AP_ij = torch.matmul(
        torch.reshape(atom_features[atom_to_pair],
                      [-1, 2 * self.n_atom_input_feat]),
        self.W_AP) + self.b_AP
    AP_ij = activation(AP_ij)
    # Same gather with each pair's order reversed to (j, i).
    AP_ji = torch.matmul(
        torch.reshape(atom_features[torch.flip(atom_to_pair, [1])],
                      [-1, 2 * self.n_atom_input_feat]),
        self.W_AP) + self.b_AP
    AP_ji = activation(AP_ji)

    PP = torch.matmul(pair_features, self.W_PP) + self.b_PP
    PP = activation(PP)
    P = torch.matmul(torch.cat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
    P = activation(P)
  else:
    P = pair_features

  self.out_tensors = [A, P]
  if set_tensors:
    self.variables = self.trainable_weights
    self.out_tensor = A
  return self.out_tensors
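# Worked illustration of the segment sum above (toy shapes, not part of the
# layer): with 3 atoms and pair_split = [0, 1, 1, 2], rows 1 and 2 of PA
# accumulate into atom 1's slot.
#
#   >>> import torch
#   >>> PA = torch.ones(4, 2)
#   >>> pair_split = torch.tensor([0, 1, 1, 2])
#   >>> torch.zeros(3, 2).index_add_(0, pair_split, PA)
#   tensor([[1., 1.],
#           [2., 2.],
#           [1., 1.]])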
def __init__(self,
             n_graph_feat=30,
             n_atom_feat=75,
             max_atoms=50,
             layer_sizes=[100],
             init='glorot_uniform',
             activation='relu',
             dropout=None,
             batch_size=64,
             **kwargs):
  """
  Parameters
  ----------
  n_graph_feat: int, optional
    Number of features for each node (and the whole graph).
  n_atom_feat: int, optional
    Number of features listed per atom.
  max_atoms: int, optional
    Maximum number of atoms in molecules.
  layer_sizes: list of int, optional (default=[100])
    List of hidden layer size(s): the length of this list represents
    the number of hidden layers, and each element is the width of the
    corresponding hidden layer.
  init: str, optional
    Weight initialization for filters.
  activation: str, optional
    Activation function applied.
  dropout: float, optional
    Dropout probability in hidden layer(s).
  batch_size: int, optional
    Number of molecules in a batch.
  """
  super(DAGLayer, self).__init__(**kwargs)
  self.init = initializations.get(init)  # Set weight initialization
  self.activation = activations.get(activation)  # Get activations
  self.layer_sizes = layer_sizes
  self.dropout = dropout
  self.max_atoms = max_atoms
  self.batch_size = batch_size
  # Number of inputs each step: the atom's own features plus the graph
  # features of up to (max_atoms - 1) parent atoms.
  self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
  self.n_graph_feat = n_graph_feat
  self.n_outputs = n_graph_feat
  self.n_atom_feat = n_atom_feat
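# Worked example of the n_inputs arithmetic above (defaults assumed): with
# n_atom_feat=75, max_atoms=50, and n_graph_feat=30, each step consumes
# 75 + (50 - 1) * 30 = 1545 input features.
#
#   >>> layer = DAGLayer(n_graph_feat=30, n_atom_feat=75, max_atoms=50)
#   >>> layer.n_inputs
#   1545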
def __init__(self,
             n_graph_feat=30,
             n_outputs=30,
             max_atoms=50,
             layer_sizes=[100],
             init='glorot_uniform',
             activation='relu',
             dropout=None,
             **kwargs):
  """
  Parameters
  ----------
  n_graph_feat: int, optional
    Number of features for each atom.
  n_outputs: int, optional
    Number of features for each molecule.
  max_atoms: int, optional
    Maximum number of atoms in molecules.
  layer_sizes: list of int, optional
    List of hidden layer size(s): the length of this list represents
    the number of hidden layers, and each element is the width of the
    corresponding hidden layer.
  init: str, optional
    Weight initialization for filters.
  activation: str, optional
    Activation function applied.
  dropout: float, optional
    Dropout probability in the hidden layer(s).
  """
  super(DAGGather, self).__init__(**kwargs)
  self.init = initializations.get(init)  # Set weight initialization
  self.activation = activations.get(activation)  # Get activations
  self.layer_sizes = layer_sizes
  self.dropout = dropout
  self.max_atoms = max_atoms
  self.n_graph_feat = n_graph_feat
  self.n_outputs = n_outputs
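# Usage sketch (illustrative only): a DAGGather is typically paired with a
# DAGLayer whose n_graph_feat matches, reducing per-atom graph features to
# a fixed-size per-molecule representation.
#
#   >>> gather = DAGGather(n_graph_feat=30, n_outputs=30, max_atoms=50)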