Example 1
    def __init__(self,
                 n_in,
                 n_out,
                 n_hidden=None,
                 n_layers=2,
                 activation=shifted_softplus):
        super(MLP, self).__init__()
        # if no hidden layer sizes are given, build them by halving the input width per layer
        if n_hidden is None:
            c_neurons = n_in
            self.n_neurons = []
            for i in range(n_layers):
                self.n_neurons.append(c_neurons)
                c_neurons = c_neurons // 2
            self.n_neurons.append(n_out)
        else:
            if type(n_hidden) is int:
                n_hidden = [n_hidden] * (n_layers - 1)
            self.n_neurons = [n_in] + n_hidden + [n_out]

        layers = [
            Dense(self.n_neurons[i],
                  self.n_neurons[i + 1],
                  activation=activation) for i in range(n_layers - 1)
        ]
        layers.append(
            Dense(self.n_neurons[-2], self.n_neurons[-1], activation=None))

        self.out_net = nn.Sequential(*layers)
Example 2
    def __init__(self,
                 n_in,
                 n_out,
                 n_hidden=None,
                 n_layers=2,
                 activation=shifted_softplus):
        super(MLP, self).__init__()
        # get list of number of nodes in input, hidden & output layers
        if n_hidden is None:
            c_neurons = n_in
            self.n_neurons = []
            for i in range(n_layers):
                self.n_neurons.append(c_neurons)
                c_neurons = c_neurons // 2
            self.n_neurons.append(n_out)
        else:
            # get list of number of nodes in the hidden layers
            if type(n_hidden) is int:
                n_hidden = [n_hidden] * (n_layers - 1)
            self.n_neurons = [n_in] + n_hidden + [n_out]

        # assign a Dense layer (with activation function) to each hidden layer
        layers = [
            Dense(self.n_neurons[i],
                  self.n_neurons[i + 1],
                  activation=activation) for i in range(n_layers - 1)
        ]
        # assign a Dense layer (without activation function) to the output layer
        layers.append(
            Dense(self.n_neurons[-2], self.n_neurons[-1], activation=None))
        # put all layers together to make the network
        self.out_net = nn.Sequential(*layers)
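For reference, the layer widths the constructor above produces can be checked without any of the torch machinery. A minimal sketch that mirrors the halving logic (the function name and the sample sizes are illustrative, not part of the original code):

def mlp_layer_sizes(n_in, n_out, n_hidden=None, n_layers=2):
    # mirrors the constructor: halve the width for each hidden layer when n_hidden is None
    if n_hidden is None:
        sizes, width = [], n_in
        for _ in range(n_layers):
            sizes.append(width)
            width = width // 2
        return sizes + [n_out]
    if isinstance(n_hidden, int):
        n_hidden = [n_hidden] * (n_layers - 1)
    return [n_in] + n_hidden + [n_out]

print(mlp_layer_sizes(128, 1, n_layers=3))  # [128, 64, 32, 1]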
Example 3
    def __init__(
        self,
        n_atom_basis,
        n_hidden,
        n_heads=8,
        activation=None,
    ):
        super(EgoAttention, self).__init__()

        # dense layers acting as the multi-head attention projections
        assert n_atom_basis % n_heads == 0, "n_atom_basis must be divisible by n_heads."
        n_per_head = n_atom_basis // n_heads

        self.n_heads = n_heads
        self.n_per_head = n_per_head

        self.mh_q = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_k = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_v = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_o = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)

        self.layer_norm_in = nn.LayerNorm([n_atom_basis])  # normalized shape = input.size()[-1]
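Only the constructor is visible here, so the attention computation itself is not shown. The reshape that the n_heads / n_per_head bookkeeping enables typically looks like the sketch below (plain PyTorch; the tensor shapes and variable names are hypothetical):

import torch

batch, atoms, n_atom_basis, n_heads = 2, 5, 64, 8
n_per_head = n_atom_basis // n_heads  # an exact split, which the assert above guarantees

x = torch.randn(batch, atoms, n_atom_basis)  # e.g. the output of mh_q
# split the feature axis into heads so attention can be computed per head
x_heads = x.view(batch, atoms, n_heads, n_per_head).permute(0, 2, 1, 3)
print(x_heads.shape)  # torch.Size([2, 8, 5, 8])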
Example 4
    def __init__(
        self,
        nin,
        nout,
        elements,
        n_acsf,
        n_apf,
        n_acsf_nodes=100,
        n_apf_nodes=50,
        n_hidden=50,
        n_layers=3,
        trainable=False,
        onehot=True,
        activation=shifted_softplus,
    ):
        super(PairGatedNetwork, self).__init__()

        self.nelem = len(elements)
        self.gate = ElementalGate(elements, trainable=trainable, onehot=onehot)

        acsf_input = (n_acsf * len(elements)) + len(elements) + 1
        self.dense_radial = Dense(acsf_input,
                                  n_acsf_nodes,
                                  activation=activation)
        apf_input = (n_apf * len(elements)) + len(elements) + 1
        self.dense_apf = Dense(apf_input, n_apf_nodes, activation=activation)

        dense_input = (n_acsf_nodes + n_apf_nodes + len(elements) + 1) * 2 + 2
        self.dense_layers = MLP(
            dense_input,
            nout,
            n_hidden=n_hidden,
            n_layers=n_layers,
            activation=activation,
        )
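The input widths above follow directly from the symmetry-function sizes. A quick arithmetic check with, say, n_acsf=32, n_apf=16 and three elements (values chosen only for illustration):

n_acsf, n_apf, n_elements = 32, 16, 3
n_acsf_nodes, n_apf_nodes = 100, 50  # defaults from the constructor

acsf_input = n_acsf * n_elements + n_elements + 1  # 32 * 3 + 3 + 1 = 100
apf_input = n_apf * n_elements + n_elements + 1    # 16 * 3 + 3 + 1 = 52
dense_input = (n_acsf_nodes + n_apf_nodes + n_elements + 1) * 2 + 2  # (100 + 50 + 3 + 1) * 2 + 2 = 310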
Example 5
    def __init__(self, n_in, n_filters, n_out, filter_network,
                 cutoff_network=None,
                 activation=None, normalize_filter=False, axis=2):
        super(CFConv, self).__init__()
        # project atom features into the filter space (no bias term)
        self.in2f = Dense(n_in, n_filters, bias=False)
        # project the aggregated features to the output dimension
        self.f2out = Dense(n_filters, n_out, activation=activation)
        # network that generates the continuous filters from the distance features
        self.filter_network = filter_network
        self.cutoff_network = cutoff_network
        # pool over the neighbor axis (sum, or mean if normalize_filter is True)
        self.agg = Aggregate(axis=axis, mean=normalize_filter)
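A hedged instantiation sketch for the constructor above: the filter network maps expanded interatomic distances to n_filters values. Dense, CFConv, CosineCutoff and shifted_softplus are assumed to come from a SchNetPack-style package; the import paths and the feature sizes below are assumptions, not taken from this code:

import torch.nn as nn
from schnetpack.nn import Dense, CFConv, CosineCutoff  # assumed import paths
from schnetpack.nn.activations import shifted_softplus

n_gaussians, n_filters, n_basis = 25, 64, 64  # illustrative sizes
filter_network = nn.Sequential(
    Dense(n_gaussians, n_filters, activation=shifted_softplus),
    Dense(n_filters, n_filters),
)
conv = CFConv(n_basis, n_filters, n_basis,
              filter_network=filter_network,
              cutoff_network=CosineCutoff(cutoff=5.0),
              activation=shifted_softplus)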
Example 6
    def __init__(self,
                 n_atom_basis=128,
                 max_z=100,
                 kmax=150,
                 n_interactions=1,
                 activation=shifted_softplus):
        super(SchnetWithEdgeUpdate, self).__init__()
        self.n_interactions = n_interactions
        self.embedding = nn.Embedding(max_z, n_atom_basis - 1)
        self.edge_update_net = nn.Sequential(
            Dense(3 * n_atom_basis, 2 * n_atom_basis, activation=activation),
            Dense(2 * n_atom_basis, n_atom_basis),
        )
        self.msg_edge_net = nn.Sequential(
            Dense(n_atom_basis, n_atom_basis, activation=activation),
            Dense(n_atom_basis, n_atom_basis, activation=activation),
        )
        self.msg_atom_fc = Dense(n_atom_basis, n_atom_basis)
        self.state_trans_net = nn.Sequential(
            Dense(n_atom_basis, n_atom_basis, activation=activation),
            Dense(n_atom_basis, n_atom_basis),
        )

        # self.n_dihedral_edge_attrs = 2
        # n_dihedral_edge_feats = 16
        # self.dihedral_net = gnn.NNConv(n_atom_basis, n_dihedral_edge_feats, nn.Sequential(
        #     Dense(self.n_dihedral_edge_attrs, n_atom_basis, activation=F.relu),
        #     Dense(n_atom_basis, n_atom_basis * n_dihedral_edge_feats),
        # ))

        # n_angle_edge_attrs = 1
        # n_angle_edge_feats = 8
        # self.angle_net = gnn.NNConv(n_atom_basis, n_angle_edge_feats, nn.Sequential(
        #     Dense(n_angle_edge_attrs, n_atom_basis, activation=F.relu),
        #     Dense(n_atom_basis, n_atom_basis * n_angle_edge_feats),
        # ))

        # self.init_atom_fc = Dense(n_atom_basis + n_dihedral_edge_feats, n_atom_basis, activation=activation)
        self.init_edge_fc = Dense(kmax, n_atom_basis, activation=activation)
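The 3 * n_atom_basis input of edge_update_net suggests the edge update concatenates the two incident atom embeddings with the current edge embedding. A hedged sketch of that concatenation (shapes and variable names are hypothetical, not taken from this code):

import torch

n_edges, n_atom_basis = 10, 128
h_i = torch.randn(n_edges, n_atom_basis)   # features of the first atom of each edge
h_j = torch.randn(n_edges, n_atom_basis)   # features of the second atom of each edge
e_ij = torch.randn(n_edges, n_atom_basis)  # current edge features
edge_in = torch.cat([h_i, h_j, e_ij], dim=-1)  # shape (n_edges, 3 * n_atom_basis)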
Example 7
    def __init__(self,
                 n_in,
                 n_filters,
                 n_out,
                 filter_network,
                 cutoff_network=None,
                 activation=None,
                 normalize_filter=False,
                 axis=2,
                 n_heads_weights=0,
                 n_heads_conv=0,
                 device=torch.device("cpu"),
                 hyperparams=[0, 0],
                 dropout=0,
                 exp=False):
        super(CFConv, self).__init__()
        self.device = device
        self.n_heads_weights = n_heads_weights
        self.n_heads_conv = n_heads_conv
        self.atomic_embedding_dim = n_out
        self.in2f = Dense(n_in, n_filters, bias=False, activation=None)
        self.f2out = Dense(n_filters, n_out, bias=True, activation=activation)
        self.filter_network = filter_network
        self.cutoff_network = cutoff_network
        # sum over neighbor indices
        self.agg = Aggregate(axis=axis, mean=normalize_filter)
        # multi-headed attention applied to the weights
        self.attention_dim = n_out // 4  # arbitrary; could be modified at will
        if n_heads_weights > 0:
            self.Attention = AttentionHeads(
                n_in, self.attention_dim, n_heads=self.n_heads_weights, EXP=exp,
                atomic_embedding_dim=n_out, device=self.device, SM=False,
                hyperparams=hyperparams, dropout=dropout)
        # multi-headed attention applied to the convolution
        if n_heads_conv > 0:
            # for now this should be a single head
            self.AttentionConv = AttentionHeads(
                n_in, self.attention_dim, n_heads=self.n_heads_conv, EXP=exp,
                atomic_embedding_dim=n_out, device=self.device, SM=False,
                hyperparams=hyperparams, dropout=dropout)
        # NOTE: EXP determines whether the scalar attention value is exp(A) or just A;
        # exp(A) can be unstable, as can the softmax below.

        # optionally use a softmax over the weights
        self.softmax = nn.Softmax(dim=3)

        # not currently used, but could be added if deemed beneficial
        self.dropout = nn.Dropout(dropout)
Example 8
 def __init__(
     self,
     n_atom_basis,
     n_hidden,
     activation=None,
     dropout_rate=0,
     epsilon=0.01,
 ):
     super(AdaptiveComputationTime, self).__init__()
     
     # dropout regularizes the atomic embedding before the ponder score is computed
     self.ponder_net = nn.Sequential(
         nn.Dropout(dropout_rate),
         Dense(n_atom_basis, n_hidden, activation=activation),
         Dense(n_hidden, 1, activation=None),
     )
     
     # self.affine_net = Dense(n_atom_basis, n_atom_basis, bias=True, activation=None)
     self.epsilon = epsilon
     self.sharpen_power = 5.0  # makes the linear interpolation sharper
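Only the ponder network is defined here. In adaptive-computation-time models, the per-step scores it produces are typically accumulated until they exceed 1 - epsilon, at which point pondering halts; a sketch of that standard halting rule (not taken from this code, probabilities are made up):

epsilon = 0.01
step_probs = [0.3, 0.4, 0.5]  # hypothetical per-step halting probabilities
cumulative, steps = 0.0, 0
for p in step_probs:
    steps += 1
    cumulative += p
    if cumulative >= 1.0 - epsilon:  # halt once the remaining budget drops below epsilon
        break
print(steps)  # 3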
Example 9
    def __init__(
        self,
        n_atom_basis,
        n_scales,
        n_heads=8,
        use_time_embedding=True,
    ):
        super(MultiScaleAttention, self).__init__()

        # dense layers acting as the multi-head attention projections
        assert n_atom_basis % n_heads == 0, "n_atom_basis must be divisible by n_heads."
        assert n_atom_basis % 2 == 0, "n_atom_basis must be an even number of atom features."

        n_per_head = n_atom_basis // n_heads
        self.n_per_head = n_per_head

        self.n_heads = n_heads
        self.n_scales = n_scales
        self.n_atom_basis = n_atom_basis
        self.use_time_embedding = use_time_embedding

        self.mh_q = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_k = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_v = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
        self.mh_o = Dense(n_atom_basis,
                          n_atom_basis,
                          bias=False,
                          activation=None)
Example 10
 def __init__(self,
              n_in,
              n_filters,
              n_out,
              filter_network,
              cutoff_network=None,
              activation=None,
              normalize_filter=False,
              axis=2,
              weight_init=xavier_uniform_):
     super(CFConv, self).__init__()
     self.in2f = Dense(n_in,
                       n_filters,
                       bias=False,
                       activation=None,
                       weight_init=weight_init)
     self.f2out = Dense(n_filters,
                        n_out,
                        bias=True,
                        activation=activation,
                        weight_init=weight_init)
     self.filter_network = filter_network
     self.cutoff_network = cutoff_network
     self.agg = Aggregate(axis=axis, mean=normalize_filter)