Example #1
    def __init__(self, hidden_layer_size: int, edge_names: typing.List[str], embedding_dim: int,
                 cuda_details: utils.CudaDetails, num_time_steps: int):
        super().__init__()
        # Sparse gated graph neural network (GGNN) that runs num_time_steps
        # rounds of message passing over the edge types in edge_names.
        self.ggnn = ggnn_sparse.GGNNSparse(
            ggnn_base.GGNNParams(hidden_layer_size, edge_names, cuda_details, num_time_steps))

        self.embedding_dim = embedding_dim
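
Note that the snippet above is the constructor of a torch.nn.Module subclass; the class statement itself sits outside the excerpt. A hedged sketch of how such a module might be instantiated (the class name GraphEncoder, the bond-type edge names, and the utils.CudaDetails call signature are all assumptions, not taken from the source):

# Hypothetical usage of the module excerpted above. `GraphEncoder` stands in
# for the unshown enclosing class, and `utils.CudaDetails(use_cuda=...)` is an
# assumed signature.
cuda_details = utils.CudaDetails(use_cuda=False)
model = GraphEncoder(hidden_layer_size=101,
                     edge_names=['single', 'double', 'triple'],
                     embedding_dim=50,
                     cuda_details=cuda_details,
                     num_time_steps=4)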
Example #2
    def __init__(self, hidden_layer_size, edge_names, cuda_details, T):
        super().__init__()
        # Stacked GGNN that runs T rounds of message passing over the edge
        # types in edge_names.
        self.ggnn = ggnn_stack.GGNNStackedLine(
            ggnn_base.GGNNParams(hidden_layer_size, edge_names, cuda_details, T))

        # Gated readout: a gate network scores each node, a projection maps it
        # to the (scalar) output space, and mlp_down leaves the aggregated
        # result unchanged.
        mlp_project_up = mlp.MLP(mlp.MlpParams(hidden_layer_size, 1, []))
        mlp_gate = mlp.MLP(mlp.MlpParams(hidden_layer_size, 1, []))
        mlp_down = lambda x: x  # identity: no projection after aggregation

        self.ggnn_top = graph_tops.GraphFeaturesStackCS(mlp_project_up, mlp_gate, mlp_down, cuda_details)
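
The three small networks passed to the graph top follow the standard gated readout from the original GGNN paper (Li et al., 2016): a gate network scores each node, a projection maps it to the output space, and the gated projections are summed over the graph. Below is a minimal PyTorch sketch of that aggregation; whether GraphFeaturesStackCS computes exactly this is an assumption:

import torch

def gated_graph_readout(node_feats, project_up, gate, down):
    # node_feats: [num_nodes, hidden_layer_size] for one graph.
    # Sum of per-node projections, each weighted by a learned sigmoid gate --
    # the Li et al. (2016) GGNN graph-level readout.
    gated = torch.sigmoid(gate(node_feats)) * project_up(node_feats)
    return down(gated.sum(dim=0))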
Example #3
    def __init__(self, hidden_layer_size, edge_names, embedding_dim, cuda_details, T):
        super().__init__()
        # Sparse GGNN that runs T rounds of message passing over the edge
        # types in edge_names.
        self.ggnn = ggnn_sparse.GGNNSparse(
            ggnn_base.GGNNParams(hidden_layer_size, edge_names, cuda_details, T))

        # Gated readout networks mapping node representations to
        # embedding_dim-sized graph features; mlp_down is the identity.
        mlp_project_up = mlp.MLP(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        mlp_gate = mlp.MLP(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        mlp_down = lambda x: x

        self.embedding_dim = embedding_dim

        self.ggnn_top = graph_tops.GraphFeaturesStackIndexAdd(mlp_project_up, mlp_gate, mlp_down, cuda_details)
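
The only difference from Example #2's readout is the aggregation over a batch of graphs: the class name GraphFeaturesStackIndexAdd suggests the per-graph sums are computed with Tensor.index_add_ (an assumption about the internals). A self-contained sketch of that batched variant:

import torch

def batched_gated_readout(node_feats, node_to_graph, num_graphs,
                          project_up, gate, down):
    # node_feats: [total_nodes, hidden]; node_to_graph: LongTensor of shape
    # [total_nodes] mapping every node to its graph's index in the batch.
    gated = torch.sigmoid(gate(node_feats)) * project_up(node_feats)
    out = gated.new_zeros(num_graphs, gated.shape[-1])
    out.index_add_(0, node_to_graph, gated)  # segment sum per graph
    return down(out)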
Example #4
def get_model(react_pred: reaction_predictors.AbstractReactionPredictor,
              smi2graph_func,
              reactant_vocab,
              params=None):
    params = params if params is not None else default_params

    # Molecule and DAG embedders which are shared by both the encoder and decoder
    mol_embedder = molecular_graph_embedder.GraphEmbedder(
        **params['mol_graph_embedder_params'])
    dag_gnn = ggnn_sparse.GGNNSparse(
        ggnn_base.GGNNParams(**params['dag_graph_embedder_gnn_params']))
    dag_embdr = dag_embedder.DAGEmbedder(
        dag_gnn, dag_embedder.AggrType[params['dag_embedder_aggr_type_s']],
        params['latent_dim'] * 2)
    # ^ uses params (rather than the module-level default_params) so that a
    # caller-supplied latent_dim is respected, consistent with the prior below.

    # Encoder
    encoder = nn_paramterised_dists.NNParamterisedDistribution(
        dag_embdr,
        final_parameterised_dist=shallow_distributions.IndependentGaussianDistribution())

    # Latent prior
    latent_prior = shallow_distributions.IndependentGaussianDistribution(
        nn.Parameter(torch.zeros(1,
                                 params['latent_dim'] * 2,
                                 dtype=settings.TORCH_FLT),
                     requires_grad=False))

    # Create the kernel
    c = 2 * params['latent_dim'] * (1 ** 2)
    # ^ c = 2 * d_z * sigma^2 with sigma = 1; see section 4 of "Wasserstein
    # Auto-Encoders" (Tolstikhin et al.) for the motivation behind this value.
    kernel = similarity_funcs.InverseMultiquadraticsKernel(c=c)

    # Decoder
    decoder_rnn_hidden_size = params['decoder_params']['gru_hsize']
    decoder_embdg_dim = mol_embedder.embedding_dim
    decoder_nets = dog_decoder.DecoderPreDefinedNetworks(
        mol_embedder,
        f_z_to_h0=nn.Linear(params['latent_dim'], decoder_rnn_hidden_size),
        f_ht_to_e_add=nn.Sequential(nn.Linear(decoder_rnn_hidden_size, 28),
                                    nn.ReLU(),
                                    nn.Linear(28, decoder_embdg_dim)),
        f_ht_to_e_reactant=nn.Sequential(
            nn.Linear(decoder_rnn_hidden_size, 28), nn.ReLU(),
            nn.Linear(28, decoder_embdg_dim)),
        f_ht_to_e_edge=nn.Sequential(nn.Linear(decoder_rnn_hidden_size, 28),
                                     nn.ReLU(),
                                     nn.Linear(28, decoder_embdg_dim)))
    decoder_params = dog_decoder.DecoderParams(**params['decoder_params'])
    decoder = dog_decoder.DOGGenerator(decoder_params,
                                       other_nets=decoder_nets,
                                       react_pred=react_pred,
                                       smi2graph=smi2graph_func,
                                       reactant_vocab=reactant_vocab)

    wae = wasserstein.WAEnMMD(encoder=encoder,
                              decoder=decoder,
                              latent_prior=latent_prior,
                              kernel=kernel)
    wae.mol_embdr = mol_embedder
    # ^ attach this network as a property of the model. During training it
    # should be applied before feeding data into the model, as its embeddings
    # are needed by both the encoder and decoder.

    return wae, params
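
For reference, the inverse multiquadratics kernel constructed in get_model is, in the WAE paper, k(x, y) = c / (c + ||x - y||^2) with the recommended c = 2 * d_z * sigma^2 (sigma = 1 above, hence the 1 ** 2 factor). A minimal sketch in case similarity_funcs is not to hand; the project's own class may differ in details:

import torch

def imq_kernel(x, y, c):
    # x, y: [batch, latent_dim]. Pairwise inverse multiquadratics kernel
    # k(x, y) = c / (c + ||x - y||^2), per section 4 of Tolstikhin et al.
    sq_dists = torch.cdist(x, y, p=2) ** 2  # [batch_x, batch_y]
    return c / (c + sq_dists)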