Example #1
    def __init__(self, node_feature_dim: int, final_dim: int, cuda_details: utils.CudaDetails):
        super().__init__()
        # Linear (no hidden layer) networks used by the gated readout:
        mlp_up = mlp.MLP(mlp.MlpParams(node_feature_dim, 2*node_feature_dim, []))  # up-projects node features to twice their width
        mlp_gate = mlp.MLP(mlp.MlpParams(node_feature_dim, 1, []))  # per-node scalar gate
        mlp_func = mlp.MLP(mlp.MlpParams(2*node_feature_dim, final_dim, []))  # maps the pooled graph vector to final_dim

        self.g_top = graph_tops.GraphFeaturesStackIndexAdd(mlp_up, mlp_gate, mlp_func, cuda_details=cuda_details)
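
For intuition, GraphFeaturesStackIndexAdd presumably performs a gated sum readout over node features, in the spirit of the GGNN graph-level output of Li et al.; below is a minimal self-contained sketch under that assumption (the name gated_index_add_readout and the node_to_graph_id argument are illustrative, not the repository's API):

import torch

def gated_index_add_readout(node_feats, node_to_graph_id, num_graphs,
                            mlp_up, mlp_gate, mlp_func):
    # Gate each node's up-projected features by a learned scalar in (0, 1).
    gated = torch.sigmoid(mlp_gate(node_feats)) * mlp_up(node_feats)  # [num_nodes, 2*d]
    # Scatter-sum node contributions into per-graph rows with index_add_.
    graph_feats = torch.zeros(num_graphs, gated.shape[1],
                              dtype=gated.dtype, device=gated.device)
    graph_feats.index_add_(0, node_to_graph_id, gated)
    return mlp_func(graph_feats)  # [num_graphs, final_dim]

index_add_ scatters each node's gated contribution into the row of the graph it belongs to, which matches what the class name suggests.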
Example #2
    def __init__(self, hidden_layer_size, edge_names, cuda_details, T):
        super().__init__()
        self.ggnn = ggnn_stack.GGNNStackedLine(
            ggnn_base.GGNNParams(hidden_layer_size, edge_names,
                                 cuda_details, T))

        # Linear (no hidden layer) networks for the gated readout; each emits one value per node.
        mlp_project_up = mlp.MLP(mlp.MlpParams(hidden_layer_size, 1, []))
        mlp_gate = mlp.MLP(mlp.MlpParams(hidden_layer_size, 1, []))
        mlp_down = lambda x: x  # identity: the pooled graph feature is used as-is

        self.ggnn_top = graph_tops.GraphFeaturesStackCS(mlp_project_up, mlp_gate, mlp_down, cuda_details)
Example #3
def get_mol_chef(params: MChefParams):
    # Create embedder
    ggnn = graph_embedder.GraphEmbedder(params.gnn_hidden_size,
                                        params.edge_names,
                                        params.embedding_dim,
                                        params.cuda_details,
                                        params.ggnn_time_steps)
    embedr = embedders.SymbolGNNEmbedder(ggnn, params.index_to_graph_lookup,
                                         params.total_number_of_graphs,
                                         params.stop_indx)

    # Create the encoder
    mlp_top = mlp.MLP(
        mlp.MlpParams(params.embedding_dim,
                      params.latent_dim * 2,
                      hidden_sizes=[200]))
    encd = encoder.get_encoder(embedr, mlp_top)

    # Create the decoder
    gru_hsize = params.embedding_dim
    mlp_decdr_gru_to_h = mlp.MLP(
        mlp.MlpParams(gru_hsize, params.embedding_dim, hidden_sizes=[128]))
    mlp_proj_latents_to_hidden = nn.Linear(params.latent_dim, gru_hsize)
    decoder_params = decoder.DecoderParams(
        gru_insize=params.embedding_dim,
        gru_hsize=gru_hsize,
        num_layers=params.decd_layers,
        gru_dropout=0.,
        mlp_out=mlp_decdr_gru_to_h,
        mlp_parameterise_hidden=mlp_proj_latents_to_hidden,
        max_steps=params.decd_max_steps)
    decd = decoder.get_decoder(embedr, decoder_params)

    # Create the latent prior: a 1 x (2 * latent_dim) block of fixed zeros,
    # presumably the means and log-variances of a standard Gaussian.
    latent_prior = shallow_distributions.IndependentGaussianDistribution(
        nn.Parameter(torch.zeros(1,
                                 params.latent_dim * 2,
                                 device=params.cuda_details.device_str,
                                 dtype=mchef_config.PT_FLOAT_TYPE),
                     requires_grad=False))

    # Create the kernel; c = 2 * d_z * sigma^2 with sigma = 1, as recommended in
    # section 4 of "Wasserstein Auto-Encoders" (Tolstikhin et al.).
    c = 2 * params.latent_dim * (1**2)
    kernel = similarity_funcs.InverseMultiquadraticsKernel(c=c)

    # Create the autoencoder
    wae = wasserstein.WAEnMMD(encd, decd, latent_prior, kernel=kernel)

    # Add the regressor from latent space to property prediction
    wae.prop_predictor_ = mlp.MLP(
        mlp.MlpParams(params.latent_dim, params.property_dim, [40, 40]))

    return wae
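
For reference, the inverse multiquadratics kernel above is k(x, y) = c / (c + ||x - y||^2), and in a WAE-MMD it enters a maximum mean discrepancy penalty between encoded codes and prior samples. A minimal sketch of both follows (imq_kernel and mmd_penalty are illustrative names, not the repository's API, and this uses the simple biased MMD estimate rather than the unbiased one from the WAE paper):

import torch

def imq_kernel(x, y, c):
    # Inverse multiquadratics kernel k(x, y) = c / (c + ||x - y||^2),
    # evaluated for all pairs of rows in x [n, d] and y [m, d].
    sq_dists = torch.cdist(x, y, p=2) ** 2
    return c / (c + sq_dists)

def mmd_penalty(z_encoded, z_prior, c):
    # Biased MMD estimate between encoded codes and prior samples.
    k_xx = imq_kernel(z_encoded, z_encoded, c)
    k_yy = imq_kernel(z_prior, z_prior, c)
    k_xy = imq_kernel(z_encoded, z_prior, c)
    return k_xx.mean() + k_yy.mean() - 2 * k_xy.mean()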
Example #4
    def __init__(self, hidden_layer_size, edge_names, embedding_dim, cuda_details, T):
        super().__init__()
        self.ggnn = ggnn_sparse.GGNNSparse(
            ggnn_base.GGNNParams(hidden_layer_size, edge_names, cuda_details, T))

        # Linear (no hidden layer) projections for the gated readout into embedding space.
        mlp_project_up = mlp.MLP(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        mlp_gate = mlp.MLP(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        mlp_down = lambda x: x  # identity: the pooled embedding is used directly

        self.embedding_dim = embedding_dim

        self.ggnn_top = graph_tops.GraphFeaturesStackIndexAdd(mlp_project_up, mlp_gate, mlp_down, cuda_details)
Example #5
    def __init__(self, output_dim, hidden_layer_size, edge_names,
                 embedding_dim, cuda_details, T):
        super().__init__()
        self.ggnn = graph_embedder.GraphEmbedder(hidden_layer_size, edge_names,
                                                 embedding_dim, cuda_details,
                                                 T)
        self.mlp = mlp.MLP(
            mlp.MlpParams(embedding_dim, output_dim, [125, 80, 50]))
Example #6
    def __init__(self, mlp_input_size, hidden_sizes):
        super().__init__()
        self.mlp = mlp.MLP(mlp.MlpParams(mlp_input_size, 1, hidden_sizes))
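
Across all of these examples, mlp.MLP(mlp.MlpParams(input_dim, output_dim, hidden_sizes)) is presumably a thin wrapper over a feed-forward stack; below is a minimal sketch of an equivalent module in plain PyTorch (MLPSketch is an illustration, not the repository's mlp module, and the ReLU activations are an assumption):

import typing
import torch.nn as nn

class MLPSketch(nn.Module):
    # Assumed equivalent of mlp.MLP(mlp.MlpParams(input_dim, output_dim, hidden_sizes)):
    # linear layers with ReLU activations in between and a linear final layer,
    # so hidden_sizes == [] reduces to a single nn.Linear.
    def __init__(self, input_dim: int, output_dim: int, hidden_sizes: typing.List[int]):
        super().__init__()
        layers = []
        in_dim = input_dim
        for h in hidden_sizes:
            layers += [nn.Linear(in_dim, h), nn.ReLU()]
            in_dim = h
        layers.append(nn.Linear(in_dim, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)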