Example 1
0
    def test_graph_pool(self):
        """Tests that GraphPool transforms shapes correctly."""
        n_atoms = 5
        n_feat = 10
        batch_size = 3
        nb_filter = 7
        with self.test_session() as sess:
            topology = GraphTopology(n_feat)
            pool_layer = GraphPool()

            placeholders = topology.get_input_placeholders()
            # NOTE(review): the pooled output is built but never asserted on;
            # presumably a shape check was intended here -- confirm.
            pooled = pool_layer(placeholders)
Example 2
0
class SequentialGraphModel(object):
    """An analog of Keras Sequential class for Graph data.

    Like the Sequential class from Keras, but automatically passes topology
    placeholders from GraphTopology to each graph layer (from keras_layers)
    added to the network. Non graph layers don't get the extra placeholders.
    """

    def __init__(self, n_feat):
        """
        Parameters
        ----------
        n_feat: int
          Number of features per atom.
        """
        self.graph_topology = GraphTopology(n_feat)
        # Running output tensor; starts as the raw atom-feature placeholder
        # and is threaded through each layer added via add().
        self.output = self.graph_topology.get_atom_features_placeholder()
        # Layers in the order they were added.
        self.layers = []

    def add(self, layer):
        """Adds a new layer to the model.

        Graph layers (GraphConv, GraphGather, GraphPool) receive the topology
        placeholders in addition to the current output tensor; all other
        layers receive only the current output tensor.

        Raises
        ------
        ValueError
          If a graph layer is added after a GraphGather layer.
        """
        # For graphical layers, add connectivity placeholders
        if type(layer).__name__ in ('GraphConv', 'GraphGather', 'GraphPool'):
            # BUG FIX: the old guard used hasattr(self.layers[-1], "__name__"),
            # which is False for ordinary instances (only the *class* carries
            # __name__), so the GraphGather check never fired.  Inspect the
            # class name of the previous layer instead, and raise instead of
            # assert so the check survives `python -O`.
            if self.layers and type(self.layers[-1]).__name__ == "GraphGather":
                raise ValueError(
                    'Cannot use GraphConv or GraphGather layers after a GraphGather')

            self.output = layer(
                [self.output] +
                self.graph_topology.get_topology_placeholders())
        else:
            self.output = layer(self.output)

        # Add layer to the layer list
        self.layers.append(layer)

    def get_graph_topology(self):
        """Returns the GraphTopology supplying this model's placeholders."""
        return self.graph_topology

    def get_num_output_features(self):
        """Gets the output shape of the featurization layers of the network"""
        return self.layers[-1].output_shape[1]

    def return_outputs(self):
        """Returns the current output tensor of the layer stack."""
        return self.output

    def return_inputs(self):
        """Returns the model's input placeholders."""
        return self.graph_topology.get_input_placeholders()

    def get_layer(self, layer_id):
        """Returns the layer at position `layer_id` in insertion order."""
        return self.layers[layer_id]
Example 3
0
    def test_graph_convolution(self):
        """Tests that Graph Convolution transforms shapes correctly."""
        n_atoms = 5
        n_feat = 10
        nb_filter = 7
        with self.test_session() as sess:
            topology = GraphTopology(n_feat)
            conv_layer = GraphConv(nb_filter)

            placeholders = topology.get_input_placeholders()
            convolved = conv_layer(placeholders)
            # Output should be of shape (?, nb_filter)
            assert convolved.get_shape()[1] == nb_filter
Example 4
0
class SequentialGraph(object):
  """An analog of Keras Sequential class for Graph data.

  Like the Sequential class from Keras, but automatically passes topology
  placeholders from GraphTopology to each graph layer (from keras_layers) added
  to the network. Non graph layers don't get the extra placeholders.
  """

  def __init__(self, n_feat):
    """
    Parameters
    ----------
    n_feat: int
      Number of features per atom.
    """
    self.graph_topology = GraphTopology(n_feat)
    # Running output tensor; starts as the raw atom-feature placeholder and is
    # threaded through each layer added via add().
    self.output = self.graph_topology.get_atom_features_placeholder()
    # Layers in the order they were added.
    self.layers = []

  def add(self, layer):
    """Adds a new layer to the model.

    Graph layers (GraphConv, GraphGather, GraphPool) receive the topology
    placeholders in addition to the current output tensor; all other layers
    receive only the current output tensor.

    Raises
    ------
    ValueError
      If a graph layer is added after a GraphGather layer.
    """
    # For graphical layers, add connectivity placeholders
    if type(layer).__name__ in ('GraphConv', 'GraphGather', 'GraphPool'):
      # BUG FIX: the old guard used hasattr(self.layers[-1], "__name__"),
      # which is False for ordinary instances (only the *class* carries
      # __name__), so the GraphGather check never fired.  Inspect the class
      # name of the previous layer instead, and raise instead of assert so
      # the check survives `python -O`.
      if self.layers and type(self.layers[-1]).__name__ == "GraphGather":
        raise ValueError(
            'Cannot use GraphConv or GraphGather layers after a GraphGather')

      self.output = layer(
          [self.output] + self.graph_topology.get_topology_placeholders())
    else:
      self.output = layer(self.output)

    # Add layer to the layer list
    self.layers.append(layer)

  def get_graph_topology(self):
    """Returns the GraphTopology supplying this model's placeholders."""
    return self.graph_topology

  def get_num_output_features(self):
    """Gets the output shape of the featurization layers of the network"""
    return self.layers[-1].output_shape[1]

  def return_outputs(self):
    """Returns the current output tensor of the layer stack."""
    return self.output

  def return_inputs(self):
    """Returns the model's input placeholders."""
    return self.graph_topology.get_input_placeholders()

  def get_layer(self, layer_id):
    """Returns the layer at position `layer_id` in insertion order."""
    return self.layers[layer_id]
Example 5
0
    def test_graph_gather(self):
        """Tests that GraphGather transforms shapes correctly."""
        n_atoms = 5
        n_feat = 10
        batch_size = 3
        nb_filter = 7
        with self.test_session() as sess:
            topology = GraphTopology(n_feat)
            gather_layer = GraphGather(batch_size)

            placeholders = topology.get_input_placeholders()
            gathered = gather_layer(placeholders)
            # Output should be of shape (batch_size, n_feat)
            assert gathered.get_shape() == (batch_size, n_feat)
Example 6
0
    def test_attn_lstm_embedding(self):
        """Test that attention LSTM computation works properly.

        Only checks the static output shapes; the graph is never executed,
        so no feed dict is required.
        """
        max_depth = 5
        n_test = 5
        n_support = 11
        n_feat = 10
        nb_filter = 7
        with self.test_session() as sess:
            graph_topology_test = GraphTopology(n_feat)
            graph_topology_support = GraphTopology(n_feat)

            # Atom-feature placeholders for the test and support sets.
            test = graph_topology_test.get_input_placeholders()[0]
            support = graph_topology_support.get_input_placeholders()[0]

            attn_embedding_layer = AttnLSTMEmbedding(n_test, n_support,
                                                     max_depth)
            # Dead code removed: a feed dict of np.zeros arrays was built here
            # but never passed to any sess.run call.
            test_out, support_out = attn_embedding_layer([test, support])
            assert test_out.get_shape() == (n_test, n_feat)
            # NOTE(review): only dim 1 is pinned for support_out; dim 0 is
            # presumably n_support -- confirm before tightening this check.
            assert support_out.get_shape()[1] == (n_feat)