Example #1
# Assumed imports and test-class wrapper (legacy DeepChem 2.x TensorGraph layers).
import numpy as np
import tensorflow as tf
from deepchem.models.tensorgraph.layers import GraphCNN

class TestGraphCNN(tf.test.TestCase):

  def test_graphcnn(self):
    """Test GraphCNN layer from https://arxiv.org/abs/1703.00792."""
    # Vertex features (batch, atoms, features) and adjacencies (batch, atoms, edge types, atoms).
    V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
    adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
    with self.session() as sess:
      out_tensor = GraphCNN(num_filters=6)(V, adjs)
      sess.run(tf.global_variables_initializer())
      result = out_tensor.eval()
      assert result.shape == (10, 100, 6)
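The shapes in this test follow the Graph-CNN formulation of the cited paper: vertex features of shape (batch, n_vertices, n_features) are filtered once per edge type of the adjacency tensor (batch, n_vertices, n_edge_types, n_vertices), giving (batch, n_vertices, num_filters). The snippet below is only a rough NumPy sketch of that shape arithmetic; the function name graph_cnn_sketch and the random weight tensor H are illustrative stand-ins, not DeepChem's implementation.

import numpy as np

def graph_cnn_sketch(V, adjs, num_filters, seed=0):
  # V:    (batch, n_vertices, n_features)
  # adjs: (batch, n_vertices, n_edge_types, n_vertices)
  rng = np.random.default_rng(seed)
  n_features, n_edge_types = V.shape[-1], adjs.shape[2]
  # One weight matrix per edge type, plus one for each vertex's own features.
  H = rng.standard_normal((n_edge_types + 1, n_features, num_filters)).astype(np.float32)
  out = V @ H[0]                      # self term
  for l in range(n_edge_types):
    A_l = adjs[:, :, l, :]            # (batch, n_vertices, n_vertices)
    out = out + (A_l @ V) @ H[l + 1]  # aggregate neighbours of edge type l
  return out                          # (batch, n_vertices, num_filters)

V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
assert graph_cnn_sketch(V, adjs, num_filters=6).shape == (10, 100, 6)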
Example #2
  def build_graph(self):
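    # Inputs: per-atom feature vectors, a single-edge-type adjacency matrix,
    # and a mask marking which of the max_atoms slots hold real atoms.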
    self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
    self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
    self.mask = Feature(shape=(None, self.max_atoms, 1))

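    # Two 64-filter GraphCNN blocks, each with batch normalization and dropout.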
    gcnn1 = BatchNorm(
        GraphCNN(
            num_filters=64,
            in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
    gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
    gcnn2 = BatchNorm(
        GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
    gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
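    # Pool the graph down to 32 vertices; GraphCNNPool also returns the pooled adjacency.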
    gc_pool, adj_matrix = GraphCNNPool(
        num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
    gc_pool = BatchNorm(gc_pool)
    gc_pool = Dropout(self.dropout, in_layers=gc_pool)
    gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
    gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
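    # Second pooling stage coarsens the graph to 8 vertices.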
    gc_pool2, adj_matrix2 = GraphCNNPool(
        num_vertices=8, in_layers=[gcnn3, adj_matrix])
    gc_pool2 = BatchNorm(gc_pool2)
    gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
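    # Flatten the pooled graph and apply a dense readout shared by all task heads.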
    flattened = Flatten(in_layers=gc_pool2)
    readout = Dense(
        out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
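    # One output head and one loss term per task.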
    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
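    # Stack the per-task losses and apply per-sample task weights.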
    if self.mode == "classification":
      entropy = Stack(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
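The Feature placeholders above fix the input layout: per-molecule vertex features of shape (max_atoms, 75), an adjacency tensor of shape (max_atoms, 1, max_atoms) with a single edge type, and a (max_atoms, 1) mask. As a minimal NumPy sketch, assuming each molecule arrives as an (n_atoms, n_atoms) adjacency matrix and an (n_atoms, 75) feature matrix, padding to those shapes could look like this; pad_graph is a hypothetical helper, not a DeepChem API.

import numpy as np

def pad_graph(adj, atom_feats, max_atoms):
  # adj: (n_atoms, n_atoms); atom_feats: (n_atoms, n_feats); n_atoms <= max_atoms.
  n = adj.shape[0]
  vertex_features = np.zeros((max_atoms, atom_feats.shape[1]), dtype=np.float32)
  vertex_features[:n] = atom_feats
  # Single edge type, hence the length-1 axis in (max_atoms, 1, max_atoms).
  adj_matrix = np.zeros((max_atoms, 1, max_atoms), dtype=np.float32)
  adj_matrix[:n, 0, :n] = adj
  # Mask marks which rows correspond to real atoms rather than padding.
  mask = np.zeros((max_atoms, 1), dtype=np.float32)
  mask[:n, 0] = 1.0
  return vertex_features, adj_matrix, mask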
Example #3
# Assumed imports (legacy DeepChem 2.x TensorGraph API).
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, GraphCNN

def testGraphCNN_pickle():
  # Build a minimal graph around a GraphCNN layer, then pickle it with save().
  V = Feature(shape=(None, 200, 50))
  A = Feature(shape=(None, 200, 1, 200))
  gcnn = GraphCNN(32, in_layers=[V, A])
  tg = TensorGraph()
  tg.add_output(gcnn)
  tg.set_loss(gcnn)
  tg.build()
  tg.save()
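To check the round trip, the saved graph can be reloaded from its model directory. This is a hedged sketch assuming the legacy DeepChem 2.x TensorGraph.load_from_dir method; verify it against the installed version before relying on it.

# Assumption: TensorGraph.load_from_dir exists in this DeepChem version.
restored = TensorGraph.load_from_dir(tg.model_dir)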