Code example #1
    def test_dag_gather(self):
        """Test that DAGGather can be invoked."""
        n_graph_feat = 2
        n_outputs = 2
        layer_sizes = [2]
        init_method = 'one'
        activation = 'sigmoid'

        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        atom_features_np = np.array([[1, -1], [1, -1], [1, -1]])
        membership_np = np.array([0, 0, 1])
        expected_output = sigmoid(np.array([[1, 1], [1, 1]]))

        with self.session() as sess:
            atom_features_tf = tf.convert_to_tensor(atom_features_np,
                                                    dtype=tf.float32)
            membership_tf = tf.convert_to_tensor(membership_np, dtype=tf.int32)
            dag_gather = DAGGather(n_graph_feat=n_graph_feat,
                                   n_outputs=n_outputs,
                                   activation=activation,
                                   init=init_method,
                                   layer_sizes=layer_sizes)
            dag_gather.create_tensor(
                in_layers=[atom_features_tf, membership_tf, 0])

            sess.run(tf.global_variables_initializer())
            output = dag_gather.out_tensor.eval()
            self.assertAllClose(output, expected_output)
            self.assertEqual(output.shape, expected_output.shape)
Code example #2
File: test_layers.py  Project: ktaneishi/deepchem
  def test_dag_gather(self):
    """Test that DAGGather can be invoked."""
    n_graph_feat = 2
    n_outputs = 2
    layer_sizes = [2]
    init_method = 'one'
    activation = 'sigmoid'

    def sigmoid(x):
      return 1 / (1 + np.exp(-x))

    atom_features_np = np.array([[1, -1], [1, -1], [1, -1]])
    membership_np = np.array([0, 0, 1])
    expected_output = sigmoid(np.array([[1, 1], [1, 1]]))

    with self.session() as sess:
      atom_features_tf = tf.convert_to_tensor(
          atom_features_np, dtype=tf.float32)
      membership_tf = tf.convert_to_tensor(membership_np, dtype=tf.int32)
      dag_gather = DAGGather(
          n_graph_feat=n_graph_feat,
          n_outputs=n_outputs,
          activation=activation,
          init=init_method,
          layer_sizes=layer_sizes)
      dag_gather.create_tensor(in_layers=[atom_features_tf, membership_tf])

      sess.run(tf.global_variables_initializer())
      output = dag_gather.out_tensor.eval()
      self.assertAllClose(output, expected_output)
      self.assertEqual(output.shape, expected_output.shape)
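The expected output in the two tests above can be reproduced by hand. Below is a minimal NumPy sketch, assuming that DAGGather sums the atom features of each molecule according to membership and then applies a dense layer whose weights and biases are all ones under init='one' with a sigmoid activation; that assumption is read off from the test's expected values, not from the layer's source.

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

atom_features = np.array([[1, -1], [1, -1], [1, -1]], dtype=np.float32)
membership = np.array([0, 0, 1])

# Sum atom features per molecule: molecule 0 -> [2, -2], molecule 1 -> [1, -1].
graph_features = np.stack(
    [atom_features[membership == i].sum(axis=0) for i in (0, 1)])

# One dense layer of size layer_sizes[0] = 2 with all-one weights and biases:
# [2, -2] @ ones + 1 = [1, 1] and [1, -1] @ ones + 1 = [1, 1].
W, b = np.ones((2, 2)), np.ones(2)
print(sigmoid(graph_features @ W + b))  # matches sigmoid([[1, 1], [1, 1]])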
Code example #3
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               in_layers=[dag_layer1, self.membership])

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dag_gather])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dag_gather])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
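A build_graph method like the one above is normally not called directly; it is invoked by the model's constructor. The following is a hedged usage sketch of the surrounding workflow, assuming DeepChem's DAGModel as exposed in dc.models together with the DAGTransformer preprocessing it requires; the dataset choice and hyperparameters are purely illustrative.

import deepchem as dc

# Load a MoleculeNet dataset with graph featurization.
tasks, (train, valid, test), _ = dc.molnet.load_delaney(featurizer='GraphConv')

# DAG models require atoms to be re-ordered into DAGs before training.
max_atoms = max(mol.get_num_atoms() for mol in train.X)
transformer = dc.trans.DAGTransformer(max_atoms=max_atoms)
train = transformer.transform(train)

model = dc.models.DAGModel(n_tasks=len(tasks), max_atoms=max_atoms,
                           mode='regression', batch_size=64)
model.fit(train, nb_epoch=10)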
Code example #4
# Imports as used in the DeepChem 2.x TensorGraph era that this test targets.
import tensorflow as tf
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, DAGGather


def test_DAGGather_pickle():
  # Build a minimal graph around a DAGGather layer and check that it serializes.
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 30))
  membership = Feature(shape=(None,), dtype=tf.int32)
  Gather = DAGGather(in_layers=[atom_features, membership])
  tg.add_output(Gather)
  tg.set_loss(Gather)
  tg.build()
  tg.save()
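A natural follow-up to this save/pickle test is restoring the graph; a minimal sketch, assuming TensorGraph.load_from_dir from the same TensorGraph API is available:

# Reload the graph that tg.save() wrote to tg.model_dir (sketch, not part of the original test).
restored = TensorGraph.load_from_dir(tg.model_dir)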
Code example #5
File: graph_models.py  Project: domsooch/dsdc_demo
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              layer_sizes=self.layer_sizes,
                              dropout=self.dropout,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               layer_sizes=self.layer_sizes_gather,
                               dropout=self.dropout,
                               in_layers=[dag_layer1, self.membership])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dag_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(shape=(None, n_tasks),
                                  in_layers=[
                                      Dense(in_layers=dag_gather,
                                            out_channels=n_tasks)
                                  ])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
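The uncertainty branch above implements a heteroscedastic regression loss: the model predicts a log-variance alongside each output, the squared error is divided by the predicted variance, and the log_var term penalizes inflating that variance. A small NumPy sketch of the same expression with made-up numbers (illustration only, mirroring the weighted_loss lines above):

import numpy as np

labels = np.array([[0.5], [1.0]])    # hypothetical targets, one task
output = np.array([[0.4], [1.3]])    # hypothetical predictions
log_var = np.array([[-1.0], [0.2]])  # predicted log-variance
weights = np.ones_like(labels)

var = np.exp(log_var)
diff = labels - output
per_sample = weights * (diff * diff / var + log_var)  # same form as above
loss = np.sum(np.mean(per_sample, axis=1))            # ReduceMean, then ReduceSum
print(loss)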