Example #1
 def test_softmax(self):
   """Test that Softmax can be invoked."""
   batch_size = 10
   n_features = 5
   in_tensor = np.random.rand(batch_size, n_features)
   with self.session() as sess:
     in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
     out_tensor = SoftMax()(in_tensor)
     out_tensor = out_tensor.eval()
     assert out_tensor.shape == (batch_size, n_features)
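A quick follow-up check (not in the original test; it assumes the same test class and imports as above, plus the standard softmax contract) is that each row of the evaluated output is a valid probability distribution:
 def test_softmax_rows_sum_to_one(self):
   """Sketch: each softmax row should sum to 1 and be non-negative."""
   in_tensor = np.random.rand(10, 5)
   with self.session() as sess:
     in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
     out = SoftMax()(in_tensor).eval()
     assert np.allclose(out.sum(axis=1), 1.0, atol=1e-5)
     assert np.all(out >= 0.0)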
Example #2
    def build_graph(self):
        # Layer 1
        gc1_input = [self.atom_features, self.indexing, self.membership] + self.deg_adj_list
        gc1 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc1_input)
        bn1 = BatchNorm(in_layers=[gc1])
        gp1_input = [bn1, self.indexing, self.membership] + self.deg_adj_list
        gp1 = GraphPool(in_layers=gp1_input)

        # Layer 2
        gc2_input = [gp1, self.indexing, self.membership] + self.deg_adj_list
        gc2 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc2_input)
        bn2 = BatchNorm(in_layers=[gc2])
        gp2_input = [bn2, self.indexing, self.membership] + self.deg_adj_list
        gp2 = GraphPool(in_layers=gp2_input)

        # Dense layer 1
        d1 = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
        bn3 = BatchNorm(in_layers=[d1])

        # Graph gather layer
        gg1_input = [bn3, self.indexing, self.membership] + self.deg_adj_list
        gg1 = GraphGather(batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=gg1_input)

        # Output dense layer
        d2 = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
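        # Expose class probabilities as the graph output; the loss below is computed from the raw logits in d2.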
        softmax = SoftMax(in_layers=[d2])
        self.tg.add_output(softmax)

        # Set loss function
        self.label = Label(shape=(None, 2))
        cost = SoftMaxCrossEntropy(in_layers=[self.label, d2])
        self.weight = Weights(shape=(None, 1))
        loss = WeightedError(in_layers=[cost, self.weight])
        self.tg.set_loss(loss)
Example #3
 def create_layers(self, state, **kwargs):
   d1 = Flatten(in_layers=state)
   d2 = Dense(
       in_layers=[d1],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=64)
   d3 = Dense(
       in_layers=[d2],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=32)
   d4 = Dense(
       in_layers=[d3],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=16)
   d4 = BatchNorm(in_layers=[d4])
   d5 = Dense(in_layers=[d4], activation_fn=None, out_channels=9)
   value = Dense(in_layers=[d4], activation_fn=None, out_channels=1)
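   # Squeeze the (batch, 1) value output down to shape (batch,) for the critic.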
   value = Squeeze(squeeze_dims=1, in_layers=[value])
   probs = SoftMax(in_layers=[d5])
   return {'action_prob': probs, 'value': value}
Example #4
 def create_layers(self, state, **kwargs):
     action = Variable(np.ones(env.n_actions))
     output = SoftMax(in_layers=[
         Reshape(in_layers=[action], shape=(-1, env.n_actions))
     ])
     value = Variable([0.0])
     return {'action_prob': output, 'value': value}
Example #5
    def test_compute_model_performance_singletask_classifier(self):
        n_data_points = 20
        n_features = 10

        X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
        X1 = np.ones(shape=(int(n_data_points / 2), n_features))
        X = np.concatenate((X, X1))
        class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
        class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
        y = np.concatenate((class_0, class_1))
        dataset = NumpyDataset(X, y)

        features = Feature(shape=(None, n_features))
        label = Label(shape=(None, 2))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        total_loss = ReduceMean(in_layers=smce)

        tg = dc.models.TensorGraph(learning_rate=0.1)
        tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit(dataset, nb_epoch=1000)
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                                   np.mean,
                                   mode="classification")

        scores = tg.evaluate_generator(tg.default_generator(dataset), [metric],
                                       labels=[label],
                                       per_task_metrics=True)
        scores = list(scores[1].values())
        assert_true(np.all(np.isclose(scores, [1.0], atol=0.05)))
Example #6
    def test_save_load(self):
        n_data_points = 20
        n_features = 2
        X = np.random.rand(n_data_points, n_features)
        y = [[0, 1] for x in range(n_data_points)]
        dataset = NumpyDataset(X, y)
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        label = Label(shape=(None, 2))
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        loss = ReduceMean(in_layers=[smce])
        tg = dc.models.TensorGraph(learning_rate=0.01)
        tg.add_output(output)
        tg.set_loss(loss)
        submodel_loss = ReduceSum(in_layers=smce)
        submodel_opt = Adam(learning_rate=0.002)
        submodel = tg.create_submodel(layers=[dense],
                                      loss=submodel_loss,
                                      optimizer=submodel_opt)
        tg.fit(dataset, nb_epoch=1)
        prediction = np.squeeze(tg.predict_on_batch(X))
        tg.save()

        dirpath = tempfile.mkdtemp()
        shutil.rmtree(dirpath)
        shutil.move(tg.model_dir, dirpath)

        tg1 = TensorGraph.load_from_dir(dirpath)
        prediction2 = np.squeeze(tg1.predict_on_batch(X))
        assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
Example #7
    def test_set_optimizer(self):
        n_data_points = 20
        n_features = 2
        X = np.random.rand(n_data_points, n_features)
        y = [[0, 1] for x in range(n_data_points)]
        dataset = NumpyDataset(X, y)
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        label = Label(shape=(None, 2))
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        loss = ReduceMean(in_layers=[smce])
        tg = dc.models.TensorGraph(learning_rate=0.01, use_queue=False)
        tg.add_output(output)
        tg.set_loss(loss)
        global_step = tg.get_global_step()
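        # Swap in plain gradient descent driven by an exponentially decaying learning rate.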
        learning_rate = ExponentialDecay(initial_rate=0.1,
                                         decay_rate=0.96,
                                         decay_steps=100000)
        tg.set_optimizer(GradientDescent(learning_rate=learning_rate))
        tg.fit(dataset, nb_epoch=1000)
        prediction = np.squeeze(tg.predict_on_batch(X))
        tg.save()

        tg1 = TensorGraph.load_from_dir(tg.model_dir)
        prediction2 = np.squeeze(tg1.predict_on_batch(X))
        assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
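For orientation, the schedule configured above matches the textbook exponential decay. A hypothetical helper (an illustration under that assumption, not DeepChem API; DeepChem's ExponentialDecay may also apply the decay in discrete staircase steps):

def decayed_rate(step, initial_rate=0.1, decay_rate=0.96, decay_steps=100000):
    """Approximate learning rate after `step` optimization steps."""
    return initial_rate * decay_rate**(step / decay_steps)

# decayed_rate(0) == 0.1; decayed_rate(100000) == 0.096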
Example #8
 def test_tensorboard(self):
     n_data_points = 20
     n_features = 2
     X = np.random.rand(n_data_points, n_features)
     y = [[0, 1] for x in range(n_data_points)]
     dataset = NumpyDataset(X, y)
     features = Feature(shape=(None, n_features))
     dense = Dense(out_channels=2, in_layers=[features])
     output = SoftMax(in_layers=[dense])
     label = Label(shape=(None, 2))
     smce = SoftMaxCrossEntropy(in_layers=[label, dense])
     loss = ReduceMean(in_layers=[smce])
     tg = dc.models.TensorGraph(tensorboard=True,
                                tensorboard_log_frequency=1,
                                learning_rate=0.01,
                                model_dir='/tmp/tensorgraph')
     tg.add_output(output)
     tg.set_loss(loss)
     tg.fit(dataset, nb_epoch=1000)
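      # With tensorboard=True, fit() should have written an events file into model_dir.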
     files_in_dir = os.listdir(tg.model_dir)
     event_file = list(
         filter(lambda x: x.startswith("events"), files_in_dir))
     assert_true(len(event_file) > 0)
     event_file = os.path.join(tg.model_dir, event_file[0])
     file_size = os.stat(event_file).st_size
     assert_true(file_size > 0)
Example #9
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dense1])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dense1])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Example #10
def test_Softmax_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = SoftMax(in_layers=feature)
  tg.add_output(layer)
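  # The softmax output doubles as the loss; this test only exercises graph construction, build, and save.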
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #11
      def create_layers(self, state, **kwargs):

        reshaped = Reshape(shape=(1, -1, 10), in_layers=state)
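        # Single-sequence GRU over the reshaped state; its output is reshaped to (batch, n_actions) for the softmax policy.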
        gru = GRU(n_hidden=10, batch_size=1, in_layers=reshaped)
        output = SoftMax(
            in_layers=[Reshape(in_layers=[gru], shape=(-1, env.n_actions))])
        value = Variable([0.0])
        return {'action_prob': output, 'value': value}
Example #12
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               in_layers=[dag_layer1, self.membership])

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dag_gather])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dag_gather])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Example #13
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concatenate features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Stack(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Example #14
  def build_graph(self):
    self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
    self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
    self.mask = Feature(shape=(None, self.max_atoms, 1))

    gcnn1 = BatchNorm(
        GraphCNN(
            num_filters=64,
            in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
    gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
    gcnn2 = BatchNorm(
        GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
    gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
    gc_pool, adj_matrix = GraphCNNPool(
        num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
    gc_pool = BatchNorm(gc_pool)
    gc_pool = Dropout(self.dropout, in_layers=gc_pool)
    gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
    gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
    gc_pool2, adj_matrix2 = GraphCNNPool(
        num_vertices=8, in_layers=[gcnn3, adj_matrix])
    gc_pool2 = BatchNorm(gc_pool2)
    gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
    flattened = Flatten(in_layers=gc_pool2)
    readout = Dense(
        out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Stack(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Example #15
    def test_compute_model_performance_multitask_classifier(self):
        n_data_points = 20
        n_features = 2

        X = np.ones(shape=(n_data_points // 2, n_features)) * -1
        X1 = np.ones(shape=(n_data_points // 2, n_features))
        X = np.concatenate((X, X1))
        class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
        class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
        y1 = np.concatenate((class_0, class_1))
        y2 = np.concatenate((class_1, class_0))
        X = NumpyDataset(X)
        ys = [NumpyDataset(y1), NumpyDataset(y2)]

        databag = Databag()

        features = Feature(shape=(None, n_features))
        databag.add_dataset(features, X)

        outputs = []
        entropies = []
        labels = []
        for i in range(2):
            label = Label(shape=(None, 2))
            labels.append(label)
            dense = Dense(out_channels=2, in_layers=[features])
            output = SoftMax(in_layers=[dense])
            smce = SoftMaxCrossEntropy(in_layers=[label, dense])

            entropies.append(smce)
            outputs.append(output)
            databag.add_dataset(label, ys[i])

        total_loss = ReduceMean(in_layers=entropies)

        tg = dc.models.TensorGraph(learning_rate=0.1)
        for output in outputs:
            tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit_generator(
            databag.iterbatches(epochs=1000,
                                batch_size=tg.batch_size,
                                pad_batches=True))
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                                   np.mean,
                                   mode="classification")

        scores = tg.evaluate_generator(databag.iterbatches(), [metric],
                                       labels=labels,
                                       per_task_metrics=True)
        scores = list(scores[1].values())
        # Loosening atol to see if tests stop failing sporadically
        assert_true(np.all(np.isclose(scores, [1.0, 1.0], atol=0.20)))
Example #16
    def test_shared_layer(self):
        n_data_points = 20
        n_features = 2

        X = np.random.rand(n_data_points, n_features)
        y1 = np.array([[0, 1] for x in range(n_data_points)])
        X = NumpyDataset(X)
        ys = [NumpyDataset(y1)]

        databag = Databag()

        features = Feature(shape=(None, n_features))
        databag.add_dataset(features, X)

        outputs = []

        label = Label(shape=(None, 2))
        dense1 = Dense(out_channels=2, in_layers=[features])
        dense2 = dense1.shared(in_layers=[features])
        output1 = SoftMax(in_layers=[dense1])
        output2 = SoftMax(in_layers=[dense2])
        smce = SoftMaxCrossEntropy(in_layers=[label, dense1])

        outputs.append(output1)
        outputs.append(output2)
        databag.add_dataset(label, ys[0])

        total_loss = ReduceMean(in_layers=[smce])

        tg = dc.models.TensorGraph(learning_rate=0.01)
        for output in outputs:
            tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit_generator(
            databag.iterbatches(epochs=1,
                                batch_size=tg.batch_size,
                                pad_batches=True))
        prediction = tg.predict_on_generator(databag.iterbatches())
        assert_true(np.all(np.isclose(prediction[0], prediction[1],
                                      atol=0.01)))
Example #17
    def build_graph(self):
        d1 = Dense(out_channels=256, activation_fn=tf.nn.relu, in_layers=[self.feature])
        d2 = Dense(out_channels=64, activation_fn=tf.nn.relu, in_layers=[d1])
        d3 = Dense(out_channels=16, activation=None, in_layers=[d2])
        d4 = Dense(out_channels=2, activation=None, in_layers=[d3])
        softmax = SoftMax(in_layers=[d4])
        self.tg.add_output(softmax)

        self.label = Label(shape=(None, 2))
        cost = SoftMaxCrossEntropy(in_layers=[self.label, d4])
        loss = ReduceMean(in_layers=[cost])
        self.tg.set_loss(loss)
Example #18
 def test_single_task_classifier(self):
     n_data_points = 20
     n_features = 2
     X = np.random.rand(n_data_points, n_features)
     y = [[0, 1] for x in range(n_data_points)]
     dataset = dc.data.NumpyDataset(X, y)
     model = dc.models.Sequential(learning_rate=0.01)
     model.add(Dense(out_channels=2))
     model.add(SoftMax())
     model.fit(dataset, loss="binary_crossentropy", nb_epoch=1000)
     prediction = np.squeeze(model.predict_on_batch(X))
     assert_true(np.all(np.isclose(prediction, y, atol=0.4)))
Example #19
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concatenate features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Example #20
 def test_fit_twice(self):
     n_data_points = 20
     n_features = 2
     X = np.random.rand(n_data_points, n_features)
     y = [[0, 1] for x in range(n_data_points)]
     dataset = dc.data.NumpyDataset(X, y)
     model = dc.models.Sequential(learning_rate=0.01)
     model.add(Dense(out_channels=2))
     model.add(SoftMax())
     # Should be able to call fit twice without failure.
     model.fit(dataset, loss="binary_crossentropy", nb_epoch=1000)
     model.fit(dataset, loss="binary_crossentropy", nb_epoch=1000)
Example #21
    def test_mnist(self):
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)
        valid = dc.data.NumpyDataset(mnist.validation.images,
                                     mnist.validation.labels)

        # Images are square 28x28 (batch, height, width, channel)
        feature = Feature(shape=(None, 784), name="Feature")
        make_image = Reshape(shape=(-1, 28, 28, 1), in_layers=[feature])

        conv2d_1 = Conv2D(num_outputs=32,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[make_image])
        maxpool_1 = MaxPool(in_layers=[conv2d_1])

        conv2d_2 = Conv2D(num_outputs=64,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[maxpool_1])
        maxpool_2 = MaxPool(in_layers=[conv2d_2])
        flatten = Flatten(in_layers=[maxpool_2])

        dense1 = Dense(out_channels=1024,
                       activation_fn=tf.nn.relu,
                       in_layers=[flatten])
        dense2 = Dense(out_channels=10, in_layers=[dense1])
        label = Label(shape=(None, 10), name="Label")
        smce = SoftMaxCrossEntropy(in_layers=[label, dense2])
        loss = ReduceMean(in_layers=[smce])
        output = SoftMax(in_layers=[dense2])

        tg = dc.models.TensorGraph(model_dir='/tmp/mnist',
                                   batch_size=1000,
                                   use_queue=True)
        tg.add_output(output)
        tg.set_loss(loss)
        tg.fit(train, nb_epoch=2)

        prediction = np.squeeze(tg.predict_proba_on_batch(valid.X))

        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(10):
            fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
            assert_true(roc_auc[i] > 0.99)
Example #22
 def test_single_task_classifier(self):
     n_data_points = 20
     n_features = 2
     X = np.random.rand(n_data_points, n_features)
     y = [[0, 1] for x in range(n_data_points)]
     dataset = NumpyDataset(X, y)
     features = Feature(shape=(None, n_features))
     dense = Dense(out_channels=2, in_layers=[features])
     output = SoftMax(in_layers=[dense])
     label = Label(shape=(None, 2))
     smce = SoftMaxCrossEntropy(in_layers=[label, dense])
     loss = ReduceMean(in_layers=[smce])
     tg = dc.models.TensorGraph(learning_rate=0.01)
     tg.add_output(output)
     tg.set_loss(loss)
     tg.fit(dataset, nb_epoch=1000)
     prediction = np.squeeze(tg.predict_on_batch(X))
     assert_true(np.all(np.isclose(prediction, y, atol=0.4)))
Example #23
    def test_multi_task_classifier(self):
        n_data_points = 20
        n_features = 2

        X = np.random.rand(n_data_points, n_features)
        y1 = np.array([[0, 1] for x in range(n_data_points)])
        y2 = np.array([[1, 0] for x in range(n_data_points)])
        X = NumpyDataset(X)
        ys = [NumpyDataset(y1), NumpyDataset(y2)]

        databag = Databag()

        features = Feature(shape=(None, n_features))
        databag.add_dataset(features, X)

        outputs = []
        entropies = []
        for i in range(2):
            label = Label(shape=(None, 2))
            dense = Dense(out_channels=2, in_layers=[features])
            output = SoftMax(in_layers=[dense])
            smce = SoftMaxCrossEntropy(in_layers=[label, dense])

            entropies.append(smce)
            outputs.append(output)
            databag.add_dataset(label, ys[i])

        total_loss = ReduceMean(in_layers=entropies)

        tg = dc.models.TensorGraph(learning_rate=0.01)
        for output in outputs:
            tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit_generator(
            databag.iterbatches(epochs=1000,
                                batch_size=tg.batch_size,
                                pad_batches=True))
        predictions = tg.predict_on_generator(databag.iterbatches())
        for i in range(2):
            y_real = ys[i].X
            y_pred = predictions[i]
            assert_true(np.all(np.isclose(y_pred, y_real, atol=0.6)))
Example #24
    def test_compute_model_performance_multitask_classifier(self):
        n_data_points = 20
        n_features = 1
        n_tasks = 2
        n_classes = 2

        X = np.ones(shape=(n_data_points // 2, n_features)) * -1
        X1 = np.ones(shape=(n_data_points // 2, n_features))
        X = np.concatenate((X, X1))
        class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
        class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
        y1 = np.concatenate((class_0, class_1))
        y2 = np.concatenate((class_1, class_0))
        y = np.stack([y1, y2], axis=1)
        dataset = NumpyDataset(X, y)

        features = Feature(shape=(None, n_features))
        label = Label(shape=(None, n_tasks, n_classes))
        dense = Dense(out_channels=n_tasks * n_classes, in_layers=[features])
        logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=dense)
        output = SoftMax(in_layers=[logits])
        smce = SoftMaxCrossEntropy(in_layers=[label, logits])
        total_loss = ReduceMean(in_layers=smce)

        tg = dc.models.TensorGraph(learning_rate=0.01,
                                   batch_size=n_data_points)
        tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit(dataset, nb_epoch=1000)
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                                   np.mean,
                                   mode="classification")

        scores = tg.evaluate_generator(tg.default_generator(dataset), [metric],
                                       labels=[label],
                                       per_task_metrics=True)
        scores = list(scores[1].values())
        # Loosening atol to see if tests stop failing sporadically
        assert_true(np.all(np.isclose(scores, [1.0, 1.0], atol=0.50)))
Example #25
    def create_layers(self, state, **kwargs):
        i = Reshape(in_layers=[state[0]], shape=(-1, 1))
        i = AddConstant(-1, in_layers=[i])
        i = InsertBatchIndex(in_layers=[i])
        # shape(i) = (batch_size, 1)

        q = Reshape(in_layers=[state[1]], shape=(-1, self.n_queue_obs))
        # shape(q) = (batch_size, n_queue_obs)
        #q = Dense(16, in_layers=[q], activation_fn=tensorflow.nn.relu)
        ## shape(q) = (batch_size, 16)

        x = q
        if not self.single_layer:
            for j in range(1):
                x1 = Dense(8, in_layers=[x], activation_fn=tensorflow.nn.relu)
                x = Concat(in_layers=[q, x1])
        # 1) shape(x) = (batch_size, n_queue_obs)
        # 2) shape(x) = (batch_size, n_queue_obs + 8)

        ps = []
        for j in range(self.n_products):
            p = Dense(n_actions, in_layers=[x])
            ps.append(p)
        p = Stack(in_layers=ps, axis=1)
        # shape(p) = (batch_size, n_products, n_actions)
        p = Gather(in_layers=[p, i])
        # shape(p) = (batch_size, n_actions)
        p = SoftMax(in_layers=[p])

        vs = []
        for j in range(self.n_products):
            v = Dense(1, in_layers=[x])
            vs.append(v)
        v = Stack(in_layers=vs, axis=1)
        # shape(v) = (batch_size, n_products, 1)
        v = Gather(in_layers=[v, i])
        # shape(v) = (batch_size, 1)

        return {'action_prob': p, 'value': v}
Example #26
  def build_graph(self):
    """Building graph structures:
        Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression
        """
    self.atom_features = Feature(shape=(None, self.n_atom_feat))
    self.pair_features = Feature(shape=(None, self.n_pair_feat))
    combined = Combine_AP(in_layers=[self.atom_features, self.pair_features])
    self.pair_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
    weave_layer1 = WeaveLayer(
        n_atom_input_feat=self.n_atom_feat,
        n_pair_input_feat=self.n_pair_feat,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        in_layers=[combined, self.pair_split, self.atom_to_pair])
    weave_layer2 = WeaveLayer(
        n_atom_input_feat=self.n_hidden,
        n_pair_input_feat=self.n_hidden,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        update_pair=False,
        in_layers=[weave_layer1, self.pair_split, self.atom_to_pair])
    separated = Separate_AP(in_layers=[weave_layer2])
    dense1 = Dense(
        out_channels=self.n_graph_feat,
        activation_fn=tf.nn.tanh,
        in_layers=[separated])
    batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1])
    weave_gather = WeaveGather(
        self.batch_size,
        n_input=self.n_graph_feat,
        gaussian_expand=True,
        in_layers=[batch_norm1, self.atom_split])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[weave_gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[weave_gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Concat(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Example #27
def graph_conv_net(batch_size, prior, num_task):
    """
    Build a tensorgraph for multilabel classification task

    Return: features and labels layers
    """
    tg = TensorGraph(use_queue=False)
    if prior:
        add_on = num_task
    else:
        add_on = 0
    atom_features = Feature(shape=(None, 75 + 2 * add_on))
    circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)

    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)

    gc2 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[gc1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)

    add = Concat(in_layers=[gp1, gp2])
    add = Dropout(0.5, in_layers=[add])
    dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(batch_size=batch_size,
                          activation_fn=tf.nn.tanh,
                          in_layers=[batch_norm3, degree_slice, membership] +
                          deg_adjs)
    batch_norm4 = BatchNorm(in_layers=[readout])

    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[circular_features])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[dense1])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    merge_feat = Concat(in_layers=[dense1, batch_norm4])
    merge = Dense(out_channels=256,
                  activation_fn=tf.nn.elu,
                  in_layers=[merge_feat])
    costs = []
    labels = []
    for task in range(num_task):
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[merge])
        softmax = SoftMax(in_layers=[classification])
        tg.add_output(softmax)
        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
    all_cost = Stack(in_layers=costs, axis=1)
    weights = Weights(shape=(None, num_task))
    loss = WeightedError(in_layers=[all_cost, weights])
    tg.set_loss(loss)
    #if prior == True:
    #    return tg, atom_features,circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer
    return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights
Example #28
def graph_conv_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)
    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)
    gc2 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[gp1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)
    dense = Dense(out_channels=128, activation_fn=None, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    gg1 = GraphGather(batch_size=batch_size,
                      activation_fn=tf.nn.tanh,
                      in_layers=[batch_norm3, degree_slice, membership] +
                      deg_adjs)

    costs = []
    labels = []
    for task in tasks:
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[gg1])

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    loss = WeightedError(in_layers=[entropy, task_weights])
    model.set_loss(loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
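                # Map each placeholder layer to its batch arrays: one-hot labels, weights, and the agglomerated ConvMol features.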
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
Example #29
def sluice_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False,
                        tensorboard=True)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    sluice_loss = []
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)

    as1 = AlphaShare(in_layers=[gc1, gc1])
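    # Layers feeding the AlphaShare units are also collected for the SluiceLoss term added to the task loss below.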
    sluice_loss.append(gc1)

    batch_norm1a = BatchNorm(in_layers=[as1[0]])
    batch_norm1b = BatchNorm(in_layers=[as1[1]])

    gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
                     deg_adjs)
    gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
                     deg_adjs)

    gc2a = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1a, degree_slice, membership] + deg_adjs)
    gc2b = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1b, degree_slice, membership] + deg_adjs)

    as2 = AlphaShare(in_layers=[gc2a, gc2b])
    sluice_loss.append(gc2a)
    sluice_loss.append(gc2b)

    batch_norm2a = BatchNorm(in_layers=[as2[0]])
    batch_norm2b = BatchNorm(in_layers=[as2[1]])

    gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
                     deg_adjs)
    gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
                     deg_adjs)

    densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
    denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

    batch_norm3a = BatchNorm(in_layers=[densea])
    batch_norm3b = BatchNorm(in_layers=[denseb])

    as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
    sluice_loss.append(batch_norm3a)
    sluice_loss.append(batch_norm3b)

    gg1a = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[0], degree_slice, membership] + deg_adjs)
    gg1b = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[1], degree_slice, membership] + deg_adjs)

    costs = []
    labels = []
    count = 0
    for task in tasks:
        if count < len(tasks) / 2:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1a])
            print("first half:")
            print(task)
        else:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1b])
            print('second half')
            print(task)
        count += 1

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    task_loss = WeightedError(in_layers=[entropy, task_weights])

    s_cost = SluiceLoss(in_layers=sluice_loss)

    total_loss = Add(in_layers=[task_loss, s_cost])
    model.set_loss(total_loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
Example #30
  def build_graph(self):
    """
    Building graph structures:
    """
    self.atom_features = Feature(shape=(None, 75))
    self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    self.membership = Feature(shape=(None,), dtype=tf.int32)

    self.deg_adjs = []
    for i in range(0, 10 + 1):
      deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
      self.deg_adjs.append(deg_adj)
    gc1 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[self.atom_features, self.degree_slice, self.membership] +
        self.deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership]
                    + self.deg_adjs)
    gc2 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership]
                    + self.deg_adjs)
    dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(
        batch_size=self.batch_size,
        activation_fn=tf.nn.tanh,
        in_layers=[batch_norm3, self.degree_slice, self.membership] +
        self.deg_adjs)

    if self.error_bars:
      readout = Dropout(in_layers=[readout], dropout_prob=0.2)

    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Concat(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)