Code example #1
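None of the snippets on this page carries its imports. They all target the DeepChem 2.x TensorGraph API (removed in later DeepChem releases), so a shared import block along these lines is assumed throughout:

import tensorflow as tf

from deepchem.feat.mol_graphs import ConvMol
from deepchem.metrics import to_one_hot
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import (
    Feature, Label, Weights, GraphConv, GraphPool, GraphGather, Dense,
    BatchNorm, Dropout, SoftMax, SoftMaxCrossEntropy, WeightedError,
    Concat, Stack, Add, L2Loss)
# Examples #4 and #8 additionally use AlphaShare, SluiceLoss, Reshape,
# TrimGraphOutput, Exp, ReduceSum and ReduceMean from the same module.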
    def build_graph(self):
        # Layer 1
        gc1_input = [self.atom_features, self.indexing, self.membership] + self.deg_adj_list
        gc1 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc1_input)
        bn1 = BatchNorm(in_layers=[gc1])
        gp1_input = [bn1, self.indexing, self.membership] + self.deg_adj_list
        gp1 = GraphPool(in_layers=gp1_input)

        # Layer 2
        gc2_input = [gp1, self.indexing, self.membership] + self.deg_adj_list
        gc2 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc2_input)
        bn2 = BatchNorm(in_layers=[gc2])
        gp2_input = [bn2, self.indexing, self.membership] + self.deg_adj_list
        gp2 = GraphPool(in_layers=gp2_input)

        # Dense layer 1
        d1 = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
        bn3 = BatchNorm(in_layers=[d1])

        # Graph gather layer
        gg1_input = [bn3, self.indexing, self.membership] + self.deg_adj_list
        gg1 = GraphGather(batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=gg1_input)

        # Output dense layer
        d2 = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
        softmax = SoftMax(in_layers=[d2])
        self.tg.add_output(softmax)

        # Set loss function
        self.label = Label(shape=(None, 2))
        cost = SoftMaxCrossEntropy(in_layers=[self.label, d2])
        self.weight = Weights(shape=(None, 1))
        loss = WeightedError(in_layers=[cost, self.weight])
        self.tg.set_loss(loss)
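Example #1 reads `self.atom_features`, `self.indexing`, `self.membership`, and `self.deg_adj_list` without defining them; they come from elsewhere in its class. Judging by examples #2 and #5 below, the setup is presumably along these lines (`self.indexing` playing the role usually named `degree_slice`):

# Hypothetical placeholder setup, mirroring examples #2 and #5.
self.atom_features = Feature(shape=(None, 75))
self.indexing = Feature(shape=(None, 2), dtype=tf.int32)  # a.k.a. degree_slice
self.membership = Feature(shape=(None,), dtype=tf.int32)
self.deg_adj_list = [
    Feature(shape=(None, i + 1), dtype=tf.int32) for i in range(10 + 1)
]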
Code example #2
File: graphModels.py  Project: zbjzbj001/lsc
    def build_graph(self):
        self.atom_features = Feature(shape=(None, 75))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size in self.graph_conv_layers:
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = MyBatchNorm(in_layers=[gc1])
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size[0],
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = MyBatchNorm(in_layers=[dense])
        batch_norm3 = Dropout(self.dropout, in_layers=[batch_norm3])
        readout = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        curLayer = readout
        for myind in range(1, len(self.dense_layer_size) - 1):
            curLayer = Dense(out_channels=self.dense_layer_size[myind],
                             activation_fn=tf.nn.relu,
                             in_layers=[curLayer])
            curLayer = Dropout(self.dropout, in_layers=[curLayer])

        classification = Dense(out_channels=self.n_tasks,
                               activation_fn=None,
                               in_layers=[curLayer])
        sigmoid = MySigmoid(in_layers=[classification])
        self.add_output(sigmoid)

        self.label = Label(shape=(None, self.n_tasks))
        all_cost = MySigmoidCrossEntropy(
            in_layers=[self.label, classification])
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)

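        # Keep handles to intermediate layers so callers can inspect
        # activations and losses after training.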
        self.mydense = dense
        self.myreadout = readout
        self.myclassification = classification
        self.mysigmoid = sigmoid
        self.myall_cost = all_cost
        self.myloss = loss
Code example #3
def test_GraphPool_Pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  layer = GraphPool(
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  tg.set_loss(layer)
  tg.build()
  tg.save()
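The test stops once save() succeeds. A natural follow-up, sketched here as an assumption (the test itself does not do this, and it relies on `load_from_dir` and the `layers` dict behaving as in DeepChem 2.x), is to reload the graph and check that the GraphPool layer survived the pickle round trip:

  # Hedged sketch: reload the saved TensorGraph and look for the layer.
  tg2 = TensorGraph.load_from_dir(tg.model_dir)
  assert any(isinstance(l, GraphPool) for l in tg2.layers.values())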
Code example #4
def sluice_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False,
                        tensorboard=True)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    sluice_loss = []
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)

    as1 = AlphaShare(in_layers=[gc1, gc1])
    sluice_loss.append(gc1)

    batch_norm1a = BatchNorm(in_layers=[as1[0]])
    batch_norm1b = BatchNorm(in_layers=[as1[1]])

    gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
                     deg_adjs)
    gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
                     deg_adjs)

    gc2a = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1a, degree_slice, membership] + deg_adjs)
    gc2b = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1b, degree_slice, membership] + deg_adjs)

    as2 = AlphaShare(in_layers=[gc2a, gc2b])
    sluice_loss.append(gc2a)
    sluice_loss.append(gc2b)

    batch_norm2a = BatchNorm(in_layers=[as2[0]])
    batch_norm2b = BatchNorm(in_layers=[as2[1]])

    gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
                     deg_adjs)
    gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
                     deg_adjs)

    densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
    denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

    batch_norm3a = BatchNorm(in_layers=[densea])
    batch_norm3b = BatchNorm(in_layers=[denseb])

    as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
    sluice_loss.append(batch_norm3a)
    sluice_loss.append(batch_norm3b)

    gg1a = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[0], degree_slice, membership] + deg_adjs)
    gg1b = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[1], degree_slice, membership] + deg_adjs)

    costs = []
    labels = []
    count = 0
    for task in tasks:
        if count < len(tasks) / 2:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1a])
            print("first half:")
            print(task)
        else:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1b])
            print("second half:")
            print(task)
        count += 1

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    task_loss = WeightedError(in_layers=[entropy, task_weights])

    s_cost = SluiceLoss(in_layers=sluice_loss)

    total_loss = Add(in_layers=[task_loss, s_cost])
    model.set_loss(total_loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
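AlphaShare appears to emit one mixed output per input branch (hence the `as1[0]`/`as1[1]` indexing), and SluiceLoss turns the collected intermediate outputs into a subspace-sharing regularizer that `Add` folds into the task loss. The returned generator is consumed exactly as in example #7; see the training sketch there.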
Code example #5
  def build_graph(self):
    """
    Building graph structures:
    """
    self.atom_features = Feature(shape=(None, 75))
    self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    self.membership = Feature(shape=(None,), dtype=tf.int32)

    self.deg_adjs = []
    for i in range(0, 10 + 1):
      deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
      self.deg_adjs.append(deg_adj)
    gc1 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[self.atom_features, self.degree_slice, self.membership] +
        self.deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership]
                    + self.deg_adjs)
    gc2 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership]
                    + self.deg_adjs)
    dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(
        batch_size=self.batch_size,
        activation_fn=tf.nn.tanh,
        in_layers=[batch_norm3, self.degree_slice, self.membership] +
        self.deg_adjs)

    if self.error_bars:
      readout = Dropout(in_layers=[readout], dropout_prob=0.2)

    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Concat(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Code example #6
File: model_utils.py  Project: truongbuu/Invivo
def graph_conv_net(batch_size, prior, num_task):
    """
    Build a tensorgraph for multilabel classification task

    Return: features and labels layers
    """
    tg = TensorGraph(use_queue=False)
    if prior:
        add_on = num_task
    else:
        add_on = 0
    atom_features = Feature(shape=(None, 75 + 2 * add_on))
    circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)

    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)

    gc2 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[gc1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)

    add = Concat(in_layers=[gp1, gp2])
    add = Dropout(0.5, in_layers=[add])
    dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(batch_size=batch_size,
                          activation_fn=tf.nn.tanh,
                          in_layers=[batch_norm3, degree_slice, membership] +
                          deg_adjs)
    batch_norm4 = BatchNorm(in_layers=[readout])

    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[circular_features])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    # Second dense block on the fingerprint branch, chained from the first.
    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[dense1])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    merge_feat = Concat(in_layers=[dense1, batch_norm4])
    merge = Dense(out_channels=256,
                  activation_fn=tf.nn.elu,
                  in_layers=[merge_feat])
    costs = []
    labels = []
    for task in range(num_task):
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[merge])
        softmax = SoftMax(in_layers=[classification])
        tg.add_output(softmax)
        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
    all_cost = Stack(in_layers=costs, axis=1)
    weights = Weights(shape=(None, num_task))
    loss = WeightedError(in_layers=[all_cost, weights])
    tg.set_loss(loss)
    return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights
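`circular_features` is the one non-graph input: a fixed (batch_size, 256) matrix whose name suggests circular (ECFP) fingerprints. A hedged feeding sketch; the `CircularFingerprint` featurizer and the `batch_mols` variable are assumptions, not part of the source:

# Sketch: fill the circular_features placeholder alongside the ConvMol
# feeds shown in example #7. batch_mols would be the batch's RDKit molecules.
from deepchem.feat import CircularFingerprint
d[circular_features] = CircularFingerprint(size=256).featurize(batch_mols)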
Code example #7
def graph_conv_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)
    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)
    gc2 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[gp1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)
    dense = Dense(out_channels=128, activation_fn=None, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    gg1 = GraphGather(batch_size=batch_size,
                      activation_fn=tf.nn.tanh,
                      in_layers=[batch_norm3, degree_slice, membership] +
                      deg_adjs)

    costs = []
    labels = []
    for task in tasks:
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[gg1])

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    loss = WeightedError(in_layers=[entropy, task_weights])
    model.set_loss(loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
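A minimal training sketch for the returned pieces, assuming `train_dataset` is a `deepchem.data.Dataset`, `tasks` is the task list, and `model_dir` exists in scope as it does in the source (batch_size=50 and epochs=10 are arbitrary choices):

model, generator, labels, task_weights = graph_conv_model(batch_size=50, tasks=tasks)
model.fit_generator(generator(train_dataset, batch_size=50, epochs=10))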
Code example #8
File: graph_models.py  Project: domsooch/dsdc_demo
    def build_graph(self):
        """
    Building graph structures:
    """
        self.atom_features = Feature(shape=(None, self.number_atom_features))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size, dropout in zip(self.graph_conv_layers, self.dropout):
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = BatchNorm(in_layers=[gc1])
            if dropout > 0.0:
                batch_norm1 = Dropout(dropout, in_layers=batch_norm1)
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size,
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = BatchNorm(in_layers=[dense])
        if self.dropout[-1] > 0.0:
            batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3)
        readout = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=readout,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
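The `uncertainty` branch is the standard heteroscedastic-regression trick: a second head predicts a per-task log-variance, and the loss `weights * (diff * diff / var + log_var)` scales the squared error down where the model claims high variance, while the `log_var` term stops it from claiming infinite variance everywhere.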
Code example #9
#  Follow: https://deepchem.io/docs/notebooks/graph_convolutional_networks_for_tox21.html
#
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None, ), dtype=tf.int32)

deg_adjs = []
for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)

gc1 = GraphConv(64,
                activation_fn=tf.nn.relu,
                in_layers=[atom_features, degree_slice, membership] + deg_adjs)
batch_norm1 = BatchNorm(in_layers=[gc1])
gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
gc2 = GraphConv(64,
                activation_fn=tf.nn.relu,
                in_layers=[gp1, degree_slice, membership] + deg_adjs)
batch_norm2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
batch_norm3 = BatchNorm(in_layers=[dense])
readout = GraphGather(batch_size=batch_size,
                      activation_fn=tf.nn.tanh,
                      in_layers=[batch_norm3, degree_slice, membership] +
                      deg_adjs)

costs = []
labels = []
for task in range(len(current_tasks)):
    # Per-task heads, following the linked tox21 tutorial; `tg` is the
    # TensorGraph created earlier in that tutorial.
    classification = Dense(out_channels=2, activation_fn=None, in_layers=[readout])
    softmax = SoftMax(in_layers=[classification])
    tg.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)
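The tutorial then folds the per-task costs into one weighted loss; a sketch of that continuation, mirroring example #7:

entropy = Concat(in_layers=costs)
task_weights = Weights(shape=(None, len(current_tasks)))
loss = WeightedError(in_layers=[entropy, task_weights])
tg.set_loss(loss)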
Code example #10
for ii in range(1, 10 + 2):  # assumed loop header; the snippet begins mid-loop
    deg_adj = Feature(shape=(None, ii), dtype=tf.int32)
    deg_adjs.append(deg_adj)

label15 = []
for ts in range(ntask):
    label_t = Label(shape=(None, 2))
    label15.append(label_t)

## Setup Graph Convolution Network
tg = TensorGraph(use_queue=False, learning_rate=0.001, model_dir='ckpt')

gc1 = GraphConv(64,
                activation_fn=tf.nn.relu,
                in_layers=[atom_features, degree_slice, membership] + deg_adjs)
bn1 = BatchNorm(in_layers=[gc1])
gp1 = GraphPool(in_layers=[bn1, degree_slice, membership] + deg_adjs)
dp1 = Dropout(0.2, in_layers=gp1)

gc2 = GraphConv(64,
                activation_fn=tf.nn.relu,
                in_layers=[dp1, degree_slice, membership] + deg_adjs)
bn2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[bn2, degree_slice, membership] + deg_adjs)
dp2 = Dropout(0.5, in_layers=gp2)

gc3 = GraphConv(64,
                activation_fn=tf.nn.relu,
                in_layers=[dp2, degree_slice, membership] + deg_adjs)
bn3 = BatchNorm(in_layers=[gc3])
gp3 = GraphPool(in_layers=[bn3, degree_slice, membership] + deg_adjs)
dp3 = Dropout(0.5, in_layers=gp3)
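The snippet ends after the third convolution block (its top is also truncated, so `atom_features`, `degree_slice`, `membership`, `deg_adjs`, `ntask`, and `batch_size` are assumed defined earlier). Following examples #5 and #7, it would presumably continue with a dense layer, a GraphGather readout, and one softmax head per task; a hedged sketch:

dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[dp3])
bn4 = BatchNorm(in_layers=[dense])
readout = GraphGather(batch_size=batch_size,
                      activation_fn=tf.nn.tanh,
                      in_layers=[bn4, degree_slice, membership] + deg_adjs)

costs = []
for ts in range(ntask):
    classification = Dense(out_channels=2, activation_fn=None, in_layers=[readout])
    tg.add_output(SoftMax(in_layers=[classification]))
    costs.append(SoftMaxCrossEntropy(in_layers=[label15[ts], classification]))
entropy = Concat(in_layers=costs)
task_weights = Weights(shape=(None, ntask))
tg.set_loss(WeightedError(in_layers=[entropy, task_weights]))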