Example #1
 # From DeepChem's layer tests; assumes numpy as np, TensorFlow 1.x as tf,
 # and SluiceLoss imported from deepchem.models.tensorgraph.layers.
 def test_sluice_loss(self):
     """Test the sluice loss function."""
     input1 = np.ones((3, 4))
     input2 = np.ones((2, 2))
     with self.session() as sess:
         input1 = tf.convert_to_tensor(input1, dtype=tf.float32)
         input2 = tf.convert_to_tensor(input2, dtype=tf.float32)
         output_tensor = SluiceLoss()(input1, input2)
         sess.run(tf.global_variables_initializer())
         assert output_tensor.eval() == 40.0
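Note: the asserted value of 40.0 is consistent with SluiceLoss splitting each input into two column-halves and summing the products of their entries: the (3, 4) all-ones tensor contributes 6 · 6 = 36 and the (2, 2) tensor contributes 2 · 2 = 4.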
Example #2
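A two-column sluice network for multitask graph classification, built with DeepChem's legacy TensorGraph API: two stacks of GraphConv/GraphPool layers are coupled by AlphaShare layers, and the layers collected in sluice_loss are regularized by SluiceLoss.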
def sluice_model(batch_size, tasks):
    # NOTE: model_dir is assumed to be defined in the enclosing module scope.
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False,
                        tensorboard=True)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

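    # Collect the layers whose outputs SluiceLoss will regularize.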
    sluice_loss = []
    deg_adjs = []
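    # One adjacency-list placeholder per atom-degree bucket consumed by
    # GraphConv/GraphPool below.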
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)

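    # AlphaShare learns to mix feature subspaces between its two inputs;
    # feeding gc1 twice bootstraps two parallel task columns from the single
    # shared convolution.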
    as1 = AlphaShare(in_layers=[gc1, gc1])
    sluice_loss.append(gc1)

    batch_norm1a = BatchNorm(in_layers=[as1[0]])
    batch_norm1b = BatchNorm(in_layers=[as1[1]])

    gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
                     deg_adjs)
    gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
                     deg_adjs)

    gc2a = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1a, degree_slice, membership] + deg_adjs)
    gc2b = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1b, degree_slice, membership] + deg_adjs)

    as2 = AlphaShare(in_layers=[gc2a, gc2b])
    sluice_loss.append(gc2a)
    sluice_loss.append(gc2b)

    batch_norm2a = BatchNorm(in_layers=[as2[0]])
    batch_norm2b = BatchNorm(in_layers=[as2[1]])

    gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
                     deg_adjs)
    gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
                     deg_adjs)

    densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
    denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

    batch_norm3a = BatchNorm(in_layers=[densea])
    batch_norm3b = BatchNorm(in_layers=[denseb])

    as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
    sluice_loss.append(batch_norm3a)
    sluice_loss.append(batch_norm3b)

    gg1a = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[0], degree_slice, membership] + deg_adjs)
    gg1b = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[1], degree_slice, membership] + deg_adjs)

    costs = []
    labels = []
    count = 0
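    # One softmax head per task: the first half of the tasks reads from
    # column a (gg1a), the second half from column b (gg1b).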
    for task in tasks:
        if count < len(tasks) / 2:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1a])
            print("first half:")
            print(task)
        else:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1b])
            print("second half:")
            print(task)
        count += 1

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    task_loss = WeightedError(in_layers=[entropy, task_weights])

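    # Subspace-orthogonality penalty over all layers collected in sluice_loss.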
    s_cost = SluiceLoss(in_layers=sluice_loss)

    total_loss = Add(in_layers=[task_loss, s_cost])
    model.set_loss(total_loss)

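    # Maps each Feature/Label/Weights placeholder to the corresponding arrays
    # of a DeepChem dataset batch.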
    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                deg_adj_lists = multiConvMol.get_deg_adjacency_lists()
                for i in range(1, len(deg_adj_lists)):
                    d[deg_adjs[i - 1]] = deg_adj_lists[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
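
For context, here is a minimal sketch of how this builder might be driven. It assumes a DeepChem dataset (train_dataset and tasks from a loader) and TensorGraph's generator-based fit_generator training entry point; the batch size is illustrative.

# Illustrative driver for sluice_model; `train_dataset` and `tasks` are
# assumed to come from a DeepChem dataset loader such as load_tox21.
batch_size = 64
model, generator, labels, task_weights = sluice_model(batch_size, tasks)
# TensorGraph models train directly from a feed_dict generator.
model.fit_generator(generator(train_dataset, batch_size, epochs=10))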