    def build(self,
              depth=5,
              hidden_size=300,
              score_scale=score_scale,
              FP_len=FP_len,
              FP_rad=FP_rad):
        self.FP_len = FP_len
        self.FP_rad = FP_rad
        # Fingerprint input: two molecules per training pair share the batch axis
        self.input_mol = tf.placeholder(tf.float32, [batch_size * 2, FP_len])
        self.mol_hiddens = tf.nn.relu(
            linearND(self.input_mol, hidden_size, scope="encoder0"))
        for d in range(1, depth):  # additional fully connected encoder layers
            self.mol_hiddens = tf.nn.relu(
                linearND(self.mol_hiddens, hidden_size, scope="encoder%i" % d))

        # Map the final hidden layer to a scalar and squash it into (1, score_scale)
        self.score_sum = linearND(self.mol_hiddens, 1, scope="score_sum")
        self.score_sum = tf.squeeze(self.score_sum)
        self.score = 1.0 + (score_scale - 1.0) * tf.nn.sigmoid(self.score_sum)

        tf.global_variables_initializer().run(session=self.session)
        # Report the number of trainable parameters
        # (Python 3 needs 'from functools import reduce' at module level)
        size_func = lambda v: reduce(lambda x, y: x * y,
                                     v.get_shape().as_list())
        n = sum(size_func(v) for v in tf.trainable_variables())
        print("Model size: %dK" % (n // 1000))

        self.coord = tf.train.Coordinator()
        return self
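# Every snippet in this listing calls a linearND helper that is not shown.
# Below is a minimal sketch of what the call sites imply (a dense layer over
# the last axis of an N-D tensor, with an optional bias controlled by
# init_bias); the name linearND_sketch and the details are assumptions, not
# the source implementation:
def linearND_sketch(x, output_size, scope, init_bias=0.0):
    with tf.variable_scope(scope):
        input_size = x.get_shape().as_list()[-1]
        W = tf.get_variable("W", [input_size, output_size])
        # Flatten leading dims, apply the shared weights, restore the shape
        flat = tf.reshape(x, [-1, input_size])
        out_shape = tf.concat([tf.shape(x)[:-1], [output_size]], axis=0)
        out = tf.reshape(tf.matmul(flat, W), out_shape)
        if init_bias is not None:
            b = tf.get_variable("b", [output_size],
                                initializer=tf.constant_initializer(init_bias))
            out = out + b
        return out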
Example #2
    def build(self,
              depth=5,
              hidden_size=300,
              FP_len=FP_len,
              output_size=61142):
        self.FP_len = FP_len
        self.input_mol = tf.placeholder(tf.float32, [batch_size, FP_len])
        self.mol_hiddens = tf.nn.relu(
            linearND(self.input_mol, hidden_size, scope="encoder0"))
        for d in range(1, depth):
            self.mol_hiddens = tf.nn.relu(
                linearND(self.mol_hiddens, hidden_size, scope="encoder%i" % d))

        # Class logits over output_size candidates; topk holds the NK best indices
        self.score = linearND(self.mol_hiddens, output_size, scope="output")
        _, self.topk = tf.nn.top_k(self.score, k=NK)

        tf.global_variables_initializer().run(session=self.session)
        # Report the number of trainable parameters
        size_func = lambda v: reduce(lambda x, y: x * y,
                                     v.get_shape().as_list())
        n = sum(size_func(v) for v in tf.trainable_variables())
        print("Model size: %dK" % (n // 1000))

        self.coord = tf.train.Coordinator()
        return self
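# Hypothetical usage of the classifier built above ('model' and 'fps' are
# illustrative names, not from the source): feed a fingerprint batch and
# fetch the indices of the NK highest-scoring classes.
#   fps = np.zeros((batch_size, FP_len), dtype=np.float32)  # stand-in input
#   top_indices = model.session.run(model.topk,
#                                   feed_dict={model.input_mol: fps})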
Example #3
# Let TensorFlow allocate GPU memory on demand rather than all at once
gpu_options = tf.GPUOptions(allow_growth=True)
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

# Graph inputs: per-atom features, per-bond features, and dense neighbor
# matrices used to aggregate messages; the None dims vary with molecule size
input_atom = tf.placeholder(tf.float32, [batch_size, None, adim])
input_bond = tf.placeholder(tf.float32, [batch_size, None, adim + bdim])
atom_graph = tf.placeholder(tf.float32, [batch_size, None, None])
bond_graph = tf.placeholder(tf.float32, [batch_size, None, None])
node_mask = tf.placeholder(tf.float32, [batch_size, None])
src_holder = [input_atom, input_bond, atom_graph, bond_graph, node_mask]
label = tf.placeholder(tf.float32, [batch_size])
node_mask = tf.expand_dims(node_mask, -1)  # trailing axis for broadcasting

with tf.variable_scope("encoder"):
    binput = linearND(input_bond,
                      hidden_size,
                      "bond_embedding",
                      init_bias=None)
    message = tf.nn.relu(binput)
    with tf.variable_scope("loopybp") as scope:
        for i in range(depth):
            # tf.batch_matmul was removed in TF 1.0; tf.matmul handles batched inputs
            nei_message = linearND(tf.matmul(bond_graph, message),
                                   hidden_size,
                                   "bp",
                                   init_bias=None)
            message = tf.nn.relu(binput + nei_message)
            scope.reuse_variables()  # share the "bp" weights across iterations

    ainput = linearND(input_atom,
                      hidden_size,
                      "atom_embedding",
                      init_bias=None)
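    # Shape note on the message-passing loop above (assuming bond_graph holds
    # a dense [batch, n_bonds, n_bonds] neighbor matrix and message is
    # [batch, n_bonds, hidden_size]): tf.matmul(bond_graph, message) sums each
    # bond's incoming neighbor messages, so every iteration of the loop
    # extends the receptive field by one bond.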
Example #4
node_mask = tf.expand_dims(node_mask, -1)

graph_inputs = (input_atom, input_bond, atom_graph, bond_graph, num_nbs,
                node_mask)
with tf.variable_scope("encoder"):
    atom_hiddens, _ = rcnn_wl_last(graph_inputs,
                                   batch_size=batch_size,
                                   hidden_size=hidden_size,
                                   depth=depth)

# Broadcast-add so atom_pair[b, i, j] combines the hidden states of atoms i and j
atom_hiddens1 = tf.reshape(atom_hiddens, [batch_size, 1, -1, hidden_size])
atom_hiddens2 = tf.reshape(atom_hiddens, [batch_size, -1, 1, hidden_size])
atom_pair = atom_hiddens1 + atom_hiddens2

# Sigmoid attention over atom pairs, mixing pair features with binary features
att_hidden = tf.nn.relu(
    linearND(atom_pair, hidden_size, scope="att_atom_feature", init_bias=None)
    + linearND(binary, hidden_size, scope="att_bin_feature"))
att_score = linearND(att_hidden, 1, scope="att_scores")
att_score = tf.nn.sigmoid(att_score)
att_context = att_score * atom_hiddens1
att_context = tf.reduce_sum(att_context, 2)  # attention-weighted sum over atoms

att_context1 = tf.reshape(att_context, [batch_size, 1, -1, hidden_size])
att_context2 = tf.reshape(att_context, [batch_size, -1, 1, hidden_size])
att_pair = att_context1 + att_context2

pair_hidden = linearND(
    atom_pair, hidden_size, scope="atom_feature", init_bias=None) + linearND(
        binary, hidden_size, scope="bin_feature", init_bias=None) + linearND(
            att_pair, hidden_size, scope="ctx_feature")
pair_hidden = tf.nn.relu(pair_hidden)
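# The reshape-and-add pattern used twice above builds pairwise features by
# broadcasting: [b, 1, n, h] + [b, n, 1, h] -> [b, n, n, h], so
# atom_pair[b, i, j] = atom_hiddens[b, i] + atom_hiddens[b, j].
# A tiny NumPy check of the shape behavior (illustrative only):
#   >>> import numpy as np
#   >>> h = np.arange(6.0).reshape(1, 3, 2)          # [batch=1, n=3, hidden=2]
#   >>> pair = h[:, None, :, :] + h[:, :, None, :]   # shape (1, 3, 3, 2)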
Example #5
gpu_options = tf.GPUOptions(allow_growth=True, visible_device_list=opts.device)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
    _input_mol = tf.placeholder(tf.float32, [batch_size * 2, FP_len])
    sa_target = tf.placeholder(tf.float32, [batch_size * 2])

    q = tf.FIFOQueue(20, [tf.float32],
                     shapes=[[batch_size * 2, FP_len]])  # capacity-20, fixed-shape batches
    enqueue = q.enqueue(_input_mol)
    input_mol = q.dequeue()
    src_holder = [input_mol]

    input_mol.set_shape([batch_size * 2, FP_len])

    mol_hiddens = tf.nn.relu(linearND(input_mol, hidden_size,
                                      scope="encoder0"))
    for d in range(1, depth):
        mol_hiddens = tf.nn.relu(
            linearND(mol_hiddens, hidden_size, scope="encoder%i" % d))

    score_sum = linearND(mol_hiddens, 1, scope="score_sum")
    score_sum = tf.squeeze(score_sum)
    score = 1.0 + (score_scale - 1.0) * tf.nn.sigmoid(score_sum)

    # For evaluation only - get SSE against a target
    sse = tf.reduce_sum(tf.square(score - sa_target))

    pm_one = tf.constant([-1, 1], dtype=tf.float32)
    reshape_score = tf.reshape(score, [batch_size, 2])
    # Negate the reactant score in each (reactant, product) pair so that a
    # later sum yields product score minus reactant score
    reshape_score = tf.multiply(reshape_score, pm_one)
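    # A minimal sketch of how the queue above is typically fed from a producer
    # thread (batch_iter is an illustrative generator yielding arrays of shape
    # [batch_size * 2, FP_len]; it is not from the source):
    import threading

    def _feed_queue():
        for fps in batch_iter():
            session.run(enqueue, feed_dict={_input_mol: fps})

    feeder = threading.Thread(target=_feed_queue)
    feeder.daemon = True
    feeder.start()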
Example #6
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
    _input_mol = tf.placeholder(tf.float32, [batch_size, FP_len])
    _label = tf.placeholder(tf.int32, [batch_size])

    q = tf.FIFOQueue(20, [tf.float32, tf.int32])  # capacity-20 input queue
    enqueue = q.enqueue([_input_mol, _label])
    [input_mol, label] = q.dequeue()
    src_holder = [input_mol, label]

    input_mol.set_shape([batch_size, FP_len])
    label.set_shape([batch_size])
    mol_hiddens = tf.nn.relu(linearND(input_mol, hidden_size,
                                      scope="encoder0"))
    for d in range(1, depth):
        mol_hiddens = tf.nn.relu(
            linearND(mol_hiddens, hidden_size, scope="encoder%i" % d))

    score = linearND(mol_hiddens, output_size, scope="output")
    # Summed cross-entropy over the batch; divided by batch_size below for a
    # per-example mean
    loss = tf.reduce_sum(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=score,
                                                       labels=label))
    _, topk = tf.nn.top_k(score, k=NK)

    # For normal reaction-wise training
    _lr = tf.placeholder(tf.float32, [])
    optimizer = tf.train.AdamOptimizer(learning_rate=_lr)
    param_norm = tf.global_norm(tf.trainable_variables())
    grads_and_vars = optimizer.compute_gradients(loss / batch_size)
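    # The snippet ends at compute_gradients; a typical TF1 continuation (a
    # sketch, assuming a clipping threshold max_norm defined elsewhere; not
    # from the source) would clip the gradients and build the train op:
    clipped, _ = tf.clip_by_global_norm([g for g, _ in grads_and_vars],
                                        max_norm)
    train_op = optimizer.apply_gradients(
        list(zip(clipped, [v for _, v in grads_and_vars])))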