Example #1
def add_metrics(is_root, is_neutral):
    """A block that adds metrics for loss and hits; output is the LSTM state."""
    c = td.Composition(name='predict(is_root=%s, is_neutral=%s)' %
                       (is_root, is_neutral))
    with c.scope():
        # destructure the input; (label, (logits, state))
        y_ = c.input[0]
        logits = td.GetItem(0).reads(c.input[1])
        state = td.GetItem(1).reads(c.input[1])

        # predict the label from the logits
        y = td.Function(lambda x: tf.cast(tf.argmax(x, 1), tf.int32)).reads(
            logits)

        # calculate loss
        loss = td.Function(_loss)
        td.Metric('all_loss').reads(loss.reads(logits, y_))
        if is_root: td.Metric('root_loss').reads(loss)

        # calculate hits
        hits = td.Function(lambda y, y_: tf.cast(tf.equal(y, y_), tf.float64))
        td.Metric('all_hits').reads(hits.reads(y, y_))
        if is_root: td.Metric('root_hits').reads(hits)

        # calculate binary hits, if the label is not neutral
        if not is_neutral:
            binary_hits = td.Function(tf_binary_hits).reads(logits, y_)
            td.Metric('all_binary_hits').reads(binary_hits)
            if is_root: td.Metric('root_binary_hits').reads(binary_hits)

        # output the state, which will be read by our parent's LSTM cell
        c.output.reads(state)
    return c
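# A minimal, self-contained sketch (an addition, not part of the original
# project) showing how metrics declared with td.Metric are consumed: after
# compilation, td.Compiler exposes them in its `metric_tensors` dict, keyed by
# the names passed to td.Metric (here a toy metric named 'squares').
import tensorflow as tf
import tensorflow_fold.public.blocks as td

def square_metric_block():
    c = td.Composition()
    with c.scope():
        # record the square of the input as a metric, pass the input through
        td.Metric('squares').reads(td.Function(tf.square).reads(c.input))
        c.output.reads(td.Identity().reads(c.input))
    return td.Scalar() >> c

compiler = td.Compiler.create(square_metric_block())
squares = compiler.metric_tensors['squares']  # all metric values in the batch
with tf.Session() as sess:
    fd = compiler.build_feed_dict([1.0, 2.0, 3.0])
    print(sess.run(squares, feed_dict=fd))  # => [1. 4. 9.]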
Example #2
def build_sequence_transcoder(vocab_filepath, word_embedding_size):
    vocab_size = 5

    # From words to list of integers
    vocab = load_vocab(vocab_filepath)
    words2integers = td.InputTransform(
        lambda s: [word2index(vocab, w) for w in s])

    # From integer to word embedding
    word2embedding = td.Scalar('int32') >> td.Function(
        td.Embedding(vocab_size, word_embedding_size))

    # From a sequence of words to a sequence of embeddings
    sequence_transcoder = words2integers >> td.Map(word2embedding)
    return sequence_transcoder
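# A minimal usage sketch (an addition, not part of the original project).
# load_vocab and word2index live elsewhere in that project; the stand-ins below
# are hypothetical, only so the block can be evaluated here on its own.
import tensorflow_fold.public.blocks as td

def load_vocab(unused_filepath):  # hypothetical stand-in
    return {'<unk>': 0, 'the': 1, 'cat': 2, 'sat': 3, 'down': 4}

def word2index(vocab, word):  # hypothetical stand-in
    return vocab.get(word, vocab['<unk>'])

transcoder = build_sequence_transcoder('unused.txt', word_embedding_size=8)
# Each word becomes an 8-dimensional embedding; Block.eval (intended for
# interactive testing) returns one numpy array per word.
print(transcoder.eval(['the', 'cat', 'sat']))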
Example #3
def rnn_block(input_block, state_length):
    """Get a fully connected RNN block.

  The input is concatenated with the state vector and put through a fully
  connected layer to get the next state vector.

  Args:
    input_block: Put each input through this before concatenating it with the
      current state vector.
    state_length: Length of the RNN state vector.

  Returns:
    RNN Block (seq of input_block inputs -> output state)
  """
    combine_block = ((td.Identity(), input_block) >> td.Concat() >>
                     td.Function(td.FC(state_length)))
    return td.Fold(combine_block, tf.zeros(state_length))
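# A minimal usage sketch (an addition, not part of the original project): fold
# variable-length sequences of 3-d input vectors into an 8-d state vector. The
# FC weights are freshly initialized, so the printed values carry no meaning.
import tensorflow as tf
import tensorflow_fold.public.blocks as td

rnn = rnn_block(td.Vector(3), state_length=8)
compiler = td.Compiler.create(rnn)
(state,) = compiler.output_tensors
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fd = compiler.build_feed_dict([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                                   [[0.0, 0.0, 1.0]]])
    print(sess.run(state, feed_dict=fd))  # one 8-d state vector per sequence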
Example #4
File: model.py  Project: zhaogang92/fold
    def __init__(self, state_size):
        # Expressions are either constants, or calculator ops that take other
        # expressions as their arguments.  Since an Expression is a recursive type,
        # the model must likewise be recursive.  A ForwardDeclaration declares the
        # type of expression, so it can be used before it is defined.
        expr_decl = td.ForwardDeclaration(td.PyObjectType(), state_size)

        # Create a block for each type of expression.
        # The terminals are the digits 0-9, which we map to vectors using
        # an embedding table.
        digit = (
            td.GetItem('number') >> td.Scalar(dtype='int32') >> td.Function(
                td.Embedding(10, state_size, name='terminal_embed')))

        # For non terminals, recursively apply expression to the left/right sides,
        # concatenate the results, and pass them through a fully-connected layer.
        # Each operation uses different weights in the FC layer.
        def bin_op(name):
            return (td.Record([('left', expr_decl()), ('right', expr_decl())])
                    >> td.Concat() >> td.FC(state_size, name='FC_' + name))

        # OneOf will dispatch its input to the appropriate case, based on the value
        # of x['op']['name'].
        cases = td.OneOf(
            lambda x: x['op']['name'], {
                'NUM': digit,
                'PLUS': bin_op('PLUS'),
                'MINUS': bin_op('MINUS'),
                'TIMES': bin_op('TIMES'),
                'DIV': bin_op('DIV')
            })

        # We do preprocessing to add 'NUM' as a distinct case.
        expression = td.InputTransform(preprocess_expression) >> cases
        expr_decl.resolve_to(expression)

        # Get logits from the root of the expression tree
        expression_logits = (
            expression >> td.FC(NUM_LABELS, activation=None, name='FC_logits'))

        # The result is stored in the expression itself.
        # We ignore it in td.Record above, and pull it out here.
        expression_label = (
            td.GetItem('result') >> td.InputTransform(result_sign) >>
            td.OneHot(NUM_LABELS))

        # For the overall model, return a pair of (logits, labels)
        # The AllOf block will run each of its children on the same input.
        model = td.AllOf(expression_logits, expression_label)
        self._compiler = td.Compiler.create(model)

        # Get the tensorflow tensors that correspond to the outputs of model.
        # `logits` and `labels` are TF tensors, and we can use them to
        # compute losses in the usual way.
        (logits, labels) = self._compiler.output_tensors

        self._loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                    labels=labels))

        self._accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(labels, 1), tf.argmax(logits, 1)),
                    dtype=tf.float32))

        self._global_step = tf.Variable(0, name='global_step', trainable=False)
        optr = tf.train.GradientDescentOptimizer(0.01)
        self._train_op = optr.minimize(self._loss,
                                       global_step=self._global_step)
Example #5
# evaluating individual inputs
print("one hot vector ======")
onehot_block = td.OneHot(5)

# evaluating block using eval
print(onehot_block.eval(3))
# => array([0., 0., 0., 1., 0.], dtype=float32)

# others
print(onehot_block.eval(0))
print(onehot_block.eval(1))
print(onehot_block.eval(2))
print(onehot_block.eval(4))

print("composite blocks =====")
composite_blocks = td.Scalar() >> td.AllOf(td.Function(tf.negative),
                                           td.Function(tf.square))
print(composite_blocks.eval(2)[0])  #negative
print(composite_blocks.eval(2)[1])  #square

print("batching inputs =====")
# The Compiler compiles our model down to a TensorFlow graph.
# The Compiler will also do type inference and validation on the model.
# The outputs are ordinary TensorFlow tensors, which can be connected to a TensorFlow loss function and optimizer.
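# A minimal sketch (an addition to this listing) of the Compiler workflow; the
# block below is assumed for illustration.
batched_block = td.Scalar() >> td.AllOf(td.Function(tf.negative),
                                        td.Function(tf.square))
batch_compiler = td.Compiler.create(batched_block)
neg_out, square_out = batch_compiler.output_tensors
batch_feed_dict = batch_compiler.build_feed_dict([1.0, 2.0, 3.0])
with tf.Session() as batch_sess:
    print(batch_sess.run([neg_out, square_out], feed_dict=batch_feed_dict))
    # => [array([-1., -2., -3.], dtype=float32), array([1., 4., 9.], dtype=float32)]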

print("blocks have associated input and output types =====")
scalar_block = td.Scalar()
print(scalar_block.input_type)
print(scalar_block.output_type)
print(scalar_block.eval(5))
Example #6
def main(unused_argv):
    with tf.Session() as sess:
        random.seed(FLAGS.seed)
        tf.set_random_seed(random.randint(0, 2**32))

        # First we set up TensorFlow Fold, building an RNN and one-hot labels:

        # 'process_next_digit' maps a state vector and a digit to a state vector.
        process_next_digit = (
            # leave the state vector alone and one-hot encode the digit
            (td.Identity(), td.OneHot(FLAGS.base)) >>
            # concatenate the state vector and the encoded digit together
            td.Concat() >>
            # pass the resulting vector through a fully connected neural network
            # layer to produce a state vector as output
            td.Function(td.FC(FLAGS.state_vector_len)))

        # td.Fold unrolls the neural net defined in 'process_next_digit', and
        # applies it to every element of a sequence, using zero as the initial
        # state vector.  Thus, process_digits takes a sequence of digits as input,
        # and returns the final state vector as output.
        process_digits = td.Fold(process_next_digit,
                                 tf.zeros(FLAGS.state_vector_len))

        # This is the final model. It takes a dictionary (i.e., a record) of the
        # form {'digits': digit-sequence, 'label': label} as input,
        # and produces a tuple of (output-vector, OneHot(label)) as output.
        root_block = td.Record([('digits', process_digits),
                                ('label', td.OneHot(NUM_LABELS))])

        # An alternative to using Record is to use the following definition:
        # root_block = td.AllOf(td.GetItem('digits') >> process_digits,
        #                       td.GetItem('label') >> td.OneHot(NUM_LABELS))
        # AllOf passes its input to each of its children, and GetItem extracts
        # a field.  GetItem offers additional flexibility in more complex cases.

        # Compile root_block to get a tensorflow model that we can run.
        compiler = td.Compiler.create(root_block)

        # Get the tensorflow tensors that correspond to the outputs of root_block.
        digits_vecs, labels = compiler.output_tensors

        # We can now use digits_vecs and labels to compute losses and training
        # operations with tensorflow in the usual way.
        final_layer_weights = tf.Variable(tf.truncated_normal(
            [FLAGS.state_vector_len, NUM_LABELS]),
                                          name='final_layer_weights')
        final_layer_biases = tf.Variable(tf.truncated_normal([NUM_LABELS]),
                                         name='final_layer_biases')

        logits = tf.matmul(digits_vecs,
                           final_layer_weights) + final_layer_biases
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                    labels=labels))
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(labels, 1), tf.argmax(logits, 1)),
                    dtype=tf.float32))

        train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Create a random batch of data to use for validation.
        validation_set = random_batch(FLAGS.validation_size)

        # TensorFlow Fold passes data into the model using feed_dict.
        validation_feed_dict = compiler.build_feed_dict(validation_set)

        # Now we actually train:
        sess.run(tf.global_variables_initializer())
        for step in xrange(FLAGS.steps):
            for _ in xrange(FLAGS.batches_per_step):
                # Create more random batches of training data, and build feed_dicts
                training_fd = compiler.build_feed_dict(
                    random_batch(FLAGS.batch_size))
                sess.run(train_op, feed_dict=training_fd)

            validation_loss, validation_accuracy = sess.run(
                [loss, accuracy], feed_dict=validation_feed_dict)
            print('step: {:3}, loss: {}, accuracy: {:.0%}'.format(
                step, validation_loss, validation_accuracy))
Example #7
def reduce_net_block():
    net_block = td.Concat() >> td.FC(20) >> td.FC(
        1, activation=None) >> td.Function(lambda xs: tf.squeeze(xs, axis=1))
    return td.Map(td.Scalar()) >> td.Reduce(net_block)
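# A minimal usage sketch (an addition, not part of the original project): the
# returned block maps a variable-length list of Python numbers to one scalar.
# The FC weights are freshly initialized here, so the values are meaningless
# until the block is trained.
import tensorflow as tf
import tensorflow_fold.public.blocks as td

net = reduce_net_block()
compiler = td.Compiler.create(net)
(prediction,) = compiler.output_tensors
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fd = compiler.build_feed_dict([[1.0, 2.0, 3.0], [4.0, 5.0]])
    print(sess.run(prediction, feed_dict=fd))  # one (untrained) value per list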
Example #8
from __future__ import division
from __future__ import print_function

import random
from six.moves import xrange  # pylint: disable=redefined-builtin
from six.moves import zip  # pylint: disable=redefined-builtin
import tensorflow as tf
sess = tf.InteractiveSession()
import tensorflow_fold.public.blocks as td

print("[Sandbox] start. ==========")

# Map and Reduce
print("Map Reduce Processing sample. ==========")
print(td.Map(td.Scalar()))
print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.multiply))).eval(
    range(1, 10)))


# simple fc network
def reduce_net_block():
    net_block = td.Concat() >> td.FC(20) >> td.FC(
        1, activation=None) >> td.Function(lambda xs: tf.squeeze(xs, axis=1))
    return td.Map(td.Scalar()) >> td.Reduce(net_block)


# generate random example data and the expected result
def random_example(fn):
    length = random.randrange(1, 10)
    data = [random.uniform(0, 1) for _ in range(length)]
    result = fn(data)
    return data, result
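# A minimal continuation sketch (an addition, not part of the original file):
# train reduce_net_block to approximate `sum` by pairing it with a td.Scalar()
# block for the label, then feeding batches through the compiler.
print("training reduce_net_block on sum =====")
sum_block = reduce_net_block()
sum_compiler = td.Compiler.create((sum_block, td.Scalar()))
y, y_ = sum_compiler.output_tensors
loss = tf.reduce_mean(tf.square(y - y_))
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
for step in xrange(200):
    batch = [random_example(sum) for _ in xrange(16)]
    fd = sum_compiler.build_feed_dict(batch)
    batch_loss, _ = sess.run([loss, train_op], feed_dict=fd)
print('final training loss:', batch_loss)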