Example #1
def make_graph(hparams_string='{}'):
    """Construct the model and return the graph.

  Hyperparameters are given in the hparams flag as a string
  representation of a Python dictionary.
  For example: '{"batch_size":64,"rnn_layer_sizes":[100,100]}'

  Args:
    hparams_string: A string literal of a Python dictionary. Keys are
        hyperparameter names, and values replace default values.

  Returns:
    tf.Graph instance which contains the TF ops.
  """
    with tf.Graph().as_default() as graph:
        with tf.device(lambda op: ""):
            hparams = basic_rnn_ops.default_hparams()
            hparams = hparams.parse(hparams_string)
            logging.info('hparams = %s', hparams.values())

            with tf.variable_scope('rnn_model'):
                # Define the type of RNN cell to use.
                cell = basic_rnn_ops.make_cell(hparams)

                # Construct dynamic_rnn inference.

                # Placeholders for a batch of melodies and their lengths.
                melody_sequence = tf.placeholder(
                    tf.float32,
                    [hparams.batch_size, None, hparams.one_hot_length])
                lengths = tf.placeholder(tf.int32, [hparams.batch_size])

                # Make inference graph. That is, inputs to logits.
                (logits, initial_state,
                 final_state) = basic_rnn_ops.dynamic_rnn_inference(
                     melody_sequence,
                     lengths,
                     cell,
                     hparams,
                     zero_initial_state=False,
                     parallel_iterations=1,
                     swap_memory=True)

                softmax = tf.nn.softmax(
                    tf.reshape(logits, [hparams.batch_size, -1]))

            tf.add_to_collection('logits', logits)
            tf.add_to_collection('softmax', softmax)
            tf.add_to_collection('initial_state', initial_state)
            tf.add_to_collection('final_state', final_state)
            tf.add_to_collection('melody_sequence', melody_sequence)
            tf.add_to_collection('lengths', lengths)

    return graph
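
The hparams_string is a Python-dict literal rather than a comma-separated name=value string, so basic_rnn_ops.default_hparams().parse presumably evaluates the dict and overrides matching defaults. The following is a minimal standalone sketch of that merge; the parse_hparams helper and the default values are purely illustrative, not the project's API.

import ast

def parse_hparams(defaults, hparams_string):
    # Evaluate the dict literal and override matching default hyperparameters.
    overrides = ast.literal_eval(hparams_string)
    merged = dict(defaults)
    merged.update(overrides)
    return merged

defaults = {'batch_size': 128, 'rnn_layer_sizes': [50], 'one_hot_length': 38}  # hypothetical defaults
print(parse_hparams(defaults, '{"batch_size":64,"rnn_layer_sizes":[100,100]}'))
# -> {'batch_size': 64, 'rnn_layer_sizes': [100, 100], 'one_hot_length': 38}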
Example #2
def make_graph(hparams_string='{}'):
  """Construct the model and return the graph.

  Hyperparameters are given in the hparams flag as a string
  representation of a Python dictionary.
  For example: '{"batch_size":64,"rnn_layer_sizes":[100,100]}'

  Args:
    hparams_string: A string literal of a Python dictionary. Keys are
        hyperparameter names, and values replace default values.

  Returns:
    tf.Graph instance which contains the TF ops.
  """
  with tf.Graph().as_default() as graph:
    with tf.device(lambda op: ""):
      hparams = basic_rnn_ops.default_hparams()
      hparams = hparams.parse(hparams_string)
      logging.info('hparams = %s', hparams.values())

      with tf.variable_scope('rnn_model'):
        # Define the type of RNN cell to use.
        cell = basic_rnn_ops.make_cell(hparams)

        # Construct dynamic_rnn inference.

        # Placeholders for a batch of melodies and their lengths.
        melody_sequence = tf.placeholder(tf.float32,
                                         [hparams.batch_size, None,
                                          hparams.one_hot_length])
        lengths = tf.placeholder(tf.int32, [hparams.batch_size])

        # Make inference graph. That is, inputs to logits.
        (logits,
         initial_state,
         final_state) = basic_rnn_ops.dynamic_rnn_inference(
            melody_sequence, lengths, cell, hparams,
            zero_initial_state=False, parallel_iterations=1,
            swap_memory=True)

        softmax = tf.nn.softmax(tf.reshape(logits, [hparams.batch_size, -1]))

      tf.add_to_collection('logits', logits)
      tf.add_to_collection('softmax', softmax)
      tf.add_to_collection('initial_state', initial_state)
      tf.add_to_collection('final_state', final_state)
      tf.add_to_collection('melody_sequence', melody_sequence)
      tf.add_to_collection('lengths', lengths)

  return graph
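
The graph exposes its tensors only through collections, so a caller retrieves them with tf.get_collection and feeds the placeholders directly. Below is a minimal generation-style sketch, assuming a TF1 Session and a hypothetical checkpoint path; one step is fed at a time, which matches the flattened softmax above.

import numpy as np
import tensorflow as tf

graph = make_graph('{"batch_size":1}')
with graph.as_default():
    # Each collection holds a single element; index [0] recovers the tensor.
    melody_sequence = tf.get_collection('melody_sequence')[0]
    lengths = tf.get_collection('lengths')[0]
    softmax = tf.get_collection('softmax')[0]
    final_state = tf.get_collection('final_state')[0]
    one_hot_length = melody_sequence.get_shape().as_list()[2]

    with tf.Session() as sess:
        tf.train.Saver().restore(sess, '/tmp/rnn_model.ckpt')  # hypothetical checkpoint
        # Feed a single one-hot step: [batch_size=1, num_steps=1, one_hot_length].
        step_input = np.zeros([1, 1, one_hot_length], dtype=np.float32)
        step_input[0, 0, 0] = 1.0
        probs, state = sess.run(
            [softmax, final_state],
            feed_dict={melody_sequence: step_input, lengths: [1]})
        # `state` can be fed back through the 'initial_state' tensors to
        # continue the sequence step by step.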
Example #3
def make_graph(sequence_example_file='', hparams_string='{}', is_eval_mode=False):
  """Construct the model and return the graph.

  Constructs the TensorFlow graph. Hyperparameters
  are given in the hparams flag as a string representation of a Python
  dictionary.
  For example: '{"batch_size":64,"rnn_layer_sizes":[100,100]}'

  Args:
    sequence_example_file: String path to tfrecord file containing training
        samples.
    hparams_string: String literal of a Python dictionary, where keys are
        hyperparameter names and values replace default values.
    is_eval_mode: If True, training-related ops are not built.

  Returns:
    tf.Graph instance which contains the TF ops.

  Raises:
    ValueError: If sequence_example_file does not match any files.
  """
  file_list = [sequence_example_file]
  logging.info('Dataset files: %s', file_list)

  with tf.Graph().as_default() as graph:
    hparams = basic_rnn_ops.default_hparams()
    hparams = hparams.parse(hparams_string)
    logging.info('hparams = %s', hparams.values())

    with tf.variable_scope('rnn_model'):
      # Define the type of RNN cell to use.
      cell = basic_rnn_ops.make_cell(hparams)

      # There are two ways to construct a variable-length RNN in TensorFlow:
      # dynamic_rnn and state_saving_rnn. The code below demonstrates how to
      # construct an end-to-end pipeline, from samples on disk to labels and
      # logits, for dynamic_rnn.

      # Construct dynamic_rnn reader and inference.

      # Get a batch queue.
      (melody_sequence,
       melody_labels,
       lengths) = basic_rnn_ops.dynamic_rnn_batch(file_list, hparams)

      # Make inference graph. That is, inputs to logits.
      # Note: long sequences need a lot of GPU memory because all forward
      # pass activations are kept in order to compute backprop. Additionally,
      # multiple steps are computed simultaneously (the parts of each step
      # which don't depend on other steps). The `parallel_iterations`
      # and `swap_memory` arguments given here trade speed for a lower
      # GPU memory footprint.
      logits, _, _ = basic_rnn_ops.dynamic_rnn_inference(
          melody_sequence, lengths, cell, hparams, zero_initial_state=True,
          parallel_iterations=1, swap_memory=True)

      # The first hparams.skip_first_n_losses steps of the logits tensor are
      # removed. Those first steps are given to the model as a primer during
      # generation. The model is not penalized for incorrect predictions in
      # those first steps, so the loss does not include those logits.
      truncated_logits = logits[:, hparams.skip_first_n_losses:, :]

      # Reshape logits from [batch_size, sequence_length, one_hot_length] to
      # [batch_size * sequence_length, one_hot_length].
      flat_logits = tf.reshape(truncated_logits,
                               [-1, hparams.one_hot_length])

      # Reshape labels from [batch_size, sequence_length] to
      # [batch_size * sequence_length]. Also truncate the first steps to match
      # truncated_logits.
      flat_labels = tf.reshape(
          melody_labels[:, hparams.skip_first_n_losses:], [-1])

      # Compute loss and gradients for training, and accuracy for evaluation.
      cross_entropy, log_perplexity = basic_rnn_ops.log_perplexity_loss(
          flat_logits, flat_labels)
      accuracy = basic_rnn_ops.eval_accuracy(flat_logits, flat_labels)

      global_step = tf.Variable(0, name='global_step', trainable=False)

      tf.add_to_collection('logits', logits)
      tf.add_to_collection('cross_entropy', cross_entropy)
      tf.add_to_collection('log_perplexity', log_perplexity)
      tf.add_to_collection('accuracy', accuracy)
      tf.add_to_collection('global_step', global_step)

      # Compute weight updates, and updates to learning rate and global step.
      if not is_eval_mode:
        training_op, learning_rate = basic_rnn_ops.train_op(
            cross_entropy, global_step, hparams)
        tf.add_to_collection('training_op', training_op)
        tf.add_to_collection('learning_rate', learning_rate)

  return graph
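
Because dynamic_rnn_batch reads the tfrecord file through an input queue, a training driver has to start queue runners before stepping the graph. Below is a minimal training-loop sketch, assuming a TF1 Session and a hypothetical dataset path; the step count and logging interval are arbitrary.

import tensorflow as tf

graph = make_graph(sequence_example_file='/tmp/training_melodies.tfrecord')  # hypothetical path
with graph.as_default():
    training_op = tf.get_collection('training_op')[0]
    cross_entropy = tf.get_collection('cross_entropy')[0]
    global_step = tf.get_collection('global_step')[0]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for _ in range(1000):  # number of steps is arbitrary here
                _, loss, step = sess.run([training_op, cross_entropy, global_step])
                if step % 100 == 0:
                    tf.logging.info('step %d: cross entropy %.4f', step, loss)
        finally:
            coord.request_stop()
            coord.join(threads)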
Example #4
def make_graph(sequence_example_file='',
               hparams_string='{}',
               is_eval_mode=False):
    """Construct the model and return the graph.

  Constructs the TensorFlow graph. Hyperparameters
  are given in the hparams flag as a string representation of a Python
  dictionary.
  For example: '{"batch_size":64,"rnn_layer_sizes":[100,100]}'

  Args:
    sequence_example_file: String path to tfrecord file containing training
        samples.
    hparams_string: String literal of a Python dictionary, where keys are
        hyperparameter names and values replace default values.
    is_eval_mode: If True, training related ops are not build.

  Returns:
    tf.Graph instance which contains the TF ops.

  Raises:
    ValueError: If sequence_example_file does not match any files.
  """
    file_list = [sequence_example_file]
    logging.info('Dataset files: %s', file_list)

    with tf.Graph().as_default() as graph:
        hparams = basic_rnn_ops.default_hparams()
        hparams = hparams.parse(hparams_string)
        logging.info('hparams = %s', hparams.values())

        with tf.variable_scope('rnn_model'):
            # Define the type of RNN cell to use.
            cell = basic_rnn_ops.make_cell(hparams)

            # There are two ways to construct a variable-length RNN in TensorFlow:
            # dynamic_rnn and state_saving_rnn. The code below demonstrates how to
            # construct an end-to-end pipeline, from samples on disk to labels and
            # logits, for dynamic_rnn.

            # Construct dynamic_rnn reader and inference.

            # Get a batch queue.
            (melody_sequence, melody_labels,
             lengths) = basic_rnn_ops.dynamic_rnn_batch(file_list, hparams)

            # Make inference graph. That is, inputs to logits.
            # Note: long sequences need a lot of GPU memory because all forward
            # pass activations are kept in order to compute backprop. Additionally,
            # multiple steps are computed simultaneously (the parts of each step
            # which don't depend on other steps). The `parallel_iterations`
            # and `swap_memory` arguments given here trade speed for a lower
            # GPU memory footprint.
            logits, _, _ = basic_rnn_ops.dynamic_rnn_inference(
                melody_sequence,
                lengths,
                cell,
                hparams,
                zero_initial_state=True,
                parallel_iterations=1,
                swap_memory=True)

            # The first hparams.skip_first_n_losses steps of the logits tensor are
            # removed. Those first steps are given to the model as a primer during
            # generation. The model is not penalized for incorrect predictions in
            # those first steps, so the loss does not include those logits.
            truncated_logits = logits[:, hparams.skip_first_n_losses:, :]

            # Reshape logits from [batch_size, sequence_length, one_hot_length] to
            # [batch_size * sequence_length, one_hot_length].
            flat_logits = tf.reshape(truncated_logits,
                                     [-1, hparams.one_hot_length])

            # Reshape labels from [batch_size, sequence_length] to
            # [batch_size * sequence_length]. Also truncate the first steps to match
            # truncated_logits.
            flat_labels = tf.reshape(
                melody_labels[:, hparams.skip_first_n_losses:], [-1])

            # Compute loss and gradients for training, and accuracy for evaluation.
            cross_entropy, log_perplexity = basic_rnn_ops.log_perplexity_loss(
                flat_logits, flat_labels)
            accuracy = basic_rnn_ops.eval_accuracy(flat_logits, flat_labels)

            global_step = tf.Variable(0, name='global_step', trainable=False)

            tf.add_to_collection('logits', logits)
            tf.add_to_collection('cross_entropy', cross_entropy)
            tf.add_to_collection('log_perplexity', log_perplexity)
            tf.add_to_collection('accuracy', accuracy)
            tf.add_to_collection('global_step', global_step)

            # Compute weight updates, and updates to learning rate and global step.
            if not is_eval_mode:
                training_op, learning_rate = basic_rnn_ops.train_op(
                    cross_entropy, global_step, hparams)
                tf.add_to_collection('training_op', training_op)
                tf.add_to_collection('learning_rate', learning_rate)

    return graph
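
With is_eval_mode=True the training ops are skipped, but the 'accuracy' and 'log_perplexity' collections are still populated, so an evaluation pass only needs to restore a checkpoint, read batches, and average those tensors. Below is a minimal sketch under the same TF1/queue-runner assumptions, with hypothetical record and checkpoint paths and an arbitrary batch count.

import tensorflow as tf

graph = make_graph(sequence_example_file='/tmp/eval_melodies.tfrecord',  # hypothetical path
                   is_eval_mode=True)
with graph.as_default():
    accuracy = tf.get_collection('accuracy')[0]
    log_perplexity = tf.get_collection('log_perplexity')[0]

    with tf.Session() as sess:
        tf.train.Saver().restore(sess, '/tmp/rnn_model.ckpt')  # hypothetical checkpoint
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            accuracies, perplexities = [], []
            for _ in range(20):  # number of eval batches is arbitrary here
                acc, perp = sess.run([accuracy, log_perplexity])
                accuracies.append(acc)
                perplexities.append(perp)
            tf.logging.info('eval accuracy %.4f, log perplexity %.4f',
                            sum(accuracies) / len(accuracies),
                            sum(perplexities) / len(perplexities))
        finally:
            coord.request_stop()
            coord.join(threads)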