  def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    if is_dynamic_rnn:
      lstm_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_lstm_layer,
          bw_lstm_layer,
          lstm_inputs,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      lstm_input = tf.unstack(x, self.time_steps, 1)
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
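
Note that the dynamic branch above calls bidirectional_dynamic_rnn without showing its import. A minimal sketch of one plausible binding, assuming plain TF 1.x (the original file may alias a custom wrapper instead):

import tensorflow as tf

# Assumed binding; the snippets here only rely on the standard signature
# (cell_fw, cell_bw, inputs, sequence_length=None, dtype=None, time_major=False).
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
# Under TF 2.x with v1 compatibility, the equivalent symbol would be
# tf.compat.v1.nn.bidirectional_dynamic_rnn.
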
  def buildModel(self,
                 fw_rnn_layer,
                 bw_rnn_layer,
                 is_dynamic_rnn,
                 is_inference,
                 use_sequence_length=False):
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    batch_size = self.batch_size
    if is_inference:
      batch_size = 1
    # input image placeholder
    x = tf.placeholder(
        "float", [batch_size, self.time_steps, self.n_input],
        name="INPUT_IMAGE")

    sequence_length = None
    if use_sequence_length:
      sequence_length = [self.time_steps] * batch_size
    if is_dynamic_rnn:
      rnn_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          sequence_length,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for the static RNN since we don't
      # have a wrapper for it. During training we can still pass
      # sequence_length, but at inference time we set it to None.
      if is_inference:
        sequence_length = None
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          dtype="float32",
          sequence_length=sequence_length)
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
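
A hedged usage sketch for the variant above: the harness class name and cell constructors are assumptions (any object exposing num_units, time_steps, n_input, n_classes and batch_size alongside buildModel would do), and is_inference=True fixes the batch dimension to 1.

import numpy as np
import tensorflow as tf

model = BidirectionalRnnHarness()  # hypothetical class defining buildModel above
fw_cell = tf.nn.rnn_cell.LSTMCell(model.num_units)
bw_cell = tf.nn.rnn_cell.LSTMCell(model.num_units)

x, prediction, output_class = model.buildModel(
    fw_cell, bw_cell, is_dynamic_rnn=True, is_inference=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # The inference graph expects a single image of shape [1, time_steps, n_input].
  image = np.random.rand(1, model.time_steps, model.n_input).astype("float32")
  probabilities = sess.run(output_class, feed_dict={x: image})
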
    def buildModel(self,
                   fw_rnn_layer,
                   bw_rnn_layer,
                   is_dynamic_rnn,
                   is_inference,
                   use_sequence_length=False):
        # Weights and biases for output softmax layer.
        out_weights = tf.Variable(
            tf.random_normal([self.num_units * 2, self.n_classes]))
        out_bias = tf.Variable(tf.random_normal([self.n_classes]))

        batch_size = self.batch_size
        if is_inference:
            batch_size = 1
        # input image placeholder
        x = tf.placeholder("float",
                           [batch_size, self.time_steps, self.n_input],
                           name="INPUT_IMAGE")

        sequence_length = None
        if use_sequence_length:
            sequence_length = [self.time_steps] * batch_size
        if is_dynamic_rnn:
            rnn_inputs = tf.transpose(x, [1, 0, 2])
            outputs, _ = bidirectional_dynamic_rnn(fw_rnn_layer,
                                                   bw_rnn_layer,
                                                   rnn_inputs,
                                                   sequence_length,
                                                   dtype="float32",
                                                   time_major=True)
            fw_outputs, bw_outputs = outputs
            output = tf.concat([fw_outputs, bw_outputs], 2)
            output = tf.unstack(output, axis=0)
            output = output[-1]
        else:
            rnn_inputs = tf.unstack(x, self.time_steps, 1)
            # Sequence length is not supported for the static RNN since we
            # don't have a wrapper for it. During training we can still pass
            # sequence_length, but at inference time we set it to None.
            if is_inference:
                sequence_length = None
            outputs, _, _ = tf.nn.static_bidirectional_rnn(
                fw_rnn_layer,
                bw_rnn_layer,
                rnn_inputs,
                dtype="float32",
                sequence_length=sequence_length)
            output = outputs[-1]

        # Compute logits by multiplying output of shape [batch_size,num_units*2]
        # by the softmax layer's out_weight of shape [num_units*2,n_classes]
        # plus out_bias
        prediction = tf.matmul(output, out_weights) + out_bias
        output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

        return x, prediction, output_class
    def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
        """Build Mnist recognition model.

    Args:
      fw_lstm_layer: The forward LSTM layer, either a single LSTM cell or a
        multi LSTM cell.
      bw_lstm_layer: The backward LSTM layer, either a single LSTM cell or a
        multi LSTM cell.
      is_dynamic_rnn: Use dynamic_rnn or not.

    Returns:
     A tuple containing:

     - Input tensor of the model.
     - Prediction tensor of the model.
     - Output class tensor of the model.
    """
        # Weights and biases for output softmax layer.
        out_weights = tf.Variable(
            tf.random.normal([self.num_units * 2, self.n_classes]))
        out_bias = tf.Variable(tf.random.normal([self.n_classes]))

        # input image placeholder
        x = tf.placeholder("float", [None, self.time_steps, self.n_input],
                           name="INPUT_IMAGE")

        if is_dynamic_rnn:
            lstm_inputs = tf.transpose(x, [1, 0, 2])
            outputs, _ = bidirectional_dynamic_rnn(fw_lstm_layer,
                                                   bw_lstm_layer,
                                                   lstm_inputs,
                                                   dtype="float32",
                                                   time_major=True)
            fw_outputs, bw_outputs = outputs
            output = tf.concat([fw_outputs, bw_outputs], 2)
            output = tf.unstack(output, axis=0)
            output = output[-1]
        else:
            lstm_input = tf.unstack(x, self.time_steps, 1)
            outputs, _, _ = tf.nn.static_bidirectional_rnn(fw_lstm_layer,
                                                           bw_lstm_layer,
                                                           lstm_input,
                                                           dtype="float32")
            output = outputs[-1]

        # Compute logits by multiplying output of shape [batch_size,num_units*2]
        # by the softmax layer's out_weight of shape [num_units*2,n_classes]
        # plus out_bias
        prediction = tf.matmul(output, out_weights) + out_bias
        output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

        return x, prediction, output_class
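
The closing comment spells out the shapes in the logits computation; a quick worked example with assumed MNIST-style dimensions (num_units = 16, n_classes = 10):

import numpy as np

batch_size, num_units, n_classes = 4, 16, 10
output = np.zeros((batch_size, num_units * 2))      # concat of fw/bw outputs, [4, 32]
out_weights = np.zeros((num_units * 2, n_classes))  # softmax layer weights, [32, 10]
out_bias = np.zeros(n_classes)                      # softmax layer bias, [10]
logits = output @ out_weights + out_bias            # [4, 10], one logit row per image
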
  def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
    """Build Mnist recognition model.

    Args:
      fw_lstm_layer: The forward LSTM layer, either a single LSTM cell or a
        multi LSTM cell.
      bw_lstm_layer: The backward LSTM layer, either a single LSTM cell or a
        multi LSTM cell.
      is_dynamic_rnn: Use dynamic_rnn or not.

    Returns:
     A tuple containing:

     - Input tensor of the model.
     - Prediction tensor of the model.
     - Output class tensor of the model.
    """
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    if is_dynamic_rnn:
      lstm_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_lstm_layer,
          bw_lstm_layer,
          lstm_inputs,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      lstm_input = tf.unstack(x, self.time_steps, 1)
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
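
The docstring above says the forward and backward layers can be either a single LSTM cell or a multi LSTM cell. A minimal sketch of building both kinds with the TF 1.x rnn_cell API (num_units is an assumed value standing in for self.num_units):

import tensorflow as tf

num_units = 16  # assumed to match self.num_units

# Single-cell variant.
fw_lstm_layer = tf.nn.rnn_cell.LSTMCell(num_units, forget_bias=1.0)
bw_lstm_layer = tf.nn.rnn_cell.LSTMCell(num_units, forget_bias=1.0)

# Stacked (multi-cell) variant; each layer keeps num_units so the concatenated
# forward/backward output still matches out_weights of shape [num_units * 2, n_classes].
fw_lstm_layer = tf.nn.rnn_cell.MultiRNNCell(
    [tf.nn.rnn_cell.LSTMCell(num_units) for _ in range(2)])
bw_lstm_layer = tf.nn.rnn_cell.MultiRNNCell(
    [tf.nn.rnn_cell.LSTMCell(num_units) for _ in range(2)])
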
    def buildModel(self,
                   fw_rnn_layer,
                   bw_rnn_layer,
                   is_dynamic_rnn,
                   is_inference,
                   use_sequence_length=False):
        """Build Mnist recognition model.

    Args:
      fw_rnn_layer: The forward RNN layer, either a single RNN cell or a multi
        RNN cell.
      bw_rnn_layer: The backward RNN layer, either a single RNN cell or a multi
        RNN cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: If True, build the graph with a fixed batch size of 1 for
        inference; otherwise use self.batch_size.
      use_sequence_length: Whether to use sequence length or not. Defaults to
        False.

    Returns:
     A tuple containing:

     - Input tensor of the model.
     - Prediction tensor of the model.
     - Output class tensor of the model.
    """
        # Weights and biases for output softmax layer.
        out_weights = tf.Variable(
            tf.random.normal([self.num_units * 2, self.n_classes]))
        out_bias = tf.Variable(tf.random.normal([self.n_classes]))

        batch_size = self.batch_size
        if is_inference:
            batch_size = 1
        # input image placeholder
        x = tf.compat.v1.placeholder(
            "float", [batch_size, self.time_steps, self.n_input],
            name="INPUT_IMAGE")

        sequence_length = None
        if use_sequence_length:
            sequence_length = [self.time_steps] * batch_size
        if is_dynamic_rnn:
            rnn_inputs = tf.transpose(x, [1, 0, 2])
            outputs, _ = bidirectional_dynamic_rnn(fw_rnn_layer,
                                                   bw_rnn_layer,
                                                   rnn_inputs,
                                                   sequence_length,
                                                   dtype="float32",
                                                   time_major=True)
            fw_outputs, bw_outputs = outputs
            output = tf.concat([fw_outputs, bw_outputs], 2)
            output = tf.unstack(output, axis=0)
            output = output[-1]
        else:
            rnn_inputs = tf.unstack(x, self.time_steps, 1)
            # Sequence length is not supported for the static RNN since we
            # don't have a wrapper for it. During training we can still pass
            # sequence_length, but at inference time we set it to None.
            if is_inference:
                sequence_length = None
            outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(
                fw_rnn_layer,
                bw_rnn_layer,
                rnn_inputs,
                dtype="float32",
                sequence_length=sequence_length)
            output = outputs[-1]

        # Compute logits by multiplying output of shape [batch_size,num_units*2]
        # by the softmax layer's out_weight of shape [num_units*2,n_classes]
        # plus out_bias
        prediction = tf.matmul(output, out_weights) + out_bias
        output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

        return x, prediction, output_class
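
In every variant, prediction holds the raw logits while output_class is their softmax, so a training objective attaches directly to prediction. A hedged sketch, assuming a one-hot label placeholder and the TF 1.x optimizer API (none of this appears in the snippets above):

import tensorflow as tf


def add_training_head(prediction, n_classes, learning_rate=1e-3):
  """Attaches a cross-entropy loss and a train op to the returned logits."""
  # Hypothetical label placeholder matching the n_classes dimension.
  y = tf.compat.v1.placeholder("float", [None, n_classes], name="LABELS")
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
  train_op = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)
  return y, loss, train_op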