def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      """JZS3, mutant 2 with n units cells."""
      with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
        with tf.variable_scope("Zinput"):  # Reset gate and update gate.
          # We start with bias of 1.0 to not reset and not update.
          '''equation 1'''

          z = tf.sigmoid(linear.linear([inputs, tf.tanh(state)], 
                            self._num_units, True, 1.0))

          '''equation 2'''
        with tf.variable_scope("Rinput"):
          r = tf.sigmoid(linear.linear([inputs, state],
                            self._num_units, True, 1.0))
          '''equation 3'''
        with tf.variable_scope("Candidate"):
          component_0 = linear.linear([state*r,inputs],
                            self._num_units, True)
          
          component_2 = (tf.tanh(component_0))*z
          component_3 = state*(1 - z)

        h_t = component_2 + component_3

      return h_t, h_t #there is only one hidden state output to keep track of. 
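The '''equation N''' strings above are empty placeholders. As a reading aid (my notation, not from the source; the code matches mutant 3 / MUT3 of Jozefowicz et al. 2015), the update being computed is:

    z     = sigm(W_xz*x_t + W_hz*tanh(h_t) + B_z)                      (equation 1)
    r     = sigm(W_xr*x_t + W_hr*h_t + B_r)                            (equation 2)
    h_t+1 = tanh(W_hh*(r*h_t) + W_xh*x_t + B_h)*z + h_t*(1 - z)        (equation 3)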
Example #2
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope
                               or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = tf.split(1, 2, state)
            concat = linear.linear([inputs, h], 4 * self._num_units, True)

            cs, fs = [], []

            # This can be made more efficient since we're doing more than needs
            # to be done, but for now w/e.
            # NOTE: `child_states` (one [batch_size x 2*num_units] tensor per
            # child) is assumed to be provided to the cell elsewhere.
            for child_state in child_states:
                c_k, h_k = tf.split(1, 2, child_state)
                concat = linear.linear([inputs, h_k], 4 * self._num_units,
                                       True)
                i_k, j_k, f_k, o_k = tf.split(1, 4, concat)
                cs.append(c_k)
                fs.append(f_k)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            # TODO: forget gate for each child, probably need to split by number
            # of child states or something
            i, j, f, o = tf.split(1, 4, concat)

            # If no children just treat it like a regular lstm
            if not fs:
                cs.append(c)
                fs.append(f)

            new_c = sum(c_k * tf.sigmoid(f_k + self._forget_bias)
                        for c_k, f_k in zip(cs, fs)) + tf.sigmoid(i) * tf.tanh(j)
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

        return new_h, tf.concat(1, [new_c, new_h])
Example #3
  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      """JZS1, mutant 1, with num_units cells."""
      with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
        with tf.variable_scope("Zinput"):  # Update gate.
          # We start with bias of 1.0 to not reset and not update.
          '''equation 1 z = sigm(WxzXt+Bz), x_t is inputs'''

          z = tf.sigmoid(linear.linear([inputs], 
                            self._num_units, True, 1.0)) 

        with tf.variable_scope("Rinput"):
          '''equation 2 r = sigm(WxrXt+Whrht+Br), h_t is the previous state'''

          r = tf.sigmoid((linear.linear([inputs,state],
                            self._num_units, True, 1.0)))
          '''equation 3'''
        with tf.variable_scope("Candidate"):
          component_0 = linear.linear([r*state], 
                            self._num_units, True) 
          component_1 = tf.tanh(tf.tanh(inputs) + component_0)
          component_2 = component_1*z
          component_3 = state*(1 - z)

        h_t = component_2 + component_3

      return h_t, h_t #there is only one hidden state output to keep track of. 
Example #4
  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)

      cs, fs = [], []

      # This can be made more efficient since we're doing more than needs to be
      # done, but for now w/e.
      # NOTE: `child_states` (one [batch_size x 2*num_units] tensor per child)
      # is assumed to be provided to the cell elsewhere.
      for child_state in child_states:
          c_k, h_k = tf.split(1, 2, child_state)
          concat = linear.linear([inputs, h_k], 4 * self._num_units, True)
          i_k, j_k, f_k, o_k = tf.split(1, 4, concat)
          cs.append(c_k)
          fs.append(f_k)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      # TODO: forget gate for each child, probably need to split by number
      # of child states or something
      i, j, f, o = tf.split(1, 4, concat)

      # If no children just treat it like a regular lstm
      if not fs:
        cs.append(c)
        fs.append(f)

      new_c = (sum(c_k * tf.sigmoid(f_k + self._forget_bias)
                   for c_k, f_k in zip(cs, fs)) + tf.sigmoid(i) * tf.tanh(j))
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
Example #5
 def __call__(self, inputs, state, scope=None):
   """Gated recurrent unit (GRU) with nunits cells."""
   with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
     with tf.variable_scope("Gates"):  # Reset gate and update gate.
       # We start with bias of 1.0 to not reset and not update.
       r, u = tf.split(1, 2, linear.linear([inputs, state],
                                           2 * self._num_units, True, 1.0))
       r, u = tf.sigmoid(r), tf.sigmoid(u)
     with tf.variable_scope("Candidate"):
       c = tf.tanh(linear.linear([inputs, r * state], self._num_units, True))
     new_h = u * state + (1 - u) * c
   return new_h, new_h
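Because the GRU returns the same tensor as both output and new state, driving it over a sequence only requires threading one tensor through the loop. A minimal unrolling sketch (my own illustration, assuming the same TF 0.x-era API used throughout these examples; cell, num_units, batch_size and the inputs list are placeholders):

 # Hypothetical usage sketch, not part of the original example.
 cell = GRUCell(num_units)                       # any of the cells above works the same way
 state = tf.zeros([batch_size, cell.state_size])
 outputs = []
 for t, x_t in enumerate(inputs):                # inputs: list of [batch_size x input_size] tensors
   if t > 0:
     tf.get_variable_scope().reuse_variables()   # share the gate weights across time steps
   output, state = cell(x_t, state)
   outputs.append(output)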
Example #6
 def __call__(self, inputs, state, scope=None):
   """Run the cell and output projection on inputs, starting from state."""
   output, res_state = self._cell(inputs, state)
   # Default scope: "OutputProjectionWrapper"
   with tf.variable_scope(scope or type(self).__name__):
     projected = linear.linear(output, self._output_size, True)
   return projected, res_state
Example #7
class BasicRecursiveCell(RecursiveCell):
  """The most basic Recursive cell."""

  def __init__(self, num_units):
    self._num_units = num_units

  @property
  def input_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, states, scope=None):
    """Most basic Recursive:
    	leaf:		output = tanh(W * input + Bl).
    	composor:	output = tanh(U * (leftstate, rightstate) + Bc)
    """
    # for a leaf cell
    if inputs is not None and states is None:
      with tf.variable_scope(scope or type(self).__name__):  # "BasicRecursiveCell"
        with tf.variable_scope("leaf"):
          output = tf.tanh(linear.linear(inputs, self._num_units, True))
          return output, None
    # for a composor cell
    elif inputs is None and states is not None:
      with tf.variable_scope(scope or type(self).__name__):  # "BasicRecursiveCell"
        with tf.variable_scope("composor"):
          output = tf.tanh(linear.linear([states[0], states[1]], self._num_units, True))
          return output, None
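A small usage sketch for the cell above (my own illustration, not from the source; num_units and shapes are arbitrary): leaves are built from inputs, a composor node from two child outputs, and the second leaf reuses the first leaf's weights, following the reuse pattern shown in the testLinear examples further down.

  # Hypothetical usage sketch.
  cell = BasicRecursiveCell(4)
  left_in = tf.placeholder(tf.float32, [1, 4])
  right_in = tf.placeholder(tf.float32, [1, 4])

  left, _ = cell(left_in, None)                   # leaf branch creates the "leaf" weights
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    right, _ = cell(right_in, None)               # second leaf reuses them
  parent, _ = cell(None, [left, right])           # composor branch creates its own weights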
Example #8
  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):

      """Gated recurrent unit (GRU) with nunits cells."""
      with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with tf.variable_scope("Gates"):  # Reset gate and update gate.
          # We start with bias of 1.0 to not reset and not update.
          r, u = tf.split(1, 2, linear.linear([inputs, state],
                                              2 * self._num_units, True, 1.0))
          r, u = tf.sigmoid(r), tf.sigmoid(u)
        with tf.variable_scope("Candidate"): #you need a different one because you're doing a new linear
          #notice they have the activation/non-linear step right here! 
          c = tf.tanh(linear.linear([inputs, r * state], self._num_units, True))
        new_h = u * state + (1 - u) * c
      return new_h, new_h

      '''Note: for the GRU, the output and the hidden state are literally the same tensor.'''
Example #9
 def __call__(self,inputs, state, scope=None):
     
     with tf.variable_scope(scope or type(self).__name__):
         
         with tf.variable_scope("Gates"):
             r, u = tf.split(1,2,linear.linear([inputs,state],
                                               self._num_units * 2,
                                               True, 1.0))
                                               
             r, u = tf.sigmoid(r), tf.sigmoid(u)
         
         with tf.variable_scope("Candidate"):
             c = tf.tanh(linear.linear([inputs,state * r],self._num_units,
                                       True))
         new_h = u * state + (1-u) * c
         
         return new_h, new_h
Example #10
 def __call__(self, inputs, states, scope=None):
   """Most basic Recursive:
   	leaf:		output = tanh(W * input + Bl).
   	composor:	output = tanh(U * (leftstate, rightstate) + Bc)
   """
   # for a leaf cell
   if inputs is not None and states is None:
     with tf.variable_scope(scope or type(self).__name__):  # "BasicRecursiveCell"
       with tf.variable_scope("leaf"):
         output = tf.tanh(linear.linear(inputs, self._num_units, True))
         return output, None
   # for a composor cell
   elif inputs is None and states is not None:
     with tf.variable_scope(scope or type(self).__name__):  # "BasicRecursiveCell"
       with tf.variable_scope("composor"):
         output = tf.tanh(linear.linear([states[0], states[1]], self._num_units, True))
         return output, None
   else:
     raise NotImplementedError("Invalid type of node")
Example #11
  def testLinear(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
        x = tf.zeros([1, 2])
        l = linear.linear([x], 2, False)
        sess.run([tf.variables.initialize_all_variables()])
        res = sess.run([l], {x.name: np.array([[1., 2.]])})
        self.assertAllClose(res[0], [[3.0, 3.0]])

        # Checks prevent you from accidentally creating a shared function.
        with self.assertRaises(ValueError) as exc:
          l1 = linear.linear([x], 2, False)
        self.assertEqual(str(exc.exception)[:12], "Over-sharing")

        # But you can create a new one in a new scope and share the variables.
        with tf.variable_scope("l1") as new_scope:
          l1 = linear.linear([x], 2, False)
        with tf.variable_scope(new_scope, reuse=True):
          linear.linear([l1], 2, False)
        self.assertEqual(len(tf.trainable_variables()), 2)
Example #12
  def testLinear(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
        x = tf.zeros([1, 2])
        l = linear.linear([x], 2, False)
        sess.run([tf.variables.initialize_all_variables()])
        res = sess.run([l], {x.name: np.array([[1., 2.]])})
        self.assertAllClose(res[0], [[3.0, 3.0]])

        # Checks prevent you from accidentally creating a shared function.
        with self.assertRaises(ValueError) as exc:
          l1 = linear.linear([x], 2, False)
        self.assertEqual(exc.exception.message[:12], "Over-sharing")

        # But you can create a new one in a new scope and share the variables.
        with tf.variable_scope("l1") as new_scope:
          l1 = linear.linear([x], 2, False)
        with tf.variable_scope(new_scope, reuse=True):
          linear.linear([l1], 2, False)
        self.assertEqual(len(tf.trainable_variables()), 2)
Example #13
 def __call__(self, inputs, states, scope=None):
   """Most basic Recursive:
   	leaf:		output = tanh(W * input + Bl).
   	composor:	output = tanh(U * (leftstate, rightstate) + Bc)
   """
   # for a leaf cell
   if inputs is not None and states is None:
     with tf.variable_scope(scope or type(self).__name__):  # "BasicRecursiveCell"
       with tf.variable_scope("leaf"):
         output = tf.tanh(linear.linear(inputs, self._num_units, True))
         return output, None
Example #14
    def attention(query):
      """Put attention masks on hidden using hidden_features and query."""

      a = 0
      with tf.variable_scope("Attention_%i" % a):
        y = linear.linear(query, attention_vec_size, True)
        y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
        # Attention mask is a softmax of v^T * tanh(...).
        s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
        a = tf.nn.softmax(s)

        return a
Example #15
        def attention(query):
            """Put attention masks on hidden using hidden_features and query."""

            a = 0
            with tf.variable_scope("Attention_%i" % a):
                y = linear.linear(query, attention_vec_size, True)
                y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
                # Attention mask is a softmax of v^T * tanh(...).
                s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y),
                                  [2, 3])
                a = tf.nn.softmax(s)

                return a
Example #16
  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(1, 4, concat)

      new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
Example #17
 def attention(query):
   """Put attention masks on hidden using hidden_features and query."""
   ds = []  # Results of attention reads will be stored here.
   for a in xrange(num_heads):
     with tf.variable_scope("Attention_%d" % a):
       y = linear.linear(query, attention_vec_size, True)
       y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
       # Attention mask is a softmax of v^T * tanh(...).
       s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
       a = tf.nn.softmax(s)
       # Now calculate the attention-weighted vector d.
       d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                         [1, 2])
       ds.append(tf.reshape(d, [-1, attn_size]))
   return ds
Example #18
  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      """Long short-term memory cell (LSTM)."""
      with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
        # Parameters of gates are concatenated into one multiply for efficiency.
        c, h = tf.split(1, 2, state)
        concat = linear.linear([inputs, h], 4 * self._num_units, True)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = tf.split(1, 4, concat)

        new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
        new_h = tf.tanh(new_c) * tf.sigmoid(o)

      return new_h, tf.concat(1, [new_c, new_h])

      '''Important: the second part is the hidden state, thus an LSTM with n cells has a hidden state of dimension 2n.'''
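A small caller-side sketch of what that comment means (my own illustration, assuming the same tf.split/tf.concat calling convention as above; lstm_cell, x_t, batch_size and num_units are placeholders):

      # Hypothetical caller-side sketch.
      state = tf.zeros([batch_size, 2 * num_units])   # [c ; h], both zero initially
      output, state = lstm_cell(x_t, state)           # any of the LSTM __call__s above
      c, h = tf.split(1, 2, state)                    # recover c and h from the 2n-wide state
      # `output` and `h` carry the same values: new_h is returned in both positions.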
Example #19
def dnn(X, hidden_units, activation=tf.nn.relu, keep_prob=None):
    """Creates fully connected deep neural network subgraph.

    Args:
        X: tensor or placeholder for input features.
        hidden_units: list of counts of hidden units in each layer.
        activation: activation function between layers.
        keep_prob: if not None, will add a dropout layer with the given
                   probability.

    Returns:
        The output tensor of the deep neural network.
    """
    with tf.variable_scope('dnn'):
        for i, n_units in enumerate(hidden_units):
            with tf.variable_scope('layer%d' % i):
                X = linear.linear(X, n_units, True)
            X = activation(X)
            if keep_prob:
                X = tf.nn.dropout(X, keep_prob)
        return X
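A short usage sketch for dnn (my own illustration; the placeholder shape, layer sizes and dropout probability are arbitrary assumptions):

    # Hypothetical usage, assuming the linear module imported by these examples.
    X = tf.placeholder(tf.float32, [None, 10])       # batch of 10-dimensional features
    hidden = dnn(X, hidden_units=[128, 64], keep_prob=0.5)
    logits = linear.linear(hidden, 2, True)          # final projection on top of the dnn output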
Example #20
def dnn(tensor_in, hidden_units, activation=tf.nn.relu, keep_prob=None):
    """Creates fully connected deep neural network subgraph.

    Args:
        tensor_in: tensor or placeholder for input features.
        hidden_units: list of counts of hidden units in each layer.
        activation: activation function between layers.
        keep_prob: if not None, will add a dropout layer with the given
                   probability.

    Returns:
        The output tensor of the deep neural network.
    """
    with tf.variable_scope('dnn'):
        for i, n_units in enumerate(hidden_units):
            with tf.variable_scope('layer%d' % i):
                tensor_in = linear(tensor_in, n_units, True)
            tensor_in = activation(tensor_in)
            if keep_prob:
                tensor_in = tf.nn.dropout(tensor_in, keep_prob)
        return tensor_in
Example #21
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
                      output_size=None, num_heads=1, loop_function=None,
                      dtype=tf.float32, scope=None):
  """RNN decoder with attention for the sequence-to-sequence model.

  Args:
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    initial_state: 2D Tensor [batch_size x cell.state_size].
    attention_states: 3D Tensor [batch_size x attn_length x attn_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    output_size: size of the output vectors; if None, we use cell.output_size.
    num_heads: number of attention heads that read from attention_states.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for decoding,
      but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x cell.output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x cell.input_size].
    dtype: The dtype to use for the RNN initial state (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "attention_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
      [batch_size x output_size]. These represent the generated outputs.
      Output i is computed from input i (which is either i-th decoder_inputs or
      loop_function(output {i-1}, i)) as follows. First, we run the cell
      on a combination of the input and previous attention masks:
        cell_output, new_state = cell(linear(input, prev_attn), prev_state).
      Then, we calculate new attention masks:
        new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
      and then we calculate the output:
        output = linear(cell_output, new_attn).
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: when num_heads is not positive, there are no inputs, or shapes
      of attention_states are not set.
  """
  if not decoder_inputs:
    raise ValueError("Must provide at least 1 input to attention decoder.")
  if num_heads < 1:
    raise ValueError("With less than 1 heads, use a non-attention decoder.")
  if not attention_states.get_shape()[1:2].is_fully_defined():
    raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                     % attention_states.get_shape())
  if output_size is None:
    output_size = cell.output_size

  with tf.variable_scope(scope or "attention_decoder"):
    batch_size = tf.shape(decoder_inputs[0])[0]  # Needed for reshaping.
    attn_length = attention_states.get_shape()[1].value
    attn_size = attention_states.get_shape()[2].value

    # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
    hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
    hidden_features = []
    v = []
    attention_vec_size = attn_size  # Size of query vectors for attention.
    for a in xrange(num_heads):
      k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
      hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
      v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))

    states = [initial_state]

    def attention(query):
      """Put attention masks on hidden using hidden_features and query."""
      ds = []  # Results of attention reads will be stored here.
      for a in xrange(num_heads):
        with tf.variable_scope("Attention_%d" % a):
          y = linear.linear(query, attention_vec_size, True)
          y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
          # Attention mask is a softmax of v^T * tanh(...).
          s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
          a = tf.nn.softmax(s)
          # Now calculate the attention-weighted vector d.
          d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                            [1, 2])
          ds.append(tf.reshape(d, [-1, attn_size]))
      return ds

    outputs = []
    prev = None
    batch_attn_size = tf.pack([batch_size, attn_size])
    attns = [tf.zeros(batch_attn_size, dtype=dtype)
             for _ in xrange(num_heads)]
    for a in attns:  # Ensure the second shape of attention vectors is set.
      a.set_shape([None, attn_size])
    for i in xrange(len(decoder_inputs)):
      if i > 0:
        tf.get_variable_scope().reuse_variables()
      inp = decoder_inputs[i]
      # If loop_function is set, we use it instead of decoder_inputs.
      if loop_function is not None and prev is not None:
        with tf.variable_scope("loop_function", reuse=True):
          inp = tf.stop_gradient(loop_function(prev, i))
      # Merge input and previous attentions into one vector of the right size.
      x = linear.linear([inp] + attns, cell.input_size, True)
      # Run the RNN.
      cell_output, new_state = cell(x, states[-1])
      states.append(new_state)
      # Run the attention mechanism.
      attns = attention(new_state)
      with tf.variable_scope("AttnOutputProjection"):
        output = linear.linear([cell_output] + attns, output_size, True)
      if loop_function is not None:
        # We do not propagate gradients over the loop function.
        prev = tf.stop_gradient(output)
      outputs.append(output)

  return outputs, states
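As a concrete example of the loop_function argument described in the docstring (my own sketch, not from the library): greedy decoding that feeds back the embedding of the previous step's best symbol. It assumes an `embedding` matrix of shape [vocab_size x cell.input_size] and that output_size equals vocab_size; otherwise project the logits first.

def greedy_loop_function(prev, _):
  # prev: [batch_size x output_size] logits from the previous decoding step.
  prev_symbol = tf.argmax(prev, 1)
  # Feed the embedding of the argmax symbol as the next input.
  return tf.nn.embedding_lookup(embedding, prev_symbol)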
Example #22
 def __call__(self, inputs, state, scope=None):
   """Run the input projection and then the cell."""
   # Default scope: "InputProjectionWrapper"
   with tf.variable_scope(scope or type(self).__name__):
     projected = linear.linear(inputs, self._cell.input_size, True)
   return self._cell(projected, state)
Example #23
  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      print('testing')
      with tf.variable_scope(scope or type(self).__name__):  # "UnitaryRNNCell"
        with tf.variable_scope("UnitaryGates"):  # Reset gate and update gate.


          '''just for sake of consistency, we'll keep some var names the same as authors'''

          n_hidden = self._num_units
          h_prev = state


          '''development nick version here'''
          step1 = unitary_linear.times_diag_tf(h_prev, n_hidden) #this will create a diagonal tensor with given diagonal values


          #work on times_reflection next



          # NOTE: lin_output_re, lin_output_im and hidden_bias are assumed to be
          # produced by the (unfinished) unitary transform above.
          modulus = tf.sqrt(lin_output_re ** 2 + lin_output_im ** 2)
          rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-5)
          nonlin_output_re = lin_output_re * rescale
          nonlin_output_im = lin_output_im * rescale

          h_t = tf.concat(1, [nonlin_output_re, 
                             nonlin_output_im]) 

          return h_t, h_t #check if h_t is the same as the output?????




          '''----------------------------end of unitary rnn cell--------------------------'''


          # We start with bias of 1.0 to not reset and not update.
          '''First, we will start with the hidden linear transform:
          W = D3 R2 F^-1 D2 Perm R1 F D1'''
          step1 = times_diag(h_prev, n_hidden, theta[0,:])
          step2 = step1
  #        step2 = do_fft(step1, n_hidden)
          step3 = times_reflection(step2, n_hidden, reflection[0,:])
          step4 = vec_permutation(step3, n_hidden, index_permute)
          step5 = times_diag(step4, n_hidden, theta[1,:])
          step6 = step5
  #        step6 = do_ifft(step5, n_hidden)
          step7 = times_reflection(step6, n_hidden, reflection[1,:])
          step8 = times_diag(step7, n_hidden, theta[2,:])     
          step9 = scale_diag(step8, n_hidden, scale)

          hidden_lin_output = step9

          z = tf.sigmoid(linear.linear([inputs], 
                            self._num_units, True, 1.0))

          '''equation 2 r = sigm(WxrXt+Whrht+Br), h_t is the previous state'''

          r = tf.sigmoid((linear.linear([inputs,state],
                            self._num_units, True, 1.0)))
          '''equation 3'''
        with tf.variable_scope("Candidate"):
          component_0 = linear.linear([r*state],
                            self._num_units, True)
          component_1 = tf.tanh(tf.tanh(inputs) + component_0)
          component_2 = component_1*z
          component_3 = state*(1 - z)

          h_t = component_2 + component_3

          h_t = tf.concat(1, [nonlin_output_re, nonlin_output_im])  # concatenate the real and imaginary parts


        return h_t, h_t #there is only one hidden state output to keep track of. 
Example #24
 def __call__(self, inputs, state, scope=None):
   """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
   with tf.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
     output = tf.tanh(linear.linear([inputs, state], self._num_units, True))
   return output, output
Example #25
 def __call__(self, inputs, state):
     output = tf.tanh(linear.linear([inputs, state], self.output_size, False))
     return output, output
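Note that unlike the other cells, this last variant creates its weights in whatever scope it is called from, so calling it twice in the same scope would trip the same "Over-sharing" check exercised in the testLinear examples above. A hedged caller-side sketch (cell, x_0, x_1 and initial_state are placeholders):

 with tf.variable_scope("rnn"):
   output, state = cell(x_0, initial_state)
   tf.get_variable_scope().reuse_variables()    # later steps share the same weights
   output, state = cell(x_1, state)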