Example #1
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn


def step_gru(cell_inputs, cell_state, kernel, recurrent_kernel, input_bias,
             recurrent_bias):
    """Step function that will be used by Keras RNN backend."""
    h_tm1 = cell_state

    # inputs projected by all gate matrices at once
    matrix_x = K.dot(cell_inputs, kernel)
    matrix_x = K.bias_add(matrix_x, input_bias)

    x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1)

    # hidden state projected by all gate matrices at once
    matrix_inner = K.dot(h_tm1, recurrent_kernel)
    matrix_inner = K.bias_add(matrix_inner, recurrent_bias)

    recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner,
                                                            3,
                                                            axis=1)
    z = nn.sigmoid(x_z + recurrent_z)
    r = nn.sigmoid(x_r + recurrent_r)
    hh = nn.tanh(x_h + r * recurrent_h)

    # previous and candidate state mixed by update gate
    h = z * h_tm1 + (1 - z) * hh
    return h, [h]
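For context, a minimal sketch of driving this step function over a sequence; the sizes and random weights below are hypothetical stand-ins for a trained GRU's parameters, and the imports above are assumed:

import tensorflow as tf

units, batch, timesteps, features = 4, 2, 5, 3

# Each gate matrix is packed side by side, hence 3 * units columns.
kernel = tf.random.normal([features, 3 * units])
recurrent_kernel = tf.random.normal([units, 3 * units])
input_bias = tf.zeros([3 * units])
recurrent_bias = tf.zeros([3 * units])

inputs = tf.random.normal([batch, timesteps, features])
state = tf.zeros([batch, units])

# Unroll over time, threading the hidden state through each step.
for t in range(timesteps):
    state, _ = step_gru(inputs[:, t], state, kernel, recurrent_kernel,
                        input_bias, recurrent_bias)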
Example #2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn


def tanh_and_sum(rnn_size, input_data, sh, scope='mdLSTM'):
    """Tanh of the mean over the four MDLSTM directions.

    Args:
        rnn_size: int, the size of the cell
        input_data: (num_images, height, width, depth) tensor
        sh: list; the first element is the height of the LSTM block,
            the second is its width
        scope: string, the function scope

    Returns:
        (num_images, height/sh[0], width/sh[1], sh[0]*sh[1]) tensor,
        the tanh of the mean over the four MDLSTM iterations
    """
    outs = []
    for i in range(2):
        for j in range(2):
            # Select which spatial dimensions to reverse for this direction.
            dims = []
            if i != 0:
                dims.append(1)
            if j != 0:
                dims.append(2)
            # Run the MDLSTM once for each of the four directions.
            outputs = multi_dimentional_rnn(
                rnn_size, input_data, sh, dims,
                scope + "-l{0}".format(i * 2 + j))
            outs.append(outputs)

    outs = array_ops.stack(outs, axis=0)
    mean = math_ops.reduce_mean(outs, 0)
    return nn.tanh(mean)
Example #3
from tensorflow.python.ops import nn


def tanh(x):
    """Hyperbolic Tangent activation function.

    Arguments:
        x: Input tensor.

    Returns:
        The tanh activation:
        `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
    """
    return nn.tanh(x)
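The identity in the docstring can be sanity-checked numerically; a small NumPy sketch:

import numpy as np

x = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
manual = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
assert np.allclose(manual, np.tanh(x))  # sinh/cosh form matches np.tanh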
Example #4
def tanh(x):
    """Element-wise hyperbolic tangent (tanh).

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return nn.tanh(x)
Example #5
    def __call__(self, inputs, state, attention, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        with tf.variable_scope(scope or 'AttrGRU'):
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset.
                r = tf.nn.sigmoid(self._linear(inputs, state, bias_default=1.0))
            with tf.variable_scope("Candidate"):
                c = tanh(self._linear(inputs, r * state))

            new_h = attention * c + (1 - attention) * state
        return new_h
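`self._linear` is not shown in this excerpt; a plausible TF1-style sketch of such a helper (written here as a free function; the names, shapes, and the concat-then-project layout are assumptions):

import tensorflow as tf

def _linear(inputs, state, bias_default=0.0):
    # Assumed behavior: one affine map over the concatenated
    # [inputs, state], projecting back to the state size, with the
    # bias initialized to bias_default.
    concat = tf.concat([inputs, state], axis=1)
    in_dim = concat.get_shape().as_list()[1]
    out_dim = state.get_shape().as_list()[1]
    w = tf.get_variable("w", [in_dim, out_dim])
    b = tf.get_variable("b", [out_dim],
                        initializer=tf.constant_initializer(bias_default))
    return tf.matmul(concat, w) + b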
Example #6
def tanh(x):
  """Hyperbolic Tangent activation function.

  Arguments:
      x: Input tensor.

  Returns:
      The tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) -
      exp(-x))/(exp(x) + exp(-x)))`.
  """
  return nn.tanh(x)
Example #7
    def __call__(self, inputs, state, attention, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        with tf.variable_scope(scope or 'AttrGRU'):
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset.
                r = tf.nn.sigmoid(self._linear(inputs, state,
                                               bias_default=1.0))
            with tf.variable_scope("Candidate"):
                c = tanh(self._linear(inputs, r * state))

            new_h = attention * c + (1 - attention) * state
        return new_h
Example #8
    def __call__(self, inputs, state, attention, scope=None):
        # inputs: (batch_size, hidden_size)
        # state: (batch_size, hidden_size)
        # attention: (batch_size, 1)

        # reset gate: (batch_size, hidden_size)
        r = tf.nn.sigmoid(tf.matmul(inputs, self.w_r)
                          + tf.matmul(state, self.u_r) + self.b_r)
        # candidate state: (batch_size, hidden_size)
        c = tanh(tf.matmul(inputs, self.w_c)
                 + tf.matmul(r * state, self.u_c) + self.b_c)
        # new_h: (batch_size, hidden_size)
        new_h = attention * c + (1 - attention) * state

        return new_h
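The weights `w_r`, `u_r`, `b_r`, `w_c`, `u_c`, `b_c` are created elsewhere in the class; a plausible constructor sketch, with hypothetical names and sizes (an assumption, not the project's actual code):

import tensorflow as tf

class AttentionGRUCell:
    def __init__(self, input_size, hidden_size):
        # Hypothetical weight setup for the gate equations above.
        init = tf.random_normal_initializer(stddev=0.1)
        self.w_r = tf.Variable(init([input_size, hidden_size]))
        self.u_r = tf.Variable(init([hidden_size, hidden_size]))
        self.b_r = tf.Variable(tf.zeros([hidden_size]))
        self.w_c = tf.Variable(init([input_size, hidden_size]))
        self.u_c = tf.Variable(init([hidden_size, hidden_size]))
        self.b_c = tf.Variable(tf.zeros([hidden_size]))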
Example #9
def tanh(x):
    """Hyperbolic tangent activation function.

    For example:

    >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
    >>> b = tf.keras.activations.tanh(a)
    >>> b.numpy()
    array([-0.9950547, -0.7615942,  0.,  0.7615942,  0.9950547], dtype=float32)

    Args:
        x: Input tensor.

    Returns:
        Tensor of same shape and dtype of input `x`, with tanh activation:
        `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
    """
    return nn.tanh(x)
Example #10
def tanh(x):
    """Hyperbolic Tangent (tanh) activation function.

    For example:

    ```python
    # Constant 1-D tensor populated with value list.
    a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
    b = tf.keras.activations.tanh(a)
    # b = [-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547]
    ```

    Arguments:
        x: Input tensor.

    Returns:
        A tensor of same shape and dtype of input `x`.
        The tanh activation:
        `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
    """
    return nn.tanh(x)
Example #11
    def __call__(self,
                 inputs,
                 state,
                 attention,
                 scope=None):  # callable on the object we created
        # inputs: the fusion-layer outputs at each time step
        # state: the hidden state produced by the GRU
        # attention: the new update gate
        """Gated recurrent unit (GRU) with nunits cells."""
        with tf.variable_scope(scope or 'AttrGRU'):
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset.
                r = tf.nn.sigmoid(self._linear(
                    inputs, state,
                    bias_default=1.0))  # the reset gate in the modified GRU
            with tf.variable_scope("Candidate"):
                c = tanh(self._linear(inputs, r * state))  # intermediate state

            new_h = attention * c + (
                1 - attention) * state  # the new hidden state
        return new_h  # return the new hidden state
Example #12
def tanh(x):
    return nn.tanh(x)
Example #13
def tanh(x):
  return nn.tanh(x)
Example #14
from tensorflow.python.ops import nn


def body(i, x, y):
    # One while_loop iteration: bump the counter, squash both tensors.
    i = i + 1
    x = nn.tanh(x)
    y = nn.tanh(y)
    return (i, x, y)
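This `body` has the signature `tf.while_loop` expects; a minimal driver sketch, with a made-up iteration count and initial values:

import tensorflow as tf

def cond(i, x, y):
    return i < 10  # arbitrary stopping condition

i0 = tf.constant(0)
x0 = tf.constant([0.5, -0.5])
y0 = tf.constant([1.0, -1.0])

# tf.while_loop re-applies body while cond holds, threading (i, x, y).
i_out, x_out, y_out = tf.while_loop(cond, body, (i0, x0, y0))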