Example 1: Dense layer forward pass (`call`)
 def call(self, inputs):
   output = K.dot(inputs, self.kernel)
   if self.use_bias:
     output = K.bias_add(output, self.bias)
   if self.activation is not None:
     output = self.activation(output)
   return output
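For reference, here is a minimal NumPy sketch of the same dense forward pass. It is not the Keras implementation; the function name, the toy shapes, and the use of np.tanh as the activation are assumptions for illustration.

import numpy as np

def dense_forward(inputs, kernel, bias=None, activation=None):
  # output = inputs . kernel, plus optional bias and activation,
  # mirroring the call method above
  output = inputs @ kernel
  if bias is not None:
    output = output + bias
  if activation is not None:
    output = activation(output)
  return output

x = np.random.randn(4, 8)  # batch of 4, input dim 8
w = np.random.randn(8, 3)  # kernel maps 8 inputs to 3 units
b = np.zeros(3)
y = dense_forward(x, w, b, activation=np.tanh)
print(y.shape)  # (4, 3)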
Example 2: LSTM cell `step` (per-timestep state update)
  def step(self, inputs, states):
    h_tm1 = states[0]  # previous hidden state
    c_tm1 = states[1]  # previous cell state
    dp_mask = states[2]  # dropout masks for the inputs
    rec_dp_mask = states[3]  # dropout masks for the recurrent state

    if self.implementation == 2:
      z = K.dot(inputs * dp_mask[0], self.kernel)
      z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
      if self.use_bias:
        z = K.bias_add(z, self.bias)

      z0 = z[:, :self.units]
      z1 = z[:, self.units:2 * self.units]
      z2 = z[:, 2 * self.units:3 * self.units]
      z3 = z[:, 3 * self.units:]

      i = self.recurrent_activation(z0)  # input gate
      f = self.recurrent_activation(z1)  # forget gate
      c = f * c_tm1 + i * self.activation(z2)  # new cell state
      o = self.recurrent_activation(z3)  # output gate
    else:
      if self.implementation == 0:
        x_i = inputs[:, :self.units]
        x_f = inputs[:, self.units:2 * self.units]
        x_c = inputs[:, 2 * self.units:3 * self.units]
        x_o = inputs[:, 3 * self.units:]
      elif self.implementation == 1:
        x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
        x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
        x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
        x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
      else:
        raise ValueError('Unknown `implementation` mode.')

      i = self.recurrent_activation(x_i + K.dot(h_tm1 * rec_dp_mask[0],
                                                self.recurrent_kernel_i))
      f = self.recurrent_activation(x_f + K.dot(h_tm1 * rec_dp_mask[1],
                                                self.recurrent_kernel_f))
      c = f * c_tm1 + i * self.activation(
          x_c + K.dot(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c))
      o = self.recurrent_activation(x_o + K.dot(h_tm1 * rec_dp_mask[3],
                                                self.recurrent_kernel_o))
    h = o * self.activation(c)
    if 0 < self.dropout + self.recurrent_dropout:
      h._uses_learning_phase = True
    return h, [h, c]
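The `implementation == 2` branch fuses all four gate projections into a single matrix product and then splits the result. Below is a minimal NumPy sketch of that fused update with the dropout masks and learning-phase flag omitted; the function and variable names are illustrative, not the Keras API.

import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, h_tm1, c_tm1, kernel, recurrent_kernel, bias):
  # one fused projection of shape (batch, 4 * units), split into the
  # i, f, c, o gate pre-activations
  z = x @ kernel + h_tm1 @ recurrent_kernel + bias
  z0, z1, z2, z3 = np.split(z, 4, axis=-1)
  i = sigmoid(z0)                  # input gate
  f = sigmoid(z1)                  # forget gate
  c = f * c_tm1 + i * np.tanh(z2)  # new cell state
  o = sigmoid(z3)                  # output gate
  h = o * np.tanh(c)               # new hidden state
  return h, c

batch, dim, units = 2, 3, 5
x = np.random.randn(batch, dim)
h = np.zeros((batch, units))
c = np.zeros((batch, units))
W = np.random.randn(dim, 4 * units)
U = np.random.randn(units, 4 * units)
b = np.zeros(4 * units)
h, c = lstm_step(x, h, c, W, U, b)
print(h.shape, c.shape)  # (2, 5) (2, 5)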
Example 3: GRU cell `step` (per-timestep state update)
  def step(self, inputs, states):
    h_tm1 = states[0]  # previous hidden state
    dp_mask = states[1]  # dropout masks for the inputs
    rec_dp_mask = states[2]  # dropout masks for the recurrent state

    if self.implementation == 2:
      matrix_x = K.dot(inputs * dp_mask[0], self.kernel)
      if self.use_bias:
        matrix_x = K.bias_add(matrix_x, self.bias)
      matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
                           self.recurrent_kernel[:, :2 * self.units])

      x_z = matrix_x[:, :self.units]
      x_r = matrix_x[:, self.units:2 * self.units]
      recurrent_z = matrix_inner[:, :self.units]
      recurrent_r = matrix_inner[:, self.units:2 * self.units]

      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)

      x_h = matrix_x[:, 2 * self.units:]
      recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
                          self.recurrent_kernel[:, 2 * self.units:])
      hh = self.activation(x_h + recurrent_h)
    else:
      if self.implementation == 0:
        x_z = inputs[:, :self.units]
        x_r = inputs[:, self.units:2 * self.units]
        x_h = inputs[:, 2 * self.units:]
      elif self.implementation == 1:
        x_z = K.dot(inputs * dp_mask[0], self.kernel_z)
        x_r = K.dot(inputs * dp_mask[1], self.kernel_r)
        x_h = K.dot(inputs * dp_mask[2], self.kernel_h)
        if self.use_bias:
          x_z = K.bias_add(x_z, self.bias_z)
          x_r = K.bias_add(x_r, self.bias_r)
          x_h = K.bias_add(x_h, self.bias_h)
      else:
        raise ValueError('Unknown `implementation` mode.')
      z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
                                                self.recurrent_kernel_z))
      r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
                                                self.recurrent_kernel_r))

      hh = self.activation(x_h + K.dot(r * h_tm1 * rec_dp_mask[2],
                                       self.recurrent_kernel_h))
    h = z * h_tm1 + (1 - z) * hh
    if 0 < self.dropout + self.recurrent_dropout:
      h._uses_learning_phase = True
    return h, [h]
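As in the LSTM example, the `implementation == 2` branch batches the z/r/h input projections into one matrix product, but the recurrent projection for the candidate state must wait until the reset gate r is known. A minimal NumPy sketch of the same update, dropout omitted (names are illustrative):

import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h_tm1, kernel, recurrent_kernel, bias, units):
  matrix_x = x @ kernel + bias  # fused z, r, h input projections
  x_z, x_r, x_h = np.split(matrix_x, 3, axis=-1)
  matrix_inner = h_tm1 @ recurrent_kernel[:, :2 * units]
  z = sigmoid(x_z + matrix_inner[:, :units])  # update gate
  r = sigmoid(x_r + matrix_inner[:, units:])  # reset gate
  # the candidate projection uses the reset-gated previous state
  hh = np.tanh(x_h + (r * h_tm1) @ recurrent_kernel[:, 2 * units:])
  return z * h_tm1 + (1 - z) * hh

batch, dim, units = 2, 3, 5
x = np.random.randn(batch, dim)
h = np.zeros((batch, units))
W = np.random.randn(dim, 3 * units)
U = np.random.randn(units, 3 * units)
b = np.zeros(3 * units)
h = gru_step(x, h, W, U, b, units)
print(h.shape)  # (2, 5)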
Example 4: SimpleRNN cell `step`
  def step(self, inputs, states):
    if self.implementation == 0:
      h = inputs
    else:
      if 0 < self.dropout < 1:
        h = K.dot(inputs * states[1], self.kernel)
      else:
        h = K.dot(inputs, self.kernel)
      if self.bias is not None:
        h = K.bias_add(h, self.bias)

    prev_output = states[0]
    if 0 < self.recurrent_dropout < 1:
      prev_output *= states[2]
    output = h + K.dot(prev_output, self.recurrent_kernel)
    if self.activation is not None:
      output = self.activation(output)

    # Properly set learning phase on output tensor.
    if 0 < self.dropout + self.recurrent_dropout:
      output._uses_learning_phase = True
    return output, [output]
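The SimpleRNN update is just a dense projection of the input plus a dense projection of the previous output. A minimal NumPy sketch without the dropout branches (the names and the tanh activation are assumptions):

import numpy as np

def simple_rnn_step(x, prev_output, kernel, recurrent_kernel, bias):
  h = x @ kernel + bias
  # the new output feeds back as the state for the next timestep
  return np.tanh(h + prev_output @ recurrent_kernel)

batch, dim, units = 2, 3, 5
x = np.random.randn(batch, dim)
prev = np.zeros((batch, units))
W = np.random.randn(dim, units)
U = np.random.randn(units, units)
b = np.zeros(units)
out = simple_rnn_step(x, prev, W, U, b)
print(out.shape)  # (2, 5)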
Example 5: `_time_distributed_dense` (dense projection over every timestep)
def _time_distributed_dense(x,
                            w,
                            b=None,
                            dropout=None,
                            input_dim=None,
                            output_dim=None,
                            timesteps=None,
                            training=None):
  """Apply `y . w + b` for every temporal slice y of x.

  Arguments:
      x: input tensor.
      w: weight matrix.
      b: optional bias vector.
      dropout: optional dropout rate in (0, 1); the same dropout
          mask is applied to every temporal slice of the input.
      input_dim: integer; optional dimensionality of the input.
      output_dim: integer; optional dimensionality of the output.
      timesteps: integer; optional number of timesteps.
      training: training phase tensor or boolean.

  Returns:
      Output tensor.
  """
  if not input_dim:
    input_dim = K.shape(x)[2]
  if not timesteps:
    timesteps = K.shape(x)[1]
  if not output_dim:
    output_dim = K.shape(w)[1]

  if dropout is not None and 0. < dropout < 1.:
    # apply the same dropout pattern at every timestep
    ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
    dropout_matrix = K.dropout(ones, dropout)
    expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
    x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

  # collapse time dimension and batch dimension together
  x = K.reshape(x, (-1, input_dim))
  x = K.dot(x, w)
  if b is not None:
    x = K.bias_add(x, b)
  # reshape to 3D tensor
  if K.backend() == 'tensorflow':
    x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
    x.set_shape([None, None, output_dim])
  else:
    x = K.reshape(x, (-1, timesteps, output_dim))
  return x
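The key move is collapsing (batch, timesteps) into a single axis so one 2-D matmul covers every timestep at once. A minimal NumPy sketch of the same reshape-matmul-reshape pattern, with the dropout and backend branches omitted (names are illustrative):

import numpy as np

def time_distributed_dense(x, w, b=None):
  # x: (batch, timesteps, input_dim) -> (batch, timesteps, output_dim)
  batch, timesteps, input_dim = x.shape
  out = x.reshape(-1, input_dim) @ w  # collapse batch and time together
  if b is not None:
    out = out + b
  return out.reshape(batch, timesteps, w.shape[1])

x = np.random.randn(2, 7, 4)  # batch 2, 7 timesteps, input dim 4
w = np.random.randn(4, 6)
b = np.zeros(6)
print(time_distributed_dense(x, w, b).shape)  # (2, 7, 6)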
Example 6: LocallyConnected2D forward pass (`call`)
  def call(self, inputs):
    stride_row, stride_col = self.strides
    _, feature_dim, filters = self.kernel_shape

    if self.data_format == 'channels_first':
      if K.backend() == 'theano':
        output = []
        for i in range(self.output_row):
          for j in range(self.output_col):
            slice_row = slice(i * stride_row,
                              i * stride_row + self.kernel_size[0])
            slice_col = slice(j * stride_col,
                              j * stride_col + self.kernel_size[1])
            x_flatten = K.reshape(inputs[:, :, slice_row, slice_col],
                                  (1, -1, feature_dim))
            output.append(
                K.dot(x_flatten, self.kernel[i * self.output_col + j, :, :]))
        output = K.concatenate(output, axis=0)
      else:
        xs = []
        for i in range(self.output_row):
          for j in range(self.output_col):
            slice_row = slice(i * stride_row,
                              i * stride_row + self.kernel_size[0])
            slice_col = slice(j * stride_col,
                              j * stride_col + self.kernel_size[1])
            xs.append(
                K.reshape(inputs[:, :, slice_row, slice_col],
                          (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        output = K.batch_dot(x_aggregate, self.kernel)
      output = K.reshape(output, (self.output_row, self.output_col, -1,
                                  filters))
      output = K.permute_dimensions(output, (2, 3, 0, 1))

    elif self.data_format == 'channels_last':
      xs = []
      for i in range(self.output_row):
        for j in range(self.output_col):
          slice_row = slice(i * stride_row,
                            i * stride_row + self.kernel_size[0])
          slice_col = slice(j * stride_col,
                            j * stride_col + self.kernel_size[1])
          xs.append(
              K.reshape(inputs[:, slice_row, slice_col, :],
                        (1, -1, feature_dim)))
      x_aggregate = K.concatenate(xs, axis=0)
      output = K.batch_dot(x_aggregate, self.kernel)
      output = K.reshape(output, (self.output_row, self.output_col, -1,
                                  filters))
      output = K.permute_dimensions(output, (2, 0, 1, 3))

    if self.use_bias:
      if self.data_format == 'channels_first':
        output += K.reshape(self.bias, (1, filters, self.output_row,
                                        self.output_col))
      elif self.data_format == 'channels_last':
        output += K.reshape(self.bias, (1, self.output_row, self.output_col,
                                        filters))
    output = self.activation(output)
    return output
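Unlike a convolution, a locally connected layer gives every output position its own weight matrix: the code above gathers one flattened input patch per position and batch-multiplies the stacked patches against the per-position kernels. A minimal NumPy sketch of the channels_last path, bias and activation omitted (the names and toy shapes are assumptions):

import numpy as np

def local_conv2d(inputs, kernel, kernel_size, strides, output_shape):
  # inputs: (batch, rows, cols, channels); kernel: one weight matrix per
  # output position, shape (positions, kh * kw * channels, filters)
  output_row, output_col = output_shape
  kh, kw = kernel_size
  sr, sc = strides
  feature_dim = kh * kw * inputs.shape[-1]
  xs = []
  for i in range(output_row):
    for j in range(output_col):
      patch = inputs[:, i * sr:i * sr + kh, j * sc:j * sc + kw, :]
      xs.append(patch.reshape(1, -1, feature_dim))
  x_aggregate = np.concatenate(xs, axis=0)  # (positions, batch, feature_dim)
  output = np.einsum('pbf,pfk->pbk', x_aggregate, kernel)  # per-position matmul
  output = output.reshape(output_row, output_col, -1, kernel.shape[-1])
  return np.transpose(output, (2, 0, 1, 3))  # (batch, row, col, filters)

x = np.random.randn(2, 6, 6, 3)
W = np.random.randn(9, 2 * 2 * 3, 4)  # 3x3 output positions, 2x2 patches, 4 filters
y = local_conv2d(x, W, kernel_size=(2, 2), strides=(2, 2), output_shape=(3, 3))
print(y.shape)  # (2, 3, 3, 4)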