Example #1
  def step(self, inputs, states):
    h_tm1 = states[0]  # previous hidden state
    dp_mask = states[1]  # dropout mask applied to the inputs
    rec_dp_mask = states[2]  # dropout mask applied to the recurrent state

    if self.implementation == 2:
      matrix_x = K.dot(inputs * dp_mask[0], self.kernel)
      if self.use_bias:
        matrix_x = K.bias_add(matrix_x, self.bias)
      matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
                           self.recurrent_kernel[:, :2 * self.units])

      x_z = matrix_x[:, :self.units]
      x_r = matrix_x[:, self.units:2 * self.units]
      recurrent_z = matrix_inner[:, :self.units]
      recurrent_r = matrix_inner[:, self.units:2 * self.units]

      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)

      x_h = matrix_x[:, 2 * self.units:]
      recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
                          self.recurrent_kernel[:, 2 * self.units:])
      hh = self.activation(x_h + recurrent_h)
    else:
      if self.implementation == 0:
        x_z = inputs[:, :self.units]
        x_r = inputs[:, self.units:2 * self.units]
        x_h = inputs[:, 2 * self.units:]
      elif self.implementation == 1:
        x_z = K.dot(inputs * dp_mask[0], self.kernel_z)
        x_r = K.dot(inputs * dp_mask[1], self.kernel_r)
        x_h = K.dot(inputs * dp_mask[2], self.kernel_h)
        if self.use_bias:
          x_z = K.bias_add(x_z, self.bias_z)
          x_r = K.bias_add(x_r, self.bias_r)
          x_h = K.bias_add(x_h, self.bias_h)
      else:
        raise ValueError('Unknown `implementation` mode.')
      z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
                                                self.recurrent_kernel_z))
      r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
                                                self.recurrent_kernel_r))

      hh = self.activation(x_h + K.dot(r * h_tm1 * rec_dp_mask[2],
                                       self.recurrent_kernel_h))
    h = z * h_tm1 + (1 - z) * hh
    if 0 < self.dropout + self.recurrent_dropout:
      h._uses_learning_phase = True
    return h, [h]
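
The fused `implementation == 2` branch above issues one `K.dot` and one `K.bias_add` for all three GRU gates, then slices the result along the feature axis. A minimal standalone sketch of that pattern (the sizes `batch`, `input_dim`, `units` are made up for illustration):

import numpy as np
from keras import backend as K

batch, input_dim, units = 2, 3, 4

# One matmul plus one bias_add covers all three gates (z, r, h);
# slicing the feature axis recovers each gate's pre-activation.
inputs = K.constant(np.random.rand(batch, input_dim))
kernel = K.variable(np.random.rand(input_dim, 3 * units))
bias = K.variable(np.zeros(3 * units))

matrix_x = K.bias_add(K.dot(inputs, kernel), bias)
x_z = matrix_x[:, :units]            # update-gate input
x_r = matrix_x[:, units:2 * units]   # reset-gate input
x_h = matrix_x[:, 2 * units:]        # candidate input
print(K.eval(x_z).shape)             # (2, 4)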
Example #3
 def call(self, inputs):
   output = K.dot(inputs, self.kernel)
   if self.use_bias:
     output = K.bias_add(output, self.bias)
   if self.activation is not None:
     output = self.activation(output)
   return output
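
For 2D outputs such as this dense layer, `K.bias_add(output, self.bias)` adds the bias along the last axis, equivalent to broadcasting addition. A quick check with toy values (not from the original):

import numpy as np
from keras import backend as K

x = K.constant(np.ones((2, 5)))
w = K.constant(np.ones((5, 3)))
b = K.constant(np.arange(3.0))

out = K.bias_add(K.dot(x, w), b)  # bias broadcasts over the last axis
print(K.eval(out))                # every row is [5., 6., 7.]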
Example #5
File: local.py  Project: lengjia/RRL
  def call(self, inputs):
    output = K.local_conv1d(inputs, self.kernel, self.kernel_size, self.strides)

    if self.use_bias:
      output = K.bias_add(output, self.bias)
    if self.activation is not None:
      output = self.activation(output)
    return output
Example #6
 def input_conv(self, x, w, b=None, padding='valid'):
     conv_out = K.conv2d(x,
                         w,
                         strides=self.strides,
                         padding=padding,
                         data_format=self.data_format,
                         dilation_rate=self.dilation_rate)
     if b is not None:
         conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
     return conv_out
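
For 4D convolution outputs, the `data_format` argument to `K.bias_add` decides which axis the bias broadcasts over: axis 1 for 'channels_first', the last axis for 'channels_last'. A minimal sketch assuming NHWC shapes (all sizes made up):

import numpy as np
from keras import backend as K

feat = K.constant(np.zeros((1, 8, 8, 16)))  # NHWC conv output
bias = K.constant(np.ones(16))              # one entry per channel

out = K.bias_add(feat, bias, data_format='channels_last')
print(K.eval(out).shape)  # (1, 8, 8, 16)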
Example #8
File: local.py  Project: Dr4KK/tensorflow
  def call(self, inputs):
    output = K.local_conv2d(inputs,
                            self.kernel,
                            self.kernel_size,
                            self.strides,
                            (self.output_row, self.output_col),
                            self.data_format)
    if self.use_bias:
      output = K.bias_add(output, self.bias, data_format=self.data_format)

    output = self.activation(output)
    return output
Example #10
def _time_distributed_dense(x,
                            w,
                            b=None,
                            dropout=None,
                            input_dim=None,
                            output_dim=None,
                            timesteps=None,
                            training=None):
  """Apply `y . w + b` for every temporal slice y of x.

  Arguments:
      x: input tensor.
      w: weight matrix.
      b: optional bias vector.
      dropout: optional dropout rate (the same dropout mask
          is applied to every temporal slice of the input).
      input_dim: integer; optional dimensionality of the input.
      output_dim: integer; optional dimensionality of the output.
      timesteps: integer; optional number of timesteps.
      training: training phase tensor or boolean.

  Returns:
      Output tensor.
  """
  if not input_dim:
    input_dim = K.shape(x)[2]
  if not timesteps:
    timesteps = K.shape(x)[1]
  if not output_dim:
    output_dim = K.shape(w)[1]

  if dropout is not None and 0. < dropout < 1.:
    # apply the same dropout pattern at every timestep
    ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
    dropout_matrix = K.dropout(ones, dropout)
    expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
    x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

  # collapse time dimension and batch dimension together
  x = K.reshape(x, (-1, input_dim))
  x = K.dot(x, w)
  if b is not None:
    x = K.bias_add(x, b)
  # reshape to 3D tensor
  if K.backend() == 'tensorflow':
    x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
    x.set_shape([None, None, output_dim])
  else:
    x = K.reshape(x, (-1, timesteps, output_dim))
  return x
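
A usage sketch for the helper above, with made-up shapes (batch 2, 5 timesteps, input_dim 3, output_dim 4); dropout is left off so the mask branch is skipped:

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 5, 3))
w = K.variable(np.random.rand(3, 4))
b = K.variable(np.zeros(4))

y = _time_distributed_dense(x, w, b, input_dim=3, output_dim=4, timesteps=5)
print(K.int_shape(y))  # (None, None, 4) after the set_shape call above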
Example #12
    def call(self, inputs):
        outputs = K.depthwise_conv2d(inputs,
                                     self.depthwise_kernel,
                                     data_format=self.data_format,
                                     strides=self.strides,
                                     padding=self.padding)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #13
  def call(self, inputs, training=None):
    outputs = K.depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format)

    if self.use_bias:
      outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)

    if self.activation is not None:
      return self.activation(outputs)

    return outputs
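
A standalone sketch of the depthwise-convolution-plus-bias pattern, assuming 'channels_last' data and a depth multiplier of 1 (all sizes made up):

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(1, 8, 8, 3))          # NHWC input
dw_kernel = K.variable(np.random.rand(3, 3, 3, 1))  # (kh, kw, in_ch, mult)
bias = K.variable(np.zeros(3))                      # one entry per channel

out = K.depthwise_conv2d(x, dw_kernel, strides=(1, 1), padding='same',
                         data_format='channels_last')
out = K.bias_add(out, bias, data_format='channels_last')
print(K.eval(out).shape)  # (1, 8, 8, 3)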
Example #14
  def step(self, inputs, states):
    h_tm1 = states[0]  # previous hidden state
    c_tm1 = states[1]  # previous cell state
    dp_mask = states[2]  # dropout mask applied to the inputs
    rec_dp_mask = states[3]  # dropout mask applied to the recurrent state

    if self.implementation == 2:
      z = K.dot(inputs * dp_mask[0], self.kernel)
      z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
      if self.use_bias:
        z = K.bias_add(z, self.bias)

      z0 = z[:, :self.units]
      z1 = z[:, self.units:2 * self.units]
      z2 = z[:, 2 * self.units:3 * self.units]
      z3 = z[:, 3 * self.units:]

      i = self.recurrent_activation(z0)
      f = self.recurrent_activation(z1)
      c = f * c_tm1 + i * self.activation(z2)
      o = self.recurrent_activation(z3)
    else:
      if self.implementation == 0:
        x_i = inputs[:, :self.units]
        x_f = inputs[:, self.units:2 * self.units]
        x_c = inputs[:, 2 * self.units:3 * self.units]
        x_o = inputs[:, 3 * self.units:]
      elif self.implementation == 1:
        x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
        x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
        x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
        x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
      else:
        raise ValueError('Unknown `implementation` mode.')

      i = self.recurrent_activation(x_i + K.dot(h_tm1 * rec_dp_mask[0],
                                                self.recurrent_kernel_i))
      f = self.recurrent_activation(x_f + K.dot(h_tm1 * rec_dp_mask[1],
                                                self.recurrent_kernel_f))
      c = f * c_tm1 + i * self.activation(
          x_c + K.dot(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c))
      o = self.recurrent_activation(x_o + K.dot(h_tm1 * rec_dp_mask[3],
                                                self.recurrent_kernel_o))
    h = o * self.activation(c)
    if 0 < self.dropout + self.recurrent_dropout:
      h._uses_learning_phase = True
    return h, [h, c]
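
Note that the `implementation == 1` branch adds biases with plain `+` while the fused branch uses `K.bias_add`; for 2D activations the two are interchangeable. A quick check with toy values:

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 3)))
b = K.constant(np.arange(3.0))

print(K.eval(K.bias_add(x, b)))  # rows of [0., 1., 2.]
print(K.eval(x + b))             # identical result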
Example #17
  def step(self, inputs, states):
    if self.implementation == 0:
      h = inputs
    else:
      if 0 < self.dropout < 1:
        h = K.dot(inputs * states[1], self.kernel)
      else:
        h = K.dot(inputs, self.kernel)
      if self.bias is not None:
        h = K.bias_add(h, self.bias)

    prev_output = states[0]
    if 0 < self.recurrent_dropout < 1:
      prev_output *= states[2]
    output = h + K.dot(prev_output, self.recurrent_kernel)
    if self.activation is not None:
      output = self.activation(output)

    # Properly set learning phase on output tensor.
    if 0 < self.dropout + self.recurrent_dropout:
      output._uses_learning_phase = True
    return output, [output]
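
A self-contained sketch of this step with dropout disabled, showing where `K.bias_add` sits between the input projection and the recurrent term (the sizes and the `tanh` activation are assumptions for illustration):

import numpy as np
from keras import backend as K

units = 4
inputs = K.constant(np.random.rand(2, 3))
prev_output = K.constant(np.zeros((2, units)))
kernel = K.variable(np.random.rand(3, units))
recurrent_kernel = K.variable(np.random.rand(units, units))
bias = K.variable(np.zeros(units))

h = K.bias_add(K.dot(inputs, kernel), bias)
output = K.tanh(h + K.dot(prev_output, recurrent_kernel))
print(K.eval(output).shape)  # (2, 4)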