Example #1
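The examples below are variants of Bidirectional.call (plus, in the last example, the call of a CuDNN-backed RNN) as found in Keras-style codebases. They are method bodies, so they assume the usual module-level imports, roughly: from keras import backend as K and from keras.utils.generic_utils import has_arg (the tf.keras variants use generic_utils and state_ops from TensorFlow instead). This first variant derives the learning-phase flag from the wrapped layer's dropout settings.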
  def call(self, inputs, training=None, mask=None):
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask

    y = self.forward_layer.call(inputs, **kwargs)
    y_rev = self.backward_layer.call(inputs, **kwargs)
    # The backward layer's output runs in reverse time order; re-reverse
    # it so both sequences are aligned along the time axis.
    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase: if the wrapped layer applies any
    # dropout, its output depends on training vs. inference mode.
    if 0 < self.layer.dropout + self.layer.recurrent_dropout:
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True
    return output
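As a quick illustration of how the merge_mode branches above surface in user code, here is a minimal sketch (assuming a Keras 2.x-style install; layer sizes and shapes are illustrative, not taken from the example):

from keras.models import Sequential
from keras.layers import LSTM, Bidirectional

# 'concat' doubles the feature dimension; 'sum', 'ave' and 'mul' keep it;
# merge_mode=None returns the forward and backward outputs as a list.
model = Sequential()
model.add(Bidirectional(LSTM(16, return_sequences=True),
                        merge_mode='concat',
                        input_shape=(10, 8)))
model.summary()  # last output shape: (None, 10, 32) = 16 forward + 16 backward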
Example #2
    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask

        y = self.forward_layer.call(inputs, **kwargs)
        y_rev = self.backward_layer.call(inputs, **kwargs)
        if self.return_sequences:
            y_rev = K.reverse(y_rev, 1)
        if self.merge_mode == 'concat':
            output = K.concatenate([y, y_rev])
        elif self.merge_mode == 'sum':
            output = y + y_rev
        elif self.merge_mode == 'ave':
            output = (y + y_rev) / 2
        elif self.merge_mode == 'mul':
            output = y * y_rev
        elif self.merge_mode is None:
            output = [y, y_rev]

        # Properly set learning phase
        if (getattr(y, '_uses_learning_phase', False)
                or getattr(y_rev, '_uses_learning_phase', False)):
            if self.merge_mode is None:
                for out in output:
                    out._uses_learning_phase = True
            else:
                output._uses_learning_phase = True
        return output
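Unlike Example #1, this variant does not inspect the wrapped layer's dropout settings: it propagates the _uses_learning_phase flag only when one of the directional outputs already carries it, which also covers wrapped layers whose learning-phase dependence comes from something other than dropout.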
Example #3
    def call(self, inputs, training=None, mask=None, initial_state=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask

        if initial_state is not None and has_arg(self.layer.call,
                                                 'initial_state'):
            if not isinstance(initial_state, list):
                raise ValueError(
                    'When passing `initial_state` to a Bidirectional RNN, the state '
                    'should be a list containing the states of the underlying RNNs. '
                    'Found: ' + str(initial_state))
            forward_state = initial_state[:len(initial_state) // 2]
            backward_state = initial_state[len(initial_state) // 2:]
            y = self.forward_layer.call(inputs,
                                        initial_state=forward_state,
                                        **kwargs)
            y_rev = self.backward_layer.call(inputs,
                                             initial_state=backward_state,
                                             **kwargs)
        else:
            y = self.forward_layer.call(inputs, **kwargs)
            y_rev = self.backward_layer.call(inputs, **kwargs)

        # With return_state=True the wrapped RNNs return [output, state, ...];
        # collect the states and keep only the outputs in y and y_rev.
        if self.return_state:
            states = y[1:] + y_rev[1:]
            y = y[0]
            y_rev = y_rev[0]

        if self.return_sequences:
            y_rev = K.reverse(y_rev, 1)
        if self.merge_mode == 'concat':
            output = K.concatenate([y, y_rev])
        elif self.merge_mode == 'sum':
            output = y + y_rev
        elif self.merge_mode == 'ave':
            output = (y + y_rev) / 2
        elif self.merge_mode == 'mul':
            output = y * y_rev
        elif self.merge_mode is None:
            output = [y, y_rev]

        # Properly set learning phase
        if (getattr(y, '_uses_learning_phase', False)
                or getattr(y_rev, '_uses_learning_phase', False)):
            if self.merge_mode is None:
                for out in output:
                    out._uses_learning_phase = True
            else:
                output._uses_learning_phase = True

        if self.return_state:
            if self.merge_mode is None:
                return output + states
            return [output] + states
        return output
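The initial_state handling above splits the supplied list in half: the first half seeds the forward layer, the second half the backward layer. A minimal sketch of supplying those states from user code (all names and sizes here are illustrative):

from keras.layers import Input, LSTM, Bidirectional
from keras.models import Model

seq = Input(shape=(10, 8))
# An LSTM carries two states (h, c), so the Bidirectional wrapper
# expects four tensors: [h_fwd, c_fwd, h_bwd, c_bwd].
h_f, c_f = Input(shape=(16,)), Input(shape=(16,))
h_b, c_b = Input(shape=(16,)), Input(shape=(16,))
out = Bidirectional(LSTM(16))(seq, initial_state=[h_f, c_f, h_b, c_b])
model = Model([seq, h_f, c_f, h_b, c_b], out)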
Example #4
  def call(self, inputs,
           training=None,
           mask=None,
           initial_state=None,
           constants=None):
    """`Bidirectional.call` implements the same API as the wrapped `RNN`."""
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if generic_utils.has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask
    if generic_utils.has_arg(self.layer.call, 'constants'):
      kwargs['constants'] = constants

    if initial_state is not None and generic_utils.has_arg(
        self.layer.call, 'initial_state'):
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
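This tf.keras variant additionally forwards constants to the wrapped layer when its call signature accepts them, which is how constant (non-time-varying) tensors, e.g. for attention-style cells, reach both directions. It otherwise matches Example #3, though it drops the explicit isinstance check on initial_state; Example #5 below is the same variant without the constants handling.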
Example #5
  def call(self, inputs, training=None, mask=None, initial_state=None):
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if generic_utils.has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask

    if initial_state is not None and generic_utils.has_arg(
        self.layer.call, 'initial_state'):
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
Example #6
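The final example is the shared call of the CuDNN-backed RNN layers (CuDNNLSTM/CuDNNGRU-style). Unlike the Bidirectional examples above, masking is rejected outright, the time axis is reversed up front when go_backwards is set, and stateful layers register state assignments so the final states carry over to the next batch.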
  def call(self, inputs, mask=None, training=None, initial_state=None):
    if isinstance(mask, list):
      mask = mask[0]
    if mask is not None:
      raise ValueError('Masking is not supported for CuDNN RNNs.')

    # input shape: `(samples, time (padded with zeros), input_dim)`
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    if isinstance(inputs, list):
      initial_state = inputs[1:]
      inputs = inputs[0]
    elif initial_state is not None:
      pass  # an explicitly passed initial_state takes precedence
    elif self.stateful:
      initial_state = self.states
    else:
      initial_state = self.get_initial_state(inputs)

    if len(initial_state) != len(self.states):
      raise ValueError('Layer has ' + str(len(self.states)) +
                       ' states but was passed ' + str(len(initial_state)) +
                       ' initial states.')

    if self.go_backwards:
      # Reverse time axis.
      inputs = K.reverse(inputs, 1)
    output, states = self._process_batch(inputs, initial_state)

    if self.stateful:
      updates = []
      for i in range(len(states)):
        updates.append(state_ops.assign(self.states[i], states[i]))
      self.add_update(updates, inputs)

    if self.return_state:
      return [output] + states
    else:
      return output
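As a usage sketch, under the assumption of a Keras 2.x-style CuDNNLSTM layer (these layers require a CUDA-capable GPU; shapes are illustrative):

from keras.models import Sequential
from keras.layers import CuDNNLSTM

# stateful=True makes the layer carry its final states over to the next
# batch, via the state assignments registered in call() above; a fixed
# batch size is therefore required.
model = Sequential()
model.add(CuDNNLSTM(32, stateful=True, batch_input_shape=(4, 10, 8)))
model.reset_states()  # clear the carried-over states between sequences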