Example #1
 def call(self, inputs):
   if not isinstance(inputs, list):
     raise ValueError('A merge layer should be called '
                      'on a list of inputs.')
   if self._reshape_required:
     reshaped_inputs = []
     input_ndims = list(map(K.ndim, inputs))
     if None not in input_ndims:
       # If ranks of all inputs are available,
       # we simply expand each of them at axis=1
       # until all of them have the same rank.
       max_ndim = max(input_ndims)
       for x in inputs:
         x_ndim = K.ndim(x)
         for _ in range(max_ndim - x_ndim):
           x = K.expand_dims(x, 1)
         reshaped_inputs.append(x)
       return self._merge_function(reshaped_inputs)
     else:
       # Transpose all inputs so that batch size is the last dimension.
       # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
       transposed = False
       for x in inputs:
         x_ndim = K.ndim(x)
         if x_ndim is None:
           x_shape = K.shape(x)
           batch_size = x_shape[0]
           new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
           x_transposed = K.reshape(x,
                                    K.stack([batch_size,
                                             K.prod(x_shape[1:])]))
           x_transposed = K.permute_dimensions(x_transposed, (1, 0))
           x_transposed = K.reshape(x_transposed, new_shape)
           reshaped_inputs.append(x_transposed)
           transposed = True
         elif x_ndim > 1:
           dims = list(range(1, x_ndim)) + [0]
           reshaped_inputs.append(K.permute_dimensions(x, dims))
           transposed = True
         else:
           # We don't transpose inputs if they are 1D vectors or scalars.
           reshaped_inputs.append(x)
       y = self._merge_function(reshaped_inputs)
       y_ndim = K.ndim(y)
       if transposed:
         # If inputs have been transposed, we have to transpose the output too.
         if y_ndim is None:
           y_shape = K.shape(y)
           y_ndim = K.shape(y_shape)[0]
           batch_size = y_shape[y_ndim - 1]
           new_shape = K.concatenate(
               [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
           y = K.reshape(y, (-1, batch_size))
           y = K.permute_dimensions(y, (1, 0))
           y = K.reshape(y, new_shape)
         elif y_ndim > 1:
           dims = [y_ndim - 1] + list(range(y_ndim - 1))
           y = K.permute_dimensions(y, dims)
       return y
   else:
     return self._merge_function(inputs)
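
The rank-padding branch above ("expand at axis=1 until ranks match") is what lets a merge layer combine inputs of different ranks. A minimal sketch of a call that exercises it, assuming the Keras 2 functional API (the layer and shapes are illustrative):

from keras.layers import Input, Add
from keras.models import Model

x = Input(shape=(4,))     # rank 2: (batch, 4)
y = Input(shape=(3, 4))   # rank 3: (batch, 3, 4)
z = Add()([x, y])         # x is expanded to (batch, 1, 4), then broadcast
model = Model(inputs=[x, y], outputs=z)  # output shape: (batch, 3, 4)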
Example #2
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, list):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, list):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all([m is None for m in mask]):
         return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
     # is the same as the corresponding input.
     masks = []
     for input_i, mask_i in zip(inputs, mask):
         if mask_i is None:
              # Input is unmasked. Append an all-True (boolean) mask.
              masks.append(K.ones_like(input_i, dtype='bool'))
         elif K.ndim(mask_i) < K.ndim(input_i):
              # Mask has lower rank than the input; expand it at the last axis.
             masks.append(K.expand_dims(mask_i))
         else:
             masks.append(mask_i)
     concatenated = K.concatenate(masks, axis=self.axis)
     return K.all(concatenated, axis=-1, keepdims=False)
Example #3
 def compute_mask(self, inputs, mask=None):
   if mask is None:
     return None
   if not isinstance(mask, list):
     raise ValueError('`mask` should be a list.')
   if not isinstance(inputs, list):
     raise ValueError('`inputs` should be a list.')
   if len(mask) != len(inputs):
     raise ValueError('The lists `inputs` and `mask` '
                      'should have the same length.')
   if all([m is None for m in mask]):
     return None
   # Make a list of masks while making sure
   # the dimensionality of each mask
   # is the same as the corresponding input.
   masks = []
   for input_i, mask_i in zip(inputs, mask):
     if mask_i is None:
       # Input is unmasked. Append all 1s to masks,
       # but cast it to bool first
       masks.append(K.cast(K.ones_like(input_i), 'bool'))
     elif K.ndim(mask_i) < K.ndim(input_i):
        # Mask has lower rank than the input; expand it at the last axis.
       masks.append(K.expand_dims(mask_i))
     else:
       masks.append(mask_i)
   concatenated = K.concatenate(masks, axis=self.axis)
   return K.all(concatenated, axis=-1, keepdims=False)
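
Both variants above are `Concatenate.compute_mask`: each input gets a boolean mask of matching rank (all-True where no mask exists), the masks are concatenated along the merge axis, and a logical AND over the last axis yields the combined mask. A minimal sketch of how a mask reaches this method, assuming the Keras 2 functional API (shapes are illustrative):

from keras.layers import Input, Masking, Concatenate

a = Input(shape=(5, 3))
b = Input(shape=(5, 3))
a_masked = Masking(mask_value=0.)(a)          # produces a (batch, 5) boolean mask
merged = Concatenate(axis=-1)([a_masked, b])  # compute_mask receives [mask_a, None]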
Example #4
    def _create_gates(self, inputs, memory):
        """Create input and forget gates for this step using `inputs` and `memory`.
            Args:
              inputs: Tensor input.
              memory: The current state of memory.
            Returns:
              input_gate: A LSTM-like insert gate.
              forget_gate: A LSTM-like forget gate.
            """
        # We'll create the input and forget gates at once. Hence, calculate double
        # the gate size.
        memory = K.tanh(memory)
        inputs = K.batch_flatten(inputs)
        gate_inputs = self._linear(inputs, self.kernel_gi, self.bias_gi)
        gate_inputs = K.expand_dims(gate_inputs, axis=1)
        gate_memory = self._linear(memory, self.kernel_gm, self.bias_gm)
        gates = tf.split(gate_memory + gate_inputs,
                         num_or_size_splits=2,
                         axis=2)
        input_gate, forget_gate = gates

        input_gate = tf.sigmoid(input_gate + self.input_bias)
        forget_gate = tf.sigmoid(forget_gate + self.forget_bias)

        return input_gate, forget_gate
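
`self._linear` is not shown in the snippet. A plausible reconstruction, assuming it is a dense projection over the last axis (hypothetical, not the author's code):

from keras import backend as K

def _linear(self, x, kernel, bias):
    # Hypothetical helper: y = x . kernel + bias, applied to the last axis.
    # `kernel` and `bias` are assumed to be weights created in build().
    return K.bias_add(K.dot(x, kernel), bias)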
Example #5
 def get_initial_state(self, inputs):
   # build an all-zero tensor of shape (samples, output_dim)
   initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
   initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
   initial_state = K.expand_dims(initial_state)  # (samples, 1)
   initial_state = K.tile(initial_state, [1,
                                          self.units])  # (samples, output_dim)
   initial_state = [initial_state for _ in range(len(self.states))]
   return initial_state
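
The zeros are derived from `inputs` rather than allocated directly because the batch size is only known symbolically at graph-construction time; summing away the other axes leaves a tensor whose batch dimension tracks the input's. A standalone sketch of the shape bookkeeping (batch size, timesteps, and `units` are assumed values):

import numpy as np
from keras import backend as K

inputs = K.constant(np.zeros((8, 5, 3)))  # (samples, timesteps, input_dim)
state = K.zeros_like(inputs)              # (8, 5, 3)
state = K.sum(state, axis=(1, 2))         # (8,)
state = K.expand_dims(state)              # (8, 1)
state = K.tile(state, [1, 4])             # (8, 4), i.e. (samples, units) for units=4
print(K.int_shape(state))                 # (8, 4)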
Example #6
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, list):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, list):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all([m is None for m in mask]):
         return None
     masks = [K.expand_dims(m, 0) for m in mask if m is not None]
     return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
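
Unlike the `Concatenate` variant, this one (Keras's elementwise `_Merge.compute_mask`, where all masks share one shape) stacks the masks along a new leading axis and ANDs across it. A toy sketch of that reduction (values are illustrative):

from keras import backend as K

m1 = K.constant([[True, True, False]], dtype='bool')
m2 = K.constant([[True, False, True]], dtype='bool')
stacked = K.concatenate([K.expand_dims(m1, 0), K.expand_dims(m2, 0)], axis=0)
combined = K.all(stacked, axis=0, keepdims=False)
print(K.eval(combined))  # [[ True False False]]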
Example #7
     def call(self, inputs, memory, training=None):
         # A static batch size is required to reshape the flat memory state.
         batch_size = int(inputs.shape[0])

         # Restore the flat memory to (batch_size, mem_slots, mem_size).
         memory = K.reshape(memory, (batch_size, self.mem_slots, self.mem_size))
         # Project the input and append it to memory as one extra slot.
         inputs = self._linear(inputs, self.kernel_in, self.bias_in)
         inputs_reshape = K.expand_dims(inputs, axis=1)

         memory_plus_input = K.concatenate([memory, inputs_reshape], axis=1)
         next_memory = self._attend_over_memory(memory_plus_input)

         # Drop the slot(s) corresponding to the appended input.
         n = inputs_reshape.get_shape().as_list()[1]
         next_memory = next_memory[:, :-n, :]

         # Gate the attended memory against the previous memory, LSTM-style.
         input_gate, forget_gate = self._create_gates(inputs_reshape, memory)
         next_memory = input_gate * K.tanh(next_memory)
         next_memory += forget_gate * memory
         next_memory = K.batch_flatten(next_memory)

         return next_memory, (next_memory,)
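
The final gating mirrors an LSTM cell update, next = input_gate * tanh(candidate) + forget_gate * prev_memory, applied per memory slot. A toy numeric sketch (all values are illustrative):

import numpy as np

prev_memory = np.ones((1, 2, 3))       # (batch, mem_slots, mem_size)
candidate = np.full((1, 2, 3), 0.5)    # memory after attention
input_gate = np.full((1, 2, 3), 0.9)   # sigmoid outputs, in (0, 1)
forget_gate = np.full((1, 2, 3), 0.1)
next_memory = input_gate * np.tanh(candidate) + forget_gate * prev_memory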