Example #1
 def call(self, inputs):
   if not isinstance(inputs, list):
     raise ValueError('A merge layer should be called ' 'on a list of inputs.')
   if self._reshape_required:
     reshaped_inputs = []
     input_ndims = list(map(K.ndim, inputs))
     if None not in input_ndims:
       # If ranks of all inputs are available,
       # we simply expand each of them at axis=1
       # until all of them have the same rank.
       max_ndim = max(input_ndims)
       for x in inputs:
         x_ndim = K.ndim(x)
         for _ in range(max_ndim - x_ndim):
           x = K.expand_dims(x, 1)
         reshaped_inputs.append(x)
       return self._merge_function(reshaped_inputs)
     else:
       # Transpose all inputs so that batch size is the last dimension.
       # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
       transposed = False
       for x in inputs:
         x_ndim = K.ndim(x)
         if x_ndim is None:
           x_shape = K.shape(x)
           batch_size = x_shape[0]
           new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
           x_transposed = K.reshape(x,
                                    K.stack([batch_size,
                                             K.prod(x_shape[1:])]))
           x_transposed = K.permute_dimensions(x_transposed, (1, 0))
           x_transposed = K.reshape(x_transposed, new_shape)
           reshaped_inputs.append(x_transposed)
           transposed = True
         elif x_ndim > 1:
           dims = list(range(1, x_ndim)) + [0]
           reshaped_inputs.append(K.permute_dimensions(x, dims))
           transposed = True
         else:
           # We don't transpose inputs if they are 1D vectors or scalars.
           reshaped_inputs.append(x)
       y = self._merge_function(reshaped_inputs)
       y_ndim = K.ndim(y)
       if transposed:
         # If inputs have been transposed, we have to transpose the output too.
         if y_ndim is None:
           y_shape = K.shape(y)
           y_ndim = K.shape(y_shape)[0]
           batch_size = y_shape[y_ndim - 1]
           new_shape = K.concatenate(
               [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
           y = K.reshape(y, (-1, batch_size))
           y = K.permute_dimensions(y, (1, 0))
           y = K.reshape(y, new_shape)
         elif y_ndim > 1:
           dims = [y_ndim - 1] + list(range(y_ndim - 1))
           y = K.permute_dimensions(y, dims)
       return y
   else:
     return self._merge_function(inputs)
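A minimal standalone sketch (not from the Keras source; toy shapes assumed) of the rank-equalisation step used above: the lower-rank tensor is expanded at axis=1 until it matches the highest rank, after which the backend concatenate can merge them.

from keras import backend as K
import numpy as np

a = K.variable(np.ones((4, 3)))          # rank 2: (batch, features)
b = K.variable(np.ones((4, 5, 3)))       # rank 3: (batch, time, features)
a = K.expand_dims(a, 1)                  # -> (4, 1, 3), ranks now match
merged = K.concatenate([b, a], axis=1)   # -> (4, 6, 3)
print(K.int_shape(merged))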
Example #2
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, list):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, list):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all([m is None for m in mask]):
         return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
     # is the same as the corresponding input.
     masks = []
     for input_i, mask_i in zip(inputs, mask):
         if mask_i is None:
          # Input is unmasked. Append all 1s to masks.
             masks.append(K.ones_like(input_i, dtype='bool'))
         elif K.ndim(mask_i) < K.ndim(input_i):
             # Mask is smaller than the input, expand it
             masks.append(K.expand_dims(mask_i))
         else:
             masks.append(mask_i)
     concatenated = K.concatenate(masks, axis=self.axis)
     return K.all(concatenated, axis=-1, keepdims=False)
Example #3
 def compute_mask(self, inputs, mask=None):
   if mask is None:
     return None
   if not isinstance(mask, list):
     raise ValueError('`mask` should be a list.')
   if not isinstance(inputs, list):
     raise ValueError('`inputs` should be a list.')
   if len(mask) != len(inputs):
     raise ValueError('The lists `inputs` and `mask` '
                      'should have the same length.')
   if all([m is None for m in mask]):
     return None
   # Make a list of masks while making sure
   # the dimensionality of each mask
   # is the same as the corresponding input.
   masks = []
   for input_i, mask_i in zip(inputs, mask):
     if mask_i is None:
       # Input is unmasked. Append all 1s to masks,
       # but cast it to bool first
       masks.append(K.cast(K.ones_like(input_i), 'bool'))
     elif K.ndim(mask_i) < K.ndim(input_i):
       # Mask is smaller than the input, expand it
       masks.append(K.expand_dims(mask_i))
     else:
       masks.append(mask_i)
   concatenated = K.concatenate(masks, axis=self.axis)
   return K.all(concatenated, axis=-1, keepdims=False)
Example #4
  def call(self, inputs, training=None, mask=None):
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask

    y = self.forward_layer.call(inputs, **kwargs)
    y_rev = self.backward_layer.call(inputs, **kwargs)
    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if 0 < self.layer.dropout + self.layer.recurrent_dropout:
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True
    return output
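A short usage sketch (hypothetical shapes, not part of the example above) of the Bidirectional wrapper whose call() is shown: with merge_mode='concat' the forward and backward outputs are joined on the last axis, so the per-step feature size doubles.

from keras.models import Sequential
from keras.layers import LSTM, Bidirectional

model = Sequential()
model.add(Bidirectional(LSTM(16, return_sequences=True),
                        merge_mode='concat',
                        input_shape=(10, 8)))   # 10 timesteps, 8 features
model.summary()                                 # output shape: (None, 10, 32)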
Example #5
    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask

        y = self.forward_layer.call(inputs, **kwargs)
        y_rev = self.backward_layer.call(inputs, **kwargs)
        if self.return_sequences:
            y_rev = K.reverse(y_rev, 1)
        if self.merge_mode == 'concat':
            output = K.concatenate([y, y_rev])
        elif self.merge_mode == 'sum':
            output = y + y_rev
        elif self.merge_mode == 'ave':
            output = (y + y_rev) / 2
        elif self.merge_mode == 'mul':
            output = y * y_rev
        elif self.merge_mode is None:
            output = [y, y_rev]

        # Properly set learning phase
        if (getattr(y, '_uses_learning_phase', False)
                or getattr(y_rev, '_uses_learning_phase', False)):
            if self.merge_mode is None:
                for out in output:
                    out._uses_learning_phase = True
            else:
                output._uses_learning_phase = True
        return output
Example #6
 def bias_initializer(_, *args, **kwargs):
     return K.concatenate([
         self.bias_initializer((self.filters, ), *args,
                               **kwargs),
         initializers.Ones()((self.filters, ), *args, **kwargs),
         self.bias_initializer((self.filters * 2, ), *args,
                               **kwargs),
     ])
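A standalone sketch (toy value filters=2, with Zeros standing in for self.bias_initializer) of the bias layout produced above: the segment belonging to the forget gate is initialised to ones, the remaining gate segments to the default initializer.

from keras import backend as K
from keras import initializers

filters = 2
zeros = initializers.Zeros()
bias = K.concatenate([
    zeros((filters,)),                  # input-gate bias
    initializers.Ones()((filters,)),    # forget-gate bias, initialised to 1
    zeros((filters * 2,)),              # cell and output-gate biases
])
print(K.eval(bias))                     # [0. 0. 1. 1. 0. 0. 0. 0.]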
Example #7
    def call(self, inputs, training=None, mask=None, initial_state=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask

        if initial_state is not None and has_arg(self.layer.call,
                                                 'initial_state'):
            if not isinstance(initial_state, list):
                raise ValueError(
                    'When passing `initial_state` to a Bidirectional RNN, the state '
                    'should be a list containing the states of the underlying RNNs. '
                    'Found: ' + str(initial_state))
            forward_state = initial_state[:len(initial_state) // 2]
            backward_state = initial_state[len(initial_state) // 2:]
            y = self.forward_layer.call(inputs,
                                        initial_state=forward_state,
                                        **kwargs)
            y_rev = self.backward_layer.call(inputs,
                                             initial_state=backward_state,
                                             **kwargs)
        else:
            y = self.forward_layer.call(inputs, **kwargs)
            y_rev = self.backward_layer.call(inputs, **kwargs)

        if self.return_state:
            states = y[1:] + y_rev[1:]
            y = y[0]
            y_rev = y_rev[0]

        if self.return_sequences:
            y_rev = K.reverse(y_rev, 1)
        if self.merge_mode == 'concat':
            output = K.concatenate([y, y_rev])
        elif self.merge_mode == 'sum':
            output = y + y_rev
        elif self.merge_mode == 'ave':
            output = (y + y_rev) / 2
        elif self.merge_mode == 'mul':
            output = y * y_rev
        elif self.merge_mode is None:
            output = [y, y_rev]

        # Properly set learning phase
        if (getattr(y, '_uses_learning_phase', False)
                or getattr(y_rev, '_uses_learning_phase', False)):
            if self.merge_mode is None:
                for out in output:
                    out._uses_learning_phase = True
            else:
                output._uses_learning_phase = True

        if self.return_state:
            if self.merge_mode is None:
                return output + states
            return [output] + states
        return output
Example #8
  def call(self, inputs,
           training=None,
           mask=None,
           initial_state=None,
           constants=None):
    """`Bidirectional.call` implements the same API as the wrapped `RNN`."""
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if generic_utils.has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask
    if generic_utils.has_arg(self.layer.call, 'constants'):
      kwargs['constants'] = constants

    if initial_state is not None and generic_utils.has_arg(
        self.layer.call, 'initial_state'):
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
Example #9
  def call(self, inputs, training=None, mask=None, initial_state=None):
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask

    if initial_state is not None and has_arg(self.layer.call, 'initial_state'):
      if not isinstance(initial_state, list):
        raise ValueError(
            'When passing `initial_state` to a Bidirectional RNN, the state '
            'should be a list containing the states of the underlying RNNs. '
            'Found: ' + str(initial_state))
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
Example #10
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, list):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, list):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all([m is None for m in mask]):
         return None
     masks = [K.expand_dims(m, 0) for m in mask if m is not None]
     return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
Example #11
 def compute_mask(self, inputs, mask=None):
   if mask is None:
     return None
   if not isinstance(mask, list):
     raise ValueError('`mask` should be a list.')
   if not isinstance(inputs, list):
     raise ValueError('`inputs` should be a list.')
   if len(mask) != len(inputs):
     raise ValueError('The lists `inputs` and `mask` '
                      'should have the same length.')
   if all([m is None for m in mask]):
     return None
   masks = [K.expand_dims(m, 0) for m in mask if m is not None]
   return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
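A small sketch (hypothetical masks) of what the reduction above computes: each mask is expanded at axis 0, the expanded masks are concatenated along that axis, and K.all takes their elementwise AND.

from keras import backend as K

m1 = K.cast(K.constant([[1, 1, 0]]), 'bool')
m2 = K.cast(K.constant([[1, 0, 1]]), 'bool')
stacked = K.concatenate([K.expand_dims(m1, 0), K.expand_dims(m2, 0)], axis=0)
combined = K.all(stacked, axis=0, keepdims=False)   # elementwise AND
print(K.eval(combined))                             # [[ True False False]]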
Example #12
  def call(self, inputs, training=None, mask=None, initial_state=None):
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if generic_utils.has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask

    if initial_state is not None and generic_utils.has_arg(
        self.layer.call, 'initial_state'):
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output
Example #13
  def preprocess_input(self, inputs, training=None):
    if self.implementation == 0:
      input_shape = inputs.get_shape().as_list()
      input_dim = input_shape[2]
      timesteps = input_shape[1]

      x_i = _time_distributed_dense(
          inputs,
          self.kernel_i,
          self.bias_i,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      x_f = _time_distributed_dense(
          inputs,
          self.kernel_f,
          self.bias_f,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      x_c = _time_distributed_dense(
          inputs,
          self.kernel_c,
          self.bias_c,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      x_o = _time_distributed_dense(
          inputs,
          self.kernel_o,
          self.bias_o,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
    else:
      return inputs
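A schematic sketch (toy shapes; plain variables instead of the real _time_distributed_dense projections) of the gate packing above: four (batch, timesteps, units) tensors are concatenated on axis=2, giving a single tensor of width 4 * units.

from keras import backend as K
import numpy as np

batch, timesteps, units = 2, 5, 3
x_i = K.variable(np.zeros((batch, timesteps, units)))
x_f = K.variable(np.ones((batch, timesteps, units)))
x_c = K.variable(np.zeros((batch, timesteps, units)))
x_o = K.variable(np.ones((batch, timesteps, units)))
packed = K.concatenate([x_i, x_f, x_c, x_o], axis=2)
print(K.int_shape(packed))   # (2, 5, 12)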
Example #14
    def call(self, inputs, memory, training=None):
        batch_size = int(inputs.shape[0])

        memory = K.reshape(memory, (batch_size, self.mem_slots, self.mem_size))
        inputs = self._linear(inputs, self.kernel_in, self.bias_in)
        inputs_reshape = K.expand_dims(inputs, axis=1)

        memory_plus_input = K.concatenate([memory, inputs_reshape], axis=1)
        next_memory = self._attend_over_memory(memory_plus_input)

        n = inputs_reshape.get_shape().as_list()[1]
        next_memory = next_memory[:, :-n, :]

        input_gate, forget_gate = self._create_gates(inputs_reshape, memory)
        next_memory = input_gate * K.tanh(next_memory)
        next_memory += forget_gate * memory
        next_memory = K.batch_flatten(next_memory)

        return next_memory, (next_memory, )
Example #15
  def preprocess_input(self, inputs, training=None):
    if self.implementation == 0:
      input_shape = inputs.get_shape().as_list()
      input_dim = input_shape[2]
      timesteps = input_shape[1]

      x_z = _time_distributed_dense(
          inputs,
          self.kernel_z,
          self.bias_z,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      x_r = _time_distributed_dense(
          inputs,
          self.kernel_r,
          self.bias_r,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      x_h = _time_distributed_dense(
          inputs,
          self.kernel_h,
          self.bias_h,
          self.dropout,
          input_dim,
          self.units,
          timesteps,
          training=training)
      return K.concatenate([x_z, x_r, x_h], axis=2)
    else:
      return inputs
Example #16
 def bias_initializer(shape, *args, **kwargs):
     return K.concatenate([
         self.bias_initializer((self.units * 5, ), *args, **kwargs),
         initializers.Ones()((self.units, ), *args, **kwargs),
         self.bias_initializer((self.units * 2, ), *args, **kwargs),
     ])
Example #17
 def bias_initializer(_, *args, **kwargs):
   return K.concatenate([
       self.bias_initializer((self.units,), *args, **kwargs),
       initializers.Ones()((self.units,), *args, **kwargs),
       self.bias_initializer((self.units * 2,), *args, **kwargs),
   ])
Example #18
 def call(self, inputs):
     if not isinstance(inputs, list):
         raise ValueError('A `Concatenate` layer should be called '
                          'on a list of inputs.')
     return K.concatenate(inputs, axis=self.axis)
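A brief usage sketch (hypothetical input shapes) of the Concatenate layer whose call() is shown above: the layer joins a list of tensors along self.axis, here the last axis.

from keras.layers import Input, Concatenate
from keras.models import Model

a = Input(shape=(4, 8))
b = Input(shape=(4, 8))
merged = Concatenate(axis=-1)([a, b])        # (None, 4, 16)
model = Model(inputs=[a, b], outputs=merged)
print(model.output_shape)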
Example #19
 def _merge_function(self, inputs):
   return K.concatenate(inputs, axis=self.axis)
Example #20
 def _merge_function(self, inputs):
     return K.concatenate(inputs, axis=self.axis)
Example #21
 def call(self, inputs):
   if not isinstance(inputs, list):
     raise ValueError('A `Concatenate` layer should be called '
                      'on a list of inputs.')
   return K.concatenate(inputs, axis=self.axis)
Example #22
 def call(self, inputs):
     if not isinstance(inputs, list):
         raise ValueError('A merge layer should be called '
                          'on a list of inputs.')
     if self._reshape_required:
         reshaped_inputs = []
         input_ndims = list(map(K.ndim, inputs))
         if None not in input_ndims:
             # If ranks of all inputs are available,
             # we simply expand each of them at axis=1
             # until all of them have the same rank.
             max_ndim = max(input_ndims)
             for x in inputs:
                 x_ndim = K.ndim(x)
                 for _ in range(max_ndim - x_ndim):
                     x = K.expand_dims(x, 1)
                 reshaped_inputs.append(x)
             return self._merge_function(reshaped_inputs)
         else:
             # Transpose all inputs so that batch size is the last dimension.
             # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
             transposed = False
             for x in inputs:
                 x_ndim = K.ndim(x)
                 if x_ndim is None:
                     x_shape = K.shape(x)
                     batch_size = x_shape[0]
                     new_shape = K.concatenate(
                         [x_shape[1:],
                          K.expand_dims(batch_size)])
                     x_transposed = K.reshape(
                         x, K.stack([batch_size,
                                     K.prod(x_shape[1:])]))
                     x_transposed = K.permute_dimensions(
                         x_transposed, (1, 0))
                     x_transposed = K.reshape(x_transposed, new_shape)
                     reshaped_inputs.append(x_transposed)
                     transposed = True
                 elif x_ndim > 1:
                     dims = list(range(1, x_ndim)) + [0]
                     reshaped_inputs.append(K.permute_dimensions(x, dims))
                     transposed = True
                 else:
                     # We don't transpose inputs if they are 1D vectors or scalars.
                     reshaped_inputs.append(x)
             y = self._merge_function(reshaped_inputs)
             y_ndim = K.ndim(y)
             if transposed:
                 # If inputs have been transposed, we have to transpose the output too.
                 if y_ndim is None:
                     y_shape = K.shape(y)
                     y_ndim = K.shape(y_shape)[0]
                     batch_size = y_shape[y_ndim - 1]
                     new_shape = K.concatenate(
                         [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
                     y = K.reshape(y, (-1, batch_size))
                     y = K.permute_dimensions(y, (1, 0))
                     y = K.reshape(y, new_shape)
                 elif y_ndim > 1:
                     dims = [y_ndim - 1] + list(range(y_ndim - 1))
                     y = K.permute_dimensions(y, dims)
             return y
     else:
         return self._merge_function(inputs)
Example #23
    style_reference_images.append(K.variable(preprocess_image(style_path)))

# this will contain our generated image
combination_image = K.placeholder((1, img_width, img_height, 3))

image_tensors = [base_image]
for style_image_tensor in style_reference_images:
    image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)

nb_tensors = len(image_tensors)
print("nb_tensors", nb_tensors)
nb_style_images = nb_tensors - 2  # Content and Output image not considered

# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)

shape = (nb_tensors, img_width, img_height, 3)
inp_shape = (img_width, img_height, 3)

ip = Input(tensor=input_tensor, batch_shape=shape)

model = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor,
              input_shape=inp_shape, pooling='max')

print("Model loaded")


# get the symbolic outputs of each "key" layer (we gave them unique names)
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
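A possible continuation of the script above (the layer name 'block5_conv2' is only an illustration): the dictionaries are typically used to pull a named VGG16 feature map and slice out the part that belongs to the generated image.

layer_features = outputs_dict['block5_conv2']
print(shape_dict['block5_conv2'])
# the last slice along the batch axis corresponds to the generated image
combination_features = layer_features[nb_tensors - 1, :, :, :]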