Code Example #1
File: merge.py  Project: japrogramer/tensorflow
 def call(self, inputs):
   if not isinstance(inputs, list):
     raise ValueError('A merge layer should be called on a list of inputs.')
   if self._reshape_required:
     reshaped_inputs = []
     input_ndims = list(map(K.ndim, inputs))
     if None not in input_ndims:
       # If ranks of all inputs are available,
       # we simply expand each of them at axis=1
       # until all of them have the same rank.
       max_ndim = max(input_ndims)
       for x in inputs:
         x_ndim = K.ndim(x)
         for _ in range(max_ndim - x_ndim):
           x = K.expand_dims(x, 1)
         reshaped_inputs.append(x)
       return self._merge_function(reshaped_inputs)
     else:
       # Transpose all inputs so that batch size is the last dimension.
       # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
       transposed = False
       for x in inputs:
         x_ndim = K.ndim(x)
         if x_ndim is None:
           x_shape = K.shape(x)
           batch_size = x_shape[0]
           new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
           x_transposed = K.reshape(x,
                                    K.stack([batch_size,
                                             K.prod(x_shape[1:])]))
           x_transposed = K.permute_dimensions(x_transposed, (1, 0))
           x_transposed = K.reshape(x_transposed, new_shape)
           reshaped_inputs.append(x_transposed)
           transposed = True
         elif x_ndim > 1:
           dims = list(range(1, x_ndim)) + [0]
           reshaped_inputs.append(K.permute_dimensions(x, dims))
           transposed = True
         else:
           # We don't transpose inputs if they are 1D vectors or scalars.
           reshaped_inputs.append(x)
       y = self._merge_function(reshaped_inputs)
       y_ndim = K.ndim(y)
       if transposed:
         # If inputs have been transposed, we have to transpose the output too.
         if y_ndim is None:
           y_shape = K.shape(y)
           y_ndim = K.shape(y_shape)[0]
           batch_size = y_shape[y_ndim - 1]
           new_shape = K.concatenate(
               [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
           y = K.reshape(y, (-1, batch_size))
           y = K.permute_dimensions(y, (1, 0))
           y = K.reshape(y, new_shape)
         elif y_ndim > 1:
           dims = [y_ndim - 1] + list(range(y_ndim - 1))
           y = K.permute_dimensions(y, dims)
       return y
   else:
     return self._merge_function(inputs)
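The rank-expansion branch above is easy to check in isolation. Below is a minimal sketch (not from the source; it assumes the TensorFlow backend imported as `K` from `tensorflow.keras.backend`) that reproduces the step on two concrete tensors:

import numpy as np
import tensorflow.keras.backend as K

a = K.constant(np.ones((2, 3)))     # rank 2: (batch, features)
b = K.constant(np.ones((2, 5, 3)))  # rank 3: (batch, time, features)

inputs = [a, b]
max_ndim = max(K.ndim(x) for x in inputs)
reshaped = []
for x in inputs:
  for _ in range(max_ndim - K.ndim(x)):
    x = K.expand_dims(x, 1)  # insert a new axis at position 1
  reshaped.append(x)

print([K.int_shape(x) for x in reshaped])  # [(2, 1, 3), (2, 5, 3)]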
Code Example #2
File: recurrent.py  Project: 1000sprites/tensorflow
def _time_distributed_dense(x,
                            w,
                            b=None,
                            dropout=None,
                            input_dim=None,
                            output_dim=None,
                            timesteps=None,
                            training=None):
  """Apply `y . w + b` for every temporal slice y of x.

  Arguments:
      x: input tensor.
      w: weight matrix.
      b: optional bias vector.
      dropout: float in (0, 1); fraction of the input units to drop (the
          same dropout mask is reused for every temporal slice of the input).
      input_dim: integer; optional dimensionality of the input.
      output_dim: integer; optional dimensionality of the output.
      timesteps: integer; optional number of timesteps.
      training: training phase tensor or boolean.

  Returns:
      Output tensor.
  """
  if not input_dim:
    input_dim = K.shape(x)[2]
  if not timesteps:
    timesteps = K.shape(x)[1]
  if not output_dim:
    output_dim = K.shape(w)[1]

  if dropout is not None and 0. < dropout < 1.:
    # apply the same dropout pattern at every timestep
    ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
    dropout_matrix = K.dropout(ones, dropout)
    expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
    x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

  # collapse time dimension and batch dimension together
  x = K.reshape(x, (-1, input_dim))
  x = K.dot(x, w)
  if b is not None:
    x = K.bias_add(x, b)
  # reshape to 3D tensor
  if K.backend() == 'tensorflow':
    x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
    x.set_shape([None, None, output_dim])
  else:
    x = K.reshape(x, (-1, timesteps, output_dim))
  return x
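A quick equivalence check (a sketch, not part of the source; it assumes `_time_distributed_dense` from the snippet above is in scope and the TensorFlow backend is in use): the function should match computing `x_t . w + b` independently at every timestep.

import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K

x = K.constant(np.random.rand(4, 7, 3))  # (batch, timesteps, input_dim)
w = K.constant(np.random.rand(3, 5))     # (input_dim, output_dim)
b = K.constant(np.random.rand(5))        # (output_dim,)

y = _time_distributed_dense(x, w, b, input_dim=3, output_dim=5, timesteps=7)
y_ref = tf.einsum('bti,io->bto', x, w) + b  # per-timestep dense, for reference

print(np.allclose(K.eval(y), K.eval(y_ref)))  # True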
Code Example #3
File: wrappers.py  Project: keithc61/tensorflow
  def call(self, inputs, training=None, mask=None):
    kwargs = {}
    if has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    uses_learning_phase = False  # pylint: disable=redefined-outer-name

    input_shape = K.int_shape(inputs)
    if input_shape[0]:
      # batch size matters, use rnn-based implementation
      def step(x, _):
        global uses_learning_phase  # pylint: disable=global-variable-undefined
        output = self.layer.call(x, **kwargs)
        if hasattr(output, '_uses_learning_phase'):
          uses_learning_phase = (output._uses_learning_phase or
                                 uses_learning_phase)
        return output, []

      _, outputs, _ = K.rnn(
          step,
          inputs,
          initial_states=[],
          unroll=False)
      y = outputs
    else:
      # No batch size specified, therefore the layer will be able
      # to process batches of any size.
      # We can go with reshape-based implementation for performance.
      input_length = input_shape[1]
      if not input_length:
        input_length = K.shape(inputs)[1]
      # Shape: (num_samples * timesteps, ...). And track the
      # transformation in self._input_map.
      input_uid = tf_layers_util.object_list_uid(inputs)
      inputs = K.reshape(inputs, (-1,) + input_shape[2:])
      self._input_map[input_uid] = inputs
      # (num_samples * timesteps, ...)
      y = self.layer.call(inputs, **kwargs)
      if hasattr(y, '_uses_learning_phase'):
        uses_learning_phase = y._uses_learning_phase
      # Shape: (num_samples, timesteps, ...)
      output_shape = self.compute_output_shape(input_shape).as_list()
      y = K.reshape(y, (-1, input_length) + tuple(output_shape[2:]))

    # Apply activity regularizer if any:
    if (hasattr(self.layer, 'activity_regularizer') and
        self.layer.activity_regularizer is not None):
      regularization_loss = self.layer.activity_regularizer(y)
      self.add_loss(regularization_loss, inputs)

    if uses_learning_phase:
      y._uses_learning_phase = True
    return y
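This is TimeDistributed's `call`: with a known static batch size it iterates over timesteps via `K.rnn`, otherwise it folds the time dimension into the batch dimension, applies the wrapped layer once, and reshapes back. A usage sketch with the public layer (assuming a current tf.keras):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(8), input_shape=(10, 16)),
])
print(model.output_shape)  # (None, 10, 8): Dense applied at each timestep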
Code Example #4
def content_loss(base, combination):
    channel_dim = -1

    try:
        channels = K.int_shape(base)[channel_dim]
    except TypeError:
        channels = K.shape(base)[channel_dim]
    size = img_width * img_height

    if args.content_loss_type == 1:
        multiplier = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        multiplier = 1. / (channels * size)
    else:
        multiplier = 1.

    return multiplier * K.sum(K.square(combination - base))
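`content_loss` relies on module-level globals: `img_width`, `img_height`, and the parsed CLI `args`. A usage sketch with those globals stubbed out (hypothetical values, not from the original script):

from types import SimpleNamespace

import numpy as np
import tensorflow.keras.backend as K

# Stand-ins for the script's argparse result and image-size globals.
args = SimpleNamespace(content_loss_type=1)
img_width = img_height = 32

base = K.constant(np.random.rand(img_width, img_height, 3))
combination = K.constant(np.random.rand(img_width, img_height, 3))
print(float(content_loss(base, combination)))  # scaled sum of squared diffs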
Code Example #5
File: noise.py  Project: ChengYuXiang/tensorflow
 def _get_noise_shape(self, inputs):
   return self.noise_shape if self.noise_shape else K.shape(inputs)
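The noise shape controls how the dropout mask broadcasts. A minimal sketch (assuming the TensorFlow backend) where every timestep of a sample shares one feature mask:

import numpy as np
import tensorflow.keras.backend as K

x = K.constant(np.ones((2, 4, 3)))  # (batch, time, features)
y = K.eval(K.dropout(x, level=0.5, noise_shape=(2, 1, 3)))
print(y[0])  # all 4 timestep rows are identical within a sample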
Code Example #6
File: core.py  Project: ChengYuXiang/tensorflow
 def _get_noise_shape(self, inputs):
   input_shape = K.shape(inputs)
   if self.data_format == 'channels_first':
     return (input_shape[0], input_shape[1], 1, 1, 1)
   elif self.data_format == 'channels_last':
     return (input_shape[0], 1, 1, 1, input_shape[4])
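This is a spatial-dropout style noise shape for 5D inputs: one keep/drop decision per sample and channel, broadcast across all spatial positions. A small sketch of the effect (assuming channels_last and the TensorFlow backend):

import numpy as np
import tensorflow.keras.backend as K

x = K.constant(np.ones((1, 2, 2, 2, 4)))  # (batch, d1, d2, d3, channels)
y = K.eval(K.dropout(x, level=0.5, noise_shape=(1, 1, 1, 1, 4)))
# Each channel is all zeros or all 2.0 (kept values are rescaled by 1/0.5).
print(y[0, 0, 0, 0])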
Code Example #7
File: core.py  Project: ChengYuXiang/tensorflow
 def _get_noise_shape(self, inputs):
   input_shape = K.shape(inputs)
   noise_shape = (input_shape[0], 1, input_shape[2])
   return noise_shape
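With noise shape `(batch, 1, features)` the same features are dropped at every timestep; this is the behaviour of SpatialDropout1D. A usage sketch with the public layer (assuming a current tf.keras):

import tensorflow as tf

layer = tf.keras.layers.SpatialDropout1D(0.5)
out = layer(tf.ones((2, 4, 3)), training=True)
print(out[0])  # identical rows: one mask shared across all 4 timesteps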
Code Example #8
File: noise.py  Project: ChengYuXiang/tensorflow
 def noised():
   return inputs + K.random_normal(
       shape=K.shape(inputs), mean=0., stddev=self.stddev)
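In the layer's `call`, this closure is handed to `K.in_train_phase` so the noise is only added while training. A self-contained sketch of that pattern (the function name is illustrative, not from the source):

import tensorflow.keras.backend as K

def gaussian_noise_output(inputs, stddev, training=None):
  def noised():
    return inputs + K.random_normal(
        shape=K.shape(inputs), mean=0., stddev=stddev)
  # Noisy branch at training time, identity at inference time.
  return K.in_train_phase(noised, inputs, training=training)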
Code Example #9
File: noise.py  Project: ChengYuXiang/tensorflow
 def noised():
   stddev = np.sqrt(self.rate / (1.0 - self.rate))
   return inputs * K.random_normal(
       shape=K.shape(inputs), mean=1.0, stddev=stddev)
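The stddev `sqrt(rate / (1 - rate))` makes this 1-centered multiplicative noise match the mean and variance of rescaled Bernoulli dropout. A quick numeric check (a sketch):

import numpy as np

rate = 0.3
keep = 1.0 - rate
mask = np.random.binomial(1, keep, size=1_000_000) / keep  # rescaled mask
print(mask.mean(), mask.var())   # ~1.0 and ~rate / (1 - rate)
print(1.0, rate / (1.0 - rate))  # moments of the matching Gaussian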
Code Example #10
File: core.py  Project: ChengYuXiang/tensorflow
 def call(self, inputs):
   return K.reshape(inputs, (K.shape(inputs)[0],) + self.target_shape)
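`K.shape(inputs)[0]` keeps the (possibly dynamic) batch dimension, so `target_shape` only describes the per-sample dimensions. A usage sketch with the public Reshape layer (assuming a current tf.keras):

import tensorflow as tf

layer = tf.keras.layers.Reshape((3, 4))  # target_shape excludes the batch axis
print(layer(tf.ones((2, 12))).shape)     # (2, 3, 4)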