Code Example #1
    def weighted(y_true, y_pred, weights, mask=None):
        """Wrapper function.

        Arguments:
            y_true: `y_true` argument of `fn`.
            y_pred: `y_pred` argument of `fn`.
            weights: Weights tensor.
            mask: Mask tensor.

        Returns:
            Scalar tensor.
        """
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            mask = math_ops.cast(mask, K.floatx())
            # mask should have the same shape as score_array
            score_array *= mask
            #  the loss per batch should be proportional
            #  to the number of unmasked samples.
            score_array /= K.mean(mask)

        # apply sample weighting
        if weights is not None:
            # reduce score_array to same ndim as weight array
            ndim = K.ndim(score_array)
            weight_ndim = K.ndim(weights)
            score_array = K.mean(score_array,
                                 axis=list(range(weight_ndim, ndim)))
            score_array *= weights
            score_array /= K.mean(
                math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
        return K.mean(score_array)
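For context, `weighted` closes over a base loss `fn` supplied by an enclosing factory, which this excerpt omits. A minimal usage sketch, under the assumption that the factory captured `fn = tf.keras.losses.mean_squared_error` and returned `weighted`:

import tensorflow.keras.backend as K

y_true = K.constant([[1.0], [2.0]])
y_pred = K.constant([[1.5], [2.5]])
weights = K.constant([1.0, 0.0])  # the second sample is zero-weighted

# Assumption: `weighted` was returned by the factory described above.
loss = weighted(y_true, y_pred, weights)
# Dividing by the mean of the nonzero-weight indicator makes the result
# the average loss over the weighted samples only (0.25 here).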
Code Example #2
File: merge.py Project: japrogramer/tensorflow
 def call(self, inputs):
   if not isinstance(inputs, list):
     raise ValueError('A merge layer should be called ' 'on a list of inputs.')
   if self._reshape_required:
     reshaped_inputs = []
     input_ndims = list(map(K.ndim, inputs))
     if None not in input_ndims:
       # If ranks of all inputs are available,
       # we simply expand each of them at axis=1
       # until all of them have the same rank.
       max_ndim = max(input_ndims)
       for x in inputs:
         x_ndim = K.ndim(x)
         for _ in range(max_ndim - x_ndim):
           x = K.expand_dims(x, 1)
         reshaped_inputs.append(x)
       return self._merge_function(reshaped_inputs)
     else:
       # Transpose all inputs so that batch size is the last dimension.
       # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
       transposed = False
       for x in inputs:
         x_ndim = K.ndim(x)
         if x_ndim is None:
           x_shape = K.shape(x)
           batch_size = x_shape[0]
           new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
           x_transposed = K.reshape(x,
                                    K.stack([batch_size,
                                             K.prod(x_shape[1:])]))
           x_transposed = K.permute_dimensions(x_transposed, (1, 0))
           x_transposed = K.reshape(x_transposed, new_shape)
           reshaped_inputs.append(x_transposed)
           transposed = True
         elif x_ndim > 1:
           dims = list(range(1, x_ndim)) + [0]
           reshaped_inputs.append(K.permute_dimensions(x, dims))
           transposed = True
         else:
           # We don't transpose inputs if they are 1D vectors or scalars.
           reshaped_inputs.append(x)
       y = self._merge_function(reshaped_inputs)
       y_ndim = K.ndim(y)
       if transposed:
         # If inputs have been transposed, we have to transpose the output too.
         if y_ndim is None:
           y_shape = K.shape(y)
           y_ndim = K.shape(y_shape)[0]
           batch_size = y_shape[y_ndim - 1]
           new_shape = K.concatenate(
               [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
           y = K.reshape(y, (-1, batch_size))
           y = K.permute_dimensions(y, (1, 0))
           y = K.reshape(y, new_shape)
         elif y_ndim > 1:
           dims = [y_ndim - 1] + list(range(y_ndim - 1))
           y = K.permute_dimensions(y, dims)
       return y
   else:
     return self._merge_function(inputs)
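For intuition about the first branch, a small standalone sketch of the rank alignment via K.expand_dims, assuming static ranks are known:

import tensorflow.keras.backend as K

a = K.constant([[1., 2.], [3., 4.]])        # rank 2, shape (2, 2)
b = K.constant([[[1., 2.]], [[3., 4.]]])    # rank 3, shape (2, 1, 2)
max_ndim = max(K.ndim(a), K.ndim(b))
while K.ndim(a) < max_ndim:
    a = K.expand_dims(a, 1)                 # (2, 2) -> (2, 1, 2)
# Both tensors now have rank 3, so an elementwise merge can broadcast.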
Code Example #3
  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the mask to floatX to avoid float64 upcasting in theano
      mask = math_ops.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      #  the loss per batch should be proportional
      #  to the number of unmasked samples.
      score_array /= K.mean(mask)

    # apply sample weighting
    if weights is not None:
      # reduce score_array to same ndim as weight array
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
      score_array *= weights
      score_array /= K.mean(
          math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)
Code Example #4
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, list):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, list):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all([m is None for m in mask]):
         return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
     # is the same as the corresponding input.
     masks = []
     for input_i, mask_i in zip(inputs, mask):
         if mask_i is None:
              # Input is unmasked. Append an all-True (boolean) mask.
             masks.append(K.ones_like(input_i, dtype='bool'))
         elif K.ndim(mask_i) < K.ndim(input_i):
             # Mask is smaller than the input, expand it
             masks.append(K.expand_dims(mask_i))
         else:
             masks.append(mask_i)
     concatenated = K.concatenate(masks, axis=self.axis)
     return K.all(concatenated, axis=-1, keepdims=False)
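A hedged sketch of the combination step at the end, assuming two (2, 3) inputs where only the first carries a mask:

import tensorflow as tf
import tensorflow.keras.backend as K

x2 = K.ones((2, 3))
mask1 = tf.constant([[True, True, False],
                     [True, True, True]])
masks = [mask1, K.ones_like(x2, dtype='bool')]      # x2 is unmasked
combined = K.all(K.concatenate(masks, axis=-1), axis=-1)
# combined == [False, True]: an entry survives only if every
# concatenated mask value for it is True.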
Code Example #5
File: merge.py Project: 1000sprites/tensorflow
 def compute_mask(self, inputs, mask=None):
   if mask is None:
     return None
   if not isinstance(mask, list):
     raise ValueError('`mask` should be a list.')
   if not isinstance(inputs, list):
     raise ValueError('`inputs` should be a list.')
   if len(mask) != len(inputs):
     raise ValueError('The lists `inputs` and `mask` '
                      'should have the same length.')
   if all([m is None for m in mask]):
     return None
   # Make a list of masks while making sure
   # the dimensionality of each mask
   # is the same as the corresponding input.
   masks = []
   for input_i, mask_i in zip(inputs, mask):
     if mask_i is None:
       # Input is unmasked. Append all 1s to masks,
       # but cast it to bool first
       masks.append(K.cast(K.ones_like(input_i), 'bool'))
     elif K.ndim(mask_i) < K.ndim(input_i):
       # Mask is smaller than the input, expand it
       masks.append(K.expand_dims(mask_i))
     else:
       masks.append(mask_i)
   concatenated = K.concatenate(masks, axis=self.axis)
   return K.all(concatenated, axis=-1, keepdims=False)
Code Example #6
def style_loss(style, combination, nb_channels=None):
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3

    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = nb_channels if nb_channels is not None else 3  # default to RGB
    size = img_width * img_height
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
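Note that `style_loss` reads `img_width` and `img_height` from module-level globals, as the neural-style scripts this excerpt resembles do. A hedged usage sketch under that assumption, with `gram_matrix` as defined in Code Example #21:

import tensorflow.keras.backend as K

# Assumption: img_width = img_height = 256 are set at module level.
style = K.random_uniform((img_width, img_height, 3))
combination = K.random_uniform((img_width, img_height, 3))
loss = style_loss(style, combination)   # scalar tensor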
Code Example #7
def total_variation_loss(x):
    assert K.ndim(x) == 4
    a = K.square(x[:, :img_width - 1, :img_height - 1, :] -
                 x[:, 1:, :img_height - 1, :])
    b = K.square(x[:, :img_width - 1, :img_height - 1, :] -
                 x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
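A matching sketch for the total-variation penalty, again assuming the module-level image dimensions; the two squared differences compare each pixel with its right and bottom neighbors:

x = K.random_uniform((1, img_width, img_height, 3))   # one channels_last image
tv = total_variation_loss(x)   # scalar; small when the image is smooth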
Code Example #8
File: imagenet_utils.py Project: Utsal20/poGANmon
def _preprocess_symbolic_input(x, data_format, mode):
    """Preprocesses a tensor encoding a batch of images.

    Arguments:
        x: Input tensor, 3D or 4D.
        data_format: Data format of the image tensor.
        mode: One of "caffe", "tf" or "torch".
            - caffe: will convert the images from RGB to BGR,
                then will zero-center each color channel with
                respect to the ImageNet dataset,
                without scaling.
            - tf: will scale pixels between -1 and 1,
                sample-wise.
            - torch: will scale pixels between 0 and 1 and then
                will normalize each channel with respect to the
                ImageNet dataset.

    Returns:
        Preprocessed tensor.
    """
    global _IMAGENET_MEAN

    if mode == 'tf':
        x /= 127.5
        x -= 1.
        return x

    if mode == 'torch':
        x /= 255.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == 'channels_first':
            # 'RGB'->'BGR'
            if K.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    if _IMAGENET_MEAN is None:
        _IMAGENET_MEAN = constant_op.constant(-np.array(mean),
                                              dtype=K.floatx())

    # Zero-center by mean pixel
    if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
        x = K.bias_add(x, math_ops.cast(_IMAGENET_MEAN, K.dtype(x)),
                       data_format)
    else:
        x = K.bias_add(x, _IMAGENET_MEAN, data_format)
    if std is not None:
        x /= std
    return x
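To make the modes concrete, a toy check of the 'tf' branch (the 'caffe' branch instead flips RGB to BGR and subtracts the ImageNet channel means; 'torch' scales to [0, 1] and divides by per-channel std):

x = K.constant([[[[0.0, 127.5, 255.0]]]])               # shape (1, 1, 1, 3)
y = _preprocess_symbolic_input(x, 'channels_last', 'tf')
# y == [[[[-1.0, 0.0, 1.0]]]]: pixels rescaled sample-wise to [-1, 1]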
Code Example #9
def _preprocess_symbolic_input(x, data_format, mode):
  """Preprocesses a tensor encoding a batch of images.

  Arguments:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.
          - torch: will scale pixels between 0 and 1 and then
              will normalize each channel with respect to the
              ImageNet dataset.

  Returns:
      Preprocessed tensor.
  """
  global _IMAGENET_MEAN

  if mode == 'tf':
    x /= 127.5
    x -= 1.
    return x

  if mode == 'torch':
    x /= 255.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
  else:
    if data_format == 'channels_first':
      # 'RGB'->'BGR'
      if K.ndim(x) == 3:
        x = x[::-1, ...]
      else:
        x = x[:, ::-1, ...]
    else:
      # 'RGB'->'BGR'
      x = x[..., ::-1]
    mean = [103.939, 116.779, 123.68]
    std = None

  if _IMAGENET_MEAN is None:
    _IMAGENET_MEAN = K.constant(-np.array(mean))

  # Zero-center by mean pixel
  if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
    x = K.bias_add(x, K.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
  else:
    x = K.bias_add(x, _IMAGENET_MEAN, data_format)
  if std is not None:
    x /= std
  return x
Code Example #10
File: merge.py Project: 1000sprites/tensorflow
 def call(self, inputs):
   x1 = inputs[0]
   x2 = inputs[1]
   if isinstance(self.axes, int):
     if self.axes < 0:
       axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
     else:
       axes = [self.axes] * 2
   else:
     axes = []
     for i in range(len(self.axes)):
       if self.axes[i] < 0:
         axes.append(self.axes[i] % K.ndim(inputs[i]))
       else:
         axes.append(self.axes[i])
   if self.normalize:
     x1 = K.l2_normalize(x1, axis=axes[0])
     x2 = K.l2_normalize(x2, axis=axes[1])
   output = K.batch_dot(x1, x2, axes)
   return output
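The heavy lifting is K.batch_dot, which contracts one axis of each input per batch sample. A minimal shape sketch:

import tensorflow.keras.backend as K

x1 = K.random_uniform((32, 5, 8))
x2 = K.random_uniform((32, 8, 7))
out = K.batch_dot(x1, x2, axes=[2, 1])   # contract axis 2 of x1 with axis 1 of x2
# out has shape (32, 5, 7): a matrix product per sample.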
Code Example #11
    def call(self, inputs):
        if self.data_format == 'channels_first':
            permutation = [0]
            permutation.extend([i for i in range(2, K.ndim(inputs))])
            permutation.append(1)
            inputs = array_ops.transpose(inputs, perm=permutation)

        outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
        if not context.executing_eagerly():
            outputs.set_shape(self.compute_output_shape(inputs.get_shape()))
        return outputs
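This is essentially the core of a Flatten layer: everything after the batch dimension is collapsed into one axis. A standalone sketch of the reshape:

import tensorflow as tf

x = tf.ones((4, 2, 3, 5))                        # (batch, d1, d2, d3)
flat = tf.reshape(x, (tf.shape(x)[0], -1))       # shape (4, 30)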
Code Example #12
File: merge.py Project: 568xiaoma/WeChatProgram
 def call(self, inputs):
     x1 = inputs[0]
     x2 = inputs[1]
     if isinstance(self.axes, int):
         if self.axes < 0:
             axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
         else:
             axes = [self.axes] * 2
     else:
         axes = []
         for i in range(len(self.axes)):
             if self.axes[i] < 0:
                 axes.append(self.axes[i] % K.ndim(inputs[i]))
             else:
                 axes.append(self.axes[i])
     if self.normalize:
         x1 = K.l2_normalize(x1, axis=axes[0])
         x2 = K.l2_normalize(x2, axis=axes[1])
     output = K.batch_dot(x1, x2, axes)
     return output
Code Example #13
File: merge.py Project: japrogramer/tensorflow
 def _merge_function(self, inputs):
   if len(inputs) != 2:
     raise ValueError('A `Dot` layer should be called ' 'on exactly 2 inputs')
   x1 = inputs[0]
   x2 = inputs[1]
   if isinstance(self.axes, int):
     if self.axes < 0:
       axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
     else:
       axes = [self.axes] * 2
   else:
     axes = []
     for i in range(len(self.axes)):
       if self.axes[i] < 0:
         axes.append(self.axes[i] % K.ndim(inputs[i]))
       else:
         axes.append(self.axes[i])
   if self.normalize:
     x1 = K.l2_normalize(x1, axis=axes[0])
     x2 = K.l2_normalize(x2, axis=axes[1])
   output = K.batch_dot(x1, x2, axes)
   return output
Code Example #14
 def _merge_function(self, inputs):
     if len(inputs) != 2:
         raise ValueError('A `Dot` layer should be called '
                          'on exactly 2 inputs')
     x1 = inputs[0]
     x2 = inputs[1]
     if isinstance(self.axes, int):
         if self.axes < 0:
             axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
         else:
             axes = [self.axes] * 2
     else:
         axes = []
         for i in range(len(self.axes)):
             if self.axes[i] < 0:
                 axes.append(self.axes[i] % K.ndim(inputs[i]))
             else:
                 axes.append(self.axes[i])
     if self.normalize:
         x1 = K.l2_normalize(x1, axis=axes[0])
         x2 = K.l2_normalize(x2, axis=axes[1])
     output = K.batch_dot(x1, x2, axes)
     return output
Code Example #15
def _preprocess_symbolic_input(x, data_format, mode):
  """Preprocesses a symbolic image tensor.

  Arguments:
      x: symbolic tensor, 3D or 4D.
      data_format: data format of the image tensor.
      mode: One of "caffe", "tf".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.

  Returns:
      Preprocessed tensor.
  """
  global _IMAGENET_MEAN

  if mode == 'tf':
    x /= 127.5
    x -= 1.
    return x

  if data_format == 'channels_first':
    # 'RGB'->'BGR'
    if K.ndim(x) == 3:
      x = x[::-1, ...]
    else:
      x = x[:, ::-1, ...]
  else:
    # 'RGB'->'BGR'
    x = x[..., ::-1]

  if _IMAGENET_MEAN is None:
    _IMAGENET_MEAN = K.constant(-np.array([103.939, 116.779, 123.68]))
  # Zero-center by mean pixel
  if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
    x = K.bias_add(x, K.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
  else:
    x = K.bias_add(x, _IMAGENET_MEAN, data_format)
  return x
Code Example #16
File: activations.py Project: syed-ahmed/tensorflow
def softmax(x, axis=-1):
  """Softmax activation function.

  Arguments:
      x : Tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation.

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    return nn.softmax(x)
  elif ndim > 2:
    e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
    s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D')
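A hedged check that the ndim > 2 branch reproduces a last-axis softmax; subtracting the per-slice maximum before exponentiating is the usual numerical-stability trick and does not change the result:

import tensorflow as tf

x = tf.random.uniform((2, 3, 4))
e = tf.exp(x - tf.reduce_max(x, axis=-1, keepdims=True))
manual = e / tf.reduce_sum(e, axis=-1, keepdims=True)
# tf.nn.softmax(x, axis=-1) matches `manual` up to floating-point error.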
Code Example #17
def softmax(x, axis=-1):
  """Softmax activation function.

  Arguments:
      x : Tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation.

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    return K.softmax(x)
  elif ndim > 2:
    e = K.exp(x - K.max(x, axis=axis, keepdims=True))
    s = K.sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D')
Code Example #18
def _model_loss(model, inputs, targets):
  """Calculates the loss for a given model.

  Arguments:
     model: The model on which metrics are being calculated.
     inputs: The inputs of the given model. This is typically the mini batch of
              data that is fed to the model.
     targets: The predictions or targets of the given model.

  Returns:
     Returns the model output, total loss and loss value calculated using the
     specified loss function. The total loss includes regularization losses and
     applies masking and sample weighting to the loss value.
  """
  total_loss = None
  if len(inputs) == 1:
    outs = model.call(inputs[0])
  else:
    outs = model.call(inputs)
  if not isinstance(outs, list):
    outs = [outs]

  if not isinstance(targets, list):
    targets = [targets]

  loss_metrics = []
  with K.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      # compute the loss
      output_loss = _eager_loss_fn(outs[i], targets[i], loss_fn,
                                   model.output_names[i])
      loss_metrics.append(K.mean(output_loss))

      mask = outs[i]._keras_mask
      # adapted from weighted_loss_fn
      if mask is not None:
        # mask should have the same shape as output_loss
        output_loss *= mask
        #  the loss per batch should be proportional
        #  to the number of unmasked samples.
        output_loss /= K.mean(mask)

      # adapted from weighted_loss_fn
      # apply sample weighting
      if model.sample_weights:
        # reduce score_array to same ndim as weight array
        ndim = K.ndim(output_loss)
        weight_ndim = K.ndim(model.sample_weights)
        output_loss = K.mean(output_loss, axis=list(range(weight_ndim, ndim)))
        output_loss *= model.sample_weights
        output_loss /= K.mean(K.cast(K.not_equal(model.sample_weights, 0),
                                     K.floatx()))
        output_loss = K.mean(output_loss)

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss

    total_loss = K.mean(total_loss)
    # Add regularization losses
    custom_losses = []
    for layer in model.layers:
      if layer.losses:
        custom_losses += layer.losses

    if custom_losses:
      total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics
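The mask handling is worth a concrete illustration: dividing by K.mean(mask) rescales the loss so masked entries neither contribute nor dilute the average. A minimal sketch:

import tensorflow.keras.backend as K

loss = K.constant([1.0, 2.0, 3.0, 4.0])
mask = K.constant([1.0, 1.0, 0.0, 0.0])   # last two entries masked out
loss = loss * mask / K.mean(mask)         # [2.0, 4.0, 0.0, 0.0]
# K.mean(loss) == 1.5, the mean over the two unmasked entries alone.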
Code Example #19
 def call(self, inputs):
     if not isinstance(inputs, list):
         raise ValueError('A merge layer should be called '
                          'on a list of inputs.')
     if self._reshape_required:
         reshaped_inputs = []
         input_ndims = list(map(K.ndim, inputs))
         if None not in input_ndims:
             # If ranks of all inputs are available,
             # we simply expand each of them at axis=1
             # until all of them have the same rank.
             max_ndim = max(input_ndims)
             for x in inputs:
                 x_ndim = K.ndim(x)
                 for _ in range(max_ndim - x_ndim):
                     x = K.expand_dims(x, 1)
                 reshaped_inputs.append(x)
             return self._merge_function(reshaped_inputs)
         else:
             # Transpose all inputs so that batch size is the last dimension.
             # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
             transposed = False
             for x in inputs:
                 x_ndim = K.ndim(x)
                 if x_ndim is None:
                     x_shape = K.shape(x)
                     batch_size = x_shape[0]
                     new_shape = K.concatenate(
                         [x_shape[1:],
                          K.expand_dims(batch_size)])
                     x_transposed = K.reshape(
                         x, K.stack([batch_size,
                                     K.prod(x_shape[1:])]))
                     x_transposed = K.permute_dimensions(
                         x_transposed, (1, 0))
                     x_transposed = K.reshape(x_transposed, new_shape)
                     reshaped_inputs.append(x_transposed)
                     transposed = True
                 elif x_ndim > 1:
                     dims = list(range(1, x_ndim)) + [0]
                     reshaped_inputs.append(K.permute_dimensions(x, dims))
                     transposed = True
                 else:
                     # We don't transpose inputs if they are 1D vectors or scalars.
                     reshaped_inputs.append(x)
             y = self._merge_function(reshaped_inputs)
             y_ndim = K.ndim(y)
             if transposed:
                 # If inputs have been transposed, we have to transpose the output too.
                 if y_ndim is None:
                     y_shape = K.shape(y)
                     y_ndim = K.shape(y_shape)[0]
                     batch_size = y_shape[y_ndim - 1]
                     new_shape = K.concatenate(
                         [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
                     y = K.reshape(y, (-1, batch_size))
                     y = K.permute_dimensions(y, (1, 0))
                     y = K.reshape(y, new_shape)
                 elif y_ndim > 1:
                     dims = [y_ndim - 1] + list(range(y_ndim - 1))
                     y = K.permute_dimensions(y, dims)
             return y
     else:
         return self._merge_function(inputs)
Code Example #20
def _model_loss(model, inputs, targets):
    """Calculates the loss for a given model.

    Arguments:
        model: The model on which metrics are being calculated.
        inputs: The inputs of the given model. This is typically the mini batch of
            data that is fed to the model.
        targets: The predictions or targets of the given model.

    Returns:
        Returns the model output, total loss and loss value calculated using the
        specified loss function. The total loss includes regularization losses and
        applies masking and sample weighting to the loss value.
    """
    total_loss = None
    outs = model(inputs)
    if not isinstance(outs, list):
        outs = [outs]

    if not isinstance(targets, list):
        targets = [targets]

    loss_metrics = []
    with K.name_scope('loss'):
        for i, loss_fn in enumerate(model.loss_functions):
            # compute the loss
            output_loss = _eager_loss_fn(outs[i], targets[i], loss_fn,
                                         model.output_names[i])
            loss_metrics.append(K.mean(output_loss))

            mask = outs[i]._keras_mask
            # adapted from weighted_loss_fn
            if mask is not None:
                # mask should have the same shape as output_loss
                output_loss *= mask
                #  the loss per batch should be proportional
                #  to the number of unmasked samples.
                output_loss /= K.mean(mask)

            # adapted from weighted_loss_fn
            # apply sample weighting
            if model.sample_weights:
                # reduce score_array to same ndim as weight array
                ndim = K.ndim(output_loss)
                weight_ndim = K.ndim(model.sample_weights)
                output_loss = K.mean(output_loss,
                                     axis=list(range(weight_ndim, ndim)))
                output_loss *= model.sample_weights
                output_loss /= K.mean(
                    K.cast(K.not_equal(model.sample_weights, 0), K.floatx()))
                output_loss = K.mean(output_loss)

            loss_weight = model.loss_weights_list[i]
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        total_loss = K.mean(total_loss)
        # Add regularization losses
        custom_losses = []
        for layer in model.layers:
            if layer.losses:
                custom_losses += layer.losses

        if custom_losses:
            total_loss += sum(custom_losses)

    return outs, total_loss, loss_metrics
Code Example #21
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
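A hedged usage sketch: the Gram matrix summarizes channel-to-channel correlations of a feature map, which is what style_loss in Code Example #6 compares between images:

import tensorflow.keras.backend as K

feats = K.random_uniform((64, 64, 3))   # (rows, cols, channels)
g = gram_matrix(feats)                  # shape (3, 3)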