from keras import backend as K


def precision_male(y_true, y_pred):
    """ Compute precision for the class "male"

    :param y_true: true labels (one-hot / dummy-encoded, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (one-hot / dummy-encoded, column 0 for male, column 1 for female)
    :return: precision (scalar tensor)
    """
    nb_male_pred = K.sum(K.round(K.clip(y_pred[:, 0], 0, 1)))
    male_true_positives = K.sum(
        K.round(K.clip(y_true[:, 0] * y_pred[:, 0], 0, 1)))
    precision = male_true_positives / (nb_male_pred + K.epsilon())
    return precision


def recall_female(y_true, y_pred):
    """ Compute recall for the class "female"

    :param y_true: true labels (one-hot / dummy-encoded, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (one-hot / dummy-encoded, column 0 for male, column 1 for female)
    :return: recall (scalar tensor)
    """
    nb_female = K.sum(K.round(K.clip(y_true[:, 1], 0, 1)))
    female_true_positives = K.sum(
        K.round(K.clip(y_true[:, 1] * y_pred[:, 1], 0, 1)))
    recall = female_true_positives / (nb_female + K.epsilon())
    return recall
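A minimal usage sketch (the model below is hypothetical; any two-column one-hot output works): the functions are passed directly as metrics when compiling.

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical two-class model; column 0 = male, column 1 = female.
model = Sequential([Dense(2, activation='softmax', input_shape=(20,))])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[precision_male, recall_female])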
Example #3
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(K.square(first_log - second_log), axis=-1)
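A quick sanity check, assuming the usual backend import:

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[1.0, 2.0, 3.0]]))
y_pred = K.constant(np.array([[1.5, 2.0, 2.0]]))
# mean of the squared differences of log(1 + x) per sample
print(K.eval(mean_squared_logarithmic_error(y_true, y_pred)))  # ~[0.0442]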
Example #4
def mean_absolute_percentage_error(y_true, y_pred):
  # Like MAE, but expressed as a percentage of the true values.
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
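For example, predicting 90 when the true value is 100 gives a 10% error (assuming the backend import as above):

from keras import backend as K

y_true = K.constant([[100.0]])
y_pred = K.constant([[90.0]])
print(K.eval(mean_absolute_percentage_error(y_true, y_pred)))  # ~[10.]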
Example #5
  def fit(self, x, augment=False, rounds=1, seed=None):
    """Fits internal statistics to some sample data.

    Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.

    Arguments:
        x: Numpy array, the data to fit on. Should have rank 4.
            In case of grayscale data,
            the channels axis should have value 1, and in case
            of RGB data, it should have value 3.
        augment: Whether to fit on randomly augmented samples.
        rounds: If `augment` is set,
            how many augmentation passes to do over the data.
        seed: Random seed.

    Raises:
        ValueError: in case of invalid input `x`.
        ImportError: if Scipy is not available.
    """
    x = np.asarray(x, dtype=K.floatx())
    if x.ndim != 4:
      raise ValueError('Input to `.fit()` should have rank 4. '
                       'Got array with shape: ' + str(x.shape))
    if x.shape[self.channel_axis] not in {1, 3, 4}:
      raise ValueError(
          'Expected input to be images (as Numpy array) '
          'following the data format convention "' + self.data_format + '" '
          '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
          'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
          'However, it was passed an array with shape ' + str(
              x.shape) + ' (' + str(x.shape[self.channel_axis]) + ' channels).')

    if seed is not None:
      np.random.seed(seed)

    x = np.copy(x)
    if augment:
      ax = np.zeros(
          tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
      for r in range(rounds):
        for i in range(x.shape[0]):
          ax[i + r * x.shape[0]] = self.random_transform(x[i])
      x = ax

    if self.featurewise_center:
      self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
      broadcast_shape = [1, 1, 1]
      broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
      self.mean = np.reshape(self.mean, broadcast_shape)
      x -= self.mean

    if self.featurewise_std_normalization:
      self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
      broadcast_shape = [1, 1, 1]
      broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
      self.std = np.reshape(self.std, broadcast_shape)
      x /= (self.std + K.epsilon())

    if self.zca_whitening:
      if linalg is None:
        raise ImportError('Scipy is required for zca_whitening.')

      flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
      sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
      u, s, _ = linalg.svd(sigma)
      self.principal_components = np.dot(
          np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
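A usage sketch (the generator options and array shape are illustrative, not from the original project): fit() is called once on a representative sample before generating batches, so that the mean/std (and optionally the ZCA components) are available for standardization.

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

x_train = np.random.rand(100, 32, 32, 3)  # rank 4: (samples, rows, cols, channels)

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True)
datagen.fit(x_train)  # computes datagen.mean and datagen.std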
Example #6
  def __call__(self, w):
    norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
    # Interpolate between the clipped norm and the raw norm, then rescale w.
    desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
               (1 - self.rate) * norms)
    w *= (desired / (K.epsilon() + norms))
    return w
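This __call__ matches Keras's built-in MinMaxNorm weight constraint; a sketch of how such a constraint is typically attached to a layer:

from keras.layers import Dense
from keras.constraints import MinMaxNorm

# The constraint is applied to the kernel after each gradient update.
layer = Dense(64, kernel_constraint=MinMaxNorm(min_value=0.0, max_value=2.0,
                                               rate=1.0, axis=0))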
Example #7
def sample_mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) /
                 K.clip(K.abs(y_true) + K.abs(y_pred), K.epsilon(), None))
    return 200. * K.mean(diff, axis=-1)
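This is a symmetric MAPE scaled to the 0-200 range; a quick check, assuming the backend import:

from keras import backend as K

y_true = K.constant([[100.0]])
y_pred = K.constant([[90.0]])
print(K.eval(sample_mean_absolute_percentage_error(y_true, y_pred)))  # ~[10.53]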
Example #8
  def __call__(self, w):
    # Rescale w to unit L2 norm along self.axis.
    return w / (K.epsilon() +
                K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))
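This matches Keras's built-in UnitNorm constraint: it rescales w so its norm along self.axis is (approximately) 1. A quick check using the built-in class:

import numpy as np
from keras import backend as K
from keras.constraints import UnitNorm

w = K.constant(np.random.rand(5, 3))
w_unit = UnitNorm(axis=0)(w)
print(K.eval(K.sqrt(K.sum(K.square(w_unit), axis=0))))  # ~[1. 1. 1.]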
Example #10
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
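A quick check on two small distributions, assuming the backend import:

from keras import backend as K

p = K.constant([[0.5, 0.5]])
q = K.constant([[0.9, 0.1]])
# 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1)
print(K.eval(kullback_leibler_divergence(p, q)))  # ~[0.511]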
Example #11
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
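This is the Poisson negative log-likelihood up to an additive constant (the log(y_true!) term is dropped); a quick check, assuming the backend import:

from keras import backend as K

y_true = K.constant([[2.0]])
y_pred = K.constant([[3.0]])
print(K.eval(poisson(y_true, y_pred)))  # ~[0.803], i.e. 3 - 2*log(3)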
Example #14
  def __call__(self, w):
    norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
    # Clip norms to at most max_value, then rescale w accordingly.
    desired = K.clip(norms, 0, self.max_value)
    w *= (desired / (K.epsilon() + norms))
    return w
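This matches Keras's built-in MaxNorm constraint: weight vectors whose norm exceeds max_value are scaled down to max_value, smaller ones are untouched. A quick check using the built-in class:

import numpy as np
from keras import backend as K
from keras.constraints import MaxNorm

w = K.constant(np.array([[3.0, 0.3],
                         [4.0, 0.4]]))  # column norms: 5.0 and 0.5
w_capped = MaxNorm(max_value=2.0, axis=0)(w)
print(K.eval(K.sqrt(K.sum(K.square(w_capped), axis=0))))  # ~[2.  0.5]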