Code example #1 (score: 0)
def precision_male(y_true, y_pred):
    """ Compute precision for the class "male"

    :param y_true: true labels (dummy numpy array, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (dummy numpy array, column 0 for male, column 1 for female)
    :return: precision (float)
    """
    # Number of samples predicted as male (denominator of precision).
    predicted_male = K.sum(K.round(K.clip(y_pred[:, 0], 0, 1)))
    # Samples that are male AND predicted male.
    true_positives = K.sum(
        K.round(K.clip(y_true[:, 0] * y_pred[:, 0], 0, 1)))
    # Epsilon keeps the division safe when nothing was predicted male.
    return true_positives / (predicted_male + K.epsilon())
Code example #2 (score: 0)
def recall_female(y_true, y_pred):
    """ Compute recall for the class "female"

    :param y_true: true labels (dummy numpy array, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (dummy numpy array, column 0 for male, column 1 for female)
    :return: recall (float)
    """
    # Total number of actual female samples (denominator of recall).
    nb_female = K.sum(K.round(K.clip(y_true[:, 1], 0, 1)))
    # Renamed from the copy-pasted `male_true_positives`: this counts
    # samples that are female AND predicted female.
    female_true_positives = K.sum(
        K.round(K.clip(y_true[:, 1] * y_pred[:, 1], 0, 1)))
    # Epsilon guards against division by zero when no female samples exist.
    recall = female_true_positives / (nb_female + K.epsilon())
    return recall
Code example #3 (score: 0)
 def get_gradients(self, loss, params):
     """Return gradients of `loss` w.r.t. `params`, with optional clipping.

     Applies global-norm clipping when the optimizer has a positive
     `clipnorm` attribute, then element-wise clipping when it has a
     positive `clipvalue` attribute.
     """
     grads = K.gradients(loss, params)
     if getattr(self, 'clipnorm', 0) > 0:
         global_norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
         grads = [clip_norm(g, self.clipnorm, global_norm) for g in grads]
     if getattr(self, 'clipvalue', 0) > 0:
         limit = self.clipvalue
         grads = [K.clip(g, -limit, limit) for g in grads]
     return grads
Code example #4 (score: 0)
File: optimizers.py — Project: jiayouwyhit/tensorflow
 def get_gradients(self, loss, params):
   """Compute gradients of `loss` with respect to `params`.

   Honors the optimizer's optional `clipnorm` (global-norm clipping)
   and `clipvalue` (element-wise clipping) attributes when positive.
   """
   grads = K.gradients(loss, params)
   wants_norm_clip = hasattr(self, 'clipnorm') and self.clipnorm > 0
   if wants_norm_clip:
     squared_sums = [K.sum(K.square(g)) for g in grads]
     norm = K.sqrt(sum(squared_sums))
     grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
   wants_value_clip = hasattr(self, 'clipvalue') and self.clipvalue > 0
   if wants_value_clip:
     grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
   return grads
Code example #5 (score: 0)
def kullback_leibler_divergence(y_true, y_pred):
  """KL divergence between `y_true` and `y_pred`, summed over the last axis.

  Both inputs are clipped into [epsilon, 1] so the log and the ratio
  stay well-defined.
  """
  p = K.clip(y_true, K.epsilon(), 1)
  q = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(p * K.log(p / q), axis=-1)
Code example #6 (score: 0)
def mean_squared_logarithmic_error(y_true, y_pred):
  """Mean squared difference of log(1 + y) terms over the last axis.

  Predictions and targets are clipped below at epsilon before the log,
  so non-positive values cannot produce NaN/inf.
  """
  log_pred = K.log(1. + K.clip(y_pred, K.epsilon(), None))
  log_true = K.log(1. + K.clip(y_true, K.epsilon(), None))
  return K.mean(K.square(log_pred - log_true), axis=-1)
Code example #7 (score: 0)
def mean_absolute_percentage_error(y_true, y_pred):
  """Mean absolute percentage error over the last axis.

  This is a *relative* error scaled to percent (it is NOT equivalent to
  MAE — the previous comment was misleading). |y_true| is clipped below
  at epsilon so division by zero cannot occur.
  """
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Code example #8 (score: 0)
def sample_mean_absolute_percentage_error(y_true, y_pred):
    """Symmetric MAPE: 200 * mean(|t - p| / (|t| + |p|)) over the last axis.

    The denominator is clipped below at epsilon to avoid division by zero
    when both the target and the prediction are zero.
    """
    denom = K.clip(K.abs(y_true) + K.abs(y_pred), K.epsilon(), None)
    relative_diff = K.abs(y_true - y_pred) / denom
    return 200. * K.mean(relative_diff, axis=-1)
Code example #9 (score: 0)
File: constraints.py — Project: lengjia/RRL
 def __call__(self, w):
     """Rescale `w` so its norms are pulled toward [min_value, max_value].

     The target norm interpolates between the clipped norm and the raw
     norm with weight `self.rate`; `w` is then scaled by target / norm.
     """
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     clipped = K.clip(norms, self.min_value, self.max_value)
     target = self.rate * clipped + (1 - self.rate) * norms
     # Epsilon avoids dividing by an all-zero norm.
     w *= target / (K.epsilon() + norms)
     return w
Code example #10 (score: 0)
File: constraints.py — Project: lengjia/RRL
 def __call__(self, w):
     """Constrain `w` so its norms along `self.axis` never exceed
     `self.max_value`."""
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     capped = K.clip(norms, 0, self.max_value)
     # Epsilon avoids dividing by an all-zero norm.
     scale = capped / (K.epsilon() + norms)
     w *= scale
     return w
Code example #11 (score: 0)
File: losses.py — Project: AutumnQYN/tensorflow
def kullback_leibler_divergence(y_true, y_pred):
  """Kullback-Leibler divergence KL(y_true || y_pred) along the last axis.

  Inputs are clipped into [epsilon, 1] before computing the log ratio.
  """
  true_clipped = K.clip(y_true, K.epsilon(), 1)
  pred_clipped = K.clip(y_pred, K.epsilon(), 1)
  ratio = true_clipped / pred_clipped
  return K.sum(true_clipped * K.log(ratio), axis=-1)
Code example #12 (score: 0)
File: losses.py — Project: AutumnQYN/tensorflow
def mean_squared_logarithmic_error(y_true, y_pred):
  """Mean of squared log(1 + y) differences, taken over the last axis.

  Both tensors are clipped below at epsilon before the log transform.
  """
  safe_pred = K.clip(y_pred, K.epsilon(), None)
  safe_true = K.clip(y_true, K.epsilon(), None)
  delta = K.log(safe_pred + 1.) - K.log(safe_true + 1.)
  return K.mean(K.square(delta), axis=-1)
Code example #13 (score: 0)
File: losses.py — Project: AutumnQYN/tensorflow
def mean_absolute_percentage_error(y_true, y_pred):
  """Mean absolute percentage error over the last axis.

  A *relative* error scaled to percent — not equivalent to MAE, despite
  the previous comment. |y_true| is clipped below at epsilon so division
  by zero cannot occur.
  """
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Code example #14 (score: 0)
 def __call__(self, w):
   """Scale `w` down wherever its norm along `self.axis` exceeds
   `self.max_value`."""
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   limited = K.clip(norms, 0, self.max_value)
   # Epsilon avoids dividing by an all-zero norm.
   w *= limited / (K.epsilon() + norms)
   return w
Code example #15 (score: 0)
 def __call__(self, w):
   """Nudge the norms of `w` toward the range [min_value, max_value].

   Blends the clipped norm with the current norm using `self.rate`
   and rescales `w` by the resulting ratio.
   """
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   blended = (self.rate * K.clip(norms, self.min_value, self.max_value)
              + (1 - self.rate) * norms)
   # Epsilon avoids dividing by an all-zero norm.
   w *= blended / (K.epsilon() + norms)
   return w