Example #1
 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += K.sum(self.l1 * K.abs(x))
   if self.l2:
     regularization += K.sum(self.l2 * K.square(x))
   return regularization
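This __call__ matches the body of Keras's L1L2 regularizer. A minimal usage sketch, assuming the standard keras.regularizers factory (the 0.01 rates are arbitrary):

from keras import layers, regularizers

# attach the same L1/L2 penalty to a layer's kernel
dense = layers.Dense(64, kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01))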
Example #2
def dice_loss(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           smooth)
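Since dice_loss follows the Keras (y_true, y_pred) loss signature, it can be passed straight to compile; a hedged usage sketch (model construction elided):

model.compile(optimizer='adam', loss=dice_loss)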
Example #3
 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += K.sum(self.l1 * K.abs(x))
   if self.l2:
     regularization += K.sum(self.l2 * K.square(x))
   return regularization
Example #4
def disc_mutual_info_loss(c_disc, aux_dist):
    """
    Mutual Information lower bound loss for discrete distribution.
    """
    reg_disc_dim = aux_dist.get_shape().as_list()[-1]
    cross_ent = -K.mean(K.sum(K.log(aux_dist + EPSILON) * c_disc, axis=1))
    ent = -K.mean(K.sum(K.log(1. / reg_disc_dim + EPSILON) * c_disc, axis=1))

    return -(ent - cross_ent)
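EPSILON is not defined in the snippet. A small sanity-check sketch, assuming a typical value and the TensorFlow backend (where constants expose get_shape):

import numpy as np
from keras import backend as K

EPSILON = 1e-8  # assumed; not defined in the example
c_disc = K.constant(np.eye(4, dtype='float32'))          # one-hot discrete codes
aux_dist = K.constant(np.full((4, 4), 0.25, 'float32'))  # uniform posterior
print(K.eval(disc_mutual_info_loss(c_disc, aux_dist)))   # ~0.0 for a uniform posterior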
Example #5
def precision_male(y_true, y_pred):
    """ Compute precision for the class "male"
    
    :param y_true: true labels (dummy numpy array, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (dummy numpy array, column 0 for male, column 1 for female)
    :return: precision (float)
    """
    nb_male_pred = K.sum(K.round(K.clip(y_pred[:, 0], 0, 1)))
    male_true_positives = K.sum(
        K.round(K.clip(y_true[:, 0] * y_pred[:, 0], 0, 1)))
    precision = male_true_positives / (nb_male_pred + K.epsilon())
    return precision
Example #6
def recall_female(y_true, y_pred):
    """ Compute recall for the class "female"

    :param y_true: true labels (dummy numpy array, column 0 for male, column 1 for female)
    :param y_pred: predicted labels (dummy numpy array, column 0 for male, column 1 for female)
    :return: recall (float)
    """
    nb_female = K.sum(K.round(K.clip(y_true[:, 1], 0, 1)))
    female_true_positives = K.sum(
        K.round(K.clip(y_true[:, 1] * y_pred[:, 1], 0, 1)))
    recall = female_true_positives / (nb_female + K.epsilon())
    return recall
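Both functions follow the Keras metric signature (y_true, y_pred), so a usage sketch would be (model and data elided):

model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=[precision_male, recall_female])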
Example #7
 def get_gradients(self, loss, params):
     grads = K.gradients(loss, params)
     if hasattr(self, 'clipnorm') and self.clipnorm > 0:
         norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
         grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
     if hasattr(self, 'clipvalue') and self.clipvalue > 0:
         grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
     return grads
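This snippet (and its duplicate below) calls a clip_norm helper that is not shown. A minimal sketch consistent with older Keras releases, offered as an assumption:

def clip_norm(g, c, n):
    # rescale gradient g whenever the global norm n exceeds the threshold c
    return K.switch(K.greater_equal(n, c), g * c / n, g)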
Example #8
 def get_gradients(self, loss, params):
   grads = K.gradients(loss, params)
   if hasattr(self, 'clipnorm') and self.clipnorm > 0:
     norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
     grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
   if hasattr(self, 'clipvalue') and self.clipvalue > 0:
     grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
   return grads
Example #9
 def get_initial_states(self, inputs):
   # build an all-zero tensor of shape (samples, output_dim)
   initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
   initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
   initial_state = K.expand_dims(initial_state)  # (samples, 1)
   initial_state = K.tile(initial_state, [1,
                                          self.units])  # (samples, output_dim)
   initial_states = [initial_state for _ in range(len(self.states))]
   return initial_states
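The zeros_like/sum/expand_dims/tile sequence builds a (samples, units) zero state without knowing the batch size at graph-construction time. A toy sketch of the shape bookkeeping (sizes assumed):

import numpy as np
from keras import backend as K

inputs = K.constant(np.zeros((3, 5, 8), dtype='float32'))  # (samples, timesteps, input_dim)
state = K.sum(K.zeros_like(inputs), axis=(1, 2))           # (samples,)
state = K.tile(K.expand_dims(state), [1, 4])               # (samples, units), units=4 assumed
print(K.int_shape(state))                                  # (3, 4)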
Example #10
 def get_initial_state(self, inputs):
   # build an all-zero tensor of shape (samples, output_dim)
   initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
   initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
   initial_state = K.expand_dims(initial_state)  # (samples, 1)
   initial_state = K.tile(initial_state, [1,
                                          self.units])  # (samples, output_dim)
   initial_state = [initial_state for _ in range(len(self.states))]
   return initial_state
Example #11
  def get_initial_state(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = K.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = K.sum(initial_state, axis=1)
    shape = list(self.kernel_shape)
    shape[-1] = self.filters
    initial_state = self.input_conv(
        initial_state, K.zeros(tuple(shape)), padding=self.padding)

    initial_states = [initial_state for _ in range(2)]
    return initial_states
Example #12
def softmax(x):
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim == 3:
        e = K.exp(x - K.max(x, axis=-1, keepdims=True))
        s = K.sum(e, axis=-1, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor '
                         'that is not 2D or 3D. '
                         'Here, ndim=' + str(ndim))
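The manual exp/sum branch handles 3D inputs, presumably because some backends' K.softmax only normalized 2D tensors. A quick check that each last-axis slice sums to one (random data assumed):

import numpy as np
from keras import backend as K

x = K.constant(np.random.randn(2, 3, 4).astype('float32'))
print(K.eval(K.sum(softmax(x), axis=-1)))  # every entry ~1.0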
Example #13
  def get_initial_states(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = K.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = K.sum(initial_state, axis=1)
    shape = list(self.kernel_shape)
    shape[-1] = self.filters
    initial_state = self.input_conv(
        initial_state, K.zeros(tuple(shape)), padding=self.padding)

    initial_states = [initial_state for _ in range(2)]
    return initial_states
Example #14
  def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones += 1

      def dropped_inputs():
        return K.dropout(ones, self.dropout)

      dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
      shape = list(self.kernel_shape)
      shape[-1] = self.filters
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
      ones += 1.

      def dropped_inputs():  # pylint: disable=function-redefined
        return K.dropout(ones, self.recurrent_dropout)

      rec_dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(rec_dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
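The all-ones tensor exists only so K.dropout can derive a mask of the right shape; K.in_train_phase then swaps the mask for plain ones at inference time. A stripped-down sketch of that trick (shape arbitrary):

from keras import backend as K

ones = K.ones((2, 3))
mask = K.in_train_phase(lambda: K.dropout(ones, 0.5), ones, training=True)
print(K.eval(mask))  # entries are 0. or 2. (inverted-dropout scaling at rate 0.5)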
Example #15
  def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones += 1

      def dropped_inputs():
        return K.dropout(ones, self.dropout)

      dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
      shape = list(self.kernel_shape)
      shape[-1] = self.filters
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
      ones += 1.

      def dropped_inputs():  # pylint: disable=function-redefined
        return K.dropout(ones, self.recurrent_dropout)

      rec_dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(rec_dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example #16
def grad_cam(input_model, image, category_index):
    """
    Args: model to make predictions, image to predict, index of categories and
    their predicted probabilities.
    
    Constructs a colour map showing where the classifier puts the highest weight
    for a given image in making its prediction.
    
    Returns: numpy array of same dimension as image but instead displaying colours
    according to where the classifier puts the most weight.
    """
    model = Sequential()
    model.add(input_model)
    nb_classes = 10
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer))
    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].layers[29].output  # change this index to match the network structure
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input],
                                   [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255.0 * cam / np.max(cam)
    return np.uint8(cam)
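grad_cam depends on target_category_loss and normalize, which are not shown. Sketches consistent with the widely copied Keras Grad-CAM code, offered as assumptions rather than as part of this example:

import tensorflow as tf
from keras import backend as K

def target_category_loss(x, category_index, nb_classes):
    # zero out every class score except the target class
    return tf.multiply(x, K.one_hot([category_index], nb_classes))

def normalize(x):
    # scale a tensor to unit L2 norm (the epsilon guards against division by zero)
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)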
Example #17
def softmax(x, axis=-1):
  """Softmax activation function.

  Arguments:
      x : Tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation.

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    return K.softmax(x)
  elif ndim > 2:
    e = K.exp(x - K.max(x, axis=axis, keepdims=True))
    s = K.sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example #18
def softmax(x, axis=-1):
    """Softmax activation function.

  Arguments:
      x : Tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation.

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example #19
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = K.clip(norms, 0, self.max_value)
   w *= (desired / (K.epsilon() + norms))
   return w
Example #20
def categorical_hinge(y_true, y_pred):
  pos = K.sum(y_true * y_pred, axis=-1)
  neg = K.max((1. - y_true) * y_pred, axis=-1)
  return K.maximum(neg - pos + 1., 0.)
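A small worked check with illustrative values: pos picks the score of the true class, neg the best wrong-class score, and the loss is max(neg - pos + 1, 0):

from keras import backend as K

y_true = K.constant([[0., 1., 0.]])
y_pred = K.constant([[0.1, 0.9, 0.2]])
print(K.eval(categorical_hinge(y_true, y_pred)))  # [0.3] = max(0.2 - 0.9 + 1, 0)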
Example #21
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
Example #22
 def __call__(self, w):
     return w / (K.epsilon() +
                 K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))
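This is the body of a unit-norm weight constraint; rather than instantiating the class directly, the usual route is the built-in factory. A hedged usage sketch:

from keras import layers, constraints

dense = layers.Dense(64, kernel_constraint=constraints.unit_norm(axis=0))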
Example #23
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
                (1 - self.rate) * norms)
     w *= (desired / (K.epsilon() + norms))
     return w
Example #24
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = K.clip(norms, 0, self.max_value)
     w *= (desired / (K.epsilon() + norms))
     return w
Example #25
def categorical_hinge(y_true, y_pred):
    pos = K.sum(y_true * y_pred, axis=-1)
    neg = K.max((1. - y_true) * y_pred, axis=-1)
    return K.maximum(neg - pos + 1., 0.)
Example #26
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
              (1 - self.rate) * norms)
   w *= (desired / (K.epsilon() + norms))
   return w
Example #27
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
Example #28
 def __call__(self, w):
   return w / (
       K.epsilon() + K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))