Example #1
def binary_crossentropy_with_ranking(y_true, y_pred):
    """ Trying to combine ranking loss with numeric precision"""
    # first get the log loss like normal
    logloss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

    # next, build a rank loss

    # clip the probabilities to keep stability
    y_pred_clipped = K.clip(y_pred, K.epsilon(), 1-K.epsilon())

    # translate into the raw scores before the logit
    y_pred_score = K.log(y_pred_clipped / (1 - y_pred_clipped))

    # determine what the maximum score for a zero outcome is
    y_pred_score_zerooutcome_max = K.max(y_pred_score * K.cast(y_true < 1, K.floatx()))

    # determine how much each score is above or below it
    rankloss = y_pred_score - y_pred_score_zerooutcome_max

    # only keep losses for positive outcomes
    rankloss = rankloss * y_true

    # only keep losses where the score is below the max
    rankloss = K.square(K.clip(rankloss, -100, 0))

    # average the loss for just the positive outcomes
    rankloss = K.sum(rankloss, axis=-1) / (K.sum(K.cast(y_true > 0, K.floatx())) + 1)

    # return (rankloss + 1) * logloss - an alternative to try
    return rankloss + logloss
Example #2
def test_ReduceLROnPlateau():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_class, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()

    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
Example #3
def root_mean_squared_logarithmic_loss(y_true, y_pred):
    y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
    y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)

    return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1)+0.00001)
Example #4
def eigen_loss(y_true, y_pred):
    y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
    y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)

    y_true_clipped = K.clip(y_true, K.epsilon(), None)
    y_pred_clipped = K.clip(y_pred, K.epsilon(), None)

    first_log = K.log(y_pred_clipped + 1.)
    second_log = K.log(y_true_clipped + 1.)
    w_x = K.variable(np.array([[-1., 0., 1.],
                                [-1., 0., 1.],
                                [-1., 0., 1.]]).reshape(3, 3, 1, 1))

    grad_x_pred = K.conv2d(first_log, w_x, padding='same')
    grad_x_true = K.conv2d(second_log, w_x, padding='same')

    w_y = K.variable(np.array([[-1., -1., -1.],
                                [0., 0., 0.],
                                [1., 1., 1.]]).reshape(3, 3, 1, 1))

    grad_y_pred = K.conv2d(first_log, w_y, padding='same')
    grad_y_true = K.conv2d(second_log, w_y, padding='same')
    diff_x = grad_x_pred - grad_x_true
    diff_y = grad_y_pred - grad_y_true

    log_term = K.mean(K.square((first_log - second_log)), axis=-1)
    sc_inv_term = K.square(K.mean((first_log - second_log),axis=-1))
    grad_loss = K.mean(K.square(diff_x) + K.square(diff_y), axis=-1)

    return log_term - (0.5 * sc_inv_term) + grad_loss
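For reference, the three terms above appear to implement the scale-invariant log-depth loss of Eigen et al. with an added gradient-matching term (this reading is an interpretation of the code, not stated in the source): with d = log(y_pred + 1) - log(y_true + 1), the returned value is mean(d^2) - 0.5 * mean(d)^2 + mean(dx^2 + dy^2), where dx and dy are the horizontal and vertical Sobel-like differences of d computed by the two convolutions.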
Example #5
    def get_split_averages(input_tensor, input_mask, indices):
        # Splits input tensor into three parts based on the indices and
        # returns average of values prior to index, values at the index and
        # average of values after the index.
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (batch_size, input_length)
        if input_mask is not None:
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
Example #6
def label_reg_loss(y_true, y_pred):
  # KL-div
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)

  y_true_mean = K.mean(y_true, axis=0)
  y_pred_mean = K.mean(y_pred, axis=0)
  return K.sum(y_true_mean * K.log(y_true_mean / y_pred_mean), axis=-1)
Example #7
def calc_loss(pred, target, loss='l2'):
    """ Calculate Loss from Shoanlu GAN """
    if loss.lower() == "l2":
        return K.mean(K.square(pred - target))
    if loss.lower() == "l1":
        return K.mean(K.abs(pred - target))
    if loss.lower() == "cross_entropy":
        return -K.mean(K.log(pred + K.epsilon()) * target +
                       K.log(1 - pred + K.epsilon()) * (1 - target))
    raise ValueError('Received an unknown loss type: {}.'.format(loss))
Example #8
def f1(y_true, y_pred):
    y_true_f = KB.flatten(y_true)
    y_pred_f = KB.flatten(y_pred)
    true_positives = KB.sum(KB.round(KB.clip(y_true_f * y_pred_f, 0, 1)), axis=-1)
    possible_positives = KB.sum(KB.round(KB.clip(y_true_f, 0, 1)), axis=-1)
    recall = true_positives / (possible_positives + KB.epsilon())
    predicted_positives = KB.sum(KB.round(KB.clip(y_pred_f, 0, 1)), axis=-1)
    precision = true_positives / (predicted_positives + KB.epsilon())

    return 2*((precision*recall)/(precision+recall+KB.epsilon()))
Example #9
File: metrics.py  Project: ymcidence/neuron
    def dice(self, y_true, y_pred):
        """
        compute dice for given Tensors

        """
        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.input_type == 'prob':
            # We assume that y_true is probabilistic, but just in case:
            y_true /= K.sum(y_true, axis=-1, keepdims=True)
            y_true = K.clip(y_true, K.epsilon(), 1)

            # make sure pred is a probability
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1)

        # Prepare the volumes to operate on
        # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
        # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
        # the entries are either 0 or 1
        if self.dice_type == 'hard':

            # if given predicted probability, transform to "hard max"
            if self.input_type == 'prob':
                if self.approx_hard_max:
                    y_pred_op = _hard_max(y_pred, axis=-1)
                    y_true_op = _hard_max(y_true, axis=-1)
                else:
                    y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1), self.nb_labels)
                    y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1), self.nb_labels)

            # if given predicted label, transform to one hot notation
            else:
                assert self.input_type == 'max_label'
                y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
                y_true_op = _label_to_one_hot(y_true, self.nb_labels)

        # If we're doing soft Dice, require prob output, and the data already is as we need it
        # [batch_size, nb_voxels, nb_labels]
        else:
            assert self.input_type == 'prob', "cannot do soft dice with max_label input"
            y_pred_op = y_pred
            y_true_op = y_true

        # compute dice for each entry in batch.
        # dice will now be [batch_size, nb_labels]
        sum_dim = 1
        top = 2 * K.sum(y_true_op * y_pred_op, sum_dim)
        bottom = K.sum(K.square(y_true_op), sum_dim) + K.sum(K.square(y_pred_op), sum_dim)
        # make sure we have no 0s on the bottom (regularize with self.area_reg, e.g. K.epsilon())
        bottom = K.maximum(bottom, self.area_reg)
        return top / bottom
Example #10
File: metrics.py  Project: ymcidence/neuron
def _hard_max(tens, axis):
    """
    We can't use the argmax function in a loss, as it's not differentiable.
    We can use it in a metric, but not in a loss function.
    Therefore, we replace the 'hard max' operation (i.e. argmax + one-hot)
    with this approximation.
    """
    tensmax = K.max(tens, axis=axis, keepdims=True)
    eps_hot = K.maximum(tens - tensmax + K.epsilon(), 0)
    one_hot = eps_hot / K.epsilon()
    return one_hot
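A quick way to see the approximation in action (the 1x3 probability row below is made up for illustration): the largest entry maps to roughly 1 and every other entry maps to 0.

from keras import backend as K

probs = K.constant([[0.2, 0.5, 0.3]])
print(K.eval(_hard_max(probs, axis=-1)))  # -> approximately [[0., 1., 0.]]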
Example #11
    def compute_loss(self, y_true, y_pred):
        #weights = K.tile(self.class_weights, [tf.shape(y_true)[0], tf.shape(y_true)[1], tf.shape(y_true)[2], 1])
        # softmax
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())

        # calc
        loss = y_true * K.log(y_pred) * self.class_weights
        loss = -K.sum(loss, -1)
        return loss
Example #12
def softmax(x, axis, mask=None):
    if mask is None:
        mask = K.constant(True)
    mask = K.cast(mask, K.floatx())
    if K.ndim(x) == K.ndim(mask) + 1:
        mask = K.expand_dims(mask)

    m = K.max(x, axis=axis, keepdims=True)
    e = K.exp(x - m) * mask
    s = K.sum(e, axis=axis, keepdims=True)
    s += K.cast(K.cast(s < K.epsilon(), K.floatx()) * K.epsilon(), K.floatx())
    return e / s
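A small, hedged sanity check (the values and mask are made up): masked positions receive zero weight and the remaining weights still sum to 1 along the softmax axis.

from keras import backend as K

x = K.constant([[1., 2., 3.]])
m = K.constant([[1., 1., 0.]])
print(K.eval(softmax(x, axis=-1, mask=m)))  # -> approximately [[0.27, 0.73, 0.]]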
Example #13
  def get_scores(self):
    """Adds tensor operations for computing scores.

    Computes prediction yhat (eqn (1) in Matching networks) of class for test
    compounds.
    """
    # Get featurization for test 
    # Shape (n_test, n_feat)
    test_feat = self.model.get_test_output()  
    # Get featurization for support
    # Shape (n_support, n_feat)
    support_feat = self.model.get_support_output()  

    # Computes the inner part c() of the kernel
    # (the inset equation in section 2.1.1 of Matching networks paper). 
    # Normalize
    if self.similarity == 'cosine':
      g = model_ops.cosine_distances(test_feat, support_feat)
    else:
      raise ValueError("Only cosine similarity is supported.")
    # TODO(rbharath): euclidean kernel is broken!
    #elif self.similarity == 'euclidean':
    #  g = model_ops.euclidean_distance(test_feat, support_feat)
    # Note that gram matrix g has shape (n_test, n_support)

    # soft corresponds to a(xhat, x_i) in eqn (1) of Matching Networks paper 
    # https://arxiv.org/pdf/1606.04080v1.pdf
    # Computes softmax across axis 1, (so sums distances to support set for
    # each test entry) to get attention vector
    # Shape (n_test, n_support)
    attention = tf.nn.softmax(g)  # Renormalize

    # Weighted sum of support labels
    # Shape (n_support, 1)
    support_labels = tf.expand_dims(self.support_label_placeholder, 1)
    # pred is yhat in eqn (1) of Matching Networks.
    # Shape squeeze((n_test, n_support) * (n_support, 1)) = (n_test,)
    pred = tf.squeeze(tf.matmul(attention, support_labels), [1])

    # Clip softmax probabilities to range [epsilon, 1-epsilon]
    # Shape (n_test,)
    pred = tf.clip_by_value(pred, K.epsilon(), 1.-K.epsilon())

    # Convert to logit space using inverse sigmoid (logit) function
    # logit function: log(pred) - log(1-pred)
    # Used to invoke tf.nn.sigmoid_cross_entropy_with_logits
    # in Cross Entropy calculation.
    # Shape (n_test,)
    scores = tf.log(pred) - tf.log(tf.constant(1., dtype=tf.float32)-pred)

    return pred, scores
Example #14
 def call(self, x, mask=None):
     input_shape = K.int_shape(x)
     reduction_axes = list(range(len(input_shape)))
     del reduction_axes[self.axis]
     broadcast_shape = [1] * len(input_shape)
     broadcast_shape[self.axis] = input_shape[self.axis]
     alpha_pos = K.reshape(self.alpha_pos, broadcast_shape)
     alpha_neg = K.reshape(self.alpha_neg, broadcast_shape)
     beta_pos = K.reshape(self.beta_pos, broadcast_shape)
     beta_neg = K.reshape(self.beta_neg, broadcast_shape)
     rho_pos = K.reshape(self.rho_pos, broadcast_shape)
     rho_neg = K.reshape(self.rho_neg, broadcast_shape)
     pos = alpha_pos * K.pow(K.relu(x + beta_pos) + K.epsilon(), rho_pos)
     neg = alpha_neg * K.pow(K.relu(-x + beta_neg) + K.epsilon(), rho_neg)
     return pos + neg
Example #15
File: decode.py  Project: the-moliver/kfs
    def call(self, x, mask=None):
        x = K.permute_dimensions(x, (0, 2, 1))
        x = K.reshape(x, (-1, self.input_length))
        x = K.expand_dims(x, 1)
        x = K.expand_dims(x, -1)
        if self.real_filts is not None:
            conv_out_r = K.conv2d(x, self.W_r, strides=self.subsample,
                                  border_mode=self.border_mode,
                                  dim_ordering='th')
        else:
            conv_out_r = x

        if self.complex_filts is not None:
            conv_out_c1 = K.conv2d(x, self.W_c1, strides=self.subsample,
                                   border_mode=self.border_mode,
                                   dim_ordering='th')
            conv_out_c2 = K.conv2d(x, self.W_c2, strides=self.subsample,
                                   border_mode=self.border_mode,
                                   dim_ordering='th')
            conv_out_c = K.sqrt(K.square(conv_out_c1) + K.square(conv_out_c2) + K.epsilon())
            output = K.concatenate((conv_out_r, conv_out_c), axis=1)
        else:
            output = conv_out_r

        output_shape = self.get_output_shape_for((None, self.input_length, self.input_dim))
        output = K.squeeze(output, 3)  # remove the dummy 3rd dimension
        output = K.permute_dimensions(output, (2, 1, 0))
        output = K.reshape(output, (-1, output_shape[1], output.shape[1]*output.shape[2]))
        return output
Example #16
def get_sample_weights(y, class_weights=None):
    """Compute sample weights for model training.

    Computes sample weights given  a vector of output labels `y`. Sets weights
    of samples without label (`CPG_NAN`) to zero.

    Parameters
    ----------
    y: :class:`numpy.ndarray`
        1d numpy array of output labels.
    class_weights: dict
        Weight of output classes, e.g. methylation states.

    Returns
    -------
    :class:`numpy.ndarray`
        Sample weights of size `y`.
    """
    y = y[:]
    sample_weights = np.ones(y.shape, dtype=K.floatx())
    sample_weights[y == dat.CPG_NAN] = K.epsilon()
    if class_weights is not None:
        for cla, weight in class_weights.items():
            sample_weights[y == cla] = weight
    return sample_weights
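An illustrative call, assuming dat.CPG_NAN is the missing-label marker (e.g. -1 in this code base) and that class 1 should be up-weighted; the numbers below are placeholders.

import numpy as np

y = np.array([0, 1, -1, 1])
print(get_sample_weights(y, class_weights={0: 1.0, 1: 2.0}))
# -> [1.0, 2.0, ~1e-07, 2.0]: the unlabeled position keeps only a K.epsilon() weight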
Example #17
 def loss(y_true, y_pred):
     y_true = denormalize(y_true, y_mean, y_std)
     y_pred = denormalize(y_pred, y_mean, y_std)
     diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true),
                                             K.epsilon(),
                                             None))
     return 100. * K.mean(diff, axis=-1)
Example #18
  def __init__(self, sess, model,
               test_batch_size=10, support_batch_size=10,
               learning_rate=.001, similarity="cosine", **kwargs):
    """Builds a support-based classifier.

    See https://arxiv.org/pdf/1606.04080v1.pdf for definition of support.

    Parameters
    ----------
    sess: tf.Session
      Session for this model
    model: SequentialSupportModel 
      Contains core layers in model. 
    n_pos: int
      Number of positive examples in support.
    n_neg: int
      Number of negative examples in support.
    """
    self.sess = sess
    self.similarity = similarity
    self.model = model  
    self.test_batch_size = test_batch_size
    self.support_batch_size = support_batch_size

    self.learning_rate = learning_rate
    self.epsilon = K.epsilon()

    self.add_placeholders()
    self.pred_op, self.scores_op, self.loss_op = self.add_training_loss()
    # Get train function
    self.train_op = self.get_training_op(self.loss_op)

    # Initialize
    self.init_fn = tf.initialize_all_variables()
    sess.run(self.init_fn)  
Example #19
def recall(y_true, y_pred):
    y_true_f = KB.flatten(y_true)
    y_pred_f = KB.flatten(y_pred)
    true_positives = true_positive(y_true_f, y_pred_f)
    possible_positives = possible_positive(y_true_f, y_pred_f)
    recall = true_positives / (possible_positives + KB.epsilon())
    return recall
Example #20
    def specificity(y_true, y_pred):
        y_pred_neg = 1 - K.round(K.clip(y_pred, 0, 1))
        y_neg = 1 - K.round(K.clip(y_true, 0, 1))
        tn = K.sum(y_neg * y_pred_neg)
        neg = K.sum(y_neg)

        return tn / (neg + K.epsilon())
Example #21
    def call(self, x, mask=None):
        eij = dot_product(x, self.W)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # In some cases, especially in the early stages of training, the sum may be almost zero
        # and this results in NaNs. A workaround is to add a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        weighted_input = x * K.expand_dims(a)

        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result
Example #22
    def call(self, x, mask=None):
        # eij = K.dot(x, self.W)  -- the TF backend doesn't support this form, hence the reshape below

        # features_dim = self.W.shape[0]
        # step_dim = x._keras_shape[1]

        features_dim = self.features_dim
        step_dim = self.step_dim

        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases, especially in the early stages of training, the sum may be almost zero
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        # print weighted_input.shape
        return K.sum(weighted_input, axis=1)
Example #23
    def sensitivity(y_true, y_pred):
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pos = K.round(K.clip(y_true, 0, 1))
        tp = K.sum(y_pos * y_pred_pos)
        pos = K.sum(y_pos)

        return tp / (pos + K.epsilon())
Example #24
File: metrics.py  Project: ymcidence/neuron
    def loss(self, y_true, y_pred):
        """ categorical crossentropy loss """

        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.use_float16:
            y_true = K.cast(y_true, 'float16')
            y_pred = K.cast(y_pred, 'float16')

        # scale and clip probabilities
        # this should not be necessary for softmax output.
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute log probability
        log_post = K.log(y_pred)  # likelihood

        # loss
        loss = - y_true * log_post

        # weighted loss
        if self.weights is not None:
            loss *= self.weights

        if self.vox_weights is not None:
            loss *= self.vox_weights

        # take the total loss
        # loss = K.batch_flatten(loss)
        mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
        tf.verify_tensor_all_finite(mloss, 'Loss not finite')
        return mloss
Example #25
def fbeta_score(y_true, y_pred, beta=1):
    '''Calculates the F score, the weighted harmonic mean of precision and recall.
    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranging from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    '''
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
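A hedged sketch of how such functions are typically wired in as extra metrics at compile time, assuming precision() and recall() helpers like those in the later examples are defined in the same module; the tiny model is a placeholder, not part of the original example.

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=4, activation='sigmoid')])
model.compile(optimizer='sgd', loss='binary_crossentropy',
              metrics=[precision, recall, fbeta_score])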
Example #26
 def __call__(self, x):
     xshape = K.int_shape(x)
     if self.division_idx is None:
         self.division_idx = xshape[-1] // 2
     x = K.reshape(x, (-1, xshape[-1]))
     x /= K.sqrt(K.sum(K.square(x), axis=0, keepdims=True))
     xx = K.sum(x[:,:self.division_idx] * x[:,self.division_idx:], axis=0)
     return self.gamma * K.sqrt(K.sum(K.square(xx)) + K.epsilon())
Example #27
def metrics_mape(rate_true, rate_pred):
    if args.norm_ans:
        rate_true = denormalize(rate_true, rate_mean, rate_std)
        rate_pred = denormalize(rate_pred, rate_mean, rate_std)
    diff = K.abs((rate_true - rate_pred) / K.clip(K.abs(rate_true),
                                                  K.epsilon(),
                                                  None))
    return 100. * K.mean(diff, axis=-1)
Example #28
def precision(y_true, y_pred):
    '''Calculates the precision, a metric for multi-label classification of
    how many selected items are relevant.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #29
def recall(y_true, y_pred):
    '''Calculates the recall, a metric for multi-label classification of
    how many relevant items are selected.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #30
    def myloss(self, y_true, y_pred):
        # scale preds so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # calc
        loss = y_true * K.log(y_pred) * self.weights
        loss = -K.sum(loss, -1)
        return loss
Example #31
def pc_class_accs(y_true, y_pred):
    # accuracy on +1 class is the recall
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #32
 def __call__(self, p):
     norms = K.sqrt(K.sum(K.square(p), axis=self.axis, keepdims=True))
     desired = K.clip(norms, 0, self.m)
     p = p * (desired / (K.epsilon() + norms))
     return p
Example #33
# This file contains my custom loss functions I experimented with
# look at charbonnier() to see the loss function I decided to go with

import numpy as np
from keras import objectives
from keras import backend as K

_EPSILON = K.epsilon()

def charbonnier(y_true, y_pred):
    return K.sqrt(K.square(y_true - y_pred) + 0.01**2)

def huber(y_true, y_pred):
    d = y_true - y_pred
    a = .5 * K.square(d)
    b = 0.1 * (K.abs(d) - 0.1 / 2.)
    l = K.switch(K.abs(d) <= 0.1, a, b)
    return K.sum(l, axis=-1)


def smooth_huber(y_true, y_pred):
    # log-cosh loss built from backend ops (np.cosh does not apply to symbolic tensors)
    d = y_true - y_pred
    return K.mean(K.log((K.exp(d) + K.exp(-d)) / 2.))

def binary_crossentropy_test(y_true, y_pred):
    y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
    out = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(out, axis=-1)
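A brief numeric check of charbonnier() on made-up values: when the prediction matches the target the loss bottoms out at the 0.01 smoothing constant, and for larger residuals it approaches the absolute error.

y_t = K.constant([[1.0, 2.0]])
y_p = K.constant([[1.0, 2.5]])
print(K.eval(charbonnier(y_t, y_p)))  # -> approximately [[0.01, 0.5001]]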
Example #34
def euclidean_distance(vects):
    x, y = vects
    # x = tf.Print(x,[tf.shape(x)])
    return K.sqrt(
        K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
Example #35
 def cosine_similarity(self):
     dot = lambda a, b: K.batch_dot(a, b, axes=1)
     return lambda x: dot(x[0], x[1]) / K.maximum(
         K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1])), K.epsilon())
Example #36
def root_mean_squared_logarithmic_error(y_true, y_pred):
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.sqrt(
        K.mean(K.square(first_log - second_log), axis=-1) + 0.0000001)
Example #37
def _mdn_loss_tensor(y_true, w, mu, sigma):
    """Gaussian mixture loss for 1D output."""
    phi = _mdn_phi_tensor(y_true, mu, sigma)
    E = -K.log(K.sum(w * phi, axis=1) + K.epsilon())
    return E
Example #38
def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example #39
 def call(self, inputs, **kwargs):
     return K.sqrt(K.sum(K.square(inputs), -1) + K.epsilon())
Example #40
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
Example #41
 def objective_function_for_policy(y_true, y_pred):
     # can use categorical_crossentropy??
     return K.sum(-y_true * K.log(y_pred + K.epsilon()), axis=-1)
Example #42
def f1(y_true, y_pred):
    precisionx = precision(y_true, y_pred)
    recallx = recall(y_true, y_pred)
    return 2*((precisionx*recallx)/(precisionx+recallx+K.epsilon()))
Example #43
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #44
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #45
import numpy as np
from keras.models import model_from_json
from keras import backend as k
from keras.utils import generic_utils
import setGPU
import time

# Parameter initialization
img_w = 224
img_h = 224
channels = 3
batch_size = 16
weight_decay = 5e-4
classes = 332
gamma = 0.5
kLambda = 0.001
epsilon = k.epsilon()
k_t = k.epsilon()
Current_Refined = np.zeros([batch_size, img_w, img_h, channels], dtype=np.float32)

# Read training data
# GT file list
GT_file = open("/home/zhaojian/Keras/GAN_APPLE/lists/IJBA_split1_finetune.txt", "r")
GT_lines = GT_file.readlines()
GT_file.close()
N_GT = len(GT_lines)
GT_file_list = []
for i in range(N_GT):
    GT_file_list.append(GT_lines[i].split()[0])
# Syn file list + Syn label
Syn_file = open("/home/zhaojian/Keras/GAN_APPLE/lists/IJBA_split1_syn_rand.txt", "r")
Syn_lines = Syn_file.readlines()
Example #46
def _mdn_phi_tensor(y_true, mu, sigma):
    inv_sigma_2 = 1 / K.square(sigma + K.epsilon())
    phi = inv_sigma_2 * K.exp(-inv_sigma_2 * K.square(y_true - mu))
    return phi
Example #47
def r2(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    r2 = 1 - SS_res / (SS_tot + K.epsilon())
    return r2
Example #48
 def __call__(self, p):
     return p / (K.epsilon() +
                 K.sqrt(K.sum(K.square(p), axis=self.axis, keepdims=True)))
Example #49
def euclidean_distance(vectors):
    vector1, vector2 = vectors
    sum_square = K.sum(K.square(vector1 - vector2), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
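A hedged sketch of how a distance function like this is typically used inside a siamese network, wrapped in a Lambda layer; the two 16-dimensional input branches below are placeholders, not part of the original example.

from keras.layers import Input, Lambda
from keras.models import Model

input_a = Input(shape=(16,))
input_b = Input(shape=(16,))
distance = Lambda(euclidean_distance)([input_a, input_b])
siamese = Model([input_a, input_b], distance)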
Example #50
def kld(y_true, y_pred):
    y_true = K.clip(y_true, K.epsilon(), 1)
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
Example #51
def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
Example #52
from keras import backend

def r_square(y_obs, y_pred):
    SS_res = backend.sum(backend.square(y_obs - y_pred))
    SS_tot = backend.sum(backend.square(y_obs - backend.mean(y_obs)))
    return (1 - SS_res / (SS_tot + backend.epsilon()))
Example #53
def coeff_determination(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res / (SS_tot + K.epsilon()))
Example #54
def R2(y_true, y_pred):
    ''' R^2 (coefficient of determination) regression score function. To avoid NaN, K.epsilon() is added to the denominator. '''
    SS_num = K.sum(K.square(y_true - y_pred))
    SS_den = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_num / (SS_den + K.epsilon())
Example #55
 def newcos_similarity(self, y_true, y_pred):
     y_true = K.l2_normalize(y_true, axis=1)
     y_pred = K.l2_normalize(y_pred, axis=1)
     mat_dot = K.dot(y_true, K.transpose(y_pred))
     dp = tf.diag_part(mat_dot)
     return K.sum(dp) + K.epsilon()
Example #56
def fmeasure_onehot(y_true, y_pred):
    '''Custom implementation of the keras fmeasure metric to work with
    one-hot encoded outputs.'''
    p = precision_onehot(y_true, y_pred)
    r = recall_onehot(y_true, y_pred)
    return 2 * (p * r) / (p + r + K.epsilon())
Example #57
def log_loss(y_true, y_pred):
    '''Keras 'loss' function for the REINFORCE algorithm, where y_true is the action that was
    taken, and updates with the negative gradient will make that action more likely. We use the
    negative gradient because keras expects training data to minimize a loss function.
    '''
    return -y_true * K.log(K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon()))
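A hedged sketch of the REINFORCE usage described in the docstring: the policy network is compiled with this loss and the discounted returns are passed as sample weights, so each -log pi(a|s) term is scaled by its return. All sizes, names, and data below are placeholders, not part of the original example.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

state_dim, n_actions = 4, 2
policy = Sequential([Dense(n_actions, input_dim=state_dim, activation='softmax')])
policy.compile(optimizer='adam', loss=log_loss)

states = np.random.rand(8, state_dim)
actions_one_hot = np.eye(n_actions)[np.random.randint(n_actions, size=8)]
returns = np.random.rand(8)
policy.fit(states, actions_one_hot, sample_weight=returns, epochs=1, verbose=0)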
Example #58
def f1(y_true, y_pred):
    precision = pp(y_true, y_pred)
    recall = se(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
Example #59
def coeff_determination(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
Example #60
def jaccard_coef_int(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    intersection = K.sum(y_true * y_pred_pos, axis=[0, -1])
    sum_ = K.sum(y_true + y_pred, axis=[0, -1])
    jac = (intersection + K.epsilon()) / (sum_ - intersection + K.epsilon())
    return K.mean(jac)