Example #1
# Assumed imports for this Theano-backend snippet (not shown in the original):
import theano.tensor as T
import lasagne
from keras import backend as K
from keras.objectives import categorical_crossentropy

def build_train_fn(model):
    # cost
    lr = T.scalar()
    labels = K.placeholder(ndim=2, dtype='int32')
    ob_input = model.inputs[0]
    raw_softmax_outputs = model.outputs[0]

    # (batch, time, classes) -> (batch*time, classes) so the 2-D labels line up
    softmax_outputs = raw_softmax_outputs.dimshuffle((2, 0, 1))
    softmax_outputs = softmax_outputs.reshape((softmax_outputs.shape[0],
                                               softmax_outputs.shape[1] * softmax_outputs.shape[2]))
    softmax_outputs = softmax_outputs.dimshuffle((1, 0))

    cost = categorical_crossentropy(softmax_outputs, labels).mean()

    # gradients, clipped to a total norm of 100, with Nesterov momentum updates
    trainable_vars = model.trainable_weights
    grads = K.gradients(cost, trainable_vars)
    grads = lasagne.updates.total_norm_constraint(grads, 100)
    updates = lasagne.updates.nesterov_momentum(grads, trainable_vars, lr, 0.99)

    # merge the model's own updates (e.g. batch-norm statistics)
    for key, val in model.updates:
        updates[key] = val

    # train_fn
    train_fn = K.function([ob_input, labels, K.learning_phase(), lr],
                          [softmax_outputs, cost],
                          updates=updates)

    return train_fn
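
A minimal usage sketch (hypothetical names: `model` is a recurrent softmax model, `obs` and `flat_labels` are NumPy batches shaped to match the placeholders above):

train_fn = build_train_fn(model)
# inputs: observations, int32 labels, learning_phase=1 (training), learning rate
step_outputs, step_cost = train_fn([obs, flat_labels, 1, 0.001])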
Example #2
    def starGAN_train(self, D_lr, G_lr, lamda_gp, lamda_cls, lamda_rec):

        x_real = Input(shape=self.image_size)
        label_real = Input(shape=(self.n_class,))
        label_fake = Input(shape=(self.n_class,))
        label_real_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        label_fake_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        x_fake = self.generator([x_real, label_fake_matrix])

        # loss for discriminator
        d_out_src_real, d_out_cls_real = self.discriminator(x_real)
        d_loss_real = -K.mean(d_out_src_real)
        d_loss_cls = K.mean(K.categorical_crossentropy(label_real, d_out_cls_real))
        # compute classification accuracy
        label_sub = d_out_cls_real - label_real
        c1 = 1 + K.min(label_sub, axis=1)  # minimum confidence among classes whose label is 1
        c2 = K.max(label_sub, axis=1)  # maximum confidence among classes whose label is 0
        d_acc = K.mean(K.cast(K.greater(c1 - c2, 0), K.floatx()))  # correct iff every label-1 confidence exceeds every label-0 confidence
        # label_pred = K.cast(K.greater(K.clip(d_out_cls_real, 0, 1), 0.5), K.floatx())
        # d_acc = 1 - K.mean(K.clip(K.sum(K.abs(label_real - label_pred), axis=1), 0, 1))
        d_out_src_fake, d_out_cls_fake = self.discriminator(x_fake)
        d_loss_fake = K.mean(d_out_src_fake)

        # gradient penalty
        e = K.placeholder(shape=(None, 1, 1, 1))
        x_mixed = Input(shape=self.image_size, tensor=e * x_real + (1 - e) * x_fake)
        x_mixed_gradient = K.gradients(self.discriminator(x_mixed), [x_mixed])[0]
        x_mixed_gradient_norm = K.sqrt(K.sum(K.square(x_mixed_gradient), axis=[1, 2, 3]))  # per-sample L2 norm, not over the batch axis
        gradient_penalty = K.mean(K.square(x_mixed_gradient_norm - 1))

        d_loss = d_loss_real + d_loss_fake + lamda_gp * gradient_penalty + lamda_cls * d_loss_cls
        d_training_updates = RMSprop(lr=D_lr).get_updates(d_loss, self.discriminator.trainable_weights)
        D_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix, e], [d_loss, d_acc], d_training_updates)

        # loss for generator
        x_rec = self.generator([x_fake, label_real_matrix])
        g_out_src_fake, g_out_cls_fake = self.discriminator(x_fake)
        g_loss_fake = -K.mean(g_out_src_fake)
        g_loss_rec = K.mean(K.abs(x_real - x_rec))
        g_loss_cls = K.mean(K.categorical_crossentropy(label_fake, g_out_cls_fake))

        g_loss = g_loss_fake + lamda_rec * g_loss_rec + lamda_cls * g_loss_cls
        g_training_updates = RMSprop(lr=G_lr).get_updates(g_loss, self.generator.trainable_weights)
        G_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix], [g_loss], g_training_updates)

        return D_train, G_train
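
A hypothetical single training step for the pair of functions above (the batch arrays, `batch_size`, and the gradient-penalty epsilon are assumptions):

eps = np.random.uniform(size=(batch_size, 1, 1, 1))  # per-sample mixing coefficients
d_loss, d_acc = D_train([x_batch, y_real, y_real_mat, y_fake, y_fake_mat, eps])
g_loss, = G_train([x_batch, y_real, y_real_mat, y_fake, y_fake_mat])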
Example #3
    def loss(y_true, y_pred):
#        nb_cl = len(weights)
#        final_mask = K.zeros_like(y_pred[:, 0])
#        y_pred_max = K.max(y_pred, axis=1, keepdims=True)
#        y_pred_max_mat = K.equal(y_pred, y_pred_max)
#        for c_p, c_t in product(range(nb_cl), range(nb_cl)):
#            final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])
#        return K.categorical_crossentropy(y_pred, y_true) * final_mask
        return K.categorical_crossentropy(y_pred, y_true)  # Keras 1 argument order: (output, target)
Example #4
# assumes: from itertools import product; from keras import backend as K
def w_categorical_crossentropyold(y_true, y_pred, weights):
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)  # per-sample max over classes (axis=1; the original's axis=0 took the max over the batch)
    y_pred_max = K.reshape(y_pred_max, (K.shape(y_pred)[0], 1))
    y_pred_max_mat = K.cast(K.equal(y_pred, y_pred_max), K.floatx())
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])
    return K.categorical_crossentropy(y_pred, y_true) * final_mask
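
Keras only passes (y_true, y_pred) to a loss, so the weight matrix must be bound beforehand. A sketch, with a hypothetical 3x3 penalty matrix `w_array`:

from functools import partial, update_wrapper

w_array = np.ones((3, 3))
w_array[1, 2] = 5.0  # penalize predicting class 2 when the true class is 1
wcce = partial(w_categorical_crossentropyold, weights=w_array)
update_wrapper(wcce, w_categorical_crossentropyold)  # give the partial a __name__ for Keras
model.compile(optimizer='adam', loss=wcce)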
Example #5
def amsoftmax_loss(y_true, y_pred, scale=30, margin=0.35):
    y_pred = y_true * (y_pred - margin) + (1 - y_true) * y_pred
    y_pred *= scale
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True)
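
Note that amsoftmax_loss assumes y_pred contains cosine similarities (outputs of an L2-normalized dense layer), not probabilities; from_logits=True applies the softmax internally. A hypothetical compile call:

model.compile(optimizer='adam', loss=amsoftmax_loss, metrics=['accuracy'])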
Example #6
 def custom_loss(ytrue, ypred):
     # `alpha` and `num_classes` come from the enclosing scope; ytrue packs two label sets side by side
     return alpha * K.categorical_crossentropy(ytrue[:, :num_classes], ypred) \
         + (1. - alpha) * K.categorical_crossentropy(ytrue[:, num_classes:], ypred)
Example #7
 def loss_advmodel(z_true, z_pred):
     return c * K.categorical_crossentropy(z_true, z_pred)
Example #8
def rawloss(y_true, y_pred):
    return K.mean(K.categorical_crossentropy(y_true, y_pred))
Example #9
def cc_loss(y_true, y_pred):
    return K.mean(K.categorical_crossentropy(y_pred, y_true))  # note: (output, target) order, unlike rawloss above
Example #10
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(
        categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #11
def adv_loss2(y_true, y_pred):
    current_loss = -K.categorical_crossentropy(y_true, y_pred)  # negated, so minimizing it *maximizes* the crossentropy
    return current_loss
Example #12
def RL_policy_loss(y_true, y_pred):
    ''' y_true: 1 for a win, -1 for a loss; bsize: number of games * number of moves '''
    #return K.categorical_crossentropy(y_pred * K.sum(y_true) * bsize , K.abs(y_true)) / 32
    #return K.sum(K.log(K.max(y_pred, axis=0)) * K.sum(y_true, axis=0), axis=-1) / 32.0
    return K.categorical_crossentropy(y_pred, y_true) / 32.0 * float(bsize)
Example #13
def identity_loss(y_true, y_pred):
    return K.categorical_crossentropy(y_true, y_pred)  # K.mean(y_pred - 0 * y_true)
Example #14
def categorical_crossentropy(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''
    return K.categorical_crossentropy(y_pred, y_true)
Example #15
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #16
x = np.load('new_x_pick.npy')
label = np.load('new_y_hat.npy')

#x,label = np.load("data_hat.npz")

model = load_model('new_CNN.h5')
x_hat = x.reshape(-1, 28, 28, 1) / 255
y_hat = to_categorical(label, num_classes=10)

# Build a gradient function once: the original subtracted the *symbolic*
# gradient tensor from a NumPy array, which cannot work.
y_ph = K.placeholder(shape=(None, 10))
loss = K.categorical_crossentropy(y_ph, model.output)
grad = K.gradients(loss, model.input)[0]
grad_fn = K.function([model.input, y_ph], [grad])

for i in range(5):
    #epsilon=0.3
    #above=x+epsilon
    #below=x-epsilon
    model.fit(x_hat, y_hat, epochs=1, steps_per_epoch=1)
    gra = grad_fn([x_hat, y_hat])[0]
    x_hat -= 0.02 * gra  # gradient step on the inputs themselves

np.save('new_x_hat.npy', x_hat)  # the original saved the untouched `x` by mistake
"""
x_hat=np.load('x_hat.npy')
history=model.fit(x_hat, y_hat, epochs=30, batch_size=100,)
acc_adv=history.history['acc']
print(acc_adv)
print(type(acc_adv))
"""

x_hat = np.load('new_x_hat.npy')
model = load_model('new_CNN.h5')
x_output = model.predict(x_hat, steps=1)  # evaluate on the perturbed inputs
Example #17
def weight_categorical_crossentropy(y_true, y_pred):
    # per-class penalty weights; the vector length matches the number of classes
    weight = tf.constant([1.0, 1.0, 1.0, 1.0])
    y_coe = y_true * weight
    loss1 = K.categorical_crossentropy(y_coe, y_pred)
    return loss1
Example #18
File: pr3.py Project: tobiagru/LIS
def objective(self, y_true, y_pred):
    return K.categorical_crossentropy(y_pred, y_true)
Example #19
 def identity_loss_v3(y_true, y_pred):
     # `select` and the class count 30 come from the enclosing scope
     y_true_reshaped = K.mean(K.reshape(y_true, (-1, select, 30)), axis=1)
     y_pred_reshaped = K.softmax(K.mean(K.reshape(y_pred, (-1, select, 30)), axis=1))
     final_val = K.mean(K.categorical_crossentropy(y_pred_reshaped, y_true_reshaped))
     return final_val + y_pred * 0  # the `+ 0 * y_pred` term keeps y_pred in the graph so gradients flow
Example #20
def launch_adversarial_training(training_params):
    """
    Load the data, and train a Keras model.

    :param training_params: a TrainingParams object which contains each parameter of the training
    :return:
    """
    if os.path.exists(training_params.path_out) is False:
        os.mkdir(os.path.abspath(training_params.path_out))

    ###### LOADING VALIDATION DATA #######
    validset, valid_targets = load_dataset_in_memory_and_resize(training_params.data_access, "valid", training_params.dataset_path,
                                                                training_params.targets_path, training_params.final_size,
                                                                training_params.final_size, training_params.test_batch_size)
    valid_targets = convert_labels(valid_targets)

    ###### Preprocessing VALIDATION DATA #######
    for mode in training_params.valid_preprocessing:
        validset = preprocess_dataset(validset, training_params, mode)
    # Transpose validset >> (N, channel, X, Y)
    validset = validset.transpose(0,3,1,2)
    # Multiple input ?
    if training_params.multiple_inputs>1:
        validset = [validset for i in range(training_params.multiple_inputs)]

    ###### MODEL INITIALIZATION #######
    with timer("Model initialization"):
        model = training_params.initialize_model()
    if training_params.pretrained_model is not None:
        with timer("Pretrained Model initialization"):
            pretrained_model = training_params.initialize_pretrained_model()
            training_params.generator_args.append(pretrained_model)
            # preprocessed the validset
            if type(pretrained_model) is list:
                features = []
                for pmodel in pretrained_model:
                    features.append(pmodel.predict(validset))
                validset = np.concatenate(features, axis=1)
            else:
                validset = pretrained_model.predict(validset)

    ###### SAVE PARAMS ######
    s = training_params.print_params()
    # Save command
    f = open(training_params.path_out+"/command.txt", "w")
    f.writelines(" ".join(sys.argv))
    f.writelines(s)
    f.close()
    # Print architecture
    print_architecture(model, path_out=training_params.path_out + "/architecture.txt")

    ###### TRAINING SET #######

    train_dataset = FuelDataset("train", training_params.tmp_size,
                                batch_size=training_params.batch_size,
                                bagging=training_params.bagging_size,
                                bagging_iterator=training_params.bagging_iterator)

    ###### ADVERSARIAL MAPPING ######

    input_ = model.layers[0].input
    y_ = model.y
    layer_output = model.layers[-1].get_output()
    xent = K.categorical_crossentropy(y_, layer_output)
    loss = xent.mean()
    grads = K.gradients(loss, input_)
    get_grads = K.function([input_, y_], [loss, grads])

    ###### TRAINING LOOP #######
    count = training_params.fine_tuning
    epoch_count = 0

    with timer("Training"):
        while training_params.learning_rate >= training_params.learning_rate_min and epoch_count<training_params.nb_max_epoch:

            if count != 0: # Restart from the best model with a lower LR
                model = training_params.initialize_model()
                model.load_weights(training_params.path_out+"/MEM_%d/best_model.cnn"%(count-1))
                # Recompile get_grads
                input_ = model.layers[0].input
                y_ = model.y
                layer_output = model.layers[-1].get_output()
                xent = K.categorical_crossentropy(y_, layer_output)
                loss = xent.mean()
                grads = K.gradients(loss, input_)
                get_grads = K.function([input_, y_], [loss, grads])

            best = 0.0
            patience = training_params.max_no_best
            losses = []
            adv_losses = []
            accuracies = []
            adv_accuracies = []
            valid_losses = []
            valid_accuracies = []
            epoch_count = 0
            no_best_count = 0
            path = training_params.path_out + "/MEM_%d"%count
            if os.path.exists(path) is False:
                os.mkdir(path)
            # Log file
            f = open(path+"/log.txt", "w")
            f.write("LR = %.2f\n"%training_params.learning_rate)
            f.close()
            # Config file
            open(path+"/config.netconf", 'w').write(model.to_json())

            while no_best_count < patience and epoch_count < training_params.nb_max_epoch:
                new = True
                loss = 0.0
                adv_loss = 0.0
                accuracy = 0.0
                adv_accuracy = 0.0
                # Trainset Loop
                N = training_params.Ntrain/(training_params.batch_size*1)
                for i in range(N):
                    # Train
                    print "\rEpoch %d : Batch %d over %d"%(epoch_count, i, N),
                    processed_batch, labels = get_next_batch(train_dataset, training_params.batch_size,
                                                             training_params.final_size,
                                                             training_params.preprocessing_func,
                                                             training_params.preprocessing_args)
                    l, acc = model.train_on_batch(processed_batch, labels, accuracy=True)
                    # Update stats
                    if new:
                        loss = l
                        accuracy = acc
                    else:
                        loss = 0.9*loss + 0.1*l
                        accuracy = 0.9*accuracy + 0.1*acc
                    # Get adversarial examples
                    l, grads = get_grads([processed_batch, labels])
                    updates = np.sign(grads)
                    adversarials = processed_batch + updates
                    # Train on adv examples
                    adv_l, adv_acc = model.train_on_batch(adversarials, labels, accuracy=True)
                    # Update stats
                    if new:
                        adv_loss = adv_l
                        adv_accuracy = adv_acc
                        new = False
                    else:
                        adv_loss = 0.9*adv_loss + 0.1*adv_l
                        adv_accuracy = 0.9*adv_accuracy + 0.1*adv_acc
                # Store stats
                losses.append(loss)
                accuracies.append(accuracy)
                adv_losses.append(adv_loss)
                adv_accuracies.append(adv_accuracy)
                # Validset loss and accuracy
                out = model.predict(validset)
                valid_loss = categorical_crossentropy(valid_targets, out)
                # use a fresh name: `count` is the fine-tuning counter used for the MEM_%d paths below
                n_correct = np.sum(np.argmax(valid_targets, axis=1) - np.argmax(out, axis=1) == 0)
                score = float(n_correct)/valid_targets.shape[0]
                valid_losses.append(valid_loss)
                valid_accuracies.append(score)

                # Stop criterion and Save model
                string = "***\nEpoch %d: Loss : %0.5f, Adv loss : %0.5f, Valid loss : %0.5f, " \
                         "Acc : %0.5f, Adv acc : %0.5f, Valid acc : %0.5f"%(epoch_count, losses[-1], adv_losses[-1],
                                                                            valid_losses[-1], accuracies[-1],
                                                                            adv_accuracies[-1], valid_accuracies[-1])
                if score > best:
                    no_best_count = 0
                    save_path = path+"/best_model.cnn"
                    if training_params.verbose>0:
                        string = string +"\tBEST\n"
                        print string
                        write_log(path+"/log.txt", string)
                    best = score
                    model.save_weights(save_path, overwrite=True)
                else:
                    no_best_count += 1
                    save_path = path+"/last_epoch.cnn"
                    if training_params.verbose>0:
                        string = string + "\n"
                        print string
                        write_log(path+"/log.txt", string)
                    model.save_weights(save_path, overwrite=True)
                epoch_count += 1

            # Update learning rate
            training_params.learning_rate *= 0.1
            training_params.update_model_args()
            with open(path + "/history.pkl","w") as f:
                pickle.dump(losses,f)
                pickle.dump(adv_losses,f)
                pickle.dump(valid_losses,f)
                pickle.dump(accuracies,f)
                pickle.dump(adv_accuracies,f)
                pickle.dump(valid_accuracies,f)
            count += 1
Example #21
def nllk_categorical(y_true, y_pred, time_norm=True):
    T = 1
    if not time_norm:
        T = get_seq_length(y_true)
    return T * K.categorical_crossentropy(y_true, y_pred)
Example #22
 def cat_cross(y_true, y_pred):
     '''A test of custom loss function
     '''
     return K.categorical_crossentropy(y_pred, y_true)
Example #23
def loss_function(category_size, attribute_size, y_true, y_pred):
    category_loss = K.categorical_crossentropy(y_true[:,:category_size], y_pred[:,:category_size])
    attribute_loss = K.binary_crossentropy(y_true[:, category_size:], y_pred[:, category_size:])
    return category_loss + K.mean(attribute_loss, -1)
Example #24
# Assumed imports and setup (the snippet starts mid-script; Keras 1.x helpers):
from keras import backend as K
from keras.initializations import glorot_uniform, zero
from keras.optimizers import Adam
from keras.metrics import categorical_accuracy
from keras.datasets import mnist

input_dim, output_dim = 784, 10
x = K.placeholder(name="x", shape=(None, input_dim))
ytrue = K.placeholder(name="y", shape=(None, output_dim))

# model parameters are variables
hidden_dim = 128
W1 = glorot_uniform((input_dim, hidden_dim))
b1 = zero((hidden_dim, ))
W2 = glorot_uniform((hidden_dim, output_dim))
b2 = zero((output_dim, ))
params = [W1, b1, W2, b2]

# two-layer model
hidden = K.sigmoid(K.dot(x, W1) + b1)
ypred = K.softmax(K.dot(hidden, W2) + b2)

# categorical cross entropy loss
loss = K.mean(K.categorical_crossentropy(ytrue, ypred), axis=None)

# categorical accuracy
accuracy = categorical_accuracy(ytrue, ypred)

# Train function (Keras 1 get_updates signature: params, constraints, loss)
opt = Adam()
updates = opt.get_updates(params, [], loss)
train = K.function([x, ytrue], [loss, accuracy], updates=updates)

# Test function
test = K.function([x, ytrue], [loss, accuracy])

# Load and flatten the data (loop variable renamed so it does not clobber the placeholder `x`)
((xtrain, ytrain), (xtest, ytest)) = mnist.load_data()
(xtrain, xtest) = [arr.reshape((-1, input_dim)) / 255.0 for arr in (xtrain, xtest)]
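
The snippet ends after loading the data; a minimal loop to exercise the two functions (the one-hot encoding and batch size of 128 are assumptions):

from keras.utils.np_utils import to_categorical

ytrain_cat = to_categorical(ytrain, output_dim)
ytest_cat = to_categorical(ytest, output_dim)
for epoch in range(5):
    for i in range(0, len(xtrain), 128):
        train([xtrain[i:i + 128], ytrain_cat[i:i + 128]])
    print(test([xtest, ytest_cat]))  # [loss, accuracy] on the held-out set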
Example #25
def mycrossentropy(y_true, y_pred, e=0.1, nb_classes=2):
    return (1 - e) * K.categorical_crossentropy(
        y_pred, y_true) + e * K.categorical_crossentropy(
            y_pred,
            K.ones_like(y_pred) / nb_classes)
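
Since cross-entropy is linear in its target distribution, the mixture above is exactly label smoothing: one cross-entropy against softened targets. An equivalent sketch (keeping the original's (output, target) argument order):

def mycrossentropy_smoothed(y_true, y_pred, e=0.1, nb_classes=2):
    y_smooth = (1 - e) * y_true + e / nb_classes
    return K.categorical_crossentropy(y_pred, y_smooth)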
Example #26
 def catcross_by_string(self, target, output):
     loss = 0
     for i in range(self.num_strings):
         loss += K.categorical_crossentropy(target[:,i,:], output[:,i,:])
     return loss
Example #27
# The opening of this snippet was truncated; a plausible reconstruction,
# with assumed sizes and learning rate, is sketched below
# (assumes: from keras import backend as K).
num_units = [256, 128]  # assumed hidden-layer widths
lr = 0.01               # assumed learning rate
input_tensor = K.placeholder(shape=(None, 784), dtype='float32')
target_tensor = K.placeholder(shape=(None, 10), dtype='float32')
weight_variable_1 = K.random_normal_variable(shape=(784, num_units[0]), mean=0.0, scale=0.05,
                                             dtype='float32')
weight_variable_2 = K.random_normal_variable(shape=(num_units[0], num_units[1]), mean=0.0, scale=0.05,
                                             dtype='float32')
weight_variable_3 = K.random_normal_variable(shape=(num_units[1], 10), mean=0.0, scale=0.05,
                                             dtype='float32')
bias_variable_1 = K.zeros(shape=(num_units[0], ), dtype='float32')
bias_variable_2 = K.zeros(shape=(num_units[1], ), dtype='float32')
bias_variable_3 = K.zeros(shape=(10, ), dtype='float32')
# define sigmoid output tensor

l1_tensor = K.dot(input_tensor, weight_variable_1) + bias_variable_1
l1_tensor = K.sigmoid(l1_tensor)
l2_tensor = K.dot(l1_tensor, weight_variable_2) + bias_variable_2
l2_tensor = K.sigmoid(l2_tensor)
output_tensor = K.dot(l2_tensor, weight_variable_3) + bias_variable_3
output_tensor = K.sigmoid(output_tensor)
# loss tensor
loss_tensor = K.mean(K.categorical_crossentropy(target_tensor, output_tensor))

# gradient
gradient_tensors_1 = K.gradients(
    loss=loss_tensor, variables=[weight_variable_1, bias_variable_1])
gradient_tensors_2 = K.gradients(
    loss=loss_tensor, variables=[weight_variable_2, bias_variable_2])
gradient_tensors_3 = K.gradients(
    loss=loss_tensor, variables=[weight_variable_3, bias_variable_3])
# manual SGD updates: each parameter moves against its gradient
updates = [(weight_variable_1, weight_variable_1 - lr * gradient_tensors_1[0]),
           (bias_variable_1, bias_variable_1 - lr * gradient_tensors_1[1]),
           (weight_variable_2, weight_variable_2 - lr * gradient_tensors_2[0]),
           (bias_variable_2, bias_variable_2 - lr * gradient_tensors_2[1]),
           (weight_variable_3, weight_variable_3 - lr * gradient_tensors_3[0]),
           (bias_variable_3, bias_variable_3 - lr * gradient_tensors_3[1])]
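
Wiring the manual updates into a runnable train step (a sketch that relies on the placeholders reconstructed at the top of this example; `x_batch` and `y_batch` are hypothetical NumPy arrays):

train_step = K.function([input_tensor, target_tensor],
                        [loss_tensor],
                        updates=updates)
batch_loss, = train_step([x_batch, y_batch])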
Example #28
def cat_crossentropy_from_logit(
    target,
    output,
):
    return categorical_crossentropy(target, output, from_logits=True)
Example #29
def masked_categorical_crossentropy(y_true, y_pred):
    mask = K.equal(y_true[..., 0], K.variable(1))
    mask = 1 - K.cast(mask, K.floatx())

    loss = K.categorical_crossentropy(y_true, y_pred) * mask
    return loss
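
A quick numeric check of the masking convention (an illustrative sketch): rows whose class-0 entry is 1 are treated as "ignore" and contribute zero loss.

y_t = K.constant([[1., 0., 0.],    # masked out
                  [0., 1., 0.]])   # kept
y_p = K.constant([[0.2, 0.5, 0.3],
                  [0.1, 0.8, 0.1]])
print(K.eval(masked_categorical_crossentropy(y_t, y_p)))  # ~[0.0, 0.223]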
Example #30
def c_x(index, actual):
    return (
        (1/get_neighbors(index)) *
        K.categorical_crossentropy(g_theta(EDGE_MATRIX.loc[index]), actual)
    )
Example #31
def kd_loss(y_true, y_pred):
    return K.categorical_crossentropy(y_pred, y_true, from_logits=True)
Example #32
 def loss(y_true, y_pred):
     return K.categorical_crossentropy(y_true, y_pred) + mse(
         get_gradients(model, X[batch * 128:(batch + 1) * 128],
                       y[batch * 128:(batch + 1) * 128]), pre_grad)
Example #33
 def call(self, x):
     # label, logit
     return K.categorical_crossentropy(x[1],
                                       x[0],
                                       from_logits=self._from_logits)
Example #34
 def selective_loss(y_true, y_pred):
     loss = K.categorical_crossentropy(
         K.repeat_elements(y_pred[:, -1:], self.num_classes, axis=1) *
         y_true[:, :-1], y_pred[:, :-1]) + lamda * K.maximum(
             -K.mean(y_pred[:, -1]) + c, 0)**2
     return loss
Example #35
 def loss(y_true, y_pred):
     # cast the boolean mask so it can scale the float loss (`mask` comes from the enclosing scope)
     return K.cast(K.equal(mask, 2), K.floatx()) * K.categorical_crossentropy(y_pred, y_true)
Example #36
 def lp_categ_of(y_true, y_pred):
     # return K.squeeze(K.dot(y_true, K.expand_dims(K.variable(weights))), -1)
     return K.sum(weights*y_true,axis=1)*K.categorical_crossentropy(y_true, y_pred)
Example #37
def class_loss(y_true, y_pred):
    current_loss = K.sum(K.categorical_crossentropy(y_true, y_pred))
    return current_loss
Example #38
 def crossentropy(y_true, y_pred):
     loss = K.categorical_crossentropy(y_true, y_pred) + l1 * K.mean(
         K.abs(y_true - y_pred)) + l2 * K.mean(K.square(y_true - y_pred))
     return loss
Example #39
 def __call__(self, y_true, y_pred):
     w = y_true[:, :, :, -1]
     y_true = y_true[:, :, :, :-1]
     loss = K.categorical_crossentropy(y_true, y_pred, from_logits=True) * w
     return loss
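
To use this loss the per-pixel weight map travels as an extra final channel of y_true, giving it shape (batch, H, W, n_classes + 1). A sketch with hypothetical arrays `one_hot_masks` (N, H, W, C) and `weight_map` (N, H, W):

y_true_packed = np.concatenate([one_hot_masks, weight_map[..., None]], axis=-1)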
Example #40
def customized_loss(y_true, y_pred):
    return (1 * K.categorical_crossentropy(y_true, y_pred)) + (
        0.5 * dice_loss(y_true, y_pred))
Example #41
# -*- coding: utf-8 -*-
# @Author  : qiaohezhe
# @github : https://github.com/fengduqianhe
# @Date    :  2020/3/4
# version: Python 3.7.8
# @File : test.py
# @Software: PyCharm

import tensorflow as tf
import numpy as np
import math

target = np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 0.])  # float, matching the prediction dtype
predict = np.array([0.1, 0.6, 0.3, 0, 0, 0, 0, 0, 0, 0])
predict = tf.convert_to_tensor(predict)
from keras import backend as K
loss = K.categorical_crossentropy(target=target,
                                  output=predict,
                                  from_logits=False)
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred), axis=1))
#loss = tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=target)

with tf.Session() as sess:
    print(sess.run(loss))
    print(-math.log(0.6))  # the expected loss for a one-hot target on class 1
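
Sanity check: for a one-hot target on class 1 the loss reduces to -ln(0.6) ≈ 0.5108, so the two printed values should agree.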
Example #42
def DARC1(y_true, y_pred):
    y_pred_softmax = K.softmax(y_pred)
    xentropy = K.categorical_crossentropy(y_true, y_pred_softmax)
    reg = K.max(K.sum(K.abs(y_pred), axis=0))
    alpha = 0.001
    return xentropy + alpha * reg
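
The model feeding DARC1 must output raw logits: the loss applies its own softmax and then penalizes the largest per-class sum of absolute logits over the batch. A hypothetical compile call:

model.compile(optimizer='sgd', loss=DARC1)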
Example #43
def biased_cce(y_true, y_pred):
    return K.mean(K.categorical_crossentropy(y_pred, label_normalization * y_true), axis=-1)
Example #44
 def part(y_true, y_pred):
     y_pred = K.reshape(y_pred, (-1, PROPOSAL_NUM, num_cls))
     y_true = K.repeat_elements(K.expand_dims(y_true, axis=1),
                                PROPOSAL_NUM,
                                axis=1)
     return K.mean(K.categorical_crossentropy(y_true, y_pred))
Example #45
 def softmax_categorical_crossentropy(y_true, y_pred):
     """
     Uses categorical cross-entropy from logits in order to improve numerical stability.
     This is especially useful for TensorFlow (less useful for Theano).
     """
     return K.categorical_crossentropy(y_pred, y_true, from_logits=True)
Example #46
def categorical_crossentropy(y_true, y_pred):
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True)