Example #1
def test_cce_one_hot():
    y_a = K.variable(np.random.randint(0, 7, (5, 6)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
    assert K.eval(objective_output).shape == (5, 6)

    y_a = K.variable(np.random.randint(0, 7, (6,)))
    y_b = K.variable(np.random.random((6, 7)))
    assert K.eval(losses.sparse_categorical_crossentropy(y_a, y_b)).shape == (6,)
Example #2
def test_cce_one_hot():
    y_a = K.variable(np.random.randint(0, 7, (5, 6)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    objective_output = sparse_categorical_crossentropy(y_a, y_b)
    assert K.eval(objective_output).shape == (5, 6)

    y_a = K.variable(np.random.randint(0, 7, (6,)))
    y_b = K.variable(np.random.random((6, 7)))
    assert K.eval(sparse_categorical_crossentropy(y_a, y_b)).shape == (6,)
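Both tests above exercise the same shape contract: the integer labels have one
less dimension than the predictions, and the returned loss drops the class
axis. A minimal standalone check of that contract, assuming TensorFlow 2.x
(tf.keras) rather than the standalone Keras backend used above:

import numpy as np
import tensorflow as tf

y_true = np.array([1, 2])                # integer class ids, shape (2,)
y_pred = np.array([[0.3, 0.6, 0.1],
                   [0.1, 0.2, 0.7]])     # class probabilities, shape (2, 3)
loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
print(loss.shape)  # (2,): one loss value per sample, class axis reduced away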
Example #3
    def sample(self, model):
        """
            Sort the loss values of the training samples.
            Variables
        """
        idx = np.random.choice(np.arange(0, x_train.shape[0]),
                               size=args.fwd_batch_size,
                               replace=False)
        if args.dataset == "ptb":
            res = model.predict_proba(x_train[idx])
            print("res.shape", res.shape)
            res = K.get_value(
                sparse_categorical_crossentropy(
                    tf.convert_to_tensor(y_train[idx], np.float32),
                    tf.convert_to_tensor(res)))
            print("res.shape", res.shape)

        else:
            res = model.predict_proba(x_train[idx])
            print(y_train.shape)
            print(res.shape)
            res = K.get_value(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_train[idx],
                                                        logits=res))
        res = res / np.sum(res)
        return np.random.choice(idx,
                                size=args.batch_size,
                                replace=False,
                                p=res)
Example #4
    def crf_loss(self, y_true, y_pred):
        """General CRF loss function depanding on the learning mode.
        
        # Arguments
            y_true: tensor with true targets.
            y_pred: tensor with predicted targets.
            
        # Returns
            If the CRF layer is being trained in the join mode, returns the negative
            log-likelihood. Otherwise returns the categorical crossentropy implemented
            by the underlying Keras backend.
            
        # Source
            Adapted from keras_contrib.losses.crf_losses
            (keras-team/keras-contrib), with some details changed.
        """

        crf, idx = y_pred._keras_history[:2]
        if crf.learn_mode == "join":
            return self.crf_nll(y_true, y_pred)
        else:
            if crf.sparse_target:
                return sparse_categorical_crossentropy(y_true, y_pred)
            else:
                return categorical_crossentropy(y_true, y_pred)
Example #5
 def __triplet_loss(y_true, y_pred):
     from keras.losses import sparse_categorical_crossentropy
     triplet_contribution = triplet_semihard_loss(
         K.argmax(y_true, axis=1), embeddings)
     classification_contribution = sparse_categorical_crossentropy(
         y_true, y_pred)
     return triplet_contribution + classification_contribution
Example #6
 def custom_loss(y_true, y_pred):
     epsilon = 0.001
     main_loss = losses.sparse_categorical_crossentropy(y_true, y_pred)
     pred_indices = K.argmax(y_pred, axis=-1)
     pred_indices = K.cast(pred_indices, dtype='float32')
     distance_penalty = K.constant(1.0, dtype='float32') / (
         K.abs(pred_indices - K.constant(50 / 2.0, dtype='float32')) + epsilon)
     return main_loss + 5000 * distance_penalty
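For intuition, the distance penalty above is sharply peaked where the predicted
index equals the middle class (here 50 / 2 = 25). A quick NumPy illustration of
that term in isolation:

import numpy as np

epsilon = 0.001
pred_indices = np.array([0.0, 25.0, 49.0])
penalty = 1.0 / (np.abs(pred_indices - 25.0) + epsilon)
print(penalty)  # about [0.04, 1000.0, 0.042]: large only when the prediction is 25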
Example #7
def test_sparse_categorical_crossentropy():
    y_pred = K.variable(np.array([[0.3, 0.6, 0.1],
                                  [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([1, 2]))
    expected_loss = - (np.log(0.6) + np.log(0.7)) / 2
    loss = K.eval(losses.sparse_categorical_crossentropy(y_true, y_pred))
    assert np.isclose(expected_loss, np.mean(loss))
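The hard-coded expectation in this test can be reproduced without any backend.
A minimal NumPy check:

import numpy as np

y_pred = np.array([[0.3, 0.6, 0.1],
                   [0.1, 0.2, 0.7]])
y_true = np.array([1, 2])
# Take each sample's predicted probability for its true class,
# then average the negative logs.
per_sample = -np.log(y_pred[np.arange(len(y_true)), y_true])
print(per_sample.mean())  # about 0.4338, i.e. -(np.log(0.6) + np.log(0.7)) / 2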
Example #9
File: models.py Project: rateixei/HAT
 def loss_D(y_true, y_pred):
     if self.n_disc_outputs == 1:
         # Keras losses take (y_true, y_pred); the original call had them swapped.
         return c * KL.binary_crossentropy(y_true, y_pred)
         # return c * KL.binary_crossentropy(1.001/(K.exp(-y_pred) + 1), y_true)
         # return c * KL.binary_crossentropy(y_pred, y_true)
     else:
         return c * KL.sparse_categorical_crossentropy(y_true, y_pred)
Example #10
def my_loss(y_true, y_pred):
  crossentropy = losses.sparse_categorical_crossentropy(y_true, y_pred, axis=-1)
  # Renamed from `bool`, which shadowed the builtin of the same name.
  ones = tf.ones(shape=(1, 13, 13, 1), dtype=tf.float32)
  mask = tf.math.minimum(ones, y_true)
  mask = tf.squeeze(mask, axis=-1)
  final = mask * crossentropy
  loss = K.sum(final, axis=(1, 2))
  return loss
Example #11
def evaluate(model,
             dataset,
             section,
             adv=None,
             validation_size=5000,
             adv_iterations=40,
             adv_restarts=1,
             verbose=1):
    if dataset == "CIFAR10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    elif dataset == "MNIST":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = np.expand_dims(x_train, axis=-1)
        x_test = np.expand_dims(x_test, axis=-1)
    else:
        raise ValueError("Unrecognised dataset")

    # Leave aside a validation set
    x_valid = x_train[-validation_size:]
    y_valid = y_train[-validation_size:]
    x_train = x_train[:-validation_size]
    y_train = y_train[:-validation_size]

    if section == "train":
        x = x_train
        y = y_train
    elif section == "validation":
        x = x_valid
        y = y_valid
    elif section == "test":
        x = x_test
        y = y_test
    else:
        raise ValueError("Invalid dataset section")

    # Normalize data
    x = x.astype("float32") / 255

    if adv is None or adv == 0:
        return model.evaluate(x, y, verbose=verbose)[1]
    else:
        # Keras gives no easy way of just getting the cross-entropy loss (without the
        # regularisation/rs loss) and it's needed for PGD, so we need to create it again
        model.xent_loss = sparse_categorical_crossentropy(
            model.targets[0], model.outputs[0])

        adv_generator = AdversarialExampleGenerator(model,
                                                    x,
                                                    y,
                                                    batch_size=64,
                                                    epsilon=adv,
                                                    k=adv_iterations,
                                                    a=adv / 10.0,
                                                    incremental=False)

        return model.evaluate_generator(adv_generator,
                                        workers=0,
                                        verbose=verbose)[1]
Example #12
def crf_loss(y_true, y_pred):
    """General CRF loss function, depending on the learning mode."""
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)
Example #13
def scce(y_true, y_pred):
    """Sparse categorical cross-entropy for sparse (integer) labels.

    # Args:
        y_true: (batch_size, height, width, 1) integer class ids.
        y_pred: (batch_size, height, width, num_classes) class probabilities.
    """
    # Flatten, but keep the class axis of y_pred: collapsing it to 1-D
    # would leave nothing to take the cross-entropy over.
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1, tf.shape(y_pred)[-1]])
    return losses.sparse_categorical_crossentropy(y_true_f, y_pred_f)
Example #14
def test_sparse_categorical_crossentropy_4d():
    y_pred = K.variable(
        np.array([[[[0.7, 0.1, 0.2], [0.0, 0.3, 0.7], [0.1, 0.1, 0.8]],
                   [[0.3, 0.7, 0.0], [0.3, 0.4, 0.3], [0.2, 0.5, 0.3]],
                   [[0.8, 0.1, 0.1], [1.0, 0.0, 0.0], [0.4, 0.3, 0.3]]]]))
    y_true = K.variable(np.array([[[0, 1, 0], [2, 1, 0], [2, 2, 1]]]))
    expected_loss = -(np.log(0.7) + np.log(0.3) + np.log(0.1) +
                      np.log(K.epsilon()) + np.log(0.4) + np.log(0.2) +
                      np.log(0.1) + np.log(K.epsilon()) + np.log(0.3)) / 9
    loss = K.eval(losses.sparse_categorical_crossentropy(y_true, y_pred))
    assert np.isclose(expected_loss, np.mean(loss))
Example #15
def policy_gradient_loss(args):
    y_true, y_pred = args
    #adv = k.squeeze(advantage, axis=-1)

    #y_true = k.stack([k.arange(k.shape(y_true)[0]), k.cast(k.squeeze(y_true, axis=-1), dtype='int32')], axis=-1)
    y_true = k.squeeze(y_true, axis=-1)
    #policy_loss = k.log(k.clip(tf.gather_nd(y_pred, y_true), 1e-12, 1.))
    policy_loss = sparse_categorical_crossentropy(
        y_true, y_pred)  # self.compute_log_prob(y_true, y_pred)

    return policy_loss
Example #16
def create_model(observation_space, action_space, args):
    assert isinstance(observation_space, gym.spaces.Box)
    assert isinstance(action_space, gym.spaces.Box) \
        or isinstance(action_space, gym.spaces.Discrete)

    h = x = Input(shape=observation_space.shape)
    for i in range(args.hidden_layers):
        h = Dense(args.hidden_nodes, activation=args.activation_function)(h)

    # baseline output
    bh = x
    for i in range(args.hidden_layers):
        bh = Dense(args.hidden_nodes, activation=args.activation_function)(bh)
    b = Dense(1)(bh)

    if isinstance(action_space, gym.spaces.Discrete):
        # produce logits for all actions
        h = Dense(action_space.n)(h)
        # sample action from logits
        a = Lambda(lambda x: tf.multinomial(x, num_samples=1))(h)
        # turn logits into probabilities
        p = Activation('softmax')(h)
        # model outputs sampled action and baseline
        model = Model(x, [a, b])
        # loss is between true values and probabilities
        model.compile(optimizer=RMSprop(lr=args.learning_rate),
                      loss=[
                          lambda y_true, y_pred:
                          sparse_categorical_crossentropy(y_true, p), 'mse'
                      ],
                      loss_weights=[1, args.baseline_weight])
    else:
        # number of actions
        n = np.prod(action_space.shape)
        # produce means and stddevs for Gaussian
        mu = Dense(n)(h)
        std = args.stddev
        # sample action from Gaussian
        a = Lambda(lambda mu: mu + std * K.random_normal(K.shape(mu)))(mu)
        # model outputs sampled action
        model = Model(x, [a, b])
        # negative log likelihood of Gaussian
        model.compile(optimizer=RMSprop(lr=args.learning_rate, clipnorm=1.),
                      loss=[lambda y_true, y_pred: mse(y_true, mu), 'mse'],
                      loss_weights=[1, args.baseline_weight])

    model.summary()
    return model
Example #17
def test_sparse_categorical_crossentropy_4d():
    y_pred = K.variable(np.array([[[[0.7, 0.1, 0.2],
                                    [0.0, 0.3, 0.7],
                                    [0.1, 0.1, 0.8]],
                                   [[0.3, 0.7, 0.0],
                                    [0.3, 0.4, 0.3],
                                    [0.2, 0.5, 0.3]],
                                   [[0.8, 0.1, 0.1],
                                    [1.0, 0.0, 0.0],
                                    [0.4, 0.3, 0.3]]]]))
    y_true = K.variable(np.array([[[0, 1, 0],
                                   [2, 1, 0],
                                   [2, 2, 1]]]))
    expected_loss = - (np.log(0.7) + np.log(0.3) + np.log(0.1) +
                       np.log(K.epsilon()) + np.log(0.4) + np.log(0.2) +
                       np.log(0.1) + np.log(K.epsilon()) + np.log(0.3)) / 9
    loss = K.eval(losses.sparse_categorical_crossentropy(y_true, y_pred))
    assert np.isclose(expected_loss, np.mean(loss))
Example #18
def sparse_categorical_cross_entropy_pos_contrib(target_rank, pred_rank):
    target_class_ids = tf.reshape(target_rank, (-1,))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]

    # Gather the ranks (predicted and true) that contribute to loss
    y_true = tf.gather(target_rank, positive_ix, axis=1)
    y_pred = tf.gather(pred_rank, positive_ix, axis=1)

    loss = K.switch(tf.size(y_true) > 0,
                    losses.sparse_categorical_crossentropy(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))

    loss = K.mean(loss)

    return loss
Example #19
def create_model(observation_space, action_space, args):
    assert isinstance(observation_space, gym.spaces.Box)
    assert isinstance(action_space, gym.spaces.Box) \
        or isinstance(action_space, gym.spaces.Discrete)

    h = x = Input(shape=observation_space.shape)
    for i in range(args.hidden_layers):
        h = Dense(args.hidden_nodes, activation=args.activation_function)(h)

    if isinstance(action_space, gym.spaces.Discrete):
        # produce logits for all actions
        h = Dense(action_space.n)(h)
        # sample action from logits
        a = Lambda(lambda x: tf.multinomial(x, num_samples=1))(h)
        # turn logits into probabilities
        p = Activation('softmax')(h)
        # model outputs sampled action
        model = Model(x, a)
        # loss is between true values and probabilities
        model.compile(optimizer=RMSprop(lr=args.learning_rate),
                      loss=lambda y_true, y_pred:
                      sparse_categorical_crossentropy(y_true, p))
    else:
        # number of actions
        n = np.prod(action_space.shape)
        # produce means and stddevs for Gaussian
        mu = Dense(n)(h)
        # sample action from Gaussian
        gaussian = SampleGaussian(initial_std=args.stddev)
        a = gaussian(mu)
        global std
        std = gaussian.std
        # model outputs sampled action
        model = Model(x, a)
        # negative log likelihood of Gaussian
        model.compile(optimizer=RMSprop(lr=args.learning_rate, clipnorm=1.),
                      loss=lambda y_true, y_pred: 0.5 * np.log(
                          2 * np.pi) + gaussian.logstd + 0.5 *
                      ((y_true - mu) / gaussian.std)**2)

    model.summary()
    return model
Example #20
File: layers.py Project: IgorMunizS/SeqTag
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.
    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.
    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.
    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)
Example #21
def custom_loss(y_true, y_pred):
    return sparse_categorical_crossentropy(K.reshape(y_true[:, 0], (-1, 1)),
                                           y_pred) * y_true[:, 1]
Example #22
def masked_loss(y_true, y_pred):
    masked_true = K.not_equal(y_true, 0)
    masked_true = K.cast(masked_true, tf.float32)
    loss = sparse_categorical_crossentropy(y_true, y_pred)
    masked_loss = masked_true * loss
    return K.mean(masked_loss)
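A toy usage sketch for masked_loss (hypothetical data, assuming TF 2.x and the
definition above), where label id 0 marks padding steps. Note that K.mean still
averages over all timesteps, padded ones included:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import sparse_categorical_crossentropy

y_true = tf.constant([[1.0, 2.0, 0.0]])      # last timestep is padding
y_pred = tf.constant([[[0.1, 0.8, 0.1],      # shape (batch, time, classes)
                       [0.1, 0.1, 0.8],
                       [0.9, 0.05, 0.05]]])
print(K.eval(masked_loss(y_true, y_pred)))   # the padded step contributes zero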
Example #23
tmp.iloc[-1, ]
#> loss            0.067497
#> accuracy        0.973214
#> val_loss        0.143529
#> val_accuracy    0.921053

tmp = my_model.predict(X)
y_ = np.argmax(tmp, axis=-1)
(y_ == y).mean()
#> 0.96

### 11.2.1 Cross-Entropy

-np.log([0.8, 0.7, 0.3, 0.8]).mean()
#> 0.5017337127232719

-np.log([0.7, 0.6, 0.2, 0.7]).mean()
#> 0.708403356019389

y = [2, 1, 0, 1]
y_1 = [[0.1, 0.1, 0.8], [0.1, 0.7, 0.2], [0.3, 0.4, 0.3], [0.1, 0.8, 0.1]]
y_2 = [[0.1, 0.2, 0.7], [0.2, 0.6, 0.2], [0.2, 0.5, 0.3], [0.2, 0.7, 0.1]]

[
    losses.sparse_categorical_crossentropy(y_true=y,
                                           y_pred=y_1).numpy().mean(),
    losses.sparse_categorical_crossentropy(y_true=y,
                                           y_pred=y_2).numpy().mean()
]
#> [0.5017337, 0.70840335]
Example #24
File: model.py Project: cpgaffney1/cs231n
 def loss_with_var(y_true, y_pred):
     main_loss = losses.sparse_categorical_crossentropy(y_true, y_pred)
     pred_indices = K.argmax(y_pred, axis=-1)
     pred_indices = K.cast(pred_indices, dtype='float32')
     var_penalty = K.var(pred_indices)
     return main_loss + config.distance_weight * var_penalty
Example #25
                               min_lr=config.lr_reduce_min,
                               verbose=1)
early_stopping = EarlyStopping(monitor="val_loss",
                               patience=config.early_stop_patience,
                               verbose=1)

callbacks = [checkpoint, tensor_board]
if (config.lr_reduce):
    callbacks.append(lr_reducer)
if (config.early_stop):
    callbacks.append(early_stopping)

if config.adv_train:
    # Keras gives no easy way of just getting the cross-entropy loss (without the
    # regularisation/rs loss) and it's needed for PGD, so we need to create it again
    model.xent_loss = sparse_categorical_crossentropy(model.targets[0],
                                                      model.outputs[0])

    incremental = (1, config.epsilon_incremental
                   ) if config.epsilon_incremental > 0 else False
    train_generator = AdversarialExampleGenerator(model,
                                                  x_train,
                                                  y_train,
                                                  config.batch_size,
                                                  epsilon=config.epsilon,
                                                  k=config.pgd_iter_train,
                                                  a=0.03,
                                                  incremental=incremental)
    valid_generator = AdversarialExampleGenerator(model,
                                                  x_valid,
                                                  y_valid,
                                                  config.batch_size,
Example #26
print(f"compute CCE by python: {loss}")
loss = log_loss(y_true, y_pred)
print(f"compute CCE by sklearn: {loss}")
loss = K.sum(categorical_crossentropy(tf.constant(y_true),
                                      tf.constant(y_pred))) / 3
loss = K.eval(loss)
print(f"compute CCE by Keras: {loss}")

# Sparse Categorical Cross Entropy
t = LabelEncoder()
y_pred = tf.constant([[0.1, 0.1, 0.8], [0.1, 0.4, 0.5], [0.5, 0.3, 0.2],
                      [0.6, 0.3, 0.1]])
y_true = t.fit_transform(['Rain', 'Rain', 'High Chances of Rain', 'No Rain'])
print("transformed label: ", y_true)
y_true = tf.constant(y_true)
loss = sparse_categorical_crossentropy(y_true, y_pred)
loss = K.eval(loss)
print(f"compute sparse CCE by Keras: {loss}")

# hinge loss
y_true = tf.constant([[0., 1.], [0., 0.]])
y_pred = tf.constant([[0.7, 0.3], [0.4, 0.6]])
loss = hinge(y_true, y_pred)
a = K.eval(loss)
print("hinge loss: ", a)


# Custom Loss Function: categorical_crossentropy_with_label_smoothing
def categorical_crossentropy_with_label_smoothing(y_true,
                                                  y_pred,
                                                  label_smoothing=0.1):
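The example is cut off at the signature. A minimal sketch of how such a
function is commonly completed (standard label smoothing, blending the one-hot
targets toward the uniform distribution); an assumed body, not necessarily the
original author's:

def categorical_crossentropy_with_label_smoothing(y_true,
                                                  y_pred,
                                                  label_smoothing=0.1):
    num_classes = K.cast(K.shape(y_true)[-1], y_pred.dtype)
    # Smooth the targets, then reuse the stock categorical cross-entropy.
    y_true = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
    return categorical_crossentropy(y_true, y_pred)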
Example #27
def show_saliency(args):
    model, config = load_model(args.name)
    input_type = util.get_input_type(config)

    def custom_loss(y_true, y_pred):
        epsilon = 0.001
        main_loss = losses.sparse_categorical_crossentropy(y_true, y_pred)
        pred_indices = K.argmax(y_pred, axis=-1)
        pred_indices = K.cast(pred_indices, dtype='float32')
        distance_penalty = K.constant(1.0, dtype='float32') / (
            K.abs(pred_indices - K.constant(config.n_classes / 2.0, dtype='float32')) + epsilon)
        return main_loss + config.distance_weight * distance_penalty
    keras.losses.custom_loss = custom_loss

    numeric_data, text_data, prices = preprocessing.load_tabular_data()
    additional_num_data = np.load('tabular_data/add_num_data.npy')
    numeric_data = util.preprocess_numeric_data(numeric_data, additional_num_data)
    bins = util.get_bins(prices, num=config.n_classes)

    number = 1024
    img_files = os.listdir('imgs/')
    np.random.shuffle(img_files)
    img_files = img_files[:number]
    x, y = util.load_data_batch(img_files, numeric_data, text_data, bins, config.img_shape,
                                False, number, 'train')
    sequences = np.asarray(config.tokenizer.texts_to_matrix(x[2]))
    sequences = pad_sequences(sequences, maxlen=config.max_seq_len)
    x[2] = sequences

    if input_type == 'full':
        x = [x[0], x[1], sequences]
        indices = np.arange(x[0].shape[0])
    elif input_type == 'img':
        x = x[1]
        indices = np.arange(x.shape[0])
    elif input_type == 'num':
        x = x[0]
        indices = np.arange(x.shape[0])
    elif input_type == 'rnn':
        x = sequences
        indices = np.arange(x.shape[0])
    else:
        print('error')
        exit()

    _ = model.predict(x)
    print('Visualizing saliency...')

    folder = 'models/' + args.name + '/'

    np.random.shuffle(indices)
    indices = indices[:64]
    if input_type == 'full':
        imgs = x[1][indices]
        x = [x[0][indices], x[1][indices], x[2][indices]]
    else:
        x = x[indices]
        imgs = x
    y = y[indices]

    label_tensor = K.constant(y)
    fn = K.function(model.inputs,
                K.gradients(losses.sparse_categorical_crossentropy(label_tensor, model.outputs[0]),
                                    model.inputs))
    grads = fn([x])
    grads = grads[0]

    saliency = np.absolute(grads).max(axis=-1)

    merged_sal = np.concatenate([saliency[i] for i in range(5)], axis=0)
    merged_real = np.concatenate([imgs[i] for i in range(5)], axis=0)

    plt.imsave(folder + 'saliency.jpg', merged_sal, cmap=plt.cm.hot)
    plt.imsave(folder + 'saliency_real.jpg', merged_real, cmap=plt.cm.hot)
Example #28
 def lossfunction(y_true, y_pred):
     return sparse_categorical_crossentropy(y_true, y_pred)
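A wrapper like this usually exists to pin extra arguments. For instance, with
tf.keras, whose sparse_categorical_crossentropy accepts a from_logits flag, a
hypothetical variant could be:

def lossfunction_from_logits(y_true, y_pred):
    # Declare that the model outputs raw logits rather than softmax
    # probabilities; Keras then applies a numerically stable log-softmax.
    return sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)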