Example #1
def vae_loss(input_phono,phono_decoded):
    mse_loss_phono = objectives.mse(input_phono, phono_decoded)
    ent_loss_concept = objectives.categorical_crossentropy(input_concept, concept_decoded)
    mse_loss_geo = objectives.mse(input_geo, geo_decoded)

    kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return (
             mse_loss_phono 
             + ent_loss_concept
             + kl_loss
             +mse_loss_geo
             )
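Most of the closures on this page reference tensors such as z_mean and z_log_std that live in the enclosing model-building scope, so they cannot run in isolation. Below is a minimal sketch of how such a loss is typically wired into a Keras model; the encoder/decoder layers, sizes, and names are illustrative assumptions, not taken from any example here:

from keras import backend as K
from keras import objectives
from keras.layers import Input, Dense, Lambda
from keras.models import Model

original_dim, latent_dim = 784, 2

x = Input(shape=(original_dim,))
h = Dense(128, activation='relu')(x)
z_mean = Dense(latent_dim)(h)     # encoder mean
z_log_std = Dense(latent_dim)(h)  # encoder log-variance

def sampling(args):
    # reparameterization trick: z = mu + sigma * epsilon
    mu, log_std = args
    eps = K.random_normal(shape=K.shape(mu))
    return mu + K.exp(log_std / 2) * eps

z = Lambda(sampling)([z_mean, z_log_std])
h_dec = Dense(128, activation='relu')(z)
x_decoded = Dense(original_dim, activation='sigmoid')(h_dec)

def vae_loss(x_true, x_pred):
    # same shape as the snippets above: reconstruction term + KL term
    mse_loss = objectives.mse(x_true, x_pred)
    kl_loss = -0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return mse_loss + kl_loss

vae = Model(x, x_decoded)
vae.compile(optimizer='adam', loss=vae_loss)  # the closure captures z_mean / z_log_std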
Example #2
 def vrae_loss(x, x_decoded_mean):
     xent_loss = objectives.mse(x, x_decoded_mean)  # Calculate the MSE
     kl_loss = -0.5 * K.mean(
         1 + z_log_sigma - K.square(z_mean) -
         K.exp(z_log_sigma))  # Calculate the Kullback-Leibler loss
     loss = xent_loss + kl_loss  # Sum the two calculated results together
     return loss  # Return the final loss
Example #3
 def vae_loss(self, x, x_decoded_mean):
     xent_loss = objectives.mse(x, x_decoded_mean)
     kl_loss = - 0.5 * K.mean(1 + self.z_log_sigma -
                              K.square(self.z_mean) - K.exp(self.z_log_sigma))
     # xent_loss = K.mean(xent_loss)
     loss = xent_loss + kl_loss
     return loss
Example #4
def vae_loss(x_phono,decoded_phono):
    mse_loss_phono = objectives.mse(x_phono, decoded_phono)
    #kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return (
             mse_loss_phono 
            # + kl_loss
             )
Example #5
 def vae_loss_f(input, y):
     # xent_loss = objectives.binary_crossentropy(input_x, y)
     num = self.timesteps
     xent_loss = 0
     for i in range(num):
         xent_loss += objectives.mse(input[:, i, :], y[:, i, :])
     return xent_loss
Example #6
def vae_loss(inputs, inputs_decoded_mu):
    cross_ent_loss = objectives.mse(inputs, inputs_decoded_mu)
    kl_loss = -.5 * K.mean(1 + z_log_sigma - K.square(z_mu) -
                           K.exp(z_log_sigma))
    loss = cross_ent_loss + kl_loss
    print('kl_loss = ', kl_loss)
    return loss
Example #7
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.mse(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) -
                            K.exp(z_log_sigma))
    loss = xent_loss + kl_loss

    return loss
Example #8
 def grid_loss(g_outmap):
     g_out = g_outmap['output']
     grid_idx = g_outmap["grid_idx"]
     m = g_out[:, :1]
     b = binary_mask(grid_idx, ignore=0, black=0,
                     white=1 - variation_weight)
     return grid_loss_weight*mse(b, m)
Example #9
def vae_loss(x, x_decoded_mean):
    x = tf.reshape(x, (-1, ))
    x_decoded_mean = tf.reshape(x_decoded_mean, (-1, ))
    xent_loss = original_dim * objectives.mse(x, x_decoded_mean)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return xent_loss  #xent_loss #+ kl_loss
Example #10
    def custom_vae_loss(y_true, y_pred):

        # https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py
        #xent_loss = objectives.binary_crossentropy(y_true, y_pred)
        xent_loss = objectives.mse(y_true, y_pred)
        kl_loss = -0.5 * K.mean(1 + log_var - K.square(mu) - K.exp(log_var),
                                axis=-1)
        return K.mean(xent_loss + kl_loss)
Example #11
def dae_loss(input_phono,phono_decoded):
    mse_loss_phono = objectives.mse(input_phono, phono_decoded)
    ent_loss_concept = objectives.categorical_crossentropy(input_concept, concept_decoded)

    return (
             mse_loss_phono 
             + ent_loss_concept
             )
Example #12
 def vae_loss(input_x, y):
     xent_loss = objectives.mse(input_x, y)
     # z_log_sigma = self.z_log_sigma
     # z_mean = self.z_mean
     kl_loss = -0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) -
                             K.exp(z_log_sigma))
     loss = xent_loss + kl_loss
     return loss
Example #13
def vae_loss(input_phono,phono_decoded):
    mse_loss_phono = objectives.mse(input_phono, phono_decoded)

    kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return (
             mse_loss_phono 
             + kl_loss
             )
Example #14
	def vae_loss(x, x_decoded_mean):
		mse_loss = objectives.mse(x, x_decoded_mean)
		kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
		kl_loss = K.sum(kl_loss, axis=-1)
		kl_loss *= -0.5
		beta=10**(-6)
		loss = K.mean((1-beta)*mse_loss + beta*kl_loss)
		return loss
Example #15
 def vae_loss(self, x, x_decoded_mean):
     reconstruction_loss = objectives.mse(x, x_decoded_mean)
     reconstruction_loss *= vae_config.ORIGINAL_DIM
     kl_loss = 1 + self.z_log_sigma - \
         K.square(self.z_mean) - K.exp(self.z_log_sigma)
     kl_loss = K.sum(kl_loss, axis=-1)
     kl_loss *= -0.5
     vae_loss = K.mean(reconstruction_loss + kl_loss)
     return vae_loss
Example #16
    def vae_loss(self, inputs, outputs):

        mse_loss = objectives.mse(inputs, outputs)
        kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(
            self.z_log_var)
        kl_loss = K.mean(kl_loss, axis=-1)
        kl_loss *= -0.5
        loss = K.mean(kl_loss + mse_loss)
        return loss
Example #17
 def vae_loss(x, x_decoded_mean):
     if bce:
         xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
     else:
         xent_loss = objectives.mse(x, x_decoded_mean)
     kl_loss = -0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) -
                             K.exp(z_log_sigma))
     loss = xent_loss + kl_loss
     return loss
Example #18
 def grid_loss(g_outmap):
     g_out = g_outmap['output']
     grid_idx = g_outmap["grid_idx"]
     m = g_out[:, :1]
     b = binary_mask(grid_idx,
                     ignore=0,
                     black=0,
                     white=1 - variation_weight)
     return grid_loss_weight * mse(b, m)
Example #19
 def generator_loss(combined, imputed_vector):
     """
     Ignores y_true and y_pred
     """
     original_vector = combined[:, :n_dims]
     missing_mask = combined[:, n_dims:]
     input_variable = decoder.get_input()
     decoder_compute_graph = decoder.get_output()
     mask_prediction = theano.clone(
         decoder_compute_graph,
         {input_variable: imputed_vector},
         share_inputs=True)
     reconstruction_loss = mse(
         y_true=original_vector * (1 - missing_mask),
         y_pred=imputed_vector * (1 - missing_mask))
     decoder_mask_loss = mse(missing_mask, missing_mask * mask_prediction)
     return (
         reconstruction_weight * reconstruction_loss
         - adversarial_weight * decoder_mask_loss)
Example #20
def vae_loss(x, x_hat):
    kl_loss = 0.01 + K.mean(q_z * (log_q_z - K.log(1.0 / nb_classes)))
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss - kl_loss
    elif use_loss == 'mse':
        return mse_loss - kl_loss
    else:
        raise Exception('Unknown loss!')
Example #21
def vae_loss(x, x_hat):
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        raise Exception('Unknown loss!')
Example #22
def loss(x, x_hat):
    loss_kl = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    loss_xent = dim_x * objectives.binary_crossentropy(x, x_hat)
    loss_mse = dim_x * objectives.mse(x, x_hat)
    if use_loss == 'xent':
        return loss_kl + loss_xent
    elif use_loss == 'mse':
        return loss_kl + loss_mse
    else:
        raise Exception('Undefined Loss: %s' % (use_loss))
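Free variables such as use_loss, dim_x, z_mean, and z_log_var again come from the surrounding scope. One way to make that dependency explicit is a small loss factory that takes them as arguments and returns the closure; this is a sketch, and the factory name and signature are assumptions:

def make_vae_loss(z_mean, z_log_var, dim_x, use_loss='mse'):
    def loss(x, x_hat):
        loss_kl = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        if use_loss == 'xent':
            return loss_kl + dim_x * objectives.binary_crossentropy(x, x_hat)
        elif use_loss == 'mse':
            return loss_kl + dim_x * objectives.mse(x, x_hat)
        raise Exception('Undefined Loss: %s' % use_loss)
    return loss

# usage: model.compile(optimizer='adam', loss=make_vae_loss(z_mean, z_log_var, dim_x))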
Example #23
    def vae_loss(x, x_decoded_mean, use_mse=False):
        if use_mse:
            reconstruction_loss = objectives.mse(x, x_decoded_mean)
        else:
            reconstruction_loss = objectives.binary_crossentropy(x, x_decoded_mean)

        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
        # kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        # kl_loss = K.sum(kl_loss, axis=-1)
        # kl_loss *= -0.5
        loss = reconstruction_loss + kl_loss
        return loss
Example #24
 def loss(y_true, y_pred):
     # mse loss
     reconstruction_loss = mse(K.flatten(y_true), K.flatten(y_pred))
     reconstruction_loss *= self.n_features * self.train_matrix.shape[
         1]
     kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
     kl_loss = K.mean(kl_loss, axis=-1)
     kl_loss *= -0.5
     #                 if self.kl_anneal:
     #                     return K.mean(reconstruction_loss + self.weight * kl_loss)
     #                 else:
     return K.mean(reconstruction_loss + self.beta * kl_loss)
Example #25
    def gumbel_loss(self, x, x_hat):
        q_y = K.reshape(self.encoder_logits, (-1, self.N, self.M))
        q_y = softmax(q_y)
        log_q_y = K.log(q_y + 1e-20)
        KL = q_y * (log_q_y - K.log(1.0 / self.M))
        KL = K.sum(KL, axis=(1, 2))

        x = K.reshape(x, (1, -1))
        x_hat = K.reshape(x_hat, (1, -1))
        rec_loss = self.data_dim * mse(x, x_hat)

        # elbo = rec_loss - KL*self.KL_boost
        elbo = rec_loss + KL * self.KL_boost
        return elbo
Example #26
    def vae_loss(x, x_decoded_mean):
        xent_loss = objectives.mse(x[:, :, 0:2], x_decoded_mean[:, :, 0:2])
        bol_x = x[:, :, 2:5]
        bol_decoded_x = x_decoded_mean[:, :, 2:5]

        s_x = K.exp(bol_x) / K.sum(K.exp(bol_x), axis=-1, keepdims=True)
        s_x_decoded_mean = K.exp(bol_decoded_x) / K.sum(
            K.exp(bol_decoded_x), axis=-1, keepdims=True)
        cross = objectives.binary_crossentropy(s_x, s_x_decoded_mean)

        kl_loss = -0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) -
                                K.exp(z_log_sigma))
        loss = xent_loss + kl_loss + cross

        return loss
Example #27
    def reconstruction_loss(input_and_mask, y_pred):
        X_values = input_and_mask[:, :n_features]
        #X_values.name = "$X_values"

        missing_mask = input_and_mask[:, n_features:]
        #missing_mask.name = "$missing_mask"
        observed_mask = 1 - missing_mask
        #observed_mask.name = "$observed_mask"

        X_values_observed = X_values * observed_mask
        #X_values_observed.name = "$X_values_observed"

        pred_observed = y_pred * observed_mask
        #pred_observed.name = "$y_pred_observed"

        return mse(y_true=X_values_observed, y_pred=pred_observed)
Example #28
def vae_loss(x, decoded):
    xent_loss = K.sum((objectives.mse(x, decoded)), axis=-1)
    #kl_loss_d1 = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    m = K.constant(1)
    s = K.constant(1)
    #kl_loss_d1 = K.sum(K.log(2/K.exp(z_log_var/2))+(K.square(z_mean)+(K.exp(z_log_var/2)-K.constant(1))*(K.exp(z_log_var/2)+K.constant(1)))/(K.constant(2)),axis = -1)
    kl_loss_d1 = K.sum(
        K.log(2 * s / K.exp(z_log_var / 2)) +
        (K.constant(2) * m *
         (-K.exp(-(K.square(z_mean)) /
                 ((K.constant(2)) * K.exp(z_log_var))) * K.exp(z_log_var / 2) +
          K.sqrt(K.constant(np.pi / 2)) * z_mean * (K.constant(1) - K.tanh(
              K.constant(1.19) * z_mean / K.constant(np.sqrt(2)) /
              K.exp(z_log_var / 2))))) / (K.square(s)) +
        (K.square(m - z_mean) + (K.exp(z_log_var / 2) - s) *
         (K.exp(z_log_var / 2) + s)) / (K.constant(2) * K.square(s)),
        axis=-1)
    return 1 * xent_loss + 0.1 * kl_loss_d1
Example #29
    def reconstruction_loss(input_and_mask, y_pred):
        X_values = input_and_mask[:, :n_features]
        X_values.name = "$X_values"

        if mask_indicates_missing_values:
            missing_mask = input_and_mask[:, n_features:]
            missing_mask.name = "$missing_mask"
            observed_mask = 1 - missing_mask
        else:
            observed_mask = input_and_mask[:, n_features:]
        observed_mask.name = "$observed_mask"

        X_values_observed = X_values * observed_mask
        X_values_observed.name = "$X_values_observed"

        pred_observed = y_pred * observed_mask
        pred_observed.name = "$y_pred_observed"

        return mse(y_true=X_values_observed, y_pred=pred_observed)
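The two reconstruction_loss variants above score only the positions the mask marks as observed, so errors on missing entries never reach the gradient. A quick numeric check of the idea with the backend (toy values, purely illustrative):

import numpy as np
from keras import backend as K
from keras.objectives import mse

X_values = K.variable(np.array([[1., 2., 3.]]))
missing_mask = K.variable(np.array([[0., 1., 0.]]))  # feature 1 is missing
y_pred = K.variable(np.array([[1., 9., 4.]]))        # large error on the missing feature

observed_mask = 1 - missing_mask
loss = mse(X_values * observed_mask, y_pred * observed_mask)
print(K.eval(loss))  # ~[0.333]: only the observed features 0 and 2 contribute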
Example #30
def test_batchnorm_generator():
    batch_size = 32
    input_dim = 10

    generator_input = layers.Input(shape=(input_dim, ))
    generator_output = get_generator_discriminator_model(
        generator_input, batch_normalization=True)
    generator_model = models.Model(input=[generator_input],
                                   output=[generator_output])

    discriminator_input = layers.Input(shape=(1, ))
    discriminator_output = get_generator_discriminator_model(
        discriminator_input)
    discriminator_model = models.Model(input=[discriminator_input],
                                       output=[discriminator_output])

    combined_output = discriminator_model(generator_model(generator_input))
    combined_model = models.Model(input=[generator_input],
                                  output=[combined_output])

    generator_model.compile('adam', loss='mse')
    discriminator_model.compile('adam', loss='mse')
    combined_model.compile('adam', loss='mse')

    # there is some randomness in test so do it a few times to be sure
    for _ in range(10):
        x = np.random.uniform(low=0.0, high=1.0, size=(batch_size, input_dim))
        y = np.ones(shape=batch_size)

        combined_preds = combined_model.predict_on_batch(x)
        # reshape `combined_preds` so it is the same shape as `y` for the objective function
        combined_preds = np.reshape(combined_preds, newshape=batch_size)

        loss_validate = K.eval(objectives.mse(y, combined_preds))
        loss = combined_model.train_on_batch(x, y)

        assert '{0:.4f}'.format(loss_validate) == '{0:.4f}'.format(loss)
Example #31
 def _mse(y_true, y_pred):
     return mse(embedding_var[y_true].dimshuffle(1, 0, 2), y_pred)
Example #32
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.mse(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(
        1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Example #33
 def grid_loss(grid_idx, g_outmap):
     g_out = g_outmap['output']
     m = g_out[:, :1]
     b = binary_mask(grid_idx, ignore=0.0,  white=1.)
     return grid_loss_weight*mse(b, m)
Example #34
def MSE(x, y):
    return mse(K.batch_flatten(x),
               K.batch_flatten(y))
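K.batch_flatten collapses every dimension except the batch axis, so this variant yields one scalar per sample even for image-shaped tensors, instead of mse's default mean over only the last axis. For example (toy shapes, illustrative only):

import numpy as np
from keras import backend as K
from keras.objectives import mse

a = K.variable(np.zeros((2, 4, 4, 3)))  # batch of 2 RGB 4x4 images
b = K.variable(np.ones((2, 4, 4, 3)))
per_sample = mse(K.batch_flatten(a), K.batch_flatten(b))
print(K.eval(per_sample).shape)  # (2,) -- one loss value per image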
Example #35
 def loss_fn(y_true, g_outmap):
     y_predicted = g_outmap['output']
     return mse(y_true, y_predicted)
Example #36
def vae_loss(input_phono, phono_decoded):
    mse_loss_phono = objectives.mse(input_phono, phono_decoded)

    kl_loss = -0.5 * K.mean(
        1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return (mse_loss_phono + kl_loss)
Example #37
def fun_speed(inputs):
    from keras.objectives import mean_squared_error as mse
    k0, k1 = inputs.keys()
    return mse(inputs[k0], inputs[k1])
Example #38
 def vae_mse_loss(x, x_decoded_mean):
     mse_loss = objectives.mse(x, x_decoded_mean)
     kl_loss = -0.5 * K.mean(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return mse_loss + kl_loss
Example #39
 def wrapper(x, y):
     return weight*mse(x, y)
Example #40
def mse_crossentropy(y_true, y_pred):
    vuv_loss = binary_crossentropy(y_true[:, -1], y_pred[:, -1])
    return mse(y_true[:, :-1], y_pred[:, :-1]) * vuv_loss
Example #41
 def custom_fun(X):
     from keras.objectives import mean_squared_error as mse
     print(X.keys())
     return mse(X['proj'], X['lstm'])
Example #42
 def vae_loss(input_placeholder, decoded_final):
     loss = objectives.mse(input_placeholder, decoded_final)
     return loss
Example #43
 def vae_loss(x, x_decoded_mean):
     xent_loss = objectives.mse(x, x_decoded_mean)
     kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma))
     loss = xent_loss + kl_loss
     return loss