Example #1
    def __call__(self, y_true, y_pred):
        result = None
        if self.loss_type == LossType.mae:
            result = objectives.mean_absolute_error(y_true, y_pred)
        elif self.loss_type == LossType.mse:
            result = objectives.mean_squared_error(y_true, y_pred)
        elif self.loss_type == LossType.rmse:
            result = K.sqrt(objectives.mean_squared_error(y_true, y_pred))
        elif self.loss_type == LossType.variance:
            result = K.sqrt(objectives.mean_squared_error(
                y_true, y_pred)) - objectives.mean_absolute_error(
                    y_true, y_pred)
        elif self.loss_type == LossType.weighted_mae_mse:
            loss1 = objectives.mean_absolute_error(y_true, y_pred)
            loss2 = objectives.mean_squared_error(y_true, y_pred)
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        elif self.loss_type == LossType.weighted_mae_rmse:
            loss1 = objectives.mean_absolute_error(y_true, y_pred)
            loss2 = K.sqrt(objectives.mean_squared_error(y_true, y_pred))
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        elif self.loss_type == LossType.binary_crossentropy:
            result = objectives.binary_crossentropy(y_true, y_pred)
        elif self.loss_type == LossType.weighted_tanhmse_mse:
            loss1 = losses.mean_squared_error(
                K.tanh(self.data_input_scale * y_true),
                K.tanh(self.data_input_scale * y_pred))
            loss2 = losses.mean_squared_error(y_true, y_pred)
            result = self.loss_ratio * loss1 + (1.0 - self.loss_ratio) * loss2
        else:
            assert False, ("Loss function not supported")

        return result
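
Usage note: Keras accepts any callable taking (y_true, y_pred) as a loss, so an instance of such a dispatcher can be passed straight to compile. A minimal sketch, assuming a hypothetical LossFunction wrapper class exposing the __call__ above together with the LossType enum it switches on:

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical names: LossFunction / LossType are assumed to match the dispatcher shown above.
loss_fn = LossFunction(loss_type=LossType.weighted_mae_mse, loss_ratio=0.5)

model = Sequential([Dense(1, input_shape=(10,))])
model.compile(optimizer='adam', loss=loss_fn)  # any (y_true, y_pred) callable is accepted
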
Example #2
def tailored_loss(true_output, loss_input):
    flux_loss = objectives.mean_squared_error(
        loss_input[:, 0, :, :] * true_output[:, 2, :, :],
        true_output[:, 0, :, :])
    flux_err_loss = objectives.mean_squared_error(
        loss_input[:, 1, :, :] * true_output[:, 2, :, :],
        true_output[:, 1, :, :])

    return 0.5 * flux_loss + 0.5 * flux_err_loss
Example #3
 def feature_matching(self, out_true, out_pred):
     "Feature matching objective function for use in G."
     mse = mean_squared_error(out_true, out_pred)
     activations = K.function([self.D.layers[0].input, K.learning_phase()],
                               [self.D.layers[1].output,])
     # Inputs to theano functions must be numeric not tensors like out_true
     pred_batch = self.D.predict(self.batch, batch_size=self.batch_size)
     pred_activ = activations([pred_batch, 0])[0]
     true_activ = activations([self.batch, 0])[0]
     feat_match = mean_squared_error(true_activ, pred_activ)
     return mse + self.gamma * feat_match
Example #4
def loss_function_multiple_distance_and_area(y_true, y_pred):
    # point loss
    error = mean_squared_error(y_true, y_pred)
    # line (segment) loss
    for i in range(8):
        error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
    # right-ear and left-ear areas
    left_ear_true, left_ear_pred = y_true[:, 6:12], y_pred[:, 6:12]
    right_ear_true, right_ear_pred = y_true[:, 12:18], y_pred[:, 12:18]
    error += calc_area_loss(left_ear_true, left_ear_pred)
    error += calc_area_loss(right_ear_true, right_ear_pred)
    return error
Example #5
 def feature_matching(self, out_true, out_pred):
     "Feature matching objective function for use in G."
     mse = mean_squared_error(out_true, out_pred)
     activations = K.function([self.D.layers[0].input,
                               K.learning_phase()], [
                                   self.D.layers[1].output,
                               ])
     # Inputs to theano functions must be numeric not tensors like out_true
     pred_batch = self.D.predict(self.batch, batch_size=self.batch_size)
     pred_activ = activations([pred_batch, 0])[0]
     true_activ = activations([self.batch, 0])[0]
     feat_match = mean_squared_error(true_activ, pred_activ)
     return mse + self.gamma * feat_match
Example #6
 def vae_loss(x, x_decoded_mean):
     # NOTE: the reconstruction loss expects batch_size by dim tensors for x and x_decoded_mean, so we MUST flatten these!
     x = K.flatten(x)
     x_decoded_mean = K.flatten(x_decoded_mean)
     xent_loss = objectives.mean_squared_error(x, x_decoded_mean)
     kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return xent_loss +  kl_loss
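
Usage note: a minimal sketch of wiring in a closure-style VAE loss like this one, assuming vae is the full Model whose encoder produced the z_mean and z_log_var tensors captured by the closure:

# Sketch only: vae and x_train are assumed to exist.
vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.fit(x_train, x_train, shuffle=True, epochs=10, batch_size=128)
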
Example #7
 def __init_model(self):
     '''
     Initializes model, does not load weights. 
     '''
     ## get args 
     data_dim = self.data_dim 
     label_dim = self.label_dim 
     latent_dim = self.latent_dim 
     n_hidden = self.n_hidden 
     batch_size = self.batch_size 
     kl_coef = self.kl_coef 
     ## encoder 
     x = Input(shape=(data_dim,)) 
     condition = Input(shape=(label_dim,))
     inputs = concatenate([x, condition]) 
     x_encoded = Dense(n_hidden, activation='relu')(inputs) 
     x_encoded = Dense(n_hidden//2, activation='relu')(x_encoded) 
     x_encoded = Dense(n_hidden//4, activation='relu')(x_encoded) 
     mu = Dense(latent_dim, activation='linear')(x_encoded) 
     log_var = Dense(latent_dim, activation='linear')(x_encoded) 
     ## latent sampler 
     def sampling(args): 
         mu, log_var = args 
         eps = K.random_normal(shape=(batch_size, latent_dim), mean=0., stddev=1.) 
         return mu + K.exp(log_var/2.) * eps 
     ## sample 
     z = Lambda(sampling, output_shape=(latent_dim,))([mu, log_var]) 
     z_cond = concatenate([z, condition]) 
     ## decoder 
     z_decoder1 = Dense(n_hidden//4, activation='relu') 
     z_decoder2 = Dense(n_hidden//2, activation='relu') 
     z_decoder3 = Dense(n_hidden, activation='relu') 
     y_decoder = Dense(data_dim, activation='linear') 
     z_decoded = z_decoder1(z_cond) 
     z_decoded = z_decoder2(z_decoded) 
     z_decoded = z_decoder3(z_decoded) 
     y = y_decoder(z_decoded) 
     ## loss 
     reconstruction_loss = objectives.mean_squared_error(x, y) 
     kl_loss = .5 * K.mean(K.square(mu) + K.exp(log_var) - log_var - 1, axis=-1) 
     cvae_loss = reconstruction_loss + kl_coef * kl_loss 
     ## define full model 
     cvae = Model([x, condition], y) 
     cvae.add_loss(cvae_loss) 
     cvae.compile(optimizer='adam') 
     cvae.summary() 
     self.cvae = cvae 
     ## define encoder model 
     encoder = Model([x, condition], mu) 
     self.encoder = encoder 
     ## define decoder model 
     decoder_input = Input(shape=(latent_dim + label_dim,)) 
     _z_decoded = z_decoder1(decoder_input) 
     _z_decoded = z_decoder2(_z_decoded) 
     _z_decoded = z_decoder3(_z_decoded) 
     _y = y_decoder(_z_decoded) 
     generator = Model(decoder_input, _y) 
     generator.summary() 
     self.generator = generator 
     pass
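
Usage note: because the CVAE loss is attached with add_loss, compile takes no loss argument and fit needs no explicit targets. A sketch, assuming m is an instance of the surrounding class and x_train / y_onehot are arrays of shape (n, data_dim) and (n, label_dim):

import numpy as np

# Sketch: m, x_train and y_onehot are assumed (hypothetical names).
m.cvae.fit([x_train, y_onehot], epochs=50, batch_size=m.batch_size)
z_mu = m.encoder.predict([x_train, y_onehot])             # latent means
recon = m.generator.predict(np.hstack([z_mu, y_onehot]))  # condition-aware decoding
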
Example #8
def vae_loss(x, x_decoded_mean):
    Z = T.transpose(K.repeat(z, n_centroid), [0, 2, 1])
    z_mean_t = T.transpose(K.repeat(z_mean, n_centroid), [0, 2, 1])
    z_log_var_t = T.transpose(K.repeat(z_log_var, n_centroid), [0, 2, 1])
    u_tensor3 = T.repeat(u_p.dimshuffle('x', 0, 1), batch_size, axis=0)
    lambda_tensor3 = T.repeat(lambda_p.dimshuffle('x', 0, 1),
                              batch_size,
                              axis=0)
    theta_tensor3 = theta_p.dimshuffle('x', 'x', 0) * T.ones(
        (batch_size, latent_dim, n_centroid))

    p_c_z=K.exp(K.sum((K.log(theta_tensor3)-0.5*K.log(2*math.pi*lambda_tensor3)-\
                       K.square(Z-u_tensor3)/(2*lambda_tensor3)),axis=1))+1e-10

    gamma = p_c_z / K.sum(p_c_z, axis=-1, keepdims=True)
    gamma_t = K.repeat(gamma, latent_dim)

    if datatype == 'sigmoid':
        loss=alpha*original_dim * objectives.binary_crossentropy(x, x_decoded_mean)\
        +K.sum(0.5*gamma_t*(latent_dim*K.log(math.pi*2)+K.log(lambda_tensor3)+K.exp(z_log_var_t)/lambda_tensor3+K.square(z_mean_t-u_tensor3)/lambda_tensor3),axis=(1,2))\
        -0.5*K.sum(z_log_var+1,axis=-1)\
        -K.sum(K.log(K.repeat_elements(theta_p.dimshuffle('x',0),batch_size,0))*gamma,axis=-1)\
        +K.sum(K.log(gamma)*gamma,axis=-1)
    else:
        loss=alpha*original_dim * objectives.mean_squared_error(x, x_decoded_mean)\
        +K.sum(0.5*gamma_t*(latent_dim*K.log(math.pi*2)+K.log(lambda_tensor3)+K.exp(z_log_var_t)/lambda_tensor3+K.square(z_mean_t-u_tensor3)/lambda_tensor3),axis=(1,2))\
        -0.5*K.sum(z_log_var+1,axis=-1)\
        -K.sum(K.log(K.repeat_elements(theta_p.dimshuffle('x',0),batch_size,0))*gamma,axis=-1)\
        +K.sum(K.log(gamma)*gamma,axis=-1)

    return loss
Example #9
def vae_loss(x, x_decoded_mean):
    x_d = Flatten()(x)
    x_dec_d = Flatten()(x_decoded_mean)
    xent_loss = input_dim * objectives.mean_squared_error(x_d, x_dec_d)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return xent_loss + kl_loss
Example #10
def vae_loss(x, x_decoded_mean):
    z_mean_t = K.permute_dimensions(K.repeat(z_mean, n_centroid),[0,2,1])
    z_log_var_t = K.permute_dimensions(K.repeat(z_log_var, n_centroid),[0,2,1])

    u_p = vade.get_layer('latent').u_p
    theta_p = vade.get_layer('latent').theta_p
    lambda_p = vade.get_layer('latent').lambda_p
    u_tensor3 = K.repeat_elements(K.expand_dims(u_p, 0), batch_size,axis=0)
    lambda_tensor3 = K.repeat_elements(K.expand_dims(lambda_p, 0), batch_size,axis=0)
    gamma = vade.get_layer('latent').gamma
    gamma_t = K.repeat(gamma, latent_dim)

    # The loss below is the negative ELBO, so minimizing it maximizes the ELBO
    if datatype == 'sigmoid':
        loss = alpha*original_dim * objectives.binary_crossentropy(x, x_decoded_mean)\
        + K.sum(0.5*gamma_t*(latent_dim*K.log(math.pi*2)+K.log(lambda_tensor3)+K.exp(z_log_var_t)/lambda_tensor3 + K.square(z_mean_t-u_tensor3)/lambda_tensor3), axis=(1, 2))\
        - 0.5*K.sum(z_log_var+1,axis=-1)\
        - K.sum(K.log(K.repeat_elements(K.expand_dims(theta_p, 0),batch_size,0))*gamma,axis=-1)\
        + K.sum(K.log(gamma)*gamma,axis=-1)

    else:
        loss=alpha*original_dim * objectives.mean_squared_error(x, x_decoded_mean)\
        + K.sum(0.5*gamma_t*(latent_dim*K.log(math.pi*2)+K.log(lambda_tensor3)+K.exp(z_log_var_t)/lambda_tensor3+K.square(z_mean_t-u_tensor3)/lambda_tensor3),axis=(1,2))\
        - 0.5*K.sum(z_log_var+1,axis=-1)\
        - K.sum(K.log(K.repeat_elements(K.expand_dims(theta_p, 0),batch_size,0))*gamma,axis=-1)\
        + K.sum(K.log(gamma)*gamma,axis=-1)
    return loss
Example #11
 def vae_loss(real_image, generated_image):
     gen_loss = K.mean(
         objectives.mean_squared_error(real_image, generated_image))
     kl_loss = -0.5 * K.mean(
         1 + std_vector - K.square(mean_vector) - K.exp(std_vector),
         axis=-1)
     # kl_loss = 0.5 * K.mean(K.square(std_vector) + K.square(mean_vector) - K.log(K.square(std_vector)) -1, axis=-1)
     return gen_loss + kl_loss
Example #12
def print_regression_model_summary(prefix, y_test, y_pred, parmsFromNormalization):
    y_test = (y_test*parmsFromNormalization.std*parmsFromNormalization.sqrtx2) + parmsFromNormalization.mean
    y_pred = (y_pred*parmsFromNormalization.std*parmsFromNormalization.sqrtx2) + parmsFromNormalization.mean

    mse = mean_squared_error(y_test, y_pred)
    error_AC, rmsep, mape, rmse = almost_correct_based_accuracy(y_test, y_pred, 10)
    rmsle = calculate_rmsle(y_test, y_pred)
    print ">> %s AC_errorRate=%.1f RMSEP=%.6f MAPE=%6f RMSE=%6f mse=%f rmsle=%.5f" %(prefix, error_AC, rmsep, mape, rmse, mse, rmsle)
    log.write("%s AC_errorRate=%.1f RMSEP=%.6f MAPE=%6f RMSE=%6f mse=%f rmsle=%.5f" %(prefix, error_AC, rmsep, mape, rmse, mse, rmsle))
Example #13
    def rec_loss(self, x, y):

        ec = int(self.edge_clip)
        x_f = K.flatten(x[:, :, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec])
        y_f = K.flatten(y[:, :, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec])

        reconstruction_loss = objectives.mean_squared_error(
            x_f, y_f) * 16**3 * 4**3 * self.rec_loss_factor
        return reconstruction_loss
Example #14
def vae_loss(x, x_decoded_mean):
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = SEQUENCE_LENGTH * objectives.mean_squared_error(
        x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(
        1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)

    return xent_loss + kl_loss
Example #15
    def vade_loss_function(self, args):
        inputs = self.inputs
        #reconst = self.x_decoded
        reconst, z, z_mean, z_log_var = (args[:self.num_views],
                                         args[-3], args[-2], args[-1])
        Z = T.transpose(K.repeat(z, self.n_centroid), [0, 2, 1])
        z_mean_t = T.transpose(K.repeat(z_mean, self.n_centroid), [0, 2, 1])
        z_log_var_t = T.transpose(K.repeat(z_log_var, self.n_centroid),
                                  [0, 2, 1])
        u_tensor3 = T.repeat(self.u_p.dimshuffle('x', 0, 1),
                             self.batch_size,
                             axis=0)
        lambda_tensor3 = T.repeat(self.lambda_p.dimshuffle('x', 0, 1),
                                  self.batch_size,
                                  axis=0)

        #version1
        theta_tensor3 = self.theta_p.dimshuffle('x', 'x', 0) * T.ones(
            (self.batch_size, self.latent_dim, self.n_centroid))

        p_c_z = K.exp(K.sum((K.log(theta_tensor3) - 0.5 * K.log(2 * math.pi * lambda_tensor3) - \
                             K.square(Z - u_tensor3) / (2 * lambda_tensor3)), axis=1)) + 1e-10

        gamma = p_c_z / K.sum(p_c_z, axis=-1, keepdims=True)
        gamma_t = K.repeat(gamma, self.latent_dim)

        #version2
        # theta_tensor2 = self.theta_p.dimshuffle('x', 0) * T.ones((self.batch_size, self.n_centroid))
        # p_c_z = K.exp(K.log(theta_tensor2) - K.sum((0.5 * K.log(2 * math.pi * lambda_tensor3) + \
        #                                             K.square(Z - u_tensor3) / (
        #                                                     2 * lambda_tensor3)), axis=1)) + 1e-10
        # gamma = p_c_z / K.sum(p_c_z, axis=1, keepdims=True)
        reconst_loss = 0
        for i in range(self.num_views):
            #version 1
            r_loss = self.original_dim[i] * objectives.mean_squared_error(
                inputs[i], reconst[i])
            #version 2
            #r_loss = self.original_dim[i]*objectives.binary_crossentropy(inputs[i], reconst[i])
            reconst_loss += r_loss
        #version 1
        loss = reconst_loss + self.alpha * (K.sum(0.5 * gamma_t * (
                    self.latent_dim * K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(
                z_log_var_t) / lambda_tensor3 + K.square(z_mean_t - u_tensor3) / lambda_tensor3), axis=(1, 2)) \
                               - 0.5 * K.sum(z_log_var + 1, axis=-1) \
                               - K.sum(
                    K.log(K.repeat_elements(self.theta_p.dimshuffle('x', 0), self.batch_size, 0)) * gamma, axis=-1) \
                               + K.sum(K.log(gamma) * gamma, axis=-1))
        #version2
        # loss = reconst_loss + self.alpha * (K.sum(0.5 * gamma * K.sum(
        #         K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(
        #     z_log_var_t) / lambda_tensor3 + K.square(z_mean_t - u_tensor3) / lambda_tensor3, axis=1), axis=1) \
        #                                     - 0.5 * K.sum(z_log_var + 1 + K.log(2*math.pi), axis=1) \
        #                                     - K.sum(
        #             K.log(theta_tensor2)* gamma, axis=1) \
        #                                     + K.sum(K.log(gamma) * gamma, axis=1))
        return loss
Example #16
def loss_function_multiple_distance_and_triangle(y_true, y_pred):
    # point loss
    error = mean_squared_error(y_true, y_pred)
    # line (segment) loss
    for i in range(8):
        error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
    # area loss (over point triples)
    for comb in combinations(range(9), 3):
        s_true = sarrus_formula(
            y_true[:, (comb[0]*2):(comb[0]*2+2)],
            y_true[:, (comb[1]*2):(comb[1]*2+2)],
            y_true[:, (comb[2]*2):(comb[2]*2+2)]
        )
        s_pred = sarrus_formula(
            y_pred[:, (comb[0]*2):(comb[0]*2+2)],
            y_pred[:, (comb[1]*2):(comb[1]*2+2)],
            y_pred[:, (comb[2]*2):(comb[2]*2+2)]
        )
        error += K.abs(s_true - s_pred)
    return error
Example #17
        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)

            rec_loss = objectives.mean_squared_error(x, x_decoded_mean)

            kl_loss = -0.5 * K.mean(1 + self.z_logvar - K.square(self.z_mean) -
                                    K.exp(self.z_logvar),
                                    axis=-1)

            return rec_loss + kl_loss
Example #18
def cnn_loss(x, x_decoded_mean):
    #N = tf.convert_to_tensor(DPParam, dtype=tf.float32)

    gamma = tf.convert_to_tensor(DPParam['LPMtx'], dtype=tf.float32)
    N = tf.convert_to_tensor(DPParam['Nvec'], dtype=tf.float32)
    m = tf.convert_to_tensor(DPParam['m'], dtype=tf.float32)
    W = tf.convert_to_tensor(DPParam['B'], dtype=tf.float32)
    v = tf.convert_to_tensor(DPParam['nu'], dtype=tf.float32)

    num_cluster = N.shape[0]
    z_mean_1_last = tf.expand_dims(z_mean, -1)  # bs, latent_dim, 1
    z_mean_1_mid = tf.expand_dims(z_mean, 1)  # bs, 1, latent_dim

    for k in range(num_cluster):
        gamma_k_rep = tf.squeeze(
            K.repeat(tf.expand_dims(gamma[:, k], -1), latent_dim))
        z_k_bar = 1 / N[k] * K.sum(tf.multiply(gamma_k_rep, z_mean),
                                   axis=0)  #(latent_dim, )
        z_k_bar_batch = tf.squeeze(
            K.repeat(tf.expand_dims(z_k_bar, 0), batch_size))
        #tf.transpose(z_k_bar_batch, perm=[1, 0])
        z_k_bar_batch_1_last = tf.expand_dims(z_k_bar_batch,
                                              -1)  # bs, latent_dim, 1
        z_k_bar_batch_1_mid = tf.expand_dims(z_k_bar_batch,
                                             1)  # bs, 1, latent_dim

        # TODO:!
        S_k = 1 / N[k] * K.sum(K.batch_dot(
            tf.multiply(tf.expand_dims(gamma_k_rep, -1),
                        (z_mean_1_last - z_k_bar_batch_1_last)),
            z_mean_1_mid - z_k_bar_batch_1_mid),
                               axis=0)  # (latent_dim, latent_dim)
        temp = tf.linalg.trace(tf.linalg.solve(W[k], S_k))
        temp2 = tf.matmul(tf.expand_dims((z_k_bar - m[k]), 0),
                          tf.linalg.inv(W[k]))
        temp3 = tf.squeeze(
            tf.matmul(temp2, tf.expand_dims((z_k_bar - m[k]), -1)))
        if k == 0:
            e = 0.5 * N[k] * (v[k] * (temp + temp3))
        else:
            e += 0.5 * N[k] * (v[k] * (temp + temp3))

    loss_ = alpha * original_dim * objectives.mean_squared_error(
        K.flatten(x), K.flatten(x_decoded_mean)) - scale * K.sum(
            (z_log_var + 1), axis=-1)
    loss_ = K.sum(loss_, axis=0) + e
    # loss = K.sum(loss_, axis = 0)
    #for i in range(5):
    #    loss_ += N

    #return loss_
    return loss_
Example #19
    def validate(self, x_train, y_train):
        preds = self.predict(x_train)
        answers = y_train

        for ans, pred in zip(answers, preds):
            row = "ans: \t{0: 3.3f}, predict: \t{1: 3.3f}, diff:\t{2: 3.3f}"
            print(row.format(ans[0], pred[0], pred[0] - ans[0]))

        ans_t = answers.reshape(answers.shape[1], answers.shape[0])
        pred_t = preds.reshape(preds.shape[1], preds.shape[0])

        mse = K.eval(mean_squared_error(ans_t, pred_t))
        print("mean squared error is {0}".format(mse))
Example #20
    def gan_loss(y_true, y_pred):

        #trade-off coefficient
        alpha_recip = 0.05

        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)

        #L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
        L_adv = objectives.mean_squared_error(y_true_flat, y_pred_flat)

        L_seg = gen_dice_multilabel(labels, fake_labels)

        return alpha_recip * L_adv + L_seg
Example #21
 def vae_loss(self, x, x_decoded_mean, args):
     """
     We train with two loss functions. Reconstruction loss force decoded samples to match to X (just like
     autoencoder). KL loss (latent loss) calculates the divergence between the learned latent distribution
     derived from z_mean and z_logsigma and the original distribution of X.
     """
     z_mean, z_logsigma = args
     print('z_mean', z_mean)
     print('logsig', z_logsigma)
     reconstruction_loss = objectives.mean_squared_error(x, x_decoded_mean)
     latent_loss = -0.50 * K.mean(
         1 + z_logsigma - K.square(z_mean) - K.exp(z_logsigma), axis=-1)
     print('RL ', reconstruction_loss)
     print('LL ', latent_loss)
     return K.mean(reconstruction_loss + latent_loss)
Example #22
def vae_loss(x,
             x_decoded_mean,
             z,
             z_mean,
             z_log_var,
             u_p,
             theta_p,
             lambda_p,
             alpha=1,
             datatype='sigmoid'):
    Z = tf.transpose(K.repeat(z, n_centroid), [0, 2, 1])
    z_mean_t = tf.transpose(K.repeat(z_mean, n_centroid), [0, 2, 1])
    z_log_var_t = tf.transpose(K.repeat(z_log_var, n_centroid), [0, 2, 1])
    u_tensor3 = tf.tile(tf.expand_dims(u_p, [0]), [batch_size, 1, 1])
    # u_tensor3 = T.repeat(tf.expand_dims(u_p,[0]), batch_size, axis=0)
    # lambda_tensor3 = T.repeat(tf.expand_dims(lambda_p,[0]), batch_size, axis=0)
    lambda_tensor3 = tf.tile(tf.expand_dims(lambda_p, [0]), [batch_size, 1, 1])
    temp_theta_p = tf.expand_dims(theta_p, [0])
    temp_theta_p = tf.expand_dims(temp_theta_p, [0])
    # theta_tensor3 = temp_theta_p * T.ones((batch_size, z_dim, n_centroid))
    theta_tensor3 = tf.tile(temp_theta_p, [batch_size, z_dim, 1])

    #@TODO
    #PROBLEM HERE ? add theta z_dim times for each cluster?
    p_c_z = K.exp(K.sum((K.log(theta_tensor3) - 0.5 * K.log(2 * math.pi * lambda_tensor3) - \
                         K.square(Z - u_tensor3) / (2 * lambda_tensor3)), axis=1)) + 1e-10

    gamma = p_c_z / K.sum(p_c_z, axis=-1, keepdims=True)
    gamma_t = K.repeat(gamma, z_dim)

    if datatype == 'sigmoid':
        loss = alpha * original_dim * objectives.binary_crossentropy(x, x_decoded_mean) \
                   + K.sum(0.5 * gamma_t * (
            z_dim * K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(z_log_var_t) / lambda_tensor3 + K.square(
                    z_mean_t - u_tensor3) / lambda_tensor3), axis=(1, 2)) \
                   - 0.5 * K.sum(z_log_var + 1, axis=-1) \
                   - K.sum(K.log(K.repeat_elements(tf.expand_dims(theta_p, [0]), batch_size, 0)) * gamma, axis=-1) \
                   + K.sum(K.log(gamma) * gamma, axis=-1)
    else:
        loss = alpha * original_dim * objectives.mean_squared_error(x, x_decoded_mean) \
               + K.sum(0.5 * gamma_t * (
            z_dim * K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(z_log_var_t) / lambda_tensor3 + K.square(
                z_mean_t - u_tensor3) / lambda_tensor3), axis=(1, 2)) \
               - 0.5 * K.sum(z_log_var + 1, axis=-1) \
               - K.sum(K.log(K.repeat_elements(tf.expand_dims(theta_p, [0]), batch_size, 0)) * gamma, axis=-1) \
               + K.sum(K.log(gamma) * gamma, axis=-1)

    return tf.reduce_mean(loss)
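
Usage note: Keras only hands a loss function (y_true, y_pred), so the extra tensors in this signature are usually bound beforehand. A sketch, assuming the listed tensors and a vae model already exist in scope:

from functools import partial

# Sketch: z, z_mean, z_log_var, u_p, theta_p, lambda_p and vae are assumed to exist.
bound_loss = partial(vae_loss, z=z, z_mean=z_mean, z_log_var=z_log_var,
                     u_p=u_p, theta_p=theta_p, lambda_p=lambda_p,
                     alpha=1, datatype='sigmoid')
vae.compile(optimizer='adam', loss=bound_loss)  # invoked as bound_loss(y_true, y_pred)
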
Example #23
def print_regression_model_summary(prefix, y_test, y_pred,
                                   parmsFromNormalization):
    y_test = (y_test * parmsFromNormalization.std *
              parmsFromNormalization.sqrtx2) + parmsFromNormalization.mean
    y_pred = (y_pred * parmsFromNormalization.std *
              parmsFromNormalization.sqrtx2) + parmsFromNormalization.mean

    mse = mean_squared_error(y_test, y_pred)
    error_AC, rmsep, mape, rmse = almost_correct_based_accuracy(
        y_test, y_pred, 10)
    rmsle = calculate_rmsle(y_test, y_pred)
    print ">> %s AC_errorRate=%.1f RMSEP=%.6f MAPE=%6f RMSE=%6f mse=%f rmsle=%.5f" % (
        prefix, error_AC, rmsep, mape, rmse, mse, rmsle)
    log.write(
        "%s AC_errorRate=%.1f RMSEP=%.6f MAPE=%6f RMSE=%6f mse=%f rmsle=%.5f" %
        (prefix, error_AC, rmsep, mape, rmse, mse, rmsle))
Example #24
    def cvae_loss(self, x, y):
        ec = int(self.edge_clip)
        x_f = K.flatten(x[:, :, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec])
        y_f = K.flatten(y[:, :, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec])

        reconstruction_loss = objectives.mean_squared_error(
            x_f, y_f) * 16**3 * 4**3 * self.rec_loss_factor

        kl_loss = 0.5 * K.sum(
            K.square(self.mu) + K.exp(self.log_var) - self.log_var - 1,
            axis=-1)
        #    out_o_bounds = tf.reduce_sum(tf.cast(tf.logical_and(y[:,0,ec:-1*ec,ec:-1*ec,ec:-1*ec]>.2,y[:,-1,ec:-1*ec,ec:-1*ec,ec:-1*ec]<4.0),tf.float32))

        rr = (y[:, 0, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec] - 1.0 * np.ones(
            (32, 44, 44, 44)))**2 + (
                y[:, -1, ec:-1 * ec, ec:-1 * ec, ec:-1 * ec] - 3.5 * np.ones(
                    (32, 44, 44, 44)))**2
        out_o_bounds = tf.reduce_sum(tf.math.exp(-1 * rr / 0.15) * 5)

        loss = K.mean(reconstruction_loss + kl_loss)
        return loss
Example #25
    def create_loss_optimizer(self):
        z_mean_t = K.permute_dimensions(K.repeat(self.z_mean, self.n_clusters),
                                        [0, 2, 1])
        z_log_var_t = K.permute_dimensions(
            K.repeat(self.z_log_var, self.n_clusters), [0, 2, 1])

        u_tensor3 = K.repeat_elements(K.expand_dims(self.u_p, 0),
                                      self.batch_size,
                                      axis=0)
        lambda_tensor3 = K.repeat_elements(K.expand_dims(self.lambda_p, 0),
                                           self.batch_size,
                                           axis=0)
        gamma_t = K.repeat(self.gamma, self.dimensions[-1])

        if self.datatype == 'binary':
            self.loss = self.alpha * self.dimensions[0] * objectives.binary_crossentropy(self.x, self.output) \
                   + K.sum(0.5 * gamma_t * (self.dimensions[-1] * K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(z_log_var_t) / lambda_tensor3 + K.square(z_mean_t - u_tensor3) / lambda_tensor3), axis=(1, 2)) \
                   - 0.5 * K.sum(self.z_log_var + 1, axis=-1) \
                   - K.sum(K.log(K.repeat_elements(K.expand_dims(self.theta_p, 0), self.dimensions[0], 0)) * self.gamma, axis=-1) \
                   + K.sum(K.log(self.gamma) * self.gamma, axis=-1)

        else:
            self.loss = self.alpha * self.dimensions[0] * objectives.mean_squared_error(self.x, self.output) \
                   + K.sum(0.5 * gamma_t * (self.dimensions[-1] * K.log(math.pi * 2) + K.log(lambda_tensor3) + K.exp(z_log_var_t) / lambda_tensor3 + K.square(z_mean_t - u_tensor3) / lambda_tensor3), axis=(1, 2)) \
                   - 0.5 * K.sum(self.z_log_var + 1, axis=-1) \
                   - K.sum(K.log(K.repeat_elements(K.expand_dims(self.theta_p_norm, 0), self.batch_size, 0)) * self.gamma, axis=-1) \
                   + K.sum(K.log(self.gamma) * self.gamma, axis=-1)
        self.cost = tf.reduce_mean(self.loss)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=0.9,
            beta2=0.999,
            epsilon=5).minimize(self.cost)

        self.normalize = tf.assign(self.theta_p_norm,
                                   self.theta_p / tf.reduce_sum(self.theta_p))

        print('Loss optimizer created.')
Example #26
        def c_loss(x_true_c, x_pred_c):
            max_length_f = 1.0 * max_length
            x_true_conn = x_true_c[:, :, conn_dim_start:]
            x_pred_conn = x_pred_c[:, :, conn_dim_start:]

            #variant 1
            #x_true_conn = 0.5 * x_true_conn + 0.5
            #x_pred_conn = 0.5 * K.round(x_pred_conn * max_length_f) / max_length_f + 0.5
            #variant 2
            #x_pred_conn = 0.5 * x_pred_conn + 0.5
            #variant 3
            x_true_conn = K.round(x_true_conn * max_length_f)
            x_pred_conn = K.round(x_pred_conn * max_length_f)

            #x_pred_conn.sort(axis=2)
            #x_true_conn.sort(axis=2)

            #x_pred_conn = K.round(x_pred_conn * max_length_f) / max_length_f
            x_true_conn = K.flatten(x_true_conn)
            x_pred_conn = K.flatten(x_pred_conn)

            return objectives.mean_squared_error(x_true_conn,
                                                 x_pred_conn) / max_length_f
Example #27
def loss_function_with_distance(y_true, y_pred):
    point_mse = mean_squared_error(y_true, y_pred)
    distance_mse = mean_squared_error(y_true[:, 2:18]-y_true[:, 0:16], y_pred[:, 2:18]-y_pred[:, 0:16])
    return point_mse + distance_mse
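
Note: the second term penalises errors in the offsets between consecutive keypoints. With the assumed layout of nine (x, y) points flattened as x0, y0, ..., x8, y8, the slicing pairs each coordinate with the one two positions earlier; a small NumPy check of the shapes:

import numpy as np

pts = np.arange(18, dtype=float).reshape(1, 18)  # one sample, nine flattened (x, y) points
offsets = pts[:, 2:18] - pts[:, 0:16]            # coordinate-wise differences between neighbouring points
print(offsets.shape)                             # (1, 16)
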
Example #28
def loss_function_simple(y_true, y_pred):
    return mean_squared_error(y_true, y_pred)
Example #29
if mode == "new":
    print("saving model weights...")
    model.save_weights(netfile, overwrite = True)
    exit(0)

# are we prepared for this???
if len(sys.argv) < 4:
    print(usage)
    exit(0)

# prepare the model for learning
print("compiling model...")
#rms = SGD()
rms = RMSprop()
#model.compile(optimizer=rms, loss="mean_squared_error")
KObjs.allzero = lambda x,y: KObjs.mean_squared_error(x, y)\
                            * rnd.choice([-0.01, 0.01])
#model.compile(optimizer=rms, loss={"output": "mean_squared_error"})
model.compile(optimizer = rms,\
    loss = {"output": "mean_squared_error", "memo-out": "mean_squared_error"})

# load weights if available
print("loading weights...")
model.load_weights(sys.argv[2])
audiofile = sys.argv[3]

if mode == "train":
    print("loading audio input...")
    data, sample_rate = soundfile.read(audiofile)
    data = data[:,0] # extract the 0th channel
    
Example #30
 def objective(self, out_true, out_pred):
     mse = mean_squared_error(out_true, out_pred)
     return mse
Example #31
def loss_function_with_multiple_distance(y_true, y_pred):
    error = mean_squared_error(y_true, y_pred)
    for i in range(8):
        error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
    return error
Example #32
def mse_loss(x, x_decoded):
    original_dim = np.float32(np.prod((IMG_DIM, IMG_DIM, 3)))
    return K.mean(original_dim * mean_squared_error(x, x_decoded))
Example #33
for i in range(nb_test_samples):
	if(random.random() < 0.5):#50 % of the time make feature[2] = 1 & the output also = 1
		test_features[i, 2] = 1
		test_labels[i] = 1
		test_ones = test_ones + 1
	else:
		test_features[i,2] = 0
print "Expect "+str(test_ones)+" ones in test set"

###Objective function:

#from keras.objectives import categorical_crossentropy
#loss = tf.reduce_mean(categorical_crossentropy(labels, output_layer))

from keras.objectives import mean_squared_error
loss = tf.reduce_mean(mean_squared_error(labels, output_layer))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

###training:
with sess.as_default():
	for i in range(nb_train_samples): #Batching this might help also.
		train_step.run(feed_dict={
			features:train_features[i,:].reshape(1,sample_width), 
			labels: train_labels[i,:].reshape(1,1),
			K.learning_phase(): 1
		})


###evaluation:
correct_prediction = tf.equal(tf.round(output_layer), labels)
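
An assumed continuation (not in the original snippet): correct_prediction is typically reduced to an accuracy and evaluated on the held-out set defined above.

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc = sess.run(accuracy, feed_dict={
	features: test_features,
	labels: test_labels.reshape(nb_test_samples, 1),
	K.learning_phase(): 0
})
print("test accuracy: " + str(acc))
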
Example #34
def poi_gau_mix(y_true, log_y_pred):
    return log_poisson(y_true, log_y_pred) + 0.01 * mean_squared_error(
        y_true, K.exp(log_y_pred))
Example #35
def clipped_mse(y_true, y_pred):
    y_true = K.clip(y_true, -1.0, 1.0)
    y_pred = K.clip(y_pred, -1.0, 1.0)
    return objectives.mean_squared_error(y_true, y_pred)
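
A quick numeric check of the clipping behaviour (backend only, no model needed); the values are illustrative:

import numpy as np
from keras import backend as K

y_t = K.variable(np.array([[2.0, -3.0]]))
y_p = K.variable(np.array([[0.5, 0.5]]))
# targets clip to [1.0, -1.0]; MSE over the last axis -> (0.5**2 + 1.5**2) / 2 = 1.25
print(K.eval(clipped_mse(y_t, y_p)))
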
Example #36
def mse_steer_angle(y_true, y_pred):
    return mean_squared_error(y_true[0], y_pred[0])
Example #37
def rmse(y_true, y_pred):
    from keras import backend as k
    from keras.objectives import mean_squared_error

    return k.sqrt(mean_squared_error(y_true, y_pred))
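
A brief usage sketch (the model is assumed); the same callable can serve as the training loss or as an extra metric:

# Sketch: model is any Keras regression model.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=[rmse])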