Example #1
def point_metrics(y_data, output):
    """Evaluate metrics comparing real and predicted output values."""
    # Reshape in place (no copy) so the arrays are not duplicated in RAM
    original_real_shape = y_data.shape
    y_data.shape = (y_data.shape[2], y_data.shape[1], y_data.shape[0])
    original_output_shape = output.shape
    output.shape = (output.shape[2], output.shape[1], output.shape[0])
    mae = losses.mean_absolute_error(y_data, output)
    mse = losses.mean_squared_error(y_data, output)
    mape = losses.mean_absolute_percentage_error(y_data, output)
    try:
        # TF1-style graph mode: evaluate the metric tensors in the Keras session
        keras_session = backend.get_session()
        mae = mae.eval(session=keras_session)
        mse = mse.eval(session=keras_session)
        mape = mape.eval(session=keras_session)
    except NotImplementedError:
        # TF2 eager mode: the results are already concrete tensors
        mae = mae.numpy()
        mse = mse.numpy()
        mape = mape.numpy()
    y_data.shape = original_real_shape
    output.shape = original_output_shape
    return [
        np.mean(mae),           # aggregate MAE
        np.sqrt(np.mean(mse)),  # aggregate RMSE
        np.mean(mape),          # aggregate MAPE
        mae,                    # unreduced MAE
        np.sqrt(mse),           # unreduced RMSE
        mape,                   # unreduced MAPE
    ]
Example #2
def l1_loss(x, v, c=None):
    """L1 loss between x and the c-th entry of v."""
    # The original used a def-time default (c=count) and assigned to a dead
    # local; reading and advancing the module-level counter at call time is
    # assumed to be the intent.
    global count
    if c is None:
        c = count
    x = K.flatten(x)
    v = K.flatten(v[c])
    count = c + 1  # advance the counter for the next call
    return K.mean(losses.mean_absolute_error(x, v))
Example #3
def test_loss(y_true, y_pred):
    return (sd_weights[0] * dice_coef_loss(y_true, y_pred)
            + sd_weights[0] * seg_crossentropy_weighted(y_true, y_pred)
            + sd_weights[3] * mean_absolute_error(y_true, y_pred)
            + sd_weights[0] * vol_diff(y_true, y_pred)
            + sd_weights[4] * recall_loss(y_true, y_pred))
Example #4
    def vgg16_perceptual_loss(self, y, yhat):
        # Voxelwise L1 term
        mae = losses.mean_absolute_error(y, yhat)

        y_reshaped = self.organize_slices_for_feature_extraction(y)
        y_features = vgg16_feature_extractor(y_reshaped)
        yhat_reshaped = self.organize_slices_for_feature_extraction(yhat)
        yhat_features = vgg16_feature_extractor(yhat_reshaped)

        # Gram matrices of true vs. predicted VGG16 features; their squared
        # difference forms the perceptual term
        S = self.gram_matrix(y_features)
        C = self.gram_matrix(yhat_features)
        perceptual = K.mean(K.square(S - C), axis=(0, 1, 2))

        return mae + perceptual
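The snippet relies on a self.gram_matrix helper that is not shown. A minimal sketch of what it might look like, assuming channels-last feature maps with static spatial dimensions (the shape handling and normalization are assumptions, not the original implementation):

    def gram_matrix(self, features):
        # Assumed helper: Gram matrix over spatial positions for
        # channels-last features of shape (batch, h, w, c)
        _, h, w, c = K.int_shape(features)
        flat = K.reshape(features, (-1, h * w, c))
        # (batch, c, c): inner products between channel activation maps
        return K.batch_dot(flat, flat, axes=(1, 1)) / float(h * w * c)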
Example #5
    def multi_loss(self, ys_true, ys_pred):
        assert len(ys_true) == self.nb_outputs
        assert len(ys_pred) == self.nb_outputs
        loss = 0

        #for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
        #    precision = K.exp(-log_var[0])
        #    loss += K.sum(precision * (y_true - y_pred)**2., -1) + log_var[0]

        precision = K.exp(-self.log_vars[0][0])
        loss += precision * mean_absolute_error(
            ys_true[0], ys_pred[0]) + self.log_vars[0][0]
        precision = K.exp(-self.log_vars[1][0])
        loss += precision * quaternion_mean_multiplicative_error(
            ys_true[1], ys_pred[1]) + self.log_vars[1][0]
        #loss += precision * quaternion_phi_4_error(ys_true[1], ys_pred[1]) + self.log_vars[1][0]

        return K.mean(loss)
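Here self.log_vars holds one trainable log-variance per task: each task loss is scaled by exp(-log_var) and regularized by +log_var, the homoscedastic-uncertainty weighting of Kendall et al. A minimal sketch of how such weights might be created inside a custom Keras layer (the class and names are illustrative, not from the original source):

from tensorflow.keras.layers import Layer

class MultiLossLayer(Layer):
    """Illustrative holder for the per-task log-variances used above."""

    def __init__(self, nb_outputs=2, **kwargs):
        super().__init__(**kwargs)
        self.nb_outputs = nb_outputs

    def build(self, input_shape):
        # One trainable log-variance per output head, initialized to zero
        self.log_vars = [
            self.add_weight(name="log_var_" + str(i), shape=(1,),
                            initializer="zeros", trainable=True)
            for i in range(self.nb_outputs)
        ]
        super().build(input_shape)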
Example #6
    def make_critic_train_fn(self):
        action_oh_pl = kb.placeholder(shape=(None, self.env.action_space.n))
        discounted_rw_pl = kb.placeholder(shape=(None, ))

        critic_results = self.critic_model.output

        # Mean absolute error of the value prediction
        loss = kb.mean(mean_absolute_error(discounted_rw_pl, critic_results))

        adam = Adam(lr=self.learning_rate)

        update_op = adam.get_updates(
            loss=loss, params=self.critic_model.trainable_weights)

        train_fn = kb.function(
            inputs=[self.critic_model.input, action_oh_pl, discounted_rw_pl],
            outputs=[self.critic_model.output, loss],
            updates=update_op)
        return train_fn
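Calling the returned backend function runs one critic update. A hedged usage sketch, where states, actions_onehot, and returns are illustrative numpy batches matching the three placeholder inputs:

# hypothetical call: one critic update on a batch
values, critic_loss = train_fn([states, actions_onehot, returns])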
Example #7
    def perceptual_loss(self, y, yhat):
        y_features = feature_extractor(y)
        yhat_features = feature_extractor(yhat)
        return (K.sum(losses.mean_absolute_error(y, yhat), axis=(1, 2, 3))
                + losses.mean_squared_error(y_features, yhat_features))
Example #8
def l1(y_true, y_pred):
    """ L1 metric (MAE) """
    return losses.mean_absolute_error(y_true, y_pred)
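Because it has the standard (y_true, y_pred) signature, the function can be passed straight to compile. A minimal sketch (the model itself is illustrative):

from tensorflow import keras

# hypothetical model; l1 then shows up as "l1" in the training logs
model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse", metrics=[l1])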
Example #9
    def loss_func(x, y):
        return mean_absolute_error(x, y) * weight_l1 + ssim_loss(x, y) * weight_ssim
Example #10
    def DSSIM_L1(y_true, y_pred):
        return (alpha * dssim(y_true, y_pred)
                + (1.0 - alpha) * mean_absolute_error(y_true, y_pred))
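Here alpha is captured from the enclosing scope. One common way to make it configurable is a closure factory, sketched here under the assumption that dssim is defined alongside (the factory name is hypothetical):

def make_dssim_l1(alpha=0.8):
    # hypothetical factory: returns a DSSIM+L1 loss with alpha baked in
    def DSSIM_L1(y_true, y_pred):
        return (alpha * dssim(y_true, y_pred)
                + (1.0 - alpha) * mean_absolute_error(y_true, y_pred))
    return DSSIM_L1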
Example #11
def nowcast_mae(y_true, y_pred):
    """MAE normalized by the global SCALE constant."""
    return mean_absolute_error(y_true, y_pred) / SCALE
Example #12
def weighted_ce_l1_bycase(y_true, y_pred):
    # Original weighted form kept for reference:
    # return seg_crossentropy_weighted_bycase(y_true, y_pred) + mean_absolute_error(y_true, y_pred) + sd_weights[0] * dice_coef_loss(y_true, y_pred) + sd_weights[0] * vol_diff(y_true, y_pred)
    return (seg_crossentropy_weighted_bycase(y_true, y_pred)
            + mean_absolute_error(y_true, y_pred)
            + 0.5 * dice_coef_loss(y_true, y_pred)  # changed from regular Dice loss to 0.5 * Dice
            + vol_diff(y_true, y_pred))
Example #13
def sce_dice_l2_vol_loss(y_true, y_pred):
    # Original weighted form kept for reference:
    # return sd_weights[0] * dice_coef_loss(y_true, y_pred) + sd_weights[0] * seg_crossentropy_weighted(y_true, y_pred) + sd_weights[3] * mean_absolute_error(y_true, y_pred) + sd_weights[0] * vol_diff(y_true, y_pred)
    return (dice_coef_loss(y_true, y_pred)
            + seg_crossentropy_weighted(y_true, y_pred)
            + mean_absolute_error(y_true, y_pred)
            + vol_diff(y_true, y_pred))
Example #14
def l1_loss(y_true, y_pred):
    return sd_weights[3] * mean_absolute_error(y_true, y_pred)
Example #15
def sce_and_ssim_with_l1_loss(y_true, y_pred):
    return (sd_weights[4] * seg_crossentropy(y_true, y_pred)
            + sd_weights[4] * ssim_loss(y_true, y_pred)
            + sd_weights[4] * mean_absolute_error(y_true, y_pred))
Example #16
def l1_and_ssim_loss(y_true, y_pred):
    return (sd_weights[5] * mean_absolute_error(y_true, y_pred)
            + sd_weights[3] * ssim_loss(y_true, y_pred))
Example #17
    def loss(y_true, y_pred):
        # Score y_pred by how well the discriminator reconstructs it
        reconst = discriminator(y_pred)
        return mean_absolute_error(reconst, y_pred)
Example #18
def sigma_mean_absolute_error(y_true, y_pred):
    """Mean absolute error for variable-sigma output, scored on column 0 only."""
    return losses.mean_absolute_error(y_true[:, 0], y_pred[:, 0])
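A quick numeric check of the column-0 convention (eager TF 2.x assumed; the data is made up):

import numpy as np

# column 0 holds the mean prediction, column 1 the sigma (ignored by the metric)
y_true = np.array([[1.0, 0.0], [2.0, 0.0]], dtype="float32")
y_pred = np.array([[1.5, 0.3], [2.5, 0.2]], dtype="float32")
print(sigma_mean_absolute_error(y_true, y_pred))  # -> 0.5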
Example #19
def custom_loss(y_true, y_pred):
    mae_loss = losses.mean_absolute_error(y_true, y_pred)
    # Squash both tensors into (0, 1) so the KL term receives valid probabilities
    y_true, y_pred = tf.math.sigmoid(y_true), tf.math.sigmoid(y_pred)
    return losses.kullback_leibler_divergence(
        y_true, y_pred) + mae_loss  # alternative: js_divergence(y_true, y_pred)
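The trailing comment hints at a Jensen-Shannon variant. A hedged sketch of what js_divergence could look like, built from the same Keras KL primitive (this is an assumption, not the original author's code):

def js_divergence(y_true, y_pred):
    # symmetric JS divergence: average of two KL terms against the midpoint
    m = 0.5 * (y_true + y_pred)
    return (0.5 * losses.kullback_leibler_divergence(y_true, m)
            + 0.5 * losses.kullback_leibler_divergence(y_pred, m))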
Example #20
    def vae_loss(y_true, y_pred):
        # Closes over inputs/outputs, mu, log_sigma, and beta from the builder
        reconstruction_loss = losses.mean_absolute_error(inputs, outputs)
        kl_loss = (
            0.5 *
            K.sum(K.exp(log_sigma) + K.square(mu) - 1. - log_sigma, axis=1))
        return reconstruction_loss + (beta * kl_loss)
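Because vae_loss closes over tensors from the encoder/decoder, it is typically defined inside the model builder and handed to compile there. A hedged sketch of the wiring (vae and the tensor names are assumptions):

from tensorflow.keras.models import Model

# hypothetical: inputs/outputs, mu, log_sigma, beta come from the enclosing builder
vae = Model(inputs, outputs, name="vae")
vae.compile(optimizer="adam", loss=vae_loss)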
Example #21
#%%
for target in state_list:
    for target_y in ['CC_s', 'TT_s']:
        fig, ax = plt.subplots()
        test_df[(test_df.state == target) & (test_df.predict == True)].plot(
            x='date', y=target_y, ax=ax, title=target)
        test_df2[(test_df2.state == target) & (test_df2.predict == True)].plot(
            x='date', y=target_y, ax=ax, marker='x', linewidth=0)
        CORE_DATA[CORE_DATA.state == target].plot(x='date', y=target_y, ax=ax)
        NewCORE_DATA[(NewCORE_DATA.state == target)
                     & (NewCORE_DATA.date >= '2020-04-16')].plot(
                         x='date', y=target_y, ax=ax, marker='o', linewidth=2)
        # PR_state[STATE].plot(x='ds', y='yhat', ax=ax)
        plt.axvline(x='2020-04-16', linestyle='--', color='r')
        ax.legend(['forecast', 'forecast_2', 'history', 'real', '04-16'],
                  loc='upper left')
        plt.savefig('./prophet/' + target_y + '/' + target + '.png',
                    bbox_inches='tight')
        plt.close()
#%%

# Report mean and spread of the per-sample errors for model_c
mae_all = mean_absolute_error(y_pred=model_c.predict(X_all), y_true=Y_C_all)
print(np.mean(mae_all), np.std(mae_all))

mae_test = mean_absolute_error(y_pred=model_c.predict(X_test), y_true=Y_C_test)
print(np.mean(mae_test), np.std(mae_test))

mse_all = mean_squared_error(y_pred=model_c.predict(X_all), y_true=Y_C_all)
print(np.mean(mse_all), np.std(mse_all))

mse_test = mean_squared_error(y_pred=model_c.predict(X_test), y_true=Y_C_test)
print(np.mean(mse_test), np.std(mse_test))
#%%
# Same report for model_t
mae_t_all = mean_absolute_error(y_pred=model_t.predict(X_all), y_true=Y_T_all)
print(np.mean(mae_t_all), np.std(mae_t_all))

mae_t_test = mean_absolute_error(y_pred=model_t.predict(X_test), y_true=Y_T_test)
print(np.mean(mae_t_test))
Example #22
        def loss_func(x, y):
            return (mean_absolute_error(x, y) * weight_l1
                    + ssim_loss(x, y) * weight_ssim
                    + perceptual_loss(x, y) * weight_perceptual_loss)
Example #23
print("EVALUATE")
test_loss, test_acc = model.evaluate(dsTest, verbose=2)

#%%[markdown]
## Predict
#
yPred = model.predict(dsTest)
images = loadImagesFromDir('resources/{}/test/masks/'.format(objectName), (imageH, imageW), 'rgb')
labels = list(np.load('resources/{}/{}_keypoints_test.npy'.format(objectName, objectName)).reshape(testDataSize,numFeatures))

#%%[markdown]
## Plot prediction results
#
# Pair each image with its label and prediction, sorted by per-sample MAE (best first)
a = list(zip(images, labels, yPred))
a.sort(key=lambda t: mean_absolute_error(t[1], t[2]).numpy())
a = np.array(a)

print("BEST\n")
plotObjectsWithKeypoints(slice=0, images=a[:,0], labels=a[:,1], yPred=a[:,2])

print("\nWORST\n")
plotObjectsWithKeypoints(slice=6, images=a[:,0], labels=a[:,1], yPred=a[:,2])


#%%[markdown]
## Save model
#
if saveModel:
    model.save('resources/models/' + modelName)
Example #24
    def evaluate(self, log, path, num_prefixes=8):
        # Generate raw predictions
        raw = self._evaluate_raw(log)
        raw.to_csv("ts_" + self.abstraction + "_" + str(self.horizon) +
                   path.replace("/", ""),
                   encoding="utf-8",
                   sep=",",
                   index=False)
        # Compute metrics
        next_activity_acc = len(
            raw[(raw["pred-next-activity"] == raw["true-next-activity"])
                & (raw["prefix-length"] >= 1)]) / np.max(
                    [len(raw[raw["prefix-length"] >= 1]), 1])
        next_time_mae = mean_absolute_error(
            raw[raw["prefix-length"] >= 1]["true-next-time"].astype(
                float).to_numpy(), raw[raw["prefix-length"] >= 1]
            ["pred-next-time"].astype(float).to_numpy()).numpy()
        outcome_acc = len(raw[(raw["pred-outcome"] == raw["true-outcome"])
                              & (raw["prefix-length"] >= 1)]) / np.max(
                                  [len(raw[raw["prefix-length"] >= 1]), 1])
        # Note: the "pred-cylce-time" spelling matches the column emitted by _evaluate_raw
        cycle_time_mae = mean_absolute_error(
            raw[raw["prefix-length"] >= 1]["true-cycle-time"].astype(
                float).to_numpy(), raw[raw["prefix-length"] >= 1]
            ["pred-cylce-time"].astype(float).to_numpy()).numpy()

        next_activity_acc_pre = [
            len(raw[(raw["pred-next-activity"] == raw["true-next-activity"])
                    & (raw["prefix-length"] == prefix_length)]) /
            np.max([len(raw[raw["prefix-length"] == prefix_length]), 1])
            for prefix_length in range(1, num_prefixes + 1)
        ]
        next_time_mae_pre = [
            mean_absolute_error(
                raw[raw["prefix-length"] ==
                    prefix_length]["true-next-time"].astype(float).to_numpy(),
                raw[raw["prefix-length"] == prefix_length]
                ["pred-next-time"].astype(float).to_numpy()).numpy()
            for prefix_length in range(1, num_prefixes + 1)
        ]
        outcome_acc_pre = [
            len(raw[(raw["pred-outcome"] == raw["true-outcome"])
                    & (raw["prefix-length"] == prefix_length)]) /
            np.max([len(raw[raw["prefix-length"] == prefix_length]), 1])
            for prefix_length in range(1, num_prefixes + 1)
        ]
        cycle_time_mae_pre = [
            mean_absolute_error(
                raw[raw["prefix-length"] ==
                    prefix_length]["true-cycle-time"].astype(float).to_numpy(),
                raw[raw["prefix-length"] == prefix_length]
                ["pred-cylce-time"].astype(float).to_numpy()).numpy()
            for prefix_length in range(1, num_prefixes + 1)
        ]

        prefix_predictions = next_activity_acc_pre + next_time_mae_pre + outcome_acc_pre + cycle_time_mae_pre

        if not os.path.exists(path):
            prefix_columns = []
            for metric in ["naa_{}", "ntm_{}", "oa_{}", "ctm_{}"]:
                for prefix in range(1, num_prefixes + 1):
                    prefix_columns.append(metric.format(prefix))
            columns = [
                "model", "timestamp", "num_layer", "num_shared_layer",
                "hidden_neurons", "advanced_time_attributes",
                "data_attributes", "event_dim", "text_encoding", "text_dim",
                "next_activity_acc", "next_time_mae", "outcome_acc",
                "cycle_time_mae"
            ] + prefix_columns
            df = pd.DataFrame(columns=columns)
            df.to_csv(path, encoding="utf-8", sep=",", index=False)
        df = pd.read_csv(path, sep=",")

        df.loc[len(df)] = [
            "ts",
            datetime.now().strftime("%Y-%m-%d-%H-%M-%S"), "-", "-", "-", "-",
            "-", "-", self.abstraction, self.horizon, next_activity_acc,
            next_time_mae, outcome_acc, cycle_time_mae
        ] + prefix_predictions
        df.to_csv(path, encoding="utf-8", sep=",", index=False)
        return df
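A hedged usage sketch, assuming an instance of this predictor class and a held-out event log (ts_model, test_log, and the path are illustrative):

# hypothetical: append this run's metrics to a shared results CSV
results_df = ts_model.evaluate(test_log, "results.csv", num_prefixes=8)
print(results_df.tail(1))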
Example #25
    def vae_loss(y_true, y_pred):
        recon = losses.mean_absolute_error(inputs, outputs)
        kl_loss = beta * 0.5 * K.sum(
            K.exp(log_sigma) + K.square(mu) - 1. - log_sigma, axis=1)
        #kl_loss = K.print_tensor(kl_loss[0])
        return K.mean(recon + kl_loss)
Example #26
    def cGAN_loss_photo(real_photo, fake_photo):
        return tf.reduce_mean(
            losses.mean_absolute_error(real_photo, fake_photo)) * self.lambda_
Example #27
def mae(hr, sr):
    """MAE between the high-resolution target and the super-resolved output."""
    return mean_absolute_error(hr, sr)
Example #28
    def n_mae(*args, **kwargs):
        # y_nums comes from the enclosing scope; rescale MAE to percent of range
        mae = mean_absolute_error(*args, **kwargs)
        return mae / (y_nums - 1) * 100.0
Example #29
def train_batch(imgs_A, imgs_B):
    with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
        # Forward translations
        Fake_Mongoloid = generator_Negroid_to_Mongoloid(imgs_A, training=True)
        Fake_Negroid = generator_Mongoloid_to_Negroid(imgs_B, training=True)

        # Discriminator losses on real vs. translated images
        logits_real_A = discriminator_Negroid(imgs_A, training=True)
        logits_fake_A = discriminator_Negroid(Fake_Negroid, training=True)
        dA_loss = discriminator_loss(logits_real_A, logits_fake_A)

        logits_real_B = discriminator_Mongoloid(imgs_B, training=True)
        logits_fake_B = discriminator_Mongoloid(Fake_Mongoloid, training=True)
        dB_loss = discriminator_loss(logits_real_B, logits_fake_B)

        d_loss = (dA_loss + dB_loss) / 2

        # Translate images back to the original domain (cycle consistency)
        reconstr_Negroid = generator_Mongoloid_to_Negroid(Fake_Mongoloid, training=True)
        reconstr_Mongoloid = generator_Negroid_to_Mongoloid(Fake_Negroid, training=True)

        # Identity mappings
        id_Negroid = generator_Mongoloid_to_Negroid(imgs_A, training=True)
        id_Mongoloid = generator_Negroid_to_Mongoloid(imgs_B, training=True)

        # Adversarial, cycle-reconstruction, and identity terms (weights 5 / 10 / 2);
        # `valid` is the all-ones real label from the enclosing scope
        gen_Mongoloid_loss = 5 * tf.math.reduce_mean(mean_squared_error(logits_fake_A, valid))
        gen_Negroid_loss = 5 * tf.math.reduce_mean(mean_squared_error(logits_fake_B, valid))
        Mongo_reconstr_loss = 10 * tf.math.reduce_mean(mean_absolute_error(reconstr_Negroid, imgs_A))
        Negro_reconstr_loss = 10 * tf.math.reduce_mean(mean_absolute_error(reconstr_Mongoloid, imgs_B))
        Mongo_id_loss = 2 * tf.math.reduce_mean(mean_absolute_error(id_Negroid, imgs_A))
        Negro_id_loss = 2 * tf.math.reduce_mean(mean_absolute_error(id_Mongoloid, imgs_B))

        gA_loss = tf.math.reduce_sum([
            gen_Negroid_loss,
            Negro_reconstr_loss,
            Negro_id_loss,
        ])

        gB_loss = tf.math.reduce_sum([
            gen_Mongoloid_loss,
            Mongo_reconstr_loss,
            Mongo_id_loss,
        ])

        gen_loss = tf.math.reduce_sum([
            gen_Mongoloid_loss,
            gen_Negroid_loss,
            Mongo_reconstr_loss,
            Negro_reconstr_loss,
            Mongo_id_loss,
            Negro_id_loss,
        ])

    # Apply gradients to both discriminators, then both generators
    gradients_of_d = d_tape.gradient(
        d_loss, discriminator_Negroid.trainable_variables + discriminator_Mongoloid.trainable_variables)
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_d, discriminator_Negroid.trainable_variables + discriminator_Mongoloid.trainable_variables))

    gradients_of_generator = g_tape.gradient(
        gen_loss, generator_Negroid_to_Mongoloid.trainable_variables + generator_Mongoloid_to_Negroid.trainable_variables)
    optimizer.apply_gradients(
        zip(gradients_of_generator, generator_Negroid_to_Mongoloid.trainable_variables + generator_Mongoloid_to_Negroid.trainable_variables))

    return (dA_loss, dB_loss, d_loss, gen_loss, gen_Mongoloid_loss, gen_Negroid_loss,
            Mongo_reconstr_loss, Negro_reconstr_loss, Mongo_id_loss, Negro_id_loss,
            gA_loss, gB_loss)
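A minimal outer loop for this step, assuming a tf.data pipeline that yields aligned image batches (epochs and paired_dataset are illustrative names):

# hypothetical driver: run train_batch over the paired dataset each epoch
for epoch in range(epochs):
    for imgs_A, imgs_B in paired_dataset:
        metrics = train_batch(imgs_A, imgs_B)
    print("epoch", epoch, "d_loss", float(metrics[2]), "gen_loss", float(metrics[3]))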
Example #30
    def cGAN_loss_photo_uniform(real_photo_quartets, fake_photos):
        return tf.reduce_mean(
            losses.mean_absolute_error(real_photo_quartets, fake_photos)) * self.lambda_