# (the pre-fine-tuning evaluation that produces error_metric and the ml_*_loss
#  values printed below is truncated in the source)
error_metric_f, ml_train_loss_f, ml_val_loss_f, ml_test_loss_f = \
    ml_model_obj.evaluate(ml_x_t, train_y_t, ml_val_x_t, val_y_t, ml_test_x,
                          test_y_t, scale=NN_scaling)

title = 'metric learning'
print_stat(title, error_metric, ml_train_loss, ml_val_loss, ml_test_loss)
if fine_tuning:
    title = 'metric learning with fine tuning'
    print_stat(title, error_metric_f, ml_train_loss_f, ml_val_loss_f,
               ml_test_loss_f)
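
# print_stat is a project helper defined elsewhere; the sketch below is only a
# hypothetical stand-in (not the project's implementation), assuming it reports
# the per-sample error statistics and the three losses for one experiment.
import numpy as np

def print_stat_sketch(title, error, train_loss, val_loss, test_loss):
    error = np.asarray(error).ravel()
    print(f'--- {title} ---')
    print(f'mean / median error : {error.mean():.3f} / {np.median(error):.3f}')
    print(f'train / val / test loss: {train_loss:.4f} / {val_loss:.4f} / {test_loss:.4f}')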

# =============================================================================

from plotters import error_dist

error_dist(ml_x_s,
           train_y_s,
           ml_x_t,
           train_y_t,
           error_metric,
           test_y_t,
           title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_metric, 100)
plt.show()
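
# plot_cdf also comes from the project's plotters module and is not shown here.
# A minimal, hypothetical sketch of what a call such as plot_cdf(error, 100)
# presumably draws: the empirical CDF of the per-sample error over the given
# number of bins.
import numpy as np
import matplotlib.pyplot as plt

def plot_cdf_sketch(errors, bins=100):
    errors = np.asarray(errors).ravel()
    counts, edges = np.histogram(errors, bins=bins)
    cdf = np.cumsum(counts) / counts.sum()   # cumulative fraction of samples
    plt.plot(edges[1:], cdf)
    plt.xlabel('error')
    plt.ylabel('fraction of samples')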

# del ml_model
# del ml_model_obj
Example 2
# note: the embedder is re-fitted on every split here; if a single, shared
# embedding is intended, transform() on an already-fitted embedder may be what
# is wanted instead
emb_val_x_s = embedder.fit_transform(val_x_s)
emb_val_x_t = embedder.fit_transform(val_x_t)
emb_test_x = embedder.fit_transform(test_x_t)

# =============================================================================
num_inputs = emb_x_s.shape[1]  # input layer size
# =============================================================================

model_obj = my_models(num_inputs, dropout=dropout_pr)
model = model_obj.build_model()
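# first fit the model on the (embedded) source-domain data, then fine-tune the
# same model on the target-domain data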
model = model_obj.fit(emb_x_s, train_y_s, emb_val_x_s, val_y_s,
                      scale=NN_scaling)
model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                      scale=NN_scaling)

error_fine_tuning, train_loss, val_loss, test_loss = \
    model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                       test_y_t, scale=NN_scaling)
  
title = 'Naive learning + fine tuning'
print_stat(title, error_fine_tuning, train_loss, val_loss, test_loss)

# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_fine_tuning,
           test_y_t, title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_fine_tuning, 100)
plt.show()
Example 3
    # (fragment of plotters.error_dist: the opening lines are truncated in the
    #  source; the two-panel branch below is reconstructed by analogy with the
    #  three-panel else branch, and the figsize is an assumption)
    if weights is None:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
        plot_scatter_colored(rssi_diff,
                             train_y_t,
                             title='measure difference',
                             ax=ax1)
        plot_scatter_colored(error_test,
                             test_y_t,
                             title='error distribution',
                             ax=ax2)
    else:
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 5))
        plot_scatter_colored(rssi_diff,
                             train_y_t,
                             title='measure difference',
                             ax=ax1)
        plot_scatter_colored(error_test,
                             test_y_t,
                             title='error distribution',
                             ax=ax2)
        plot_scatter_colored(weights,
                             train_y_s,
                             title='weights based on kernel',
                             ax=ax3)
    if title is not None:
        fig.suptitle(title)
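
# plot_scatter_colored itself is defined elsewhere in plotters; the sketch
# below is a hypothetical stand-in (assumed behaviour, not the project's
# implementation): scatter the 2-D positions and colour each point by the
# corresponding per-point value.
import numpy as np
import matplotlib.pyplot as plt

def plot_scatter_colored_sketch(values, positions, title=None, ax=None):
    positions = np.asarray(positions)
    if ax is None:
        _, ax = plt.subplots()
    sc = ax.scatter(positions[:, 0], positions[:, 1],
                    c=np.ravel(values), cmap='viridis')
    ax.figure.colorbar(sc, ax=ax)
    if title is not None:
        ax.set_title(title)
    return ax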


error_dist(emb_x_s, emb_x_t, error_sample_bias, weights=coef_s, title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_sample_bias, 100)
plt.show()
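
# coef_s (the per-sample weights used above) is computed elsewhere in the
# project; the panel title 'weights based on kernel' suggests a kernel-based
# correction for sample selection bias. A rough, hypothetical sketch of one
# common way to obtain such weights: a kernel density-ratio estimate
# p_target(x) / p_source(x) evaluated on the source samples.
import numpy as np
from sklearn.neighbors import KernelDensity

def kernel_importance_weights(x_s, x_t, bandwidth=1.0):
    kde_s = KernelDensity(bandwidth=bandwidth).fit(x_s)
    kde_t = KernelDensity(bandwidth=bandwidth).fit(x_t)
    log_ratio = kde_t.score_samples(x_s) - kde_s.score_samples(x_s)
    weights = np.exp(log_ratio)
    return weights / weights.mean()   # normalise so the average weight is 1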
if fine_tuning:
    # (the fine-tuning fit that presumably precedes this evaluation is
    #  truncated in the source)
    error_metric_plus_sample_f, train_loss_f, val_loss_f, test_loss_f = \
        w_model_obj.evaluate(emb_u_x_t, u_train_y_t, emb_u_val_x_t, u_val_y_t,
                             emb_test_x, test_y_t, scale=NN_scaling)

title = 'metric training plus sample selection bias'
print_stat(title, error_metric_plus_sample, train_loss, val_loss, test_loss)

if fine_tuning:
    title = 'metric training plus sample selection bias with fine tuning'
    print_stat(title, error_metric_plus_sample_f, train_loss_f, val_loss_f,
               test_loss_f)
# =============================================================================

from plotters import error_dist
error_dist(emb_x_s,
           train_y_s,
           emb_u_x_t,
           u_train_y_t,
           error_metric_plus_sample,
           test_y_t,
           weights=coef_s,
           title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_metric_plus_sample, 100)
plt.show()
#
#del model
#del w_model_obj
Example 5
# free the previous model object (its original guard is truncated in the source)
del model_obj

model_obj = my_models(num_inputs, dropout=dropout_pr)
model = model_obj.build_model()
model = model_obj.fit(emb_x_t,
                      train_y_t,
                      emb_val_x_t,
                      val_y_t,
                      scale=NN_scaling)

error_normal, train_loss, val_loss, test_loss = \
    model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                       test_y_t, scale=NN_scaling)

title = 'normal learning'
print_stat(title, error_normal, train_loss, val_loss, test_loss)
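
# my_models is the project's own model wrapper and is not defined in this
# listing. The class below is only a hypothetical sketch of the interface used
# above (build_model / fit / evaluate, with evaluate returning the per-sample
# error plus train, validation and test losses); the network shape, optimiser,
# epoch count and the 2-D position output are all assumptions, and the scale
# argument is ignored here.
import numpy as np
import tensorflow as tf

class my_models_sketch:
    def __init__(self, num_inputs, dropout=0.2):
        self.num_inputs = num_inputs
        self.dropout = dropout
        self.model = None

    def build_model(self):
        # small dense regression network predicting a 2-D position
        inputs = tf.keras.Input(shape=(self.num_inputs,))
        x = tf.keras.layers.Dense(128, activation='relu')(inputs)
        x = tf.keras.layers.Dropout(self.dropout)(x)
        x = tf.keras.layers.Dense(64, activation='relu')(x)
        outputs = tf.keras.layers.Dense(2)(x)
        self.model = tf.keras.Model(inputs, outputs)
        self.model.compile(optimizer='adam', loss='mse')
        return self.model

    def fit(self, x, y, val_x, val_y, scale=False):
        self.model.fit(x, y, validation_data=(val_x, val_y),
                       epochs=100, batch_size=32, verbose=0)
        return self.model

    def evaluate(self, x, y, val_x, val_y, test_x, test_y, scale=False):
        train_loss = self.model.evaluate(x, y, verbose=0)
        val_loss = self.model.evaluate(val_x, val_y, verbose=0)
        test_loss = self.model.evaluate(test_x, test_y, verbose=0)
        pred = self.model.predict(test_x, verbose=0)
        error = np.linalg.norm(pred - np.asarray(test_y), axis=1)  # per-sample distance error
        return error, train_loss, val_loss, test_loss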

# =============================================================================
from plotters import error_dist
# error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_normal,
#            test_y_t, title=title)
plt.show()

from plotters import plot_cdf

plot_cdf(error_normal, 100)
plt.show()

#
#del model
#del model_obj
Example 6
error_naive, nl_train_loss, nl_val_loss, nl_test_loss = \
    model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                       test_y_t, scale=NN_scaling)
  
if fine_tuning:
    # fine-tune on the target data, then re-evaluate
    model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                          scale=NN_scaling)

    error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f = \
        model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                           test_y_t, scale=NN_scaling)
  
title = 'Naive learning'
print_stat(title, error_naive, nl_train_loss, nl_val_loss, nl_test_loss)

if fine_tuning:
    title = 'Naive learning with fine-tuning'
    print_stat(title, error_naive_f, nl_train_loss_f, nl_val_loss_f,
               nl_test_loss_f)
# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_naive,
           test_y_t, title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_naive, 100)
plt.show()
  
del model
del model_obj