error_metric_f, ml_train_loss_f, ml_val_loss_f, ml_test_loss_f = \
    ml_model_obj.evaluate(ml_x_t, train_y_t, ml_val_x_t, val_y_t, ml_test_x,
                          test_y_t, scale = NN_scaling)

title = 'metric learning'
print_stat(title, error_metric, ml_train_loss, ml_val_loss, ml_test_loss)
if fine_tuning:
    title = 'metric learning with fine tuning'
    print_stat(title, error_metric_f, ml_train_loss_f, ml_val_loss_f,
               ml_test_loss_f)

# =============================================================================

from plotters import error_dist

error_dist(ml_x_s,
           train_y_s,
           ml_x_t,
           train_y_t,
           error_metric,
           test_y_t,
           title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_metric, 100)
plt.show()

# del ml_model
# del ml_model_obj

# =============================================================================
# Example #2
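# Embed the source/target validation sets and the target test set
# (fit_transform refits the embedder on each split).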
emb_val_x_s = embedder.fit_transform(val_x_s)
emb_val_x_t = embedder.fit_transform(val_x_t)
emb_test_x = embedder.fit_transform(test_x_t)

# =============================================================================
num_inputs = emb_x_s.shape[1]  # input layer size
# =============================================================================

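# Build a fresh network whose input layer matches the embedded feature size.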
model_obj = my_models(num_inputs, dropout = dropout_pr)
model = model_obj.build_model()
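# Initial training on the embedded source data.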
model = model_obj.fit(emb_x_s, train_y_s, emb_val_x_s, val_y_s, 
                      scale = NN_scaling)
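# Fine-tune on the embedded labeled target data.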
model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t, 
                      scale = NN_scaling)

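# Evaluate the fine-tuned model on the target train/validation/test splits.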
error_fine_tuning, train_loss, val_loss, test_loss = \
  model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                     test_y_t, scale = NN_scaling)
  
title = 'Naive learning + fine tuning'
print_stat(title, error_fine_tuning, train_loss, val_loss, test_loss)

# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_fine_tuning,
           test_y_t, title = title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_fine_tuning, 100)    
plt.show()
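
# =============================================================================
# Metric training combined with sample selection bias correction: the weighted
# model (w_model_obj) is evaluated on the embedded target splits below.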
if fine_tuning:
    error_metric_plus_sample_f, train_loss_f, val_loss_f, test_loss_f = \
      w_model_obj.evaluate(emb_u_x_t, u_train_y_t, emb_u_val_x_t, u_val_y_t,
                           emb_test_x, test_y_t, scale = NN_scaling)

title = 'metric training plus sample selection bias'
print_stat(title, error_metric_plus_sample, train_loss, val_loss, test_loss)

if fine_tuning:
    title = 'metric training plus sample selection bias with fine tuning'
    print_stat(title, error_metric_plus_sample_f, train_loss_f, val_loss_f,
               test_loss_f)
# =============================================================================

from plotters import error_dist
error_dist(emb_x_s,
           train_y_s,
           emb_u_x_t,
           u_train_y_t,
           error_metric_plus_sample,
           test_y_t,
           weights=coef_s,
           title=title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_metric_plus_sample, 100)
plt.show()
#
#del model
#del w_model_obj
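
# =============================================================================
# Transformed sample selection bias correction ("no labeled data" variant):
# the weighted model is optionally fine-tuned and then evaluated below.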
if fine_tuning:
  model = w_model_obj.fit(t_ssbc_x_t, train_y_t, t_ssbc_val_x_t, val_y_t,
                          scale = NN_scaling, training_w = training_weights)

  error_metric_plus_sample_f, t_ssbc_train_loss_f, t_ssbc_val_loss_f, t_ssbc_test_loss_f = \
    w_model_obj.evaluate(t_ssbc_x_t, train_y_t, t_ssbc_val_x_t, val_y_t,
                         t_ssbc_test_x, test_y_t, scale = NN_scaling)
  
title = 'no labeled data - transformed sample selection bias'
print_stat(title, error_metric_plus_sample, t_ssbc_train_loss, t_ssbc_val_loss, t_ssbc_test_loss)

if fine_tuning:
  title = 'no labeled data - transformed sample selection bias with fine tuning'
  print_stat(title, error_metric_plus_sample_f, t_ssbc_train_loss_f, t_ssbc_val_loss_f,
             t_ssbc_test_loss_f)
# =============================================================================
  
from plotters import error_dist, plot_cdf, plot_embeding
error_dist(t_ssbc_x_s, train_y_s, t_ssbc_x_t, train_y_t, error_metric_plus_sample,
           test_y_t, weights=coef_s,  title = title)
plt.show()

# plot_embeding(train_x_s, train_x_t, coef_s, train_y_s = train_y_s, train_y_t = train_y_t, fig_name = 't-ssbc-ae')
# plot_embeding(train_x_s, train_x_t, coef_s, fig_name = 't-ssbc-ae')


plot_cdf(error_metric_plus_sample, 100)    
plt.show()

del model
del w_model_obj

# =============================================================================
# Example #5
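# Evaluate the naively trained model on the embedded target splits; if
# fine_tuning is enabled, refit it on the labeled target data and re-evaluate.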
error_naive, nl_train_loss, nl_val_loss, nl_test_loss = \
  model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                     test_y_t, scale = NN_scaling)
  
if fine_tuning:
  model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t, 
                      scale = NN_scaling)

  error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f = \
    model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                       test_y_t, scale = NN_scaling)
  
title = 'Naive learning'
print_stat(title, error_naive, nl_train_loss, nl_val_loss, nl_test_loss)

if fine_tuning:
  title = 'Naive learning with fine-tuning'
  print_stat(title, error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f)
# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_naive,
           test_y_t, title = title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_naive, 100)    
plt.show()
  
del model
del model_obj
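
# =============================================================================
# Sample selection bias on (X, Y): fit the weighted model on the embedded
# source data using per-sample training and validation weights.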
model = w_model_obj.fit(emb_x_s,
                        train_y_s,
                        emb_val_x_s,
                        val_y_s,
                        val_w=training_val_weights,
                        scale=NN_scaling,
                        training_w=training_weights)

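# Evaluate the weighted model on the embedded target train/validation/test splits.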
error_sample_bias_xy, train_loss, val_loss, test_loss =\
  w_model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                       emb_test_x, test_y_t, scale = NN_scaling)
title = 'sample selection bias on X, Y'
print_stat(title, error_sample_bias_xy, train_loss, val_loss, test_loss)
# =============================================================================

from plotters import error_dist

error_dist(emb_x_s,
           train_y_s,
           emb_x_t,
           train_y_t,
           error_sample_bias_xy,
           test_y_t,
           weights=coef_s,
           title=title)
plt.show()

from plotters import plot_cdf

plot_cdf(error_sample_bias_xy, 100)
plt.show()