Example #1
# Embed the validation and test sets with the embedder already fitted on the
# training data; using transform() (not fit_transform) avoids re-fitting on
# held-out data and keeps all embeddings consistent.
emb_val_x_s = embedder.transform(val_x_s)
emb_val_x_t = embedder.transform(val_x_t)
emb_test_x = embedder.transform(test_x_t)
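
The snippet assumes embedder follows the scikit-learn fit/transform contract. A minimal stand-in (hypothetical; PCA is only a placeholder for the project's learned metric embedding):

# Hypothetical placeholder illustrating the fit/transform contract assumed
# above; the project's real embedder is a learned metric embedding.
from sklearn.decomposition import PCA

embedder_sketch = PCA(n_components=16)
# emb_x_s     = embedder_sketch.fit_transform(x_train)  # fit once on training data (x_train is a hypothetical name)
# emb_val_x_s = embedder_sketch.transform(val_x_s)      # reuse the same fit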

# =============================================================================
num_inputs = emb_x_s.shape[1]  # input layer size
# =============================================================================

model_obj = my_models(num_inputs, dropout=dropout_pr)
model = model_obj.build_model()
# Train on the source domain first, then fine-tune on the target domain.
model = model_obj.fit(emb_x_s, train_y_s, emb_val_x_s, val_y_s,
                      scale=NN_scaling)
model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                      scale=NN_scaling)

error_fine_tuning, train_loss, val_loss, test_loss = \
  model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                     test_y_t, scale=NN_scaling)
  
title = 'Naive learning + fine tuning'
print_stat(title, error_fine_tuning, train_loss, val_loss, test_loss)

# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_fine_tuning,
           test_y_t, title = title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_fine_tuning, 100)    
plt.show()
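
For reference, a minimal sketch of what an empirical-CDF helper like plot_cdf could look like (the actual plotters.plot_cdf may differ):

import numpy as np
import matplotlib.pyplot as plt

def plot_cdf_sketch(errors, n_points=100):
    # Evaluate the empirical CDF on a grid of n_points error values.
    errors = np.asarray(errors).ravel()
    grid = np.linspace(errors.min(), errors.max(), n_points)
    cdf = np.searchsorted(np.sort(errors), grid, side='right') / errors.size
    plt.plot(grid, cdf)
    plt.xlabel('error')
    plt.ylabel('empirical CDF')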
Example #2

error_metric, ml_train_loss, ml_val_loss, ml_test_loss = \
  ml_model_obj.evaluate(ml_x_t, train_y_t, ml_val_x_t, val_y_t, ml_test_x,
                        test_y_t, scale=NN_scaling)

if fine_tuning:
    ml_model = ml_model_obj.fit(ml_x_t,
                                train_y_t,
                                ml_val_x_t,
                                val_y_t,
                                scale=NN_scaling)

    error_metric_f, ml_train_loss_f, ml_val_loss_f, ml_test_loss_f = \
      ml_model_obj.evaluate(ml_x_t, train_y_t, ml_val_x_t, val_y_t, ml_test_x,
                            test_y_t, scale=NN_scaling)

title = 'metric learning'
print_stat(title, error_metric, ml_train_loss, ml_val_loss, ml_test_loss)
if fine_tuning:
    title = 'metric learning with fine tuning'
    print_stat(title, error_metric_f, ml_train_loss_f, ml_val_loss_f,
               ml_test_loss_f)

# =============================================================================

from plotters import error_dist

error_dist(ml_x_s,
           train_y_s,
           ml_x_t,
           train_y_t,
           error_metric,
           test_y_t,
           title=title)
plt.show()
Example #3

fine_tuning = True
%run -i naive_learning.py

fine_tuning = False
%run -i naive_learning.py
%run -i metric_learning_training.py
%run -i sample_selection_bias_by_unlabeled_tranining.py
%run -i transform_features.py
%run -i transformed_sample_selection.py
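
Each script is executed with %run -i, which runs it inside the current IPython namespace, so the fine_tuning flag set above is visible to the scripts and their results remain available for the summary prints below.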



print("cat 1 no data from target location")
title = 'Naive learning'
print_stat(title, error_naive, nl_train_loss, nl_val_loss, nl_test_loss)

print("cat 2 additional labeled data set from target location")
title = 'Naive learning with fine-tuning'
print_stat(title, error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f)

title = 'metric learning'
print_stat(title, error_metric, ml_train_loss, ml_val_loss, ml_test_loss)

print("cat 3 unlabeled data set from target location")
title = 'sample selection bias'
print_stat(title, error_sample_bias, ssbc_train_loss, ssbc_val_loss, ssbc_test_loss)

title = 'no labeled data - transformed sample selection bias'
print_stat(title, error_metric_plus_sample, t_ssbc_train_loss, t_ssbc_val_loss, t_ssbc_test_loss)
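
For reference, a minimal sketch of a print_stat-style reporter, assuming it prints the mean/median error alongside the three losses (the project's actual print_stat may format this differently):

import numpy as np

def print_stat_sketch(title, error, train_loss, val_loss, test_loss):
    # Report a one-line summary per learning strategy.
    print(f'--- {title} ---')
    print(f'mean error:   {np.mean(error):.3f}')
    print(f'median error: {np.median(error):.3f}')
    print(f'losses: train={train_loss:.4f}  val={val_loss:.4f}  test={test_loss:.4f}')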
Example #4
w_model_obj = my_models(num_inputs,
                        model_type='weighted',
                        dropout=dropout_pr)
model = w_model_obj.build_model()
model = w_model_obj.fit(emb_x_s,
                        train_y_s,
                        emb_val_x_s,
                        val_y_s,
                        val_w=training_val_weights,
                        scale=NN_scaling,
                        training_w=training_weights)

error_sample_bias, train_loss, val_loss, test_loss = \
  w_model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                       emb_test_x, test_y_t, scale=NN_scaling)
title = 'metric learning plus sample selection bias'
print_stat(title, error_sample_bias, train_loss, val_loss, test_loss)
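
The sample weights passed via training_w are what correct the sample selection bias. One common way to obtain such weights (an assumption here, not necessarily this project's method) is to estimate the density ratio p_target(x)/p_source(x) with a classifier that discriminates source from target samples:

import numpy as np
from sklearn.linear_model import LogisticRegression

def density_ratio_weights(x_source, x_target):
    # Label source samples 0 and target samples 1, fit a discriminator,
    # and convert its probabilities into importance weights p_t(x)/p_s(x).
    X = np.vstack([x_source, x_target])
    y = np.concatenate([np.zeros(len(x_source)), np.ones(len(x_target))])
    clf = LogisticRegression(max_iter=1000).fit(X, y)
    p = clf.predict_proba(x_source)[:, 1]
    w = p / np.clip(1.0 - p, 1e-12, None)
    return w * (len(x_source) / w.sum())   # normalize to mean 1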
# =============================================================================


def error_dist(X_s, X_t, error_test, weights=None, title=None):
    from scipy.interpolate import griddata
    rssi_interp_s = griddata(train_y_s, X_s, train_y_t, method='cubic')
    rssi_diff = rssi_interp_s - X_t
    rssi_diff = np.sum(rssi_diff, axis=1)
    if weights is None:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
        plot_scatter_colored(rssi_diff,
                             train_y_t,
                             title='measure difference',
                             ax=ax1)
        plot_scatter_colored(error_test,
                             test_y_t,
                             title=title,
                             ax=ax2)
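
The griddata call above interpolates the source measurements at the target positions so the two domains can be compared pointwise. A tiny self-contained example of the same pattern (all arrays hypothetical):

import numpy as np
from scipy.interpolate import griddata

pos_s = np.random.rand(50, 2)      # source positions
rssi_s = np.random.rand(50, 4)     # measurement vectors at source positions
pos_t = np.random.rand(10, 2)      # target positions
rssi_at_t = griddata(pos_s, rssi_s, pos_t, method='cubic')
print(rssi_at_t.shape)             # (10, 4); NaN outside the convex hull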
Example #5

if fine_tuning:
    # Fine-tune the weighted model on the labeled target data
    # (unit sample weights, i.e. no reweighting during fine-tuning).
    training_weights = np.ones(train_y_t.shape)
    training_val_weights = np.ones(val_y_t.shape)
    model = w_model_obj.fit(emb_x_t,
                            train_y_t,
                            emb_val_x_t,
                            val_y_t,
                            val_w=training_val_weights,
                            scale=NN_scaling,
                            training_w=training_weights)

    error_metric_plus_sample_f, train_loss_f, val_loss_f, test_loss_f = \
      w_model_obj.evaluate(emb_u_x_t, u_train_y_t, emb_u_val_x_t, u_val_y_t,
                           emb_test_x, test_y_t, scale=NN_scaling)

title = 'metric training plus sample selection bias'
print_stat(title, error_metric_plus_sample, train_loss, val_loss, test_loss)

if fine_tuning:
    title = 'metric training plus sample selection bias with fine-tuning'
    print_stat(title, error_metric_plus_sample_f, train_loss_f, val_loss_f,
               test_loss_f)
# =============================================================================

from plotters import error_dist
error_dist(emb_x_s,
           train_y_s,
           emb_u_x_t,
           u_train_y_t,
           error_metric_plus_sample,
           test_y_t,
           weights=coef_s,
           title=title)
plt.show()
Example #6
# Drop a model object left over from a previous run, if any.
if 'model_obj' in globals():
    del model_obj

model_obj = my_models(num_inputs, dropout=dropout_pr)
model = model_obj.build_model()
model = model_obj.fit(emb_x_t,
                      train_y_t,
                      emb_val_x_t,
                      val_y_t,
                      scale=NN_scaling)

error_normal, train_loss, val_loss, test_loss = \
  model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                     test_y_t, scale=NN_scaling)

title = 'normal learning'
print_stat(title, error_normal, train_loss, val_loss, test_loss)

# =============================================================================
from plotters import error_dist
#error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_normal,
#           test_y_t, title = title)
plt.show()

from plotters import plot_cdf

plot_cdf(error_normal, 100)
plt.show()

#
#del model
#del model_obj
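
The my_models wrapper is defined elsewhere in this project. As a rough orientation, a minimal sketch assuming a small Keras regression network behind build_model/fit (the real class may differ substantially):

import tensorflow as tf

class MyModelsSketch:
    def __init__(self, num_inputs, dropout=0.0):
        self.num_inputs = num_inputs
        self.dropout = dropout
        self.model = None

    def build_model(self):
        self.model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(self.num_inputs,)),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(2),          # e.g. a 2-D position estimate
        ])
        self.model.compile(optimizer='adam', loss='mse')
        return self.model

    def fit(self, x, y, val_x, val_y, scale=False, epochs=50):
        # Scaling of inputs/targets (the scale flag) is omitted for brevity.
        self.model.fit(x, y, validation_data=(val_x, val_y),
                       epochs=epochs, verbose=0)
        return self.model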
Example #7
model = model_obj.fit(emb_x_s, train_y_s, emb_val_x_s, val_y_s,
                      scale=NN_scaling)

error_naive, nl_train_loss, nl_val_loss, nl_test_loss = \
  model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                     test_y_t, scale=NN_scaling)
  
if fine_tuning:
  # Fine-tune the source-trained model on the labeled target data.
  model = model_obj.fit(emb_x_t, train_y_t, emb_val_x_t, val_y_t,
                        scale=NN_scaling)

  error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f = \
    model_obj.evaluate(emb_x_t, train_y_t, emb_val_x_t, val_y_t, emb_test_x,
                       test_y_t, scale=NN_scaling)
  
title = 'Naive learning'
print_stat(title, error_naive, nl_train_loss, nl_val_loss, nl_test_loss)

if fine_tuning:
  title = 'Naive learning with fine-tuning'
  print_stat(title, error_naive_f, nl_train_loss_f, nl_val_loss_f, nl_test_loss_f)
# =============================================================================
from plotters import error_dist
error_dist(emb_x_s, train_y_s, emb_x_t, train_y_t, error_naive,
           test_y_t, title = title)
plt.show()

from plotters import plot_cdf
plot_cdf(error_naive, 100)    
plt.show()
  
del model