Example No. 1
def test_kmm(source, target):
    import numpy as np
    import matplotlib.pyplot as plt

    kmm_kernel = 'rbf'
    B = 1000
    print('Kernel mean matching')
    # from kernel_mean_matching import eprimical_kmm as ekmm
    # coef_s,_  =  ekmm(target, source, kern = kmm_kernel, B = B)

    from kernel_mean_matching import kernel_mean_matching as kmm
    coef_s = kmm(target, source, kern=kmm_kernel, B=B)

    # keep the source samples whose KMM weight exceeds 0.1
    tr = coef_s > 0.1
    tr = tr.reshape(tr.shape[0], )

    from embedders import embedding, variable_embedder
    n_components = 2
    embedding_type = "no_embedding"
    embedder = embedding(embedding_type, n_cmp=n_components, n_ngb=10)
    if embedding_type == "autoencoder":
        split = 0.3
        cut = np.floor(target.shape[0] * (1 - split)).astype(int)
        test_X = target[:cut, :]
        val_X = target[cut:, :]
        cut = np.floor(source.shape[0] * (1 - split)).astype(int)
        test_C = source[:cut, :]
        val_C = source[cut:, :]
        emb_c, emb_val_c, emb_x, emb_val_x = variable_embedder(
            embedder, [test_C, test_X], [val_C, val_X])
        source = np.concatenate((emb_c, emb_val_c), axis=0)
        target = np.concatenate((emb_x, emb_val_x), axis=0)
    else:
        source, target = variable_embedder(embedder, [source, target])

    marker_size = 5
    l1, = plt.plot(source[:, 0],
                   source[:, 1],
                   'o',
                   color='red',
                   label='source')
    plt.setp(l1, markersize=marker_size)

    l2, = plt.plot(target[:, 0],
                   target[:, 1],
                   'o',
                   color='blue',
                   label='target')
    plt.setp(l2, markersize=marker_size)

    # re-plot the source points at a smaller size so they stay visible on top
    marker_size = 2
    l1, = plt.plot(source[:, 0],
                   source[:, 1],
                   'o',
                   color='red',
                   label='source')
    plt.setp(l1, markersize=marker_size)
    marker_size = 1
    # highlight the source samples selected by KMM (labelled 'ssbc')
    l3, = plt.plot(source[tr, 0], source[tr, 1], 'o', color='k', label='ssbc')
    plt.setp(l3, markersize=marker_size)

    plt.show()
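A minimal way to exercise test_kmm is with two small synthetic 2-D clouds whose means differ, so the KMM-selected points stand out in the plot. The snippet below is an illustrative sketch, not part of the original example; it assumes test_kmm is defined in the current script and that kernel_mean_matching and embedders are importable from the project.

import numpy as np

# Hypothetical smoke test: two Gaussian clouds with shifted means.
rng = np.random.RandomState(0)
source = rng.normal(loc=0.0, scale=1.0, size=(200, 2))
target = rng.normal(loc=1.0, scale=1.0, size=(150, 2))

test_kmm(source, target)  # plots source, target, and the KMM-selected points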
Example No. 2
def eprimical_kmm_emb(target_samples,
                      source_samples,
                      kern='rbf',
                      B=1,
                      embedder_type='autoencoder',
                      n_components=30):
    import numpy as np
    from embedders import embedding, variable_embedder
    embedder = embedding(embedder_type, n_cmp=n_components, n_ngb=10)
    if embedder_type != "autoencoder":
        X, C = variable_embedder(embedder, [target_samples, source_samples])
    else:
        from autoencoder import autoencoder
        split = 0.3
        cut = np.floor(target_samples.shape[0] * (1 - split)).astype(int)
        test_X = target_samples[:cut, :]
        val_X = target_samples[cut:, :]
        cut = np.floor(source_samples.shape[0] * (1 - split)).astype(int)
        test_C = source_samples[:cut, :]
        val_C = source_samples[cut:, :]
        test_X, val_X, test_C, val_C = variable_embedder(
            embedder, [test_X, test_C], [val_X, val_C])

        X = np.concatenate((test_X, val_X), axis=0)
        C = np.concatenate((test_C, val_C), axis=0)


# =============================================================================
#   from embedders import variable_embedder
#   embeded_output = variable_embedder(embedder, (source_samples, target_samples))
#   C = embeded_output[0]
#   X = embeded_output[1]
# =============================================================================
    if embedder_type == 'tsne' or embedder_type == "autoencoder":
        X = X.astype(np.double)
        C = C.astype(np.double)

    coef_s = kernel_mean_matching(X, C, kern=kern, B=B)
    coef_t = []

    # coef_s, coef_t = eprimical_kmm(X, C , kern=kern, B=B)

    return coef_s, coef_t
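A rough usage sketch (an assumption, not part of the original listing): the returned coef_s can be thresholded to pick source samples, mirroring Example No. 1. The placeholder matrices, the 'pca' embedder choice, and the parameter values below are illustrative only.

import numpy as np
from kernel_mean_matching import eprimical_kmm_emb

# Placeholder feature matrices standing in for real source/target data.
rng = np.random.RandomState(1)
X_source = rng.normal(size=(300, 50))
X_target = rng.normal(size=(200, 50)) + 0.5

coef_s, _ = eprimical_kmm_emb(X_target, X_source, kern='rbf', B=10,
                              embedder_type='pca', n_components=30)
selected = (coef_s > 0.1).reshape(-1)   # boolean mask over source rows
X_source_kmm = X_source[selected, :]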
Example No. 3
def embedd():
    # Relies on module-level globals: train_x_s, val_x_s, train_x_t, val_x_t,
    # cmap_s, cmap_t, and an already-imported matplotlib.pyplot as plt.
    ##############################################################################
    from embedders import embedding, variable_embedder
    n_components = 5
    embedding_type = "spectral"
    embedder = embedding(embedding_type, n_cmp=n_components, n_ngb=30)
    ##############################################################################

    if embedding_type == "autoencoder":
        emb_train_x_s, emb_train_val_x_s, emb_train_x_t, emb_train_val_x_t\
          = variable_embedder(embedder,\
                              [train_x_s, train_x_t], [val_x_s, val_x_t])
    else:
        emb_train_x_s, emb_train_val_x_s, emb_train_x_t, emb_train_val_x_t\
          = variable_embedder(embedder,\
                              [train_x_s, val_x_s, train_x_t, val_x_t])

    # if  embedding_type == "autoencoder":
    #   emb_train_x_s, emb_train_val_x_s\
    #     = embedder.fit_transform(train_x_s, val_x_s)
    # else:
    #   emb_train_x_s, emb_train_val_x_s\
    #     = variable_embedder(embedder, [train_x_s, val_x_s])

    plt.scatter(emb_train_x_s[:, 0],
                emb_train_x_s[:, 1],
                c=cmap_s,
                label='source',
                marker="o",
                s=5)
    plt.show()

    plt.scatter(emb_train_x_t[:, 0],
                emb_train_x_t[:, 1],
                c=cmap_t,
                label='target',
                marker="o",
                s=5)
    plt.show()
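Because embedd() reads its inputs from module scope, a caller has to set those names up first. The sketch below is an assumption about that surrounding script: it loads the data with load() (as in the later examples) and substitutes plain colours for the 2-D colour maps.

import matplotlib.pyplot as plt  # embedd() uses plt from module scope
from load_data import load

train_x_s, train_y_s, val_x_s, val_y_s, train_x_t, train_y_t, \
  val_x_t, val_y_t, test_x_t, test_y_t = load()

# Simple single-colour maps; the original script builds these from the labels.
cmap_s, cmap_t = 'b', 'k'

embedd()  # plots the spectral embedding of the source and target features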
Example No. 4
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from load_data import load
from kernel_mean_matching import eprimical_kmm_emb as ekmm_emb
from kernel_mean_matching import kernel_mean_matching as kmm

train_x_s, train_y_s, val_x_s, val_y_s, train_x_t, train_y_t, \
  val_x_t, val_y_t, test_x_t, test_y_t = load(unlabele_target_percentage=1)

##############################################################################
from embedders import embedding, variable_embedder
n_components = 2
embedding_type = "no_embedding"
embedder = embedding(embedding_type, n_cmp=n_components, n_ngb=10)
##############################################################################
if embedding_type == "autoencoder":
    train_x_s, val_x_s, train_x_t, val_x_t\
      = variable_embedder(embedder,\
                          [train_x_s, train_x_t], [val_x_s, val_x_t])
    test_x_t = embedder.predict(test_x_t)
else:
    train_x_s, val_x_s, train_x_t, val_x_t, test_x_t \
    = variable_embedder(embedder,[train_x_s, val_x_s,
                                  train_x_t, val_x_t, test_x_t])
##############################################################################
treshhold = 0.5  # KMM weight cut-off for selecting source samples
##############################################################################
###### ssbc
print('Kernel mean matching')
# =============================================================================
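The listing breaks off here. Based on how the other examples use kmm and a weight threshold, the continuation presumably resembles the sketch below; this is an assumption, not the original code.

# Sketch of the assumed continuation: weight the source samples against the
# target and keep those whose KMM coefficient exceeds the threshold.
coef_s = kmm(train_x_t, train_x_s, kern='rbf', B=1000)
selected = (coef_s > treshhold).reshape(-1)
train_x_s_sel = train_x_s[selected, :]
train_y_s_sel = train_y_s[selected]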
Example No. 5
print("Increazing the power level of the source floor with 50dB")
train_x_s[train_x_s != 110] = train_x_s[train_x_s != 110] + 80 
val_x_s[val_x_s != 110] = val_x_s[val_x_s != 110] + 80 

# normalize the feature vectors of Xs and Ys
import numpy as np

def normalizer(X):
  mean_X = np.mean(X)
  std_X = np.sqrt(np.var(X))
  X = (X-mean_X) / std_X
  return X

from embedders import embedding

embedding_type = "no_embedding"
embedder = embedding(embedding_type)
# =============================================================================
emb_x_s = embedder.fit_transform(train_x_s)
emb_x_t = embedder.fit_transform(train_x_t)
emb_val_x_s = embedder.fit_transform(val_x_s)
emb_val_x_t = embedder.fit_transform(val_x_t)
emb_test_x = embedder.fit_transform(test_x_t)

# =============================================================================
num_inputs = emb_x_s.shape[1]  # input layer size
# =============================================================================

# my_models, dropout_pr and NN_scaling are defined in the surrounding training script
model_obj = my_models(num_inputs, dropout=dropout_pr)
model = model_obj.build_model()
model = model_obj.fit(emb_x_s, train_y_s, emb_val_x_s, val_y_s,
                      scale=NN_scaling)
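The normalizer helper above is defined but not applied in the snippet shown. A possible use, sketched here as an assumption, is to scale each feature matrix to zero mean and unit variance before it is passed to the embedder:

# Sketch only: standardise each matrix with the helper defined above.
train_x_s = normalizer(train_x_s)
val_x_s = normalizer(val_x_s)
train_x_t = normalizer(train_x_t)
val_x_t = normalizer(val_x_t)
test_x_t = normalizer(test_x_t)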
Example No. 6
from plotters import plot_scatter_colored 
from plotters import plot_cmp
from sklearn import preprocessing
from keras.utils import plot_model

from load_data import load

train_x_s, train_y_s, val_x_s, val_y_s, train_x_t, train_y_t, \
  val_x_t, val_y_t, test_x_t, test_y_t = load()

load_data_flag = False


from embedders import variable_embedder, embedding
embedding_type = "pca"
embedder = embedding(embedding_type, n_cmp=519)

train_x_s, val_x_s, train_x_t, val_x_t, test_x_t = variable_embedder(
    embedder, [train_x_s, val_x_s, train_x_t, val_x_t, test_x_t])



fine_tuning = True
# IPython magics: each script runs in the current interactive namespace (-i)
%run -i naive_learning.py

fine_tuning = False
%run -i naive_learning.py
%run -i metric_learning_training.py
%run -i sample_selection_bias_by_unlabeled_tranining.py
%run -i transform_features.py
%run -i transformed_sample_selection.py
Example No. 7
def plot_embeding(x_s,
                  x_t,
                  coef_s,
                  train_y_s=None,
                  train_y_t=None,
                  fig_name=None):

    import numpy as np
    treshhold = 0.9
    # treshhold = np.mean(coef_s)
    # boolean mask over the source rows whose KMM weight exceeds the threshold
    coef_ind = coef_s > treshhold
    coef_ind = coef_ind.reshape(coef_ind.shape[0], )
    from embedders import embedding, variable_embedder
    n_components = 2
    embedding_type = "mds"
    embedder = embedding(embedding_type, n_cmp=n_components, n_ngb=10)
    if embedding_type == "autoencoder":
        split = 0.3
        cut = np.floor(x_t.shape[0] * (1 - split)).astype(int)
        test_X = x_t[:cut, :]
        val_X = x_t[cut:, :]
        cut = np.floor(x_s.shape[0] * (1 - split)).astype(int)
        test_C = x_s[:cut, :]
        val_C = x_s[cut:, :]
        emb_c, emb_val_c, emb_x, emb_val_x = variable_embedder(
            embedder, [test_C, test_X], [val_C, val_X])
        emb_train_x_s = np.concatenate((emb_c, emb_val_c), axis=0)
        emb_train_x_t = np.concatenate((emb_x, emb_val_x), axis=0)
    else:
        emb_train_x_s, emb_train_x_t = variable_embedder(embedder, [x_s, x_t])
    ##############################################################################
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set_style("whitegrid")
    bg_color = 'white'
    fg_color = 'black'
    f = plt.figure(figsize=(10, 10), facecolor=bg_color, edgecolor=fg_color)

    if train_y_s is not None:
        # cmap2d (defined elsewhere in the module) maps 2-D labels to colours
        cmap_s = cmap2d(train_y_s)
        cmap_t = cmap2d(train_y_t)

    else:
        cmap_s = 'b'
        cmap_t = 'k'

    l00 = plt.scatter(emb_train_x_s[:, 0],
                      emb_train_x_s[:, 1],
                      c=cmap_s,
                      label='source',
                      marker="o",
                      s=3)
    l01 = plt.scatter(emb_train_x_t[:, 0],
                      emb_train_x_t[:, 1],
                      c=cmap_t,
                      label='target',
                      marker="^",
                      s=3)
    l22 = plt.scatter(emb_train_x_s[coef_ind, 0],
                      emb_train_x_s[coef_ind, 1],
                      c='r',
                      label='ssbc',
                      s=0.5)

    if fig_name:
        plt.savefig(fig_name + ".svg", dpi=1200)
    else:
        plt.show()
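A possible way to drive plot_embeding, sketched here as an assumption based on how the other examples call kernel_mean_matching: compute the source weights against the target, then visualise which source samples they select. The random matrices stand in for real data.

import numpy as np
from kernel_mean_matching import kernel_mean_matching as kmm

rng = np.random.RandomState(0)
x_s = rng.normal(size=(300, 20))
x_t = rng.normal(size=(200, 20)) + 0.3

coef_s = kmm(x_t, x_s, kern='rbf', B=1000)
plot_embeding(x_s, x_t, coef_s, fig_name='kmm_mds_embedding')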