Example No. 1
# Build a small toy dataset for quantile regression.
print("Creating the dataset")

# Project helper (torch_itl); returns inputs, targets and a third value
# that this example ignores — presumably 150 is the sample count; verify
# against import_data_toy_quantile.
x_train, y_train, _ = import_data_toy_quantile(150)
n = x_train.shape[0]  # number of training samples
m = 10                # number of quantile levels used below

# Quick look at the raw training data.
plt.figure()
plt.scatter(x_train, y_train, marker='.')
plt.show()

# %%
# Defining an ITL model, first without a learnable kernel

print("Defining the model")

kernel_input = Gaussian(3.5)   # Gaussian kernel on the inputs (parameter 3.5)
kernel_output = Gaussian(9)    # Gaussian kernel on the quantile levels (parameter 9)
model = DecomposableIdentity(kernel_input, kernel_output, 1)  # scalar output
lbda = 0.001        # main regularization strength
lbda_cross = 0.01   # second regularization term — name suggests a quantile-crossing penalty; confirm in IQR
# Samples quantile levels; assumes levels drawn linearly in [0.1, 0.9] — TODO confirm
sampler = LinearSampler(0.1, 0.9, 10, 0)
sampler.m = 10      # number of sampled quantile levels (matches m above)
est = IQR(model, lbda, lbda_cross, sampler)
#%%
# Learning the coefficients of the model
print("Fitting the coefficients of the model")

# NOTE(review): this call is truncated in the source — the remaining
# arguments and closing parenthesis lie beyond this excerpt.
est.fit_alpha_gd(x_train,
                 y_train,
                 n_epochs=40,
                 lr=0.001,
Example No. 2
# Paths to the pre-extracted facial landmarks; adjust them to where you placed
# the extracted landmarks on your computer.
# See utils/README.md
path_to_rafd = '../../torch_itl/datasets/Rafd_Aligned/Rafd_LANDMARKS'
path_to_kdef = '../../torch_itl/datasets/KDEF_Aligned/KDEF_LANDMARKS'
# Sanity-check the import by loading the KDEF train/test landmark split.
data_train, data_test = get_data_landmarks('KDEF', path_to_kdef)
# data_train is 3-D: n samples x m x nf features — the meaning of the m
# axis is not visible here (presumably emotions); verify in get_data_landmarks.
n, m, nf = data_train.shape
print('Testing import, data dimensions:', n, m, nf)
# %%
# ----------------------------------
# Defining our model
# ----------------------------------
print('Defining the model')
# define Landmarks kernel (Gaussian on the input landmarks)
gamma_inp = 0.07
kernel_input = Gaussian(gamma_inp)
# define emotion kernel (Gaussian on the emotion space)
gamma_out = 0.4
kernel_output = Gaussian(gamma_out)
# define functional model; output dimension matches the nf landmark features
model = DecomposableIdentity(kernel_input, kernel_output, nf)
# define emotion sampler
sampler = CircularEmoSampler()
# define regularization strength
lbda = 2e-5
# define the emotion transfer estimator
est = EmoTransfer(model, lbda, sampler, inp_emotion='joint')
# %%
# ----------------------------------
# Fitting fewer coefficients with non invertible matrix A -- KDEF
# ----------------------------------