Example 1
import torch
from torch.utils.data import DataLoader

BATCH_SIZE = 100
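
# The constants below are presumably defined in an earlier cell of the
# notebook; the values here are purely illustrative placeholders so that
# this snippet runs stand-alone.
DIM = 2
N_CENTERS = 5
VARIANCE = 0.1
TRAIN_SAMPLES = 10_000
TEST_SAMPLES = 2_000
CURL_TRAIN_SIZE = 10_000
CURL_TEST_SIZE = 2_000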

# Model
INPUT_DIM = DIM
HIDDEN_DIM = 20
OUT_DIM = HIDDEN_DIM

# Training
N_EPOCH = 1000
LR = 1e-3

# Data generation
CENTERS = torch.randn(N_CENTERS, DIM)  # random cluster centers
gmm = GMM(DIM, CENTERS, VARIANCE)

X_train, y_train = gmm.sample(TRAIN_SAMPLES)
X_test, y_test = gmm.sample(TEST_SAMPLES)

train_CURL = ContrastiveDataset(
    *build_CURL_dataset(X_train, y_train, CURL_TRAIN_SIZE))
assert len(train_CURL) == CURL_TRAIN_SIZE
test_CURL = ContrastiveDataset(
    *build_CURL_dataset(X_test, y_test, CURL_TEST_SIZE))
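
# build_CURL_dataset is defined elsewhere in the notebook; a hypothetical
# sketch of its contract, assuming it draws random pairs labelled by whether
# the two points come from the same mixture component:
def build_CURL_dataset_sketch(X, y, size):
    i = torch.randint(len(X), (size,))
    j = torch.randint(len(X), (size,))
    return X[i], X[j], (y[i] == y[j]).float()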

train_data = GMMDataset(X_train, y_train)
test_data = GMMDataset(X_test, y_test)

train_loader = DataLoader(train_data, shuffle=True, batch_size=BATCH_SIZE)
test_loader = DataLoader(test_data, shuffle=False, batch_size=BATCH_SIZE)

curl_train_loader = DataLoader(train_CURL, shuffle=True, batch_size=BATCH_SIZE)
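
# Not in the original: an assumed evaluation-time counterpart to curl_train_loader.
curl_test_loader = DataLoader(test_CURL, shuffle=False, batch_size=BATCH_SIZE)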
Example 2
import numpy as np
import matplotlib.pyplot as plt

# Plot the per-iteration log-likelihood trace L produced by the EM fit above.
plt.plot(L)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.title('EM log-likelihood')
plt.show()
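
# The curve above is the GMM log-likelihood sum_n log sum_k pi_k N(x_n | mu_k, var_k I).
# A hypothetical helper showing how it can be computed (the names pi, mu, var
# are assumptions; the notebook's own EM code lives in the GMM class):
def gmm_log_likelihood(X, pi, mu, var):
    # X: (N, d) data; pi: (K,) weights; mu: (K, d) means; var: (K,) isotropic variances
    N, d = X.shape
    sq = ((X[:, None, :] - mu[None, :, :]) ** 2).sum(-1)      # (N, K) squared distances
    log_comp = np.log(pi) - 0.5 * d * np.log(2 * np.pi * var) - 0.5 * sq / var
    m = log_comp.max(axis=1, keepdims=True)                   # log-sum-exp for stability
    return float((m[:, 0] + np.log(np.exp(log_comp - m).sum(axis=1))).sum())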

# 2.) K-means algorithm
# Run k-means and plot the cumulative distance over the iterations.
# mu_0 = random.sample(list(X), M)  # optional manual initialization (unused here)
D = gmm.k_means(max_iter=max_iter, tol=1e-2, interactive=False)

plt.plot(D)
plt.xlabel('Iteration')
plt.ylabel('Cumulative distance')
plt.title('K-means cumulative distance')
plt.show()
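
# For reference, a minimal sketch of one way to implement the k-means loop
# called above (hypothetical; the actual implementation is a method of GMM):
def k_means_sketch(X, mu, max_iter=100, tol=1e-2):
    D = []  # cumulative distance per iteration, as plotted above
    for _ in range(max_iter):
        dists = np.linalg.norm(X[:, None, :] - mu[None, :, :], axis=-1)  # (N, K)
        labels = dists.argmin(axis=1)
        D.append(dists.min(axis=1).sum())
        # Recompute each mean from its assigned points (assumes no empty cluster).
        mu = np.stack([X[labels == k].mean(axis=0) for k in range(len(mu))])
        if len(D) > 1 and abs(D[-2] - D[-1]) < tol:  # stop once the decrease stalls
            break
    return mu, labels, D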

# 3.) Sampling from GMM
# Draw N points; gmm.sample returns one array of samples per mixture component.
Y = gmm.sample(N=N)

for i in range(M):
    plt.plot(Y[i][:, 0], Y[i][:, 1], 'o')
    plt.plot(np.mean(Y[i][:, 0]), np.mean(Y[i][:, 1]), 'x', color='black')
    plt.text(np.mean(Y[i][:, 0]) + 10, np.mean(Y[i][:, 1]) + 10, rf'$\mu_{{{i}}}$')

plt.xlabel('x')
plt.ylabel('y')
plt.title('Sampled data and corresponding means')
plt.show()
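
For context, gmm.sample above returns one array of points per component, as the
plotting loop assumes; a minimal sketch of such an ancestral sampler, taking
mixture weights pi, means mu of shape (K, d) and isotropic variances var
(hypothetical names, not the notebook's actual API):

def gmm_sample_sketch(pi, mu, var, N):
    # Split the N draws across components, then sample Gaussians around each mean.
    counts = np.random.multinomial(N, pi)
    return [mu[k] + np.sqrt(var[k]) * np.random.randn(counts[k], mu.shape[1])
            for k in range(len(pi))]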