Example #2
import numpy as np
import matplotlib.pyplot as plt
from kmeans import get_simple_data, plot_k_means  # plot_k_means is assumed to live in kmeans.py; in this example it returns the final cost


def main():
    X = get_simple_data()

    # cost for each K; index 0 is unused, so leave it as NaN
    costs = np.empty(10)
    costs[0] = None
    for k in range(1, 10):
        # fit soft k-means with k clusters and record the final cost
        c = plot_k_means(X, k)
        costs[k] = c

    plt.plot(costs)
    plt.title('Cost vs K')
    plt.show()


if __name__ == '__main__':
    main()
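Here plot_k_means is assumed to return the final cost of the soft k-means fit. As a rough sketch of what that recorded value measures (an illustration only, not the kmeans module's actual implementation), the soft k-means cost is the responsibility-weighted sum of squared distances between every point and every cluster mean:

def soft_kmeans_cost(X, R, M):
    # X: (N, D) data, R: (N, K) responsibilities, M: (K, D) cluster means
    total = 0
    for j in range(len(M)):
        diff = X - M[j]                      # (N, D) offsets from mean j
        sq_dist = (diff * diff).sum(axis=1)  # (N,) squared distances to mean j
        total += (R[:, j] * sq_dist).sum()   # weight each distance by its responsibility
    return total

A lower value means the clusters fit the data more tightly, and the cost generally keeps decreasing as K grows, which is why the curve is inspected for an elbow rather than a minimum.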
Example #4
import numpy as np
import matplotlib.pyplot as plt
from kmeans import get_simple_data, plot_k_means, cost  # helpers assumed to live in kmeans.py alongside get_simple_data


def main():
    X = get_simple_data()

    # visualize the raw data first
    plt.scatter(X[:, 0], X[:, 1])
    plt.show()

    # cost for each K; index 0 is unused, so leave it as NaN
    costs = np.empty(10)
    costs[0] = None
    for k in range(1, 10):
        # fit soft k-means without intermediate plots; M = cluster means, R = responsibilities
        M, R = plot_k_means(X, k, show_plots=False)
        c = cost(X, R, M)
        costs[k] = c

    plt.plot(costs)
    plt.title("Cost vs K")
    plt.show()


if __name__ == '__main__':
    main()
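Both listings end by plotting cost against K and choosing K by eye at the elbow of the curve. A hypothetical helper for automating that choice (purely illustrative, not part of the original code) could stop at the first K whose relative improvement over K-1 falls below a threshold:

def pick_k_by_elbow(costs, min_improvement=0.1):
    # costs[k] holds the final cost for k clusters; costs[0] is NaN and ignored
    for k in range(2, len(costs)):
        improvement = (costs[k - 1] - costs[k]) / costs[k - 1]
        if improvement < min_improvement:
            return k - 1          # adding the k-th cluster no longer helps much
    return len(costs) - 1

Assuming get_simple_data() generates three Gaussian blobs, as the K = 3 choice in the next script suggests, the curve should flatten out around K = 3.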
import numpy as np
import matplotlib.pyplot as plt
from kmeans import get_simple_data
from sklearn.preprocessing import StandardScaler


# get the data and standardize it
X = get_simple_data()
scaler = StandardScaler()
X = scaler.fit_transform(X)

# get shapes
N, D = X.shape
K = 3

# initialize parameters: one D-dimensional weight vector per output unit
W = np.random.randn(D, K)

# set hyperparameters
n_epochs = 100
learning_rate = 0.001
losses = []

# training loop
for i in range(n_epochs):
  loss = 0
  for j in range(N):
    h = W.T.dot(X[j]) # activations of the K output units (K-length vector)
    k = np.argmax(h) # index of the winning (most active) neuron

    # accumulate loss