import numpy as np
from succinctly.datasets import get_dataset


def get_training_examples():
    X1 = np.array([[10, 10], [8, 6], [8, 10], [8, 8], [12, 6], [9, 5], [11, 8],
                   [11, 5]])
    X2 = np.array([[10, 13], [6, 5], [6, 9], [9, 2], [14, 8], [12, 11],
                   [10, 13], [13, 4]])

    y1 = np.ones(len(X1))
    y2 = np.ones(len(X2)) * -1
    return get_dataset(X1, y1, X2, y2)


# A second training set with the same interface (different example points).
def get_training_examples():
    X1 = np.array([[10, 10], [6, 6], [6, 11], [3, 15], [12, 6], [9, 5],
                   [16, 3], [11, 5]])
    X2 = np.array([[3, 6], [6, 3], [2, 9], [9, 2], [18, 1], [1, 18], [1, 13],
                   [13, 1]])

    y1 = np.ones(len(X1))
    y2 = np.ones(len(X2)) * -1
    return get_dataset(X1, y1, X2, y2)
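
The snippets above and the examples below rely on a get_dataset helper from succinctly.datasets that is not shown here. Below is a minimal sketch of the four-array form used above, assuming it simply stacks the two classes into one set of examples with +1/-1 labels (the later examples also pass a get_training_examples callable, so the real helper presumably accepts that form as well):

import numpy as np


def get_dataset(X1, y1, X2, y2):
    # Stack both classes into a single dataset with +1/-1 labels.
    X = np.vstack((X1, X2))
    y = np.concatenate((y1, y2))
    return X, y
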
Example #3
import numpy as np
from random import seed  # assumed: the seed(5) call below targets Python's random module

from succinctly.datasets import get_dataset, linearly_separable
from succinctly.algorithms.smo_algorithm import SmoAlgorithm


def linear_kernel(x1, x2):
    return np.dot(x1, x2)


def compute_w(multipliers, X, y):
    # w = sum_i alpha_i * y_i * x_i (summed as a list of vectors, not a generator).
    return np.sum([multipliers[i] * y[i] * X[i] for i in range(len(y))], axis=0)


if __name__ == '__main__':

    seed(5)  # to have reproducible results

    X_data, y_data = get_dataset(linearly_separable.get_training_examples)

    smo = SmoAlgorithm(X_data,
                       y_data,
                       C=10,
                       tol=0.001,
                       kernel=linear_kernel,
                       use_linear_optim=True)

    smo.main_routine()

    w = compute_w(smo.alphas, X_data, y_data)

    print('w = {}'.format(w))

    # -smo.b because Platt uses the convention w.x - b = 0
    print('b = {}'.format(-smo.b))
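
    # Usage sketch (an assumption, not part of the original snippet): with
    # Platt's convention the decision function is h(x) = sign(w.x - b), so a
    # new point can be classified as follows.
    def predict(x):
        return np.sign(np.dot(w, x) - smo.b)

    print(predict(np.array([10, 10])))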
Example #4
import numpy as np
from succinctly.datasets import get_dataset, linearly_separable as ls
import cvxopt.solvers


def compute_w(multipliers, X, y):
    return sum(multipliers[i] * y[i] * X[i] for i in range(len(y)))


def compute_b(w, X, y):
    return np.sum([y[i] - np.dot(w, X[i]) for i in range(len(X))]) / len(X)


if __name__ == '__main__':
    X, y = get_dataset(ls.get_training_examples)
    m = X.shape[0]

    # Gram matrix - The matrix of all possible inner products of X.
    K = np.array([np.dot(X[i], X[j]) for j in range(m)
                  for i in range(m)]).reshape((m, m))

    P = cvxopt.matrix(np.outer(y, y) * K)
    q = cvxopt.matrix(-1 * np.ones(m))

    # Equality constraints
    A = cvxopt.matrix(y, (1, m))
    b = cvxopt.matrix(0.0)

    # Inequality constraints
    G = cvxopt.matrix(np.diag(-1 * np.ones(m)))
    h = cvxopt.matrix(np.zeros(m))
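
    # Sketch of the remaining steps (an assumption; the original snippet stops
    # here): hand the assembled problem to cvxopt's QP solver, which minimizes
    # (1/2) x'Px + q'x subject to Gx <= h and Ax = b, then recover w and b
    # with the helpers defined above.
    solution = cvxopt.solvers.qp(P, q, G, h, A, b)

    # One Lagrange multiplier per training example.
    multipliers = np.ravel(solution['x'])

    # Support vectors are the examples with non-negligible multipliers
    # (the 1e-5 threshold is an arbitrary choice).
    is_sv = multipliers > 1e-5

    w = compute_w(multipliers, X, y)
    b_value = compute_b(w, X[is_sv], y[is_sv])

    print('w = {}'.format(w))
    print('b = {}'.format(b_value))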