Example #1
import numpy as np

# kernel_svm_rbf is assumed to be defined elsewhere in this module.
def svm_classify(newdata, SVM):
    """SVM classifier.

    Args:
        newdata (ndarray): Input test set (of unseen data).\n
        SVM (dict): The trained SVM from 'svm_train'.

    Returns:
        prediction (ndarray): The array of predictions.

    Example:
        Classify an unseen test set with the trained SVM.

        >>> prediction = svm_classify(Test, SVM)
    """
    # Unpack the SVM dictionary
    sv = SVM['support_vector'].copy()  # copy, so repeated calls do not rescale the stored vectors
    alpha_hat = SVM['alpha_hat']
    scale = SVM['scale']
    bias = SVM['bias']
    shift = SVM['shift']
    sigma = SVM['sigma']

    # Shift and scale the support vectors
    for c in range(len(sv[0])):
        sv[:, c] = scale[c] * (sv[:, c] + shift[c])

    # Shift and scale the data (on a copy, so the caller's array is intact)
    newdata = newdata.copy()
    for c in range(len(newdata[0])):
        newdata[:, c] = scale[c] * (newdata[:, c] + shift[c])

    # Evaluate the RBF kernel between the support vectors and the new data
    G = np.asarray(kernel_svm_rbf(sv.T, newdata.T, sigma)).T

    # Classify new data
    f = np.dot(G, alpha_hat.T) + bias

    # Class prediction is determined by the sign of the decision value above
    prediction = np.sign(f)

    return prediction
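
The decision rule above is the standard kernel SVM evaluation, f(x) = sum_i alpha_hat_i * K(s_i, x) + bias, where alpha_hat_i = y_i * alpha_i is folded in during training, and the predicted label is sign(f(x)). Below is a minimal sketch of that rule for a single sample, assuming a Gaussian RBF kernel of width sigma; the helper rbf() is illustrative and is not necessarily identical to the module's kernel_svm_rbf.

import numpy as np

def rbf(a, b, sigma):
    # Illustrative Gaussian kernel: K(a, b) = exp(-||a - b||^2 / (2*sigma^2)).
    return np.exp(-np.sum((a - b) ** 2) / (2.0 * sigma ** 2))

def decision_value(x, support_vectors, alpha_hat, bias, sigma):
    # f(x) = sum_i alpha_hat_i * K(s_i, x) + bias
    weights = np.ravel(alpha_hat)
    return sum(w * rbf(s, x, sigma)
               for w, s in zip(weights, support_vectors)) + bias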
Example #2
import numpy as np

# kernel_svm_rbf and qp_optimizer are assumed to be defined elsewhere in
# this module.
def svm_train(X, y, sigma, C):
    """SVM training for a 2 label classification problem. Label -1 and 1 are
    used to distinguise between the two classes.

    Args:
        X (ndarray): Input training data set.\n
        y (ndarray): Label for the training set.\n
        sigma (float): The variance of the RBF kernel.\n
        C (float): Upper bound (box constraint) on the Lagrange multipliers.

    Returns:
        SVM (dict): A container of the trained values (support vectors etc.).

    Example:
        Train on the synthetic SVM data set.

        >>> SVM = svm_train(Train, target, 1, 1000)
    """
    # Scale data set (other scaling can be used instead)
    shift = np.mean(X, axis=0)
    stdiv = np.std(X, axis=0)
    scale = 1/stdiv
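
    # (Convention used throughout the module: features are mapped as
    # x -> scale * (x + shift) and inverted as x -> x/scale - shift.)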

    # Apply scaling (on a copy, so the caller's array is intact)
    X = X.copy()
    for c in range(len(X[0])):
        X[:, c] = scale[c] * (X[:, c] + shift[c])

    # Generate kernel
    kernel = kernel_svm_rbf(X.T, X.T, sigma)

    # Make kernel symmetric
    kernel = (kernel + kernel.T)/2  # + np.diag(1 / (np.ones((len(y),1)) * C))

    # Formulate as the QP problem  min: 1/2*x'*H*x + f'*x
    # subject to  Aeq*x = beq  and  lb <= x <= ub
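    # With x = alpha, this is the dual of the soft-margin SVM:
    #     max   sum_i alpha_i - 1/2 * sum_ij alpha_i*alpha_j*y_i*y_j*K(x_i, x_j)
    #     s.t.  sum_i alpha_i*y_i = 0  and  0 <= alpha_i <= C,
    # written below as the equivalent minimization via H, f, Aeq, beq, lb, ub.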

    # Construct H: Represents the Hessian matrix
    H = y.dot(y.T) * kernel
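    # (y is expected to be a column vector here, so y.dot(y.T) is the outer
    # product y*y' and H[i, j] = y_i * y_j * K(x_i, x_j).)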

    # Make H symmetric
    # H = (H+H.T)/2

    # Construct f: Represents the linear term
    f = -np.ones(len(X))

    # Construct Aeq: Represents the linear coefficients in Aeq*x = beq
    Aeq = y.T

    # Construct beq: Represents the constant vector in Aeq*x = beq
    beq = 0

    # Construct lb: Represents the lower bounds elementwise in lb
    lb = np.zeros(len(y))

    # Construct ub: Represents the upper bounds elementwise in ub
    ub = C*np.ones(len(y))

    # QP solver
    res = qp_optimizer(H, f, Aeq, beq, lb, ub)
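    # (qp_optimizer is assumed to return a result object whose .x attribute
    # holds the optimal multipliers alpha.)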

    # The support vectors correspond to the non-zero entries of res.x
    reference = np.sqrt(np.spacing(1))  # tolerance: sqrt of machine epsilon
    support_vector_index = np.flatnonzero(res.x > reference)
    support_vector = X[support_vector_index, :]

    # alpha_hat_i = y_i * alpha_i: weight each multiplier by its label
    alpha_hat = y[support_vector_index].T * res.x[support_vector_index]

    # Find the bias (several possibilities)
    max_pos = np.argmax(res.x)
    bias = y[max_pos] - np.sum(alpha_hat * kernel[support_vector_index, max_pos])
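    # (The bias follows from the KKT condition y_j*f(x_j) = 1 at a support
    # vector j; here the vector with the largest multiplier is used.)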

    # Undo the shift and scale so the support vectors are stored in the
    # original data coordinates
    for c in range(len(support_vector[0])):
        support_vector[:, c] = (support_vector[:, c]/scale[c]) - shift[c]

    # Construct a dictionary of the trained values
    SVM = {}
    SVM['support_vector'] = support_vector
    SVM['support_vector_index'] = support_vector_index
    SVM['sigma'] = sigma
    SVM['C'] = C
    SVM['alpha_hat'] = alpha_hat
    SVM['shift'] = shift
    SVM['scale'] = scale
    SVM['bias'] = bias

    return SVM
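
A minimal end-to-end sketch of how the two functions fit together, assuming a synthetic two-class set; the blob data, the seed, and the parameter values are illustrative, and kernel_svm_rbf/qp_optimizer must exist as in the module:

import numpy as np

rng = np.random.default_rng(0)

# Two Gaussian blobs labelled -1 and +1; y is a column vector, as svm_train
# expects for the outer product y*y'.
Train = np.vstack([rng.normal(-1.0, 0.5, (50, 2)), rng.normal(1.0, 0.5, (50, 2))])
target = np.vstack([-np.ones((50, 1)), np.ones((50, 1))])
Test = np.vstack([rng.normal(-1.0, 0.5, (10, 2)), rng.normal(1.0, 0.5, (10, 2))])

SVM = svm_train(Train, target, sigma=1, C=1000)
prediction = svm_classify(Test, SVM)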