Beispiel #1
0
def HPK_kernel(X, T=None, degree=2):
    r"""Performs the HPK kernel between the sample matrices *X* and *T*.
    The HPK kernel is defined as:
    .. math:: k(x,z) = \langle x,z \rangle^d

    Parameters
    ----------
    X : (n,m) array_like,
        the train samples matrix.
    T : (l,m) array_like,
        the test samples matrix. If it is not defined, then the kernel is
        calculated between *X* and *X*.
    degree : int,
        the positive integer exponent *d* of the kernel (default 2).

    Returns
    -------
    K : (l,n) ndarray,
        the HPK kernel matrix.

    Raises
    ------
    ValueError
        if *degree* is not a positive integer.
    """

    if degree < 1:
        # Typo fixed in the message: 'greather' -> 'greater'.
        raise ValueError('degree must be greater than 0')
    if degree != floor(degree):
        raise ValueError('degree must be int')
    X, T = check_X_T(X, T)
    return np.dot(X, T.T)**degree
Beispiel #2
0
def monotone_dnf_kernel(X, T=None, d=2, c=2):
    """Computes the monotone-DNF kernel between *X* and *T* by
    inclusion-exclusion over binomial counts of c-subsets of features,
    with ``binom`` applied elementwise.

    NOTE(review): relies on the package helpers ``check_X_T`` and
    ``my_mdk`` imported at module level.
    """
    X, T = check_X_T(X, T)
    num_features = X.shape[1]
    total_c = binom(num_features, c)

    # Per-sample feature sums replicated into (|X|,|T|) and (|T|,|X|) matrices.
    sums_x = np.outer(X.sum(axis=1), np.ones(T.shape[0]))
    sums_t = np.outer(T.sum(axis=1), np.ones(X.shape[0]))
    covered_x = binom(sums_x, c)
    covered_t = binom(sums_t, c)

    return (binom(total_c, d)
            - binom(total_c - covered_x, d)
            - binom(total_c - covered_t.T, d)
            + binom(my_mdk(X, T, c), d))
Beispiel #3
0
def monotone_dnf_kernel(X, T=None, d=2, c=2):
    """Computes the monotone-DNF kernel between *X* and *T*.

    Inclusion-exclusion over elementwise binomial counts; broadcasting
    of a column and a row vector replaces the explicit outer-product
    matrices of the classic formulation.

    NOTE(review): depends on the package helpers ``check_X_T`` and
    ``my_mdk`` imported at module level.
    """
    X, T = check_X_T(X, T)
    n_c = binom(X.shape[1], c)

    x_sums = X.sum(axis=1).reshape(-1, 1)    # (|X|, 1) column
    t_sums = T.sum(axis=1).reshape(1, -1)    # (1, |T|) row
    xx_c = binom(x_sums, c)
    tt_c = binom(t_sums, c)

    # Broadcasts to the full (|X|, |T|) kernel matrix.
    return (binom(n_c, d) - binom(n_c - xx_c, d)
            - binom(n_c - tt_c, d) + binom(my_mdk(X, T, c), d))
Beispiel #4
0
def monotone_disjunctive_kernel(X, T=None, d=2):
    """Computes the monotone disjunctive (mD) kernel between *X* and *T*.

    Inclusion-exclusion on binomial counts of d-subsets of features,
    with ``binom`` applied elementwise.

    NOTE(review): ``check_X_T`` is a package helper imported at module
    level; if *T* is None it presumably aliases *X* — confirm there.
    """
    X, T = check_X_T(X, T)
    inner = np.dot(X, T.T)
    num_features = X.shape[1]

    sums_x = np.outer(X.sum(axis=1), np.ones(T.shape[0]))
    sums_t = np.outer(T.sum(axis=1), np.ones(X.shape[0]))
    zeros_x = num_features - sums_x               # features inactive in x
    zeros_t = num_features - sums_t               # features inactive in t
    zeros_both = zeros_x - sums_t.T + inner       # inactive in both

    any_d = binom(num_features, d)
    off_x_d = binom(zeros_x, d)
    off_t_d = binom(zeros_t, d)
    off_both_d = binom(zeros_both, d)
    return any_d - off_x_d - off_t_d.T + off_both_d
Beispiel #5
0
def monotone_disjunctive_kernel(X, T=None, d=2):
    """Computes the monotone disjunctive (mD) kernel between *X* and *T*.

    Broadcasting form: the (n - |x|) and (n - |z|) terms are kept as a
    column and a row vector, and expand to the full matrix only in the
    final inclusion-exclusion sum. Algebraically identical to the
    outer-product formulation.

    NOTE(review): ``check_X_T`` is a package helper imported at module level.
    """
    X, T = check_X_T(X, T)
    gram = np.dot(X, T.T)
    n = X.shape[1]

    x_off = (n - X.sum(axis=1)).reshape(-1, 1)   # (|X|, 1)
    t_off = (n - T.sum(axis=1)).reshape(1, -1)   # (1, |T|)
    # (n - |x|) - |z| + <x,z> == x_off + t_off - n + gram
    both_off = x_off + t_off - n + gram

    return (binom(n, d) - binom(x_off, d)
            - binom(t_off, d) + binom(both_off, d))
Beispiel #6
0
def homogeneous_polynomial_kernel(X, T=None, degree=2):
    """performs the HPK kernel between the sample matrices *X* and *T*.
    The HPK kernel is defined as:
    .. math:: k(x,z) = \langle x,z \rangle^d

    Parameters
    ----------
    X : (n,m) array_like,
        the train samples matrix.
    T : (l,m) array_like,
        the test samples matrix. If it is not defined, then the kernel is calculated
        between *X* and *X*.
    degree : int,
        the exponent *d* of the kernel (default 2).

    Returns
    -------
    K : (l,n) ndarray,
        the HPK kernel matrix.
    """

    X, T = check_X_T(X, T)
    return np.dot(X, T.T)**degree
Beispiel #7
0
def homogeneous_polynomial_kernel(X, T=None, degree=2):
    r"""Homogeneous polynomial (HPK) kernel between *X* and *T*.

    .. math:: k(x,z) = \langle x,z \rangle^d

    *X* is the (n,m) train samples matrix and *T* the (l,m) test samples
    matrix; when *T* is None the kernel is computed between *X* and
    itself. Returns the (l,n) HPK kernel matrix as an ndarray.
    """
    X, T = check_X_T(X, T)
    gram = np.dot(X, T.T)
    return gram ** degree
Beispiel #8
0
def d_kernel(X, k=2):
    """Computes a degree-*k* kernel over the binarized samples of *X*
    by inclusion-exclusion on binomial counts of feature subsets
    (presumably the disjunctive d-kernel — confirm against the package docs).

    NOTE(review): relies on the package helper ``check_X_T`` and on
    ``co`` (cvxopt) imported at module level.
    """
    X, _ = check_X_T(X, None)
    # Binary feature-by-sample matrix: 1.0 iff the feature is active.
    B = co.matrix((X.T > 0) * 1.0)
    n_features, n_samples = B.size

    # choose[i] == C(i, k); index 0 forced to 0, as in the original table.
    choose = [binom(i, k) for i in range(n_features + 1)]
    choose[0] = 0
    total = choose[n_features]

    # Sample-by-sample gram; the diagonal holds active-feature counts.
    gram = B.T * B

    K = co.matrix(0.0, (gram.size[0], gram.size[1]))
    for a in range(n_samples):
        active_a = int(gram[a, a])
        for b in range(a, n_samples):
            active_b = int(gram[b, b])
            shared = int(gram[a, b])
            value = (total
                     - choose[n_features - active_a]
                     - choose[n_features - active_b]
                     + choose[n_features - active_a - active_b + shared])
            K[a, b] = K[b, a] = value
    return np.array(K)
Beispiel #9
0
def monotone_conjunctive_kernel(X, T=None, c=2):
    """Monotone conjunctive (mC) kernel: elementwise ``binom`` of the
    inner products of *X* and *T* with parameter *c*."""
    X, T = check_X_T(X, T)
    return binom(np.dot(X, T.T), c)
Beispiel #10
0
def monotone_conjunctive_kernel(X, T=None, c=2):
    """Computes the monotone conjunctive kernel between *X* and *T*.

    Each entry is ``binom`` of the corresponding inner product and *c*.
    """
    X, T = check_X_T(X, T)
    inner_products = np.dot(X, T.T)
    return binom(inner_products, c)
Beispiel #11
0
def all_subsequences_kernel(X, T=None, binary=False):
    """Computes the all-subsequences kernel between *X* and *T* as the
    dot product of their all-subsequences embeddings (package helpers)."""
    X, T = check_X_T(X, T)
    return dictionary_dot(all_subsequences_embedding(X, binary=binary),
                          all_subsequences_embedding(T, binary=binary))
Beispiel #12
0
def fixed_length_subsequences_kernel(X, T=None, p=2, binary=False):
    """Computes the fixed-length (p) subsequences kernel between *X*
    and *T*: the dot product of their subsequence embeddings."""
    X, T = check_X_T(X, T)

    def _embed(samples):
        # Shared embedding call so both sides use identical parameters.
        return fixed_length_subsequences_embedding(samples, p=p, binary=binary)

    return dictionary_dot(_embed(X), _embed(T))
Beispiel #13
0
def spectrum_kernel(X, T=None, p=2, binary=False):
    """Computes the p-spectrum kernel between *X* and *T*: the dot
    product of their p-spectrum embeddings (package helpers)."""
    X, T = check_X_T(X, T)
    phi_x = spectrum_embedding(X, p=p, binary=binary)
    phi_t = spectrum_embedding(T, p=p, binary=binary)
    return dictionary_dot(phi_x, phi_t)