Example #1
import numpy as np

import tuning                  # project module: provides design_matrix
import probability_estimators  # project module: provides second_order_statistics
# sigmoid_function is assumed to be defined elsewhere in the project.


def Relevance_Vector_Classification_Training(X, Y, kernel_mode):

    # Prior precision matrix A: one hyperparameter per weight, plus the bias.
    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    np.fill_diagonal(A, 1e-5)

    # Diagonal matrix of logistic-noise terms sigma(y) * (1 - sigma(y)).
    sigmoids = [sigmoid_function(y) * (1 - sigmoid_function(y)) for y in Y]
    B = np.diag(sigmoids)

    designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

    num_iter = 0

    while True:

        # Posterior mode and covariance of the weights under the current A.
        weightMaxPosteriori, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        gamas = np.ones(A.shape[0], float) - np.diag(A) * np.diag(Sigma)

        A_old = np.copy(A)
        deleted_indexes = []
        for j in range(1, A.shape[0]):

            # Hyperparameter re-estimation: alpha_j = gamma_j / mu_j^2.
            A[j, j] = gamas[j] / (weightMaxPosteriori[j] ** 2)

            # Prune basis functions whose precision diverges.
            if A[j, j] > 1e9:
                deleted_indexes.append(j)

        if len(deleted_indexes) > 0:

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # Shift by one: row/column 0 of A is the bias, which X, Y and B lack.
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 or num_iter > 20000:
            break

        num_iter += 1

    weightMaxPosteriori, _ = probability_estimators.second_order_statistics(
        designMatrix, A, B, Y)

    return X, weightMaxPosteriori
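
The update inside the loop is the standard RVM hyperparameter re-estimation: gamma_j = 1 - alpha_j * Sigma_jj measures how well-determined weight j is, and alpha_j is then re-estimated as gamma_j / mu_j^2. A minimal self-contained sketch of one such update on made-up toy numbers (no project modules required):

# --- sketch: one RVM alpha-update step on toy values ---
import numpy as np

mu = np.array([0.9, 0.02, -1.3])         # posterior mean of the weights
Sigma_diag = np.array([0.1, 0.5, 0.05])  # diagonal of the posterior covariance
alpha = np.array([1e-5, 1e-5, 1e-5])     # current hyperparameters

gamma = 1.0 - alpha * Sigma_diag         # "well-determinedness" of each weight
alpha_new = gamma / mu ** 2              # re-estimated precisions

# Weights whose precision explodes carry no information and get pruned,
# exactly as the deleted_indexes bookkeeping above does.
print(alpha_new, alpha_new > 1e9)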
Example #2
import numpy as np

import tuning                  # project module: provides design_matrix
import probability_estimators  # project module: provides second_order_statistics


def test():

    # Noise-free sinc data, the classic RVM regression benchmark.
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y, 'r')

    variance = 10 ** (-2)

    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1)
    np.fill_diagonal(B, 1 / variance)

    designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
    mean, Sigma = probability_estimators.second_order_statistics(
        designMatrix, A, B, Y)

    gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)
    num_iter = 0

    deleted_indexes = []
    while True:

        A_old = np.copy(A)
        for j in range(1, A.shape[0]):

            # Hyperparameter re-estimation: alpha_j = gamma_j / mu_j^2.
            A[j, j] = gamas[j] / (mean[j] ** 2)

            if A[j, j] > 1000:
                deleted_indexes.append(j)

        if len(deleted_indexes) > 0:

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # Shift by one: row/column 0 of A is the bias, which X, Y and B lack.
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)
            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)

            deleted_indexes.clear()

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 and num_iter > 1:
            break

        num_iter += 1
        designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
        mean, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

    # Final hyperparameters: finite entries mark the relevance vectors.
    res = np.diag(A)
    return res
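
second_order_statistics itself is not shown in these examples. For a Gaussian likelihood with noise-precision matrix B and prior precision A, the standard RVM posterior it presumably computes is Sigma = (Phi^T B Phi + A)^(-1) and mu = Sigma Phi^T B y. A self-contained sketch under that assumption (the function name and signature here are hypothetical stand-ins for the project module):

# --- sketch: assumed behaviour of second_order_statistics (regression) ---
import numpy as np

def second_order_statistics_sketch(Phi, A, B, y):
    # Standard RVM weight posterior: Sigma = (Phi^T B Phi + A)^-1,
    # mu = Sigma Phi^T B y. This is an assumption about the project module.
    Sigma = np.linalg.inv(Phi.T @ B @ Phi + A)
    mu = Sigma @ Phi.T @ B @ y
    return mu, Sigma

# Tiny toy problem: 3 samples, design matrix with a bias column.
Phi = np.array([[1.0, 0.2], [1.0, 0.5], [1.0, 0.9]])
A = np.eye(2) * 1e-5
B = np.eye(3) / 0.01                     # noise precision = 1 / variance
y = np.array([0.1, 0.4, 0.8])

mu, Sigma = second_order_statistics_sketch(Phi, A, B, y)
print(mu)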
Example #3
import numpy as np
import matplotlib.pyplot as plt

import tuning                  # project module: provides design_matrix
import probability_estimators  # project module: provides second_order_statistics


def train(kernel_mode):

    # Noise-free sinc data, the classic RVM regression benchmark.
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y, 'r')

    variance = 0.0001

    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1e-5)
    np.fill_diagonal(B, 1 / variance)

    designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

    num_iter = 0

    deleted_indexes = []
    while True:

        A_old = np.copy(A)

        mean, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

        # Index 0 is the bias: its alpha is re-estimated but never pruned.
        for j in range(0, A.shape[0]):

            A[j, j] = gamas[j] / (mean[j] ** 2)

            if A[j, j] > 1e9 and j > 0:
                deleted_indexes.append(j)

        # Optional noise re-estimation (disabled; the variance is kept fixed):
        # B = np.zeros((X.shape[0], X.shape[0]), float)
        # np.fill_diagonal(B, (1 / tuning.common_noise_variance(Y, designMatrix, mean, Sigma, gamas)))

        if len(deleted_indexes) > 0:

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # Shift by one: row/column 0 of A is the bias, which X, Y and B lack.
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

            deleted_indexes.clear()

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 or num_iter > 400:
            break

        num_iter += 1

    mean, _ = probability_estimators.second_order_statistics(
        designMatrix, A, B, Y)

    return X, mean
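
train returns the surviving inputs (the relevance vectors) and the posterior weight means, but no predictor. A minimal prediction sketch, assuming mean[0] is the bias weight and mean[j + 1] pairs with the j-th surviving input, mirroring how the design matrix is indexed above; the kernel argument is a hypothetical stand-in for whatever tuning.design_matrix uses:

# --- sketch: predicting at new inputs from train()'s output ---
import numpy as np

def predict(x_new, relevance_vectors, mean, kernel):
    # RVM predictive mean: y(x) = w_0 + sum_j w_j * K(x, x_j).
    k = np.array([kernel(x_new, xj) for xj in relevance_vectors])
    return mean[0] + k @ mean[1:]

# Example with a stand-in Gaussian kernel (not necessarily the project's):
gaussian = lambda x, y: np.exp(-0.5 * (x - y) ** 2)
rv = np.array([-3.0, 0.5, 4.0])      # toy relevance vectors
w = np.array([0.1, 0.8, -0.4, 0.3])  # toy bias + weights
print(predict(1.0, rv, w, gaussian))
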
def test():

    # Noise-free sinc data, the classic RVM regression benchmark.
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y, 'r')

    variance = 0.01

    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1)
    np.fill_diagonal(B, 1 / variance)

    designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
    mean, Sigma = probability_estimators.second_order_statistics(designMatrix, A, B, Y)

    gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)
    num_iter = 0

    deleted_indexes = []
    while True:

        A_old = np.copy(A)

        for j in range(1, A.shape[0]):

            # Hyperparameter re-estimation: alpha_j = gamma_j / mu_j^2.
            A[j, j] = gamas[j] / (mean[j] ** 2)

            if A[j, j] > 1e9:
                deleted_indexes.append(j)

        if len(deleted_indexes) > 0:

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # Shift by one: row/column 0 of A is the bias, which X, Y and B lack.
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)
            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)

            deleted_indexes.clear()

        # Convergence criterion suggested in RVM+Explained,
        # which can be found in the Literature folder.
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 and num_iter > 1:
            break

        num_iter += 1
        designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
        mean, Sigma = probability_estimators.second_order_statistics(designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

    # Evaluate the fitted model on fresh points.
    X_new = np.random.uniform(-10, 10, 100)
    Y_true = np.sin(np.abs(X_new)) / np.abs(X_new)

    def linear_spline_kernel(x, y):
        # Assumed to match the "linear_spline" mode of tuning.design_matrix
        # (the univariate linear-spline kernel from Tipping's sinc example).
        m = min(x, y)
        return 1 + x * y + x * y * m - ((x + y) / 2) * m ** 2 + m ** 3 / 3

    # RVM prediction over the surviving (relevance) points:
    # y(x) = w_0 + sum_j w_j * K(x, x_j).
    Y_new = []
    squared_errors = []
    for i in range(X_new.shape[0]):
        y = mean[0] + sum(mean[j + 1] * linear_spline_kernel(X_new[i], X[j])
                          for j in range(X.shape[0]))
        Y_new.append(y)
        squared_errors.append((Y_true[i] - y) ** 2)

    print("RMSE:", np.sqrt(np.mean(squared_errors)))

    plt.plot(X_new, Y_new, '.')
    plt.show()
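
A possible entry point for this example (the "linear_spline" mode string follows its usage above; treat this as a sketch rather than part of the original module):

# --- sketch: running Example #3 end to end ---
if __name__ == "__main__":
    relevance_X, weights = train("linear_spline")
    print("relevance vectors kept:", relevance_X.shape[0])
    test()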