Example #1
import numpy as np

# `tuning`, `probability_estimators` and `sigmoid_function` are helpers from
# the surrounding project.


def Relevance_Vector_Classification_Training(X, Y, kernel_mode):

    # The diagonal of A holds the hyperparameters alpha_i (index 0 is the
    # bias term).
    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    np.fill_diagonal(A, 1e-5)

    # B approximates the IRLS weighting matrix, sigma(y) * (1 - sigma(y)).
    sigmoids = [sigmoid_function(y) * (1 - sigmoid_function(y)) for y in Y]
    B = np.diag(sigmoids)

    designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

    num_iter = 0

    while True:

        A_old = np.copy(A)

        weightMaxPosteriori, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        # gamma_i = 1 - alpha_i * Sigma_ii: how well weight i is determined
        # by the data.
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

        deleted_indexes = []
        for j in range(1, A.shape[0]):

            # Fixed-point update: alpha_j <- gamma_j / mu_j^2.
            A[j, j] = gamas[j] / (weightMaxPosteriori[j] ** 2)

            if A[j, j] > 1e9:
                deleted_indexes.append(j)

        if len(deleted_indexes) > 0:

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # A has one extra (bias) row, so shift the indexes before
            # pruning the data-sized arrays.
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 or num_iter > 20000:
            break

        num_iter += 1
    weightMaxPosteriori, _ = probability_estimators.second_order_statistics(designMatrix, A, B, Y)

    return X, weightMaxPosteriori
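
A minimal usage sketch for the trainer above (not from the original source): it assumes binary {0, 1} targets and the project's "rbf" kernel mode; the toy data is illustrative only.

import numpy as np

X = np.concatenate([np.random.randn(50) - 2.0, np.random.randn(50) + 2.0])
Y = np.concatenate([np.zeros(50), np.ones(50)])

relevance_vectors, weights = Relevance_Vector_Classification_Training(X, Y, "rbf")
print(relevance_vectors.shape[0], "relevance vectors kept")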
Example #2
import numpy as np


def test():

    # Sinc-like target; linspace(-10, 10, 100) never lands exactly on x = 0,
    # so the division is safe.
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y,'r')

    variance = 10**(-2)

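    # A holds the prior precisions (alphas); B is the noise-precision matrix
    # (1 / variance) * I.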
    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1)
    np.fill_diagonal(B, (1 / variance))

    designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X, Y)
    mean, Sigma = probability_estimators.second_order_statistics(
        designMatrix, A, B, Y)

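    # gamma_i = 1 - alpha_i * Sigma_ii measures how well weight i is
    # determined by the data.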
    gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)
    num_iter = 0

    deleted_indexes = []
    while (True):

        A_old = np.copy(A)
        for j in range(1, A.shape[0]):

            A[j, j] = gamas[j] / (mean[j]**2)

            if (A[j, j] > 1000):
                deleted_indexes.append(j)

        if (len(deleted_indexes) > 0):

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)
            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)

            deleted_indexes.clear()

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 and num_iter > 1:
            break

        num_iter += 1
        designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X, Y)
        mean, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

    # The surviving diagonal of A identifies the relevance vectors.
    return np.diag(A)
Example #3
import numpy as np


def train(kernel_mode):

    # Sinc-like target; linspace(-10, 10, 100) never lands exactly on x = 0,
    # so the division is safe.
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y,'r')

    variance = 0.0001

    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1e-5)
    # A = A * np.random.normal(0, 6, A.shape[0])
    np.fill_diagonal(B, (1 / variance))

    designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

    num_iter = 0

    deleted_indexes = []
    while (True):

        A_old = np.copy(A)

        mean, Sigma = probability_estimators.second_order_statistics(
            designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)
        # gamas = 1 - np.diag(A) * np.diag(Sigma)

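        # Tipping's fixed-point update: alpha_j <- gamma_j / mu_j^2; weights
        # whose alpha diverges are pruned below.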
        for j in range(0, A.shape[0]):

            A[j, j] = gamas[j] / (mean[j]**2)

            if (A[j, j] > 1e9 and j > 0):
                deleted_indexes.append(j)

        # B = np.zeros((X.shape[0], X.shape[0]), float)
        # np.fill_diagonal(B, (1 / tuning.common_noise_variance(Y, designMatrix, mean, Sigma, gamas)))

        if (len(deleted_indexes) > 0):

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

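            # A has one extra (bias) row, so shift the indexes before pruning
            # the data-sized arrays.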
            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

            deleted_indexes.clear()

        # Convergence criterion
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 or num_iter > 400:
            break

        num_iter += 1

    mean, _ = probability_estimators.second_order_statistics(
        designMatrix, A, B, Y)

    return X, mean
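
A minimal usage sketch for train() (not from the original source); "linear_spline" is one of the kernel modes used elsewhere in these examples.

relevance_X, posterior_mean = train("linear_spline")
print(relevance_X.shape[0], "relevance vectors kept")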
Example #4
import numpy as np
import matplotlib.pyplot as plt


def test():
    X = np.linspace(-10, 10, 100)
    Y = np.sin(np.abs(X)) / np.abs(X)

    # plt.plot(Y,'r')

    variance = 0.01

    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    B = np.zeros((X.shape[0], X.shape[0]), float)

    np.fill_diagonal(A, 1)
    np.fill_diagonal(B, (1 / variance))

    designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
    mean, Sigma = probability_estimators.second_order_statistics(designMatrix, A, B, Y)

    gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)
    num_iter = 0

    deleted_indexes = []
    while (True):

        A_old = np.copy(A)

        for j in range(1, A.shape[0]):

            A[j, j] = gamas[j] / (mean[j] ** 2)

            if (A[j, j] > 1e9):
                deleted_indexes.append(j)

        if (len(deleted_indexes) > 0):

            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            B = np.delete(B, deleted_indexes, 0)
            B = np.delete(B, deleted_indexes, 1)
            X = np.delete(X, deleted_indexes, 0)
            Y = np.delete(Y, deleted_indexes, 0)

            deleted_indexes.clear()

        # Convergence criterion suggested in "RVM Explained"
        # (see the Literature folder)
        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3 and num_iter > 1:
            break

        num_iter += 1
        designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
        mean, Sigma = probability_estimators.second_order_statistics(designMatrix, A, B, Y)
        gamas = np.ones(X.shape[0] + 1, float) - np.diag(A) * np.diag(Sigma)

    # Refresh the posterior mean for the final, pruned model.
    designMatrix = tuning.design_matrix(X.shape[0], "linear_spline", X)
    mean, _ = probability_estimators.second_order_statistics(designMatrix, A, B, Y)

    # Predict at new inputs with the posterior mean weights. The kernel below
    # is the standard univariate linear-spline kernel; it is assumed to match
    # the one implemented by tuning.design_matrix.
    def linear_spline_kernel(a, b):
        m = min(a, b)
        return 1 + a * b + a * b * m - ((a + b) / 2.0) * m ** 2 + (m ** 3) / 3.0

    X_new = np.random.uniform(-10, 10, 100)
    Y_true = np.sin(np.abs(X_new)) / np.abs(X_new)

    Y_new = []
    for i in range(X_new.shape[0]):
        # Bias weight plus the kernel expansion over the surviving inputs.
        y = mean[0]
        for j in range(X.shape[0]):
            y += mean[j + 1] * linear_spline_kernel(X_new[i], X[j])
        Y_new.append(y)

    mse = np.mean((Y_true - np.asarray(Y_new)) ** 2)
    print("MSE on new samples:", mse)

    plt.plot(X_new, Y_new)
    plt.show()
Example #5
import math

import numpy as np

# `tuning` and `almost_sigmoid` are helpers from the surrounding project.


def iterative_reweighted_least_squares_algorithm2(X,
                                                  target,
                                                  kernel_mode="rbf",
                                                  max_iter=25,
                                                  alpha_threshold=1e9,
                                                  gradient_threshold=1e-5,
                                                  overshoot_criterion=2.0**-8):

    weights = np.zeros(X.shape[0] + 1)

    # The diagonal of A holds the hyperparameters alpha_i (index 0 is the
    # bias term).
    A = np.zeros((X.shape[0] + 1, X.shape[0] + 1), float)
    np.fill_diagonal(A, 1e-5)

    designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)
    deleted_indexes = []
    deleted_indexes = []

    for it in range(1000):
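        # Each outer pass alternates a Newton/IRLS fit of the weights with a
        # fixed-point re-estimation of the hyperparameters alpha.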

        A_old = np.copy(A)

        for j in range(1, A.shape[0]):

            if it > 0:
                A[j, j] = gamas[j] / (weightMaxPosteriori[j]**2)

            if A[j, j] > alpha_threshold:
                deleted_indexes.append(j)

        if (len(deleted_indexes) > 0):
            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # weights is aligned with A, so prune it before shifting the
            # indexes down to the data-sized arrays.
            weights = np.delete(weights, deleted_indexes, 0)

            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            target = np.delete(target, deleted_indexes, 0)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

            deleted_indexes.clear()

        y = np.dot(designMatrix, weights)
        y = np.asarray([almost_sigmoid(y[i]) for i in range(y.shape[0])])

        # Negative log-posterior: cross-entropy data term plus the Gaussian
        # prior penalty 0.5 * w^T A w.
        data_term = 0
        for i in range(target.shape[0]):

            if target[i] == 1:
                data_term -= math.log(max(y[i], 1e-10))
            else:
                data_term -= math.log(max(1.0 - y[i], 1e-10))

        regulariser = 0.5 * np.dot(weights.T, np.dot(A, weights))
        error = (data_term + regulariser) / weights.shape[0]

        # tempB = [y[i] * (1 - y[i]) for i in range(len(y))]
        # B = np.asarray(tempB)

        for g in range(max_iter):

            temp_weight = [y[k] * (1 - y[k]) for k in range(y.shape[0])]
            irls_weights = np.diagflat(temp_weight)

            # Hessian of the negative log-posterior: Phi^T B Phi + A. Its
            # inverse is the Laplace-approximation posterior covariance.
            Hessian = np.dot(designMatrix.T, np.dot(irls_weights,
                                                    designMatrix))
            Hessian += A
            Sigma = np.linalg.inv(Hessian)

            # Gradient of the negative log-posterior: Phi^T (t - y) - A w.
            e = np.asarray([target[k] - y[k] for k in range(y.shape[0])])
            gradient_error = np.dot(designMatrix.T, e) - np.dot(A, weights)

            if (g >= 2 and np.linalg.norm(gradient_error) / weights.shape[0] <
                    gradient_threshold):
                break

            # Newton step direction.
            delta_weights = np.dot(Sigma, gradient_error)

            # Backtracking (line-search) step size.
            l = 1.0

            # Overshooting part
            while (l > overshoot_criterion):

                weightMaxPosteriori = weights + l * delta_weights

                y = np.dot(designMatrix, weightMaxPosteriori)
                y = np.asarray(
                    [almost_sigmoid(y[i]) for i in range(y.shape[0])])

                data_term = 0
                for i in range(target.shape[0]):

                    # Clamp the sigmoid output away from 0/1 so the log is
                    # always defined.
                    if target[i] == 1:
                        data_term -= math.log(max(y[i], 1e-10))
                    else:
                        data_term -= math.log(max(1.0 - y[i], 1e-10))

                regulariser = 0.5 * np.dot(weightMaxPosteriori.T,
                                           np.dot(A, weightMaxPosteriori))
                error_update = (data_term + regulariser) / weights.shape[0]

                if (error_update > error):
                    # Overshot: halve the step size and try again.
                    l /= 2.0
                else:
                    weights = weightMaxPosteriori
                    error = error_update
                    break

            gamas = np.ones(A.shape[0], float) - np.diag(A) * np.diag(Sigma)
            A_old = np.copy(A)

            deleted_indexes = []

        for j in range(1, A.shape[0]):

            # Fixed-point update: alpha_j <- gamma_j / mu_j^2.
            A[j, j] = gamas[j] / (weights[j]**2)

            if A[j, j] > alpha_threshold:
                deleted_indexes.append(j)

        if (len(deleted_indexes) > 0):
            A = np.delete(A, deleted_indexes, 0)
            A = np.delete(A, deleted_indexes, 1)
            A_old = np.delete(A_old, deleted_indexes, 0)
            A_old = np.delete(A_old, deleted_indexes, 1)

            # Prune weights (aligned with A) before shifting the indexes
            # down to the data-sized arrays.
            weights = np.delete(weights, deleted_indexes, 0)

            deleted_indexes[:] = [x - 1 for x in deleted_indexes]

            X = np.delete(X, deleted_indexes, 0)
            target = np.delete(target, deleted_indexes, 0)

            designMatrix = tuning.design_matrix(X.shape[0], kernel_mode, X)

            deleted_indexes.clear()

        if np.abs(np.trace(A) - np.trace(A_old)) < 1e-3:
            break

    return weights, X
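
A minimal usage sketch for the routine above (not from the original source), assuming binary {0, 1} targets; the toy data is illustrative only.

import numpy as np

X = np.concatenate([np.random.randn(30) - 2.0, np.random.randn(30) + 2.0])
target = np.concatenate([np.zeros(30), np.ones(30)])

weights, pruned_X = iterative_reweighted_least_squares_algorithm2(X, target, kernel_mode="rbf")
print(pruned_X.shape[0], "relevance vectors kept")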