def get_armijos_step_size(kernel_matrices,
                          d,
                          y_mat,
                          alpha0,
                          box_constraints,
                          gamma0,
                          Jd,
                          D,
                          dJ,
                          c=0.5,
                          T=0.5):
    """Armijo backtracking line search along descent direction D.

    Shrinks the step gamma by factor T until the sufficient-decrease
    condition J(d + gamma*D) <= J(d) + gamma*c*m holds, where
    m = D'*dJ is the directional derivative (should be negative for a
    descent direction).

    Returns the accepted step size gamma.
    """
    gamma = gamma0
    # Directional derivative along D; negative for a true descent direction.
    m = D.T.dot(dJ)

    # BUG FIX: the original body did `gamma = gamma * T; return gamma / 2`
    # in the else branch, so the loop could never iterate — it returned a
    # half-shrunk step after a single failed probe instead of backtracking
    # until the Armijo condition holds. The eps guard prevents an infinite
    # loop if the condition is never met (gamma underflows toward 0).
    while gamma > np.finfo(float).eps:
        combined_kernel_matrix = k_helpers.get_combined_kernel(
            kernel_matrices, d + gamma * D)

        alpha, new_J, info = compute_J(combined_kernel_matrix, y_mat, alpha0,
                                       box_constraints)

        if new_J <= Jd + gamma * c * m:
            return gamma
        # Sufficient decrease not achieved: shrink the step and retry.
        gamma = gamma * T
    return gamma
# --- Example #2 (scraped-snippet separator) ---
def compute_gamma_linesearch(gamma_min,gamma_max,delta_max,cost_min,cost_max,d,D,kernel_matrices,J_prev,y_mat,
                             alpha,C,goldensearch_precision_factor):
    """Golden-section line search for the step size gamma along direction D.

    Repeatedly probes two interior points of [gamma_min, gamma_max],
    evaluates the SVM objective at d + gamma*D, and shrinks the bracket
    toward the cheapest endpoint/probe. Returns (gamma, alpha, cost) for
    the best point found, falling back to (gamma_min, alpha, cost_min)
    when no probed cost improves on the starting objective J_prev.
    """
    phi = (5 ** 0.5 + 1) / 2  # golden ratio; 1/phi ~ 0.618
    gammas = np.array([gamma_min, gamma_max])
    costs = np.array([cost_min, cost_max])
    best = np.argmin(costs)
    tiny = np.finfo(float).eps
    # Stop once the bracket is small relative to |delta_max| (or collapses).
    while (gamma_max - gamma_min) > goldensearch_precision_factor * abs(delta_max) and gamma_max > tiny:
        # Right probe splits the bracket at ~0.618; left probe subdivides
        # the left portion the same way.
        g_right = gamma_min + (gamma_max - gamma_min) / phi
        g_left = gamma_min + (g_right - gamma_min) / phi
        alpha_right, cost_right = compute_J_SVM(
            k_helpers.get_combined_kernel(kernel_matrices, d + g_right * D), y_mat, C)
        alpha_left, cost_left = compute_J_SVM(
            k_helpers.get_combined_kernel(kernel_matrices, d + g_left * D), y_mat, C)
        costs = np.array([cost_min, cost_left, cost_right, cost_max])
        gammas = np.array([gamma_min, g_left, g_right, gamma_max])
        best = np.argmin(costs)
        if best == 0:
            # Minimum at the left end: shrink the bracket to [min, g_left].
            gamma_max, cost_max, alpha = g_left, cost_left, alpha_left
        elif best == 1:
            # Minimum at the left probe: keep [min, g_right].
            gamma_max, cost_max, alpha = g_right, cost_right, alpha_right
        elif best == 2:
            # Minimum at the right probe: keep [g_left, max].
            gamma_min, cost_min, alpha = g_left, cost_left, alpha_left
        else:
            # Minimum at the right end: shrink the bracket to [g_right, max].
            gamma_min, cost_min, alpha = g_right, cost_right, alpha_right
    # Only move the iterate if the best probed cost beats the start J_prev.
    if costs[best] < J_prev:
        return gammas[best], alpha, costs[best]
    else:
        return gamma_min, alpha, cost_min
def test_kernel_processing(train_data, test_data, kernel_functions, weight_train):
    """Build the test-vs-train combined kernel matrix.

    For each base kernel function, fills an (n_test, n_train) matrix of
    pairwise kernel values between test and train samples, then combines
    the per-kernel matrices with the fixed weights.
    """
    n_test = test_data.shape[0]
    n_train = train_data.shape[0]
    kernel_matrices = []
    for kernel_func in kernel_functions:
        # One pairwise kernel matrix per base kernel.
        kernel_mat = np.empty((n_test, n_train))
        for i in range(n_test):
            for j in range(n_train):
                kernel_mat[i, j] = kernel_func(test_data[i], train_data[j])
        kernel_matrices.append(kernel_mat)
    return k_helpers.get_combined_kernel(kernel_matrices, weight_train)
# --- Example #4 (scraped-snippet separator) ---
def get_armijos_step_size(iteration, C, kernel_matrices, d, y_mat, alpha0, gamma0, Jd, D, dJ, c=0.5, T = 0.5):
    """Armijo backtracking line search along descent direction D.

    Shrinks the step gamma by factor T until the sufficient-decrease
    condition J(d + gamma*D) <= J(d) + gamma*c*m holds, where
    m = D'*dJ is the directional derivative (should be negative for a
    descent direction).

    Returns the accepted step size gamma. `iteration` and `alpha0` are
    kept for interface compatibility with callers but are unused here.
    """
    gamma = gamma0
    # Directional derivative along D; negative for a true descent direction.
    m = D.T.dot(dJ)

    # BUG FIX: the original did `gamma = gamma * T; return gamma` in the
    # else branch, so `while True` exited after exactly one shrink — the
    # backtracking loop never actually looped. The eps guard prevents an
    # infinite loop if the condition is never met (gamma underflows to 0).
    while gamma > np.finfo(float).eps:
        combined_kernel_matrix = k_helpers.get_combined_kernel(kernel_matrices, d + gamma * D)

        alpha, new_J = compute_J_SVM(combined_kernel_matrix, y_mat, C)

        if new_J <= Jd + gamma * c * m:
            return gamma
        # Sufficient decrease not achieved: shrink the step and retry.
        gamma = gamma * T
    return gamma
def my_kernel(u):
    """Precomputed-kernel callback: evaluate every base kernel on u and
    combine the resulting matrices with the fixed weights.

    NOTE(review): relies on module-level `kernel_functions` and
    `weight_train` being defined — confirm at the call site.
    """
    return k_helpers.get_combined_kernel(
        k_helpers.get_all_kernels(u, kernel_functions), weight_train)
# --- Example #6 (scraped-snippet separator) ---
        # Process the test data: build the three test-vs-train kernel
        # matrices (GLCM, FD, Harris features) and combine them with the
        # learned weights before scoring.
        # NOTE(review): this fragment's enclosing function/loop header is
        # outside this view; `final_gamma`, `weights`, `clf`, `num_k` and
        # the *_X_* arrays are presumably defined there — verify.

        kernel_functions = [
            k_helpers.create_histogram_kernel,
            k_helpers.create_histogram_kernel,
            k_helpers.create_exponential_kernel(final_gamma),
        ]
        n_test = GLCM_X_test.shape[0]
        n_train = GLCM_X_train.shape[0]
        kernel_test_matrices = []
        GLCM_test_matrics = np.empty((n_test, n_train))
        FD_test_matrics = np.empty((n_test, n_train))
        Harris_test_matrics = np.empty((n_test, n_train))
        # Pairwise kernel values between each test and train sample,
        # one matrix per feature type.
        for i in range(n_test):
            for j in range(n_train):
                GLCM_test_matrics[i][j] = kernel_functions[0](GLCM_X_test[i],
                                                              GLCM_X_train[j])
                FD_test_matrics[i][j] = kernel_functions[1](FD_X_test[i],
                                                            FD_X_train[j])
                Harris_test_matrics[i][j] = kernel_functions[2](
                    Harris_X_test[i], Harris_X_train[j])
        kernel_test_matrices.append(GLCM_test_matrics)
        kernel_test_matrices.append(FD_test_matrics)
        kernel_test_matrices.append(Harris_test_matrics)

        # Weighted combination of the three kernels -> precomputed test kernel.
        final_test_data = k_helpers.get_combined_kernel(
            kernel_test_matrices, weights)

        # Accumulate fold accuracy; prints per-fold and final CV accuracy.
        score_SVC += clf.score(final_test_data, y_test)
        print('一次循环的精度为%s' % (clf.score(final_test_data, y_test)))
    print('SVC最后的分类精度:%s' % (score_SVC / num_k))
def classiy(data, target):
    """10-fold stratified cross-validation of a precomputed-kernel SVM.

    For each fold: builds the train kernel matrices, combines them with
    the fixed module-level weights (`weight_train`), fits an SVC with
    kernel='precomputed', builds the matching test-vs-train kernel via
    test_kernel_processing, and accumulates the fold accuracy. Prints
    per-fold and final mean accuracy.

    Parameters
    ----------
    data : ndarray, samples to split
    target : ndarray, class labels (0 vs non-zero)
    """
    num_k = 10
    score_SVC = 0
    skf = StratifiedKFold(n_splits=num_k)
    for train_index, test_index in skf.split(data, target):
        X_train, X_test = data[train_index], data[test_index]
        y_train, y_test = target[train_index], target[test_index]
        n, d = X_train.shape
        # Kernel bandwidth heuristic; could itself be learned via the MKL
        # path noted below.
        gamma = 1.0 / d
        # Single fixed base kernel here; alternatives (linear, rbf, poly,
        # exponential, sigmoid) are available in k_helpers.
        kernel_functions = [
            k_helpers.create_histogram_kernel,
        ]
        # Remap labels to {-1, +1}: class 0 -> +1, everything else -> -1.
        # (Replaces the original element-wise loops with a vectorized form.)
        y_train = np.where(y_train != 0, -1, 1)
        y_test = np.where(y_test != 0, -1, 1)
        # Linearly combine the base kernels into one precomputed Gram
        # matrix and fit the SVM on it.
        clf = svm.SVC(kernel='precomputed')
        kernel_matrices = k_helpers.get_all_kernels(X_train, kernel_functions)
        # Fixed (not learned) kernel weights.
        new_train = k_helpers.get_combined_kernel(kernel_matrices,
                                                  weight_train)
        clf.fit(new_train, y_train)
        # NOTE: the weights could instead be learned with
        # algo1.find_kernel_weights(d0, kernel_matrices, C, y_train, ...)
        # (multiple-kernel learning) and the combined kernel refit; this
        # path deliberately uses the fixed module-level weight_train.

        # Build the test-vs-train kernel with the same fixed weights.
        final_test_data = test_kernel_processing(X_train, X_test,
                                                 kernel_functions,
                                                 weight_train)

        score_SVC += clf.score(final_test_data, y_test)
        print('一次循环的精度为%s' % (clf.score(final_test_data, y_test)))
    print('SVC最后的分类精度:%s' % (score_SVC / num_k))