Example #1
import numpy as np

# train_file, test_file, train_num and the helpers create_muldata_fromfile,
# train_ridge_regression_model and error_01 are defined elsewhere in the module.
def experiment14_17():

    # Sweep the regularisation strength lambda = 10**fi and report the
    # training, validation and test 0/1 errors for selected fi values.
    for fi in range(2, -11, -1):

        X, Y = create_muldata_fromfile(train_file)
        X_train = X[:train_num]
        Y_train = Y[:train_num]
        X_val = X[train_num:]
        Y_val = Y[train_num:]

        W = train_ridge_regression_model(X_train, Y_train, 10 ** fi)
        Y_p = np.dot(X_train, W).transpose()[0]

        etrain = error_01(Y_p, Y_train)

        Y_p = np.dot(X_val, W).transpose()[0]
        e_val = error_01(Y_p, Y_val)  # renamed from `eval` to avoid shadowing the builtin

        X, Y = create_muldata_fromfile(test_file)
        Y_p = np.dot(X, W).transpose()[0]

        eout_ = error_01(Y_p, Y)

        # fi values reported by experiments 14-17
        fi_ex14 = [-4, -6, -2, -10, -8]
        fi_ex15 = [-9, -1, -5, -7, -3]
        fi_ex16 = [-6, -8, -4, -2, 0]
        fi_ex17 = [-3, -8, -6, -9, 0]

        if fi in fi_ex17:
            print('fi : %d, errortrain -> %f errorval -> %f errorout -> %f'
                  % (fi, etrain, e_val, eout_))
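The helper routines called above are not shown on this page. A minimal sketch of what `error_01` and `train_ridge_regression_model` could look like, assuming labels in {-1, +1} and the closed-form ridge solution W = (X^T X + lambda*I)^(-1) X^T Y; the actual implementations may differ:

import numpy as np

def error_01(y_pred, y_true):
    # 0/1 error: fraction of sign disagreements, labels assumed to be +/-1.
    return np.mean(np.sign(np.ravel(y_pred)) != np.sign(np.ravel(y_true)))

def train_ridge_regression_model(X, Y, lam):
    # Closed-form ridge regression; W is returned as a column vector so that
    # np.dot(X, W).transpose()[0] in the snippet above yields a 1-D score array.
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float).reshape(-1, 1)
    d = X.shape[1]
    return np.linalg.solve(np.dot(X.T, X) + lam * np.eye(d), np.dot(X.T, Y))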
Example #2
def error_with_fi(fi):
    # Train ridge regression on the full training set with regularisation fi,
    # then print the 0/1 error on the training and the test set.
    X, Y = create_muldata_fromfile(train_file)

    W = train_ridge_regression_model(X, Y, fi)
    # print(X.shape, W.shape)
    Y_p = np.dot(X, W).transpose()[0]

    print(error_01(Y_p, Y))  # in-sample error

    X, Y = create_muldata_fromfile(test_file)

    Y_p = np.dot(X, W).transpose()[0]

    print(error_01(Y_p, Y))  # test error
Example #3
def ex_19_20():
    # Grid search over the kernel width gamma and the regularisation
    # parameter ru; get_beta and predict_by_X are defined elsewhere.
    X, Y = create_muldata_fromfile(train_data)

    X_train = X[:400]
    Y_train = Y[:400]
    X_test = X[400:]
    Y_test = Y[400:]

    gammas = [32, 2, 0.125]
    rus = [0.001, 1, 1000]

    for gamma in gammas:

        for ru in rus:

            beta = get_beta(X_train, Y_train, gamma, ru)

            # X_new = [[4, 6], [3, 5]]
            # print('Beta get')

            r = predict_by_X(beta, X_test, X_train, gamma)

            print('gamma|ru|error %f|%f|%f' % (gamma, ru, error_01(r, Y_test)))
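`get_beta` and `predict_by_X` are not shown. Since beta is fitted from the training set with a width gamma and a regularisation parameter ru, and prediction needs the training points again, one plausible reading is kernel ridge regression (LSSVM) with an RBF kernel; a minimal sketch under that assumption, not the page's actual implementation:

import numpy as np

def rbf_kernel(A, B, gamma):
    # K[i, j] = exp(-gamma * ||A_i - B_j||^2)
    sq = (np.sum(A ** 2, axis=1)[:, None]
          + np.sum(B ** 2, axis=1)[None, :]
          - 2 * A.dot(B.T))
    return np.exp(-gamma * sq)

def get_beta(X_train, Y_train, gamma, ru):
    # Kernel ridge regression: beta = (ru * I + K)^-1 y
    X_train = np.asarray(X_train, dtype=float)
    K = rbf_kernel(X_train, X_train, gamma)
    return np.linalg.solve(ru * np.eye(len(X_train)) + K, np.ravel(Y_train))

def predict_by_X(beta, X_test, X_train, gamma):
    # Sign of the kernel expansion sum_n beta_n * K(x_n, x).
    K = rbf_kernel(np.asarray(X_test, dtype=float),
                   np.asarray(X_train, dtype=float), gamma)
    return np.where(K.dot(beta) >= 0, 1, -1)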
Example #4
    def avg_error_pertree(self, X, y):
        # Mean 0/1 error of the individual trees in the ensemble
        # (self.__rf_tree holds the fitted trees, self.__n_estimator their count).
        all_error = 0

        for i in range(self.__n_estimator):
            cur_crtree = self.__rf_tree[i]
            y_p = cur_crtree.predict(X)
            all_error += error_01(y_p, y)

        return all_error / self.__n_estimator
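The class this method belongs to is not shown. A hypothetical wrapper it could live in, sketched with bootstrap-sampled scikit-learn decision trees; only the attribute names `__rf_tree` and `__n_estimator` come from this page, everything else is an assumption:

import numpy as np
from sklearn.tree import DecisionTreeClassifier

class SimpleRandomForest:
    def __init__(self, n_estimator=10):
        self.__n_estimator = n_estimator
        self.__rf_tree = []

    def fit(self, X, y):
        # Train each tree on a bootstrap sample of the data (bagging).
        X, y = np.asarray(X), np.asarray(y)
        n = len(X)
        for _ in range(self.__n_estimator):
            idx = np.random.randint(0, n, size=n)
            self.__rf_tree.append(DecisionTreeClassifier().fit(X[idx], y[idx]))

    def avg_error_pertree(self, X, y):
        # Same idea as the method above: mean 0/1 error of the individual trees.
        y = np.asarray(y)
        return np.mean([np.mean(t.predict(X) != y) for t in self.__rf_tree])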
Example #5
        # Tail of experiment14_17 (see Example #1): report selected fi values.
        fi_ex17 = [-3, -8, -6, -9, 0]

        if fi in fi_ex17:
            print('fi : %d, errortrain -> %f errorval -> %f errorout -> %f'
                  % (fi, etrain, e_val, eout_))


# 5-fold cross-validation over the regularisation exponent fi (lambda = 10**fi).
cv_num = 5
X, Y = create_muldata_fromfile(train_file)
group_num = len(X) // cv_num  # integer division so the slice bounds stay ints

for fi in [-4, 0, -2, -6, -8]:

    all_error = 0

    for i in range(cv_num):
        # Hold out the i-th block for validation, train on the remaining blocks.
        X_train = np.vstack((X[0:i * group_num], X[(i + 1) * group_num:]))
        Y_train = np.vstack((Y[0:i * group_num], Y[(i + 1) * group_num:]))
        X_val = X[i * group_num:(i + 1) * group_num]
        Y_val = Y[i * group_num:(i + 1) * group_num]

        W = train_ridge_regression_model(X_train, Y_train, 10 ** fi)
        Y_p = np.dot(X_val, W).transpose()[0]
        ecv = error_01(Y_p, Y_val)
        all_error += ecv

    print('fi : %d, errorcv -> %f ' % (fi, all_error / cv_num))


error_with_fi(1)
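For comparison only (not part of the original code): scikit-learn's KFold with its default shuffle=False produces a similar contiguous split (fold sizes can differ by one when len(X) is not divisible by 5). The sketch reuses the page's train_ridge_regression_model and error_01 helpers:

import numpy as np
from sklearn.model_selection import KFold

def cv_error_for_fi(X, Y, fi, n_splits=5):
    # Mean validation 0/1 error of ridge regression with lambda = 10**fi.
    errors = []
    for train_idx, val_idx in KFold(n_splits=n_splits).split(X):
        W = train_ridge_regression_model(X[train_idx], Y[train_idx], 10 ** fi)
        Y_p = np.dot(X[val_idx], W).transpose()[0]
        errors.append(error_01(Y_p, Y[val_idx]))
    return np.mean(errors)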
Example #6
            # Tail of the predict helper: threshold the logistic output r at 0.5
            # and map it to a +/-1 label.
            if r - 0.5 > 0:
                zo_result.append(1)
            else:
                zo_result.append(-1)

        result = zo_result

    return result


X, Y = create_muldata_fromfile(train_file)

# X = np.array([[1, 1, 0], [1, 0, 1]])
# Y = np.array([1, -1])

W = train_logistic_model(X, Y, l_n=0.01, is_sgd=True)

# print(W)

X, Y = create_muldata_fromfile(test_file)

ra = predict(W, X)

print(error_01(ra, Y))

'''
fixed learning rate 0.001 -> error 0.475
fixed learning rate 0.01  -> error 0.22 (took 68 s)
'''
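`train_logistic_model` and the head of `predict` are not shown. A minimal sketch of SGD logistic regression for +/-1 labels with a fixed learning rate l_n, consistent with the call above; the iteration count and the update schedule are assumptions, and the real `predict` appears to branch on a condition not visible in the fragment:

import numpy as np

def sigmoid(s):
    return 1.0 / (1.0 + np.exp(-s))

def train_logistic_model(X, Y, l_n=0.01, is_sgd=True, n_iter=2000):
    # Logistic regression for +/-1 labels: SGD cycles through the examples,
    # otherwise the full batch gradient is used.  n_iter is an assumed default.
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float).ravel()
    W = np.zeros(X.shape[1])
    for t in range(n_iter):
        if is_sgd:
            i = t % len(X)
            grad = -Y[i] * X[i] * sigmoid(-Y[i] * np.dot(W, X[i]))
        else:
            grad = np.mean((-Y * sigmoid(-Y * X.dot(W)))[:, None] * X, axis=0)
        W -= l_n * grad
    return W

def predict(W, X):
    # Probability of the +1 class, thresholded at 0.5, mapped to +/-1.
    p = sigmoid(np.asarray(X, dtype=float).dot(W))
    return [1 if r - 0.5 > 0 else -1 for r in p]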
Example #7
            # Tail of adaboost_model_predict: accumulate each stump's weighted
            # score for the current example x, then take the sign as the label.
            score += ((x[i] - theta) * s) * w

        result = 1 if score > 0 else -1
        p_ys.append(result)

    return p_ys


# Train AdaBoost with 300 decision stumps, then report the 0/1 error
# on the training set (Ein) and on the test set (Eout).
datas = get_datas(train_file)
X = [dd[:-1] for dd in datas]
Y = [dd[-1] for dd in datas]
models = train_stump_tree_adaboost(datas, 300)

# models.sort(key=lambda x:x[-1])
# print(models)

yps = adaboost_model_predict(models, X)
print(error_01(Y, yps))  # Ein

datas = get_datas(test_file)
X = [dd[:-1] for dd in datas]
Y = [dd[-1] for dd in datas]
yps = adaboost_model_predict(models, X)
print(error_01(Y, yps))  # Eout

# X = [[1, 25], [0, 3], [2, 5]]
# Y = [1, 1, -1]
# datas = [[1, 25, 1], [0, 3, 1], [2, 5, -1]]
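`train_stump_tree_adaboost` and the head of `adaboost_model_predict` are not shown. Below is a sketch of textbook AdaBoost with one-dimensional decision stumps, assuming each row of `datas` is [x_1, ..., x_d, y] with y in {-1, +1} and each model is a tuple (feature index, threshold, direction, weight). Note that the fragment above scores with the raw margin (x[i] - theta) * s rather than its sign, so the page's actual helpers may differ from this sketch:

import numpy as np

def stump_predict(X, i, theta, s):
    # Decision stump: s * sign(x[i] - theta), with sign(0) treated as +1.
    return s * np.where(X[:, i] - theta >= 0, 1, -1)

def train_stump_tree_adaboost(datas, T):
    # Textbook AdaBoost; returns a list of (i, theta, s, alpha) tuples.
    datas = np.asarray(datas, dtype=float)
    X, y = datas[:, :-1], datas[:, -1]
    n, d = X.shape
    u = np.full(n, 1.0 / n)                 # example weights
    models = []
    for _ in range(T):
        best = None                         # (weighted error, i, theta, s)
        for i in range(d):
            thresholds = np.concatenate(([-np.inf], np.sort(X[:, i])))
            for theta in thresholds:
                for s in (1, -1):
                    pred = stump_predict(X, i, theta, s)
                    err = np.sum(u[pred != y]) / np.sum(u)
                    if best is None or err < best[0]:
                        best = (err, i, theta, s)
        err, i, theta, s = best
        err = min(max(err, 1e-12), 1 - 1e-12)   # avoid division by zero
        scale = np.sqrt((1 - err) / err)
        alpha = np.log(scale)
        pred = stump_predict(X, i, theta, s)
        u = np.where(pred == y, u / scale, u * scale)   # re-weight examples
        models.append((i, theta, s, alpha))
    return models

def adaboost_model_predict(models, X):
    # Weighted vote of the stumps: sign(sum_t alpha_t * h_t(x)).
    X = np.asarray(X, dtype=float)
    score = np.zeros(len(X))
    for i, theta, s, alpha in models:
        score += alpha * stump_predict(X, i, theta, s)
    return [1 if r > 0 else -1 for r in score]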