Code Example #1
File: learners.py  Project: arennax/fss18_xia
# learners.py uses numpy and scikit-learn, plus project helpers
# (normalize, KFold_df, mre_calc, sa_calc) defined elsewhere in the repository.
import numpy as np
from sklearn.tree import DecisionTreeRegressor


def CART(dataset, a=12, b=1, c=2):
    # 3-fold cross-validated CART (decision-tree) effort estimation.
    dataset = normalize(dataset)
    mre_list = []
    sa_list = []
    for train, test in KFold_df(dataset, 3):
        train_input = train.iloc[:, :-1]
        train_actual_effort = train.iloc[:, -1]
        test_input = test.iloc[:, :-1]
        test_actual_effort = test.iloc[:, -1]

        # Tuning ranges used elsewhere in the project:
        # max_depth: [1, 12], min_samples_leaf: [1, 12], min_samples_split: [2, 21]
        model = DecisionTreeRegressor(max_depth=a,
                                      min_samples_leaf=b,
                                      min_samples_split=c)
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values

        mre_list.append(mre_calc(test_predict_Y, test_actual_Y))  # MRE per fold
        sa_list.append(sa_calc(test_predict_Y, test_actual_Y))    # SA per fold

    mre_mean = np.mean(mre_list)
    sa_mean = np.mean(sa_list)

    return mre_mean  # swap to sa_mean to report SA instead of MRE
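
The metric helpers mre_calc and sa_calc are not part of this listing. A minimal sketch of what such helpers usually compute in effort estimation follows; the exact formulas (percentage scaling, the random-guessing baseline for SA) are assumptions, so the project's own implementations may differ in detail.

import numpy as np

def mre_calc(y_pred, y_actual):
    # Mean relative error: lower is better (assumed definition).
    return np.mean(np.abs(y_actual - y_pred) / y_actual) * 100

def sa_calc(y_pred, y_actual):
    # Standardized accuracy: 1 - MAE(model) / MAE(guessing the mean);
    # higher is better (assumed definition).
    mae_model = np.mean(np.abs(y_actual - y_pred))
    mae_guess = np.mean(np.abs(y_actual - np.mean(y_actual)))
    return (1 - mae_model / mae_guess) * 100
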
Code Example #2
File: tuned_learners.py  Project: arennax/effort_redo
    # Nested builder from tuned_learners.py; `metrics`, the train/test splits,
    # and the metric helpers are closed over from the enclosing tuning function.
    def cart_builder(a, b, c):
        model = DecisionTreeRegressor(max_depth=a,
                                      min_samples_leaf=b,
                                      min_samples_split=c)
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values
        if metrics == 0:
            return mre_calc(test_predict_Y, test_actual_Y)  # MRE
        elif metrics == 1:
            return sa_calc(test_predict_Y, test_actual_Y)  # SA
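
For context, the sketch below shows one way such a builder could be embedded; the wrapper name tune_cart and its body are hypothetical, added only to make the closed-over variables explicit.

# Hypothetical enclosing scope (everything except cart_builder is illustrative):
def tune_cart(train, test, metrics=0):
    train_input = train.iloc[:, :-1]
    train_actual_effort = train.iloc[:, -1]
    test_input = test.iloc[:, :-1]
    test_actual_effort = test.iloc[:, -1]

    def cart_builder(a, b, c):
        ...  # body as in Code Example #2; it closes over the variables above

    # A tuner would call cart_builder(a, b, c) for different hyperparameter
    # settings and keep the best score; here we just evaluate the defaults.
    return cart_builder(12, 1, 2)
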
Code Example #3
from sklearn.ensemble import RandomForestRegressor


def RF(dataset, max_depth=3):
    # 3-fold cross-validated random-forest effort estimation.
    dataset = normalize(dataset)
    mre_list = []
    sa_list = []
    for train, test in KFold_df(dataset, 3):
        train_input = train.iloc[:, :-1]
        train_actual_effort = train.iloc[:, -1]
        test_input = test.iloc[:, :-1]
        test_actual_effort = test.iloc[:, -1]

        # Pass max_depth by keyword; positionally it would be taken as n_estimators.
        model = RandomForestRegressor(max_depth=max_depth)
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values

        mre_list.append(mre_calc(test_predict_Y, test_actual_Y))  # MRE per fold
        sa_list.append(sa_calc(test_predict_Y, test_actual_Y))    # SA per fold

    return mre_list, sa_list
Code Example #4
from sklearn import svm


def SVM(dataset):
    # 3-fold cross-validated support-vector-regression effort estimation.
    dataset = normalize(dataset)
    mre_list = []
    sa_list = []
    for train, test in KFold_df(dataset, 3):
        train_input = train.iloc[:, :-1]
        train_actual_effort = train.iloc[:, -1]
        test_input = test.iloc[:, :-1]
        test_actual_effort = test.iloc[:, -1]

        model = svm.SVR(gamma='scale')
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values

        mre_list.append(mre_calc(test_predict_Y, test_actual_Y))  # MRE per fold
        sa_list.append(sa_calc(test_predict_Y, test_actual_Y))    # SA per fold

    return mre_list, sa_list
Code Example #5
from sklearn import neighbors


def KNN(dataset, n_neighbors=3):
    # 3-fold cross-validated k-nearest-neighbours effort estimation.
    dataset = normalize(dataset)
    mre_list = []
    sa_list = []
    for train, test in KFold_df(dataset, 3):
        train_input = train.iloc[:, :-1]
        train_actual_effort = train.iloc[:, -1]
        test_input = test.iloc[:, :-1]
        test_actual_effort = test.iloc[:, -1]

        model = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors)
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values

        mre_list.append(mre_calc(test_predict_Y, test_actual_Y))  # MRE per fold
        sa_list.append(sa_calc(test_predict_Y, test_actual_Y))    # SA per fold

    return mre_list, sa_list
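
All of the learners above rely on the helpers normalize and KFold_df, which are not included in this listing. The sketch below assumes column-wise min-max scaling and a shuffled k-way DataFrame split; the project's actual helpers may differ in detail.

import numpy as np
import pandas as pd

def normalize(df):
    # Assumed behaviour: scale every column into [0, 1].
    return (df - df.min()) / (df.max() - df.min() + 1e-12)

def KFold_df(df, k):
    # Assumed behaviour: yield (train, test) DataFrame pairs from a shuffled k-way split.
    shuffled = df.sample(frac=1, random_state=0).reset_index(drop=True)
    folds = np.array_split(shuffled, k)
    for i in range(k):
        yield pd.concat(folds[:i] + folds[i + 1:]), folds[i]
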
Code Example #6
File: optimizers.py  Project: arennax/effort_redo
# optimizers.py relies on the project helpers mre_calc and sa_calc (defined elsewhere).
import random
import numpy as np
from sklearn.tree import DecisionTreeRegressor


def flash(metrics, train_input, train_actual_effort, test_input, test_actual_effort, pop_size):
    def convert(index):
        # Decode a flat index in [0, 2880) into the CART hyperparameters:
        # max_depth in [1, 12], min_samples_leaf in [1, 12], min_samples_split in [2, 21].
        a = index // 240 + 1
        b = index % 240 // 20 + 1
        c = index % 20 + 2
        return a, b, c

    all_case = set(range(0, 2880))
    # Sample from a sorted sequence; random.sample() no longer accepts sets.
    modeling_pool = random.sample(sorted(all_case), pop_size)

    List_X = []
    List_Y = []

    # Evaluate the initial pool of configurations on the held-out split.
    for i in range(len(modeling_pool)):
        temp = convert(modeling_pool[i])
        List_X.append(temp)
        model = DecisionTreeRegressor(max_depth=temp[0], min_samples_leaf=temp[1], min_samples_split=temp[2])
        model.fit(train_input, train_actual_effort)
        test_predict_Y = model.predict(test_input)
        test_actual_Y = test_actual_effort.values
        if metrics == 0:
            List_Y.append(mre_calc(test_predict_Y, test_actual_Y))  # MRE
        elif metrics == 1:
            List_Y.append(sa_calc(test_predict_Y, test_actual_Y))  # SA

    remain_pool = all_case - set(modeling_pool)
    test_list = [convert(i) for i in remain_pool]

    upper_model = DecisionTreeRegressor()  # surrogate model over the configuration space
    life = 20  # budget of consecutive unpromising candidates

    if metrics == 0:
        # MRE: lower is better, so accept candidates predicted below the median.
        while len(List_X) < 201 and life > 0:  # evaluation budget
            upper_model.fit(List_X, List_Y)
            candidate = random.sample(test_list, 1)
            test_list.remove(candidate[0])
            candi_pred_value = upper_model.predict(candidate)[0]
            if candi_pred_value < np.median(List_Y):
                List_X.append(candidate[0])
                candi_config = candidate[0]
                candi_model = DecisionTreeRegressor(max_depth=candi_config[0], min_samples_leaf=candi_config[1],
                                                    min_samples_split=candi_config[2])
                candi_model.fit(train_input, train_actual_effort)
                candi_pred_Y = candi_model.predict(test_input)
                candi_actual_Y = test_actual_effort.values

                List_Y.append(mre_calc(candi_pred_Y, candi_actual_Y))
            else:
                life -= 1

        return np.min(List_Y)  # best (minimum) MRE found

    elif metrics == 1:
        # SA: higher is better, so accept candidates predicted above the median.
        while len(List_X) < 201 and life > 0:  # evaluation budget
            upper_model.fit(List_X, List_Y)
            candidate = random.sample(test_list, 1)
            test_list.remove(candidate[0])
            candi_pred_value = upper_model.predict(candidate)[0]
            if candi_pred_value > np.median(List_Y):
                List_X.append(candidate[0])
                candi_config = candidate[0]
                candi_model = DecisionTreeRegressor(max_depth=candi_config[0], min_samples_leaf=candi_config[1],
                                                    min_samples_split=candi_config[2])
                candi_model.fit(train_input, train_actual_effort)
                candi_pred_Y = candi_model.predict(test_input)
                candi_actual_Y = test_actual_effort.values

                List_Y.append(sa_calc(candi_pred_Y, candi_actual_Y))
            else:
                life -= 1

        return np.max(List_Y)  # best (maximum) SA found
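
The 2880 cases enumerate the full grid of max_depth in [1, 12], min_samples_leaf in [1, 12] and min_samples_split in [2, 21] (12 x 12 x 20 = 2880), which convert() decodes back into a hyperparameter triple. A hypothetical driver for a single metric, mirroring the fold-splitting code in the learners above, might look like this; the dataset loader is illustrative only.

# Illustrative only: run flash() per fold on a normalized dataset.
dataset = normalize(load_effort_dataset())  # hypothetical loader
best_mre_per_fold = []
for train, test in KFold_df(dataset, 3):
    best_mre_per_fold.append(
        flash(0,                                  # metrics=0 -> minimise MRE
              train.iloc[:, :-1], train.iloc[:, -1],
              test.iloc[:, :-1], test.iloc[:, -1],
              pop_size=20))
print(np.mean(best_mre_per_fold))
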