Example #1
def evaluate(individual):
    """DEAP-style fitness: clip each gene to its bounds, then score the
    XGBoost configuration on every pre-built cross-validation fold."""
    keys = list(parameters.keys())
    dim = len(keys)
    assert len(individual) == dim
    # Clip out-of-range genes back onto the search-space bounds.
    for i in range(dim):
        lo, up = para_bounds[i]
        if individual[i] < lo:
            print(f'increase {keys[i]} : {individual[i]} to lower bound !')
            individual[i] = lo
        elif individual[i] > up:
            print(f'decrease {keys[i]} : {individual[i]} to upper bound !')
            individual[i] = up

    # Tuned values plus fixed (non-optimized) settings.
    BayesOp_Parameters = dict(zip(keys, individual))
    BayesOp_Parameters['silent'] = True
    BayesOp_Parameters['nthread'] = -1
    BayesOp_Parameters['seed'] = 0
    #    BayesOp_Parameters['objective'] = "multi:softprob"
    # XGBoost requires integer values for these two parameters.
    BayesOp_Parameters['max_depth'] = int(BayesOp_Parameters['max_depth'])
    BayesOp_Parameters['n_estimators'] = int(
        BayesOp_Parameters['n_estimators'])
    # (Alternative loading path: the fold lists can also be read from an
    # .npz archive via r = np.load(name).)

    Feature_train_list = data_dic['F_tr_l']
    Label_train_list = data_dic['L_tr_l']
    Feature_valid_list = data_dic['F_va_l']
    Label_valid_list = data_dic['L_va_l']
    Num_list = len(Feature_train_list)

    Num_Cross_Folders = Num_list
    ml_record = MetricList(Num_Cross_Folders)
    for i in range(Num_list):
        Feature_train = Feature_train_list[i]
        Label_train = Label_train_list[i]
        Feature_valid = Feature_valid_list[i]
        Label_valid = Label_valid_list[i].ravel()

        xgb = xgboost.XGBClassifier(**BayesOp_Parameters)
        xgb.fit(Feature_train, Label_train.ravel())
        Label_predict = xgb.predict(Feature_valid)
        ml_record.measure(i, Label_valid, Label_predict, 'weighted')

    return ml_record.mean_G(),  # one-element tuple: the DEAP fitness convention
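# Usage sketch (not from the original source): the trailing-comma return
# marks evaluate() as a DEAP fitness function. `parameters`, `para_bounds`
# and `data_dic` are module-level globals it assumes; the registration
# below is a minimal, illustrative wiring only.
import random
from deap import base, creator, tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, n=len(parameters))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evaluate)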
Example #2
def xgboostcv(
    max_depth,
    learning_rate,
    n_estimators,
    gamma,
    min_child_weight,
    max_delta_step,
    subsample,
    colsample_bytree,
    silent=True,
    nthread=-1,
    seed=0,
):
    """Average the G-mean of one XGBoost configuration over the
    pre-built cross-validation folds."""
    r = np.load(name)  # `name` is a module-level path to the .npz of folds
    Feature_train_list = r['F_tr_l']
    Label_train_list = r['L_tr_l']
    Feature_valid_list = r['F_va_l']
    Label_valid_list = r['L_va_l']
    Num_list = len(Feature_train_list)

    Num_Cross_Folders = Num_list
    ml_record = MetricList(Num_Cross_Folders)
    for i in range(Num_list):
        Feature_train = Feature_train_list[i]
        Label_train = Label_train_list[i]
        Feature_valid = Feature_valid_list[i]
        Label_valid = Label_valid_list[i].ravel()

        # `silent` and `nthread` are the legacy (pre-1.0) xgboost names for
        # what later releases call `verbosity` and `n_jobs`.
        xgb = xgboost.XGBClassifier(max_depth=int(max_depth),
                                    learning_rate=learning_rate,
                                    n_estimators=int(n_estimators),
                                    silent=silent,
                                    nthread=nthread,
                                    gamma=gamma,
                                    min_child_weight=min_child_weight,
                                    max_delta_step=max_delta_step,
                                    subsample=subsample,
                                    colsample_bytree=colsample_bytree,
                                    seed=seed,
                                    objective="multi:softprob")
        xgb.fit(Feature_train, Label_train.ravel())
        Label_predict = xgb.predict(Feature_valid)

        ml_record.measure(i, Label_valid, Label_predict, 'weighted')

    return ml_record.mean_G()
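# Usage sketch (an assumption, not in the original source): xgboostcv's
# one-keyword-per-parameter signature matches the bayes_opt convention,
# so it can be handed to BayesianOptimization directly. All bounds below
# are illustrative placeholders.
from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=xgboostcv,
    pbounds={
        'max_depth': (2, 12),
        'learning_rate': (0.01, 0.3),
        'n_estimators': (50, 1000),
        'gamma': (0.0, 1.0),
        'min_child_weight': (1, 20),
        'max_delta_step': (0, 10),
        'subsample': (0.5, 1.0),
        'colsample_bytree': (0.1, 1.0),
    },
    random_state=0,
)
optimizer.maximize(init_points=5, n_iter=25)
print(optimizer.max)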
def evaluate(individual):
    """DEAP-style fitness: clip genes to bounds, then return the mean
    G-mean over the fold files as a one-element tuple."""
    keys = list(parameters.keys())
    dim = len(keys)
    assert len(individual) == dim
    # Clip out-of-range genes back onto the search-space bounds.
    for i in range(dim):
        lo, up = para_bounds[i]
        if individual[i] < lo:
            print(f'increase {keys[i]} : {individual[i]} to lower bound !')
            individual[i] = lo
        elif individual[i] > up:
            print(f'decrease {keys[i]} : {individual[i]} to upper bound !')
            individual[i] = up

    BayesOp_Parameters = dict(zip(keys, individual))
    BayesOp_Parameters['silent'] = True
    BayesOp_Parameters['nthread'] = -1
    BayesOp_Parameters['seed'] = 1234
    #    BayesOp_Parameters['objective'] = "multi:softprob"
    BayesOp_Parameters['max_depth'] = int(BayesOp_Parameters['max_depth'])
    BayesOp_Parameters['n_estimators'] = int(
        BayesOp_Parameters['n_estimators'])
    # One fold file per cross-validation split (5 folds assumed).
    Num_Cross_Folders = 5
    ml_record = MetricList(Num_Cross_Folders)

    for i, file in enumerate(files):
        name = dir_path + '/' + file
        r = np.load(name)

        Feature_train = r['F_tr']
        Label_train = r['L_tr']
        Feature_test = r['F_te']
        Label_test = r['L_te'].ravel()

        xgb = xgboost.XGBClassifier(**BayesOp_Parameters)
        xgb.fit(Feature_train, Label_train.ravel())
        Label_predict = xgb.predict(Feature_test)
        ml_record.measure(i, Label_test, Label_predict, 'weighted')

    return ml_record.mean_G(),  # one-element tuple: the DEAP fitness convention
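# Alternative sketch (the standard DEAP pattern, not from the original
# source): instead of clipping inside evaluate(), decorate the variation
# operators so offspring never leave `para_bounds` in the first place.
def check_bounds(bounds):
    def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
            for child in offspring:
                for i, (lo, up) in enumerate(bounds):
                    child[i] = min(max(child[i], lo), up)
            return offspring
        return wrapper
    return decorator

# toolbox.decorate("mate", check_bounds(para_bounds))
# toolbox.decorate("mutate", check_bounds(para_bounds))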
Example #4
def xgboostcv(max_depth,
              learning_rate,
              n_estimators,
              gamma,
              min_child_weight,
              max_delta_step,
              subsample,
              colsample_bytree,
              silent=True,
              nthread=-1,
              seed=1234):
    Num_Cross_Folders = 5
    ml_record = MetricList(Num_Cross_Folders)
    for i, file in enumerate(files):
        name = dir_path + '/' + file
        r = np.load(name)

        Feature_train = r['F_tr']
        Label_train = r['L_tr']
        Feature_test = r['F_te']
        Label_test = r['L_te'].ravel()

        xgb = xgboost.XGBClassifier(max_depth=int(max_depth),
                                    learning_rate=learning_rate,
                                    n_estimators=int(n_estimators),
                                    silent=silent,
                                    nthread=nthread,
                                    gamma=gamma,
                                    min_child_weight=min_child_weight,
                                    max_delta_step=max_delta_step,
                                    subsample=subsample,
                                    colsample_bytree=colsample_bytree,
                                    seed=seed,
                                    objective="multi:softprob")
        xgb.fit(Feature_train, Label_train.ravel())
        Label_predict = xgb.predict(Feature_test)

        ml_record.measure(i, Label_test, Label_predict, 'weighted')

    return ml_record.mean_G()
def evaluate(para_value):
    """Loss-style objective for a minimizer: returns 1 - mean G-mean."""
    keys = para_keys
    BayesOp_Parameters = dict(zip(keys, para_value))
    BayesOp_Parameters['silent'] = True
    BayesOp_Parameters['nthread'] = -1
    BayesOp_Parameters['seed'] = 0
    #    BayesOp_Parameters['objective'] = "multi:softprob"
    BayesOp_Parameters['max_depth'] = int(BayesOp_Parameters['max_depth'])
    BayesOp_Parameters['n_estimators'] = int(BayesOp_Parameters['n_estimators'])

    Num_Cross_Folders = 5
    ml_record = MetricList(Num_Cross_Folders)
    for i, file in enumerate(files):
        name = dir_path + '/' + file
        r = np.load(name)

        Feature_train = r['F_tr']
        Label_train = r['L_tr']
        Feature_test = r['F_te']
        Label_test = r['L_te'].ravel()

        xgb = xgboost.XGBClassifier(**BayesOp_Parameters)
        xgb.fit(Feature_train, Label_train.ravel())
        Label_predict = xgb.predict(Feature_test)

        ml_record.measure(i, Label_test, Label_predict, 'weighted')

    return 1 - ml_record.mean_G()  # minimization form: smaller is better
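# Usage sketch (an assumption, not in the original source): this variant
# returns a loss (1 - G-mean), so it suits a minimizer such as
# scikit-optimize's gp_minimize. `para_keys` and all bounds below are
# illustrative placeholders.
from skopt import gp_minimize

para_keys = ['max_depth', 'learning_rate', 'n_estimators', 'gamma',
             'min_child_weight', 'max_delta_step', 'subsample',
             'colsample_bytree']
result = gp_minimize(
    evaluate,  # f(list_of_parameter_values) -> loss
    dimensions=[(2, 12), (0.01, 0.3), (50, 1000), (0.0, 1.0),
                (1, 20), (0, 10), (0.5, 1.0), (0.1, 1.0)],
    n_calls=30,
    random_state=0,
)
print('best loss:', result.fun)
print('best parameters:', dict(zip(para_keys, result.x)))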
            elif m == 'MCB':  # (branch header reconstructed; the fragment begins mid-chain)
                pool_classifiers = RandomForestClassifier(n_estimators=10)
                pool_classifiers.fit(Feature_train, Label_train.ravel())
                mcb = MCB(pool_classifiers)
                mcb.fit(Feature_train, Label_train.ravel())
                Label_predict = mcb.predict(Feature_test)
            elif m == 'DES-MI':
                pool_classifiers = RandomForestClassifier(n_estimators=10)
                pool_classifiers.fit(Feature_train, Label_train.ravel())
                dmi = DESMI(pool_classifiers)
                dmi.fit(Feature_train, Label_train.ravel())
                Label_predict = dmi.predict(Feature_test)
            elif m == 'One_vs_Rest-SMOTE-XGBoost':
                sm = SMOTE()
                # imbalanced-learn renamed fit_sample() to fit_resample().
                Feature_train_o, Label_train_o = sm.fit_resample(Feature_train, Label_train.ravel())
                clf = OneVsRestClassifier(xgboost.XGBClassifier())
                clf.fit(Feature_train_o, Label_train_o)
                Label_predict = clf.predict(Feature_test)
            elif m == 'One_vs_Rest-XGBoost':
                clf = OneVsRestClassifier(xgboost.XGBClassifier())
                clf.fit(Feature_train, Label_train.ravel())
                Label_predict = clf.predict(Feature_test)

            ml_record.measure(i, Label_test, Label_predict, 'weighted')
            i += 1

        file_wirte = "Result_One_vs_All.txt"
        ml_record.output(file_wirte, m, Dir)
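# Context sketch (assumed, since the fragment above begins mid-function):
# the branches come from a benchmarking loop over method names `m`, and
# rely on roughly these imports.
import xgboost
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from imblearn.over_sampling import SMOTE
from deslib.dcs.mcb import MCB
from deslib.des.des_mi import DESMI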