# Custom scorer built from `my_pred` (defined elsewhere in the file);
# greater_is_better=True means higher values of my_pred are better.
# NOTE(review): this scorer is never passed to TPOT below — scoring='f1' is
# used instead; confirm whether that is intentional.
my_custom_scorer = make_scorer(my_pred, greater_is_better=True)

    # NOTE(review): the indented code below is the interior of a function
    # whose `def` is outside this chunk; x_train/y_train, x_test/y_test,
    # tpot_config, df_btest and evalution_model (sic) come from that scope.
    # TPOT genetic-programming AutoML search: 100 generations of 100
    # candidate pipelines, F1 scoring, 5-fold CV, all cores; random_state
    # pinned for reproducibility.
    tpo = TPOTClassifier(generations=100,
                         verbosity=2,
                         population_size=100,
                         scoring='f1',
                         n_jobs=-1,
                         config_dict=tpot_config,
                         mutation_rate=0.9,
                         crossover_rate=0.1,
                         cv=5,
                         random_state=5)

    # Run the evolutionary pipeline search on the training split.
    tpo.fit(x_train, y_train)

    # Report metrics on train, test, and the extra "b-test" hold-out frame
    # (target column "is_sucess_by_contract" — misspelling is the real
    # column name; do not "fix" it here).
    evalution_model(tpo, x_train, y_train)
    evalution_model(tpo, x_test, y_test)
    evalution_model(tpo,
                    np.array(df_btest.drop("is_sucess_by_contract", axis=1)),
                    df_btest["is_sucess_by_contract"])

    # Export the best pipeline as a .py script named with a month/day/hour/
    # minute stamp. NOTE(review): fields are not zero-padded, so e.g.
    # Jan 12 and Nov 2 can collide ("112..."); consider strftime("%m%d%H%M").
    import datetime
    i = datetime.datetime.today()
    print(i)
    s = str(i.month) + str(i.day) + str(i.hour) + str(i.minute)
    model_name = "GM_export/main_new/" + "GM" + s + ".py"
    tpo.export(model_name)

    # Function-local imports kept as-is. NOTE(review): `np` is already used
    # above at the df_btest call — presumably also imported at module level;
    # verify, otherwise that call would raise NameError.
    import numpy as np
    import pandas as pd
    from sklearn.ensemble import RandomForestClassifier
Exemplo n.º 2
0
        # NOTE(review): tail of an `lgb.train(...)` call whose opening line
        # is outside this chunk; `dtrain` and `evals_result` come from the
        # enclosing scope.
        train_set=dtrain,
        # 100 boosting rounds, evaluated against the training set itself —
        # with no separate validation set and early_stopping_rounds=None,
        # no early stopping occurs.
        num_boost_round=100,
        valid_sets=[dtrain],
        valid_names=None,
        fobj=None,
        feval=None,
        init_model=None,
        categorical_feature='auto',
        early_stopping_rounds=None,
        # Per-round metric values are written into `evals_result`;
        # progress is logged every 10 rounds.
        evals_result=evals_result,
        verbose_eval=10,
        keep_training_booster=False,
        callbacks=None,
    )
    # Evaluate on train / test / the extra "b-test" hold-out.
    print("================训练集================")
    evalution_model(clfs, x_train, y_train)
    print("================测试集================")
    evalution_model(clfs, x_test, y_test)
    print("===========b_test===================")
    evalution_model(clfs, df_xbtest, df_ybtest)
    # Leaf-index features: pred_leaf=True returns, per sample, the index of
    # the leaf reached in every tree (classic GBDT+LR feature extraction).
    y_pred_train = clfs.predict(x_train, pred_leaf=True)
    y_pred_test = clfs.predict(x_test, pred_leaf=True)
    y_pred_btest = clfs.predict(df_xbtest, pred_leaf=True)

    # One-hot encode the leaf indices; the encoder is fit on the training
    # leaves only so all three splits share one encoding. .toarray()
    # densifies the sparse matrix before wrapping in np.array.
    enc = OneHotEncoder()
    enc.fit(y_pred_train)
    train_encode = np.array(enc.transform(y_pred_train).toarray())
    test_encode = np.array(enc.transform(y_pred_test).toarray())
    btest_encode = np.array(enc.transform(y_pred_btest).toarray())
Exemplo n.º 3
0
    # NOTE(review): interior of a function whose `def` is outside this
    # chunk; `model` (a compiled Keras-style network), the data splits and
    # evalution_model (sic) come from the enclosing scope.
    # Disabled save/load and alternative evaluation paths, kept as written:
    # model.save('models/deep_model/lstm_0228.h5')
    # model = load_model('models/deep_model/lstm_0228.h5')

    # classes = model.predict_classes(xt)
    # acc = np_utils.accuracy(classes, yt)
    # score, acc = model.evaluate(xt, yt, batch_size=16, verbose=1)
    # Loss and accuracy on the held-out test split.
    score, acc = model.evaluate(x_test, y_test, verbose=1)
    print('Test score:', score)
    print('Test accuracy:', acc)

    # Raw network outputs per split. NOTE(review): these three variables are
    # not used below in this fragment (predict_proba is called again
    # instead) — possibly dead code; confirm before removing.
    y_pred_train = model.predict(x_train)
    y_pred_test = model.predict(x_test)
    y_pred_btest = model.predict(x_btest)

    # Evaluate on train / test / B-test splits.
    print("================训练集================")
    evalution_model(model, x_train, y_train)
    print("================测试集==============")
    evalution_model(model, x_test, y_test)
    print("================B测试集==============")
    evalution_model(model, x_btest, y_btest)

    # Class probabilities, to be fed downstream as a "voice_prob" feature.
    # NOTE(review): Sequential.predict_proba is deprecated/removed in newer
    # Keras; model.predict already returns probabilities for sigmoid/softmax
    # heads — confirm the installed version supports this call.
    y_pred_train_prob = model.predict_proba(x_train)
    y_pred_test_prob = model.predict_proba(x_test)
    y_pred_btest_prob = model.predict_proba(x_btest)


    # Attach the probability column to the feature frames (x_trains etc.
    # presumably DataFrames from the enclosing scope). NOTE(review): assumes
    # the prob arrays are effectively 1-D (single output unit); a (n, 2)
    # softmax output would make this assignment fail — TODO confirm.
    x_trains["voice_prob"] = y_pred_train_prob
    x_tests["voice_prob"] = y_pred_test_prob
    x_btests["voice_prob"] = y_pred_btest_prob

Exemplo n.º 4
0
    # NOTE(review): this `)` closes a call (presumably rf_bo.maximize(...))
    # whose opening line is outside this chunk.
    )

    # Best result found by the Bayesian optimiser: a dict holding 'target'
    # (best score) and 'params' (the hyper-parameter values that achieved it).
    fg = rf_bo.max

    # Rebuild a RandomForest with the tuned hyper-parameters. The optimiser
    # proposes floats, hence the int() casts; class_weight={1: 1.46}
    # up-weights the positive class (class-imbalance compensation).
    clfs = RandomForestClassifier(
        n_estimators=int(fg["params"]["n_estimators"]),
        min_samples_split=int(fg["params"]["min_samples_split"]),
        max_features=int(fg["params"]["max_features"]),
        # NOTE(review): original author note said "float" here —
        # max_features also accepts a float fraction of features; the int()
        # cast above treats it as an absolute count instead. Confirm intent.
        max_depth=int(fg["params"]["max_depth"]),
        random_state=5,
        class_weight={1: 1.46})
    clfs.fit(x_train, y_train)

    # Evaluate on train / test / the extra "b-test" frame
    # (target column "is_pigeon").
    print("================训练集================")
    evalution_model(clfs, x_train, y_train)
    print("================测试集================")
    evalution_model(clfs, x_test, y_test)
    print("===========b_test===================")
    evalution_model(clfs, df_btest.drop("is_pigeon", axis=1),
                    df_btest["is_pigeon"])

    # CatBoost base model (disabled) — kept for reference; the commented
    # block is truncated at the end of this chunk.
    # from catboost import CatBoostClassifier, CatBoostRegressor, Pool
    #
    #
    # def catboost_cv(n_estimators, learning_rate, depth, l2_leaf_reg):
    #     clf = CatBoostClassifier(
    #                             n_estimators=int(n_estimators),
    #                             learning_rate=learning_rate,
    #                             depth=int(depth),