def ModelRank(p_dict, model_file, pasttourn=True):
    """Predict which player wins the match described by ``p_dict``.

    Parameters
    ----------
    p_dict : dict
        Match description; its first key is used as the round identifier.
    model_file : str
        Path to a joblib-serialised scikit-learn style classifier.
    pasttourn : bool, optional
        Forwarded to ``data_prep_func``; controls past-tournament features.

    Returns
    -------
    int
        0 or 1 — the index of the predicted winning player.
    """
    clf = joblib.load(model_file)

    # Materialise the match features into a temporary spreadsheet, then
    # read them back as a DataFrame for preprocessing.
    wrapper(p_dict,
            type='dict',
            out_file='_temp.xls',
            tourn='AUS',
            court='H',
            rd=list(p_dict.keys())[0],
            all_rounds=False)
    match_df = pd.read_excel('_temp.xls', header=0, index_col=0)

    features, _ = data_prep_func(match_df,
                                 X_list="X_list_logreg.save",
                                 full_data=False,
                                 drop_extra=False,
                                 modtype='logreg',
                                 pasttourn=pasttourn)

    # Restrict/reorder the columns to exactly those the model was trained on.
    trained_columns = joblib.load("X_list_logreg.save")
    features = features[trained_columns]

    # The classifier outputs 1 when player 0 wins, so invert the label to
    # obtain the index of the winning player.
    prediction = clf.predict(features)
    return 1 - prediction[0]
def NeuNetRank(p_dict, pasttourn=True):
    """Predict the winner of the match in ``p_dict`` with the saved Keras net.

    Parameters
    ----------
    p_dict : dict
        Match description; its first key is used as the round identifier.
    pasttourn : bool, optional
        Forwarded to ``data_prep_func``; controls past-tournament features.

    Returns
    -------
    int
        Hard 0/1 class label obtained by rounding the network's output.
    """
    net = load_model('model_50_2.h5')

    # Materialise the match features into a temporary spreadsheet, then
    # read them back as a DataFrame for preprocessing.
    wrapper(p_dict,
            type='dict',
            out_file='_temp.xls',
            tourn='AUS',
            court='H',
            rd=list(p_dict.keys())[0],
            all_rounds=False)
    match_df = pd.read_excel('_temp.xls', header=0, index_col=0)

    features, _ = data_prep_func(match_df,
                                 X_list="X_list_neunet.save",
                                 full_data=False,
                                 drop_extra=True,
                                 pasttourn=pasttourn)

    # The network emits a probability; round it to a hard 0/1 label.
    probability = net.predict(features)
    return int(round(probability.item(0)))
# --- Exemplo n.º 3 ---
import joblib
#from keras.models import load_model
import pandas as pd
from data_prep import data_prep_func

# Load the previously trained base classifiers from disk.
logreg = joblib.load("logreg_new.h5")
#neunet = load_model("model_50_2.h5")
ranfor = joblib.load("ranfor_new.h5")
xg_cl = joblib.load("xg_cl.h5")

# Equal-weight voting ensemble over the three fitted models.
# refit=False keeps the already-trained estimators untouched.
eclf = EnsembleVoteClassifier(clfs=[logreg, ranfor, xg_cl],
                              weights=[1, 1, 1],
                              refit=False)

# Tournaments whose data is used for training (2017-2020).
tourn_list = [
    'A2017', 'F2017', 'W2017', 'U2017', 'A2018', 'F2018', 'W2018', 'U2018',
    'A2019', 'F2019', 'W2019', 'U2019', 'A2020', 'U2020', 'F2020'
]

# Read each tournament spreadsheet and stack them into one training frame.
df_list = [
    pd.read_excel('./data/data_' + f + '.xls', header=0, index_col=0)
    for f in tourn_list
]
df_train = pd.concat(df_list, ignore_index=True)

X_train, y_train = data_prep_func(df_train, full_data=True, drop_extra=True)

# Fit the voting wrapper (base models stay as-is) and persist it.
joblib.dump(eclf.fit(X_train, y_train), "eclf.h5")
# --- Exemplo n.º 4 ---
# Tournaments whose data is used for training (2017-2020).
tourn_list = [
    'A2017', 'F2017', 'W2017', 'U2017', 'A2018', 'F2018', 'W2018', 'U2018',
    'A2019', 'F2019', 'W2019', 'U2019', 'A2020', 'U2020', 'F2020'
]

# Read each tournament spreadsheet and stack them into one training frame.
df_list = [
    pd.read_excel('./data/data_' + f + '.xls', header=0, index_col=0)
    for f in tourn_list
]
df_train = pd.concat(df_list, ignore_index=True)

X_train, y_train = data_prep_func(df_train, modtype="logreg")

# Persist the training feature names so prediction code can align columns.
X_list = list(X_train.columns.values)
joblib.dump(X_list, "X_list_logreg.save")

# Hyperparameter grid for the random forest search.
n_space = range(10, 100, 10)
depth_space = range(1, 9)
param_grid = {
    'n_estimators': n_space,
    'max_depth': depth_space,
    'criterion': ['gini', 'entropy']
}

# Instantiate a random forest classifier: ranfor
ranfor = RandomForestClassifier()
# --- Exemplo n.º 5 ---
# Tournaments whose data is used for training (2017-2020).
tourn_list = [
    'A2017', 'F2017', 'W2017', 'U2017', 'A2018', 'F2018', 'W2018', 'U2018',
    'A2019', 'F2019', 'W2019', 'U2019', 'A2020', 'U2020', 'F2020'
]

# Read each tournament spreadsheet and stack them into one training frame.
df_list = [
    pd.read_excel('./data/data_' + f + '.xls', header=0, index_col=0)
    for f in tourn_list
]
df_train = pd.concat(df_list, ignore_index=True)

X_train, y_train = data_prep_func(df_train, modtype="logreg")

# Persist the training feature names so prediction code can align columns.
X_list = list(X_train.columns.values)
joblib.dump(X_list, "X_list_logreg.save")

# Hyperparameter grid: regularisation strength C on a log scale, plus
# both L1 and L2 penalties (liblinear supports both).
c_space = np.logspace(-5, 8, 25)
param_grid = {'C': c_space, 'penalty': ['l1', 'l2']}

# Instantiate a logistic regression classifier: logreg
logreg = LogisticRegression(solver='liblinear')

# 5-fold cross-validated grid search over the grid above.
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)

# Fit it to the data
logreg_cv.fit(X_train, y_train)