Example no. 1
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# NBDecisionTreeClassifier and print_result are project-specific helpers
# assumed to be defined elsewhere in this codebase.

def evaluate_model(path):
    df = pd.read_csv(path)
    x, y = df.iloc[:, :-1].values, df.iloc[:, -1].values
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=1337)

    # Baseline: a plain random forest with the default decision trees.
    rf = RandomForestClassifier(n_estimators=100, random_state=1337)
    print_result("Without NaiveBayes:", rf, x_train, x_test, y_train, y_test)

    # Swap in the NaiveBayes-augmented tree as the forest's base estimator.
    # Note: this override only takes effect if the installed scikit-learn
    # still reads `base_estimator` when it builds the trees at fit time.
    rf_nb = RandomForestClassifier(n_estimators=100, random_state=1337)
    rf_nb.base_estimator = NBDecisionTreeClassifier()
    print_result("With NaiveBayes:", rf_nb, x_train, x_test, y_train, y_test)
Example no. 2
ensembles_results['train_score'].append(knn_bagging_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(knn_bagging_ensemble.score(X_validation, y_validation))


# In[157]:


# As DecisionTreeClassifier was the most successful in previous ensembles,
# now I will try out RandomForestClassifier


# In[158]:


# Reuse the tuned decision tree as the forest's base estimator
# (overriding `base_estimator` on an already-built RandomForestClassifier
# only works where scikit-learn still reads that attribute at fit time).
rfc_ensemble = RandomForestClassifier(n_estimators=100, random_state=0)
rfc_ensemble.base_estimator = dtc_estimator


# In[159]:


rfc_ensemble.fit(X_train, y_train)

ensembles_results['ensemble'].append('RandomForestClassifier')
ensembles_results['train_score'].append(rfc_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(rfc_ensemble.score(X_validation, y_validation))
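
# Note: a custom base estimator can also be plugged in through BaggingClassifier,
# which accepts it as a constructor argument. A minimal sketch, assuming the same
# dtc_estimator and splits used above (the keyword is `estimator` from
# scikit-learn 1.2 onward, `base_estimator` in older releases):

from sklearn.ensemble import BaggingClassifier

dtc_bagging_ensemble = BaggingClassifier(estimator=dtc_estimator,
                                         n_estimators=100,
                                         random_state=0)
dtc_bagging_ensemble.fit(X_train, y_train)
print(dtc_bagging_ensemble.score(X_validation, y_validation))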


# In[160]: