Example #1
0
# Split training data into train/test sets.
# NOTE(review): Y, emnist, random_state, bs, np, train_test_split and
# balanced_accuracy_score are defined/imported elsewhere (not shown here).

train, test, train_lab, test_lab = train_test_split(Y,
                                                    emnist.labels,
                                                    stratify=emnist.labels,  # preserve class proportions in both splits
                                                    test_size=0.3,
                                                    random_state=random_state)

# Map character labels to integer class ids 0..8 for the classifier.
D = {'a': 0, 'b': 1, 'Cc': 2, 'd': 3, 'e': 4, 'A': 5, 'B': 6, 'D': 7, 'E': 8}

# Encode labels to integer arrays ONCE; the original re-built the test
# encoding in two separate comprehensions (error rate and balanced accuracy).
train_enc = np.array([D[i] for i in train_lab])
test_enc = np.array([D[i] for i in test_lab])

# Fit a Bayesian softmax classifier via MCMC sampling.
# numSim/burnIn: chain length and discarded warm-up draws;
# candVar/paramVar: proposal and prior variances — TODO confirm against bs docs.
model = bs.BayesSoftMaxClassifier(train,
                                  train_enc,
                                  numClasses=9,
                                  numSim=10000,
                                  burnIn=5000,
                                  candVar=.008,
                                  paramVar=.06)
model.SamplePosterior()

# Evaluation: Predict stores results on model.predictions.
model.Predict(test)
err = np.mean(model.predictions != test_enc)
bal = balanced_accuracy_score(test_enc, model.predictions)
print("Error in BayesSoftmax: " + str(err))
# Fix: `bal` was computed but never reported in the original.
print("Balanced accuracy in BayesSoftmax: " + str(bal))

# Diagnostics: MCMC trace, posterior of one parameter, predictive distribution.
model.PlotParamTrace(-1)
model.PlotParamDist(0)
model.PlotPredictiveDistribution(0)
Example #2
0
# Import libraries
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier

# Define data, split to train & test, create model and fit data.
# NOTE(review): Variables, Classes, params, X_test and Y_test must be
# defined before this snippet runs — not shown in this file.
X = Variables
Y = Classes
# BUG FIX: the original `RandomForestClassifier(n_estimators=10, params)` is a
# SyntaxError (positional argument after a keyword argument). `params` is
# presumably a dict of extra estimator keyword arguments — unpack it.
Model = RandomForestClassifier(n_estimators=10, **params).fit(X, Y)
# Predict new classes.
# BUG FIX: sklearn estimators expose `predict`, not `Predict`.
Y_predict = Model.predict(X_test)
# Score model: fraction of exactly matching labels.
accuracy_score(Y_test, Y_predict)