Example #1
import numpy as np
import pandas as pd
from numpy import ravel
from classificationdata import classificationdata
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score, roc_auc_score
from sklearn.svm import SVC

### Scores from earlier model runs, kept for comparison:
### Bayes: 0.50316455696202533, 0.49936708860759493
### Log: .49786628733997157
### lin svm: 0.49644381223328593 SVC(C = 7.5, kernel = 'linear', gamma = 'auto')
### SVC(C = .005, kernel = 'linear'), acc:  0.520410, roc: 0.519155, kappa:  0.038282, f1: 0.463038

data = classificationdata()
xvars = list(data)[:-1]  # every column except the last (the label)

useset = data  # use the full dataset; the holdout split below is disabled
#useset, holdoutset = train_test_split(data, test_size = .1, random_state = 1108)

#kernels = ['poly', 'rbf', 'sigmoid']
#Cs = [.01, .1, 1, 10]
#gammas = [.001, .01, .05, .1]

Cs = [.005, .01, .05]

end = len(Cs) * 9  # total number of runs, for progress reporting
at = 0             # runs completed so far

parameterscores = pd.DataFrame()  # one row of metrics per run
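
### Hedged sketch of how the sweep might continue. The label column, the 9
### repeated splits implied by `end = len(Cs) * 9`, a binary 0/1 target, and
### the result-column names are assumptions, not taken from the original script.
yvar = list(data)[-1]  # assumed: last column returned by classificationdata() is the label
for C in Cs:
    for split in range(9):  # assumed: 9 random train/test splits per C value
        trainset, testset = train_test_split(useset, test_size=.2, random_state=split)
        model = SVC(C=C, kernel='linear', gamma='auto')
        model.fit(trainset[xvars], ravel(trainset[[yvar]]))
        preds = model.predict(testset[xvars])

        # record the same four metrics noted in the comments above
        row = pd.DataFrame([{'C': C,
                             'acc': accuracy_score(testset[yvar], preds),
                             'roc': roc_auc_score(testset[yvar], preds),
                             'kappa': cohen_kappa_score(testset[yvar], preds),
                             'f1': f1_score(testset[yvar], preds)}])
        parameterscores = pd.concat([parameterscores, row], ignore_index=True)

        at += 1
        print('%s / %s' % (at, end))

print(parameterscores.groupby('C').mean())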
Example #2
import numpy as np
import pandas as pd
from classificationdata import classificationdata
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

np.random.seed(42)
traindata = pd.read_csv('train_line_data.csv')
#testdata = pd.read_csv('test_line_data.csv')  # unused; testdata is rebuilt from classificationdata('test') below
features = list(traindata)[1:]  # skip the first column of the CSV
x_feat = features[:-2]          # everything except the 'y' and 'juice' columns
train_x = traindata[x_feat]
train_y = traindata['y']
train_juice = traindata['juice']
testdata = classificationdata('test')
test_x = testdata[x_feat]
test_y = testdata['y']
test_juice = testdata['juice']

model = LogisticRegression()
model.fit(train_x, train_y)

confidence_threshold = .56  # only bet games where the model's confidence exceeds this
bank = 1000                 # starting bankroll

bankhistory = []   # bankroll after each game
pred = None        # state updated inside the game-by-game loop below
inputdata = None
prediction = None
gamejuice = None
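
### Hedged sketch of the game-by-game simulation the variables above set up.
### The flat stake, the American-odds payout formula for 'juice', and the loop
### structure are assumptions, not taken from the original script.
stake = 10  # assumed flat bet per game
for i in range(len(test_x)):
    inputdata = test_x.iloc[[i]]                    # single game as a one-row frame
    prediction = model.predict_proba(inputdata)[0]  # [P(class 0), P(class 1)]
    gamejuice = test_juice.iloc[i]

    confidence = prediction.max()
    pred = model.classes_[np.argmax(prediction)]

    if confidence >= confidence_threshold:  # only bet when the model is confident enough
        if pred == test_y.iloc[i]:
            # assumed: 'juice' is American odds, e.g. -110 risks 110 to win 100
            if gamejuice < 0:
                bank += stake * (100.0 / abs(gamejuice))
            else:
                bank += stake * (gamejuice / 100.0)
        else:
            bank -= stake

    bankhistory.append(bank)

plt.plot(bankhistory)
plt.xlabel('Game')
plt.ylabel('Bank')
plt.title('Bankroll at confidence threshold %s' % confidence_threshold)
plt.show()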