def Perceptron_Model(X_train, y_train, X_test, y_test, max_iter):
    """Fit a Perceptron, report test accuracy and 3-fold CV scores, and visualize its parameters."""
    model = Perceptron(max_iter = max_iter)
    classifier = model.fit(X_train, y_train)
    testing_model = model.predict(X_test)
    score = accuracy_score(y_test, testing_model)
    cv_scores = cross_val_score(classifier, X_test, y_test, cv = 3)

    print(' ')
    print('===== Perceptron Model =====')
    print('score:', score)
    print('cross validation scores:', cv_scores)
    
    # Visualize parameters in a table.
    visualize_params(model.get_params())
    
    # Display confusion matrix.
    visualize_heatmap(y_test, testing_model, 'Perceptron Model')    
    
    return score
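
# A minimal, self-contained sketch of the same fit / accuracy_score / cross_val_score /
# get_params pattern used above. The breast-cancer dataset and the max_iter value are
# stand-ins chosen only for illustration; visualize_params and visualize_heatmap are
# helpers defined elsewhere in the original project and are omitted here.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = Perceptron(max_iter=1000)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

print('score:', accuracy_score(y_test, y_pred))
print('cross validation scores:', cross_val_score(model, X_test, y_test, cv=3))
print('params:', model.get_params())
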
Example 2
print('Recall:', metrics.recall_score(y_test, y_pred_class))
print('Error rate:', 1 - metrics.accuracy_score(y_test, y_pred_class))

# In[12]:

# Train a perceptron model
from sklearn.linear_model import Perceptron
# n_iter_no_change: number of consecutive iterations with no improvement to wait before stopping early
# eta0: the constant learning rate applied to the weight updates
# random_state: seed used when shuffling the training data, so every run sees the same order
ppn = Perceptron(n_iter_no_change=40, eta0=0.1, random_state=0)
ppn.fit(X_train, y_train)

# In[13]:

ppn.get_params()
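
# The comments in the previous cell describe n_iter_no_change, eta0 and random_state; a
# small self-contained sketch of reading and changing such hyperparameters through
# get_params() / set_params() (the concrete values below are arbitrary examples):
from sklearn.linear_model import Perceptron

clf = Perceptron(n_iter_no_change=40, eta0=0.1, random_state=0)
print(clf.get_params()['eta0'])                 # 0.1
clf.set_params(eta0=0.01, n_iter_no_change=5)   # update hyperparameters in place
print(clf.get_params()['eta0'])                 # 0.01
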

# In[14]:

# Classify the test set; this returns an array of predicted labels
y_pred = ppn.predict(X_test)
# Compute the model's accuracy on the test set
metrics.accuracy_score(y_test, y_pred)

# In[15]:

from sklearn import svm

svr = svm.SVR()
svr.fit(X_train, y_train)
Example 3
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
y = (iris.target == 0).astype(int)  # 1 for Iris setosa, 0 otherwise (np.int was removed from NumPy; use the built-in int)

# Classifier based on Perceptron
model = Perceptron()
model.fit(X, y)

parameters = model.get_params()
for k, v in parameters.items():
    print(k, " : ", v)

print(model.score(X, y))

y_pred = model.predict([[2.0, 0.5]])
print(y_pred)

y_pred = model.predict([[3.0, 1.5]])
print(y_pred)
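
# Beyond get_params(), the fitted attributes coef_ and intercept_ describe the learned
# decision boundary; a self-contained sketch that continues the iris example above:
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
X = iris.data[:, (2, 3)]                # petal length, petal width
y = (iris.target == 0).astype(int)      # 1 for Iris setosa, 0 otherwise

model = Perceptron()
model.fit(X, y)

print(model.coef_, model.intercept_)            # weights w and bias b of w.x + b
print(model.decision_function([[2.0, 0.5]]))    # signed score for a new point
print(model.predict([[2.0, 0.5]]))              # class taken from the sign of the score
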
Example 4
              [2,2],
              [4,4],
              [5,5]])
y = np.array([-1, -1, 1, 1])
w = np.array([0,99, 5])
Example2Perceptron = Perceptron(X,y,plot_data_lines = True, plot_errors = True)
w_ex2 = Example2Perceptron.train(w,epochs = 20)
print(w_ex2)



from sklearn.linear_model import Perceptron
sk_perceptron = Perceptron(tol=1e-5, random_state=0)
sk_perceptron.fit(X,y)
print(sk_perceptron.score(X,y))
print(sk_perceptron.get_params())
print([sk_perceptron.coef_, sk_perceptron.intercept_])
print(sk_perceptron.n_iter_)

sk_bigdata = Perceptron(max_iter = 1000, eta0=0.1, tol=1e-5, random_state=0)
sk_bigdata.fit(X_train, y_train)
print([sk_bigdata.coef_, sk_bigdata.intercept_])
print('Accuracy Training= ',sk_bigdata.score(X_train, y_train)*100)
print('Accuracy Testing= ',sk_bigdata.score(X_test, y_test)*100)


ciplakAyak = X - np.mean(X, axis=0)  # center the data by subtracting the column means
cov = np.dot(ciplakAyak.T, ciplakAyak) / X.shape[0]  # biased (ddof=0) covariance matrix
print(cov)
cov_numpy = np.cov(X, rowvar = False, ddof = 0)
print(cov_numpy)
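
# The hand-written covariance and np.cov should agree, since dividing by X.shape[0]
# corresponds to ddof=0; a self-contained check on synthetic data (the random matrix
# below is made up purely for illustration):
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(100, 3))

centered = A - np.mean(A, axis=0)
cov_manual = np.dot(centered.T, centered) / A.shape[0]
cov_numpy_check = np.cov(A, rowvar=False, ddof=0)

print(np.allclose(cov_manual, cov_numpy_check))  # True
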
Example 5

import numpy as np
from sklearn.linear_model import SGDClassifier, Perceptron

r'''
Characteristics:
- binary classification
- the feature space is linearly separable
'''
X = np.array([(3,3),(4,3),[1,1]])
y = np.array(['b','b','a'])
module = Perceptron(max_iter=20, verbose=1)  # the old n_iter argument was removed from scikit-learn; use max_iter
print(module.get_params())
module.fit(X, y)
print(module.predict(np.array([(0, 0), (2, 10), (4, 5), (1, 1)])))
print(module.get_params(deep=True))
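
# On a linearly separable toy set like the three points above, the perceptron should
# separate the training data perfectly; a self-contained check (random_state and
# max_iter below are arbitrary choices made here for reproducibility):
import numpy as np
from sklearn.linear_model import Perceptron

X_toy = np.array([(3, 3), (4, 3), (1, 1)])
y_toy = np.array(['b', 'b', 'a'])

clf = Perceptron(max_iter=20, random_state=0)
clf.fit(X_toy, y_toy)
print(clf.score(X_toy, y_toy))      # expected: 1.0 on this separable toy set
print(clf.coef_, clf.intercept_)    # the separating hyperplane it found
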
Example 6
    jinputdata = np.array(jpos + jneg)
    jinputlabels = jposlab + jneglab
    inputdata = np.array(pos + neg)
    inputlabels = poslab + neglab
    with open('inputdata.pkl', 'wb') as f:
        pickle.dump(inputdata, f)
    with open('inputlabels.pkl', 'wb') as f:
        pickle.dump(inputlabels, f)
    with open('validbound.pkl', 'wb') as f:
        pickle.dump(jinputdata, f)
    with open('validlab.pkl', 'wb') as f:
        pickle.dump(jinputlabels, f)
    perceptron = Perceptron()
    perceptron.fit(inputdata.reshape(-1, 1), inputlabels)
    with open('perceptron-parameters.pkl', 'wb') as f:
        pickle.dump(perceptron.get_params(), f)
#
score = perceptron.score(jinputdata.reshape(-1, 1), jinputlabels)
with open('perceptron-score.pkl', 'wb') as f:
    pickle.dump(score, f)
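
# Note that pickling get_params() stores only the hyperparameters, not the learned
# weights, so the dict can recreate an equivalent *unfitted* estimator that must be
# refit (as the reload code below does). A self-contained sketch of that round trip
# (the eta0 / max_iter values are arbitrary examples):
import pickle
from sklearn.linear_model import Perceptron

params = Perceptron(eta0=0.5, max_iter=500).get_params()
blob = pickle.dumps(params)                    # analogous to 'perceptron-parameters.pkl'

restored = Perceptron(**pickle.loads(blob))    # same hyperparameters, no fitted weights
print(restored.get_params() == params)         # True
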

with open('inputdata.pkl', 'rb') as f:
    inputdata = pickle.load(f)
with open('inputlabels.pkl', 'rb') as f:
    inputlabels = pickle.load(f)

perceptron = Perceptron()
perceptron.fit(inputdata.reshape(-1, 1), inputlabels)

file_path = "images_segmentation.pkl"
n_bytes = 2 ** 31
Example 7
    word = keys[n]
    wordTest.append(word)
print(wordTest)

####################################################################################################

#Test these words for sentiment analysis
#Training and testing accuracy are 83.14% and 81.14%, respectively.
#Occasional misclassification errors can still be observed.

for i in wordTest:
    predSent(i)
    
####################################################################################################
#Parameters of the model
clf.get_params()

#Weights associated with the model
print("Weights assigned by sklearn Perceptron: \n", clf.coef_[0])

####################################################################################################
#Misclassification errors in the test dataset

y_predict = clf.predict(X_test)
cnt = 0
for i in range(len(X_test)):
    if y_predict[i] != y_test[i]:
        cnt = cnt + 1
print("Total number of misclassification with Perceptron classifier: ", cnt) #221

####################################################################################################
Example 8
    cols = [1]  # get rid of index and day, but keep month
    dataSet = dataSet.drop(dataSet.columns[cols], axis=1)
    dataSet['Month'] = dataSet['Month'].apply(
        lambda x: math.floor(x / 4))  # bucket months into four coarse groups (0-3), roughly by season

    msk = np.random.rand(len(dataSet)) < 0.8

    trainData = dataSet[msk]
    testData = dataSet[~msk]

    X_train, Y_train = getXandY(trainData)
    X_test, Y_test = getXandY(testData)

    pla = Perceptron(max_iter=1000, random_state=np.random, warm_start=True)
    print(pla.get_params())

    for i in range(0, 700):
        pla = pla.fit(X_train, Y_train)
        score = pla.score(X_test, Y_test)
        Y_pred = pla.predict(X_test)
        # F1 Measure
        Y_test = pd.Series(Y_test)
        series = Y_test.value_counts()
        null_accuracy = (series[0] / (series[0] + series[1]))
        print('Null Accuracy: ', str(null_accuracy))
        cm = confusion_matrix(Y_test, Y_pred)
        print('Confusion matrix\n\n', cm)

        print('\nTrue Positives(TP) = ', cm[0, 0])
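
# The loop above mentions an F1 measure but the excerpt never computes one, and with
# sklearn's confusion_matrix, cm[i, j] counts samples whose true label is i and whose
# predicted label is j (so for 0/1 labels cm[1, 1] is the true positives of class 1).
# A self-contained sketch with made-up labels:
import numpy as np
from sklearn.metrics import confusion_matrix, f1_score

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_hat = np.array([0, 1, 1, 1, 0, 0, 1, 0])

cm = confusion_matrix(y_true, y_hat)
print(cm)                                   # rows: true class, columns: predicted class
print('TP =', cm[1, 1], 'TN =', cm[0, 0], 'FP =', cm[0, 1], 'FN =', cm[1, 0])
print('F1 =', f1_score(y_true, y_hat))
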
Example 9
    plt.show()
    # Waiting to implement
    pass


if __name__ == '__main__':
    """
    PLA
    """
    X, Y = getInputData("classification.txt")
    X = np.c_[np.ones(len(X)), np.array(X)]  # prepend a column of ones (the bias term) to the coordinates
    pla = Perceptron()
    pla.set_params(max_iter=200)  # configure the iteration cap via set_params; n_iter is not a Perceptron attribute
    info = pla.get_params()
    print(info)
    pla = pla.fit(X, Y)
    score = pla.score(X, Y)
    W = pla.coef_
    print('score =', score)
    print('W=', W)
    plot(X, Y, W[0])
    """
    pocket PLA
    """
    iterList = []
    numList = []
    best_score = 0
    W = None
    X, Y = getInputData1("classification.txt")  #Get column 5 as Y