def main():
        from sklearn import datasets
        iris = datasets.load_iris()
        X = iris.data[:, [2, 3]]
        y = iris.target

        from sklearn.model_selection import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

        nb = NaiveBayes()
        nb.fit(X_train, y_train)

        import pandas as pd
        import matplotlib.pyplot as plt
        import numpy as np
        from Perceptron import plot_decision_regions

        plot_decision_regions(X, y, classifier=nb)
        plt.title('Naive Bayes Trial')
        plt.xlabel('Petal length [cm]')
        plt.ylabel('Petal width [cm]')
        plt.show()
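
The NaiveBayes class and the plot_decision_regions helper imported from Perceptron belong to the surrounding project and are not shown here. As a rough stand-in only (not the original implementation), a minimal Gaussian naive Bayes classifier exposing the fit/predict interface that plot_decision_regions needs could look like this:

# Hypothetical stand-in for the project's NaiveBayes class (Gaussian naive Bayes).
# Only fit() and predict() are provided, which is all plot_decision_regions requires.
import numpy as np

class NaiveBayes:
    def fit(self, X, y):
        self.classes_ = np.unique(y)
        # per-class mean, variance and log-prior estimated from the training data
        self.theta_ = np.array([X[y == c].mean(axis=0) for c in self.classes_])
        self.var_ = np.array([X[y == c].var(axis=0) + 1e-9 for c in self.classes_])
        self.log_prior_ = np.log(np.array([np.mean(y == c) for c in self.classes_]))
        return self

    def predict(self, X):
        # Gaussian log-likelihood per class plus log-prior; pick the argmax
        log_lik = -0.5 * (np.log(2 * np.pi * self.var_)
                          + (X[:, None, :] - self.theta_) ** 2 / self.var_).sum(axis=2)
        return self.classes_[np.argmax(log_lik + self.log_prior_, axis=1)]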
Example #2
df['labels'] = labels
df = df.sort_values('labels')

# ===== Number of unique patterns =====
print("Number of unique patterns:", df['labels'].nunique())
print(df['labels'].unique())

# ===== Stocks moving together vs. stocks that differ from each other =====
print("Stocks moving together and stocks that are different from each other")
print("Stocks apparently similar in performance")
df_cat0 = df.loc[df['labels'] == 0]
df_cat1 = df.loc[df['labels'] == 1]
df_cat2 = df.loc[df['labels'] == 2]
df_cat3 = df.loc[df['labels'] == 3]




from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

ks = range(1, 10)
inertias = []
for k in ks:
    model = KMeans(n_clusters=k, n_init=10)
    model.fit(df)
    inertias.append(model.inertia_)

plt.plot(ks, inertias, '-o')
plt.xlabel('Number of clusters, k')
plt.ylabel('Inertia')
plt.show()
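
The elbow plot leaves the choice of k to eye-balling. As an optional cross-check that is not part of the original snippet, silhouette scores can be computed over the same range of k (higher is better); this assumes df holds only the numeric features being clustered.

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

for k in range(2, 10):  # the silhouette score needs at least two clusters
    labels_k = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(df)
    print(k, silhouette_score(df, labels_k))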


Example #3
kolon_eksik_deger_toplami = veriler.isnull().sum()  # sum of missing values per column
print(kolon_eksik_deger_toplami)

X = veriler.iloc[:, 1:].values

from sklearn.cluster import KMeans

sonuclar = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, random_state=0)
    kmeans.fit(X)
    sonuclar.append(kmeans.inertia_)

plt.plot(range(1, 11), sonuclar)
plt.title('Elbow Method for Determining the Number of Clusters')
plt.xlabel('Number of Clusters')
plt.show()

kmeans = KMeans(n_clusters=6, init='k-means++', random_state=0)
Y_tahmin = kmeans.fit_predict(X)
print(Y_tahmin)
for i in range(0, 181):
    print(veriler.iloc[i, 0])
    print(Y_tahmin[i])

plt.scatter(X[Y_tahmin == 0, 0],
            X[Y_tahmin == 0, 1],
            s=75,
            c='cyan',
            label='Cluster 1')
plt.scatter(X[Y_tahmin == 1, 0],
            X[Y_tahmin == 1, 1],
            s=75,
            c='magenta',  # colour chosen arbitrarily; the source snippet is cut off at this point
            label='Cluster 2')
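
Writing one scatter call per cluster gets repetitive, and the source snippet breaks off after the second cluster. A loop over the fitted clusters, plus the centroids, covers all of them; colours are left to matplotlib's defaults here.

for cluster in range(kmeans.n_clusters):
    plt.scatter(X[Y_tahmin == cluster, 0], X[Y_tahmin == cluster, 1],
                s=75, label=f'Cluster {cluster + 1}')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            s=200, c='black', marker='X', label='Centroids')
plt.legend()
plt.show()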
Example #4
plt.show()

h = sns.PairGrid(iris)
h = h.map(plt.scatter)
sns.pairplot(iris)
plt.show()

i = sns.JointGrid(x='sepal_length', y='sepal_width', data=iris)
i = i.plot(sns.regplot, sns.histplot)  # distplot is deprecated in recent seaborn versions
plt.show()

########################### Configuration ######################
g.despine(left=True)

g.set_ylabels('Survived')

g.set_xticklabels(rotation=45)

g.set_axis_labels('Survived', 'Sex')

h.set(xlim=(0, 5), ylim=(0, 5), xticks=[0, 2.5, 5], yticks=[0, 2.5, 5])

plt.title('Title')
plt.ylabel('Y')
plt.xlabel('X')

plt.ylim(0, 100)
plt.xlim(0, 100)

plt.setp(ax, yticks=[0, 5])  # Setting axis property
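
The configuration calls above act on grids (g, h) and an Axes (ax) created elsewhere. For context, a minimal setup on which calls such as despine, set_ylabels and set_xticklabels apply might look like the sketch below; the Titanic dataset and the column names are illustrative, not taken from the original.

import seaborn as sns
import matplotlib.pyplot as plt

titanic = sns.load_dataset('titanic')                             # illustrative data
g = sns.catplot(x='sex', y='survived', data=titanic, kind='bar')  # returns a FacetGrid
g.despine(left=True)
g.set_ylabels('Survived')
g.set_xticklabels(rotation=45)

fig, ax = plt.subplots()
plt.setp(ax, yticks=[0, 5])
plt.show()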
# fitting linear regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, Y)

#fitting polynomial regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg=PolynomialFeatures(degree=4)
X_poly=poly_reg.fit_transform(X)
lin_reg_2=LinearRegression()
lin_reg_2.fit(X_poly,Y)

# visualising the linear regression result
plt.scatter(X, Y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# visualising the polynomial regression result
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, Y, color='red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# predict a new result with linear regression
lin_reg.predict([[6.5]])
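
The snippet only predicts the 6.5 position level with the plain linear model; the polynomial counterpart, not shown in the source, would be:

# predict the same position level with the degree-4 polynomial model
lin_reg_2.predict(poly_reg.transform([[6.5]]))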
Example #6
                                                    random_state=0)

# feature scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.transform(X_test)"""

#fitting simple linear regression to the training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, Y_train)

#predicting the test set result
Y_pred = regressor.predict(X_test)

#  visualizing the training set
plt.scatter(X_train, Y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()

# visualizing the test set
plt.scatter(X_test, Y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
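
The example ends with the plots; as an optional extra not present in the original, the quality of the fit on the held-out data can be quantified with the usual scikit-learn metrics:

from sklearn.metrics import mean_squared_error, r2_score

print('Test R^2:', r2_score(Y_test, Y_pred))
print('Test MSE:', mean_squared_error(Y_test, Y_pred))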
Example #7
df5 = pd.read_excel("METEOROLOGICAL_SUB_DIVISION_WISE_ANNUAL_RAINFALL.xls")
df5

# In[82]:

# sub-division(s) with the lowest 2010 rainfall
df5[df5['2010 (in millimetre)'] == df5['2010 (in millimetre)'].min()][[
    '2010 (in millimetre)', 'Sub-division'
]]
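
A related lookup that avoids writing the boolean mask by hand, not part of the original notebook, is pandas' nsmallest/nlargest:

# sub-divisions with the lowest and highest 2010 rainfall (illustrative)
print(df5.nsmallest(5, '2010 (in millimetre)')[['Sub-division', '2010 (in millimetre)']])
print(df5.nlargest(5, '2010 (in millimetre)')[['Sub-division', '2010 (in millimetre)']])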

# In[83]:

import matplotlib.pyplot as plt
import numpy as np

plt.plot(df5['Sub-division'],
         df5['2002 (in millimetre)'],
         linestyle="dashed",
         marker='*',
         color="purple",
         label='2002 rainfall')
x_pos = np.arange(len(df5))
plt.xlabel("Sub divisions")
plt.ylabel("Rainfall (in millimetre)")
plt.legend()
plt.title('Rainfall analysis')
plt.show()

# In[69]:

import matplotlib.pyplot as plt
plt.rc('font', weight='bold')

# In[ ]:
Example #8
import matplotlib.pyplot as plt
import random
n = int(input("Enter no. of student: "))
l = list(range(n))  # roll numbers 0..n-1
m = []
for i in range(n):
    x = random.randint(40, 100)
    m.append(x)
print(m)
print(l)
plt.plot(l, m)
plt.title("Roll No. VS Marks ")
plt.Xlabel("Roll No.")
plt.Ylabel("Marks")
plt.show()
Example #9
sc_y = StandardScaler()
X = sc_x.fit_transform(X)
y = sc_y.fit_transform(y.reshape(-1, 1)).ravel()  # StandardScaler needs a 2-D array; ravel back to 1-D for fitting

#fitting regressor

from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
regressor.fit(X, y)
#prediction

y_pred = sc_y.inverse_transform(
    regressor.predict(sc_x.transform(np.array([[6.5]]))).reshape(-1, 1))

#visualisation
plt.scatter(X, y, color='green')
plt.plot(X, regressor.predict(X), color='red')
plt.title('bluffer detector')
plt.xlabel('experience')
plt.ylabel('salary')
plt.show()

X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='pink')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('bluffer detector')
plt.xlabel('experience')
plt.ylabel('salary')
plt.show()
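
Manually scaling X and y and inverting the transform for every prediction is easy to get wrong. As an alternative sketch, not from the original, the same SVR can be wrapped so scikit-learn handles the target scaling; X_raw and y_raw stand for the unscaled feature matrix and target from before the StandardScaler step.

from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

# X_raw / y_raw are placeholders for the unscaled data loaded earlier
model = TransformedTargetRegressor(
    regressor=make_pipeline(StandardScaler(), SVR(kernel='rbf')),
    transformer=StandardScaler())
model.fit(X_raw, y_raw)
print(model.predict([[6.5]]))  # prediction already back on the original salary scale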