Example no. 1
import numpy as np
import matplotlib.pyplot as plt
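# NOTE: `classifier`, `grid_search`, `X_train` and `Y_train` are not defined in
# this snippet. A minimal sketch of how they might have been created (the
# RBF-kernel SVC and the parameter grid below are assumptions, suggested by the
# "Kernel SVM" plot titles):
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV

classifier = SVC(kernel='rbf', random_state=0)  # assumed model
classifier.fit(X_train, Y_train)                # X_train/Y_train assumed given
grid_search = GridSearchCV(estimator=classifier,
                           param_grid=[{'C': [1, 10, 100],
                                        'gamma': [0.1, 0.5, 0.9]}],  # hypothetical grid
                           scoring='accuracy',
                           cv=10)
grid_search.fit(X_train, Y_train)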
best_parameters = grid_search.best_params_
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, Y_set = X_train, Y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(Y_set)):
    plt.scatter(X_set[Y_set == j, 0], X_set[Y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising the Test set results
X_set, Y_set = X_test, Y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(Y_set)):
    plt.scatter(X_set[Y_set == j, 0], X_set[Y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Fitting Linear Regression to the dataset (X and Y are assumed to be defined)
from sklearn.linear_model import LinearRegression

lin_reg = LinearRegression()
lin_reg.fit(X, Y)

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, Y)

# Visualising the Linear Regression results
plt.scatter(X, Y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Visualising the Polynomial Regression results (on a finer grid for a smooth curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, Y, color='red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Predicting a new result with Linear Regression (scikit-learn expects a 2D array)
lin_reg.predict([[6.5]])
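# For comparison, the corresponding Polynomial Regression prediction at the
# same point would presumably be (not shown in the original snippet):
lin_reg_2.predict(poly_reg.fit_transform([[6.5]]))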
# -*- coding: utf-8 -*-
"""Estudo de caso: crescimento da população brasileira.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1eByi2L4I4BRl3Qdkaf4jsaYXUKoGt3j7
"""

# Case study: growth of the Brazilian population
# Data source: DATASUS
import matplotlib.pyplot as plt

dados = open("populacao-brasileira.csv").readlines()

x = []
y = []

# Skip the header line, then parse semicolon-separated "year;population" rows
for i in range(1, len(dados)):
    linha = dados[i].split(";")
    x.append(int(linha[0]))
    y.append(int(linha[1]))
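# Assumed layout of populacao-brasileira.csv (hypothetical values shown),
# implied by the parsing above: a header line, then semicolon-separated rows:
#
#   ano;populacao
#   1980;119000000
#   1981;121000000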

plt.plot(x, y, color="k", linestyle="--")
plt.bar(x, y, color='#e4e4e4')
plt.title("Growth of the Brazilian population, 1980 to 2016")
plt.xlabel("year")
plt.ylabel("population x 100,000,000")
#plt.show()
plt.savefig("populaçao_brasileira.png", dpi=300)
Example no. 4
import matplotlib.pyplot as plt
import random
n = int(input("Enter no. of students: "))
l = list(range(n))  # roll numbers 0 .. n-1
m = []              # one random mark per student
for i in range(n):
    x = random.randint(40, 100)
    m.append(x)
print(m)
print(l)
plt.plot(l, m)
plt.title("Roll No. vs Marks")
plt.xlabel("Roll No.")
plt.ylabel("Marks")
plt.show()
Example no. 5
import numpy as np
import matplotlib.pyplot as plt


def fit_linear(filename):
    my_file = open(filename)
    data = my_file.read()
    data = data.split('\n')

    # The number of space-separated fields on the first line is used below to
    # decide between the column layout and the row layout of the input file
    mydata = data[0]
    datalist = mydata.split(' ')
    sum1 = len(datalist)

    for z in data:
        if 'x axis' in z:
            xlabel = z.split(":")[1]
        if 'y axis' in z:
            ylabel = z.split(":")[1]

    # Parse the data; both layouts then share the same fitting code
    if sum1 == 4:
        (x, Y, Dx, dY) = check_column(data)
    else:
        (x, Y, Dx, dY) = check_rows(data)

    a = funa(x, Y, dY)
    b = funb(Y, a, x, dY)
    chi = funchi2(Y, a, b, x, dY)
    print('a=', a, '+-', funda(x, dY))
    print('b=', b, '+-', fundb(x, dY))
    print('chi2=', chi)
    print('chi2_reduced=', funchi2red(chi, x))

    xx = np.array(x)
    YY = np.array(Y)
    lin = a * xx + b  # fitted line
    xarray = np.array(Dx)
    Yarray = np.array(dY)

    # Data points with error bars in blue, fitted line in red
    plt.errorbar(xx, YY, Yarray, xarray, fmt='none', ecolor='b')
    plt.plot(xx, lin, 'r')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig('linear_fit.svg')  # save before show(), which clears the figure
    plt.show()
    my_file.close()
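The helper functions check_column, check_rows, funa, funb, funda, fundb,
funchi2 and funchi2red used above are not defined in this snippet. Below is a
minimal sketch of the fit formulas, assuming a standard chi-squared linear fit
with per-point uncertainties dY and weighted averages; wavg is a hypothetical
helper, and the check_* parsers are omitted because the input file format is
not shown.

def wavg(values, dY):
    # weighted average with weights 1 / dY^2
    w = 1.0 / np.asarray(dY, dtype=float) ** 2
    return float(np.sum(w * np.asarray(values, dtype=float)) / np.sum(w))

def funa(x, Y, dY):
    # slope: a = (<xY> - <x><Y>) / (<x^2> - <x>^2)
    xy = np.asarray(x, dtype=float) * np.asarray(Y, dtype=float)
    return ((wavg(xy, dY) - wavg(x, dY) * wavg(Y, dY))
            / (wavg(np.square(x), dY) - wavg(x, dY) ** 2))

def funb(Y, a, x, dY):
    # intercept: b = <Y> - a * <x>
    return wavg(Y, dY) - a * wavg(x, dY)

def funda(x, dY):
    # slope uncertainty
    N = len(x)
    return np.sqrt(wavg(np.square(dY), dY)
                   / (N * (wavg(np.square(x), dY) - wavg(x, dY) ** 2)))

def fundb(x, dY):
    # intercept uncertainty
    N = len(x)
    return np.sqrt(wavg(np.square(dY), dY) * wavg(np.square(x), dY)
                   / (N * (wavg(np.square(x), dY) - wavg(x, dY) ** 2)))

def funchi2(Y, a, b, x, dY):
    # chi^2 = sum(((Y_i - (a * x_i + b)) / dY_i) ** 2)
    res = (np.asarray(Y, dtype=float)
           - (a * np.asarray(x, dtype=float) + b)) / np.asarray(dY, dtype=float)
    return float(np.sum(res ** 2))

def funchi2red(chi, x):
    # reduced chi^2: chi^2 over N - 2 degrees of freedom
    return chi / (len(x) - 2)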
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

# standardize the features (`data` is assumed to be a samples-by-features
# matrix with 13 features, matching range(1, 14) in the plot below)
sc = StandardScaler()
data_std = sc.fit_transform(data)

# eigendecomposition of the covariance matrix of the standardized features
cov_mat = np.cov(data_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)

# calculate cumulative sum of explained variances
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)

# plot explained variances
plt.bar(range(1, 14),
        var_exp,
        alpha=0.5,
        align='center',
        label='individual explained variance')
plt.step(range(1, 14),
         cum_var_exp,
         where='mid',
         label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.show()
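A natural continuation (not part of the original snippet) is to sort the
eigenpairs by eigenvalue and project the standardized data onto the leading
components. A minimal sketch, assuming the eigen_vals, eigen_vecs and data_std
computed above:

# sort (eigenvalue, eigenvector) pairs by decreasing eigenvalue
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
               for i in range(len(eigen_vals))]
eigen_pairs.sort(key=lambda pair: pair[0], reverse=True)

# build a projection matrix from the two leading eigenvectors
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
               eigen_pairs[1][1][:, np.newaxis]))

# project the standardized data onto the first two principal components
data_pca = data_std.dot(w)  # shape: (n_samples, 2)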