Example #1
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

# xa, xb, xc and X come from earlier data preparation (not shown here).
# Integer-valued target built from three feature columns (floor division keeps it discrete).
y = (xa + xb + xc) // 3
X_train, X_valid, y_train, y_valid = train_test_split(X, y)

# Candidate estimators -- each assignment overrides the previous one, so keep only the one you want.
model = LinearRegression(fit_intercept=True)
model = GaussianNB()
model = make_pipeline(StandardScaler(), PCA(2), KNeighborsClassifier(5))
model = SVC(kernel='linear', C=0.1)
model = SVC(kernel='rbf', C=15, gamma=5)
model = MLPClassifier(hidden_layer_sizes=(4, 3))

model.fit(X_train, y_train)
print(model.predict(X_valid))
print(model.score(X_valid, y_valid))
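# Optional sketch (assumption, not in the original): instead of hard-coding C/gamma as above,
# the SVC hyperparameters could be picked with a grid search on the training split.
# The grid values below are illustrative only.
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [0.1, 1, 10], 'gamma': ['scale', 0.1, 1]}
search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)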

from sklearn.cluster import KMeans
model = KMeans(n_clusters=5)
y = model.fit_predict(X)      # cluster label for every training sample
print(model.predict(X))       # same labels, reproduced by the fitted model
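# Optional sketch (assumption, not part of the original): without ground-truth labels,
# the silhouette score gives a rough measure of how well separated the 5 clusters are.
from sklearn.metrics import silhouette_score

print('silhouette:', silhouette_score(X, y))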

from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor

# Candidate regressors -- again, each assignment overrides the previous one.
model = KNeighborsRegressor(5)
model = SVR(kernel='rbf', C=1, gamma='auto')
model = MLPRegressor(hidden_layer_sizes=(8, 6), activation='logistic')

model.fit(X_train, y_train)
print(model.score(X_valid, y_valid))
from sklearn.metrics import accuracy_score
# accuracy_score applies to the classifiers above; for the regressors, model.score already reports R^2.
print('accuracy:', accuracy_score(y_valid, model.predict(X_valid)))

#================ K-fold cross validation -> a more reliable estimate of the score ============================
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

# df: feature DataFrame, Y: target column (both defined earlier in the original script)
crossycap = cross_val_predict(model, df, Y, cv=4)  # out-of-fold predictions across 4 folds
score = r2_score(Y, crossycap.round())
print('k-fold score:', score)
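# Optional sketch (assumption): cross_val_score, already imported above, returns one score per
# fold; the mean and standard deviation give a steadier estimate than a single train/valid split.
foldscores = cross_val_score(model, df, Y, cv=4)
print('per-fold scores:', foldscores)
print('mean +/- std: %.3f +/- %.3f' % (foldscores.mean(), foldscores.std()))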


#=====================================rmse====================================================
import numpy as np


def rmse(Y, ycap):
    # root-mean-squared error between the target and the (rounded) cross-validated predictions
    score = np.sqrt(np.mean((Y - ycap.round()) ** 2))
    return score


print('rmse score:', rmse(Y, crossycap))
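# Optional sketch (assumption): the same RMSE can be obtained from the mean_squared_error
# import above, which avoids the hand-written helper.
print('rmse via sklearn:', np.sqrt(mean_squared_error(Y, crossycap.round())))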

#=============================clustering part (recommendation engine)========================
import matplotlib.pyplot as plt
plt.scatter(df["Outlet_Location_Type"], df["ProductCategory"])
plt.show()

from sklearn.cluster import KMeans
model = KMeans(n_clusters=3, init="k-means++")  # k-means++ seeding for better initial centroids
prediction = model.fit_predict(df[["Outlet_Location_Type", "ProductCategory"]])
df["cluster"] = prediction  # attach the cluster label to each row
print(df)
df.to_csv("C:\\Users\\nick\\RetailDatalog1.csv")
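# Optional sketch (assumption, not in the original): n_clusters=3 is hard-coded above; an elbow
# plot of the k-means inertia over a range of k is one common way to sanity-check that choice.
inertias = [KMeans(n_clusters=k, init="k-means++")
            .fit(df[["Outlet_Location_Type", "ProductCategory"]]).inertia_
            for k in range(1, 10)]
plt.plot(range(1, 10), inertias, marker="o")
plt.xlabel("k")
plt.ylabel("inertia")
plt.show()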
#=============================================================================================
Example #3
# %%
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.datasets import make_moons, load_digits

# X: 2-D feature matrix prepared in an earlier (not shown) cell.
labels = KMeans(n_clusters=6, random_state=0).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap="viridis")

# %%
X, y = make_moons(200, noise=0.05, random_state=0)

# %%
# Plain k-means cannot separate the two interleaved moons (it only finds convex clusters).
labels = KMeans(n_clusters=2, random_state=0).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap="viridis")

# %%
# Spectral clustering with a nearest-neighbours affinity handles the non-convex moon shapes.
model = SpectralClustering(n_clusters=2,
                           affinity="nearest_neighbors",
                           assign_labels="kmeans")
labels = model.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap="viridis")

# %%
digits = load_digits()
digits.data.shape

# %%
# One cluster per digit class; each centre is a point in the 64-dimensional pixel space.
kmeans = KMeans(n_clusters=10, random_state=0)
clusters = kmeans.fit_predict(digits.data)
kmeans.cluster_centers_.shape

# %%
# Render each cluster centre as an 8x8 image.
fig, ax = plt.subplots(2, 5, figsize=(8, 3))
centers = kmeans.cluster_centers_.reshape(10, 8, 8)
for axi, center in zip(ax.flat, centers):