Example #1
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\pyplot.py", line 3099, in plot
    ret = ax.plot(*args, **kwargs)
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\axes\_axes.py", line 1373, in plot
    for line in self._get_lines(*args, **kwargs):
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\axes\_base.py", line 304, in _grab_next_args
    for seg in self._plot_args(remaining, kwargs):
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\axes\_base.py", line 292, in _plot_args
    seg = func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\axes\_base.py", line 244, in _makeline
    self.set_lineprops(seg, **kwargs)
  File "C:\WinPython-64bit-2.7.10.1\python-2.7.10.amd64\lib\site-packages\matplotlib\axes\_base.py", line 184, in set_lineprops
    raise TypeError('There is no line property "%s"' % key)
TypeError: There is no line property "x_label"
>>> plt.xlabel("False positive rate")
<matplotlib.text.Text object at 0x0000000017300A90>
>>> plt.ylabels("True positive rate")

Traceback (most recent call last):
  File "<pyshell#14>", line 1, in <module>
    plt.ylabels("True positive rate")
AttributeError: 'module' object has no attribute 'ylabels'
>>> plt.ylabel("True positive rate")
<matplotlib.text.Text object at 0x000000002563F0F0>
>>> plt.title("ROC: Neural Network Classifier")
<matplotlib.text.Text object at 0x00000000256E5518>
>>> plt.show()
>>> plt.title("ROC Curve: Neural Network Classifier")
<matplotlib.text.Text object at 0x0000000031F7A400>
>>> plt.show()
>>> plt.plot(tpr, fpr)
[<matplotlib.lines.Line2D object at 0x0000000015D8CEF0>]
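For reference, here is a minimal, cleaned-up sketch of the plot this session is working toward. It assumes y_test (the true binary labels) and y_score (the network's predicted probabilities) are already available; sklearn's roc_curve returns the false positive rate for the x axis and the true positive rate for the y axis.

import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

# compute the ROC curve and its area (y_test and y_score are assumed to exist)
fpr, tpr, thresholds = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)

# FPR on the x axis, TPR on the y axis
plt.plot(fpr, tpr, label="AUC = %0.2f" % roc_auc)
plt.plot([0, 1], [0, 1], linestyle="--")  # chance line
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title("ROC: Neural Network Classifier")
plt.legend()
plt.show()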
              metrics=["accuracy"])

# train the network
print("[INFO] training model...")
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              epochs=40,
              batch_size=32,
              verbose=1)

# evaluate the model
print("[INFO] evaluating model...")
predictions = model.predict(testX, batch_size=32)
print(
    classification_report(testY.argmax(axis=1),
                          predictions.argmax(axis=1),
                          target_names=labelNames))

# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 100), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, 100), H.history["val_accuracy"], label="val_accuracy")
plt.title("Training loss and accuracy")
plt.xlabel("Epochs #")
plt.ylabels("Loss/Accuracy")
plt.legend()
plt.show()
Example #3
# imports needed by this snippet
import math
import pandas_datareader.data as web
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler

df = web.DataReader('AAPL',
                    data_source='yahoo',
                    start='2012-01-01',
                    end='2019-12-17')
df

df.shape
df.describe()
df.head(5)

plt.figure(figsize=(16, 8))
plt.title('Close Price History')
plt.plot(df['Close'])
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD($)', fontsize=18)
plt.show()

# Create a dataframe with only the Close column
data = df.filter(['Close'])
data.shape
data
# Convert the dataframe to a numpy array
dataset = data.values
dataset
# Get the number of rows to train the model on
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len

# Scale the data
scaler = MinMaxScaler(feature_range=(0, 1))
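
The snippet stops right after the scaler is created; a minimal sketch of how the scaling step would typically continue (the names scaled_data and train_data are illustrative, not from the source):

# fit the scaler on the full close-price array and keep the training slice
scaled_data = scaler.fit_transform(dataset)
train_data = scaled_data[0:training_data_len, :]
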
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd

x = [1, 2, 3, 4, 5]
y = [5, 7, 3, 8, 4]
plt.title('Title text here ...')
plt.xlabel('X Axis Label')
plt.ylabel('Y Axis Label')
plt.grid(True)
plt.bar(x, y)
plt.show()

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA

# load the digits dataset and fit a two-component PCA
# (setup implied by the snippet below)
digits = load_digits()
pca = PCA(n_components=2)
pca.fit(digits.data)

digits_pca = pca.transform(digits.data)
colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
          "#A83683", "#4E655E", "#853541", "#3A3120","#535D8E"]
plt.figure(figsize=(10, 10))
plt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max())
plt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max())
for i in range(len(digits.data)):
    plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),
             color=colors[digits.target[i]],
             fontdict={'weight': 'bold', 'size': 9})

plt.xlabels("첫번째 주성분")
plt.ylabels("두번째 주성분")
# We plotted the handwritten digits using the first two principal components => they are not separated cleanly.

# Even just understanding PCA well is a big help for data analysis.
# PCA has an impact across a wide range of areas.



# t-SNE pulls nearby points closer together and pushes distant points farther apart; the transformed result cannot be inverted back to the original data,
# but it separates the groups clearly (the inverse transform is impossible).
# It is useful for visually checking how many groups the data should be split into for k-means.
from sklearn.manifold import TSNE
tsne = TSNE(random_state=42)
digits_tsne = tsne.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
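
The t-SNE snippet ends right after setting the x-limits; a minimal sketch of how the embedding would typically be drawn, mirroring the PCA loop above (the axis label wording is illustrative):

plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
    # place each digit's class label at its t-SNE coordinates
    plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
             color=colors[digits.target[i]],
             fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("t-SNE feature 0")
plt.ylabel("t-SNE feature 1")
plt.show()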