Example #1
def plot_embedding(X, y, title=None):
    '''
    INPUT:
    X - decomposed feature matrix
    y - target labels (digits)
    Creates a pyplot object showing digits projected onto 2-dimensional
    feature space. PCA should be performed on the feature matrix before
    passing it to plot_embedding.
    '''
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure(figsize=(10, 6), dpi=250)
    ax = plt.subplot(111)
    ax.axis('off')
    ax.patch.set_visible(False)
    for i in range(X.shape[0]):
        plt.text(X[i, 0],
                 X[i, 1],
                 str(y[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={
                     'weight': 'bold',
                     'size': 12
                 })

    plt.xticks([])
    plt.yticks([])
    plt.ylim([-0.1, 1.1])
    plt.xlim([-0.1, 1.1])

    if title is not None:
        plt.title(title, fontsize=16)
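
A minimal usage sketch (illustrative, not part of the original example): project scikit-learn's digits dataset onto two principal components before calling plot_embedding, as the docstring requires.

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# Hypothetical usage: reduce the 64-dimensional digit images to 2 components
digits = load_digits()
X_pca = PCA(n_components=2).fit_transform(digits.data)
plot_embedding(X_pca, digits.target, title='PCA projection of the digits dataset')
plt.show()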
Example #2
def scree_plot(pca, title=None):
    num_components = pca.n_components_
    ind = np.arange(num_components)
    vals = pca.explained_variance_ratio_
    plt.figure(figsize=(10, 6), dpi=250)
    ax = plt.subplot(111)
    ax.bar(ind,
           vals,
           0.35,
           color=[
               (0.949, 0.718, 0.004),
               (0.898, 0.49, 0.016),
               (0.863, 0, 0.188),
               (0.694, 0, 0.345),
               (0.486, 0.216, 0.541),
               (0.204, 0.396, 0.667),
               (0.035, 0.635, 0.459),
               (0.486, 0.722, 0.329),
           ])

    for i in range(num_components):
        ax.annotate(r"%s%%" % ((str(vals[i] * 100)[:4])),
                    (ind[i] + 0.2, vals[i]),
                    va="bottom",
                    ha="center",
                    fontsize=12)

    ax.set_xticks(ind)
    ax.set_xticklabels(ind, fontsize=12)

    ax.set_ylim(0, max(vals) + 0.05)
    ax.set_xlim(0 - 0.45, 8 + 0.45)

    ax.xaxis.set_tick_params(width=0)
    ax.yaxis.set_tick_params(width=2, length=12)

    ax.set_xlabel("Principal Component", fontsize=12)
    ax.set_ylabel("Proportion of Variance Explained", fontsize=12)

    if title is not None:
        plt.title(title, fontsize=16)
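
A usage sketch (illustrative, not from the source): fit a PCA with eight components, matching the eight bar colours hard-coded above, and pass the fitted object to scree_plot.

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# Hypothetical usage: the fitted PCA exposes n_components_ and explained_variance_ratio_
pca = PCA(n_components=8).fit(load_digits().data)
scree_plot(pca, title='Variance explained by the first 8 principal components')
plt.show()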
Example #3
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
#model.add(Dropout(0.5))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

history_object = model.fit(X_train,
                           y_train,
                           batch_size=32,
                           epochs=8,
                           shuffle=True,
                           verbose=1,
                           validation_split=0.1)

model.save('model.h5')

import matplotlib.pyplot as plt
print(history_object.history.keys())

plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
Example #4
# Extract x and y coordinates
x = r[:,0]
y = r[:,1]

# Import functionality for plotting
import matplotlib.pyplot as plt

# Plot figure
plt.plot(x,y)

# Prettify the plot
plt.xlabel('Horizontal distance, [m]')
plt.ylabel('Vertical distance, [m]')
plt.title('Trajectory of a fired cannonball')
plt.grid()
plt.axis([0, 900, 0, 250])

# Makes the plot appear on the screen
plt.show()
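
For context, the snippet above assumes r is an N x 2 array of (x, y) positions. A minimal sketch of how such an array might be produced with simple projectile motion (the initial speed and angle are illustrative, chosen to roughly match the axis limits above):

import numpy as np

v0 = 94.0                                   # assumed initial speed [m/s]
angle = np.radians(45.0)                    # assumed launch angle
g = 9.81                                    # gravitational acceleration [m/s^2]
t = np.linspace(0, 2 * v0 * np.sin(angle) / g, 200)   # time samples over the flight
r = np.column_stack((v0 * np.cos(angle) * t,
                     v0 * np.sin(angle) * t - 0.5 * g * t**2))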
# overfit / underfit

# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over different values of k
for i, k in enumerate(neighbors):
    # Setup a k-NN Classifier with k neighbors: knn
    knn = KNeighborsClassifier(n_neighbors=k)

    # Fit the classifier to the training data
    knn.fit(X_train, y_train)

    #Compute accuracy on the training set
    train_accuracy[i] = knn.score(X_train, y_train)

    #Compute accuracy on the testing set
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
                    (255, 255, 255), 2)

        # Create an image to draw the lines on
        color_warp = np.zeros_like(warped).astype(np.uint8)
        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([self.bestx[0], ploty]))])
        pts_right = np.array(
            [np.flipud(np.transpose(np.vstack([self.bestx[1], ploty])))])
        pts = np.hstack((pts_left, pts_right))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = self.perspective.transform(color_warp, 'inverse')
        # Combine the result with the original image
        result = cv2.addWeighted(undistorted_image, 1, newwarp, 0.3, 0)
        return result


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import glob
    for image_p in glob.glob('test_images/test*.jpg'):
        line = Line('./cal_camera.p')
        image = mpimg.imread(image_p)
        draw_img = line.finding(image)

        fig = plt.figure(figsize=(15, 15))
        plt.imshow(draw_img)
        plt.title('Raw Detections')
        plt.axis('off')
        fig.tight_layout()
#print(Puncertainty, NoPuncertainty, Ratiouncertainty)
print('ratio uncertainty =', Ratiouncertainty)

"""To plot this ratio vs angle with error bars, we first need to
define the error bars"""

# with polarizer
plt.errorbar(xaxis, Parray, yerr=Puncertainty,
             fmt='o', ecolor=None, elinewidth=None,
             capsize=3, barsabove=False, lolims=False,
             uplims=False, xlolims=False, xuplims=False)

plt.plot(xaxis,Parray)
plt.xlabel('Degrees')
plt.ylabel('Power')
plt.title('Input Angle vs. Power (With Polarizer)')
plt.show()

# without polarizer
plt.errorbar(xaxis, NoParray, yerr=NoPuncertainty,
             fmt='o', ecolor=None, elinewidth=None,
             capsize=3, barsabove=False, lolims=False,
             uplims=False, xlolims=False, xuplims=False)

plt.plot(xaxis,NoParray)
plt.xlabel('Degrees')
plt.ylabel('Power')
plt.title('Input Angle vs. Power (Without Polarizer)')
plt.show()

# Ratio
history = model.fit(
    X_train, y_train,  # training arrays assumed to be defined earlier in the script
    batch_size=batch_size,
    epochs=epochs,
    #verbose=1,
    callbacks=[cb],
    validation_split=.1,
    #validation_data=(X_test, y_test)
)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

predicted = model.predict(X_test, verbose=False)
predicted = np.argmax(predicted, axis=1)

from keras.utils import plot_model
plot_model(model, to_file='CNN.png')

#CM = ConfusionMatrix(predicted, y_test_orig, c);
#np.savetxt("data/CNN_predicted_raw.txt", predicted, "%d")
#np.savetxt("data/CNN_cm_raw.txt", CM, "%d");

import matplotlib.pyplot as plt

plt.plot(history.history['val_acc'])
plt.plot(history.history['acc'])
plt.legend(['Validation Accuracy', 'Training Accuracy'])
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('CNN Convergence Curve')
plt.show()
return_and_volatility_datfarame = pd.DataFrame()
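# Annualize the mean daily return and daily volatility using 252 trading days per year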
return_and_volatility_datfarame["Annual Returns"] = (
    stock_prices.pct_change().mean() * 252) * 100
return_and_volatility_datfarame["Annual Risk"] = (
    stock_prices.pct_change().std() * sqrt(252)) * 100
return_and_volatility_datfarame.index.name = "Company Symbol"

#-----------Elbow Method to get the optimal number of clusters-----#

wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(return_and_volatility_datfarame)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# From the "Number of clusters" vs. "WCSS" plot we can see that once the number of
# clusters reaches 4 (on the x axis), the reduction in the within-cluster sum of
# squares (WCSS) slows down with each additional cluster. Hence the optimal number
# of clusters for this data is 4, so we use k = 4 for K-Means.
#--------------------Applying K-Means Clustering-------------#

kmeans = KMeans(n_clusters=4, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(return_and_volatility_datfarame)

return_and_volatility_datfarame.reset_index(level=['Company Symbol'],
                                            inplace=True)