Example #1
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection


def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
    """
    will plot a stack of traces one above the other assuming
    tarray.shape =  numSamples, numRows
    """
    data = tarray
    numSamples, numRows = tarray.shape
    if seconds:
        t = seconds * np.arange(numSamples, dtype=float) / numSamples
        if start_time:
            t = t + start_time
            xlm = (start_time, start_time + seconds)
        else:
            xlm = (0, seconds)

    else:
        t = np.arange(numSamples, dtype=float)
        xlm = (0, numSamples)

    ticklocs = []
    ax = plt.subplot(111)
    plt.xlim(*xlm)
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin) * 0.7  # Crowd them a bit.
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    plt.ylim(y0, y1)

    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i * dr)

    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs

    lines = LineCollection(
        segs,
        offsets=offsets,
        transOffset=None,
    )

    ax.add_collection(lines)

    # place a ytick at each trace's vertical offset
    ax.set_yticks(ticklocs)
    if ylabels is None:
        ylabels = ["%d" % ii for ii in range(numRows)]
    ax.set_yticklabels(ylabels)

    plt.xlabel('time (s)')
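
A minimal usage sketch of stackplot_t on synthetic data (the shapes and channel labels are illustrative, not from the original):

demo = np.random.randn(500, 8)  # 8 traces of 500 samples each
stackplot_t(demo, seconds=2.0, ylabels=["ch%d" % i for i in range(8)])
plt.show()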
Example #3
def visualize_plot(item, xlab, ylab):
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.plot(item, lw=2)
    return
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
#model.add(Dropout(0.5))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

history_object = model.fit(X_train,
                           y_train,
                           batch_size=32,
                           epochs=8,
                           shuffle=True,
                           verbose=1,
                           validation_split=0.1)

model.save('model.h5')

import matplotlib.pyplot as plt
print(history_object.history.keys())

plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
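
The visualize_plot helper defined at the top of this example goes unused; a minimal usage sketch, reusing history_object from the fit call above (the label strings are illustrative):

visualize_plot(history_object.history['loss'], 'epoch', 'training loss')
plt.show()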
Example #5
# Extract x and y coordinates
x = r[:,0]
y = r[:,1]

# Import functionality for plotting
import matplotlib.pyplot as plt

# Plot figure
plt.plot(x,y)

# Prettify the plot
plt.xlabel('Horizontal distance, [m]')
plt.ylabel('Vertical distance, [m]')
plt.title('Trajectory of a fired cannonball')
plt.grid()
plt.axis([0, 900, 0, 250])

# Makes the plot appear on the screen
plt.show()
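
The snippet above assumes a position array r computed elsewhere; a minimal sketch of how it might be produced for a drag-free cannonball (v0, the launch angle, and the time grid are assumed values, not from the original):

import numpy as np

v0 = 100.0               # initial speed, m/s (assumed)
angle = np.radians(45)   # launch angle (assumed)
g = 9.81                 # gravitational acceleration, m/s^2

t = np.linspace(0, 2 * v0 * np.sin(angle) / g, 200)  # time-of-flight grid
r = np.column_stack((v0 * np.cos(angle) * t,
                     v0 * np.sin(angle) * t - 0.5 * g * t**2))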
# overfit / underfit

# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over different values of k
for i, k in enumerate(neighbors):
    # Setup a k-NN Classifier with k neighbors: knn
    knn = KNeighborsClassifier(n_neighbors=k)

    # Fit the classifier to the training data
    knn.fit(X_train, y_train)

    # Compute accuracy on the training set
    train_accuracy[i] = knn.score(X_train, y_train)

    # Compute accuracy on the testing set
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
# Omega for which motion is circular
w = np.sqrt(g/(r*(np.tan(a))))

T1 = np.arange(t1,t2,e)

rlist = []
for i in T1:
    r2 = r*(w**2)*(np.sin(a)**2)-g*(np.cos(a))*(np.sin(a))
    r = r + e*r1
    r1 = r1 + e*r2
    rlist.append(r)


plt.plot(T1,rlist,label='r(t) given r(0)=r_0')
plt.xlabel('t')
plt.ylabel('r(t)')
plt.legend()
plt.show()

subrlist = []
for i in T1:
    r2 = subr*(w**2)*(np.sin(a)**2)-g*(np.cos(a))*(np.sin(a))
    subr = subr + e*r1
    r1 = r1 + e*r2
    subrlist.append(subr)

print(subrlist)

plt.plot(T1,subrlist,label='r(t) given r(0)=0.999r_0')
plt.xlabel('t')
plt.ylabel('r(t)')
plt.legend()
plt.show()
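
Both loops above apply the same explicit Euler step to r'' = w^2 sin^2(a) r - g cos(a) sin(a); a sketch factoring it into a reusable function (names follow the snippet; the initial conditions are the caller's):

def euler_radial(r0, v0, w, a, g, e, T):
    """Explicit Euler integration of r'' = w^2 sin^2(a) r - g cos(a) sin(a)."""
    r, r1, out = r0, v0, []
    for _ in T:
        r2 = r * (w**2) * (np.sin(a)**2) - g * np.cos(a) * np.sin(a)
        r = r + e * r1
        r1 = r1 + e * r2
        out.append(r)
    return out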
Example #8
        Us = [
            M.H_MPO.make_U(-d * dt, approx) for d in [0.5 + 0.5j, 0.5 - 0.5j]
        ]
    eng = PurificationApplyMPO(psi, Us[0], options)
    Szs = [psi.expectation_value("Sz")]
    betas = [0.]
    while beta < beta_max:
        beta += 2. * dt  # factor of 2: rho = |psi><psi|, so |psi> ~ e^{-(beta/2) H} |psi_0>
        betas.append(beta)
        for U in Us:
            eng.init_env(U)  # reset environment, initialize new copy of psi
            eng.run()  # apply U to psi
        Szs.append(psi.expectation_value("Sz"))  # and further measurements...
    return {'beta': betas, 'Sz': Szs}


if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    data_tebd = imag_tebd()
    data_mpo = imag_apply_mpo()

    import numpy as np
    import matplotlib.pyplot as plt

    plt.plot(data_mpo['beta'], np.sum(data_mpo['Sz'], axis=1), label='MPO')
    plt.plot(data_tebd['beta'], np.sum(data_tebd['Sz'], axis=1), label='TEBD')
    plt.xlabel(r'$\beta$')
    plt.ylabel(r'total $S^z$')
    plt.show()
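
A note on the factor of 2 in the beta loop above: the purified state evolves as |psi(beta)> = e^{-(beta/2) H} |psi(0)>, so rho(beta) ~ |psi(beta)><psi(beta)| carries e^{-(beta/2) H} on both sides; applying e^{-dt H} to |psi> once therefore advances beta by 2*dt.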
history = model.fit(
    X_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    #verbose=1,
    callbacks=[cb],
    validation_split=.1,
    #validation_data=(X_test, y_test)
)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

predicted = model.predict(X_test, verbose=False)
predicted = np.argmax(predicted, axis=1)

from keras.utils import plot_model
plot_model(model, to_file='CNN.png')

#CM = ConfusionMatrix(predicted, y_test_orig, c);
#np.savetxt("data/CNN_predicted_raw.txt", predicted, "%d")
#np.savetxt("data/CNN_cm_raw.txt", CM, "%d");

import matplotlib.pyplot as plt

plt.plot(history.history['val_acc'])
plt.plot(history.history['acc'])
plt.legend(['Validation Accuracy', 'Training Accuracy'])
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('CNN Convergence Curve')
plt.show()
from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.cluster import MeanShift
import matplotlib.pyplot as plt
# define dataset
X, _ = make_classification(n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=4)
# define the model
model = MeanShift()
# fit model and predict clusters
yhat = model.fit_predict(X)
# retrieve unique clusters
clusters = unique(yhat)
# create scatter plot for samples from each cluster
for cluster in clusters:
    # get row indexes for samples with this cluster
    row_ix = where(yhat == cluster)
    # create scatter of these samples
    plt.scatter(X[row_ix, 0], X[row_ix, 1])
# label the axes once, outside the loop
plt.xlabel("x1")
plt.ylabel("x2")
# show the plot
plt.show()
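
MeanShift() with no arguments estimates its kernel bandwidth from the data; if the default clustering looks off, the bandwidth can be set explicitly (a sketch; quantile=0.2 is an assumed value):

from sklearn.cluster import MeanShift, estimate_bandwidth

bandwidth = estimate_bandwidth(X, quantile=0.2)  # data-driven bandwidth guess
model = MeanShift(bandwidth=bandwidth)
yhat = model.fit_predict(X)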
return_and_volatility_dataframe["Annual Returns"] = (
    stock_prices.pct_change().mean() * 252) * 100
return_and_volatility_dataframe["Annual Risk"] = (
    stock_prices.pct_change().std() * sqrt(252)) * 100
return_and_volatility_dataframe.index.name = "Company Symbol"

#-----------Elbow Method to get the optimal number of clusters-----#

wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(return_and_volatility_dataframe)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# After plotting "Number of clusters" vs "WCSS" we can see that once the
# number of clusters reaches 4 (on the x axis), the reduction in the
# within-cluster sum of squares (WCSS) slows down with each further increase
# in cluster count. Hence the optimal number of clusters for this data is 4,
# so we take k = 4 for k-means.
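# A quick numeric check of that slowdown, using the wcss list computed above
# (purely illustrative): print how much WCSS drops for each additional
# cluster; the elbow is where the drop starts to level off.
drops = [wcss[i] - wcss[i + 1] for i in range(len(wcss) - 1)]
for k, d in zip(range(2, 11), drops):
    print("k=%d: WCSS drop %.2f" % (k, d))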
#--------------------applying K-Means Clustering-------------#

kmeans = KMeans(n_clusters=4, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(return_and_volatility_dataframe)

return_and_volatility_dataframe.reset_index(level=['Company Symbol'],
                                            inplace=True)
return_and_volatility_dataframe["Cluster Name"] = y_kmeans
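
To inspect the result, the companies can be listed per cluster (a sketch using the dataframe built above):

for name, group in return_and_volatility_dataframe.groupby("Cluster Name"):
    print("Cluster %d:" % name, list(group["Company Symbol"]))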