Code example #1
File: visualize.py Project: sureshaks/CausalBandits
import matplotlib.pyplot as plt

def visualize_plot(item, xlab, ylab):
    """Plot a 1-D sequence with labeled axes."""
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.plot(item, lw=2)
    return
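A minimal usage sketch (the data below is made up for illustration, not taken from the project):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical data: cumulative reward over 100 rounds of a bandit run
rewards = np.cumsum(np.random.rand(100))
visualize_plot(rewards, "Round", "Cumulative reward")
plt.show()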
Code example #2
File: lstplotting.py Project: karenhes/cse4schools
# Import functionality for plotting
import matplotlib.pyplot as plt

# Extract x and y coordinates (r is assumed to be an (N, 2) array of
# trajectory points computed earlier in the script)
x = r[:, 0]
y = r[:, 1]

# Plot figure
plt.plot(x,y)

# Prettify the plot
plt.xlabel('Horizontal distance, [m]')
plt.ylabel('Vertical distance, [m]')
plt.title('Trajectory of a fired cannonball')
plt.grid()
plt.axis([0, 900, 0, 250])

# Makes the plot appear on the screen
plt.show()
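For context, a sketch of how the array r might be produced upstream; the launch speed and angle are assumed values, not taken from the project:

import numpy as np

v0 = 95.0                # assumed launch speed, m/s
theta = np.radians(45)   # assumed launch angle
g = 9.81                 # gravitational acceleration, m/s^2
t = np.linspace(0, 2 * v0 * np.sin(theta) / g, 200)
r = np.column_stack((v0 * np.cos(theta) * t,
                     v0 * np.sin(theta) * t - 0.5 * g * t**2))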
Code example #3
# model is assumed to be a Keras Sequential defined earlier in the script
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
#model.add(Dropout(0.5))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

history_object = model.fit(X_train,
                           y_train,
                           batch_size=32,
                           epochs=8,  # 'nb_epoch' in Keras 1.x; 'epochs' in Keras 2+
                           shuffle=True,
                           verbose=1,
                           validation_split=0.1)

model.save('model.h5')

import matplotlib.pyplot as plt
print(history_object.history.keys())

plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
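The saved file can later be restored for inference; a minimal sketch using Keras's load_model:

from keras.models import load_model

model = load_model('model.h5')     # restores architecture, weights, and optimizer state
preds = model.predict(X_train[:10])   # sanity-check predictions on a few samples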
Code example #4
# overfit / underfit

import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over different values of k
for i, k in enumerate(neighbors):
    # Setup a k-NN Classifier with k neighbors: knn
    knn = KNeighborsClassifier(n_neighbors=k)

    # Fit the classifier to the training data
    knn.fit(X_train, y_train)

    # Compute accuracy on the training set
    train_accuracy[i] = knn.score(X_train, y_train)

    # Compute accuracy on the testing set
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
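The loop assumes X_train, X_test, y_train, y_test already exist; one way to produce them (the dataset here is only illustrative):

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=42,
    stratify=digits.target)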
Code example #5

import numpy as np
import matplotlib.pyplot as plt

# g, r, a, r1, t1, t2, e (gravity, initial radius, incline angle, initial
# radial velocity, time window, and Euler step size) are assumed to be set
# earlier in the script
w = np.sqrt(g / (r * np.tan(a)))

T1 = np.arange(t1, t2, e)

# Euler integration of the radial equation of motion
rlist = []
for i in T1:
    r2 = r * (w**2) * (np.sin(a)**2) - g * np.cos(a) * np.sin(a)
    r = r + e * r1
    r1 = r1 + e * r2
    rlist.append(r)

plt.plot(T1,rlist,label='r(t) given r(0)=r_0')
plt.xlabel('t')
plt.ylabel('r(t)')
plt.legend()
plt.show()

# For the perturbed run, subr is assumed to be initialized to 0.999 * r_0
# (and r1 reset to its initial value) before this loop, per the legend below;
# the original advanced r instead of subr, so subr never changed
subrlist = []
for i in T1:
    r2 = subr * (w**2) * (np.sin(a)**2) - g * np.cos(a) * np.sin(a)
    subr = subr + e * r1
    r1 = r1 + e * r2
    subrlist.append(subr)

print(subrlist)

plt.plot(T1, subrlist, label='r(t) given r(0)=0.999r_0')
plt.xlabel('t')
plt.ylabel('r(t)')
plt.legend()
plt.show()
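For completeness, one plausible parameter block that would precede the snippet; every value here is an assumption for illustration, since the original constants are not shown:

import numpy as np

g = 9.81             # gravitational acceleration, m/s^2
a = np.radians(30)   # assumed incline angle
r = 1.0              # assumed initial radius r_0
r1 = 0.0             # assumed initial radial velocity
t1, t2, e = 0.0, 10.0, 0.001   # time window and Euler step size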
Code example #6
File: purification.py Project: yeon-lee/tenpy
        # (excerpt: psi, M, dt, approx, options, beta, and beta_max are
        # defined earlier in the function this snippet is taken from)
        Us = [
            M.H_MPO.make_U(-d * dt, approx) for d in [0.5 + 0.5j, 0.5 - 0.5j]
        ]
    eng = PurificationApplyMPO(psi, Us[0], options)
    Szs = [psi.expectation_value("Sz")]
    betas = [0.]
    while beta < beta_max:
        beta += 2. * dt  # factor of 2:  |psi> ~= exp^{- dt H}, but rho = |psi><psi|
        betas.append(beta)
        for U in Us:
            eng.init_env(U)  # reset environment, initialize new copy of psi
            eng.run()  # apply U to psi
        Szs.append(psi.expectation_value("Sz"))  # and further measurements...
    return {'beta': betas, 'Sz': Szs}


if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    data_tebd = imag_tebd()
    data_mpo = imag_apply_mpo()

    import numpy as np
    import matplotlib.pyplot as plt

    plt.plot(data_mpo['beta'], np.sum(data_mpo['Sz'], axis=1), label='MPO')
    plt.plot(data_tebd['beta'], np.sum(data_tebd['Sz'], axis=1), label='TEBD')
    plt.xlabel(r'$\beta$')
    plt.ylabel(r'total $S^z$')
    plt.show()
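If the curves need to be kept, the measured data can be written to disk after plotting; a small sketch (the filename is arbitrary):

    np.savez('purification_sz.npz',
             beta_mpo=data_mpo['beta'],
             sz_mpo=np.sum(data_mpo['Sz'], axis=1),
             beta_tebd=data_tebd['beta'],
             sz_tebd=np.sum(data_tebd['Sz'], axis=1))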
Code example #7

from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.cluster import MeanShift
import matplotlib.pyplot as plt
# define dataset
X, _ = make_classification(n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=4)
# define the model
model = MeanShift()
# fit model and predict clusters
yhat = model.fit_predict(X)
# retrieve unique clusters
clusters = unique(yhat)
# create scatter plot for samples from each cluster
for cluster in clusters:
    # get row indexes for samples with this cluster
    row_ix = where(yhat == cluster)
    # create scatter of these samples
    plt.scatter(X[row_ix, 0], X[row_ix, 1])
# label the axes
plt.xlabel("x1")
plt.ylabel("x2")
# show the plot
plt.show()
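MeanShift estimates its bandwidth automatically by default; it can also be set explicitly via scikit-learn's estimate_bandwidth (the quantile below is an assumed value):

from sklearn.cluster import estimate_bandwidth

bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
model = MeanShift(bandwidth=bandwidth)
yhat = model.fit_predict(X)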
Code example #8

from numpy import sqrt
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# stock_prices (daily prices) and return_and_volatility_datfarame (a DataFrame)
# are assumed to be built earlier; the "Annual Return" assignment below is an
# assumed reconstruction of a line whose left-hand side was truncated
return_and_volatility_datfarame["Annual Return"] = (
    stock_prices.pct_change().mean() * 252) * 100
return_and_volatility_datfarame["Annual Risk"] = (
    stock_prices.pct_change().std() * sqrt(252)) * 100
return_and_volatility_datfarame.index.name = "Company Symbol"

#-----------Elbow Method to get the optimal number of cluster-----#

wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(return_and_volatility_datfarame)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# After plotting "Number of clusters" vs "WCSS" we can see that once the
# number of clusters reaches 4 (on the x axis), the reduction in the
# within-cluster sum of squares (WCSS) slows down with each added cluster.
# Hence the optimal number of clusters for this data is 4, so we use k = 4.
#--------------------applying K-Means Clustering-------------#

kmeans = KMeans(n_clusters=4, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(return_and_volatility_datfarame)

return_and_volatility_datfarame.reset_index(level=['Company Symbol'],
                                            inplace=True)
return_and_volatility_datfarame["Cluster Name"] = y_kmeans