Example #1
def test_changepoint_scaled():

    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(np.sqrt(M.sizes) * X.adjoint_map(Y) / (1 + np.sqrt(np.log(M.sizes)))).max()

    penalty = rr.weighted_l1norm((1 + np.sqrt(np.log(M.sizes))) / np.sqrt(M.sizes), lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()
Example #2
def visualize_plot(item, xlab, ylab):
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.plot(item, lw=2)
    return
Example #3
# Extract x and y coordinates
x = r[:,0]
y = r[:,1]

# Import functionality for plotting
import matplotlib.pyplot as plt

# Plot figure
plt.plot(x,y)

# Prettify the plot
plt.xlabel('Horizontal distance, [m]')
plt.ylabel('Vertical distance, [m]')
plt.title('Trajectory of a fired cannonball')
plt.grid()
plt.axis([0, 900, 0, 250])

# Makes the plot appear on the screen
plt.show()
Example #4
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
#model.add(Dropout(0.5))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

history_object = model.fit(X_train,
                           y_train,
                           batch_size=32,
                           nb_epoch=8,
                           shuffle=True,
                           verbose=1,
                           validation_split=0.1)

model.save('model.h5')

import matplotlib.pyplot as plt
print(history_object.history.keys())

plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
Example #5
# overfit / underfit
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier

# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over different values of k
for i, k in enumerate(neighbors):
    # Setup a k-NN Classifier with k neighbors: knn
    knn = KNeighborsClassifier(n_neighbors=k)

    # Fit the classifier to the training data
    knn.fit(X_train, y_train)

    #Compute accuracy on the training set
    train_accuracy[i] = knn.score(X_train, y_train)

    #Compute accuracy on the testing set
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
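# Not part of the original exercise: a minimal follow-up sketch that reads the
# "best" k off the two curves above, i.e. the k with the highest test accuracy
# (it reuses the `neighbors` and `test_accuracy` arrays filled in the loop).
best_k = neighbors[np.argmax(test_accuracy)]
print("k with highest test accuracy:", best_k)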
Example #6
#     (5,500), for omega=2.
# Plot each component overlayed on each other.

import numpy as np
import matplotlib.pyplot as plt

def square_component(x, omega, k):
    """Return the kth term of the square-wave approximation."""
    return (4.0/np.pi) * np.sin(2*np.pi * (2*k-1) * omega*x) / (2*k-1)

x = np.linspace(0, 1, 500)
y = np.zeros((5, len(x)))

for k in range(5):
    y[k,:] = square_component(x, 2, k+1)
    plt.plot(x, y[k,:], label = 'k=' + str(k+1))
plt.legend()
plt.show()

# Staff Soln:
# import numpy as np
# import matplotlib.pyplot as plt
# def square_component(x,omega,k):
#     val = (4.0/np.pi)* np.sin(2*np.pi*(2*k-1)*omega*x)/(2*k-1)
#     return val
#
# omega=2
# x = np.linspace(0,1,500)
# ks = [1,2,3,4,5]
# y = np.zeros((5,len(x)))
#
Example #7
##z = np.arange(0,100)
##fu = [z**2 for z in range(0,100)]
##P = np.array((1/(2*(a**2)))*[(1/2)*(np.exp(-2*z/a))*(a+2*z)
##                    + (2*z/(9*np.sqrt(2)*a))*(np.cos(0))*(np.exp(-3*z/a))*(a+3*z)
##                    + ((np.power(z,2))/(8*(a**2)))*(np.exp(-z/a))*(a+z)])

def P(z):
    """Evaluate P(z); accepts a scalar or a numpy array (the length scale `a`
    must be defined before this is called)."""
    return (1/(2*a**2)) * ((1/2)*np.exp(-2*z/a)*(a + 2*z)
                           + (2*z/(9*np.sqrt(2)*a))*np.cos(0)*np.exp(-3*z/a)*(a + 3*z)
                           + (z**2/(8*a**2))*np.exp(-z/a)*(a + z))

# Plot P(z) for z from 0 to 100, as in the commented-out attempt above.
z = np.arange(0, 100)
plt.plot(z, P(z))
plt.show()

"""Classical system with circular motion"""

# P 6.18 Draft 4 rewrite v2

import numpy as np
import matplotlib.pyplot as plt

g = 9.8
# Alpha
a = np.radians(20)
# Initial position r_0
r = 0.1
subr = 0.999*r
Example #8
from sklearn import datasets
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
iris = datasets.load_iris()
feature = iris["data"][:,3:]
label = (iris["target"] == 2).astype(int)

#training
clf = LogisticRegression()
clf.fit(feature, label)
example = clf.predict([[1.2]])
print(example)

#plot a graph
feature_new = np.linspace(0, 3, 1000).reshape(-1, 1)
label_prob = clf.predict_proba(feature_new)
plt.plot(feature_new, label_prob[:, 1], "g-", label="virginica")
plt.legend()
plt.show()



Example #10
import matplotlib.pyplot as plt

a = [1,2,3,4]
b = [1,2,4,3]

plt.plot(a,b)
plt.show()
Example #11
        Us = [
            M.H_MPO.make_U(-d * dt, approx) for d in [0.5 + 0.5j, 0.5 - 0.5j]
        ]
    eng = PurificationApplyMPO(psi, Us[0], options)
    Szs = [psi.expectation_value("Sz")]
    betas = [0.]
    while beta < beta_max:
        beta += 2. * dt  # factor of 2:  |psi> ~= exp^{- dt H}, but rho = |psi><psi|
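        # Added note (not in the original code): for a purification |psi(beta)>, the
        # physical density matrix is rho(beta) = Tr_ancilla |psi(beta)><psi(beta)| with
        # |psi(beta)> ~ exp(-beta/2 H) |psi(0)>, so each application of exp(-dt H) to the
        # physical legs advances beta/2 by dt, i.e. beta by 2*dt.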
        betas.append(beta)
        for U in Us:
            eng.init_env(U)  # reset environment, initialize new copy of psi
            eng.run()  # apply U to psi
        Szs.append(psi.expectation_value("Sz"))  # and further measurements...
    return {'beta': betas, 'Sz': Szs}


if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    data_tebd = imag_tebd()
    data_mpo = imag_apply_mpo()

    import numpy as np
    import matplotlib.pyplot as plt

    plt.plot(data_mpo['beta'], np.sum(data_mpo['Sz'], axis=1), label='MPO')
    plt.plot(data_tebd['beta'], np.sum(data_tebd['Sz'], axis=1), label='TEBD')
    plt.xlabel(r'$\beta$')
    plt.ylabel(r'total $S^z$')
    plt.show()
    batch_size=batch_size,
    epochs=epochs,
    #verbose=1,
    callbacks=[cb],
    validation_split=.1,
    #validation_data=(X_test, y_test)
)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

predicted = model.predict(X_test, verbose=False)
predicted = np.argmax(predicted, axis=1)

from keras.utils import plot_model
plot_model(model, to_file='CNN.png')

#CM = ConfusionMatrix(predicted, y_test_orig, c);
#np.savetxt("data/CNN_predicted_raw.txt", predicted, "%d")
#np.savetxt("data/CNN_cm_raw.txt", CM, "%d");

import matplotlib.pyplot as plt

plt.plot(history.history['val_acc'])
plt.plot(history.history['acc'])
plt.legend(['Validation Accuracy', 'Training Accuracy'])
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('CNN Convergence Curve')
plt.show()
return_and_volatility_dataframe = pd.DataFrame()
return_and_volatility_dataframe["Annual Returns"] = (
    stock_prices.pct_change().mean() * 252) * 100
return_and_volatility_dataframe["Annual Risk"] = (
    stock_prices.pct_change().std() * sqrt(252)) * 100
return_and_volatility_dataframe.index.name = "Company Symbol"

#-----------Elbow Method to get the optimal number of cluster-----#

wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(return_and_volatility_dataframe)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# After plotting "Number of clusters" vs "WCSS" we can see that once the number of
# clusters reaches 4 (on the x axis), the reduction in the within-cluster sum of
# squares (WCSS) begins to slow down for each further increase in cluster count.
# Hence the optimal number of clusters for this data is 4, so we run k-means with k = 4.
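# Not in the original: a minimal sketch of locating the elbow numerically rather than
# by eye, using the fractional drop in WCSS for each additional cluster (assumes the
# `wcss` list computed in the loop above).
import numpy as np
relative_drop = -np.diff(wcss) / np.array(wcss[:-1])
for k, drop in zip(range(2, 11), relative_drop):
    print(f"{k} clusters: WCSS drops by {drop:.1%} relative to {k - 1}")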
#--------------------applying K-Means Clustering-------------#

kmeans = KMeans(n_clusters=4, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(return_and_volatility_dataframe)

return_and_volatility_dataframe.reset_index(level=['Company Symbol'],