def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
Example #2
def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
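Both helpers above assume a Keras-style History object and that matplotlib.pyplot is already imported as plt. A minimal usage sketch with a toy model; the data and model below are illustrative assumptions, not part of the original examples:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Toy data and model, purely for illustration.
x = np.random.rand(200, 4).astype("float32")
y = np.random.randint(0, 2, size=(200, 1)).astype("float32")
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="sigmoid")])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(x, y, validation_split=0.2, epochs=5, verbose=0)

plot_graphs(history, "loss")                             # loss vs. val_loss
show_train_history(history, "accuracy", "val_accuracy")  # accuracy vs. val_accuracy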
def visual_inspection(raw_signal_list,
                      filtered_signal_list,
                      begin_sec, end_sec):
    import matplotlib.pyplot as plt
    
    for raw_signal, filtered_signal in zip(raw_signal_list,
                                           filtered_signal_list):
        plt.figure(figsize=(20, 20))
        plt.plot(raw_signal.T)
        plt.plot(filtered_signal.T)
        plt.xlim(begin_sec * 1000, end_sec * 1000)
        plt.legend(['raw', 'filtered'])
        plt.show()
Example #4
def plot_regression_line(x, y, b):
    # plotting the actual points as scatter plot
    plt.scatter(x, y, color="m", marker="o", s=30)

    # predict response vector
    y_pred = b[0] + b[1] * x

    # plotting the regression line
    plt.plot(x, y_pred, color="g")

    # putting labels
    plt.xlabel('x')
    plt.ylabel('y')

    # function to show plot
    plt.show()
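plot_regression_line expects the coefficient vector b to be computed beforehand. A minimal sketch of one way to obtain it, here using np.polyfit on made-up data (not necessarily the original author's method):
import numpy as np
import matplotlib.pyplot as plt

x = np.array([1, 2, 3, 4, 5, 6], dtype=float)
y = np.array([1.2, 1.9, 3.2, 3.8, 5.1, 6.2])

# polyfit returns the highest-degree coefficient first, so unpack as (slope, intercept)
b1, b0 = np.polyfit(x, y, deg=1)
plot_regression_line(x, y, (b0, b1))  # b[0] = intercept, b[1] = slope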
Example #5
def plot_hull(self, show_points=False):
    """
    Plots the boundaries of a convex hull using matplotlib.pyplot.
    The hull must be of type scipy.spatial.qhull.ConvexHull and the
    points must be the original coordinates.
    """
    hull = self.convex_hull(self.dots)
    plt.figure()
    for simplex in hull.simplices:
        plt.plot(self.dots[simplex, 0], self.dots[simplex, 1], 'k-')
    if show_points:
        plt.scatter(self.dots[:, 0], self.dots[:, 1], s=10, c='g')
        plt.scatter(self.dots[:, 0], self.dots[:, 1], s=30, c='orange')
    plt.show()
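For reference, a minimal standalone sketch of the same idea using scipy.spatial.ConvexHull directly; the random points are purely illustrative:
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull

pts = np.random.rand(30, 2)            # illustrative 2-D point cloud
hull = ConvexHull(pts)

plt.figure()
for simplex in hull.simplices:         # each simplex holds two point indices forming a hull edge
    plt.plot(pts[simplex, 0], pts[simplex, 1], 'k-')
plt.scatter(pts[:, 0], pts[:, 1], s=10, c='g')
plt.show()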
Example #7
def predict_prices(dates, prices, x):
    dates = np.reshape(dates, (len(dates), 1))

    svr_lin = SVR(kernel='linear', C=1e3)
    svr_poly = SVR(kernel='poly', C=1e3, degree=2)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)

    svr_lin.fit(dates, prices)
    svr_poly.fit(dates, prices)
    svr_rbf.fit(dates, prices)

    plt.scatter(dates, prices, color='black', label='data')
    plt.plot(dates, svr_rbf.predict(dates), color='red', label='RBF model')
    plt.plot(dates,
             svr_lin.predict(dates),
             color='green',
             label='Linear model')
    plt.plot(dates,
             svr_poly.predict(dates),
             color='blue',
             label='Polynomial model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()

    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
    DATE = datetime(now.year, now.month, now.day)  # UTC date
    hours = range(0, 1)  # model run hour for the date
    for h in hours:
        FileNames = download_hrrr_nat_subsection(DATE, h)
        print(FileNames)

    # There are 50 sigma levels in HRRR
    p1 = []
    p2 = []
    t1 = []
    t2 = []
    # Create a vertical temperature profile at a few points
    for f in FileNames:
        grbs = pygrib.open(f + '.small')
        pres = grbs.select(name='Pressure')
        temp = grbs.select(name='Temperature')
        #
        for l in range(0, 50):
            p1.append(pres[l].values[1, 1] / 100)
            p2.append(pres[l].values[30, 30] / 100)
            t1.append(temp[l].values[1, 1] - 273.15)
            t2.append(temp[l].values[30, 30] - 273.15)

        # plot the accumulated vertical profile for this file
        plt.scatter(t1, p1, color='b')
        plt.plot(t1, p1, color='b')
        plt.scatter(t2, p2, color='r')
        plt.plot(t2, p2, color='r')

    plt.gca().invert_yaxis()
    plt.show()
Example #9
def dpa_setup(ser):

    ser = Serial("/embsec/dpa_lab/dpa_setup")

    datafile = h5py.File('aes_decrypt_powertraces_test_target.hdf5', 'r')
    datasets = datafile.keys()

    init = True

    partA_buf = []  # lists of traces in partition A, indexed by key candidate. Individual traces will be numpy arrays!
    partB_buf = []

    partA_cnt = []  # list of number of traces in each partition, indexed by byte under examination
    partB_cnt = []
    avg_buf = None  # average of all traces
    avg_cnt = 0

    trim = False

    skeycan = 0  # the index to the sub-key of the round 10 key we are examining!

    # The loop below iterates through all traces in the file, and performs 16 key guesses on
    # the key byte indexed by skeycan. So, this performs DPA for 16 key guesses of just one
    # byte of the (round 10) key. If you want to keep this current code structure, you will
    # need to manually change the for loop bounds to perform more guesses. You will also
    # need to change skeycan to test out other sub-key bytes.

    for name in datasets:  # iterate through all traces in the hdf5 file
        print("Processing: %s" % name)
        ds = datafile[name]
        trace = np.array(ds)  # this is your current trace! As **a numpy array**

        ciphertext_hex = ds.attrs[metaname]
        ciphertext = binascii.unhexlify(ciphertext_hex)

        # If requested, truncate the trace before analysis.
        # This can be used to cut out problematic noisey sections while accelerating
        #  computation and reducing memory needs (great for key attacks)
        if trim:
            trace = trace[:trim]

        if init:  # sets up the partition buffers initially
            for x in range(16):  # just work on 16 possible key bytes, for now.
                partA_buf.append(0 * trace)  # initialize all 'traces' to zero
                partB_buf.append(0 * trace)
                partA_cnt.append(0)
                partB_cnt.append(0)
            avg_buf = 0 * trace
            init = False

        for x in range(0, 16):  # just work on 16 key candidates, more is too slow.

            ham = hamming(ciphertext[skeycan])  # hmmm ... is this what we want?

            if ham > 4:
                partA_buf[x] += trace  # add the trace to the list of traces for that key candidate
                partA_cnt[x] += 1  # increment the count for this partition and key candidate
            elif ham < 4:
                partB_buf[x] += trace
                partB_cnt[x] += 1

        avg_buf += trace
        avg_cnt += 1

    result = dict()

    avg_buf = avg_buf / avg_cnt
    result['avg trace'] = avg_buf
    result['trace cnt'] = avg_cnt

    absmax = []
    for x in range(16):
        means = (partA_buf[x] / partA_cnt[x]) - (partB_buf[x] / partB_cnt[x])
        result[x] = means
        absmax.append(np.max(np.abs(means)))
    result['absmax'] = absmax

    # Plot the maximum value of the absolute value of each DPA hypothesis
    plt.figure()
    plt.title("AbsMax of DPA Hypotheses (%d traces)" % result['trace cnt'])
    plt.plot(result['absmax'])

    # Plot the mean trace and all DPA Ciphertext Byte outputs
    plt.figure()
    plt.plot(result['avg trace'], label='Mean Trace')

    dpaPlotScale = 20
    for x in range(16):
        plt.plot(np.abs(result[x]) * dpaPlotScale, label="CT DPA Byte %d" % x)
    plt.legend(loc='upper right')
    plt.title("Ciphertext (CT) DPA Results (%d traces)" % result['trace cnt'])
    plt.show()

    # The next couple lines are to send the key you found / get a flag (if applicable)
    key_answer = bytes(16)  # your key you found! As a byte array
    ser.write(key_answer)

    return ser.read_until()
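The hamming() helper used above is not shown; in DPA code of this kind it is usually the Hamming weight of a byte, which is the assumption behind this sketch:
def hamming(byte_value):
    """Hamming weight (number of set bits) of a single byte, e.g. hamming(0xF0) == 4."""
    return bin(byte_value).count("1")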
Example #10
import numpy as np
import matplotlib.pyplot as plt

a = np.random.randn(400, 2)
print(a)
m = a.mean(0)
print(m, m.shape)

plt.plot(a[:, 0], a[:, 1], 'o', markersize=5, alpha=0.50)
plt.plot(m[0], m[1], 'ro', markersize=10)
plt.show()
Example #11
#!/usr/bin/env python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-i', type=argparse.FileType('r'))
parser.add_argument('-o')
args = parser.parse_args()

data = np.loadtxt(args.i)
plt.plot(data[:, 0], data[:, 1])
plt.savefig(args.o)
Example #12
# initialize time and x and y expenditure at initial time
t_0 = 0
init_data = np.array([14, 5])

# starting RK45 integration method
sys_1 = integrate.RK45(model, t_0, init_data, 1000, 0.001)

# storing initial data
sol_x = [sys_1.y[0]]
sol_y = [sys_1.y[1]]
time = [t_0]

for i in range(5000):
    sys_1.step()  # performing integration step
    sol_x.append(
        sys_1.y[0]
    )  # storing the results in our solution list, y is the attribute current state
    sol_y.append(sys_1.y[1])
    time.append(sys_1.t)

plt.figure(figsize=(20, 10))

# plotting results in a graph
plt.plot(time, sol_x, 'b--', label='Country A')
plt.plot(time, sol_y, 'r--', label='Country B')
plt.ylabel('Military Expenditure (billions USD)', fontsize=16)
plt.xlabel('Time (years)', fontsize=16)
plt.legend(loc='best', fontsize=22)
plt.title('Simple Arms Race: Aggressive vs. Passive', fontsize=28)
plt.show()
    d['timeInt'] = time.mktime(d['timeStruct'])
    datasetWithTimeValues.append(d)

from collections import defaultdict
weekRatings = defaultdict(list)
for d in datasetWithTimeValues:
    day = d['timeStruct'].tm_wday
    weekRatings[day].append(d['stars'])
weekAverages = {}
for d in weekRatings:
    weekAverages[d] = sum(weekRatings[d]) * 1.0 / len(weekRatings[d])
weekAverages
X = list(weekAverages.keys())
Y = [weekAverages[x] for x in X]
import matplotlib.pyplot as plt
plt.plot(X, Y)
plt.bar(X,Y)
# zoom in more to see the detail
plt.ylim(3.6, 3.8)
plt.bar(X, Y)

plt.ylim(3.6,3.8)
plt.xlabel("Weekday")
plt.ylabel("Rating")
plt.xticks([0,1,2,3,4,5,6],['S','M','T','W','T','F','S'])
plt.title("Rating as a function of weekday")
plt.bar(X,Y)

#L4 Live-coding: MatPlotLib
path = "datasets/yelp_data/review.json"
f = open(path,'r',encoding = 'utf8')
Example #14
df[['salary', 'name']] #selects multiple columns.
df['salary'].max() #shows maximum value in column.
df.describe() #shows numerical columns data within dataframe (count, mean, std, min, 25%, 50%, 75%, max).
my_salary = df['salary'] > 60000
df[df['salary'] > 60000] #or equivalently: df[my_salary]
df.as_matrix() #returns numpy array (deprecated; prefer df.values or df.to_numpy()).

#Data Visualization Reference.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline #jupyter notebook only. the line below is for everything else.
plt.show()
x = np.arange(0, 10)
y = x ** 2
plt.plot(x, y, 'red') #shows red line.
plt.plot(x, y, '*') #shows stars on graph.
plt.plot(x, y, 'r--') #shows red line with dashes.
plt.xlim(0, 4) #shows x-axis limits at 0 and 4.
plt.ylim(0, 10) #shows y-axis limits at 0 and 10.
plt.title("title goes here")
plt.xlabel('x label goes here')
plt.ylabel('y label goes here')
mat = np.arange(0, 100).reshape(10, 10) #makes array.
plt.imshow(mat, cmap = 'RdYlGn')
mat = np.random.randint(0, 1000, (10, 10))
plt.imshow(mat)
plt.colorbar()
df = pd.read_csv('salaries.csv')
df.plot(x = 'salary', y = 'age', kind = 'scatter') #kind could be 'line' or whatever else you need.
Example #15
# y = x + 1 graph

import matplotlib.pyplot as plt

plt.plot([1, 2, 3, 4])
plt.show()
Example #16
    dzdt = np.array([dxdt, dydt])
    return dzdt

# initialize time and x and y expenditure at initial time
t_0 = 0
init_data = np.array([5, 3])

# starting RK45 integration method
sys_1 = integrate.RK45(model, t_0, init_data, 1000, 0.001)

# storing initial data 
sol_x = [sys_1.y[0]]
sol_y = [sys_1.y[1]]
time = [t_0]

for i in range(5000):
    sys_1.step() # performing integration step
    sol_x.append(sys_1.y[0]) # storing the results in our solution list, y is the attribute current state
    sol_y.append(sys_1.y[1])
    time.append(sys_1.t)

plt.figure(figsize=(20, 10))

# plotting results in a graph
plt.plot(time, sol_x, 'b--', label='Country A (aggressive)')
plt.plot(time, sol_y, 'r--', label='Country B (passive)')
plt.ylabel('Military Expenditure (billions USD)', fontsize = 16)
plt.xlabel('Time (years)', fontsize = 16)
plt.legend(loc='best', fontsize = 22)
plt.title('Arms Race: Aggressive vs. Passive', fontsize = 28)
plt.show()
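Both RK45 snippets (Example #12 and the one above) assume numpy, scipy.integrate and matplotlib imports plus a right-hand-side function model(t, z). A minimal Richardson-style arms-race sketch of that function; all coefficient values are illustrative assumptions, not taken from the original code:
# imports assumed by the RK45 snippets above
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt

def model(t, z):
    # Richardson arms-race form: reaction to the rival's spending, fatigue from
    # one's own spending, plus a constant grievance term (all values made up).
    a, b = 2.0, 1.0      # reaction coefficients
    m, n = 3.0, 2.0      # fatigue coefficients
    r, s = 1.0, 1.0      # grievance terms
    x, y = z
    dxdt = a * y - m * x + r
    dydt = b * x - n * y + s
    dzdt = np.array([dxdt, dydt])
    return dzdt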
Example #17
# Get heartrate from FFT
max_val = 0
max_index = 0
for index, fft_val in enumerate(red_fft):
    if fft_val > max_val:
        max_val = fft_val
        max_index = index

heartrate = freqs[max_index] * 60        
print('Estimated Heartrate: {} bpm'.format(heartrate))

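The peak search above is equivalent to a single np.argmax call. A minimal sketch with stand-in data; the real red_fft and freqs are computed earlier, in code not shown here:
import numpy as np

fs = 30.0                                   # stand-in sampling rate (frames per second)
signal = np.random.randn(256)               # stand-in red-channel time series
red_fft = np.abs(np.fft.rfft(signal))
freqs = np.fft.rfftfreq(signal.size, d=1.0 / fs)

heartrate = freqs[np.argmax(red_fft)] * 60  # peak frequency in Hz -> beats per minute
print('Estimated Heartrate: {} bpm'.format(heartrate))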

# Plotting
if PLOT:
    plt.figure(figsize=(16,9))
    plt.plot(x, colors['red'], color='#fc4f30')
    plt.xlabel('Time [s]')
    plt.ylabel('Normalized Pixel Color')
    plt.title('Time-Series Red Channel Pixel Data')
    fig1 = plt.gcf()
    plt.show()
    if SAVE:
        plt.draw()
        fig1.savefig('./{}_time_series.png'.format(filename), dpi=200)
    
    # Plot the highpass data
    plt.figure(figsize=(16,9))
    plt.plot(x_filt, colors['red_filt'], color='#fc4f30')
    plt.xlabel('Time [s]')
    plt.ylabel('Normalized Pixel Color')
    plt.title('Filtered Red Channel Pixel Data')
Example #18
# !/usr/bin/python
# -*- coding: UTF-8 -*-

##########################
# Creator: Javy
# Create Time: 20170416
# Email: [email protected]
# Description: sigmoid
##########################

import matplotlib.pyplot as plt
import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')
plt.axhline(y=0.5, ls='dotted', color='k')
plt.yticks([0.0, 0.5, 1.0])
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel(r'$\phi (z)$')
plt.show()
Example #19
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(5)
steps=3000
learning_rate=0.01
x_data=np.linspace(-1,1,100)[:,np.newaxis]
y_data=np.square(x_data)*0.4+np.random.randn(*x_data.shape)*0.5
x=tf.placeholder(tf.float32,[None,1])
y=tf.placeholder(tf.float32,[None,1])
weight_L1=tf.Variable(tf.random_normal([1,10]))
biases_L1=tf.Variable(tf.zeros([1,10]))
Output_L1=tf.matmul(x,weight_L1)+biases_L1
L1=tf.nn.tanh(Output_L1)
weight_L2=tf.Variable(tf.random_normal([10,1]))
biases_L2=tf.Variable(tf.zeros([1,1]))
Output_L2=tf.matmul(L1,weight_L2)+biases_L2
pred=tf.nn.tanh(Output_L2)
loss=tf.reduce_mean(tf.square(y-pred))
train=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
plt.figure()
plt.scatter(x_data,y_data)
with tf.Session() as sess:
  init=tf.global_variables_initializer()
  sess.run(init)
  for i in range(steps):
    sess.run(train,feed_dict={x:x_data,y:y_data})
  pred_value=sess.run(pred,feed_dict={x:x_data})
  plt.plot(x_data,pred_value)
  plt.show()
Example #20
from matplotlib import pyplot as plt

# In[9]:

x = [1, 2, 3]
y = [1, 4, 9]
z = [10, 5, 0]
plt.plot(x, y)
plt.plot(x, z)
plt.title("test plot")
plt.xlabel("x")
plt.ylabel("y and z")
plt.legend(["this is y", "this is z"])
plt.show()

# In[10]:

import pandas as pd

sample_data = pd.read_csv('sample_data.csv')

# In[11]:

sample_data
    BlackHawk,
    temperature_to_mass,
    MASS_CONVERSION,
)
import matplotlib.pyplot as plt
import numpy as np


def spec_geom(E, T, M):
    gam = 27 * G_NEWTON**2 * M**2 * E**2
    boltz = 1.0 / (np.exp(E / T) - 1.0)
    return gam * boltz / (2.0 * np.pi)


if __name__ == "__main__":
    mpbh_gram = 1.5e18
    mpbh_gev = mpbh_gram * MASS_CONVERSION
    blackhawk = BlackHawk(mpbh_gram)
    blackhawk.run()

    engs = blackhawk.primary["energies"]
    spec = blackhawk.primary["photon"]

    T = temperature_to_mass(mpbh_gev)
    spec2 = spec_geom(engs, T, mpbh_gev)

    plt.figure(dpi=150)
    plt.plot(engs, spec)
    plt.plot(engs, spec2)
    plt.show()
svm_grid = {'kernel': kernel,'C': c,'gamma': gamma,'epsilon': epsilon, 'shrinking' : shrinking}
     
svm = SVR()  # support-vector regression (sklearn has no 'SVM' class)
svm_search = RandomizedSearchCV(svm, svm_grid, scoring='neg_mean_squared_error', cv=3, return_train_score=True, n_jobs=-1, n_iter=40, verbose=1)
# fit svm_search on the training data before inspecting best_params_
svm_search.best_params_

svm_confirmed = svm_search.best_estimator_
svm_pred = svm_confirmed.predict(future_forecast)

svm_confirmed
svm_pred

# check against testing data

svm_test_pred = svm_confirmed.predict(x_test_confirmed)
plt.plot(svm_test_pred)
plt.plot(y_test_confirmed)
print('MAE:', mean_absolute_error(svm_test_pred, y_test_confirmed))
print('MSE:', mean_squared_error(svm_test_pred, y_test_confirmed))

# Total Number of coronavirus cases over time
 
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, world_cases)
plt.title('Number of Coronavirus Cases Over Time',size=30)
plt.xlabel('Days Since 1/22/2020',size=30)
plt.ylabel('Number of Cases',size=30)
plt.xticks(size=15)
plt.yticks(size=15)

Example #23
    accuracy = tf.reduce_mean( tf.cast(correct_prediction, 'float') )
    
    #train
    with tf.Session() as sess:
        sess.run(init)
        
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(n_sample/batch_size)
            
            for i in range(total_batch):
                _, c = sess.run([optimizer, cost], feed_dict={x: XData[i*batch_size:(i+1)*batch_size, :],
                                                              y: YData[i*batch_size:(i+1)*batch_size, :]})
                avg_cost += c / total_batch
            
            plt.plot(epoch+1, avg_cost, 'o')  # a single point needs a marker to be visible
            
            if epoch % display_step == 0:
                print('Epoch:', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(avg_cost))
    
    print('Model Training Finished!')
Example #24
import tushare as ts
import talib
import matplotlib.pyplot as plt


df = ts.get_k_data('600050', start='2019-03-1', end='2019-03-03')
# extract the closing prices
closed = df['close'].values
# get moving-average data; the timeperiod argument selects the 5-, 10- and 20-day MAs
ma5 = talib.SMA(closed, timeperiod=5)
ma10 = talib.SMA(closed, timeperiod=10)
ma20 = talib.SMA(closed, timeperiod=20)

# print each series
print(closed)
print(ma5)
print(ma10)
print(ma20)

# the plot function makes it easy to draw each moving average
plt.plot(closed)
plt.plot(ma5)
plt.plot(ma10)
plt.plot(ma20)
# add a grid (optional, just makes the chart look a bit nicer)
plt.grid()
# remember this line, otherwise the figure will not be displayed
plt.show()
Example #25
print(randomArray(20))
import timeit
import matplotlib.pyplot as plt

maxN = 100
times = []
for n in range(1, maxN + 1):
    setup = '''
from __main__ import randomArray, buildinSort
array = randomArray({})
    '''.format(n)

    prog = '''
buildinSort(array)
    '''
    t = timeit.Timer(prog, setup)
    times.append(t.timeit(1000) / 1000)
    print(f'For an array of the length {n}: {times[-1]}')

fig = plt.figure()
plt.plot(range(1, maxN + 1), times, label="buildin")
a = 1e-7
plt.plot(range(1, maxN + 1), [a * n for n in range(1, maxN + 1)], label="a*n")
plt.legend()
plt.show()


#matplotlib 




# zeitmessung (time measurement)
# Inner product of vectors
print(a.dot(b))
print(np.dot(a, b))

# Matrix / vector product; both produce the rank 1 array [29 67]
print(c.dot(d))
print(np.dot(c, d))

# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
#  [43 50]]
print(a.dot(c))
print(np.dot(b, d))

# In[24]:

import numpy as np
from matplotlib import pyplot as plt

x = np.arange(1, 11)
y = 2 * x + 5

plt.title("Matplotlib demo")
plt.xlabel("x axis caption")
plt.ylabel("y axis caption")
plt.plot(x, y, "ob")
plt.show()

# In[ ]:
Example #27
        else:
            params_unreg.append(p)

    optimizer = torch.optim.SGD([{'params': params_reg, 'weight_decay': 1e-5}, {'params': params_unreg}], lr=current_lr)

    return optimizer

def plot_accuracies(train_top1, train_top5, val_top1, val_top5, SWD):
    ''' Plots the top-1/5 accuracy for each epoch in the training and validation sets '''

    plt.figure()

    epochs = range(len(train_top1))
    lw=1

    plt.plot(epochs, train_top1, color='darkred', lw=lw, linestyle='dashed', label='top-1 (train)')
    plt.plot(epochs, train_top5, color='red', lw=lw, label='top-5 (train)')
    plt.plot(epochs, val_top1, color='darkblue', lw=lw*2, linestyle='dashed', label='top-1 (test)')
    plt.plot(epochs, val_top5, color='blue', lw=lw*2, label='top-5 (test)')

    plt.xlabel('Epoch #', fontsize=20)
    plt.ylabel('Accuracy (%)')

    if SWD:
        filename = 'SWD-results.png'
    else:
        filename = 'SGD-results.png'

    plt.savefig(filename)

class AverageMeter(object):
Example #28
# Get reference values
reference_data = pd.DataFrame.as_matrix(data['Ref AC'])
Y_calib = reference_data[:xxx]
Y_valid = reference_data[xxx:]
 
# Get spectra
X_calib = pd.DataFrame.as_matrix(data.iloc[:422, 2:])
X_valid = pd.DataFrame.as_matrix(data.iloc[423:, 2:])
 
# Get wavelengths (They are in the first line which is considered a header from pandas)
wl = np.array(list(data)[2:])
    
# Plot spectra
plt.figure(figsize=(8,4.5))
with plt.style.context(('ggplot')):
    plt.plot(wl, X_calib.T)
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('Absorbance')    
plt.show()

# Calculate derivatives
X2_calib = savgol_filter(X_calib, 17, polyorder = 2,deriv=2)
X2_valid = savgol_filter(X_valid, 17, polyorder = 2,deriv=2)
 
# Plot second derivative
plt.figure(figsize=(8,4.5))
with plt.style.context(('ggplot')):
    plt.plot(wl, X2_calib.T)
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('D2 Absorbance')
plt.show()
for index in range(NumSteps):
    TimeStep.append(
        (100.0 / 8.0) / dT)  #8 CP values that need time allocated to them
    CPStep.append((2.0 / 10.0) / dCP)
    LambdaVals.append(int(Lambda + LambdaIncrement))

Counter = 0

Cost = CalculateCost(TimeStep, CPStep, LambdaVals)
CostTracker = []
CostTracker.append(Cost)

while Counter < 10000000:

    TimeStep, CPStep, LambdaVals = RandomPerturbation(TimeStep, CPStep,
                                                      LambdaVals)

    TestCost = CalculateCost(TimeStep, CPStep, LambdaVals)

    if TestCost < Cost:
        Cost = TestCost
        CostTracker.append(Cost)

    Counter = Counter + 1

SaveProtocol(TimeStep, CPStep, LambdaVals)

plt.plot(CostTracker)
plt.show()
plt.close()
Example #30
import matplotlib.pyplot as plt

years = [
    1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005,
    2010, 2015
]

pops = [2.5, 2.7, 3, 3.3, 3.6, 4, 4.4, 4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
death = [1.2, 1.7, 1.8, 2.2, 2.5, 2.7, 2.9, 3, 3.1, 3.3, 3.5, 3.8, 4.0, 4.3]
'''
plt.plot(years, pops,'---', color=(255/255, 100/255, 100/255))
plt.plot(years, death, color=(.6, .6, .1))
'''
lines = plt.plot(years, pops, years, death)
plt.grid(True)

plt.setp(lines, color=(1, .4, .4), marker='o')

plt.ylabel("Population in Billions")
plt.xlabel("Population growth by Year")
plt.title("Population Growth")
plt.show()
Example #31
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 10, 10)
y = np.sin(x)
plt.plot(x, y)
plt.show()
plt.plot(x, y)
plt.xlabel("Time")
plt.ylabel("Some function of time")
plt.title("sin")
plt.show()
x = np.linspace(0, 10, 100)
y = np.sin(x)
plt.plot(x, y)
plt.show()