Example #1
import matplotlib.pyplot as plt


# Plot a training metric and its validation counterpart from a Keras History
def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
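
# Hypothetical usage sketch (assumes a compiled Keras model; 'accuracy' must be
# one of the metrics recorded during model.fit):
# history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=10)
# plot_graphs(history, 'accuracy')
# plot_graphs(history, 'loss')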
Example #2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVR


def predict_prices(dates, prices, x):
    dates = np.reshape(dates, (len(dates), 1))

    svr_lin = SVR(kernel='linear', C=1e3)
    svr_poly = SVR(kernel='poly', C=1e3, degree=2)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)

    svr_lin.fit(dates, prices)
    svr_poly.fit(dates, prices)
    svr_rbf.fit(dates, prices)

    plt.scatter(dates, prices, color='black', label='data')
    plt.plot(dates, svr_rbf.predict(dates), color='red', label='RBF model')
    plt.plot(dates,
             svr_lin.predict(dates),
             color='green',
             label='Linear model')
    plt.plot(dates,
             svr_poly.predict(dates),
             color='blue',
             label='Polynomial model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()

    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
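
# Hypothetical usage sketch (dates as day numbers; the query point x must be
# 2-D, e.g. [[5]], to match scikit-learn's predict() input shape):
# rbf_p, lin_p, poly_p = predict_prices([1, 2, 3, 4], [100.0, 101.5, 99.8, 102.2], [[5]])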
Example #3
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import choice


def GenerateOutcomes(x, z, num_cont, num_bin):
    """
    Following the generating procedure defined by Madras in Algorithm 2
    """
    # Constants as defined by Madras
    num_z = z.shape[1]
    w = -11
    beta_a = 6

    # Algorithm 2
    # horizontal concatenation
    xz = np.concatenate((x, z), 1)
    W = np.ones(xz.shape[1])*.5

    # lists to store generated values
    y_t0_a0, y_t1_a0, y_t0_a1, y_t1_a1 = list(), list(), list(), list()
    mu_t0_a0, mu_t1_a0, mu_t0_a1, mu_t1_a1 = list(), list(), list(), list()

    # loop over observations because all need individual beta sample
    for obs in xz:
        # sample new beta
        beta_cont = choice([0, .1, .2, .3, .4], num_cont, p=[.5, .125, .125, .125, .125])
        beta_bin = choice([0, .1, .2, .3, .4], num_bin, p=[.6, .1, .1, .1, .1])

        beta_z = choice([.4, .6], num_z, p=[.5, .5])
        # in x, continuous variables come first
        beta = np.concatenate((beta_cont, beta_bin, beta_z), 0)

        # calculate y dist
        mu1 = np.matmul(np.exp(obs + W), beta)
        mu_t0_a0.append(mu1)
        mu2 = np.matmul(obs, beta)-w
        mu_t1_a0.append(mu2)
        mu3 = np.matmul(np.exp(obs + W), beta) + beta_a
        mu_t0_a1.append(mu3)
        mu4 = np.matmul(obs, beta) - w + beta_a
        mu_t1_a1.append(mu4)
        # sample new y
        y_t0_a0.append(np.random.normal(mu1, 1, 1)[0])
        y_t1_a0.append(np.random.normal(mu2, 1, 1)[0])
        y_t0_a1.append(np.random.normal(mu3, 1, 1)[0])
        y_t1_a1.append(np.random.normal(mu4, 1, 1)[0])

    plt_entries = {'y_t0_a0': y_t0_a0, 'y_t1_a0': y_t1_a0, 'y_t0_a1': y_t0_a1, 'y_t1_a1': y_t1_a1}
    plt.figure()
    plt.title('Generated data')

    for label, entry in plt_entries.items():
        plt.hist(entry, label=label, alpha=0.5, bins=20)
    plt.legend()
    plt.show()

    y_all = np.transpose(np.vstack((y_t0_a0, y_t1_a0, y_t0_a1, y_t1_a1)))
    mu_all = np.transpose(np.vstack((mu_t0_a0, mu_t1_a0, mu_t0_a1, mu_t1_a1)))

    # column names should be consistent with above vstack
    y_column = 'y_t0_a0, y_t1_a0, y_t0_a1, y_t1_a1'
    mu_column = 'mu_t0_a0, mu_t1_a0, mu_t0_a1, mu_t1_a1'
    return y_all, mu_all, y_column, mu_column
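
# Hypothetical usage sketch (synthetic covariates; continuous columns of x
# come first, as the beta concatenation above assumes):
# x = np.hstack([np.random.normal(size=(100, 3)), np.random.binomial(1, .5, (100, 2))])
# z = np.random.binomial(1, .5, (100, 1))
# y_all, mu_all, y_column, mu_column = GenerateOutcomes(x, z, num_cont=3, num_bin=2)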
Example #4
import matplotlib.pyplot as plt


def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
Example #5
def visual_inspection(raw_signal_list,
                      filtered_signal_list,
                      begin_sec, end_sec):
    import matplotlib.pyplot as plt
    
    for raw_signal, filtered_signal in zip(raw_signal_list,
                                           filtered_signal_list):
        plt.figure(figsize=(20, 20))
        plt.plot(raw_signal.T)
        plt.plot(filtered_signal.T)
        plt.xlim(begin_sec * 1000, end_sec * 1000)
        plt.legend(['raw', 'filtered'])
        plt.show()
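
# Hypothetical usage sketch (signals assumed sampled at 1 kHz, hence the
# seconds-to-samples factor of 1000 in plt.xlim):
# visual_inspection([raw_sig], [filtered_sig], begin_sec=0, end_sec=10)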
Example #6
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(
    np.arange(start=X_set[:, 0].min() - 1,
              stop=X_set[:, 0].max() + 1,
              step=0.01),
    np.arange(start=X_set[:, 1].min() - 1,
              stop=X_set[:, 1].max() + 1,
              step=0.01))
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(),
                                         X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75,
             cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0],
                X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i),
                label=j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(
    np.arange(start=X_set[:, 0].min() - 1,
              stop=X_set[:, 0].max() + 1,
              step=0.01),
    np.arange(start=X_set[:, 1].min() - 1,
              stop=X_set[:, 1].max() + 1,
              step=0.01))
plt.contourf(X1,
             X2,
             classifier.predict(np.array([X1.ravel(),
                                          X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75,
             cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0],
                X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i),
                label=j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
Example #7
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate

# initialize time and x and y expenditure at initial time
t_0 = 0
init_data = np.array([14, 5])
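
# The ODE right-hand side `model` is not defined in this snippet. A minimal
# sketch, assuming a Richardson-style arms-race system (all coefficients are
# hypothetical placeholders):
def model(t, y):
    # dx/dt = a*y - m*x + g  (reaction to rival, fatigue, grievance)
    # dy/dt = b*x - n*y + h
    a, m, g = 0.5, 0.3, 1.0
    b, n, h = 0.2, 0.4, 0.5
    return np.array([a * y[1] - m * y[0] + g,
                     b * y[0] - n * y[1] + h])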

# starting RK45 integration method
sys_1 = integrate.RK45(model, t_0, init_data, 1000, 0.001)

# storing initial data
sol_x = [sys_1.y[0]]
sol_y = [sys_1.y[1]]
time = [t_0]

for i in range(5000):
    sys_1.step()  # performing integration step
    sol_x.append(
        sys_1.y[0]
    )  # storing the results in our solution list, y is the attribute current state
    sol_y.append(sys_1.y[1])
    time.append(sys_1.t)

plt.figure(figsize=(20, 10))

# plotting results in a graph
plt.plot(time, sol_x, 'b--', label='Country A')
plt.plot(time, sol_y, 'r--', label='Country B')
plt.ylabel('Military Expenditure (billions USD)', fontsize=16)
plt.xlabel('Time (years)', fontsize=16)
plt.legend(loc='best', fontsize=22)
plt.title('Simple Arms Race: Aggressive vs. Passive', fontsize=28)
plt.show()
Example #8
import random

import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

visual_confirmed_cases.append(others)


# Visualize the 10 countries
plt.figure(figsize=(32, 18))
plt.barh(visual_unique_countries, visual_confirmed_cases)
plt.title('Number of Covid-19 Confirmed Cases in Countries/Regions', size=20)
plt.show()


# Create a pie chart to see the total confirmed cases in 10 different countries
c = random.choices(list(mcolors.CSS4_COLORS.values()), k=len(unique_countries))
plt.figure(figsize=(20, 20))
plt.title('Covid-19 Confirmed Cases per Country')
plt.pie(visual_confirmed_cases, colors=c)
plt.legend(visual_unique_countries, loc='best')
plt.show()


# Create a pie chart to see the total confirmed cases in 10 different countries outside china

c = random.choices(list(mcolors.CSS4_COLORS.values()), k=len(unique_countries))
plt.figure(figsize=(20, 20))
plt.title('Covid-19 Confirmed Cases in Countries Outside of Mainland China')
plt.pie(visual_confirmed_cases[1:], colors=c)
plt.legend(visual_unique_countries[1:], loc='best')
plt.show()

# Building the SVM model

kernel = ['poly', 'sigmoid', 'rbf']
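
# The snippet is truncated here. A minimal sketch of how such a kernel list is
# typically fed into a grid search (hypothetical names, assuming scikit-learn):
# from sklearn.svm import SVC
# from sklearn.model_selection import GridSearchCV
# svm_search = GridSearchCV(SVC(), {'kernel': kernel, 'C': [0.1, 1, 10]})
# svm_search.fit(X_train, y_train)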
Example #9
# In[4]:

from matplotlib import pyplot as plt

# In[9]:

x = [1, 2, 3]
y = [1, 4, 9]
z = [10, 5, 0]
plt.plot(x, y)
plt.plot(x, z)
plt.title("test plot")
plt.xlabel("x")
plt.ylabel("y and z")
plt.legend(["this is y", "this is z"])
plt.show()

# In[10]:

import pandas as pd

sample_data = pd.read_csv('sample_data.csv')

# In[11]:

sample_data

# In[12]:

type(sample_data)

# In[15]:
Example #10
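# The snippet omits its setup; percentage() below is a minimal sketch of the
# missing helper, and the matplotlib import is assumed:
import matplotlib.pyplot as plt


def percentage(part, whole):
    # share of `part` in `whole`, in percent
    return 100 * float(part) / float(whole)
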
positive = percentage(positive, noOfSearchTerms)
negative = percentage(negative, noOfSearchTerms)
neutral = percentage(neutral, noOfSearchTerms)

positive = format(positive, '.2f')
negative = format(negative, '.2f')
neutral = format(neutral, '.2f')

print("How are poeple reacting on " + searchTerm + " by analyzing " +
      str(noOfSearchTerms) + "Tweets.")

if (polarity == 0.00):
    print("Neutral")
elif (polarity < 0.00):
    print("Negative")
elif (polarity > 0.00):
    print("Positive")

labels = [
    'Positive [' + str(positive) + '%]', 'Neutral [' + str(neutral) + '%]',
    'Negative [' + str(negative) + '%]'
]
# format() above returns strings, so convert back to numbers for the pie chart
sizes = [float(positive), float(neutral), float(negative)]
colors = ['yellowgreen', 'gold', 'red']
patches, texts = plt.pie(sizes, colors=colors, startangle=90)
plt.legend(patches, labels, loc="best")
plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' +
          str(noOfSearchTerms) + ' Tweets.')
plt.axis('equal')
plt.tight_layout()
plt.show()
Example #11
percent_popular = len(np_ratings[popular_apps]) / len(np_ratings) * 100
print("percent_popular", percent_popular)

unpopular_apps = np_ratings < 4
print("unpopular count", len(np_ratings[unpopular_apps]))

percent_unpopular = len(np_ratings[unpopular_apps]) / len(np_ratings) * 100
print("percent_unpopular", percent_unpopular)

somewhat_popular = 100 - (percent_popular + percent_unpopular)
print("somewhat_popular", somewhat_popular)

# do a visualization with our new data
labels = "Sucks", "Meh", "Love it!"
sizes = [percent_unpopular, somewhat_popular, percent_popular]
colors = ['yellowgreen', 'lightgreen', 'lightskyblue']
explode = (0.1, 0.1, 0.15)

plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)

plt.axis('equal')
plt.legend(labels, loc=1)
plt.title("Do we love our apps?")
plt.xlabel("User Ratings - App Installs (10,000+ apps)")
plt.show()

# print('processed', line_count, 'lines of data')
print(categories)
print('first row of data', installs[0])
print('last row of data', installs[-1])
Example #12
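# Imports and helpers are not shown in this snippet; a minimal sketch follows
# (the Serial wrapper and the metaname attribute key are assumptions):
import binascii

import h5py
import numpy as np
import matplotlib.pyplot as plt
from serial import Serial  # assumption: a pyserial-style interface

metaname = 'ciphertext'  # assumed name of the HDF5 attribute holding the ciphertext hex


def hamming(value):
    # Hamming weight: number of set bits in the byte
    return bin(value).count('1')
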
def dpa_setup(ser):

    ser = Serial("/embsec/dpa_lab/dpa_setup")

    datafile = h5py.File('aes_decrypt_powertraces_test_target.hdf5', 'r')
    datasets = datafile.keys()

    init = True

    # Lists of traces in partitions A and B, indexed by key candidate.
    # Individual traces will be numpy arrays!
    partA_buf = []
    partB_buf = []

    # Number of traces in each partition, indexed by byte under examination
    partA_cnt = []
    partB_cnt = []
    avg_buf = None  # average of all traces
    avg_cnt = 0

    trim = False

    skeycan = 0  # the index to the sub-key of the round 10 key we are examining!

    # The loop below iterates through all traces in the file, and performs 16 key guesses on
    # the key byte indexed by skeycan. So, this performs DPA for 16 key guesses of just one
    # byte of the (round 10) key. If you want to keep this current code structure, you will
    # need to manually change the for loop bounds to perform more guesses. You will also
    # need to change skeycan to test out other sub-key bytes.

    for name in datasets:  # iterate through all traces in the hdf5 file
        print("Processing: %s" % name)
        ds = datafile[name]
        trace = np.array(ds)  # this is your current trace, as a numpy array

        ciphertext_hex = ds.attrs[metaname]
        ciphertext = binascii.unhexlify(ciphertext_hex)

        # If requested, truncate the trace before analysis.
        # This can be used to cut out problematic noisy sections while
        # accelerating computation and reducing memory needs (great for key attacks)
        if trim:
            trace = trace[:trim]

        if init:  # sets up the partition buffers initially
            for x in range(16):  # just work on 16 possible key bytes, for now.
                partA_buf.append(0 * trace)  # initialize all 'traces' to zero
                partB_buf.append(0 * trace)
                partA_cnt.append(0)
                partB_cnt.append(0)
            avg_buf = 0 * trace
            init = False

        for x in range(16):  # just work on 16 key candidates, more is too slow

            ham = hamming(ciphertext[skeycan])  # hmmm ... is this what we want?

            if ham > 4:
                # add the trace to the partition for this key candidate
                partA_buf[x] += trace
                # increment the count for this partition and key candidate
                partA_cnt[x] += 1
            elif ham < 4:
                partB_buf[x] += trace
                partB_cnt[x] += 1

        avg_buf += trace
        avg_cnt += 1

    result = dict()

    avg_buf = avg_buf / avg_cnt
    result['avg trace'] = avg_buf
    result['trace cnt'] = avg_cnt

    absmax = []
    for x in range(16):
        means = (partA_buf[x] / partA_cnt[x]) - (partB_buf[x] / partB_cnt[x])
        result[x] = means
        absmax.append(np.max(np.abs(means)))
    result['absmax'] = absmax

    # Plot the maximum value of the absolute value of each DPA hypothesis
    plt.figure()
    plt.title("AbsMax of DPA Hypotheses (%d traces)" % result['trace cnt'])
    plt.plot(result['absmax'])

    # Plot the mean trace and all DPA Ciphertext Byte outputs
    plt.figure()
    plt.plot(result['avg trace'], label='Mean Trace')

    dpaPlotScale = 20
    for x in range(16):
        plt.plot(np.abs(result[x]) * dpaPlotScale, label="CT DPA Byte %d" % x)
    plt.legend(loc='upper right')
    plt.title("Ciphertext (CT) DPA Results (%d traces)" % result['trace cnt'])
    plt.show()

    # The next couple lines are to send the key you found / get a flag (if applicable)
    key_answer = bytes(16)  # your key you found! As a byte array
    ser.write(key_answer)

    return ser.read_until()