def plot_bar_pdf(self):

        """Function to plot the pdf of the binomial distribution
        
        Args:
            None
        
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
            
        """
    
        x = []
        y = []
        
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))
            
        plt.bar(x, y)
        plt.title("Result Distribution")
        plt.xlabel("result")
        plt.ylabel("probability")
        
        return x, y
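
These plot_bar_pdf examples rely on a pdf() method defined elsewhere in the class; a minimal sketch of such a helper, assuming the instance stores n and p (illustrative only, not the original implementation):

    def pdf(self, k):
        # binomial probability mass at k: C(n, k) * p**k * (1 - p)**(n - k)
        # requires `import math`; math.comb is available from Python 3.8
        return math.comb(self.n, k) * self.p ** k * (1 - self.p) ** (self.n - k)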
Example #2
 def plot(self, x, n, p):
     # scipy.stats.beta is a continuous distribution, so pdf() is used rather than pmf()
     pdf = beta.pdf(x, n, p)
     plt.plot(x, pdf, 'o-')
     plt.title('Beta: n=%i , p=%.2f' % (n, p))
     plt.xlabel('Number of successes')
     plt.ylabel('Probability of Successes')
     plt.show()
Example #3
    def plot_bar_pdf(self):
        """Function to plot the pdf of the binomial distribution
        
        Args:
            None
        
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
            
        """

        # TODO: Use a bar chart to plot the probability density function from
        # k = 0 to k = n

        #   Hint: You'll need to use the pdf() method defined above to calculate the
        #   density function for every value of k.

        #   Be sure to label the bar chart with a title, x label and y label

        #   This method should also return the x and y values used to make the chart
        #   The x and y values should be stored in separate lists
        x_pos = list(range(self.n + 1))
        y = [self.pdf(i) for i in x_pos]
        plt.bar(x_pos, y)
        plt.title("Probability Density Function Bar Chart")
        plt.xlabel("k")
        plt.ylabel("density function")

        return x_pos, y
Example #4
    def map_channel(self, filePath, xsections):
        """
        Creates a map of the channel using longitudinal profile data,
        with markers indicating where any and all cross-sections have
        been taken.
        
        Parameters
        -----------
        filePath:  A string path to the channel domain (the .channel file).
        xsections: A list of string paths to the cross-section domains
                   that will be marked on the map (the .xsection files).
        -----------

        Example formats for the input domains are located in dataFileTemplates.
        """
        xSectionList = []
        
        for path in xsections:
            with open(path,'r') as f_in:
                next(f_in)
                next(f_in)
                coordString = f_in.readline()
                coordString = coordString[:-1]
                coordList = coordString.split(',')
                coords = [float(number) for number in coordList]
                xSectionList.append(coords)
        xCoordPoints = [coord[0] for coord in xSectionList]
        yCoordPoints = [coord[1] for coord in xSectionList]
            
        plt.figure(num=4)
        plt.plot(self.x, self.y, 'k', xCoordPoints, yCoordPoints, 'ro')
        plt.xlabel('Easting')
        plt.ylabel('Northing')
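
Based on the parsing above, each .xsection file is assumed to begin with two header lines (skipped with next(f_in)) followed by a single comma-separated coordinate line; a hypothetical example with purely illustrative values:

    cross-section name
    easting,northing
    451234.5,4897654.3

Only the first two numbers (easting and northing) are used to place the red markers on the map.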
Example #5
    def plot_bar(self):
        """Function to output a histogram of the instance variable data using 
        matplotlib pyplot library.
        
        Args:
            None
            
        Returns:
            None
        """

        # TODO: Use the matplotlib package to plot a bar chart of the data
        #       The x-axis should have the value zero or one
        #       The y-axis should have the count of results for each case
        #
        #       For example, say you have a coin where heads = 1 and tails = 0.
        #       If you flipped a coin 35 times, and the coin landed on
        #       heads 20 times and tails 15 times, the bar chart would have two bars:
        #       0 on the x-axis and 15 on the y-axis
        #       1 on the x-axis and 20 on the y-axis

        #       Make sure to label the chart with a title, x-axis label and y-axis label
        x_pos = [1, 0]
        y = [self.n * self.p, self.n * (1 - self.p)]
        plt.bar(x_pos, y)
        plt.title("Binomia Distribution Bar Chart")
        plt.xlable("senario")
        plt.ylable("occurrences")
    def plot_bar(self):
        """Function to output a histogram of the instance variable data using 
        matplotlib pyplot library.
        
        Args:
            None
            
        Returns:
            None
        """

        # TODO: Use the matplotlib package to plot a bar chart of the data
        #       The x-axis should have the value zero or one
        #       The y-axis should have the count of results for each case
        #
        #       For example, say you have a coin where heads = 1 and tails = 0.
        #       If you flipped a coin 35 times, and the coin landed on
        #       heads 20 times and tails 15 times, the bar chart would have two bars:
        #       0 on the x-axis and 15 on the y-axis
        #       1 on the x-axis and 20 on the y-axis

        #       Make sure to label the chart with a title, x-axis label and y-axis label
        plt.hist(self.data)
        plt.xlabel('value')
        plt.ylabel('amount')
        plt.title('Bi-Histogram')
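
The TODO above describes plotting the observed count of each outcome rather than expected counts; a minimal sketch of that counting approach, assuming self.data is a list of 0/1 outcomes (the method name is illustrative):

    def plot_bar_counts(self):
        # count observed outcomes, e.g. tails = 0 and heads = 1
        zeros = self.data.count(0)
        ones = self.data.count(1)
        plt.bar([0, 1], [zeros, ones])
        plt.title("Bar Chart of Outcomes")
        plt.xlabel("outcome")
        plt.ylabel("count")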
def plot_line(x, y, b):
    plt.scatter(x, y, color="m", marker="o", s=30)
    y_pred = b[0] + b[1] * x
    plt.plot(x, y_pred, color="g")
    plt.xlabel("x")
    plt.ylable("y")
    plt.show()
Example #8
 def plot(self, x, n, p):
     pmf = hypergeom.pmf(x, n, p)
     plt.plot(x, pmf, 'o-')
     plt.title('HyperGeometric: n=%i , p=%.2f' % (n, p))
     plt.xlabel('Number of successes')
     plt.ylabel('Probability of Successes')
     plt.show()
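
Note that scipy.stats.hypergeom is parameterized by population size M, number of success states n, and number of draws N, so its pmf takes four arguments; a minimal corrected sketch under that assumption (the values are illustrative):

from scipy.stats import hypergeom
import numpy as np
import matplotlib.pyplot as plt

M, n, N = 50, 20, 10              # population size, success states, draws
k = np.arange(0, N + 1)           # possible numbers of observed successes
plt.plot(k, hypergeom.pmf(k, M, n, N), 'o-')
plt.xlabel('Number of successes')
plt.ylabel('Probability')
plt.show()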
Example #9
 def printPlot(self):
     plt.plot(self.df['סיכום ברוטו'],self.df['מס הכנסה'])
     plt.xlabel('Gross total')     # original label: 'סיכום ברוטו'
     plt.ylabel('Income tax')      # original label: 'מס הכנסה'
     plt.title('Wage/tax ratio')   # original title: 'יחס שכר\מס'
     plt.legend()
     print(self.df)
 def plot(self, epochs, title="Learning Rate Schedule"):
     lrs = [self(i) for i in epochs]
     plt.style.use("ggplot")
     plt.figure()
     plt.plot(epochs, lrs)
     plt.title(title)
     plt.xlabel("Epoch #")
     plt.ylable("Learning Rate")
def plot_matches_by_team():
    y = ipl_df[['batting_team','match_code']].groupby(['batting_team']).agg('nunique')
    x = np.arange(len(y.index))
    plt.bar(x, y['match_code'])
    plt.xlabel('Team Names')
    plt.ylabel('Matches Played')
    plt.xticks(x,y.index.values,rotation = 90)
    plt.show()
 def plot_image(self):
     plt.scatter(self.sublevels, self.commits)
     plt.ylabel('fix commits')
     plt.xlabel('kernel sublevel')
     plt.savefig("sublevel_%s.png" % self.rev)
     plt.clf()
     plt.scatter(self.release_hours, self.commits)
     plt.ylabel('fix commits')
     plt.xlabel('hours')
     plt.savefig("hours_%s.png" % self.rev)
Example #13
 def scan(self):
     param = self.parameters.widget.get()
     filename = param['Filename']
     F = open(filename + '.dat', 'w')
     f = filename + 'wavelength.dat'
     F2 = open(f, 'w')
     start_wavelength = param['Start'].magnitude * 1e9
     stop_wavelength = param['Stop'].magnitude * 1e9
     speed = param['Speed'].magnitude * 1e9
     n = param['Num Scan']
     self.spec = []
     with Client(self.conn1) as dlc:
         dlc.set("laser1:ctl:scan:wavelength-begin", start_wavelength)
         dlc.set("laser1:ctl:scan:wavelength-end", stop_wavelength)
         dlc.set("laser1:ctl:scan:speed", speed)
         dlc.set("laser1:ctl:scan:microsteps", True)
         dlc.set("laser1:ctl:scan:shaple", 1)  #0=Sawtooth,1=Triangle
         dlc.set("laser1:ctl:scan:trigger:output-enabled", True)
         for x in range(n - 1):
             dlc.set("laser1:ctl:wavelength-set", start_wavelength)
             dlc.set("laser1:ctl:scan:trigger:output-threshold",
                     start_wavelength + 0.1)
              while True:
                  st = dlc.get("io:digital-out2:value-act")
                  if not st:
                      break
             dlc.set("laser1:ctl:scan:trigger:output-threshold",
                     stop_wavelength)
             time.sleep(0.5)
             act_start = self.wm.measure_wavelength()
             dlc.exec("laser1:ctl:scan:start")
             daq.start()
             if dlc.get("io:digital-out2:value-act"):
                 dlc.exec("laser1:ctl:scan:pause")
                 data = daq.read(nidaqmx.constants.READ_ALL_AVAILABLE)
                 daq.wait_until_done()
                 self.xs.append(data)
                 daq.stop()
                 act_stop = self.wm.measure_wavelength()
                 print('%d scan: act start = %f, act stop = %f' %
                       (n, act_start, act_stop))
          # average the collected scans (self.xs is a list of arrays)
          self.spec = np.mean(np.asarray(self.xs), axis=0)
          self.wl = np.linspace(act_start, act_stop, len(self.spec))
          plt.plot(self.wl, self.spec)
          plt.xlabel('wavelength/nm')
          plt.ylabel('transmission')
         for item in self.spec:
             F.write("%f," % item)
         F.write("\n")
         for item in self.wl:
             F.write("%f," % item)
         return
Example #14
def makeGraph(data, loan):
    xcor = []
    ycor = []
    for point in data:
        xcor.append(point[0])
        ycor.append(point[1])
    pyplot.plot(xcor, ycor)
    pyplot.title(
        str(100 * loan['intrest']) + "% Interest With $" +
        str(loan['monthly']) + " Monthly Payments")
    pyplot.xlabel("Month")
    pyplot.ylabel("Principal")
    pyplot.show()
def q04_boxplot(path,
                x='month',
                y='Sales',
                kind='box',
                order=[
                    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
                    'Sep', 'Oct', 'Nov', 'Dec'
                ],
                size=8):
    train, validation = q02_data_splitter(path)
    sns.factorplot(data=train, x=x, y=y, kind=kind, order=order, size=size)
    plt.xlabel('month')
    plt.ylabel('Sales')
    plt.show()
Example #16
def roc_curve_plot(y_test, pred_proba_c1):
    fprs, tprs, threshold = roc_curve(y_test, pred_proba_c1)

    plt.plot(fprs, tprs, label='ROC')
    plt.plot([0, 1], [0, 1], 'k--', label='Random')

    start, end = plt.xlim()
    plt.xticks(np.round(np.arange(start, end, 0.1), 2))
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('FPR (1 - Specificity)')
    plt.ylabel('TPR (Recall)')
    plt.legend()
    plt.show()
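
A hedged usage sketch for roc_curve_plot, assuming a fitted binary classifier clf and a held-out split X_test, y_test (names are illustrative):

pred_proba_c1 = clf.predict_proba(X_test)[:, 1]   # probability of the positive class
roc_curve_plot(y_test, pred_proba_c1)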
Example #18
def test():
    ctr = Counter()
    db = MongoClient().patents
    dates = pat_dates(db)
    outfn = 'pat_isd_histogram.p'
    ctr.update(dates)
    save_dict(outfn, dict(ctr))
    f = plt.figure()
    f.set_size_inches(18.5, 10.5)
    plt.hist(dates, bins=20)
    plt.xlabel('Date')
    plt.ylabel('Count')
    plt.title('Number of Patents Issued over Time')
    plt.savefig('pat_isd_histogram.png')
def rainbow_plot(station_name, station_series):
    time_series = station_series(station_name)
    time_series_for_each_week = separate_weeks(time_series)

    plt.figure(figsize=(15, 5))
    for week in time_series_for_each_week:
        days, counts = zip(*week)
        days = range(len(counts))
        plt.plot(days, counts)

    plt.xlabel('Day of Week')
    plt.ylabel('Number of Turnstile Entries')
    plt.xticks(np.arange(7), ['St', 'Sn', 'Mo', 'Tu', 'We', 'Th', 'Fr'])
    plt.title('Ridership per day for station %s' % station_name)
Example #20
def get_stock(symbol):
    last_year_date = datetime.strftime(datetime.now() - relativedelta(years=1),
                                       "%Y-%m-%d")
    date = get_last_trading_date()
    url = requests.get(
        'https://www.quandl.com/api/v3/datasets/WIKI/{}.json?start_date={}&end_date={}'
        .format(symbol, last_year_date, date))
    json_dataset = url.json()
    json_data = json_dataset['dataset']['data']
    dates = []
    closing = []
    for day in json_data:
        dates.append(datetime.strptime(day[0], "%Y-%m-%d"))
        closing.append(day[4])
    plt.plot_date(dates, closing, '-')
    plt.title(symbol)
    plt.xlabel('Date')
    plt.ylabel('Stock Price')
    plt.savefig('foo.png')
def describe_year(year):
    filtereddf = df.filter(df['year'] == year).agg({
        'value': 'sum'
    }).withColumnRenamed('sum(value)', 'convictions')
    burough_list = [x[0] for x in filtereddf.toLocalIterator()]
    conviction_list = [x[1] for x in filtereddf.toLocalIterator()]

    plt.figure(figsize=(33, 10))
    plt.bar(burough_list, conviction_list)

    plt.title('Crime for the year: ' + str(year), fontsize=30)
    plt.xlabel('Boroughs', fontsize=30)
    plt.ylabel('Convictions', fontsize=30)

    plt.xticks(rotation=90, fontsize=30)
    plt.yticks(fontsize=30)
    plt.autoscale()

    plt.show()
Example #22
def visualize_the_confusion_matrix(list_z_score_data):
    list_label = [
        'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10',
        'constitution'
    ]

    plt.imshow(list_z_score_data, interpolation='nearest')
    plt.title("可视化混淆矩阵")
    plt.colorbar()

    x_locations = numpy.array(range(len(list_label)))

    plt.xticks(x_locations, list_label, rotation=90)
    plt.yticks(x_locations, list_label)

    plt.ylable("True label")
    plt.xlable("Predicted label")

    # cm = confusion_matrix(y_true, y_pred)
    numpy.set_printoptions(precision=2)
Example #23
def gmeans(X,alpha=0.0001,k=1):
    needtoinc = True
    trialData = X
    fit = KMeans(n_clusters=k)
    initresult = fit.fit(trialData)
    centers = initresult.cluster_centers_
    while(needtoinc):
        needtoinc = False
        i=0
        normTestData = trialData[initresult.labels_ == i]
        normTestData = np.matrix(normTestData)
        pvalue = normalityTest(normTestData)
        if pvalue <= alpha:
            needtoinc = True
            tempresults = KMeans(2)
            tempresults = tempresults.fit(normTestData)
            newcenters = tempresults.cluster_centers_
        else:
            newcenters = centers[i, :]

        k = centers.shape[0]
        for i in range(1, k):
            normTestData = trialData[initresult.labels_ == i]
            normTestData = np.matrix(normTestData)
            pvalue = normalityTest(normTestData)
            if pvalue <= alpha:
                needtoinc = True
                tempresults = KMeans(2)
                tempresults = tempresults.fit(normTestData)
                newcenters = np.vstack((newcenters, tempresults.cluster_centers_))
            else:
                newcenters = np.vstack((newcenters, centers[i,:]))
        centers = newcenters
        initresult = KMeans(centers.shape[0],init=centers).fit(trialData)
        centers = initresult.cluster_centers_
    print('optimal no of clusters:', centers.shape[0])
    x = np.asarray(trialData)
    plt.figure()
    plt.scatter(x[:, 0], x[:, 1], c=initresult.labels_)
    plt.xlabel('x1')
    plt.ylabel('x2')
Example #24
def plot_return_risk():
    ret, vol = return_risk(stocks)
    color = np.array([0.18, 0.96, 0.75, 0.3, 0.9, 0.5])
    plt.scatter(ret,
                vol,
                marker='o',
                c=color,
                s=500,
                cmap=plt.get_cmap('Spectral'))
    plt.xlabel("Mean daily return (%)")   # original label: 日收益率均值%
    plt.ylabel("Standard deviation (%)")  # original label: 标准差%
    for label, x, y in zip(stocks.keys(), ret, vol):
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(20, 20),
                     textcoords="offset points",
                     ha="right",
                     va="bottom",
                     bbox=dict(boxstyle='round,pad=0.5',
                               fc='yellow',
                               alpha=0.5),
                     arrowprops=dict(arrowstyle="->",
                                     connectionstyle="arc3,rad=0"))
Example #25
def plot_learning_curve(loss_record, title=''):
    '''
        Plot learning curve of your DNN (train & dev loss)
    '''

    total_steps = len(loss_record['train'])
    x_1 = range(total_steps)
    x_2 = x_1[::(len(loss_record['train']) // len(loss_record['dev']))]
    plt.figure(figsize=(6, 4))

    # plot curve of training data
    plt.plot(x_1, loss_record['train'], c='tab:red', label='train')

    # plot curve of testing data
    plt.plot(x_2, loss_record['dev'], c='tab:cyan', label='dev')

    # Configurate figure
    plt.ylim(0.0, 5.)
    plt.xlabel('Training steps')
    plt.ylabel('MSE loss')
    plt.title(f'Learning curve of {title}')
    plt.legend()
    plt.show()
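
A hedged usage sketch, assuming loss_record holds one training-loss value per step and a sparser dev-loss list (the values below are illustrative only):

loss_record = {
    'train': [3.2, 2.1, 1.5, 1.2, 1.0, 0.9],   # one entry per training step
    'dev':   [2.0, 1.1],                        # recorded every third step here
}
plot_learning_curve(loss_record, title='demo model')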
Example #26
    def plot_complex_spec(self, savefig=False):
        """
        Plot the complex spectrum.

        Parameters
        ----------
        savefig : boolean
            Save figure by title

        """
        title = 'lambdas'
        lim = 1.5
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        circle = plt.Circle((0, 0), radius=1, edgecolor='k', facecolor='None')
        plt.gca().add_artist(circle)   # add the unit circle to the current axes
        plt.scatter(self.lambdas.real, self.lambdas.imag, c='k')
        plt.xlim(-lim, lim)
        plt.ylim(-lim, lim)
        plt.xlabel(r'$\mathbb{C}$')
        plt.ylabel(r'')
        plt.show()
        if savefig:
            plt.savefig('%s.png' % title.replace(' ', ''))
Example #27
plt.figure(figsize = (18,18))
plt.subplot(2,1,1)
plt.plot(range(df.shape[0]),all_mid_data,color='b')

#plot the prediction change over time
#plot older predictions with low alpha and newer prediction
#with high alpha
start_alpha = 0.25
alpha=np.arange(start_alpha,1.1,(1.0-start_alpha)/len(predictions_over_time[::3]))
for p_i, p in enumerate(predictions_over_time[::3]):
    for xval, yval in zip(x_axis_seq, p):
        plt.plot(xval, yval, color='r', alpha=alpha[p_i])

plt.title('Evolution of test predictions over time', fontsize=18)
plt.xlabel('Date', fontsize=18)
plt.ylabel('Mid Price', fontsize=18)
plt.xlim(11000,12500)

plt.subplot(2,1,2)


#predicting the best test predictions you got
plt.plot(range(df.shape[0]), all_mid_data, color='b')
for xval, yval in zip(x_axis_seq, predictions_over_time[best_prediction_epoch]):
    plt.plot(xval, yval, color='r')

plt.title('Best test prediction over time',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid price',fontsize=18)
plt.xlim(11000,12500)
plt.show()
Example #28
from sklearn.metrics import auc
colors = ['black', 'orange', 'blue', 'green']
linestyles = [':', '--', '-.', '-']
for clf, label, clr, ls in zip(all_clf, clf_labels, colors, linestyles):
    # assuming the label of the positive class is 1
    y_pred = clf.fit(X_train, y_train).predict_proba(X_test)[:, 1]
    fpr, tpr, thresholds = roc_curve(y_true=y_test, y_score=y_pred)
    roc_auc = auc(x=fpr, y=tpr)
    plt.plot(fpr, tpr, color=clr, linestyle=ls, label='%s (auc = %0.2f)' % (label, roc_auc))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()

# Sample seven
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
from itertools import product
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                    np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))
for idx, clf, tt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):
    clf.fit(X_train_std, y_train)

# In[187]:


# calling the above defined function 
test = Elbow(k)


# In[188]:


# plotting the curves 
plt.plot(k, test)
plt.xlabel('K Neighbors')
plt.ylabel('Test error')
plt.title('Elbow curve for test error')


# In[189]:


# creating instance of KNN
clf = KNN(n_neighbors=12)

# fitting the model 
clf.fit(train_x,train_y)


# predicting over the main set and calculating F1
test_predict = clf.predict(test_x)
numbers_of_selection = [0] * d
sum_of_rewards = [0] * d
ads_selected = []
total_reward = 0
for n in range(0, N):
    ad = 0
    max_upper_bound = 0
    for i in range(0, d):
        if numbers_of_selection[i] > 0:
            average_reward = sum_of_rewards[i] / numbers_of_selection[i]
            delta_i = math.sqrt(3 / 2 * math.log(n + 1) /
                                numbers_of_selection[i])
            upper_bound = average_reward + delta_i
        else:
            upper_bound = 1e400
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            ad = i
    ads_selected.append(ad)
    numbers_of_selection[ad] += 1
    reward = dataset.values[n, ad]
    sum_of_rewards[ad] += reward
    total_reward += reward

# visualising the results

plt.hist(ads_selected)
plt.title("Histogram of Ad Selections")
plt.xlabel("Ads")
plt.ylabel("Number of times each ad was selected")
Example #31
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores.append(np.mean(cross_val_score(knn, X, y, cv=5, scoring='accuracy')))
scores

# plot the K values (x-axis) versus the 5-fold CV score (y-axis)
plt.figure()
plt.plot(k_range, scores)

# automatic grid search for an optimal value of K
from sklearn.grid_search import GridSearchCV
knn = KNeighborsClassifier()
k_range = range(1, 30, 2)
param_grid = dict(n_neighbors=k_range)
grid = GridSearchCV(knn, param_grid, cv=5, scoring='accuracy')
grid.fit(X, y)

# check the results of the grid search
grid.grid_scores_
grid_mean_scores = [result[1] for result in grid.grid_scores_]
plt.figure()
plt.ylim([0.9, 1])
plt.xlabel('Tuning Parameter: N nearest neighbors')
plt.ylabel('Classification Accuracy')
plt.plot(k_range, grid_mean_scores)
grid.best_score_
grid.best_params_
grid.best_estimator_
plt.plot(grid.best_params_['n_neighbors'], grid.best_score_, 'ro', markersize=12, markeredgewidth=1.5,
         markerfacecolor='None', markeredgecolor='r')
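
Note that sklearn.grid_search and grid_scores_ were removed in later scikit-learn releases; a minimal sketch of the same search with the current API, assuming the same knn, X, and y objects:

from sklearn.model_selection import GridSearchCV

param_grid = dict(n_neighbors=list(range(1, 30, 2)))
grid = GridSearchCV(knn, param_grid, cv=5, scoring='accuracy')
grid.fit(X, y)

# mean cross-validated accuracy per K, replacing the old grid_scores_
grid_mean_scores = grid.cv_results_['mean_test_score']
plt.plot(param_grid['n_neighbors'], grid_mean_scores)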
Example #32
                V0,
                options_data.loc[option]['STRIKE'],
                options_data.loc[option]['TTM'],
                r,
                options_data.loc[option]['PRICE'],
                sigma_est=2,
                it=100)
        options_data['IMP_VOL'].loc[option]=imp_vol

#plot implied volatilities
plot_data=options_data[options_data['IMP_VOL']>0]

maturities = sorted(set(options_data['MATURITY']))

plt.figure(figsize=(8,6))
for maturity in maturities:
    #select data for maturity
    data = plot_data[options_data.MATURITY == maturity]
    plt.plot(data['STRIKE'], data['IMP_VOL'], label=maturity.date(), lw=1.5)
    plt.plot(data['STRIKE'],data['IMP_VOL'],'r.')
plt.grid(True)
plt.xlabel('strike')
plt.ylabel('implied volatility of volatility')
plt.legend()
plt.show()

#group data for simplicity
keep = ['PRICE','IMP_VOL']
group_data=plot_data.groupby(['MATURITY','STRIKE'])[keep]
group_data=group_data.sum()
group_data.head()
Example #33
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=1 / 3,
                                                    random_state=0)

# Fitting Simple Linear Regression to Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Visualizing the Training set results
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title("Salary vs Experience (Training Set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()

# Visualizing the Test set results
plt.scatter(X_test, y_test, color="red")
plt.plot(X_train, regressor.predict(X_train), color="blue")
plt.title("Salary vs Experience (Test Set)")
plt.xlabel("Years of Experience")
plt.ylable("Salary")
plt.show()
Example #34
        verbose = 1,
        )

X, y = load()
print('X.shape == {}, X.min == {:.3f}, X.max == {:.3f}'.format(X.shape, X.min(), X.max()))
print('y.shape == {}, y.min == {:.3f}, y.max == {:.3f}'.format(y.shape, y.min(), y.max()))
net1.fit(X, y)

train_loss = np.array([i['train_loss'] for i in net1.train_history_])
valid_loss = np.array([i['valid_loss'] for i in net1.train_history_])
pyplot.plot(train_loss, linewidth = 3, label = 'train')
pyplot.plot(valid_loss, linewidth = 3, label = 'valid')
pyplot.grid()
pyplot.legend()
pyplot.xlabel('epoch')
pyplot.ylabel('loss')
pyplot.ylim(1e-3, 1e-2)
pyplot.yscale('log')
pyplot.show()


def plot_sample(x, y, axis):
    img = x.reshape(96, 96)
    axis.imshow(img, cmap = 'gray')
    axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker = 'x', s = 10)

X, _ = load(test = True)
y_pred = net1.predict(X)

fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
N = int(T / dt)

t = [i * dt for i in range(N)]

na = [None] * N

nb = [None] * N  # initialise the arrays (lists) and constants used in the calculation

na[0] = float(input('Initial number of A atoms: '))

ta = float(input('Decay constant of A: '))

nb[0] = float(input('Initial number of B atoms: '))

tb = float(input('Decay constant of B: '))  # get the initial simulation data from the user

for i in range(N - 1):

    na[i + 1] = na[i] - dt * na[i] / ta

    nb[i + 1] = nb[i] - dt * nb[i] / tb + dt * na[i] / ta  # solve the ODE system numerically with Euler's method

pyp.plot(t, na, 'k', t, nb, 'r')

pyp.title('NA=' + str(na[0]) + '  Ta=' + str(ta) + '  NB=' + str(nb[0]) + '  Tb=' + str(tb))

pyp.xlabel('Time / years')

pyp.ylabel('Number of particles')  # draw the plot