def show(self, zones):
    # NOTE: x_values and y_values are assumed to be defined in the enclosing
    # scope; they are not parameters of this method.
    plt.plot(x_values, y_values, '.')
    plt.xlabel(self.x_label)
    plt.ylabel(self.y_label)
    plt.title(self.title)
    plt.grid(self.show_grid)
    plt.show()
def graph_plot():
    plt.plot(x_data, y_data, label="displacement")  # plot x_data (x_difference) against y_data (z_difference)
    plt.title('fly_distance')        # set the title to 'fly_distance'
    plt.xlabel('x_difference')       # label the x-axis 'x_difference'
    plt.ylabel('z_difference')       # label the y-axis 'z_difference'
    plt.grid()                       # add grid lines
    plt.savefig('fly_distance.png')  # save the figure as 'fly_distance.png'
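# Hedged usage sketch (an assumption, not from the original source): graph_plot()
# reads module-level x_data and y_data, so they must be defined before the call;
# the values below are illustrative placeholders only.
import numpy as np
import matplotlib.pyplot as plt

x_data = np.linspace(0.0, 10.0, 50)        # horizontal displacement (x_difference)
y_data = 0.5 * x_data * (10.0 - x_data)    # vertical displacement (z_difference), illustrative parabola
graph_plot()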
def main(params):
    model_dir = "task" + str(params['task_id']) + "_" + params['model_dir']
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    chatbot = chatBot(data_dir=params['data_dir'],
                      model_dir=model_dir,
                      task_id=params['task_id'],
                      isInteractive=params['interactive'],
                      OOV=params['OOV'],
                      memory_size=params['memory_size'],
                      random_state=params['random_state'],
                      batch_size=params['batch_size'],
                      learning_rate=params['learning_rate'],
                      epsilon=params['epsilon'],
                      max_grad_norm=params['max_grad_norm'],
                      evaluation_interval=params['evaluation_interval'],
                      hops=params['hops'],
                      epochs=params['epochs'],
                      embedding_size=params['embedding_size'],
                      save_model=params['save_model'],
                      checkpoint_path=params['checkpoint_path'])

    if params['train']:
        chatbot.train()
        # Plot losses; save before plt.show(), which may clear the current figure
        plt.plot(chatbot.losses)
        plt.xlabel('Epochs')
        plt.ylabel('Losses')
        plt.savefig('scratch_models/task{0}_epochs{1}_plot.png'.format(
            chatbot.task_id, chatbot.epochs))
        plt.show()
    else:
        chatbot.test(0)
        chatbot.test(1)
        chatbot.test(2)
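# Hedged illustration (assumption): a plausible params dict for main() above,
# with keys mirroring the chatBot constructor arguments used in this snippet;
# the concrete values are placeholders, not the original configuration.
params = {
    'task_id': 1, 'data_dir': 'data/', 'model_dir': 'model/',
    'interactive': False, 'OOV': False, 'memory_size': 50, 'random_state': None,
    'batch_size': 32, 'learning_rate': 0.001, 'epsilon': 1e-8,
    'max_grad_norm': 40.0, 'evaluation_interval': 10, 'hops': 3, 'epochs': 200,
    'embedding_size': 20, 'save_model': True, 'checkpoint_path': 'checkpoints/',
    'train': True,
}
main(params)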
def plot_metrics(history, metric):
    """Plot train and validation curves for the given metric."""
    plt.plot(history.history[metric])            # history.history is a dict, not a callable
    plt.plot(history.history["val_" + metric])
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, "val_" + metric])
    plt.show()
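# Hedged usage sketch (assumption): a tiny Keras model trained on random data,
# purely to show how plot_metrics() above is called with a History object.
import numpy as np
import tensorflow as tf

x = np.random.rand(100, 4).astype("float32")
y = np.random.randint(0, 2, size=(100,))

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(x, y, epochs=5, validation_split=0.2, verbose=0)

plot_metrics(history, "accuracy")  # plots accuracy and val_accuracy per epoch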
def _set_attributes(self, pid):
    """
    Sets the attributes of the plot for the given pid.
    Returns the location of the legend for that plot.
    """
    attrib = dict(self._gb_plot_attributes)
    attrib.update(dict(self._plot_attributes.get(pid, {})))
    if 'title' in attrib:
        plt.title(attrib['title'])
    if 'xlim' in attrib:
        plt.xlim(*attrib['xlim'])
    if 'ylim' in attrib:
        plt.ylim(*attrib['ylim'])
    if 'x_label' in attrib:
        plt.xlabel(*attrib['x_label'])
    if 'y_label' in attrib:
        plt.ylabel(*attrib['y_label'])
    # note: options for legend loc include "lower left", "left", "center", etc.
    return attrib.get('legend_loc', 'best')
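# Hedged illustration (assumption): a plausible shape for the per-plot attribute
# mapping that _set_attributes() above reads from; the keys match those it checks,
# the values are placeholders.
plot_attributes_example = {
    'pid_1': {'title': 'Zone 1', 'xlim': (0, 10), 'ylim': (0, 1),
              'x_label': ('time [s]',), 'y_label': ('signal',),
              'legend_loc': 'upper right'},
}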
@author: Nagaraj U
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# no need to split into train and test sets

# fitting a random forest regressor to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=500, random_state=0)
regressor.fit(X, y)

# predicting the result
y_pred = regressor.predict([[6.5]])

# visualising the results
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('truth or bluff of employee')
plt.xlabel('level')
plt.ylabel('salaries')
plt.show()
sns.distplot(investor_funds, ax=ax[2], color="#2EAD46")
ax[2].set_title("Total committed by Investors", fontsize=14)
# the three plots above show very similar distributions

# relationship between issue year and loan amount
dt_series = pd.to_datetime(df['issue_d'])
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html#pandas.to_datetime
df['year'] = dt_series.dt.year  # new 'year' column, taken via the .dt accessor of the datetime series

plt.figure(figsize=(12, 8))
sns.barplot(x='year', y='loan_amount', data=df, palette='tab10')  # first argument is x, second is y
plt.title("Issuance of Loans", fontsize=14)
plt.xlabel('year', fontsize=14)
plt.ylabel("Average loan amount issued", fontsize=14)
# the number of loans issued rises year over year

df["loan_status"].value_counts()
'''
Current                                                601779
Fully Paid                                             207723
Charged Off                                             45248
Late (31-120 days)                                      11591
Issued                                                   8460
In Grace Period                                          6253
Late (16-30 days)                                        2357
Does not meet the credit policy. Status:Fully Paid       1988
Default                                                  1219
from sklearn.linear_model import Ridge
# note: normalize= was removed in recent scikit-learn; on newer versions scale the
# features separately (e.g. with StandardScaler)
rdregressor = Ridge(alpha=1, normalize=True)
rdregressor.fit(X_train, y_train)
y_pred = rdregressor.predict(X_test)  # predict on the test set so custom_accuracy compares against y_test
score3 = rdregressor.score(X_train, y_train) * 100
print("R Square value:", score3)
print("Custom accuracy for Ridge:", custom_accuracy(y_test, y_pred, 20))

from sklearn.linear_model import Lasso
lsregressor = Lasso(alpha=1, normalize=True)
lsregressor.fit(X_train, y_train)
y_pred = lsregressor.predict(X_test)
score4 = lsregressor.score(X_train, y_train) * 100
print("R Square value:", score4)
print("Custom accuracy for Lasso:", custom_accuracy(y_test, y_pred, 20))

from sklearn.svm import SVR
svregressor = SVR()
svregressor.fit(X_train, y_train)
y_pred = svregressor.predict(X_test)
score5 = svregressor.score(X_train, y_train) * 100
print("R Square value:", score5)
print("Custom accuracy for SVR:", custom_accuracy(y_test, y_pred, 20))

models = ['Random Forest', 'Linear Regression', 'Ridge', 'Lasso']
acc_score = [0.79, 0.43, 0.28, 0.26]
plt.bar(models, acc_score, color=['green', 'pink', 'cyan', 'skyblue'])
plt.ylabel("Accuracy scores")
plt.title("Which model is the most accurate for imbalanced data")
plt.show()
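# custom_accuracy is called above but not defined in this snippet. A hedged,
# illustrative definition (an assumption, not the original author's code): count a
# prediction as correct when it lies within `threshold` units of the true value.
import numpy as np

def custom_accuracy(y_true, y_pred, threshold):
    y_true = np.ravel(y_true)
    y_pred = np.ravel(y_pred)
    correct = np.sum(np.abs(y_true - y_pred) <= threshold)
    return 100.0 * correct / len(y_true)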
def rocScan(varlist, scan_targets, labels, ylabels, data, plotpath='',
            x_min=0., x_max=1.0, y_min=0.0, y_max=1.0,
            x_log=False, y_log=False, rejection=False,
            x_label='False positive rate', y_label='True positive rate',
            linestyles=[], colorgrouping=-1, extra_lines=[],
            atlas_x=-1, atlas_y=-1, simulation=False, textlist=[]):
    '''
    Creates a set of ROC curve plots by scanning over the specified variables.
    One set is created for each target (neural net score dataset).

    varlist: a list of rocVar instances to scan over
    scan_targets: a list of neural net score datasets to use
    labels: a list of target names (strings); must be the same length as scan_targets
    '''
    rocs = buildRocs(varlist, scan_targets, labels, ylabels, data)

    for target_label in labels:
        for v in varlist:
            # prepare matplotlib figure
            plt.cla()
            plt.clf()
            fig = plt.figure()
            fig.patch.set_facecolor('white')
            plt.plot([0, 1], [0, 1], 'k--')

            for label in v.labels:
                # first generate ROC curve
                x = rocs[target_label + label]['x']
                y = rocs[target_label + label]['y']
                var_auc = auc(x, y)
                if not rejection:
                    plt.plot(x, y, label=label + ' (area = {:.3f})'.format(var_auc))
                else:
                    plt.plot(y, 1. / x, label=label + ' (area = {:.3f})'.format(var_auc))

            # plt.title('ROC Scan of '+target_label+' over '+v.latex)
            if x_log:
                plt.xscale('log')
            if y_log:
                plt.yscale('log')
            plt.xlim(x_min, x_max)
            plt.ylim(y_min, y_max)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            #ampl.set_xlabel(x_label)
            #ampl.set_ylabel(y_label)
            plt.legend()
            drawLabels(fig, atlas_x, atlas_y, simulation, textlist)

            if plotpath != '':
                plt.savefig(plotpath + 'roc_scan_' + target_label + '_' + v.name + '.pdf')
            plt.show()
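# Hedged note (assumption): buildRocs() above is expected to return a dict keyed by
# target_label + label, each entry holding the curve arrays consumed in the loop,
# roughly shaped like this illustrative placeholder:
rocs_example = {'nn_score' + 'barrel': {'x': [0.0, 0.1, 1.0],    # false positive rate
                                        'y': [0.0, 0.8, 1.0]}}   # true positive rate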
# compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# train the model
hist = model.fit(x_train, y_train_one_hot,
                 batch_size=256,
                 epochs=10,
                 validation_split=0.2)

# evaluate the model on the test data set
model.evaluate(x_test, y_test_one_hot)[1]

# visualize the model's accuracy
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()

# visualize the model's loss
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()

# test the model with an example
from google.colab import files
uploaded = files.upload()
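# Hedged sketch (assumption): the one-hot targets used above are typically built
# with Keras's to_categorical; num_classes=10 is a placeholder for this dataset.
from tensorflow.keras.utils import to_categorical

y_train_one_hot = to_categorical(y_train, num_classes=10)
y_test_one_hot = to_categorical(y_test, num_classes=10)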
# fitting the RNN to the training set
regressor.fit(X_train, y_train, batch_size=32, epochs=200)

######################################################
## Part 3: Making predictions & visualising results ##
######################################################

test_set = pd.read_csv('Google_Stock_Price_Test.csv')
X_test = test_set.iloc[:, 1:2]

# scaling X_test with the scaler fitted on the training data
inputs = sc.transform(X_test)

# before predicting, reshape to the 3-D input the RNN expects: (samples, timesteps, features)
inputs = np.reshape(inputs, (20, 1, 1))

# now predict the next price
y_pred = regressor.predict(inputs)

# the model outputs scaled values, so map them back with inverse_transform()
y_pred = sc.inverse_transform(y_pred)

# visualisation of the results
plt.plot(X_test, color='red', label="Real Google Price")
plt.plot(y_pred, color='blue', label="Predicted Google Price")
plt.title("Price_Prediction")
plt.xlabel("Time")
plt.ylabel("Stock-Price-Google")
plt.legend()
plt.show()