def plot_contour_3d(xi, yi, zi, rot=120, labels=None):
    # NOTE: w and golden_mean are module-level figure-size constants.
    fig = plt.figure(figsize=(w, golden_mean * w))
    ax = fig.add_subplot(1, 1, 1, projection='3d')

    # Normalise Z
    zi_norm = zi / np.nanmax(zi)
    xig, yig = np.meshgrid(xi, yi)

    # Place the projected contours on the box faces pointing away from the camera.
    if rot < 90:
        zoffset = np.nanmin(zi)
        xoffset = np.nanmin(xig)
        yoffset = np.nanmin(yig)
    elif rot < 180:
        zoffset = np.nanmin(zi)
        xoffset = np.nanmax(xig)
        yoffset = np.nanmin(yig)
    else:
        zoffset = np.nanmin(zi)
        xoffset = np.nanmax(xig)
        yoffset = np.nanmax(yig)

    ax.plot_surface(xig, yig, zi, rstride=1, cstride=1, alpha=0.45,
                    facecolors=cm.coolwarm(zi_norm), linewidth=1,
                    antialiased=True)
    cset = ax.contour(xig, yig, zi, zdir='z', offset=zoffset,
                      linestyles='dashed', cmap=cm.coolwarm)
    cset = ax.contour(xig, yig, zi, zdir='x', offset=xoffset, cmap=cm.coolwarm)
    cset = ax.contour(xig, yig, zi, zdir='y', offset=yoffset, cmap=cm.coolwarm)
    ax.view_init(30, rot)
    if labels is not None:
        ax.set_xlabel(labels['x'])
        ax.set_ylabel(labels['y'])
        ax.set_zlabel(labels['z'])
    fig.tight_layout()
    return fig
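# A hedged usage sketch for plot_contour_3d: `w` and `golden_mean` are assumed
# to be module-level constants (figure width and the golden ratio), so they are
# defined locally here. The grid and surface are illustrative only.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

w, golden_mean = 6.0, 0.618
xi = np.linspace(-3, 3, 60)
yi = np.linspace(-3, 3, 60)
X, Y = np.meshgrid(xi, yi)
zi = np.exp(-(X**2 + Y**2))   # a Gaussian bump as toy data
fig = plot_contour_3d(xi, yi, zi, rot=120,
                      labels={'x': 'x', 'y': 'y', 'z': 'f(x, y)'})
plt.show()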
def plot_wing(self):
    n_names = len(self.names)
    self.ax.cla()
    az = self.ax.azim
    el = self.ax.elev
    dist = self.ax.dist

    for j, name in enumerate(self.names):
        mesh0 = self.mesh[self.curr_pos*n_names+j].copy()
        self.ax.set_axis_off()

        if self.show_wing:
            def_mesh0 = self.def_mesh[self.curr_pos*n_names+j]
            x = mesh0[:, :, 0]
            y = mesh0[:, :, 1]
            z = mesh0[:, :, 2]

            try:  # show deformed mesh option may not be available
                if self.show_def_mesh.get():
                    x_def = def_mesh0[:, :, 0]
                    y_def = def_mesh0[:, :, 1]
                    z_def = def_mesh0[:, :, 2]
                    self.c2.grid(row=0, column=3, padx=5, sticky=Tk.W)
                    if self.ex_def.get():
                        z_def = (z_def - z) * 10 + z_def
                        def_mesh0 = (def_mesh0 - mesh0) * 30 + def_mesh0
                    else:
                        def_mesh0 = (def_mesh0 - mesh0) * 2 + def_mesh0
                    self.ax.plot_wireframe(x_def, y_def, z_def, rstride=1,
                                           cstride=1, color='k')
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1,
                                           color='k', alpha=.3)
                else:
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')
                    self.c2.grid_forget()
            except:
                self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')

            cg = self.cg[self.curr_pos]
            # self.ax.scatter(cg[0], cg[1], cg[2], s=100, color='r')

        if self.show_tube:
            # Get the array of radii and thickness values for the FEM system
            r0 = self.radius[self.curr_pos*n_names+j]
            t0 = self.thickness[self.curr_pos*n_names+j]

            # Create a normalized array of values for the colormap
            colors = t0
            colors = colors / np.max(colors)

            # Set the number of rectangular patches on the cylinder
            num_circ = 12
            fem_origin = self.fem_origin_dict[name.split('.')[-1] + '_fem_origin']

            # Get the number of spanwise nodal points
            n = mesh0.shape[1]

            # Create an array of angles around a circle
            p = np.linspace(0, 2*np.pi, num_circ)

            # This is just to show the deformed mesh if selected
            if self.show_wing:
                if self.show_def_mesh.get():
                    mesh0[:, :, 2] = def_mesh0[:, :, 2]

            # Loop through each element in the FEM system
            for i, thick in enumerate(t0):
                # Get the radii describing the circles at each nodal point
                r = np.array((r0[i], r0[i]))
                R, P = np.meshgrid(r, p)

                # Get the X and Z coordinates for all points around the circle
                X, Z = R*np.cos(P), R*np.sin(P)

                # Get the chord and center location for the FEM system
                chords = mesh0[-1, :, 0] - mesh0[0, :, 0]
                comp = fem_origin * chords + mesh0[0, :, 0]

                # Add the location of the element centers to the circle coordinates
                X[:, 0] += comp[i]
                X[:, 1] += comp[i+1]
                Z[:, 0] += fem_origin * (mesh0[-1, i, 2] - mesh0[0, i, 2]) + mesh0[0, i, 2]
                Z[:, 1] += fem_origin * (mesh0[-1, i+1, 2] - mesh0[0, i+1, 2]) + mesh0[0, i+1, 2]

                # Get the spanwise locations of the spar points
                Y = np.empty(X.shape)
                Y[:] = np.linspace(mesh0[0, i, 1], mesh0[0, i+1, 1], 2)

                # Set the colors of the rectangular surfaces
                col = np.zeros(X.shape)
                col[:] = colors[i]

                # Plot the rectangular surfaces for each individual FEM element
                try:
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.viridis(col), linewidth=0)
                except:
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.coolwarm(col), linewidth=0)

    lim = 0.
    for j in range(n_names):
        ma = np.max(self.mesh[self.curr_pos*n_names+j], axis=(0, 1, 2))
        if ma > lim:
            lim = ma
    lim /= float(self.zoom_scale)
    self.ax.auto_scale_xyz([-lim, lim], [-lim, lim], [-lim, lim])
    self.ax.set_title("Iteration: {}".format(self.curr_pos))

    # round_to_n = lambda x, n: round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1))
    if self.opt:
        obj_val = self.obj[self.curr_pos]
        self.ax.text2D(.15, .05, self.obj_key + ': {}'.format(obj_val),
                       transform=self.ax.transAxes, color='k')

    self.ax.view_init(elev=el, azim=az)  # Reproduce view
    self.ax.dist = dist
def f(x, y):
    return 2.0*x + y

# Main code starts here
fig = plt.figure()                      # Create figure
ax = fig.add_subplot(projection='3d')   # Be in 3d
# Axes hold by default, so two plots stack without the old plt.hold(True).

# Start Surface Plot
X = np.arange(-10.0, 10.0, 1.0)
Y = np.arange(-10.0, 10.0, 1.0)
X, Y = np.meshgrid(X, Y)  # Make mesh
Z = f(X, Y)
G = f(X, Y)
N = G / G.max()
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.25,
                facecolors=cm.coolwarm(N), linewidth=0,
                antialiased=False, shade=False)

# Start Gradient Plot
cset = ax.contour(X, Y, Z, cmap=cm.coolwarm, alpha=.9)
ax.clabel(cset, fontsize=9, inline=1)

# Bottom (xy-plane) contour
cset = ax.contour(X, Y, Z, zdir='z', offset=-30, cmap=cm.coolwarm)
ax.clabel(cset, fontsize=9, inline=1)

# Labels
ax.set_title("Plot of f(x,y)=2x+y")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
def train_model(learning_rate, steps, batch_size,
                input_feature=feature_total_rooms,
                model_dir=os.path.abspath("./model")):
    """Trains a linear regression model of one feature.

    :param learning_rate: a `float`, the learning rate.
    :param steps: a non-zero `int`, the total number of training steps.
        A training step consists of a forward and backward pass using a
        single batch.
    :param batch_size: a non-zero `int`, the batch size.
    :param input_feature: a `string` specifying a column from
        `california_housing_dataframe` to use as input feature.
    :param model_dir: a `string` specifying a path for saving the model.
    :return:
    """
    periods = 10
    steps_per_period = steps / periods

    # Define the feature.
    my_feature_data = california_housing_dataframe[[input_feature]]  # feature data
    feature_columns = [tf.feature_column.numeric_column(input_feature)]  # feature column definitions

    # Define the target.
    targets = california_housing_dataframe[target_column]

    # Train the model with mini-batch stochastic gradient descent,
    # using gradient clipping to keep the gradient magnitude bounded.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)

    # Define the linear regression model.
    linear_regressor = tf.estimator.LinearRegressor(feature_columns=feature_columns,
                                                    optimizer=my_optimizer,
                                                    model_dir=model_dir)

    # Create input functions.
    training_input_fn = lambda: input_fn(my_feature_data, targets,
                                         batch_size=batch_size)
    prediction_input_fn = lambda: input_fn(my_feature_data, targets,
                                           num_epochs=1, shuffle=False)

    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 4, 1)
    plt.scatter(california_housing_dataframe[input_feature],
                california_housing_dataframe[target_column])
    plt.subplot(1, 4, 2)
    plt.title("Learned Line by Period")
    plt.ylabel(target_column)
    plt.xlabel(input_feature)
    # The sample plays no role in training; it is only used to plot the results.
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[input_feature], sample[target_column])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]
    plt.subplot(1, 4, 3)
    plt.scatter(sample[input_feature], sample[target_column])

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)

        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss.
        mean_squared_error = metrics.mean_squared_error(predictions, targets)
        root_mean_squared_error = math.sqrt(mean_squared_error)
        print("  period %2d : %0.3f" % (period, root_mean_squared_error))

        # Add the loss metrics from this period to our list (used only for plotting).
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[target_column].max()])

        # Retrieve the final weight and bias generated during training.
        weight = linear_regressor.get_variable_value(
            "linear/linear_model/%s/weights" % input_feature)[0]
        bias = linear_regressor.get_variable_value("linear/linear_model/bias_weights")
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[input_feature].max()),
                               sample[input_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 4, 4)
    plt.ylabel("RMSE")
    plt.xlabel("Periods")
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    plt.show()

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
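# The train_model variants in this file all call an input function (input_fn /
# my_input_fn) that is not shown. A minimal sketch consistent with the TF 1.x
# Estimator API follows; the exact name, shuffle buffer size, and defaults are
# assumptions.
import numpy as np
import tensorflow as tf

def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Feeds a pandas feature DataFrame and target Series to a tf.estimator model."""
    # Convert pandas data into a dict of numpy arrays keyed by column name.
    features = {key: np.array(value) for key, value in dict(features).items()}

    # Build a Dataset, then batch it and repeat it (endlessly if num_epochs is None).
    ds = tf.data.Dataset.from_tensor_slices((features, targets))
    ds = ds.batch(batch_size).repeat(num_epochs)

    if shuffle:
        ds = ds.shuffle(buffer_size=10000)

    # Return the next batch of (features, labels) as tensors.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels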
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    periods = 10
    steps_per_period = steps / periods

    # Define the input feature.
    my_feature = california_housing_dataframe[[input_feature]]
    # Wrap the input feature in a feature column.
    feature_columns = [tf.feature_column.numeric_column(input_feature)]

    # Define the target/label.
    label = "median_house_value"
    targets = california_housing_dataframe[label]

    training_input_fn = lambda: my_input_fn(my_feature, targets,
                                            batch_size=batch_size)
    # Also create an input function for prediction.
    # Each example is predicted exactly once, so there is no need to shuffle.
    prediction_input_fn = lambda: my_input_fn(my_feature, targets, num_epochs=1,
                                              batch_size=batch_size, shuffle=False)

    # Gradient descent with gradient clipping.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)

    # Define the linear regression model.
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=my_optimizer
    )

    # Set up to plot the state of our model's line each period.
    sample = california_housing_dataframe.sample(n=300)
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(label)
    plt.xlabel(input_feature)
    plt.scatter(sample[input_feature], sample[label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        _ = linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Call predict().
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        # Convert to a numpy array for easier arithmetic.
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute RMSE.
        mean_squared_error = metrics.mean_squared_error(predictions, targets)
        root_mean_squared_error = math.sqrt(mean_squared_error)

        # Add RMSE to the list.
        root_mean_squared_errors.append(root_mean_squared_error)
        print("Root Mean Squared Error (on training data) (period %s): %0.3f"
              % (period, root_mean_squared_error))
        # Example output: Root Mean Squared Error (on training data): 237.417

        # Plot the regression line for this period.
        y_extents = np.array([0, sample[label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[input_feature].max()),
                               sample[input_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel("RMSE")
    plt.xlabel("Periods")
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Create a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
def update(self, data):
    self.txt.set_text(self.text % data[self.label])
    # Map the value into [0, 1] over the configured range before colouring.
    self.dot.set_color(cm.coolwarm((data[self.label] - self.low) / self.amp))
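# A minimal, self-contained sketch of the mapping update() relies on: the class
# is assumed to store the low end (self.low) and span (self.amp) of the expected
# value range, so (value - low) / amp lands in [0, 1] for cm.coolwarm. The
# numbers here are arbitrary illustrative values.
from matplotlib import cm

low, amp = 10.0, 40.0   # assumed range: values span [low, low + amp]
value = 25.0
rgba = cm.coolwarm((value - low) / amp)   # -> a cool (bluish) RGBA tuple
print(rgba)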
def plot_quantile_average_cumulative_return(avg_cumulative_returns,
                                            by_quantile=False,
                                            std_bar=False,
                                            title=None,
                                            ax=None):
    """
    Plots the average cumulative return (and optionally its standard
    deviation) of each factor quantile over the provided forward periods.

    Parameters
    ----------
    avg_cumulative_returns : pd.DataFrame
        The format is the one returned by
        performance.average_cumulative_return_by_quantile
    by_quantile : boolean, optional
        Disaggregate figures by quantile (useful to clearly see std dev bars)
    std_bar : boolean, optional
        Plot standard deviation bars
    title : string, optional
        Custom title
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    avg_cumulative_returns = avg_cumulative_returns.multiply(DECIMAL_TO_BPS)
    quantiles = len(avg_cumulative_returns.index.levels[0].unique())
    palette = [cm.coolwarm(i) for i in np.linspace(0, 1, quantiles)]
    palette = palette[::-1]  # we want negative quantiles as 'red'

    if by_quantile:
        if ax is None:
            v_spaces = ((quantiles - 1) // 2) + 1
            f, ax = plt.subplots(v_spaces, 2, sharex=False, sharey=False,
                                 figsize=(18, 6 * v_spaces))
            ax = ax.flatten()

        for i, (quantile, q_ret) in enumerate(
                avg_cumulative_returns.groupby(level='factor_quantile')):
            mean = q_ret.loc[(quantile, 'mean')]
            mean.name = 'Quantile ' + str(quantile)
            mean.plot(ax=ax[i], color=palette[i])
            ax[i].set_ylabel('Mean Return (bps)')

            if std_bar:
                std = q_ret.loc[(quantile, 'std')]
                ax[i].errorbar(std.index, mean, yerr=std,
                               fmt='none', ecolor=palette[i], label='none')

            ax[i].axvline(x=0, color='k', linestyle='--')
            ax[i].legend()
    else:
        if ax is None:
            f, ax = plt.subplots(1, 1, figsize=(18, 6))

        for i, (quantile, q_ret) in enumerate(
                avg_cumulative_returns.groupby(level='factor_quantile')):
            mean = q_ret.loc[(quantile, 'mean')]
            mean.name = 'Quantile ' + str(quantile)
            mean.plot(ax=ax, color=palette[i])

            if std_bar:
                std = q_ret.loc[(quantile, 'std')]
                ax.errorbar(std.index, mean, yerr=std,
                            fmt='none', ecolor=palette[i], label='none')

        ax.axvline(x=0, color='k', linestyle='--')
        ax.legend()
        ax.set(ylabel='Mean Return (bps)',
               title=("Average Cumulative Returns by Quantile"
                      if title is None else title),
               xlabel='Periods')

    return ax
metrica_defence = pvm.lastrow_calc_player_velocities(data_defence, smoothing=True)

# Read in Events
events_dict, events_df = create.create_events(metrica_attack)

# Real Shirt Mapping
shirt_mapping = sm.create_consistent_shirt_mapping(last_row)
events_df = sm.real_shirt_mapping(events_df, shirt_mapping)

# To Bokeh Format
bokeh_attack = mtb.tracking_to_bokeh_format(metrica_attack)
bokeh_defence = mtb.tracking_to_bokeh_format(metrica_defence)

# List of available Matches
match_list = events_df.index.get_level_values(level=0).unique().tolist()

# Surface Colour Map
m_coolwarm_rgb = (255 * cm.coolwarm(range(256))).astype('int')
bokehpalette = [RGB(*tuple(rgb)).to_hex() for rgb in m_coolwarm_rgb]

# Create each of the tabs
tab1 = goals_overview_tab(events_df, match_list, bokeh_attack, bokeh_defence,
                          shirt_mapping)
# tab2 = pitch_surfaces_tab(events_df, metrica_attack, metrica_defence,
#                           bokeh_attack, bokeh_defence, shirt_mapping, match_list)
# tab3 = player_displacement_value_tab(events_df, metrica_attack, metrica_defence,
#                                      bokeh_attack, bokeh_defence, shirt_mapping,
#                                      match_list)

# Put all the tabs into one application
# tabs = Tabs(tabs=[tab1, tab2, tab3])
tabs = Tabs(tabs=[tab1])

# Put the tabs in the current document for display
curdoc().add_root(tabs)
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    """Trains a linear regression model of one feature.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      input_feature: A `string` specifying a column from
        `california_housing_dataframe` to use as input feature.
    """
    # Run 10 assessment loops
    periods = 10
    # Calculate steps for each period
    steps_per_period = steps / periods

    # Prepare feature data
    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]

    # Prepare label data
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create input functions: training batches, and a prediction pass that
    # walks the whole dataset once without shuffling.
    training_input_fn = lambda: my_input_fn(
        my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    # Create a linear regressor object.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # Clip gradients to a fixed norm of 5
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    ###########################################################################
    # Graph size
    plt.figure(figsize=(15, 6))
    # Put more than one graph in the figure
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    # Draw a scatter plot of 300 sampled rows
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    # Build one colour per period
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]
    ###########################################################################

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss by comparing predictions and targets.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        # Calculate the maximum height of the line
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        # Solve for the x extents of the fitted line and clamp them to the sample range
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        # Plot this period's line in its own colour
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    ###########################################################################
    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    ###########################################################################

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
]
running = np.zeros(len(positions))

fig = plt.figure(figsize=(4, 3))
left = 0.15
bottom = 0.35
width = 0.6
height = 0.6
ax = fig.add_axes((left, bottom, width, height))

for i, g in enumerate(['male', 'unknown', 'female']):
    c = cm.coolwarm(i / 2)
    values = 100 * np.array(
        [a[label][g] / a[label]['total'] for label in positions])
    ax.bar(labels, values, bar_width, bottom=running, label=g, color=c)
    if g == 'male' or g == 'female':
        for k, v in enumerate(values):
            ax.text(k, v / 2 + running[k], np.round(v, 1),
                    va='center', fontsize=5, ha='center', c='1',
def train_model(self, learning_rate, steps, batch_size, input_feature="total_rooms"):
    """Trains the model on a single feature, searching for good hyperparameters.

    :param learning_rate: the learning rate
    :param steps: the total number of training steps
    :param batch_size: the number of examples fed to the model per step
    :param input_feature: the input feature
    :return:
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    # Build the feature columns.
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Build the input functions.
    training_input_fn = lambda: self.my_input_fn(my_feature_data, targets,
                                                 batch_size=batch_size)
    predictions_input_fn = lambda: self.my_input_fn(my_feature_data, targets,
                                                    num_epochs=1, shuffle=False)

    # Linear regressor with clipped gradient descent.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(feature_columns=feature_columns,
                                                    optimizer=my_optimizer)

    # Plot the model state for each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learn Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model for this period.
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
        # Predict.
        predictions = linear_regressor.predict(input_fn=predictions_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute the loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Print the current loss.
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        # Append this period's loss to the list.
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Plot the loss metric for each period.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    plt.show()

    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    periods = 10
    steps_per_period = steps / periods

    X_train = df[[input_feature]]
    target = "target"
    y_train = df[target]
    feature_columns = [tf.feature_column.numeric_column(input_feature)]

    def input_fn(X_data, y_data, batch_size=1, repeat=1, shuffle=True):
        # Convert the pandas DataFrame into a dict of numpy arrays.
        X_tensor = {key: np.array(value) for key, value in dict(X_data).items()}
        ds = Dataset.from_tensor_slices((X_tensor, y_data))
        ds = ds.batch(int(batch_size)).repeat(repeat)
        # Optionally shuffle; the prediction pass disables this so that
        # predictions stay aligned with the targets.
        if shuffle:
            ds = ds.shuffle(buffer_size=10000)
        X, y = ds.make_one_shot_iterator().get_next()
        return X, y

    train_input_fn = lambda: input_fn(
        X_train, y_train, batch_size=batch_size, repeat=1)
    predict_input_fn = lambda: input_fn(
        X_train, y_train, batch_size=1, repeat=1, shuffle=False)

    gdo = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    gdo = tf.contrib.estimator.clip_gradients_by_norm(gdo, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=gdo)

    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(target)
    plt.xlabel(input_feature)
    sample = df.sample(n=300)
    plt.scatter(sample[input_feature], sample[target])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    root_mean_squared_errors = []
    for period in range(0, periods):
        linear_regressor.train(input_fn=train_input_fn, steps=steps_per_period)
        predictions = linear_regressor.predict(input_fn=predict_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        mean_squared_error = metrics.mean_squared_error(predictions, y_train)
        # print('mean_squared_error', mean_squared_error)
        root_mean_squared_error = math.sqrt(mean_squared_error)
        # print('root_mean_squared_error', root_mean_squared_error)
        root_mean_squared_errors.append(root_mean_squared_error)
        print("period %02d: %0.2f" % (period, root_mean_squared_error))

        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0][0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')[0]
        x_min = sample[input_feature].min()
        x_max = sample[input_feature].max()
        y_min = x_min * weight + bias
        y_max = x_max * weight + bias
        plt.plot([x_min, x_max], [y_min, y_max], c=colors[period])

    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.tight_layout()

    result_df = pd.DataFrame({
        "prediction": pd.Series(predictions),
        "target": df["target"]
    })
    print(result_df.describe())
    print("RMSE: %0.2f" % root_mean_squared_error)

    plt.plot(np.arange(len(root_mean_squared_errors)), root_mean_squared_errors)
    plt.show()
def showAndSaveHistogram_2Dim(samples, num_bins, expID, path, D, proposal_dist,
                              sigma, n, burn_in, keep_every, num_samples_fnl,
                              err_fnl):
    # Get figure and axes in 3D:
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Set min and max for figure:
    x1_min = -20
    x1_max = 15
    x2_min = -15
    x2_max = 20

    # Create a surface plot of our target multivariate Gaussian mixture distribution:
    x1_surface = np.arange(x1_min, x1_max + 1, .25)
    x2_surface = np.arange(x2_min, x2_max + 1, .25)
    X1_surface, X2_surface = np.meshgrid(x1_surface, x2_surface)
    Z_surface = np.empty_like(X1_surface)
    for i in range(X1_surface.shape[0]):
        for j in range(X1_surface.shape[1]):
            Z_surface[i, j] = p_nDims([X1_surface[i, j], X2_surface[i, j]])
    ax.plot_surface(X1_surface, X2_surface, Z_surface, rstride=5, cstride=5,
                    alpha=0.4)

    # Set labels, etc. for 3D surface plot:
    axis_label_fontdict = {'fontsize': 25, 'fontweight': 'bold', 'color': 'r'}
    ax.set_xlabel('X1', fontdict=axis_label_fontdict)
    ax.set_xlim(-20, 15)
    ax.set_ylabel('X2', fontdict=axis_label_fontdict)
    ax.set_ylim(-15, 20)
    ax.set_zlabel('Z', fontdict=axis_label_fontdict)

    x1 = samples[0, :]
    x2 = samples[1, :]
    rng = [[x1_min, x1_max], [x2_min, x2_max]]

    # REFERENCE NOTE: the second half of this function for plotting in 3D was
    # adapted from an online source:
    # http://matplotlib.org/examples/mplot3d/hist3d_demo.html
    hist, x1edges, x2edges = np.histogram2d(x1, x2, bins=50, range=rng,
                                            density=True)
    elements = (len(x1edges) - 1) * (len(x2edges) - 1)
    x1pos, x2pos = np.meshgrid(x1edges[:-1], x2edges[:-1])
    x1pos = x1pos.flatten()
    x2pos = x2pos.flatten()
    zpos = np.zeros(elements)
    dx1 = 0.5 * np.ones_like(zpos)
    dx2 = dx1.copy()
    dz = hist.flatten()

    # REFERENCE NOTE: this technique for setting the colormap was adapted from
    # matplotlib's online demo repository:
    # http://matplotlib.org/examples/...
    offset = dz + np.abs(dz.min())
    fracs = offset.astype(float) / offset.max()
    norm = clrs.Normalize(fracs.min(), fracs.max())
    my_colors = cm.coolwarm(norm(fracs))

    # Plot the histogram2d data as a bar3d on the same axes:
    ax.bar3d(x1pos, x2pos, zpos, dx1, dx2, dz, color=my_colors,
             zsort='average', alpha=.4)

    title = "2D HISTOGRAM exp" + str(expID) + " " \
            + str(D) + "D" + " " \
            + "Q=" + str(proposal_dist) + " " \
            + "\nsigma=" + str(sigma[0, :]) + str(sigma[1, :])
    fig.suptitle(title, fontsize='20')
    txt = "exp" + str(expID) + " " \
          + str(D) + "D\n" \
          + "Q=" + str(proposal_dist) + " " \
          + "sigma=" + str(sigma[0, :]) + str(sigma[1, :]) + "\n" \
          + "n=" + str(n) + " " \
          + "burn_in=" + str(burn_in) + "\n" \
          + "keep_every=" + str(keep_every) + "\n" \
          + "num_samples_fnl=" + str(num_samples_fnl) + "\n" \
          + "err_fnl=" + str(round(err_fnl, 8))
    plt.figtext(0, 0, txt, style='italic',
                bbox={'facecolor': 'blue', 'alpha': 0.1, 'pad': 10}, figure=fig)

    os.chdir(path)
    save_name = "3DHIST_exp" + str(expID) + "_" \
                + str(D) + "D_" \
                + str(proposal_dist) + "_" \
                + "n" + str(n) + "_" \
                + "burn" + str(burn_in) + "_" \
                + str(keep_every) + "th_" \
                + "err" + str(round(err_fnl, 5)) + "."
    plt.savefig(save_name)

    # Display the histogram2d by itself in a new figure:
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    count, xedges, yedges, image = plt.hist2d(x1, x2, num_bins, density=True)
    pylab.colorbar()
    fig1.suptitle(title, fontsize='20')
    plt.figtext(0, 0, txt, style='italic',
                bbox={'facecolor': 'blue', 'alpha': 0.1, 'pad': 10}, figure=fig1)
    save_name2 = "2DHIST_exp" + str(expID) + "_" \
                 + str(D) + "D_" \
                 + str(proposal_dist) + "_" \
                 + "n" + str(n) + "_" \
                 + "burn" + str(burn_in) + "_" \
                 + str(keep_every) + "th_" \
                 + "err" + str(round(err_fnl, 5)) + "."
    plt.savefig(save_name2)
    plt.show()
    plt.close()
test_high_decoded1 = AE_high.predict(downPSFs_test_flat1)
high_guessed_encoded1 = high_model_encoded.predict(X=test_high_decoded1)
test_low_decoded1 = AE_low.predict(downPSFs_test_flat1)
low_guessed_encoded1 = low_model_encoded.predict(X=test_low_decoded1)
both_encoded1 = np.concatenate((low_guessed_encoded1, high_guessed_encoded1), axis=1)

pp, both_encoded_rms1 = evaluate_wavefront_performance(N_high + N_low, coef_test1,
                                                       both_encoded1,
                                                       zern_list=zern_list_high,
                                                       show_predic=False)
rms_autoencoder.append(both_encoded_rms1)

### Plot results
n = len(rms_encoder)
rms_encoder_arr = wave_nom * np.array(rms_encoder)
rms_autoencoder_arr = wave_nom * np.array(rms_autoencoder)
colors = cm.coolwarm(np.linspace(0, 1, N_test))

plt.figure()
plt.subplot(1, 2, 1)
i = 0
plt.scatter(i * np.ones(N_test) + 0.025, np.sort(rms_autoencoder_arr[i]),
            color='coral', s=4, label=r'Reconstructed $x$')
plt.scatter(i * np.ones(N_test) - 0.025, np.sort(rms_encoder_arr[i]),
            color='blue', s=4, label=r'Encoded $h$')
for i in np.arange(1, n):
    plt.scatter(i * np.ones(N_test) + 0.025, np.sort(rms_autoencoder_arr[i]),
                color='coral', s=4)
    plt.scatter(i * np.ones(N_test) - 0.025, np.sort(rms_encoder_arr[i]),
                color='blue', s=4)
plt.legend(title='Architecture')
plt.ylim([0, 200])
plt.ylabel('RMS [nm]')
plt.xlabel('Iteration')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import func
from matplotlib import cm, colors

fina = '/u/tsun/ITER_RMP/ITER_RMP_n1/gpec.boundary'

zeta = np.linspace(0., 2. * np.pi, 100)
theta = zeta
x, y, r, z, b = func.plabry_and_bn_3d(fina, theta, zeta)

strength = b
norm = colors.Normalize(vmin=np.min(strength), vmax=np.max(strength), clip=False)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Per-face colours are passed through `facecolors` (plot_surface has no
# `colors` keyword).
ax.plot_surface(x, y, z, facecolors=cm.coolwarm(norm(strength)), cmap=cm.coolwarm)

plt.figure()
plt.plot(b[0, :])
plt.show()
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    """Trains a linear regression model of one feature.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      input_feature: A `string` specifying a column from
        `california_housing_dataframe` to use as input feature.
    """
    periods = 10
    steps_per_period = steps / periods

    # Feature and targets
    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    training_input_fn = lambda: my_input_fn(
        my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    # Optimizer based on gradient descent.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    plt.figure(figsize=(15, 6))  # figure size in inches
    # subplot draws several plots in one window; the arguments are
    # rows, columns, and the index of this plot.
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)  # sample 300 rows
    plt.scatter(sample[my_feature], sample[my_label])  # scatter plot
    # One colour per period; np.linspace(start, stop, num) takes `num` evenly
    # spaced steps from start to stop.
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the optimizer; input_fn supplies a fresh (ideally reshuffled)
        # batch of data each time it is called.
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])
        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])  # this period's colour
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    plt.show()

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
def train_model(learning_rate, steps, batch_size, input_feature):
    """Trains a linear regression model.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      input_feature: A `string` specifying a column from
        `california_housing_dataframe` to use as input feature.

    Returns:
      A Pandas `DataFrame` containing targets and the corresponding predictions
      done after training the model.
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]].astype('float32')
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label].astype('float32')

    # Create input functions.
    training_input_fn = lambda: input_fn(
        my_feature_data, targets, batch_size=batch_size)
    predict_training_input_fn = lambda: input_fn(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create a linear regressor object.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period,
        )
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Create a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
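# A hedged usage sketch for the train_model variants above; the hyperparameter
# values are illustrative starting points, not tuned settings.
calibration_data = train_model(
    learning_rate=0.00002,
    steps=500,
    batch_size=5,
    input_feature="total_rooms")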
def train_model(learning_rate, steps, batch_size, input_feature):
    """Trains a linear regression model.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      input_feature: A `string` naming a column of
        `california_housing_dataframe` to use as the input feature.

    Returns:
      A Pandas `DataFrame` containing the targets and the corresponding
      predictions made after training the model.
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_column = california_housing_dataframe[[my_feature]].astype('float32')
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label].astype('float32')

    # Create the input functions.
    training_input_fn = learn_io.pandas_input_fn(
        x=my_feature_column, y=targets, num_epochs=None, batch_size=batch_size)
    predict_training_input_fn = learn_io.pandas_input_fn(
        x=my_feature_column, y=targets, num_epochs=1, shuffle=False)

    # Create the linear regressor object.
    feature_columns = [tf.contrib.layers.real_valued_column(my_feature)]
    linear_regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate),
        gradient_clip_norm=5.0
    )

    # Set up to plot the state of the model's line for each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model inside a loop so that the loss metric can be assessed
    # periodically.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.fit(
            input_fn=training_input_fn,
            steps=steps_per_period,
        )
        # Take a break and compute predictions.
        predictions = list(linear_regressor.predict(
            input_fn=predict_training_input_fn))
        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Periodically print the current loss.
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        # Append this period's loss metric to the list.
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and bias over time.
        # Apply some math so the data and the line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        x_extents = (y_extents - linear_regressor.bias_) / linear_regressor.weights_[0]
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = linear_regressor.weights_[0] * x_extents + linear_regressor.bias_
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of the loss metric over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
def get_color(value):
    # Map value linearly from [low, high] onto [0, 1] for the colormap.
    return cm.coolwarm(float((value - low) / (high - low)))
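# A hedged usage sketch for get_color: `low` and `high` are assumed to be
# module-level bounds of the value range, so they are defined here for
# illustration. Values outside the range are clipped by the colormap.
from matplotlib import cm

low, high = 0.0, 100.0
print(get_color(25.0))   # a cool (bluish) RGBA tuple
print(get_color(90.0))   # a warm (reddish) RGBA tuple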
def _plot_peak(structure, h, k, l, _magnetic_field, _polarization):
    """Show the plot of a Bragg peak specified by (h,k,l) using the nuclear
    and magnetic structure read from the input structure.

    Inputs
        structure: an instance of crystallographyClasses.crystalStructure
            with a specified nuclear and magnetic structure
        hkl: the reciprocal lattice point for which to plot the peak
        _magnetic_field: strength of the magnetic field [unit: tesla]
        _polarization: number between 0 and 1 indicating beam polarization

    Outputs
    """
    # NOTE: Set to 1 if simulating HB3A, set to -1 if simulating LLB
    HB3A = 1

    # The nuclear structure factor is independent of the magnetic field direction
    Fn = structure._calculate_structureFactor(h, k, l)

    t, p = np.linspace(0, np.pi, 10), np.linspace(0, 2 * np.pi, 20)
    T, P = np.meshgrid(t, p)
    X, Y, Z = np.sin(T) * np.cos(P), np.sin(T) * np.sin(P), np.cos(T)

    # Fill an array FR1 with magnetic field-dependent flipping ratios
    FR1 = np.zeros(P.shape)
    for i in range(P.shape[0]):
        for j in range(P.shape[1]):
            t, p = T[i, j], P[i, j]
            Uijk = np.array([[np.sin(t) * np.cos(p)],
                             [np.sin(t) * np.sin(p)],
                             [np.cos(t)]])
            Fm_perp_Q = structure._calculate_Fm_perp_Q(
                h, k, l, HB3A * _magnetic_field * Uijk)
            FR1[i, j] = _calculate_flipping_ratio(Fn, Fm_perp_Q,
                                                  _polarization * Uijk)[2]

    # Construct the figure from the X, Y, Z and FR1 arrays
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    norm = matplotlib.colors.SymLogNorm(1, vmin=FR1.min(), vmax=FR1.max())
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           facecolors=cm.coolwarm(norm(FR1)),
                           antialiased=False, shade=False)
    surf.set_edgecolors('None')
    m = cm.ScalarMappable(cmap=cm.coolwarm)
    m.set_array(FR1)
    fig.colorbar(m)

    # Calculate coordinates of the unit cell axes in the orthonormal system
    _a_coords_ijk = np.matmul(structure.IJK_Mct_ABC, np.array([[1], [0], [0]]))
    _b_coords_ijk = np.matmul(structure.IJK_Mct_ABC, np.array([[0], [1], [0]]))
    _c_coords_ijk = np.matmul(structure.IJK_Mct_ABC, np.array([[0], [0], [1]]))
    _a_coords_ijk = _a_coords_ijk / np.linalg.norm(_a_coords_ijk) * 1.3
    _b_coords_ijk = _b_coords_ijk / np.linalg.norm(_b_coords_ijk) * 1.3
    _c_coords_ijk = _c_coords_ijk / np.linalg.norm(_c_coords_ijk) * 1.3

    # Set text on the figure indicating the directions of the crystal axes
    ax.text(1.2, 0, 0, 'i')
    ax.text(0, 1.2, 0, 'j')
    ax.text(0, 0, 1.2, 'k')
    ax.text(_a_coords_ijk[0, 0], _a_coords_ijk[1, 0], _a_coords_ijk[2, 0], 'a')
    ax.text(_b_coords_ijk[0, 0], _b_coords_ijk[1, 0], _b_coords_ijk[2, 0], 'b')
    ax.text(_c_coords_ijk[0, 0], _c_coords_ijk[1, 0], _c_coords_ijk[2, 0], 'c')

    ax.set_aspect('equal')
    ax.set_axis_off()
    plt.show()
# Create a new EstimateSurface object.
gp = EstimateSurface(training_points, training_dists, GAMMA, SD)

# Generate a meshgrid and plot the isosurface.
x, y = np.meshgrid(np.arange(-BOX, BOX, RES), np.arange(-BOX, BOX, RES))
distances = np.zeros(x.shape)
errors = np.zeros(x.shape)
for ii in range(x.shape[0]):
    for jj in range(x.shape[1]):
        query = Point2D(x[ii, jj], y[ii, jj])
        dist, err = gp.SignedDistance(query, GAMMA, SD)
        distances[ii, jj] = dist
        errors[ii, jj] = err

plt.figure(1)
cs = plt.contour(x, y, distances, [-0.1, 0, 0.1])
plt.colorbar()

# Plot the training points.
for p in training_points:
    plt.plot(p.x_, p.y_, 'ro')

fig = plt.figure(2)
ax = fig.add_subplot(projection="3d")
cmap = cm.ScalarMappable(cmap=cm.coolwarm)
cmap.set_array(errors)
surf = ax.plot_surface(x, y, distances, rstride=1, cstride=1,
                       antialiased=False, facecolors=cm.coolwarm(errors))
fig.colorbar(cmap)
plt.show()
def train_model(dataframe, learning_rate=0.00002001, steps=500, batch_size=5,
                input_feature="total_rooms", periods=10,
                data_label="median_house_value", show_sample=300):
    '''Trains a linear regression model of one feature.

    Args:
        learning_rate: A 'float', the learning rate.
        steps: A non-zero 'int', the total number of training steps. A training
            step consists of a forward and backward pass using a single batch.
        batch_size: A non-zero 'int', the batch size.
        input_feature: A 'string' specifying a column from
            'california_housing_dataframe' to use as input feature.
        periods: number of iterations in between two plots.

    Return:
        root_mean_squared_errors: list of the RMSE of each period.
    '''
    # In this function, we proceed in evenly divided periods so that we can
    # observe the model improvement at each period.
    steps_per_period = steps / periods
    my_feature_data = dataframe[[input_feature]]

    # Define the target.
    targets = dataframe[data_label]

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column(input_feature)]

    # Create input functions.
    training_input_fn = lambda: Input_1_Feature(
        my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: Input_1_Feature(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    # Configure the LinearRegressor.
    my_optimizer = Optimizer_Gradient_Descent(learning_rate)
    # Configure the linear regression model with our feature columns and optimizer.
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    # TODO: could this plotting setup happen somewhere else?
    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(data_label)
    plt.xlabel(input_feature)
    sample = dataframe.sample(n=show_sample)
    plt.scatter(sample[input_feature], sample[data_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model, but do so inside a loop so that we can periodically
    # assess loss metrics.
    print('Training model...')
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
        # Take a break and compute predictions.
        predictions = Predict_On_Training_Data(prediction_input_fn, linear_regressor)
        root_mean_squared_error = compute_loss_RMSE(predictions, targets,
                                                    root_mean_squared_errors)
        # Occasionally print the current loss.
        print('period %02d: %0.2f' % (period, root_mean_squared_error))

        # Track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[data_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[input_feature].max()),
                               sample[input_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print('Model training finished')

    # For each period, we compute and graph the training loss, and also plot
    # the feature weight and bias term values learned by the model over time.
    # This may help you judge when a model has converged, or whether it needs
    # more iterations.

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return root_mean_squared_errors
def plot_wing(self):
    n_names = len(self.names)
    self.ax.cla()
    az = self.ax.azim
    el = self.ax.elev
    dist = self.ax.dist

    for j, name in enumerate(self.names):
        mesh0 = self.mesh[self.curr_pos*n_names+j].copy()
        self.ax.set_axis_off()

        if self.show_wing:
            def_mesh0 = self.def_mesh[self.curr_pos*n_names+j]
            x = mesh0[:, :, 0]
            y = mesh0[:, :, 1]
            z = mesh0[:, :, 2]

            try:  # show deformed mesh option may not be available
                if self.show_def_mesh.get():
                    x_def = def_mesh0[:, :, 0]
                    y_def = def_mesh0[:, :, 1]
                    z_def = def_mesh0[:, :, 2]
                    self.c2.grid(row=0, column=3, padx=5, sticky=Tk.W)
                    if self.ex_def.get():
                        z_def = (z_def - z) * 10 + z_def
                        def_mesh0 = (def_mesh0 - mesh0) * 30 + def_mesh0
                    else:
                        def_mesh0 = (def_mesh0 - mesh0) * 2 + def_mesh0
                    self.ax.plot_wireframe(x_def, y_def, z_def, rstride=1,
                                           cstride=1, color='k')
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1,
                                           color='k', alpha=.3)
                else:
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')
                    self.c2.grid_forget()
            except:
                self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')

        if self.show_tube:
            r0 = self.radius[self.curr_pos*n_names+j]
            t0 = self.thickness[self.curr_pos*n_names+j]
            # Normalise the thicknesses for the colormap.
            colors = t0
            colors = colors / np.max(colors)
            num_circ = 12
            fem_origin = 0.35
            n = mesh0.shape[1]
            p = np.linspace(0, 2*np.pi, num_circ)
            for i, thick in enumerate(t0):
                r = np.array((r0[i], r0[i]))
                R, P = np.meshgrid(r, p)
                X, Z = R*np.cos(P), R*np.sin(P)
                chords = mesh0[-1, :, 0] - mesh0[0, :, 0]
                comp = fem_origin * chords + mesh0[0, :, 0]
                X[:, 0] += comp[i]
                X[:, 1] += comp[i+1]
                Z[:, 0] += fem_origin * (mesh0[-1, i, 2] - mesh0[0, i, 2]) + mesh0[0, i, 2]
                Z[:, 1] += fem_origin * (mesh0[-1, i+1, 2] - mesh0[0, i+1, 2]) + mesh0[0, i+1, 2]
                Y = np.empty(X.shape)
                Y[:] = np.linspace(mesh0[0, i, 1], mesh0[0, i+1, 1], 2)
                col = np.zeros(X.shape)
                col[:] = colors[i]
                try:
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.viridis(col), linewidth=0)
                except:  # fall back for older matplotlib without viridis
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.coolwarm(col), linewidth=0)

    lim = 0.
    for j in range(n_names):
        ma = np.max(self.mesh[self.curr_pos*n_names+j], axis=(0, 1, 2))
        if ma > lim:
            lim = ma
    lim /= float(self.zoom_scale)
    self.ax.auto_scale_xyz([-lim, lim], [-lim, lim], [-lim, lim])
    self.ax.view_init(elev=el, azim=az)  # Reproduce view
    self.ax.dist = dist
def plot_trisurf(fig, ax, vertices, triangles, texture=None, vmin=None, vmax=None):
    """ Display a tri surface.

    Parameters
    ----------
    fig: Figure
        the matplotlib figure.
    ax: Axes3D
        axis to display the surface plot.
    vertices: array (N, 3)
        the surface vertices.
    triangles: array (N, 3)
        the surface triangles.
    texture: array (N, ), default None
        a texture to display on the surface.
    vmin: float, default None
        minimum value to map.
    vmax: float, default None
        maximum value to map.
    """
    # Parameters: default the texture before deriving vmin/vmax from it,
    # otherwise a None texture would crash the min()/max() calls.
    if texture is None:
        texture = np.ones((len(vertices), ))
    if vmin is None:
        vmin = texture.min()
    if vmax is None:
        vmax = texture.max()

    # Display tri surface
    x, y, z = vertices[:, 0], vertices[:, 1], vertices[:, 2]
    norm = colors.Normalize(vmin=vmin, vmax=vmax, clip=False)
    facecolors = cm.coolwarm(norm(texture))
    triangle_vertices = np.array([vertices[tri] for tri in triangles])
    polygon = Poly3DCollection(triangle_vertices, facecolors=facecolors,
                               edgecolors="black")
    ax.add_collection3d(polygon)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)

    # Add colorbar
    m = cm.ScalarMappable(cmap=cm.coolwarm, norm=norm)
    m.set_array(texture)
    fig.colorbar(m, ax=ax, fraction=0.046, pad=0.04)

    # Get rid of the panes
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))

    # Get rid of the spines
    ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
    ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
    ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))

    # Get rid of the ticks
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])
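# A hedged usage sketch for plot_trisurf on a toy mesh (a tetrahedron); the
# per-vertex texture values are arbitrary illustrative data.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

vertices = np.array([[0., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.],
                     [0., 0., 1.]])
triangles = np.array([[0, 1, 2],
                      [0, 1, 3],
                      [0, 2, 3],
                      [1, 2, 3]])
texture = np.array([0., 1., 2., 3.])   # one value per vertex

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
plot_trisurf(fig, ax, vertices, triangles, texture=texture)
plt.show()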
fig = Figure(figsize=(ntile[1], ntile[0]), dpi=tp)
ax = fig.add_axes([0, 0, 1, 1])

import numpy

# Scale the weights image into a brightness channel via arcsinh stretching.
vmax = numpy.nanmax(imgw)
imgw *= (300. / vmax)
numpy.arcsinh(imgw, out=imgw)
imgw /= 5.
brightness = imgw

# Colour by the value image, normalised to +/- 2 standard deviations.
vmean = numpy.nanmean(imgv)
vstd = numpy.nanstd(imgv)
color = cm.coolwarm(Normalize(vmin=vmean - vstd * 2, vmax=vmean + vstd * 2)(imgv))
color[..., :3] *= brightness[..., None]

# Rescale any RGB values pushed above 1.0 by the brightness multiplication.
color = color.reshape(-1, 4)
bad = (color[..., :3] > 1.0).any(axis=-1)
print(bad.sum(), color.shape)
color[..., :3][bad] /= color[..., :3][bad].max(axis=-1)[..., None]
color = color.reshape(imgw.shape[0], imgw.shape[1], 4)

print(numpy.histogram(brightness.ravel()))
print(numpy.histogram(color[..., :3].max(axis=-1).ravel()))

ax.imshow(color)
# ax.imshow(img)
def plot_wing(self):
    n_names = len(self.names)
    self.ax.cla()
    az = self.ax.azim
    el = self.ax.elev
    dist = self.ax.dist

    for j, name in enumerate(self.names):
        mesh0 = self.mesh[self.curr_pos * n_names + j].copy()
        self.ax.set_axis_off()

        if self.show_wing:
            def_mesh0 = self.def_mesh[self.curr_pos * n_names + j]
            x = mesh0[:, :, 0]
            y = mesh0[:, :, 1]
            z = mesh0[:, :, 2]

            try:  # the "show deformed mesh" option may not be available
                if self.show_def_mesh.get():
                    x_def = def_mesh0[:, :, 0]
                    y_def = def_mesh0[:, :, 1]
                    z_def = def_mesh0[:, :, 2]
                    self.c2.grid(row=0, column=3, padx=5, sticky=Tk.W)
                    if self.ex_def.get():
                        z_def = (z_def - z) * 10 + z_def
                        def_mesh0 = (def_mesh0 - mesh0) * 30 + def_mesh0
                    else:
                        def_mesh0 = (def_mesh0 - mesh0) * 2 + def_mesh0
                    self.ax.plot_wireframe(x_def, y_def, z_def, rstride=1,
                                           cstride=1, color='k')
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1,
                                           color='k', alpha=.3)
                else:
                    self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1,
                                           color='k')
                    self.c2.grid_forget()
            except Exception:
                self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')

        # cg = self.cg[self.curr_pos]
        # self.ax.scatter(cg[0], cg[1], cg[2], s=100, color='r')

        if self.point_masses_exist:
            for point_mass_loc in self.point_mass_locations[self.curr_pos]:
                self.ax.scatter(point_mass_loc[0], point_mass_loc[1],
                                point_mass_loc[2], s=100, color='b')
                if self.symmetry:
                    self.ax.scatter(point_mass_loc[0], -point_mass_loc[1],
                                    point_mass_loc[2], s=100, color='b')

        if self.show_tube:
            # Get the array of radii and thickness values for the FEM system
            r0 = self.radius[self.curr_pos * n_names + j]
            t0 = self.thickness[self.curr_pos * n_names + j]

            # Create a normalized array of values for the colormap
            colors = t0 / np.max(t0)

            # Set the number of rectangular patches on the cylinder
            num_circ = 12
            fem_origin = self.fem_origin_dict[name.split('.')[-1] + '_fem_origin']

            # Create an array of angles around a circle
            p = np.linspace(0, 2 * np.pi, num_circ)

            # This is just to show the deformed mesh if selected
            if self.show_wing:
                if self.show_def_mesh.get():
                    mesh0[:, :, 2] = def_mesh0[:, :, 2]

            # Loop through each element in the FEM system
            for i, thick in enumerate(t0):
                # Get the radii describing the circles at each nodal point
                r = np.array((r0[i], r0[i]))
                R, P = np.meshgrid(r, p)

                # Get the X and Z coordinates for all points around the circle
                X, Z = R * np.cos(P), R * np.sin(P)

                # Get the chord and center location for the FEM system
                chords = mesh0[-1, :, 0] - mesh0[0, :, 0]
                comp = fem_origin * chords + mesh0[0, :, 0]

                # Add the location of the element centers to the circle coordinates
                X[:, 0] += comp[i]
                X[:, 1] += comp[i + 1]
                Z[:, 0] += fem_origin * (mesh0[-1, i, 2] - mesh0[0, i, 2]) + mesh0[0, i, 2]
                Z[:, 1] += fem_origin * (mesh0[-1, i + 1, 2] - mesh0[0, i + 1, 2]) + mesh0[0, i + 1, 2]

                # Get the spanwise locations of the spar points
                Y = np.empty(X.shape)
                Y[:] = np.linspace(mesh0[0, i, 1], mesh0[0, i + 1, 1], 2)

                # Set the colors of the rectangular surfaces
                col = np.zeros(X.shape)
                col[:] = colors[i]

                # Plot the rectangular surfaces for each individual FEM element;
                # fall back to coolwarm if viridis is unavailable
                try:
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.viridis(col), linewidth=0)
                except Exception:
                    self.ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                         facecolors=cm.coolwarm(col), linewidth=0)

    # Scale the axes equally about the largest mesh extent
    lim = 0.
    for j in range(n_names):
        ma = np.max(self.mesh[self.curr_pos * n_names + j], axis=(0, 1, 2))
        if ma > lim:
            lim = ma
    lim /= float(self.zoom_scale)
    self.ax.auto_scale_xyz([-lim, lim], [-lim, lim], [-lim, lim])
    self.ax.set_title("Iteration: {}".format(self.curr_pos))

    # round_to_n = lambda x, n: round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1))

    # Print the objective value under the wing, if optimizing
    if self.opt:
        obj_val = self.obj[self.curr_pos]
        self.ax.text2D(.15, .05, self.obj_key + ': {}'.format(obj_val),
                       transform=self.ax.transAxes, color='k')

    self.ax.view_init(elev=el, azim=az)  # Reproduce view
    self.ax.dist = dist
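The spar-tube loop above boils down to one technique: draw a short cylinder segment per FEM element and colour each segment by a normalised per-element value via `facecolors`. A minimal standalone sketch of just that technique, with made-up radius and thickness values and segments laid out along the y-axis:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

fig = plt.figure()
ax = fig.add_subplot(projection='3d')

n_elems = 10
radius = 0.2
thickness = np.linspace(1.0, 3.0, n_elems)   # hypothetical per-element values
colors = thickness / thickness.max()         # normalise for the colormap

p = np.linspace(0, 2 * np.pi, 12)            # angles around the circle
for i in range(n_elems):
    R, P = np.meshgrid(np.array([radius, radius]), p)
    X, Z = R * np.cos(P), R * np.sin(P)
    Y = np.empty(X.shape)
    Y[:] = np.linspace(i, i + 1, 2)          # one unit-length segment per element
    col = np.full(X.shape, colors[i])        # constant colour over the segment
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                    facecolors=cm.viridis(col), linewidth=0)
plt.show()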
def train_model(learning_rate, steps, batch_size, input_features="total_rooms"):
    """Train a linear regression model on a single input feature.

    :param learning_rate: learning rate for gradient descent
    :param steps: total number of training steps
    :param batch_size: batch size for training
    :param input_features: a string specifying a column from the '***.csv'
        file to use as the input feature
    :return: None
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_features
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    # Create feature columns
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create input functions.
    # (Why a lambda? It lets us bind arguments to my_input_fn while still
    # handing the estimator a zero-argument callable.)
    training_input_fn = lambda: my_input_fn(my_feature_data, targets,
                                            batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(my_feature_data, targets,
                                              num_epochs=1, shuffle=False)

    # Create a linear regressor object
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    # Set up to plot
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned line by period")
    plt.ylabel(my_label)
    plt.xlabel(input_features)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    print("Training model ...")
    print("RMSE (on training data):")
    root_mean_square_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)

        # Compute predictions
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item["predictions"][0] for item in predictions])

        # Compute loss
        root_mean_square_error = math.sqrt(
            metrics.mean_squared_error(targets, predictions))
        print("period: %02d : %0.2f" % (period, root_mean_square_error))
        root_mean_square_errors.append(root_mean_square_error)

        # Track the weights and bias over time.
        # (These variable names are where LinearRegressor stores its weights.)
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_features)[0]
        bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print('Model training finished')

    # Output a graph of loss metrics over periods
    plt.subplot(1, 2, 2)
    plt.ylabel("RMSE")
    plt.xlabel("Periods")
    plt.title("Root Mean Square Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_square_errors)

    # Output a table with calibration data
    calibration_data = pd.DataFrame()
    calibration_data["Predictions"] = pd.Series(predictions)
    calibration_data["Targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %.2f" % root_mean_square_error)
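A plausible invocation, assuming `california_housing_dataframe` and `my_input_fn` from the surrounding notebook are already in scope; the hyperparameter values are illustrative, not tuned:

train_model(
    learning_rate=0.00002,
    steps=500,
    batch_size=5,
    input_features="total_rooms",
)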
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    # Create feature columns
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create input functions
    training_input_fn = lambda: my_input_fn(
        my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    # Create a linear regressor object
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    # Set up to plot the model's line each period
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model inside a loop so that we can periodically assess loss metrics
    print("Training Model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)

        # Take a break and compute predictions
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))

        # Occasionally print the current loss
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))

        # Add the loss metrics from this period to our list
        root_mean_squared_errors.append(root_mean_squared_error)

        # Finally, track the weights and bias over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished")

    # Output a graph of loss metrics over periods
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Output a table with calibration data
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
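All of the train_model variants here lean on a `my_input_fn` helper that never appears in this excerpt. A minimal sketch in the style of the TF 1.x Dataset API, so the snippets can run end to end; the exact original may differ:

import numpy as np
from tensorflow.python.data import Dataset

def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Feed feature data into the LinearRegressor, one batch at a time."""
    # Convert pandas data into a dict of numpy arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}

    # Construct a dataset and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features, targets))
    ds = ds.batch(batch_size).repeat(num_epochs)

    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)

    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels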
def plot_quantile_average_cumulative_return(avg_cumulative_returns,
                                            by_quantile=False,
                                            std_bar=False,
                                            title=None,
                                            ax=None):
    """
    Plots the average cumulative returns by factor quantile over the
    periods surrounding factor signal events.

    Parameters
    ----------
    avg_cumulative_returns : pd.DataFrame
        The format is the one returned by
        performance.average_cumulative_return_by_quantile
    by_quantile : boolean, optional
        Disaggregate figures by quantile (useful to clearly see std dev bars)
    std_bar : boolean, optional
        Plot standard deviation bars
    title : string, optional
        Custom title
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    avg_cumulative_returns = avg_cumulative_returns.multiply(DECIMAL_TO_BPS)
    quantiles = len(avg_cumulative_returns.index.levels[0].unique())
    palette = [cm.coolwarm(i) for i in np.linspace(0, 1, quantiles)]
    palette = palette[::-1]  # we want negative quantiles as 'red'

    if by_quantile:
        if ax is None:
            v_spaces = ((quantiles - 1) // 2) + 1
            f, ax = plt.subplots(v_spaces, 2, sharex=False, sharey=False,
                                 figsize=(18, 6 * v_spaces))
            ax = ax.flatten()

        for i, (quantile, q_ret) in enumerate(
                avg_cumulative_returns.groupby(level='factor_quantile')):
            mean = q_ret.loc[(quantile, 'mean')]
            mean.name = 'Quantile ' + str(quantile)
            mean.plot(ax=ax[i], color=palette[i])
            ax[i].set_ylabel('Mean Return (bps)')

            if std_bar:
                std = q_ret.loc[(quantile, 'std')]
                ax[i].errorbar(std.index, mean, yerr=std,
                               fmt='none', ecolor=palette[i], label='none')

            ax[i].axvline(x=0, color='k', linestyle='--')
            ax[i].legend()
    else:
        if ax is None:
            f, ax = plt.subplots(1, 1, figsize=(18, 6))

        for i, (quantile, q_ret) in enumerate(
                avg_cumulative_returns.groupby(level='factor_quantile')):
            mean = q_ret.loc[(quantile, 'mean')]
            mean.name = 'Quantile ' + str(quantile)
            mean.plot(ax=ax, color=palette[i])

            if std_bar:
                std = q_ret.loc[(quantile, 'std')]
                ax.errorbar(std.index, mean, yerr=std,
                            fmt='none', ecolor=palette[i], label='none')

        ax.axvline(x=0, color='k', linestyle='--')
        ax.legend()
        ax.set(ylabel='Mean Return (bps)',
               title=("Average Cumulative Returns by Quantile"
                      if title is None else title),
               xlabel='Periods')

    return ax
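A usage sketch with synthetic data shaped like the expected input: a MultiIndex of (factor_quantile, 'mean'/'std') rows and one column per period offset. The drift values are made up, and `DECIMAL_TO_BPS` (10000 in alphalens) is assumed to be in scope along with np, pd, plt and cm:

import numpy as np
import pandas as pd

periods = np.arange(-5, 11)
index, rows = [], []
for q in range(1, 6):
    base = (q - 3) * np.linspace(0.0, 1e-3, len(periods))  # made-up drift per quantile
    for stat in ('mean', 'std'):
        index.append((q, stat))
        rows.append(base if stat == 'mean' else np.abs(base) * 0.2 + 1e-4)

avg_cum = pd.DataFrame(
    rows,
    index=pd.MultiIndex.from_tuples(index, names=['factor_quantile', None]),
    columns=periods)

plot_quantile_average_cumulative_return(avg_cum, by_quantile=False, std_bar=True)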
                cstride=1, cmap=cm.YlGnBu_r)  # tail of a plot_surface call begun earlier

sigma_range = max(sigma_VM) - min(sigma_VM)
N = data / 3.0
# print(N)
# print(cm.jet(N))

ax.plot_surface(x_points, y_points, shape_x1, rstride=1, cstride=1,
                facecolors=cm.coolwarm(N), linewidth=1,
                antialiased=False, shade=False)

# ax.set_zlim3d(0, cell_radius + shmoo_length)
# ax.set_xlabel(r'$x$')
# ax.set_ylabel(r'$y$')
# ax.set_zlabel(r'$z$')

# Hide the grid, tick marks, axis lines and panes for a clean render
ax.grid(False)
for a in (ax.w_xaxis, ax.w_yaxis, ax.w_zaxis):
    for t in a.get_ticklines() + a.get_ticklabels():
        t.set_visible(False)
    a.line.set_visible(False)
    a.pane.set_visible(False)
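The key idea in the fragment is colouring a surface by an independent scalar field passed through `facecolors`, rather than by the surface height itself. A self-contained version of that idea on a synthetic sphere (all data below is made up):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

fig = plt.figure()
ax = fig.add_subplot(projection='3d')

# Parametrise a unit sphere.
u = np.linspace(0, 2 * np.pi, 40)
t = np.linspace(0, np.pi, 40)
U, T = np.meshgrid(u, t)
X = np.sin(T) * np.cos(U)
Y = np.sin(T) * np.sin(U)
Z = np.cos(T)

field = (Z + 1) / 2.0  # synthetic scalar field, pre-normalised to [0, 1]
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                facecolors=cm.coolwarm(field),
                linewidth=1, antialiased=False, shade=False)
plt.show()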
def scalp_with_circles(v, channels,
                       ax=None, annotate=False,
                       vmin=None, vmax=None, colormap=None,
                       scalp_line_width=1,
                       scalp_line_style='solid',
                       chan_pos_list=CHANNEL_10_20_APPROX,
                       interpolation='bilinear'):
    """Draw a scalp plot.

    Draws a scalp plot on an existing axes. The method takes an array of
    values and an array of the corresponding channel names. It matches the
    channel names with an internal list of known channels and their
    positions to project them correctly on the scalp.

    .. warning:: The behaviour for unknown channels is undefined.

    Parameters
    ----------
    v : 1d-array of floats
        The values for the channels
    channels : 1d array of strings
        The corresponding channel names for the values in ``v``
    ax : Axes, optional
        The axes to draw the scalp plot on. If not provided, the currently
        active axes (i.e. ``gca()``) will be taken
    annotate : Boolean, optional
        Draw the channel names next to the channel markers.
    vmin, vmax : float, optional
        The display limits for the values in ``v``. If the data in ``v``
        contains values between -3..3 and ``vmin`` and ``vmax`` are set to
        -1 and 1, all values smaller than -1 and bigger than 1 will appear
        the same as -1 and 1. If not set, the maximum absolute value in
        ``v`` is taken to calculate both values.
    colormap : matplotlib.colors.colormap, optional
        A colormap to define the color transitions.

    Returns
    -------
    ax : Axes
        the axes on which the plot was drawn

    See Also
    --------
    ax_colorbar
    """
    if ax is None:
        ax = plt.gca()
    assert len(v) == len(channels), "Should be as many values as channels"
    assert interpolation == 'bilinear' or interpolation == 'nearest'
    if vmin is None:
        # added by me ([email protected])
        assert vmax is None
        vmin, vmax = -np.max(np.abs(v)), np.max(np.abs(v))

    # what if we have an unknown channel?
    points = [get_channelpos(c, chan_pos_list) for c in channels]
    for c in channels:
        assert get_channelpos(c, chan_pos_list) is not None, (
            "Expect " + c + " to exist in positions")
    values = [v[i] for i in range(len(points))]

    # Draw one circle per channel, filled only for positive values
    for (x, y), z in zip(points, values):
        fill = bool(z > 0)
        ax.add_artist(plt.Circle((x, y), 0.03,
                                 linestyle=scalp_line_style, linewidth=0.2,
                                 fill=fill, facecolor=cm.coolwarm(z)))
        # plt.plot(x, y, marker='x', markersize=5)

    # paint the head
    ax.add_artist(plt.Circle((0, 0), 1, linestyle=scalp_line_style,
                             linewidth=scalp_line_width, fill=False))
    # add a nose
    ax.plot([-0.1, 0, 0.1], [1, 1.1, 1], color='black',
            linewidth=scalp_line_width, linestyle=scalp_line_style)
    # add ears
    add_ears(ax, scalp_line_width, scalp_line_style)

    # set the axes limits, so the figure is centered on the scalp
    ax.set_ylim([-1.05, 1.15])
    ax.set_xlim([-1.15, 1.15])

    # hide the frame and ticks
    ax.set_frame_on(False)
    ax.set_xticks([])
    ax.set_yticks([])

    # draw the channel names next to their markers
    if annotate:
        for name, point in zip(channels, points):
            ax.annotate(" " + name, point,
                        horizontalalignment="center",
                        verticalalignment="center")
    ax.set_aspect(1)
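A possible invocation, assuming the module-level helpers this function relies on (`get_channelpos`, `CHANNEL_10_20_APPROX`, `add_ears`) are defined alongside it; the channel subset and values are hypothetical:

import numpy as np
import matplotlib.pyplot as plt

channels = ['Fz', 'Cz', 'Pz', 'C3', 'C4']       # hypothetical 10-20 channel subset
values = np.array([0.8, -0.3, 0.5, -0.9, 0.2])  # made-up channel values

fig, ax = plt.subplots(figsize=(6, 6))
scalp_with_circles(values, channels, ax=ax, annotate=True)
plt.show()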
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    training_input_fn = lambda: my_input_fn(
        my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(
        my_feature_data, targets, num_epochs=1, shuffle=False)

    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns, optimizer=my_optimizer)

    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)

        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        print("  period %02d : %0.2f" % (period, root_mean_squared_error))
        root_mean_squared_errors.append(root_mean_squared_error)

        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value(
            'linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value(
            'linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)