def test3():
    #np.random.seed(4)
    #np.random.seed(10)
    # Let's consider the case with four nodes and no cost of establishing a link.
    N = 20  # 5
    all_agents = np.arange(0, N)
    # We start off with the empty network or two disconnected components
    initial_net = 'empty'
    if initial_net == 'empty':
        M = np.zeros((N, N))
    elif initial_net == 'two':
        M = np.zeros((N, N))
        M[0, 1] = M[1, 0] = 1
        M[2, 3] = M[3, 2] = 1
    mu0s = np.ones(N) * 0.4
    s = np.sqrt(0.1)
    Us = np.zeros(N)
    cs = np.ones(N) * 0.05
    mu0s[2] = 0.3
    for i in range(N):
        ids = all_agents[M[i, :] == 1]
        Us[i] = prob_correct_action(mu0s[i], mu0s[ids], s, q=0.5)
    #print(M)
    runs = 500
    density = np.zeros(runs)
    clustering = np.zeros(runs)
    for i in range(runs):
        if i % 100 == 0:
            print(i)
        [M, Us] = update_neighbor(mu0s, s, M, Us, cs)
        density[i] = np.sum(M) / (N * (N - 1))
        G = nx.Graph(M)
        clustering[i] = nx.average_clustering(G)
    #print(M)
    clustering_ER = np.zeros(runs)
    for i in range(100):
        G = nx.erdos_renyi_graph(N, 0.22)
        clustering_ER[i] = nx.average_clustering(G)
    av_clustering_ER = np.mean(clustering_ER)
    pl.figure()
    pl.plot(density)
    pl.plot(clustering)
    pl.hlines(av_clustering_ER, 0, 200)
    pl.figure()
    nx.draw(nx.Graph(M))
    pl.show()

def test2():
    mu0 = 0.4
    mu1 = 0.6
    s = np.sqrt(0.1)
    runs = 14
    Ps = np.zeros(runs)
    for run in range(1, runs):
        k = run
        mu0ks = np.ones(k) * 0.3
        Ps[run] = prob_correct_action(mu0, mu0ks, s, q=0.5)
        print('P with social belief: ' + str(Ps[run]))
    Pp = integrate.quad(p_belief_dist, 0.01, 0.5, args=(mu0, 1 - mu0, s))[0]
    print('P with private belief only: ' + str(Pp))
    Ps[0] = Pp
    pl.figure()
    pl.plot(np.arange(0, runs), Ps)
    pl.plot(np.arange(1, runs), np.diff(Ps))
    pl.hlines(Pp, 0, runs)
    pl.show()

def plot_group_ams(curves, thresholds, stages, algorithm):
    plt.figure()
    plt.hlines(y=3.8058, xmin=80, xmax=95, colors='r')
    plt.grid(b=True, which='both', axis='both')
    plt.minorticks_on()
    plt.title(r'AMS ($\sigma$) vs. Cut-off [Algorithm: ' + algorithm + ']',
              fontsize='small')
    for i in range(len(stages)):
        plt.plot(thresholds, curves[i], label='n_stages ' + str(stages[i] + 1))
        max_thresh = curves[i].index(max(curves[i]))
        plt.scatter(thresholds[max_thresh], max(curves[i]),
                    marker='o', color='magenta')
    plt.xlabel('Selection Threshold %', fontsize='small')
    plt.ylabel(r'$\sigma$', fontsize='small')
    plt.legend(fontsize='small')
    locs = np.arange(2.5, 4, 0.1)
    labels = [str(x) + r'$\sigma$' for x in locs]
    plt.yticks(locs, labels)
    plt.ylim(2.5, 4)
    plt.xlim(80, 95)
    plt.tight_layout()
    title = '../Graphs/AMS_Curve_' + algorithm + '_Cluster' + '.png'
    print('Saving graph in ' + title)
    plt.savefig(title)

def _plot_thr(con_b, fdr_thr, max_thr, alpha, fig_out):
    '''
    Plot the significance threshold of the causality analysis.

    Parameters
    ----------
    con_b: array
        Causality matrix.
    fdr_thr: float
        Threshold combined with FDR.
    max_thr: float
        Threshold from the maximum causality value of the surrogates.
    alpha: float
        Significance level used in the FDR threshold label.
    fig_out: string
        The path to store the threshold plots.
    '''
    # plt.ioff()
    plt.close('all')
    c = np.unique(con_b)
    plt.plot(c, 'k', label='real con')
    xmin, xmax = plt.xlim()
    plt.hlines(fdr_thr, xmin, xmax, linestyle='--', colors='k',
               label='per=%.2f:%.2f' % (alpha, fdr_thr), linewidth=2)
    # label='p=%.2f(FDR):%.2f' % (alpha, fdr_thr), linewidth=2)
    plt.hlines(max_thr, xmin, xmax, linestyle='--', colors='g',
               label='Max surr', linewidth=2)
    plt.legend()
    plt.xlabel('points')
    plt.ylabel('causality values')
    # pl.show()
    plt.savefig(fig_out)
    plt.close()
    return

def plot_grad_flow(named_parameters, figsize=None):
    '''Plots the gradients flowing through different layers in the net
    during training. Can be used for checking for possible gradient
    vanishing / exploding problems.

    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the
    gradient flow
    https://discuss.pytorch.org/t/check-gradient-flow-in-network/15063/8#post_10'''
    from matplotlib.lines import Line2D
    avg_grads = []
    max_grads = []
    layers = []
    for n, p in named_parameters:
        if (p.grad is not None) and ("bias" not in n):
            layers.append(n)
            avg_grads.append(p.grad.abs().mean())
            max_grads.append(p.grad.abs().max())
    if figsize is None:
        figsize = (np.min([16, len(avg_grads)]), 6)
    plt.figure(figsize=figsize)
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), avg_grads, alpha=0.1, lw=1, color="b")
    plt.hlines(0, 0, len(layers) + 1, lw=2, color="k")
    plt.xticks(range(0, len(layers), 1), layers, rotation="vertical")
    plt.xlim(left=-1, right=len(layers))
    plt.xlabel("Layers")
    plt.ylabel("Gradient Magnitude")
    plt.yscale('log')
    plt.title("Gradient flow")
    plt.grid(True)
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)],
               ['max-gradient', 'mean-gradient', 'zero-gradient'])

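# A hedged usage sketch for plot_grad_flow above, not part of the original:
# the two-layer model and the random batch are made up for illustration. The
# only real requirement, per the docstring, is that loss.backward() has run
# so that p.grad is populated before the call.
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
criterion = nn.MSELoss()
x, y = torch.randn(32, 8), torch.randn(32, 1)

loss = criterion(model(x), y)
loss.backward()  # gradients must exist before plotting
plot_grad_flow(model.named_parameters())
plt.show()
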
def plot_ams_curve(cls, thresh, ams_curve, classifier, legend_text, new_fig,
                   save, settings):
    if new_fig:
        plt.figure()
        plt.hlines(y=3.8058, xmin=80, xmax=95, colors='r')
        plt.grid(b=True, which='both', axis='both')
        plt.minorticks_on()
        plt.title('AMS vs. Cut-off')
    plt.plot(thresh, ams_curve, label=legend_text)
    max_thresh = ams_curve.index(max(ams_curve))
    plt.scatter(thresh[max_thresh], max(ams_curve), marker='o', color='r')
    plt.xlabel('Selection Threshold %')
    plt.ylabel(r'$\sigma$')
    plt.title(r'AMS ($\sigma$) for Classifier: ' +
              settings.get('algorithmName', classifier.algorithm))
    plt.legend(prop={'size': 9})
    locs = np.arange(2.5, 4, 0.1)
    labels = [str(x) + r'$\sigma$' for x in locs]  # was a doubled assignment
    plt.yticks(locs, labels)
    plt.ylim(2.5, 4)
    plt.xlim(80, 95)
    plt.tight_layout()
    if save:
        title = 'Graphs/AMS_Curve_' + classifier.algorithm + '.png'
        print('Saving graph in ' + title)
        plt.savefig(title)

def psf_model_plot(rows, counts, result, save_dir, save_name, show=False):
    fwhm = float(2.355 * np.sqrt(result.params['wid'].value / 2.))
    plt.figure()
    # plt.plot(rows, counts, 'bo', color='red', label='psf')
    # plt.plot(rows, result.init_fit, 'k--')
    plt.plot(rows, result.best_fit, 'b-', label='model')
    # plt.vlines(result.params['cen'].value + result.params['wid'].value / 2.,
    #            counts.min() - 100, counts.max() + 100)
    # plt.vlines(int(result.params['cen'].value), counts.min(),
    #            counts[np.ceil(result.params['cen'].value)],
    #            linestyles='dashed')
    plt.hlines(counts.min(), rows.min(), rows.max(), linestyles='dashed')
    plt.hlines(counts.max() / 2.,
               result.params['cen'].value - fwhm / 2.,
               result.params['cen'].value + fwhm / 2.,
               color='red')
    # plt.text(result.params['cen'].value, counts.max() / 2.,
    #          'FWHM = ' + str(round(fwhm, 4)))
    plt.title('FWHM = ' + str(round(fwhm, 4)))  # was hard-coded to 9.420
    plt.legend()
    plt.xlabel('column')
    plt.ylabel('counts')
    plt.savefig(save_dir + save_name)
    if show:
        plt.show()
    plt.close()

def plotAnomalyScores(df1, anomDF, u, ylab, ptIndice):
    """
    Plot the raw data and the classical and Janossy density anomaly scores.
    ----------
    Input
    df1: pandas dataframe
        dataframe containing 'time' and 'value' columns, containing the
        raw data
    anomDF: pandas dataframe
        dataframe containing 'time', 'c_anomScore' and 'j_anomScore'
        columns, containing the fitted anomaly scores
    u: float
        cutoff threshold for fitting the PP model (will be plotted as a
        horizontal line on the graph)
    ylab: string
        label for the y-axis
    ptIndice: float or int or string
        identifier for the given subject that is being plotted
    ----------
    Output
    (none): graph
        Will produce a 1x2 subplot graph with the raw data on the left and
        the anomaly scores on the right

    Written by KN, 12-Aug-2016
    """
    plt.figure(figsize=[12, 5])

    # raw data
    plt.subplot(1, 2, 1)
    plt.plot(df1['time'], df1['value'])
    plt.xlabel('Time from start of labs')
    plt.ylabel(ylab)
    plt.title('Pt {0} raw data'.format(ptIndice))
    plt.hlines(u, np.min(df1['time']), np.max(df1['time']), linestyle='--')

    # anomaly scores
    plt.subplot(1, 2, 2)
    plt.plot(anomDF['time'], anomDF['c_anomScore'], label='Classical')
    plt.plot(anomDF['time'], anomDF['j_anomScore'], label='Janossy')
    plt.xlabel('Time from start of labs')
    plt.ylabel('Anomaly score')
    plt.legend(loc='best')
    plt.title('Pt {0} anomaly scores'.format(ptIndice))
    plt.tight_layout()

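# A hedged usage sketch for plotAnomalyScores, not from the original code:
# the dataframes and the threshold u are synthetic, chosen only to match the
# column names the function expects ('time', 'value', 'c_anomScore',
# 'j_anomScore').
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
t = np.arange(0, 100.0)
df1 = pd.DataFrame({'time': t, 'value': rng.normal(5, 1, t.size)})
anomDF = pd.DataFrame({'time': t,
                       'c_anomScore': rng.random(t.size),
                       'j_anomScore': rng.random(t.size)})
plotAnomalyScores(df1, anomDF, u=7.0, ylab='Lab value', ptIndice=1)
plt.show()
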
def plot_all_v_ext(v_ext, timeline, color='blue', label='',
                   yzoom=[-100000, 50000], clean_plot=False):
    idx = 0
    xlim = [40, 56]
    scale = 2
    for i in range(1, np.size(v_ext, 1) + 1)[::-1]:
        for j in range(1, np.size(v_ext, 2) + 1):
            idx += 1
            ax = plt.subplot(np.size(v_ext, 1), np.size(v_ext, 2), idx)
            plt.hlines(0, timeline[0], timeline[-1],
                       linestyles='dashed', colors='k')
            plt.plot(timeline, v_ext[:, i - 1, j - 1],
                     color=color, label=label)
            plt.plot([49, 49], [-0.5 * scale, 0.5 * scale], color='m', lw=2)
            if i == 1 and j == 5:
                plt.text(15, 30, u'scale=' + str(scale) + r'$\mu$V')
            #plt.ylim(np.min(v_ext), np.max(v_ext))
            # Hide the right and top spines
            #plt.ylim(yzoom)
            #print labels
            #max_y = np.max(np.abs(labels[0]), labels[-1])
            #plt.xlim([42.001,50])
            plt.xlim(xlim)
            if clean_plot:
                labels = ax.get_yticks().tolist()
                max_plot = max(np.abs(labels))
                yzoom = [-max_plot, max_plot]
                plt.ylim(yzoom)
                if idx % 5 == 1 or idx in [21, 22, 23, 24, 25]:
                    ax.spines['right'].set_visible(False)
                    ax.spines['top'].set_visible(False)
                    if idx % 5 == 1:
                        labels = ax.get_yticks().tolist()
                        ax.yaxis.set_ticks_position('left')
                        #labels = [item.get_text() for item in ax.get_yticklabels()]
                        idx_0 = labels.index(0.0)
                        labels[1:-1] = [''] * len(labels[1:-1])
                        labels[idx_0] = 0
                        ax.set_yticklabels(labels)
                        ax.spines['left'].set_visible(False)
                        ax.set_yticks([])
                    else:
                        ax.spines['left'].set_visible(False)
                        ax.set_yticks([])
                    if idx in [21, 22, 23, 24, 25]:
                        ax.xaxis.set_ticks_position('bottom')
                        locs = np.arange(xlim[0], xlim[-1], step=5)  # locations
                        ticks = np.arange(0, np.diff(xlim), step=5)
                        plt.xticks(locs, ticks)  # rotation="vertical"
                        ax.set_xlabel('time (ms)')
                    else:
                        ax.spines['bottom'].set_visible(False)
                        ax.set_xticks([])
                else:
                    plt.axis('off')
    plt.tight_layout()
    plt.subplots_adjust(hspace=0.1, wspace=0.1)

def paramCls(self, pred, exact, savename='loss'):
    sns.set_style('dark')

    plt.plot(pred[0], linewidth=5, color='dimgrey', label='predict')
    plt.hlines(exact[0], xmin=0, xmax=pred[0].shape[0], linewidth=5,
               color='coral', label='exact')
    plt.title('lpred: %.5f, vpred: %.5f, exact: %.5f'
              % (pred[0][0], pred[0][-1], exact[0][0]))
    #pdb.set_trace()
    plt.xlabel('iteration', fontsize='18')
    plt.ylabel('parameter', fontsize='18')
    plt.legend(fontsize='18')

    losspath = os.path.join(
        self.figurePath, 'loss',
        f'{savename}_{self.dataMode}_{self.trialID}.png')
    plt.savefig(losspath)
    plt.close()

def plot_ams_brf_bxt(thresh, brf_curve, bxt_curve):
    """ This function generates and saves two overlaid AMS curves. """
    plt.close()
    plt.figure()
    plt.hlines(y=3.8058, xmin=80, xmax=95, colors='r')
    plt.grid(b=True, which='both', axis='both')
    plt.minorticks_on()
    plt.title(r'AMS ($\sigma$) vs. Cut-off [BRF | BXT]', fontsize='small')
    plt.plot(thresh, brf_curve, label='BRF', color='g')
    max_thresh = brf_curve.index(max(brf_curve))
    plt.scatter(thresh[max_thresh], max(brf_curve), marker='o', color='r')
    plt.plot(thresh, bxt_curve, label='BXT', color='b')
    max_thresh = bxt_curve.index(max(bxt_curve))
    plt.scatter(thresh[max_thresh], max(bxt_curve), marker='o', color='r')
    plt.xlabel('Selection Threshold %', fontsize='small')
    plt.ylabel(r'$\sigma$', fontsize='small')
    plt.legend(fontsize='small')
    locs = np.arange(2.5, 4, 0.1)
    labels = [str(x) + r'$\sigma$' for x in locs]  # was a doubled assignment
    plt.yticks(locs, labels)
    plt.ylim(2.5, 4)
    plt.xlim(80, 95)
    plt.tight_layout()
    title = '../Graphs/AMS_Curve_BXT_BRF' + '.png'
    print('Saving graph in ' + title)
    plt.savefig(title)

def plot(all_models):
    import matplotlib.pylab as plt
    import numpy.random

    plt.close("all")
    plt.figure()
    plt.subplot(211)
    alt = np.arange(0., 500., 2.)
    sza = 0.
    for m in all_models:
        d = m(alt, sza)
        plt.plot(ne_to_fp(d) / 1E6, alt, lw=2)
        # plt.plot(m(alt, sza), alt, lw=2)
    plt.ylim(0., 400.)
    plt.ylabel('Altitude / km')
    # plt.xlabel(r'$n_e / cm^{-3}$')
    plt.xlabel(r'$f / MHz$')

    plt.subplot(212)
    for m in all_models:
        delay, freq = m.ais_response()
        plt.plot(freq / 1E6, delay * 1E3, lw=2.)
    plt.hlines(-2 * np.amax(alt) / speed_of_light_kms * 1E3, *plt.xlim(),
               linestyle='dashed')
    # plt.vlines(ne_to_fp(1E5)/1E6, *plt.ylim())
    # plt.hlines(-(500-150) * 2 / speed_of_light_kms * 1E3, *plt.xlim())
    plt.ylim(-10, 0)
    plt.ylabel('Delay / ms')
    plt.xlim(0, 7)
    plt.xlabel('f / MHz')
    plt.show()

def plot_misfit_curves(items, threshold, threshold_is_upper_limit,
                       logarithmic, component, pretty_misfit_name,
                       filename):
    plt.close()

    crossing_periods = []
    crossing_values = []
    for item in items:
        if logarithmic:
            plt.semilogy(item["periods"], item["misfit_values"])
        else:
            plt.plot(item["periods"], item["misfit_values"])
        # Find the threshold.
        point = rightmost_threshold_crossing(
            item["periods"], item["misfit_values"], threshold,
            threshold_is_upper_limit)
        crossing_periods.append(point[0])
        crossing_values.append(point[1])

    plt.title("%s misfit curves for component %s" % (
        pretty_misfit_name, component))
    plt.xlabel("Lowpass Period [s]")
    plt.ylabel("%s" % pretty_misfit_name)

    x = items[0]["periods"][0] - 0.5, items[0]["periods"][-1] + 0.5
    plt.hlines(threshold, x[0], x[1], linestyle="--", color="0.5")
    plt.scatter(crossing_periods, crossing_values, color="0.2", s=10,
                zorder=5)
    plt.xlim(*x)

    plt.savefig(filename)

def plot_position_line(position_px, positive):
    if position_px is not None:
        plt.hlines(
            position_px,
            plt.xlim()[0],
            plt.xlim()[1],
            "r",
            linestyles="dotted" if positive else "dashed",
        )

def make_campinas_plot(path: str, local: bool = False):
    df = process_covid_data(path)
    df = df.loc[df['date'] > df['date'].min(), :]
    max_rolling_daily_deaths = int(df["obitos_mm7d"].max())
    max_rolling_daily_confirmed = int(df['casos_mm7d'].max())

    plt.style.use('seaborn')

    fig, ax = plt.subplots(figsize=(18, 8))
    plt.hlines(y=max_rolling_daily_deaths, colors='g', alpha=0.6, lw=3,
               xmin=df['date'].min(), xmax=df['date'].max())
    plt.text(df['date'].min(), max_rolling_daily_deaths * 1.05,
             f"max {int(round(max_rolling_daily_deaths, 0))}", fontsize=17)
    df.plot(x='date', y='obitos_novos', lw=4, alpha=0.6, ax=ax)
    df.plot(x='date', y='obitos_mm7d', lw=4, alpha=1.0, ax=ax)
    plt.ylabel('Daily deaths', fontsize=15)
    plt.xlabel("Date", fontsize=15)
    plt.legend(fontsize=19)
    ax.tick_params(axis='both', which='major', labelsize=12)
    ax.tick_params(axis='both', which='minor', labelsize=10)
    plt.title('Daily number of deaths for Campinas', fontsize=15)
    plt.tight_layout()
    if local:
        plt.show()
    else:
        plt.savefig("/github/workspace/daily_deaths_campinas.png", dpi=150)

    fig, ax2 = plt.subplots(figsize=(18, 8))
    plt.hlines(y=max_rolling_daily_confirmed, colors='r', alpha=0.6, lw=3,
               xmin=df['date'].min(), xmax=df['date'].max())
    plt.text(df['date'].min(), max_rolling_daily_confirmed * 1.05,
             f"max {int(round(max_rolling_daily_confirmed, 0))}", fontsize=17)
    df.plot(x='date', y='casos_novos', lw=4, alpha=0.6, ax=ax2)
    df.plot(x='date', y='casos_mm7d', lw=4, alpha=1.0, ax=ax2)
    plt.ylabel('Daily confirmed cases', fontsize=15)
    plt.xlabel("Date", fontsize=15)
    plt.title('Daily number of confirmed cases for Campinas', fontsize=15)
    plt.legend(fontsize=19)
    ax2.tick_params(axis='both', which='major', labelsize=12)
    ax2.tick_params(axis='both', which='minor', labelsize=10)
    plt.tight_layout()
    if local:
        plt.show()
    else:
        plt.savefig("/github/workspace/daily_confirmed_cases_campinas.png",
                    dpi=150)
    return None

def plot_importance(model, x_train):
    coefs = pd.DataFrame(model.coef_, x_train.columns)
    #coefs = pd.DataFrame(model.feature_importances_, x_train.columns)
    coefs.columns = ["coefs"]
    coefs["coefs_abs"] = coefs.coefs.apply(np.abs)
    coefs = coefs.sort_values(by="coefs_abs",
                              ascending=False).drop(["coefs_abs"], axis=1)

    plt.figure(figsize=(16, 6))
    coefs.coefs.plot(kind="bar")
    plt.grid(True, axis="y")
    plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles="dashed")
    plt.show()

def plot_tangent(function, target_x, x):
    katamuki = numeric_diff(function, target_x)  # katamuki = slope
    target_y = function(target_x)
    seppen = target_y - katamuki * target_x      # seppen = y-intercept
    y = katamuki * x + seppen

    ## dot and guide lines
    plt.plot(target_x, target_y, marker='.')
    plt.vlines(target_x, -1, target_y, "m", linestyle=":")
    plt.hlines(target_y, 0, target_x, "m", linestyle=":")
    plt.plot(x, y)

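# numeric_diff is not shown in this file; a minimal central-difference
# sketch consistent with how plot_tangent uses it (the slope of `function`
# at `target_x`) could look like this:
def numeric_diff(function, x, h=1e-4):
    # Central difference: (f(x+h) - f(x-h)) / (2h) approximates f'(x)
    # with O(h^2) error.
    return (function(x + h) - function(x - h)) / (2 * h)
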
def plot_correlation(self, other_interface, lines=False, bounds=None):
    """
    Plot this interface's view on the underlying product vs another
    interface's view. Each vulnerability will be represented as a point,
    with the x coordinate representing the likelihood that this interface
    will discover the vulnerability in the next round, and the y coordinate
    representing the likelihood that the other interface will discover it
    this round. This plot is helpful for understanding the discovery
    correlation that arises with learning.

    Parameters
    ----------
    other_interface: Interface object
        The interface object must be built upon the same underlying
        'product' as the present interface.
    lines: True/False
        If True, will overlay horizontal and vertical lines representing
        the mean discovery profile for each actor, and label them with the
        mean value.
    bounds: None or float
        If float, will set the x and y limits to this value.

    TODO
    ----
    It might be a good idea to make the bounds an x/y tuple if we want to
    initialize the interfaces with different `max_area` values.
    """
    plt.plot(self.circles.area, other_interface.circles.area, '.', alpha=.1)

    if bounds is None:
        window_size = max(self.circles.area.max(),
                          other_interface.circles.area.max())
    else:
        window_size = bounds

    if lines:
        y_mean = np.mean(other_interface.circles.area)
        x_mean = np.mean(self.circles.area)
        plt.hlines(y_mean, 0, window_size)
        plt.text(window_size, y_mean, 'mean=%f' % y_mean,
                 ha='right', va='bottom')
        plt.vlines(x_mean, 0, window_size)
        plt.text(x_mean, window_size, 'mean=%f' % x_mean,
                 rotation=90, ha='right', va='top')

    plt.xlim(0, window_size)
    plt.ylim(0, window_size)
    plt.box(False)  # the string 'off' is truthy and would leave the box on
    plt.xlabel(self.name + ' likelihood of discovery', fontsize=14)
    plt.ylabel(other_interface.name + ' likelihood of discovery', fontsize=14)

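# A hedged usage sketch: the real Interface class is not shown here, so two
# minimal stand-ins carry only the attributes the method actually touches
# (circles.area as an array and name), and the method is called as a plain
# function with self passed explicitly.
import numpy as np
from types import SimpleNamespace
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
a = SimpleNamespace(name='attacker',
                    circles=SimpleNamespace(area=rng.random(500) * 0.02))
b = SimpleNamespace(name='defender',
                    circles=SimpleNamespace(area=rng.random(500) * 0.02))
plot_correlation(a, b, lines=True)
plt.show()
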
def plot_misfit_curves(items, threshold, threshold_is_upper_limit,
                       logarithmic, component, pretty_misfit_name,
                       filename):
    plt.close()

    crossing_periods = []
    crossing_values = []
    misfit_all = []
    for item in items:
        if logarithmic:
            plt.semilogy(item["periods"], item["misfit_values"])
        else:
            plt.plot(item["periods"], item["misfit_values"], color="blue",
                     alpha=0.15, lw=3)
        # Find the threshold.
        point = rightmost_threshold_crossing(
            item["periods"], item["misfit_values"], threshold,
            threshold_is_upper_limit)
        crossing_periods.append(point[0])
        crossing_values.append(point[1])
        misfit_all.append(item['misfit_values'])

    # Compute the mean and median of the misfit for all stations at each
    # filter period.
    misfit_all = np.asarray(misfit_all)
    misfit_mean = misfit_all.mean(axis=0)
    misfit_std = misfit_all.std(axis=0)
    misfit_median = np.median(misfit_all, axis=0)

    plt.plot(np.asarray(items[0]["periods"]), misfit_mean, color="red",
             lw=2, label='mean')
    # Standard deviation doesn't make sense for a non-normal distribution.
    #plt.errorbar(np.asarray(items[0]["periods"]), misfit_mean, misfit_std,
    #             lw=2, zorder=3)
    plt.plot(np.asarray(items[0]["periods"]), misfit_median,
             color="Chartreuse", lw=2, label='median', linestyle="--")

    plt.title("%s misfit curves for component %s" % (
        pretty_misfit_name, component))
    plt.xlabel("Lowpass Period [s]")
    plt.ylabel("%s" % pretty_misfit_name)

    x = items[0]["periods"][0] - 0.5, items[0]["periods"][-1] + 0.5
    plt.hlines(threshold, x[0], x[1], linestyle="--", color="0.5")
    plt.scatter(crossing_periods, crossing_values, color="orange", s=10,
                zorder=5, alpha=0.3)
    plt.xlim(*x)

    plt.savefig(filename)

def plot_start_end_lines(start, end):
    if start:
        plt.hlines(start, plt.xlim()[0], plt.xlim()[1], 'r',
                   linestyles='dotted')
    if end:
        plt.hlines(end, plt.xlim()[0], plt.xlim()[1], 'r',
                   linestyles='dotted')

def _windowErrorPlotterImpl(data, windowLength, waveNumber, samples):
    # Calculate the filtered wave with the _medianFilter function.
    datafiltered = _medianFilter(data, windowLength)
    error = _ErrorRateWindow(data, datafiltered, windowLength)
    error = _np.asarray(error)
    # Normalise by 0.63662 ~= 2/pi.
    error[1:] = error[1:] / 0.63662
    ax = _plt.subplot()
    _plt.axis([0, windowLength + 1, 0, 1.5])
    xticks = _np.arange(0, windowLength + 1, samples / waveNumber)
    ax.set_xticks(xticks)
    x_label = [r"${%s\pi}$" % (2 * w) for w in range(0, len(xticks))]
    ax.set_xticklabels(x_label)
    _plt.scatter(*error, c='red', lw=0)
    _plt.hlines(1, 0, windowLength, color='b', linestyle='--')

def plotCoefficients(model, X_train):
    """
    Plots sorted coefficient values of the model.
    """
    coefs = pd.DataFrame(model.coef_, X_train.columns)
    coefs.columns = ["coef"]
    coefs["abs"] = coefs.coef.apply(np.abs)
    coefs = coefs.sort_values(by="abs", ascending=False).drop(["abs"], axis=1)

    plt.figure(figsize=(15, 7))
    coefs.coef.plot(kind='bar')
    plt.grid(True, axis='y')
    plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed')

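# A hedged usage sketch for plotCoefficients, not from the original: any
# fitted estimator exposing a one-dimensional coef_ aligned with
# X_train.columns would work; the data here is random.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X_train = pd.DataFrame(rng.normal(size=(100, 4)),
                       columns=['lag_1', 'lag_2', 'trend', 'noise'])
y = 2 * X_train['lag_1'] - 0.5 * X_train['trend'] + rng.normal(size=100)
model = LinearRegression().fit(X_train, y)
plotCoefficients(model, X_train)
plt.show()
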
def sample_compare(N, samples, truestate, burn=0):
    h = samples[burn:]
    strue = truestate

    mu = h.mean(axis=0)
    std = h.std(axis=0)

    pl.figure(figsize=(20, 4))
    pl.errorbar(range(len(mu)), (mu - strue),
                yerr=5 * std / np.sqrt(h.shape[0]),
                fmt='.', lw=0.15, alpha=0.5)
    pl.vlines([0, 3 * N - 0.5, 4 * N - 0.5], -1, 1, linestyle='dashed',
              lw=4, alpha=0.5)
    pl.hlines(0, 0, len(mu), linestyle='dashed', lw=5, alpha=0.5)
    pl.xlim(0, len(mu))
    pl.ylim(-0.02, 0.02)
    pl.show()

def plot_psd_score(filename):
    ds = xr.open_dataset(filename)
    resolved_scale = find_wavelength_05_crossing(filename)

    plt.figure(figsize=(10, 5))
    ax = plt.subplot(121)
    ax.invert_xaxis()
    plt.plot((1. / ds.wavenumber), ds.psd_ref, label='reference', color='k')
    plt.plot((1. / ds.wavenumber), ds.psd_study, label='reconstruction',
             color='lime')
    plt.xlabel('wavelength [km]')
    plt.ylabel('Power Spectral Density [m$^{2}$/cy/km]')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='best')
    plt.grid(which='both')

    ax = plt.subplot(122)
    ax.invert_xaxis()
    plt.plot((1. / ds.wavenumber), (1. - ds.psd_diff / ds.psd_ref),
             color='k', lw=2)
    plt.xlabel('wavelength [km]')
    plt.ylabel('PSD Score [1. - PSD$_{err}$/PSD$_{ref}$]')
    plt.xscale('log')
    plt.hlines(y=0.5,
               xmin=np.ma.min(np.ma.masked_invalid(1. / ds.wavenumber)),
               xmax=np.ma.max(np.ma.masked_invalid(1. / ds.wavenumber)),
               color='r', lw=0.5, ls='--')
    plt.vlines(x=resolved_scale, ymin=0, ymax=1, lw=0.5, color='g')
    ax.fill_betweenx((1. - ds.psd_diff / ds.psd_ref),
                     resolved_scale,
                     np.ma.max(np.ma.masked_invalid(1. / ds.wavenumber)),
                     color='green',
                     alpha=0.3,
                     label=f'resolved scales \n $\lambda$ > {int(resolved_scale)}km')
    plt.legend(loc='best')
    plt.grid(which='both')

    logging.info(' ')
    logging.info(f'  Minimum spatial scale resolved = {int(resolved_scale)}km')

    plt.show()

    return resolved_scale

def hess_accuracy_curve(raw_corr_tab, PSD_corr_tab, EPS_list, figdir=figdir,
                        raw_corr_BI=None, PSD_corr_BI=None,
                        savestr="StyleGAN2"):
    fig1 = plt.figure()
    plt.plot(PSD_corr_tab.T)
    plt.xticks(np.arange(len(EPS_list)), labels=EPS_list)
    plt.ylabel("Correlation of Vectorized Hessian")
    plt.xlabel("EPS for Forward Diff")
    plt.title("StyleGAN2 BP vs ForwardIter Pos-Semi-Definite Hessian "
              "Correlation")
    plt.savefig(join(figdir, "%s_BP-FI-PSD-HessCorr.png" % savestr))
    plt.show()

    fig2 = plt.figure()
    plt.plot(raw_corr_tab.T)
    plt.xticks(np.arange(len(EPS_list)), labels=EPS_list)
    plt.ylabel("Correlation of Vectorized Hessian")
    plt.xlabel("EPS for Forward Diff")
    plt.title("StyleGAN2 BP vs ForwardIter Raw Hessian Correlation")
    plt.savefig(join(figdir, "%s_BP-FI-raw-HessCorr.png" % savestr))
    plt.show()

    fig3 = plt.figure()
    men = raw_corr_tab.mean(axis=0)
    err = raw_corr_tab.std(axis=0) / np.sqrt(raw_corr_tab.shape[0])
    plt.plot(men)
    plt.fill_between(range(len(men)), men - err, men + err, alpha=0.3,
                     label="FI raw-BP")
    men = PSD_corr_tab.mean(axis=0)
    err = PSD_corr_tab.std(axis=0) / np.sqrt(PSD_corr_tab.shape[0])
    plt.plot(men)
    plt.fill_between(range(len(men)), men - err, men + err, alpha=0.3,
                     label="FI PSD-BP")
    if raw_corr_BI is not None:
        men_corr_BI, std_corr_BI = raw_corr_BI.mean(), raw_corr_BI.std()
        plt.hlines(men_corr_BI, 0, len(men) - 1, colors="red")
        plt.fill_between(range(len(men)), men_corr_BI - std_corr_BI,
                         men_corr_BI + std_corr_BI, alpha=0.3,
                         label="FI raw-BI")
        men_corr_BI, std_corr_BI = PSD_corr_BI.mean(), PSD_corr_BI.std()
        plt.hlines(men_corr_BI, 0, len(men) - 1, colors="blue")
        #plt.plot([0, len(men)], men_corr_BI)
        plt.fill_between(range(len(men)), men_corr_BI - std_corr_BI,
                         men_corr_BI + std_corr_BI, alpha=0.3,
                         label="FI PSD-BI")
    plt.xticks(np.arange(len(EPS_list)), labels=EPS_list)
    plt.legend()
    plt.ylabel("Correlation of Vectorized Hessian")
    plt.xlabel("EPS for Forward Diff")
    plt.title("StyleGAN2 BP vs ForwardIter Hessian Correlation")
    plt.savefig(join(figdir, "%s_BP-FI-HessCorr-cmp.png" % savestr))
    plt.savefig(join(figdir, "%s_BP-FI-HessCorr-cmp.pdf" % savestr))
    plt.show()
    return fig1, fig2, fig3

def plot_grad_flow(named_parameters, output_dir, name):
    ave_grads = []
    layers = []
    for n, p in named_parameters:
        if (p.requires_grad) and ("bias" not in n):
            layers.append(n)
            ave_grads.append(p.grad.abs().mean())
    plt.plot(ave_grads, alpha=0.3, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(xmin=0, xmax=len(ave_grads))
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.tight_layout()
    fname = os.path.join(output_dir, name + ".png")  # was "png", missing dot
    plt.savefig(fname)

def plot_volcano(logFC, p_val, sample_name, saveName, logFC_thresh):
    fig = pl.figure()
    ## Plot and save
    pl.scatter(logFC[(p_val > 0.05) | (abs(logFC) < logFC_thresh)],
               -np.log10(p_val[(p_val > 0.05) | (abs(logFC) < logFC_thresh)]),
               color='blue', alpha=0.5)
    pl.scatter(logFC[(p_val < 0.05) & (abs(logFC) > logFC_thresh)],
               -np.log10(p_val[(p_val < 0.05) & (abs(logFC) > logFC_thresh)]),
               color='red')
    pl.hlines(-np.log10(0.05), min(logFC), max(logFC))
    pl.vlines(-logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.vlines(logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.xlim(-3, 3)
    pl.xlabel('Log Fold Change')
    pl.ylabel('-log10(p-value)')
    pl.savefig(saveName)
    pl.close(fig)

def spectral_score_intercomparison(list_of_filename, list_of_label):
    plt.figure(figsize=(15, 6))
    ax = plt.subplot(121)
    ax.invert_xaxis()
    ds = xr.open_dataset(list_of_filename[0])
    plt.plot((1. / ds.wavenumber), ds.psd_ref, label='reference', color='k')
    for cfilename, clabel in zip(list_of_filename, list_of_label):
        ds = xr.open_dataset(cfilename)
        plt.plot((1. / ds.wavenumber), ds.psd_study, label=clabel)
    plt.xlabel('wavelength [km]')
    plt.ylabel('Power Spectral Density [m$^{2}$/cy/km]')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='best')
    plt.grid(which='both')
    plt.xticks([50, 100, 200, 500, 1000],
               ["50km", "100km", "200km", "500km", "1000km"])

    ax = plt.subplot(122)
    ax.invert_xaxis()
    for cfilename, clabel in zip(list_of_filename, list_of_label):
        ds = xr.open_dataset(cfilename)
        plt.plot((1. / ds.wavenumber), (1. - ds.psd_diff / ds.psd_ref),
                 lw=2, label=clabel)
    plt.xlabel('wavelength [km]')
    plt.ylabel('PSD Score [1. - PSD$_{err}$/PSD$_{ref}$]')
    plt.xscale('log')
    plt.hlines(y=0.5,
               xmin=np.ma.min(np.ma.masked_invalid(1. / ds.wavenumber)),
               xmax=np.ma.max(np.ma.masked_invalid(1. / ds.wavenumber)),
               color='r', lw=0.5, ls='--')
    # plt.vlines(x=resolved_scale, ymin=0, ymax=1, lw=0.5, color='g')
    # ax.fill_betweenx((1. - ds.psd_diff/ds.psd_ref),
    #                  resolved_scale,
    #                  np.ma.max(np.ma.masked_invalid(1./ds.wavenumber)),
    #                  color='green', alpha=0.3,
    #                  label=f'resolved scales \n $\lambda$ > {int(resolved_scale)}km')
    plt.legend(loc='best')
    plt.grid(which='both')
    plt.xticks([50, 100, 200, 500, 1000],
               ["50km", "100km", "200km", "500km", "1000km"])
    plt.show()

def heatmap(X, y=None, col_labels=None, figsize=None, cmap=None, **kwargs):
    n_items, n_cols = X.shape
    if col_labels is not None:
        assert len(col_labels) == n_cols, \
            "col_labels length should be the same as the number of " \
            "columns in the matrix"
    if figsize is None:
        x_size, y_size = X.shape
        if x_size >= y_size:
            figsize = (6, min(18, 6 * x_size / y_size))
        else:
            figsize = (min(18, 6 * y_size / x_size), 6)
    if cmap is None:
        if X.min() < 0:
            cmap = 'RdBu_r'
        else:
            cmap = 'hot_r'
    kwargs['cmap'] = cmap
    kwargs = dict(kwargs, interpolation='nearest', aspect='auto')

    plt.figure(figsize=figsize)
    plt.imshow(X, **kwargs)
    plt.grid(None)

    if y is not None:
        y = np.array(y)
        assert all(sorted(y) == y), \
            "This will only work if your row_labels are sorted"
        unik_ys, unik_ys_idx = np.unique(y, return_index=True)
        for u, i in zip(unik_ys, unik_ys_idx):
            plt.hlines(i - 0.5, 0 - 0.5, n_cols - 0.5, colors='b',
                       linestyles='dotted', alpha=0.5)
        plt.hlines(n_items - 0.5, 0 - 0.5, n_cols - 0.5, colors='b',
                   linestyles='dotted', alpha=0.5)
        plt.yticks(
            unik_ys_idx + np.diff(np.hstack((unik_ys_idx, n_items))) / 2,
            unik_ys)

    if col_labels is not None:
        # (length already asserted above)
        plt.xticks(range(len(col_labels)), col_labels)
    else:
        plt.xticks([])

    plt.gca().xaxis.set_tick_params(labeltop='on')
    return plt.gcf()

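# A hedged usage sketch for heatmap above, not from the original. The row
# labels must be sorted, as the assertion inside heatmap enforces.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(30, 4))
y = np.sort(rng.integers(0, 3, size=30))  # sorted row labels
fig = heatmap(X, y=y, col_labels=['a', 'b', 'c', 'd'])
fig.savefig('heatmap_demo.png')
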
def plot_stroke(region: SpliceRegion, font_size: float = 5):
    strokes = sorted(region.stroke, key=lambda x: [x.start, x.end])
    for i, stroke in enumerate(strokes):
        pylab.hlines(y=i, xmin=stroke.start, xmax=stroke.end,
                     color=stroke.color, lw=2)
        pylab.text(stroke.center - len(stroke.label) / 2, i + .2,
                   stroke.label, fontsize=font_size)

    pylab.xlim(left=0, right=len(region))
    pylab.ylim(bottom=-1, top=len(strokes))
    pylab.box(on=False)
    pylab.xticks([])
    pylab.yticks([])

def show_relu(x, y, v, with_min=False):
    v_scaled = scale(v)
    plt.imshow(v_scaled, interpolation='bicubic')

    x_min, y_min, v_min, v_max = get_mins_max(x, y, v)
    nx = x.shape[0]
    cen = nx / 2.0

    vs_min = v_scaled.min()
    vs_max = v_scaled.max()
    levels = np.linspace(vs_min, vs_max, 300)
    plt.contour(v_scaled, levels)

    plt.vlines(cen, 0, nx, color='gray')
    plt.hlines(cen, 0, nx, color='gray')
    if with_min:
        plt.vlines(cen + x_min, 0, nx, color='red', linestyle='--')
        plt.hlines(cen + y_min, 0, nx, color='red', linestyle='--')
    plt.draw()

def plot_grad_flow(named_parameters, fig):
    ave_grads = []
    layers = []
    for n, p in named_parameters:
        if (p.requires_grad) and ("bias" not in n):
            layers.append(n[-9])
            ave_grads.append(p.grad.abs().mean())
    plt.plot(ave_grads, color="b", linewidth=0.1)
    plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k")
    plt.xlim(xmin=0, xmax=len(ave_grads))
    plt.xlabel("Layers", labelpad=20)
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.xticks(range(0, len(ave_grads)), layers, rotation='vertical')
    #plt.margins(0.2)
    fig.subplots_adjust(bottom=0.15)
    fig.canvas.draw()
    fig.show()
    plt.savefig('/home/robotics/pytorch-semseg/tmp.png', bbox_inches='tight')

def plot_step(width, tstep, height, base, data):
    plt.vlines(x=tstep, ymin=base, ymax=height, colors='red')
    plt.vlines(x=tstep + width, ymin=base, ymax=height, colors='red')
    plt.hlines(y=base, xmin=np.min(data), xmax=tstep, colors='red')
    plt.hlines(y=height, xmin=tstep, xmax=tstep + width, colors='red')
    plt.hlines(y=base, xmin=tstep + width, xmax=np.max(data), colors='red')
    return

def plot_correlogram(self, forecast_error):
    """
    Plots the correlogram for a given forecast.

    forecast_error (numpy array): Forecast error for Holt-Winters
    """
    # Autocorrelation at lag k:
    # r_k = sum((e_t - mean)(e_{t+k} - mean)) / sum((e_t - mean)^2)
    error_mean_delta = forecast_error - np.mean(forecast_error)
    total_error = np.dot(error_mean_delta, error_mean_delta)
    lags = np.zeros(self.window)
    for i in range(1, self.window + 1):
        lags[i - 1] = np.dot(error_mean_delta[i:],
                             error_mean_delta[:-i]) / total_error
    plt.vlines(range(1, self.window + 1), 0, lags)
    plt.hlines(0, 0, self.window + 2)

    # Approximate significance level for the autocorrelation: +-2/sqrt(n)
    ac_bound = 2. / np.sqrt(self.n)
    plt.hlines(ac_bound, 0, self.window + 2, color="k", linestyles="dashed")
    plt.hlines(-ac_bound, 0, self.window + 2, color="k", linestyles="dashed")
    plt.xlabel("Lag")
    plt.ylabel("Autocorrelation")
    plt.title("Correlogram for Forecast Error")
    plt.show()

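# A hedged usage sketch, not from the original: plot_correlogram only reads
# self.window and self.n, so a minimal stand-in object is enough to call the
# method as a plain function. With white-noise errors, nearly all lags
# should fall inside the +-2/sqrt(n) band.
import numpy as np
from types import SimpleNamespace

rng = np.random.default_rng(0)
err = rng.normal(size=200)  # synthetic forecast errors
hw = SimpleNamespace(window=20, n=err.size)
plot_correlogram(hw, err)
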
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 9 * step,
         '    resulting in a pixelsize of ' +
         str('%.2f' % (SensorPixelSize * 1000)) + ' um.')

# Plot NA
plt.subplot(234)
plt.axis('equal')
Magnification = numpy.arange(0, 1.01, 0.01)
for FStop in [0.5, 0.8, 1, 1.2, 1.4, 2]:
    plt.plot(Magnification,
             Magnification / (2 * FStop * (1 + Magnification)),
             label='f/' + str('%0.2f' % FStop))
plt.plot(Magnification,
         Magnification / (2 * options.FStop * (1 + Magnification)),
         'g--', linewidth=5, label='f/' + str('%0.2f' % options.FStop))
plt.legend(loc='upper left')
plt.hlines(NumericalApertureAverage, 0, 1)
plt.text(0.618, NumericalApertureAverage, 'NA flat panel')
plt.hlines(NumericalApertureDetermined, 0, 1)
plt.text(0.618, NumericalApertureDetermined, 'simulated NA of our lens')
plt.hlines(NumericalApertureJBAG, 0, 1)
plt.text(0.618, NumericalApertureJBAG, 'NA JBAG (?)')
plt.vlines(1 / Demagnification, 0, 1, 'g', '--')
plt.text(1 / Demagnification + 0.25, 0.8,
         'Our calculated\nDemagnification: ' + str(Demagnification) +
         'x=' + str(round(1 / Demagnification, 3)))
plt.title('NA')
plt.xlabel('Magnification')
plt.ylabel('NA')
plt.xlim([0, 1])

# Plot X-ray spectra

# Bootstrap confidence interval
means[nmm, nc] = tempMeans
stds[nmm, nc] = tempStds

CIsMeanUB = CI.variables['meanUB'][ny + 70, nj, ni]
CIsMeanLB = CI.variables['meanLB'][ny + 70, nj, ni]
CIsStdsUB = CI.variables['sdUB'][ny + 70, nj, ni]
CIsStdsLB = CI.variables['sdLB'][ny + 70, nj, ni]
numMean = minNum_nSIF_mean_85[ny, nj, ni]
numStd = minNum_nSIF_std_85[ny, nj, ni]

plt.figure()
plt.hlines(CIsMeanUB, 0, 31)
plt.hlines(CIsMeanLB, 0, 31)
plt.hlines(means[-1][0], 0, 31)
plt.boxplot(means.T)
plt.title('Example mean distributions (' + str(int(numMean)) +
          ' members needed), YEAR=' + str(yr) +
          ' NI=' + str(ni) + ' NJ=' + str(nj))
plt.ylabel('Number of open water days')
plt.xlabel('Number of subsampled ensemble members')
plt.savefig('SI_FigXx_numNeeded_Mean.' + rcpName[ittR] + '.' + nsk + '.pdf',
            format='pdf')
#plt.show()

plt.figure()
plt.hlines(CIsStdsUB, 0, 31)
plt.hlines(CIsStdsLB, 0, 31)
plt.hlines(stds[-1][0], 0, 31)
plt.boxplot(stds.T)
plt.title('Example standard deviation distributions (' + str(int(numStd)) +
          ' members needed), YEAR=' + str(yr) +
          ' NI=' + str(ni) + ' NJ=' + str(nj))

ax = plt.subplot(2, 1, 1)  # (num rows, num columns, subplot position)

# Apply letter label: coordinates in subplot object space (x, y) where
# (0, 0) = bottom left; also make sure transform uses the correct subplot
# object.
plt.text(0.07, 0.92, '(a)', horizontalalignment='center',
         verticalalignment='center', transform=ax.transAxes)

# Plot curve
plt.plot(x, y, color='black', linewidth=1.5, label='Sin')

# Set labels, label sizes, ticks, tick sizes
# (Note: only a y label, but both x and y ticks)
plt.ylabel('Power [arb]', fontsize=9)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
plt.xlim(0, 20)
plt.ylim(-1.5, 1.5)

# Suppress xtick values (modify subplot object ax)
ax.set_xticklabels([])

# Add zero line (y position, xmin, xmax)
plt.hlines(0, 0, 20, color='red', linestyle='dotted', linewidth=0.5)
# (x, y) is in data coordinates when transform is not specified
plt.text(5, 0.05, 'y=0', fontsize=4, color='red',
         horizontalalignment='center')

# Call second subplot object
ax2 = plt.subplot(2, 1, 2)  # (num rows, num columns, subplot position)

# Apply letter label as above, using the correct subplot object.
plt.text(0.07, 0.92, '(b)', horizontalalignment='center',
         verticalalignment='center', transform=ax2.transAxes)

# Plot curve
plt.plot(x, z, color='black', linewidth=1.5, label='Cos')

# Set labels, label sizes, ticks, tick sizes
plt.xlabel('Time [s]', fontsize=9)
plt.ylabel('Power [arb]', fontsize=9)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
plt.xlim(0, 20)

def plot_windows(data_trace, synthetic_trace, windows, dominant_period,
                 filename=None, debug=False):
    """
    Helper function plotting the picked windows in some variants. Useful
    for debugging and checking what's actually going on.

    If using the debug option, please use the same data_trace and
    synthetic_trace as you used for the select_windows() function. They
    will be augmented with certain values used for the debugging plots.

    :param data_trace: The data trace.
    :type data_trace: obspy.core.trace.Trace
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: obspy.core.trace.Trace
    :param windows: The windows, as returned by select_windows()
    :type windows: list
    :param dominant_period: The dominant period of the data. Used for the
        tapering.
    :type dominant_period: float
    :param filename: If given, a file will be written. Otherwise the plot
        will be shown.
    :type filename: basestring
    :param debug: Toggle plotting debugging information. Optional.
        Defaults to False.
    :type debug: bool
    """
    import matplotlib.pylab as plt
    from obspy.signal.invsim import cosTaper

    plt.figure(figsize=(16, 10))
    plt.subplots_adjust(hspace=0.3)

    npts = synthetic_trace.stats.npts

    # Plot the raw data.
    time_array = np.linspace(0, (npts - 1) * synthetic_trace.stats.delta,
                             npts)
    plt.subplot(411)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    plt.xlim(0, time_array[-1])
    plt.title("Raw data")

    # Plot the chosen windows.
    bottom = np.ones(npts) * -10.0
    top = np.ones(npts) * 10.0
    for left_idx, right_idx in windows:
        top[left_idx: right_idx + 1] = -10.0
    plt.subplot(412)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    ymin, ymax = plt.ylim()
    # alpha must be a float, not the string "0.5".
    plt.fill_between(time_array, bottom, top, color="red", alpha=0.5)
    plt.xlim(0, time_array[-1])
    plt.ylim(ymin, ymax)
    plt.title("Chosen windows")

    # Plot the tapered data.
    final_data = np.zeros(npts)
    final_data_scaled = np.zeros(npts)
    synth_data = np.zeros(npts)
    synth_data_scaled = np.zeros(npts)

    for left_idx, right_idx in windows:
        right_idx += 1
        length = right_idx - left_idx

        # Setup the taper.
        p = (dominant_period / synthetic_trace.stats.delta / length) / 2.0
        if p >= 0.5:
            p = 0.49
        elif p < 0.1:
            p = 0.1
        taper = cosTaper(length, p=p)

        data_window = taper * data_trace.data[left_idx: right_idx].copy()
        synth_window = \
            taper * synthetic_trace.data[left_idx: right_idx].copy()

        data_window_scaled = data_window / data_window.ptp() * 2.0
        synth_window_scaled = synth_window / synth_window.ptp() * 2.0

        final_data[left_idx: right_idx] = data_window
        synth_data[left_idx: right_idx] = synth_window
        final_data_scaled[left_idx: right_idx] = data_window_scaled
        synth_data_scaled[left_idx: right_idx] = synth_window_scaled

    plt.subplot(413)
    plt.plot(time_array, final_data, color="black")
    plt.plot(time_array, synth_data, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows")

    plt.subplot(414)
    plt.plot(time_array, final_data_scaled, color="black")
    plt.plot(time_array, synth_data_scaled, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows, scaled to same amplitude")

    if debug:
        first_valid_index = data_trace.stats.first_valid_index * \
            synthetic_trace.stats.delta
        noise_level = data_trace.stats.noise_level

        data_p, data_t, data_e = find_local_extrema(
            data_trace.data, start_index=first_valid_index)
        synth_p, synth_t, synth_e = find_local_extrema(
            synthetic_trace.data, start_index=first_valid_index)

        for _i in range(1, 3):
            plt.subplot(4, 1, _i)
            ymin, ymax = plt.ylim()
            xmin, xmax = plt.xlim()
            plt.vlines(first_valid_index, ymin, ymax, color="green",
                       label="Theoretical First Arrival")
            plt.hlines(noise_level, xmin, xmax, color="0.5",
                       label="Noise Level", linestyles="--")
            plt.hlines(-noise_level, xmin, xmax, color="0.5",
                       linestyles="--")
            plt.hlines(noise_level * 5, xmin, xmax, color="0.8",
                       label="Minimal acceptable amplitude",
                       linestyles="--")
            plt.hlines(-noise_level * 5, xmin, xmax, color="0.8",
                       linestyles="--")
            if _i == 2:
                plt.scatter(time_array[data_e], data_trace.data[data_e],
                            color="black", s=10)
                plt.scatter(time_array[synth_e],
                            synthetic_trace.data[synth_e],
                            color="red", s=10)
            plt.ylim(ymin, ymax)
            plt.xlim(xmin, xmax)

        plt.subplot(411)
        plt.legend(prop={"size": "small"})

    plt.suptitle(data_trace.id)

    if filename:
        plt.savefig(filename)
    else:
        plt.show()

isiHst = gs_filter(isiHst, 2)
tmaxInd = np.argmax(isiHst)
tmax = Trange[tmaxInd]
atmax = np.max(isiHst)
# Find where the histogram rises above and falls below atmax/sqrt(2).
df = np.diff(np.array(isiHst[:tmaxInd] > atmax / np.sqrt(2), dtype='int'))
st = Trange[np.nonzero(df == 1)[0][0]]
df = np.diff(np.array(isiHst[tmaxInd:] < atmax / np.sqrt(2), dtype='int'))
stp = Trange[tmaxInd + np.nonzero(df == 1)[0][0] + 1]
qual.append(atmax * tmax / (stp - st))
if idx % 2 == 0:
    pl.figure('isi spectras')
    pl.plot(Trange, isiHst, label='D={:.2f}'.format(10**w_p))
    pl.hlines(atmax / np.sqrt(2), st, stp)
    pl.legend()
    pl.xlabel('isi, ms', fontsize=16)

#varParam = 10**varParam
isiMn = np.array(isiMn)
isiStd = np.array(isiStd)
pl.figure('qual[isi]')
pl.plot(varParam, qual, label='Ie={}'.format(Ie))
#pl.xlabel(r'$D, pA^2$', fontsize=18)
pl.xlabel(r'$w_p, pA$', fontsize=16)
pl.ylabel(r'$\beta_{isi}$', fontsize=16)
#pl.legend()
#%%

def do_plot(x, insignificant_indices, names=None, arrow_vgap=.2,
            link_voffset=.15, link_vgap=.1, xlabel=None):
    """
    Draws a critical difference graph, which is used to display the
    differences in methods' performance. This is inspired by the plots used
    in Janez Demsar, Statistical Comparisons of Classifiers over Multiple
    Data Sets, JMLR 7(Jan):1--30, 2006.

    Methods are drawn on an axis and connected with a line if their
    performance is not significantly different.

    :param x: List of average methods' scores.
    :type x: list-like

    :param insignificant_indices: list of tuples that specify the indices
     of all pairs of methods that are not significantly different and
     should be connected in the diagram. Each tuple must be sorted, and no
     duplicate tuples should be contained in the list.

     Examples:
      - [(0, 1), (3, 4), (4, 5)] is correct
      - [(0, 1), (3, 4), (4, 5), (3, 4)] contains a duplicate
      - [(4, 3)] contains a non-sorted tuple

     If there is a cluster of non-significant differences (e.g. 1=2, 2=3,
     1=3), a single link connecting all of them will be drawn.

     Note: the indices must refer to positions in `x` after it is sorted
     in increasing order, so `x` has to be passed in already sorted; this
     is to avoid confusion.

    :param names: List of methods' names.

    :param arrow_vgap: vertical space between the arrows that point to
     method names. Scale is 0 to 1, fraction of axis size.

    :param link_vgap: vertical space between the lines that connect
     methods that are not significantly different. Scale is 0 to 1,
     fraction of axis size.

    :param link_voffset: offset from the axis of the links that connect
     non-significant methods.
    """
    if names is None:
        names = list(range(len(x)))

    for pair in insignificant_indices:
        assert all(0 <= idx < len(x) for idx in pair), 'Check indices'

    # Remove both axes and the frame: http://bit.ly/2tBIlWv
    fig, ax = plt.subplots(1, 1, figsize=(6, 2),
                           subplot_kw=dict(frameon=False))
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().set_visible(False)

    size = len(x)
    y = [0] * size
    ax.plot(x, y, 'ko')
    plt.xlim(0.9 * x[0], 1.1 * x[-1])
    plt.ylim(0, 1)

    # Draw the x axis again; this must be done after plotting.
    xmin, xmax = ax.get_xaxis().get_view_interval()
    ymin, ymax = ax.get_yaxis().get_view_interval()
    ax.add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black',
                         linewidth=2))

    # Add an optional label to the x axis.
    if xlabel:
        ax.annotate(xlabel, xy=(xmax, 0), xytext=(0.95, 0.1),
                    textcoords='axes fraction', ha='center', va='center',
                    fontsize=9)  # text slightly smaller

    half = int(ceil(len(x) / 2.))
    # Make sure the topmost annotation is at 90% of figure height.
    ycoords = list(reversed([0.9 - arrow_vgap * i for i in range(half)]))
    ycoords.extend(reversed(ycoords))

    for i in range(size):
        ax.annotate(str(names[i]), xy=(x[i], y[i]),
                    xytext=(-.05 if i < half else .95,  # x coordinate
                            ycoords[i]),  # y coordinate
                    textcoords='axes fraction', ha='center', va='center',
                    arrowprops={'arrowstyle': '-',
                                'connectionstyle':
                                    'angle,angleA=0,angleB=90'})

    # Draw horizontal lines linking non-significant methods.
    linked_methods = merge_nonsignificant_cliques(insignificant_indices)

    # Where the existing lines begin and end, (X, Y) coords.
    used_endpoints = set()
    y = link_voffset
    dy = link_vgap

    def overlaps_any(foo, existing):
        """
        Checks if the proposed horizontal line (given with its x-y
        coordinates) overlaps any of the existing horizontal lines.
        """
        return _contained_in_larger_interval(foo[0], foo[1], existing)

    for i, (x1, x2) in enumerate(sorted(linked_methods)):
        # Determine how far up/down the line should be drawn.
        # 1. Can we lower it any further? Not if it would be too low or if
        #    it would overlap another line.
        if y > link_voffset and overlaps_any((x1, y - dy), used_endpoints):
            y -= dy
        # 2. Can we draw it at the current value of y? Not if its left end
        #    would overlap with the right end of an existing line; then we
        #    need to lift it up a bit.
        elif overlaps_any((x1, y), used_endpoints):
            y += dy
        else:
            pass

        plt.hlines(y, x[x1], x[x2], linewidth=3)  # y, x0, x1
        used_endpoints.add((x1, y))
        used_endpoints.add((x2, y))

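# A hedged usage sketch for do_plot, not from the original; it assumes the
# helpers referenced in the body (merge_nonsignificant_cliques and
# _contained_in_larger_interval) are available. Scores are pre-sorted in
# increasing order, as the docstring requires.
scores = [1.2, 1.5, 1.9, 2.6, 3.1]
names = ['m1', 'm2', 'm3', 'm4', 'm5']
# m1=m2 and m2=m3 are not significantly different, so they get linked.
do_plot(scores, [(0, 1), (1, 2)], names=names, xlabel='mean rank')
plt.show()
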
pp.subplot(3, 1, 3)
# Use integer division so the pixel coordinates stay valid indices.
point_coord = [a0.shape[0] // 2, a0.shape[1] // 2]
steps = ffs[:, point_coord[0], point_coord[1]]
dsteps = np.hstack((steps, steps))
N = np.linspace(1, ffs.shape[0] * 2, ffs.shape[0] * 2)
pp.plot(N, dsteps, 'bo--', label="measured")
Np = np.linspace(0.5, ffs.shape[0] + 0.5, ffs.shape[0] * 100)
Np2 = np.linspace(0.5, ffs.shape[0] * 2 + 0.5, ffs.shape[0] * 2 * 100)
a0c = coeff[0, 0, point_coord[0], point_coord[1]]
a1c = coeff[0, 1, point_coord[0], point_coord[1]]
phc = coeff[1, 1, point_coord[0], point_coord[1]]
curve1 = a0c + a1c * np.sin(2 * np.pi * (Np - 1) / (ffs.shape[0])
                            + phc + np.pi / 2)
curve = np.hstack((curve1, curve1))
pp.plot(Np2, curve, 'r-', label="fitted")
pp.hlines(a0c, 0, ffs.shape[0] * 2, 'k', '--')
pp.vlines(ffs.shape[0] + 0.5, a0c - a1c, a0c + a1c, 'k', '--')
pp.title('stepping curve of pixel (' + str(point_coord[0]) + ',' +
         str(point_coord[1]) + '). The curve is displayed twice.')
pp.ylabel('intensity [adu.]')
pp.xlabel('phase steps')
pp.legend()
pp.grid()
pp.show()

if process_sample:
    datm = np.zeros((19, 800, 770))
    for i in range(19):
        datm[i] = e17.io.h5read(
            filepath + "paximage_ct_4809%02d.h5" % (i + 27))["raw_data"]

maxres = 1.3
epx = 1 / (2 * maxres)
tsl = 1.0
nxp = 1.0
NAc = pxs / (tsl / 2)
NAa = integrate.quad(lambda x: numpy.arctan(pxs / (2 * x)), 0.01, 1)[0]
print('middle NA:', NAc)
print('average NA:', NAa)

pcs = 0.006944
mag = 36 / lpa
b = 50.0
g = b / mag
FStop = 1.4
NAdet = b / (FStop * 2 * g)

mag = numpy.arange(0, 1.01, 0.01)
legend = []
plt.figure()
for FStop in [0.5, 0.8, 1, 1.25, 1.4, 2]:
    plt.plot(mag, mag / (2 * FStop * (1 + mag)))
    legend.append(FStop)
plt.legend(legend)
plt.hlines(NAa, 0, 1)
plt.xlabel('Magnification')
plt.ylabel('NA')
plt.show()

rstd = (rprecip - np.mean(rprecip)) / np.std(rprecip)
dstd = (d2 - np.mean(d2)) / np.std(d2)
print(rstd.shape, dstd.shape)

corr, pval = pearsonr(rstd, dstd)
print("corr2, pval2")
print(corr, pval)

# Plot MJ precip and Cook PDSI
plt.figure()
plt.subplot(211)
plt.xlim([1745, 1985])
plt.plot(t, dstd, color='0.5', linestyle='-', label='Cook PDSI')
plt.plot(t, rstd, color='0.0', linestyle='-', label='MJ Precip')
plt.hlines(0, 1745, 1985, colors='grey', linestyles='solid')
#plt.xlabel('Year')
plt.ylabel('Standardized value')
#plt.legend(loc='lower right')

plt.subplot(212)
plt.xlim([1745, 1985])
plt.plot(t, dstd / spline(dstd, nyears=10), color='0.5', linestyle='-',
         label='Cook PDSI')
plt.plot(t, rstd / spline(rstd, nyears=10), color='0.0', linestyle='-',
         label='MJ Precip')
plt.hlines(0, 1745, 1985, colors='grey', linestyles='solid')
plt.xlabel('Year')
plt.ylabel('Standardized value')
#plt.legend(loc='lower right')

plt.savefig('plots/reconCompare.png')

# Plot data from ex22_data_single.txt and ex22_data_double.txt
from file_interact import Read
import matplotlib.pylab as plt

inst = Read("ex22_data.txt")
# inst.array is data_points[:, i] --> i = N, up, down
N = inst.array[:, 0]
upsum_sing = inst.array[:, 1]
downsum_sing = inst.array[:, 2]
upsum_doub = inst.array[:, 3]
downsum_doub = inst.array[:, 4]

plt.semilogx(N, upsum_sing, 'b-', label="up, single precision")
plt.semilogx(N, downsum_sing, 'r-', label="down, single precision")
plt.semilogx(N, upsum_doub, 'b--', label="up, double precision")
plt.semilogx(N, downsum_doub, 'r--', label="down, double precision")
plt.xlabel("N iterations log-scale")
plt.ylabel("sum-value")
plt.title("sum up is blue; sum down is red")
plt.hlines(y=2, xmin=N[0], xmax=N[-1])  # analytical solution
plt.legend(numpoints=1, bbox_to_anchor=[1, 1.5])
plt.show()

def plot_nutrient_pulses():
    step_size = 0.1
    time_obj = time_unit.Time(0, 100, step_size)
    ### Decay regimen
    initial_amount = 0
    flow_rates = [1, 5, 10, 20]
    volume = 200
    df = {}
    flow_in_conc = 0.1
    # interval for flow in
    flow_in_interval = [0, 10]
    # interval for flow out
    flow_out_interval = [10 + step_size, 100]
    for rate in flow_rates:
        times_to_flows = OrderedDict()
        for fi in flow_in_interval:
            times_to_flows[fi] = {"flow_in": rate,
                                  "flow_out": rate,
                                  "flow_in_conc": flow_in_conc}
        for fo in flow_out_interval:
            times_to_flows[fo] = {"flow_in": rate,
                                  "flow_out": rate,
                                  "flow_in_conc": 0.}
        sim_results = \
            noisy_glucose_flow(time_obj, initial_amount, volume,
                               times_to_flows,
                               time_bin=time_obj.step_size)
        df[rate] = sim_results["amounts"]
        df["Time"] = sim_results["times"]
    df = pandas.DataFrame(df)
    sim_df = \
        pandas.melt(df, id_vars=["Time"]).rename(
            columns={"variable": "Flow rate (ml/min)"})
    plt.figure()
    sns.tsplot(time="Time", value="value",
               unit="Flow rate (ml/min)",
               condition="Flow rate (ml/min)",
               # color=sns.color_palette("Blues"),
               color=sns.color_palette("Greys")[::-1][0:len(flow_rates)][::-1],
               data=sim_df)
    plt.xlabel("Time (min)")
    plt.ylabel("Glucose (mM)")
    plt.title(r"Initial amount: $%.1f$ mM, $V = %.1f$ ml, $c_{in}$ = %.1f mM"
              % (float(initial_amount), float(volume), float(flow_in_conc)))
    max_amount = sim_df["value"].max()
    plt.ylim([0, round(max_amount + 1)])
    y_coord = 0.5
    plt.hlines(y_coord, flow_in_interval[0], flow_in_interval[1],
               colors="r", zorder=10, linewidth=4)
    plt.hlines(y_coord, flow_out_interval[0], flow_out_interval[1],
               colors="darkblue", zorder=10, linewidth=4)
    plot_fname = os.path.join(paths.PLOTS_DIR, "nutrient_pulses_rates.pdf")
    sns.despine()
    plt.savefig(plot_fname)
    plt.show()

def select_windows(data_trace, synthetic_trace, event_latitude, event_longitude, event_depth_in_km, station_latitude, station_longitude, minimum_period, maximum_period, min_cc=0.10, max_noise=0.10, max_noise_window=0.4, min_velocity=2.4, threshold_shift=0.30, threshold_correlation=0.75, min_length_period=1.5, min_peaks_troughs=2, max_energy_ratio=10.0, min_envelope_similarity=0.2, verbose=False, plot=False): """ Window selection algorithm for picking windows suitable for misfit calculation based on phase differences. Returns a list of windows which might be empty due to various reasons. This function is really long and a lot of things. For a more detailed description, please see the LASIF paper. :param data_trace: The data trace. :type data_trace: :class:`~obspy.core.trace.Trace` :param synthetic_trace: The synthetic trace. :type synthetic_trace: :class:`~obspy.core.trace.Trace` :param event_latitude: The event latitude. :type event_latitude: float :param event_longitude: The event longitude. :type event_longitude: float :param event_depth_in_km: The event depth in km. :type event_depth_in_km: float :param station_latitude: The station latitude. :type station_latitude: float :param station_longitude: The station longitude. :type station_longitude: float :param minimum_period: The minimum period of the data in seconds. :type minimum_period: float :param maximum_period: The maximum period of the data in seconds. :type maximum_period: float :param min_cc: Minimum normalised correlation coefficient of the complete traces. :type min_cc: float :param max_noise: Maximum relative noise level for the whole trace. Measured from maximum amplitudes before and after the first arrival. :type max_noise: float :param max_noise_window: Maximum relative noise level for individual windows. :type max_noise_window: float :param min_velocity: All arrivals later than those corresponding to the threshold velocity [km/s] will be excluded. :type min_velocity: float :param threshold_shift: Maximum allowable time shift within a window, as a fraction of the minimum period. :type threshold_shift: float :param threshold_correlation: Minimum normalised correlation coeeficient within a window. :type threshold_correlation: float :param min_length_period: Minimum length of the time windows relative to the minimum period. :type min_length_period: float :param min_peaks_troughs: Minimum number of extrema in an individual time window (excluding the edges). :type min_peaks_troughs: float :param max_energy_ratio: Maximum energy ratio between data and synthetics within a time window. Don't make this too small! :type max_energy_ratio: float :param min_envelope_similarity: The minimum similarity of the envelopes of both data and synthetics. This essentially assures that the amplitudes of data and synthetics can not diverge too much within a window. It is a bit like the inverse of the ratio of both envelopes so a value of 0.2 makes sure neither amplitude can be more then 5 times larger than the other. :type min_envelope_similarity: float :param verbose: No output by default. :type verbose: bool :param plot: Create a plot of the algortihm while it does its work. :type plot: bool """ # Shortcuts to frequently accessed variables. data_starttime = data_trace.stats.starttime data_delta = data_trace.stats.delta dt = data_trace.stats.delta npts = data_trace.stats.npts synth = synthetic_trace.data data = data_trace.data times = data_trace.times() # Fill cache if necessary. 
if not TAUPY_MODEL_CACHE: from obspy.taup import TauPyModel # NOQA TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135") model = TAUPY_MODEL_CACHE["model"] # ------------------------------------------------------------------------- # Geographical calculations and the time of the first arrival. # ------------------------------------------------------------------------- dist_in_deg = geodetics.locations2degrees(station_latitude, station_longitude, event_latitude, event_longitude) dist_in_km = geodetics.calcVincentyInverse( station_latitude, station_longitude, event_latitude, event_longitude)[0] / 1000.0 # Get only a couple of P phases which should be the first arrival # for every epicentral distance. Its quite a bit faster than calculating # the arrival times for every phase. # Assumes the first sample is the centroid time of the event. tts = model.get_travel_times(source_depth_in_km=event_depth_in_km, distance_in_degree=dist_in_deg, phase_list=["ttp"]) # Sort just as a safety measure. tts = sorted(tts, key=lambda x: x.time) first_tt_arrival = tts[0].time # ------------------------------------------------------------------------- # Window settings # ------------------------------------------------------------------------- # Number of samples in the sliding window. Currently, the length of the # window is set to a multiple of the dominant period of the synthetics. # Make sure it is an uneven number; just to have a trivial midpoint # definition and one sample does not matter much in any case. window_length = int(round(float(2 * minimum_period) / dt)) if not window_length % 2: window_length += 1 # Use a Hanning window. No particular reason for it but its a well-behaved # window and has nice spectral properties. taper = np.hanning(window_length) # ========================================================================= # check if whole seismograms are sufficiently correlated and estimate # noise level # ========================================================================= # Overall Correlation coefficient. norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2)) cc = np.sum(data * synth) / norm if verbose: _log_window_selection(data_trace.id, "Correlation Coefficient: %.4f" % cc) # Estimate noise level from waveforms prior to the first arrival. idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt)) idx_end = max(10, idx_end) idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt)) idx_start = max(10, idx_start) if idx_start >= idx_end: idx_start = max(0, idx_end - 10) abs_data = np.abs(data) noise_absolute = abs_data[idx_start:idx_end].max() noise_relative = noise_absolute / abs_data.max() if verbose: _log_window_selection(data_trace.id, "Absolute Noise Level: %e" % noise_absolute) _log_window_selection(data_trace.id, "Relative Noise Level: %e" % noise_relative) # Basic global rejection criteria. accept_traces = True if (cc < min_cc) and (noise_relative > max_noise / 3.0): msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc) if verbose: _log_window_selection(data_trace.id, msg) accept_traces = msg if noise_relative > max_noise: msg = "Noise level %.3f is above threshold of %.3f" % ( noise_relative, max_noise) if verbose: _log_window_selection( data_trace.id, msg) accept_traces = msg # Calculate the envelope of both data and synthetics. This is to make sure # that the amplitude of both is not too different over time and is # used as another selector. Only calculated if the trace is generally # accepted as it is fairly slow. 
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or to split the plotting function into various
    # subfunctions, neither of which is acceptable in my opinion. The impact
    # on performance is minimal if plotting is turned off: all imports are
    # lazy and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98,
                                top=0.95, wspace=None, hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axis if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
                     alpha=0.4, lw=0.5, label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
                 label="synthetics", lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00",
                   lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival", verticalalignment="top",
                 horizontalalignment="left", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])

        plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0],
                   ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2",
                 verticalalignment="bottom", horizontalalignment="right",
                 color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s",
                     verticalalignment="bottom", horizontalalignment=hal,
                     color="0.15")

        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0], ylim[1], colors="gray", lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top", horizontalalignment="right",
                 color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])

        plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])
foreground="white")]) plt.legend(loc="lower right", fancybox=True, framealpha=0.5, fontsize="small") plt.gca().xaxis.set_ticklabels([]) # Plot the basic global information. ax = plt.gca() txt = ( "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f" % (cc, noise_absolute, noise_relative)) ax.text(0.01, 0.95, txt, transform=ax.transAxes, fontdict=dict(fontsize="small", ha='left', va='top'), bbox=dict(boxstyle="round", fc="w", alpha=0.8)) plt.suptitle("Channel %s" % data_trace.id, fontsize="larger") # Show plot and return if not accepted. if accept_traces is not True: txt = "Rejected: %s" % (accept_traces) ax.text(0.99, 0.95, txt, transform=ax.transAxes, fontdict=dict(fontsize="small", ha='right', va='top'), bbox=dict(boxstyle="round", fc="red", alpha=1.0)) plt.show() if accept_traces is not True: return [] # Initialise masked arrays. The mask will be set to True where no # windows are chosen. time_windows = np.ma.ones(npts) time_windows.mask = False if plot: old_time_windows = time_windows.copy() # Elimination Stage 1: Eliminate everything half a period before or # after the minimum and maximum travel times, respectively. # theoretical arrival as positive. min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt) max_idx = int(math.ceil(( dist_in_km / min_velocity + minimum_period / 2.0) / dt)) time_windows.mask[:min_idx + 1] = True time_windows.mask[max_idx:] = True if plot: plt.subplot2grid(grid, (8, 0), rowspan=1) _plot_mask(time_windows, old_time_windows, name="TRAVELTIME ELIMINATION") old_time_windows = time_windows.copy() # ------------------------------------------------------------------------- # Compute sliding time shifts and correlation coefficients for time # frames that passed the traveltime elimination stage. # ------------------------------------------------------------------------- # Allocate arrays to collect the time dependent values. sliding_time_shift = np.ma.zeros(npts, dtype="float32") sliding_time_shift.mask = True max_cc_coeff = np.ma.zeros(npts, dtype="float32") max_cc_coeff.mask = True for start_idx, end_idx, midpoint_idx in _window_generator(npts, window_length): if not min_idx < midpoint_idx < max_idx: continue # Slice windows. Create a copy to be able to taper without affecting # the original time series. data_window = data[start_idx: end_idx].copy() * taper synthetic_window = \ synth[start_idx: end_idx].copy() * taper # Elimination Stage 2: Skip windows that have essentially no energy # to avoid instabilities. No windows can be picked in these. if synthetic_window.ptp() < synth.ptp() * 0.001: time_windows.mask[midpoint_idx] = True continue # Calculate the time shift. Here this is defined as the shift of the # synthetics relative to the data. So a value of 2, for instance, means # that the synthetics are 2 timesteps later then the data. cc = np.correlate(data_window, synthetic_window, mode="full") time_shift = cc.argmax() - window_length + 1 # Express the time shift in fraction of the minimum period. sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period # Normalized cross correlation. 
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, -threshold_shift - (2) * 0.03, "threshold",
                 verticalalignment="top", horizontalalignment="left",
                 color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])
        plt.plot(times, sliding_time_shift, color="#377eb8",
                 label="Time shift in fraction of minimum period", lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5, threshold_correlation + (1.4) * 0.01, "threshold",
                 verticalalignment="bottom", horizontalalignment="left",
                 color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])
        plt.plot(times, max_cc_coeff, color="#4daf4a",
                 label="Maximum CC coefficient", lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than threshold_shift times the dominant period as
    # negative.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = \
        True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width
    # of the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = \
            True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both to avoid large numbers by division.
    stacked = np.vstack([
        np.ma.clip(synth_env,
                   synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())])

    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, min_envelope_similarity + (2) * 0.03, "threshold",
                 verticalalignment="bottom", horizontalalignment="left",
                 color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3,
                                            foreground="white")])
        plt.plot(times, ratio, color="#9B59B6",
                 label="Envelope amplitude similarity", lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        window_mask = np.ma.masked_array(window_mask, mask=window_mask)
        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start: j.stop] ** 2).sum()
        synth_energy = (synth[j.start: j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise.
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above "
                    "the signal to noise ratio.")
            # Reject this window as well.
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id,
            "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
                            facecolor="#CDDC39", alpha=0.5, transform=trans)

        plt.show()

    return windows
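# The masked-array bookkeeping above relies on flatnotmasked_contiguous(),
# which returns the slices of a 1-D masked array that are still unmasked;
# this is how the surviving time windows are enumerated after each
# elimination stage. A minimal self-contained sketch (synthetic mask only,
# not LASIF data):
import numpy as np

tw = np.ma.ones(10)
tw.mask = np.zeros(10, dtype=bool)
tw.mask[:3] = True   # e.g. traveltime elimination
tw.mask[6:8] = True  # e.g. correlation threshold elimination

for s in np.ma.flatnotmasked_contiguous(tw):
    print(s.start, s.stop)  # -> 3 6 and 8 10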
else: if Camera == 'iPhone': PickPoint = [[1500, 1000]] elif Camera[:6] == 'tiscam': # Select middle of image... PickPoint = [[ImageHeight / 2, ImageWidth / 2]] elif Camera == 'Elphel': PickPoint = [[ImageHeight / 2, ImageWidth / 2]] plt.title('Original image') Horizon = int(PickPoint[0][1]) Vertigo = int(PickPoint[0][0]) if SelectStartPointManually: print 'You selected horizontal line', Horizon, 'and vertical line', Vertigo else: print 'I selected horizontal line', Horizon, 'and vertical line', Vertigo plt.hlines(Horizon, 0, ImageHeight, 'r') plt.vlines(Vertigo, 0, ImageWidth, 'b') plt.draw() plt.subplot(223) HorizontalProfile = Image[Horizon, :] plt.plot(HorizontalProfile, 'r') plt.title('Horizontal Profile') # plt.xlim(0, ImageHeight) # plt.ylim(0, 256) plt.subplot(222) VerticalProfile = Image[:, Vertigo] plt.plot(VerticalProfile, range(ImageWidth), 'b') # plt.xlim(0, 256) # plt.ylim(0, ImageWidth) plt.title('Vertical Profile') plt.draw()
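# Self-contained sketch of the profile extraction used above: a row slice
# through the image gives the horizontal profile, a column slice the
# vertical one. Synthetic image and line positions only.
import numpy as np

Image = np.random.rand(480, 640)
Horizon, Vertigo = 240, 320
HorizontalProfile = Image[Horizon, :]  # one value per column
VerticalProfile = Image[:, Vertigo]    # one value per row
print(HorizontalProfile.shape, VerticalProfile.shape)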
def plot(self):
    if not hasattr(self, 't'):
        self._construct_calibration_array()
    if not hasattr(self, 'cal'):
        self.calibrate()

    # plt.close('all')
    plt.figure()
    plt.plot(self.x, self.ly, 'k.')
    plt.xlabel('Ionogram Signal')
    plt.ylabel(r'$\log_{10} n_e / cm^{-3}$')
    plt.hlines(np.log10(150.), plt.xlim()[0], plt.xlim()[1],
               color='green', linestyles='dashed')
    for i in range(self.cal.shape[1]):
        c = self.cal[:, i]
        print(c)
        plt.plot((c[0], c[0]), (c[1] - c[2], c[1] + c[2]), 'r-')
        plt.plot(c[0], c[1], 'r.')

    p = np.polyfit(self.x, self.ly, 10)
    x = np.arange(plt.xlim()[0], plt.xlim()[1], 0.01)
    plt.plot(x, np.poly1d(p)(x), 'b-')

    plt.figure()
    dists = np.empty_like(self.t)
    sigmas = np.empty_like(self.t)
    for i in range(self.t.shape[0]):
        # if i % 10 != 0: continue
        val = 10. ** np.interp(self.x[i], self.cal[0], self.cal[1])
        err = 10. ** np.interp(self.x[i], self.cal[0], self.cal[2])
        plt.plot(self.y[i], val, 'k.')
        plt.plot((self.y[i], self.y[i]), (val - err, val + err), 'k-')
        dists[i] = self.y[i] - val
        sigmas[i] = np.abs(np.log10(dists[i]) / np.log10(err))
        dists[i] /= self.y[i]

    x = np.array((10., 1E4))
    plt.plot(x, x, 'r-')
    plt.plot(x, x * 2., 'r-')
    plt.plot(x, x * .5, 'r-')
    plt.yscale('log')
    plt.xscale('log')

    plt.figure()
    plt.hist(np.abs(dists), bins=20, range=(0., 4.))

    # plt.figure()
    # plt.hist(sigmas, bins=20)

    dists = np.abs(dists)
    # some statistics:
    s = 100. / float(self.t.shape[0])
    print()
    print('%f%% with relative error < 0.1' % (np.sum(dists < 0.1) * s))
    print('%f%% with relative error < 0.5' % (np.sum(dists < 0.5) * s))
    print('%f%% with relative error < 1.0' % (np.sum(dists < 1.0) * s))
    print('%f%% with relative error < 2.0' % (np.sum(dists < 2.0) * s))
    print()
    print('%f%% with sigma < 1.0' % (np.sum(sigmas < 1.0) * s))
    print('%f%% with sigma < 2.0' % (np.sum(sigmas < 2.0) * s))
    print('%f%% with sigma < 4.0' % (np.sum(sigmas < 4.0) * s))
    plt.show()
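# plot() above fits a degree-10 polynomial to the calibration cloud with
# np.polyfit and evaluates it through np.poly1d. A minimal self-contained
# sketch of that pattern (synthetic data, lower degree):
import numpy as np

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x ** 2 - x + 0.1 * np.random.randn(50)

p = np.polyfit(x, y, 2)  # coefficients, highest order first
fit = np.poly1d(p)       # wraps them as a callable polynomial
print(fit(0.5))          # evaluate the fitted curve at x = 0.5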
def _plot_hdf5_model_horizontal(f, component, output_filename,
                                vmin=None, vmax=None):
    import matplotlib.cm
    import matplotlib.pylab as plt

    data = xarray.DataArray(
        f["data"][component][:], [
            ("latitude", 90.0 - f["coordinate_0"][:]),
            ("longitude", f["coordinate_1"][:]),
            ("radius", f["coordinate_2"][:] / 1000.0)])

    plt.style.use('seaborn-pastel')

    from lasif.domain import RectangularSphericalSection
    domain = RectangularSphericalSection(**dict(f["_meta"]["domain"].attrs))

    plt.figure(figsize=(32, 18))

    depth_position_map = {
        50: (0, 0),
        100: (0, 1),
        150: (1, 0),
        250: (1, 1),
        400: (2, 0),
        600: (2, 1)
    }

    for depth, location in depth_position_map.items():
        ax = plt.subplot2grid((3, 5), location)
        radius = 6371.0 - depth

        # set up a map and colourmap
        m = domain.plot(ax=ax, resolution="c", skip_map_features=True)

        import lasif.colors
        my_colormap = lasif.colors.get_colormap(
            "tomo_full_scale_linear_lightness")

        from lasif import rotations

        x, y = np.meshgrid(data.longitude, data.latitude)

        x_shape = x.shape
        y_shape = y.shape

        lat_r, lon_r = rotations.rotate_lat_lon(
            y.ravel(), x.ravel(), domain.rotation_axis,
            domain.rotation_angle_in_degree)

        x, y = m(lon_r, lat_r)

        x.shape = x_shape
        y.shape = y_shape

        plot_data = data.sel(radius=radius, method="nearest")
        plot_data = np.ma.masked_invalid(plot_data.data)

        # Overwrite colormap things if given.
        if vmin is not None and vmax is not None:
            min_val_plot = vmin
            max_val_plot = vmax
        else:
            mean = plot_data.mean()
            max_diff = max(abs(mean - plot_data.min()),
                           abs(plot_data.max() - mean))
            min_val_plot = mean - max_diff
            max_val_plot = mean + max_diff
            # Plotting essentially constant models.
            min_delta = 0.001 * abs(max_val_plot)
            if (max_val_plot - min_val_plot) < min_delta:
                max_val_plot = max_val_plot + min_delta
                min_val_plot = min_val_plot - min_delta

        # Plot.
        im = m.pcolormesh(
            x, y, plot_data, cmap=my_colormap, vmin=min_val_plot,
            vmax=max_val_plot, shading="gouraud")

        # make a colorbar and title
        m.colorbar(im, "right", size="3%", pad='2%')
        plt.title(str(depth) + ' km')

    # Depth based statistics.
    plt.subplot2grid((3, 5), (0, 4), rowspan=3)
    plt.title("Depth statistics")
    mean = data.mean(axis=(0, 1))
    std = data.std(axis=(0, 1))
    _min = data.min(axis=(0, 1))
    _max = data.max(axis=(0, 1))

    plt.fill_betweenx(data.radius, mean - std, mean + std,
                      label="std", color="#FF3C83")
    plt.plot(mean, data.radius, label="mean", color="k", lw=2)
    plt.plot(_min, data.radius, color="grey", label="min")
    plt.plot(_max, data.radius, color="grey", label="max")
    plt.legend(loc="best")
    plt.xlabel("Value")
    plt.ylabel("Radius")
    plt.hlines(data.radius, plt.xlim()[0], plt.xlim()[1],
               color="0.8", zorder=-10, linewidth=0.5)

    # Roughness plots.
    plt.subplot2grid((3, 5), (0, 2))
    _d = np.abs(data.diff("latitude", n=1)).sum("latitude").data
    plt.title("Roughness in latitude direction, Total: %g" % _d.sum())
    plt.pcolormesh(data.longitude.data, data.radius.data, _d.T,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Longitude")
    plt.ylabel("Radius")

    plt.subplot2grid((3, 5), (1, 2))
    _d = np.abs(data.diff("longitude", n=1)).sum("longitude").data
    plt.title("Roughness in longitude direction. Total: %g" % _d.sum())
    plt.pcolormesh(data.latitude.data, data.radius.data, _d.T,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Latitude")
    plt.ylabel("Radius")

    plt.subplot2grid((3, 5), (2, 2))
    _d = np.abs(data.diff("radius", n=1)).sum("radius").data
    plt.title("Roughness in radius direction. Total: %g" % _d.sum())
    plt.pcolormesh(data.longitude.data, data.latitude.data, _d,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Longitude")
    plt.ylabel("Latitude")

    # L2
    plt.subplot2grid((3, 5), (0, 3))
    _d = (data ** 2).sum("latitude").data
    plt.title("L2 Norm in latitude direction, Total: %g" % _d.sum())
    plt.pcolormesh(data.longitude.data, data.radius.data, _d.T,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Longitude")
    plt.ylabel("Radius")

    plt.subplot2grid((3, 5), (1, 3))
    _d = (data ** 2).sum("longitude").data
    plt.title("L2 Norm in longitude direction, Total: %g" % _d.sum())
    plt.pcolormesh(data.latitude.data, data.radius.data, _d.T,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Latitude")
    plt.ylabel("Radius")

    plt.subplot2grid((3, 5), (2, 3))
    _d = (data ** 2).sum("radius").data
    plt.title("L2 Norm in radius direction, Total: %g" % _d.sum())
    plt.pcolormesh(data.longitude.data, data.latitude.data, _d,
                   cmap=matplotlib.cm.viridis)
    try:
        plt.colorbar()
    except:
        pass
    plt.xlabel("Longitude")
    plt.ylabel("Latitude")

    plt.suptitle("Component %s - File %s" % (component, output_filename),
                 fontsize=20)

    plt.tight_layout(rect=(0, 0, 1, 0.95))

    plt.savefig(output_filename, dpi=150)
    plt.close()
def plot_ice_cover_eb(
        ice_cover, energy_balance, observed_ice, date, temp, snotot,
        filename, prec=None, wind=None, clouds=None):
    """
    :param ice_cover:
    :param energy_balance:
    :param observed_ice:
    :param date:
    :param temp:
    :param snotot:
    :param filename:
    :param prec:
    :param wind:
    :param clouds:
    :return:

    Note: http://matplotlib.org/mpl_examples/color/named_colors.png
    """

    fsize = (16, 16)
    plt.figure(figsize=fsize)
    #fig = pplt.figure(figsize=fsize)
    plt.clf()

    ############## First subplot
    plt.subplot2grid((11, 1), (0, 0), rowspan=2)

    # depending on how many days are in the plot, the line weight of the
    # modelled data should be adjusted
    modelledLineWeight = 1100/len(ice_cover)

    # don't need to keep the column coordinates, but then again, why not..?
    # Useful for debugging.
    allColumnCoordinates = []

    # plot total snow depth on land
    plb.plot(date, snotot, "gray")

    plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover)))

    # a variable for the lowest point on the ice_cover. It is used for
    # setting the lower left y-limit.
    lowest_point = 0.

    # Plot ice_cover
    for ic in ice_cover:

        # some idea of progress on the plotting
        if ic.date.day == 1:
            print((ic.date).strftime('%Y%m%d'))

        # make data for plotting. [icelayers.. [fro, too, icetype]].
        columncoordinates = []
        too = -ic.water_line  # water line is on xaxis

        for i in range(len(ic.column)-1, -1, -1):
            layer = ic.column[i]
            fro = too
            too = too + layer.height
            columncoordinates.append([fro, too, layer.type])

            if fro < lowest_point:
                lowest_point = fro

            # add coordinates to a vline plot
            plb.vlines(ic.date, fro, too, lw=modelledLineWeight,
                       color=layer.get_colour())  # ic.getColour(layer.type)

        allColumnCoordinates.append(columncoordinates)

    # plot observed ice columns
    for ic in observed_ice:

        if len(ic.column) == 0:
            height = 0.05
            plb.vlines(ic.date, -height, height, lw=4, color='white')
            plb.vlines(ic.date, -height, height, lw=2, color='red')
        else:
            # some idea of progress on the plotting
            print("Plotting observations.")

            # make data for plotting. [ice layers.. [fro, too, icetype]].
            too = -ic.water_line  # water line is on xaxis

            for i in range(len(ic.column)-1, -1, -1):
                layer = ic.column[i]
                fro = too
                too = too + layer.height

                if fro < lowest_point:
                    lowest_point = fro

                padding = 0.
                padding_color = 'white'
                # outline the observations in orange if I have modelled the
                # ice height after observation.
                if ic.metadata.get('IceHeightAfter') == 'Modeled':
                    padding_color = 'orange'
                # add coordinates to a vline plot
                plb.vlines(ic.date, fro-padding, too+padding, lw=6,
                           color=padding_color)
                plb.vlines(ic.date, fro, too, lw=4,
                           color=layer.get_colour())

    # the limits of the left side y-axis are defined relative to the lowest
    # point in the ice cover and the highest point of the observed snow
    # cover.
plb.ylim(lowest_point*1.1, max(snotot)*1.05) # Plot temperatures on a separate y axis plb.twinx() temp_pluss = [] temp_minus = [] for i in range(0, len(temp), 1): if temp[i] >= 0: temp_pluss.append(temp[i]) temp_minus.append(np.nan) else: temp_minus.append(temp[i]) temp_pluss.append(np.nan) plb.plot(date, temp, "black") plb.plot(date, temp_pluss, "red") plb.plot(date, temp_minus, "blue") plb.ylim(-4*(max(temp)-min(temp)), max(temp)) ######################################## temp_atm = [] temp_surf = [] atm_minus_surf = [] itterations = [] EB = [] S = [] L = [] H = [] LE = [] R = [] G = [] s_inn = [] albedo = [] SC = [] R_i = [] stability_correction = [] CC = [] SM = [] if energy_balance[0].date > date[0]: i = 0 while energy_balance[0].date > date[i]: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) i += 1 for eb in energy_balance: if eb.EB is None: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) else: temp_atm.append(eb.temp_atm) temp_surf.append(eb.temp_surface) atm_minus_surf.append(eb.temp_atm-eb.temp_surface) itterations.append(eb.iterations) EB.append(eb.EB) S.append(eb.S) L.append(eb.L_a+eb.L_t) H.append(eb.H) LE.append(eb.LE) R.append(eb.R) G.append(eb.G) s_inn.append(eb.s_inn) albedo.append(eb.albedo) SC.append(eb.SC) R_i.append(eb.R_i) stability_correction.append(eb.stability_correction) CC.append(eb.CC) SM.append(eb.SM) ############### Second sub plot ########################## plt.subplot2grid((11, 1), (2, 0), rowspan=1) plb.bar(date, itterations, label="Iterations for T_sfc", color="gray") plb.xlim(date[0], date[-1]) plb.xticks([]) plb.ylabel("#") # l = plb.legend() # l.set_zorder(20) ############## CC, wind and prec ########################## plt.subplot2grid((11, 1), (3, 0), rowspan=1) # plot precipitation prec_mm = [p*1000. 
               for p in prec]
    plb.bar(date, prec_mm, width=1, lw=0.5, label="Precipitation",
            color="deepskyblue", zorder=10)
    plb.ylabel("RR [mm]")
    plb.xlim(date[0], date[-1])
    plb.ylim(0, max(prec_mm)*1.1)
    plb.xticks([])

    # plot cloud cover
    for i in range(0, len(clouds) - 1, 1):
        if clouds[i] > 0:
            plb.hlines(0, date[i], date[i + 1], lw=190,
                       color=str(-(clouds[i] - 1.)))
        elif np.isnan(clouds[i]):
            # x == np.nan is always False; np.isnan() catches missing data.
            plb.hlines(0, date[i], date[i + 1], lw=190, color="pink")
        else:
            plb.hlines(0, date[i], date[i + 1], lw=190,
                       color=str(-(clouds[i] - 1.)))

    plb.twinx()
    plb.plot(date, wind, color="greenyellow", label="Wind 2m", lw=2,
             zorder=15)
    plb.ylabel("FFM [m/s]")

    ############ Temp diff sfc and atm #############################
    plt.subplot2grid((11, 1), (4, 0), rowspan=2)
    plb.plot(date, temp_atm, "black", zorder=5)
    plb.plot(date, temp, "blue", zorder=10)
    plb.plot(date, temp_surf, "green")
    area = np.minimum(temp_atm, temp_surf)
    plb.fill_between(date, temp_atm, area, color='red')  #, alpha='0.5')
    plb.fill_between(date, temp_surf, area, color='blue')  #, alpha='0.5')
    plb.ylim(-50, 20)
    plb.ylabel("[C]")

    # this plots temperature on separate right side axis
    plb.twinx()

    temp_pluss = []
    temp_minus = []
    for i in range(0, len(atm_minus_surf), 1):
        if atm_minus_surf[i] >= 0:
            temp_pluss.append(atm_minus_surf[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(atm_minus_surf[i])
            temp_pluss.append(np.nan)
    plb.plot(date, atm_minus_surf, "black", lw=2)
    plb.plot(date, temp_pluss, "red", lw=2)
    plb.plot(date, temp_minus, "blue", lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylim(-1, 15)
    plb.ylabel("atm minus surf [C]")

    ################# Richardson no and stability correction of turbulent fluxes #######################
    plt.subplot2grid((11, 1), (6, 0), rowspan=1)
    plb.plot(date, R_i, color="blue", label="Richardson no.", lw=1,
             zorder=15)
    plb.ylabel("R_i (b) []")

    plb.twinx()

    stable = []
    unstable = []
    for i in range(0, len(R_i), 1):
        if R_i[i] > 0:
            stable.append(stability_correction[i])
            unstable.append(np.nan)
        elif R_i[i] < 0:
            unstable.append(stability_correction[i])
            stable.append(np.nan)
        else:
            unstable.append(np.nan)
            stable.append(np.nan)
    plb.plot(date, stability_correction, "black", lw=2)
    plb.plot(date, stable, "green", lw=2)
    plb.plot(date, unstable, "red", lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylabel("stable(g) unstable(r) []")

    ############# Energy terms and albedo ################
    plt.subplot2grid((11, 1), (7, 0), rowspan=4)

    # plot surface albedo
    for i in range(0, len(albedo) - 1, 1):
        if albedo[i] > 0.:
            plb.hlines(-11000, date[i], date[i + 1], lw=25,
                       color=str(albedo[i]))
        elif np.isnan(albedo[i]):
            # missing albedo is drawn white
            plb.hlines(-11000, date[i], date[i + 1], lw=25, color="1.0")

    plb.plot(date, SM, "red", lw=3)
    plb.plot(date, SC, "blue", lw=3)
    plb.plot(date, [0.]*len(date), "white", lw=2)
    plb.plot(date, H, "blue")
    plb.plot(date, LE, "navy")
    plb.plot(date, R, "turquoise")
    plb.plot(date, G, "crimson")
    plb.plot(date, L, "green", lw=1)
    plb.plot(date, S, "gold", lw=1)
    #plb.plot(date, s_inn, "gold", lw=1)
    plb.plot(date, CC, "pink", lw=1)
    plb.plot(date, EB, "black")
    plb.ylim(-12000, 13000)
    plb.xlim(date[0], date[-1])
    #fig.tight_layout()
    plb.ylabel("Q [kJ/m2/24hrs]")

    plb.savefig(filename)
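# Note on the cloud-cover and albedo shading above: in numpy `x == np.nan`
# is always False, so such a branch can never run; the loops above
# therefore use np.isnan(). Minimal self-contained illustration:
import numpy as np

clouds = [0.3, np.nan, 0.8]
for c in clouds:
    if np.isnan(c):
        colour = "pink"           # missing data
    else:
        colour = str(-(c - 1.0))  # grayscale string: no clouds = white
    print(colour)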
def run_carma(band, filt, logscale): # Model lightcurves with CARMA(p,q) process, find best fitting PSD, and # dump pickle with carma_sample. # # :rtype: None # :param band: string with the energy band name, e.g. '2kev', '5395a', # corresponding to the data file names in data/ # :param filt: data filter; 'RXTE' to use only RXTE X-ray data, 'flag0' # to use only 'good data' (see the catalog for details). # :param logscale: True to work on logarithm of lightcurve # # Note: pickle files included in data/. # data_file = data_dir + 'data_' + band + '.txt' # clean data: apply filters, calculate log if required if (band=='fermi'): data = np.loadtxt(data_file) time = data[:,0] y1 = data[:,6] ysig1 = data[:,7] else: time0, y0, ysig0, flag, observatory = read_lightcurve(data_file) if (filt.lower()=='rxte'): idx = np.where((observatory=='RXTE') & (y0>0)) elif (filt.lower()=='flag0'): idx = np.where((flag=='0') & (y0>0)) time = time0[idx] y1 = y0[idx] ysig1 = ysig0[idx] if (logscale): y = np.log(y1) ysig = ysig1/y1 else: y = y1 ysig = ysig1 dt = time[1:] - time[0:-1] noise_level = 2.0 * np.median(dt) * np.mean(ysig ** 2) # create new CARMA process model carma_model = cm.CarmaModel(time, y, ysig) # only search over p < 7, q < p pmax = 7 # use all the processes through the multiprocessing module njobs = -1 MLE, pqlist, AICc_list = carma_model.choose_order(pmax, njobs=-1) carma_sample = carma_model.run_mcmc(50000) # name root for the pickle: e.g. 2kev_flag0 name = band + '_' + filt #cPickle.dump(carma_sample, open(data_dir + name + '.pickle', 'wb')) carma_sample.add_map(MLE) cPickle.dump(carma_sample, open(data_dir + name + '.addmle' + '.pickle', 'wb')) # only use 5000 MCMC samples for speed psd_lo, psd_hi, psd_mid, freq = \ carma_sample.plot_power_spectrum(percentile=95.0, nsamples=5000) plt.hlines( noise_level, freq[0], 1./(2. * np.median(dt)) ) plt.title(name) carma_sample.assess_fit() return
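# Self-contained sketch of the measurement-noise floor computed in
# run_carma() above: twice the median sampling interval times the mean
# squared per-point error, used as the horizontal reference line on the
# PSD plot. Synthetic, irregularly sampled times only.
import numpy as np

time = np.sort(np.random.uniform(0.0, 100.0, 200))
ysig = 0.05 * np.ones_like(time)  # per-point 1-sigma errors

dt = time[1:] - time[0:-1]
noise_level = 2.0 * np.median(dt) * np.mean(ysig ** 2)
print(noise_level)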
for i, d in enumerate(delaysRange): hst, bins = np.histogram(mArr[:, i].compressed(), bins=Nbins, range=(0, mArr.max())) tmpArr[1:-1] = hst if not mArr[:, i].mask.all(): indices = np.argmax(tmpArr) dphi = bins[indices - 1] pl.scatter([d*h], bins[indices - 1], color='C0') hst, bins = np.histogram(periodSteady[:, i].compressed(), bins=Nbins, range=(0, periodSteady.max())) tmpArr[1:-1] = hst if not mArr[:, i].mask.all(): indices = np.argmax(hst) pl.scatter([d*h], bins[indices - 1], color='C2') pl.scatter([d*h], (dphi + d*h) % bins[indices - 1], color='C1') pl.hlines(12, 0, T) pl.hlines(15, 0, T) #pl.xlim(0, T) #pl.ylim(0, 25) #pl.ylim(0, mArr.max()+1) #%% #phiIdx = int(8.0/dphi) #(f, ax) = pl.subplots(Ndelays, 1, sharex=True) #for i, a in enumerate(ax): # a.plot(np.linspace(0, SimTime, int((Tsim + recInt - 1)/recInt)), Vrec[:, phiIdx*2*Ndelays + i]) # a.plot(np.linspace(0, SimTime, int((Tsim + recInt - 1)/recInt)), Vrec[:, phiIdx*2*Ndelays + i + Ndelays]) # a.set_xlim((0, SimTime)) # #pl.xlabel('time, ms') pl.show()
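# Self-contained sketch of the histogram-mode lookup in the loop above:
# np.histogram returns (counts, bin_edges) and argmax of the counts picks
# the modal bin. In the loop above the counts are copied into a zero-padded
# tmpArr, which shifts the argmax by one; that is why it indexes
# bins[indices - 1]. Synthetic data only.
import numpy as np

samples = np.random.normal(10.0, 1.0, 1000)
hst, bins = np.histogram(samples, bins=40)
mode = bins[np.argmax(hst)]  # left edge of the most populated bin
print(mode)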
def update(self): """ This redraws the various axes """ plt.sca(self.ig_ax) plt.cla() if debug: print('DEBUG: Plotting ionogram...') alpha = 0.5 self.current_ionogram.interpolate_frequencies() # does nothing if not required self.current_ionogram.plot(ax=self.ig_ax, colorbar=False, vmin=self.vmin, vmax=self.vmax, color='white', verbose=debug, overplot_digitization=True,alpha=alpha,errors=False, overplot_model=False, overplot_expected_ne_max=True) if debug: print('DEBUG: ... done') plt.colorbar(cax=self.cbar_ax, orientation='horizontal', ticks=mpl.ticker.MultipleLocator()) plt.sca(self.cbar_ax) plt.xlabel(r'spec. dens. / $V^2m^{-2}Hz^{-1}$') plt.sca(self.ig_ax) # Plasma and cyclotron lines if len(self.selected_plasma_lines) > 0: extent = plt.ylim() for v in self.selected_plasma_lines: plt.vlines(v, extent[0], extent[1], 'red',alpha=alpha) if len(self.selected_cyclotron_lines) > 0: extent = plt.xlim() for v in self.selected_cyclotron_lines: plt.hlines(v, extent[0], extent[1], 'red',alpha=alpha) f = self.current_ionogram.digitization.morphology_fp_local if np.isfinite(f): plt.vlines( np.arange(1., 5.) * f / 1E6, plt.ylim()[0], plt.ylim()[1], color='red', lw=1.,alpha=alpha) # If current digitization is invertible, do it and plot it if self.current_ionogram.digitization: if debug: print('DEBUG: Inverting, computing model...') d = self.current_ionogram.digitization plt.sca(self.ne_ax) plt.cla() if d.is_invertible(): winning = d.invert() if winning & np.all(d.density > 0.) & np.all(d.altitude > 0.): plt.plot(d.density, d.altitude, color='k') plt.xlim(5.E1, 5E5) plt.ylim(0,499) alt = np.arange(0., 499., 5.) if self.current_ionogram.sza < 89.9: plt.plot(self.ionospheric_model(alt, np.deg2rad(self.current_ionogram.sza)), alt, color='green') plt.grid() plt.xscale('log') plt.xlabel(r'$n_e / cm^{-3}$') plt.ylabel('alt. / km') fname = self.digitization_db.filename if len(fname) > 30: fname = fname[:10] + '...' 
+ fname[-20:] plt.title('Database: ' + fname) if debug: print('DEBUG: Plotting timeseries....') # Timeseries integrated bar plt.sca(self.tser_ax) plt.cla() plt.imshow(self.tser_arr[::-1,:], vmin=self.vmin, vmax=self.vmax, interpolation='Nearest', extent=self.extent, origin='upper',aspect='auto') plt.xlim(self.extent[0], self.extent[1]) plt.ylim(self.extent[2], self.extent[3]) plt.ylim(0., 5.5) plt.vlines(self.current_ionogram.time, self.extent[2], self.extent[3], self.stored_color) plt.hlines(self.timeseries_frequency, self.extent[0], self.extent[1], self.stored_color, 'dashed') plt.ylabel('f / MHz') # Frequency bar plt.sca(self.freq_ax) plt.cla() freq_extent = (self.extent[0], self.extent[1], ais.ais_max_delay*1E3, ais.ais_min_delay*1E3) inx = 1.0E6 * (self.current_ionogram.frequencies.shape[0] * self.timeseries_frequency) /\ (self.current_ionogram.frequencies[-1] - self.current_ionogram.frequencies[0]) self._freq_bar_data = self.tser_arr_all[:,int(inx),:] plt.imshow(self.tser_arr_all[:,int(inx),:], vmin=self.vmin, vmax=self.vmax, interpolation='Nearest', extent=freq_extent, origin='upper',aspect='auto') plt.xlim(freq_extent[0], freq_extent[1]) plt.ylim(freq_extent[2], freq_extent[3]) plt.vlines(self.current_ionogram.time, freq_extent[2],freq_extent[3], self.stored_color) plt.ylabel(r'$\tau_D / ms$') title = "AISTool v%s, Orbit = %d, Ionogram=%s " % (__version__, self.orbit, celsius.spiceet_to_utcstr(self.current_ionogram.time, fmt='C')) if self.browsing: title += '[Browsing] ' if self.minimum_interaction_mode: title += '[Quick] ' if self._digitization_saved == False: title += 'UNSAVED ' if self.get_status() is not None: title += '[Status = %s] ' % self.get_status() pos, sza = mex.mso_r_lat_lon_position(float(self.current_ionogram.time), sza=True) title += '\nMSO: Altitude = %.1f km, Elevation = %.1f, Azimuth = %.1f deg, SZA = %.1f' % (pos[0] - mex.mars_mean_radius_km, mex.modpos(pos[1]), mex.modpos(pos[2]), sza) pos = mex.iau_pgr_alt_lat_lon_position(float(self.current_ionogram.time)) title += '\nIAU: Altitude = %.1f km, Latitude = %.1f, Longitude = %.1f deg' % ( pos[0], pos[1], mex.modpos(pos[2])) plt.sca(self.tser_ax) plt.title(title) # Message history: if len(self._messages): txt = '' for i, s in enumerate(self._messages): txt += str(i + self._message_counter) + ': ' + s + '\n' plt.annotate(txt, (0.05, 0.995), xycoords='figure fraction', fontsize=8, horizontalalignment='left', verticalalignment='top') # Axis formatters need redoing after each cla() nf = mpl.ticker.NullFormatter loc_f = celsius.SpiceetLocator() loc_t = celsius.SpiceetLocator() self.freq_ax.xaxis.set_major_formatter(celsius.SpiceetFormatter(loc_f)) self.tser_ax.xaxis.set_major_formatter(nf()) self.freq_ax.xaxis.set_major_locator(loc_f) self.tser_ax.xaxis.set_major_locator(loc_t) if debug: print('DEBUG: drawing...') self.figure.canvas.draw() return self
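# update() above maps the requested time-series frequency (MHz) to a column
# index with a linear map over the frequency span; a self-contained sketch
# with synthetic values. As in the source, the offset of frequencies[0] is
# neglected, which only holds when the table starts near zero; a
# nearest-neighbour lookup is the more robust alternative.
import numpy as np

frequencies = np.linspace(0.1e6, 5.5e6, 160)  # Hz, evenly spaced
f_mhz = 2.0                                   # requested frequency in MHz

inx = 1.0E6 * (frequencies.shape[0] * f_mhz) / \
    (frequencies[-1] - frequencies[0])
print(int(inx))

# nearest-neighbour alternative:
print(int(np.argmin(np.abs(frequencies - f_mhz * 1.0E6))))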
def FSM_full_test(model=False, AT='AT4'): """ """ directory = '/Volumes/DATA500/PRIMA/COMM16/' ### AT4 if AT=='AT4' or AT==4: AT = 'AT4'; DT = .6/(24*3600) #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0014.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat4vcm_2011-08-30_09-09-54.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat4fsm_2011-08-30T09_10_46.txt') a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0018.fits') p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat4vcm_2011-08-30_09-49-23.dat') f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat4fsm_2011-08-30T09_50_11.txt') #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0021.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat4vcm_2011-08-30_10-24-43.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat4fsm_2011-08-30T10_25_14.txt') #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0023.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat4vcm_2011-08-30_10-35-52.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat4fsm_2011-08-30T10_36_31.txt') else: ### AT3 AT = 'AT3'; DT=0.75/(24.*3600) #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0016.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat3vcm_2011-08-30_09-27-37.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat3fsm_2011-08-30T09_28_22.txt') #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0020.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat3vcm_2011-08-30_10-09-40.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat3fsm_2011-08-30T10_10_09.txt') #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0024.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat3vcm_2011-08-30_10-41-53.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat3fsm_2011-08-30T10_43_04.txt') #a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0026.fits') #p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat3vcm_2011-08-30_10-53-32.dat') #f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat3fsm_2011-08-30T10_54_20.txt') a = prima.drs(directory+'2011-08-29/PACMAN_OBJ_ASTRO_242_0027.fits') p = prima.pssRecorder(directory+'PSSRECORDER/pssguiRecorder_lat3vcm_2011-08-30_11-00-29.dat') f = open(directory+'PIEZOSCAN/pscsosfPiezoScan2_lat3fsm_2011-08-30T11_00_58.txt') lines = f.readlines() f.close() lines = filter(lambda x: not '#' in x and len(x)>10, lines) mjd = [astro.tag2mjd('2011-08-30T'+x.split()[1])+DT for x in lines] xmjd = np.linspace(min(mjd), max(mjd), 200) FSM1X = [float(x.split()[2]) for x in lines] FSM1Y = [float(x.split()[3]) for x in lines] FSM2X = [float(x.split()[4]) for x in lines] FSM2Y = [float(x.split()[5]) for x in lines] print 'PCR START:', a.raw[0].header['ESO PCR ACQ START'] print 'PCR END :', a.raw[0].header['ESO PCR ACQ END'] min_mjd = min(mjd) max_mjd = max(mjd) if model: param = np.array([0,0,.1,.1,.1,.1]) mjd_primet = 1e-6*a.raw['METROLOGY_DATA'].data.field('TIME')/\ (24*3600)+astro.tag2mjd(a.raw[0].header['ESO PCR ACQ START']) primet = (a.raw['METROLOGY_DATA'].data.field('DELTAL')- a.raw['METROLOGY_DATA'].data.field('DELTAL').mean())*1e6 print 'w_fit:' w_fit = np.where((mjd_primet>min_mjd)*(mjd_primet<max_mjd)) #w_fit = np.where((mjd_primet>min_mjd)) w_fit = (w_fit[0][::200],) print 'x_fit:' X_fit = [mjd_primet[w_fit]-mjd_primet[w_fit].mean(), interpByStep(mjd_primet[w_fit], mjd, np.array(FSM1X)-FSM1X[0]), interpByStep(mjd_primet[w_fit], mjd, 
np.array(FSM1Y)-FSM1Y[0]), interpByStep(mjd_primet[w_fit], mjd, np.array(FSM2X)-FSM2X[0]), interpByStep(mjd_primet[w_fit], mjd, np.array(FSM2Y)-FSM2Y[0])] Y_fit = primet[w_fit] print 'fit:' fit = myfit.fit(primetFsmBias, X_fit, param, Y_fit) fit.leastsqfit() print 'dOPD_um/(FSM1X_um - %5.3f) = %6.4f' %\ (FSM1X[0], fit.leastsq_best_param[2]) print 'dOPD_um/(FSM1Y_um - %5.3f) = %6.4f' %\ (FSM1Y[0], fit.leastsq_best_param[3]) print 'dOPD_um/(FSM2X_um - %5.3f) = %6.4f' %\ (FSM2X[0], fit.leastsq_best_param[4]) print 'dOPD_um/(FSM2Y_um - %5.3f) = %6.4f' %\ (FSM2Y[0], fit.leastsq_best_param[5]) pylab.figure(4, figsize=(17,3)) pylab.clf() pylab.subplots_adjust(left=0.06, bottom=0.15, right=0.96, top=0.85, wspace=0.15, hspace=0.01) pylab.title(AT+' FSM test:'+a.filename) pylab.plot(mjd_primet, primet, 'b-', label='PRIMET A-B') pylab.plot(mjd_primet[w_fit[0]], primetFsmBias(X_fit,fit.leastsq_best_param), 'r-', alpha=0.5, linewidth=3, label='linear model FSM') pylab.plot(mjd_primet[w_fit[0]], Y_fit-primetFsmBias(X_fit,fit.leastsq_best_param), '-', color='g', alpha=0.5, linewidth=3, label='residuals') pylab.hlines([0], min(mjd), max(mjd), color='k', linestyle='dashed', linewidth=2) pylab.legend(ncol=3, loc=('upper left' if AT=='AT4' else 'upper right')) pylab.xlim(min_mjd, max_mjd) pylab.ylabel('PRIMET A-B ($\mu$m)') pylab.figure(3, figsize=(17,9)) pylab.subplots_adjust(left=0.06, bottom=0.07, right=0.96, top=0.96, wspace=0.15, hspace=0.01) pylab.clf() ax1 = pylab.subplot(5,2,1) pylab.title(AT+' FSM test:'+a.filename) pylab.plot(1e-6*a.raw['METROLOGY_DATA'].data.field('TIME')/(24*3600)+ astro.tag2mjd(a.raw[0].header['ESO PCR ACQ START']), (a.raw['METROLOGY_DATA'].data.field('DELTAL')- a.raw['METROLOGY_DATA'].data.field('DELTAL').mean())*1e6, 'b-') pylab.ylabel('PRIMET A-B ($\mu$m)') pylab.subplot(5,2,3, sharex=ax1) #pylab.plot(mjd, FSM1X, '-k', markersize=8, linestyle='steps') pylab.plot(xmjd, interpByStep(xmjd, mjd, FSM1X), 'k-') pylab.ylabel('FSM1 X ($\mu$m)') pylab.subplot(5,2,5, sharex=ax1) #pylab.plot(mjd, FSM1Y, '-k', markersize=8, linestyle='steps') pylab.plot(xmjd, interpByStep(xmjd, mjd, FSM1Y), 'k-') pylab.ylabel('FSM1 Y ($\mu$m)') pylab.subplot(5,2,7, sharex=ax1) #pylab.plot(mjd, FSM2X, '-k', markersize=8, linestyle='steps') pylab.plot(xmjd, interpByStep(xmjd, mjd, FSM2X), 'k-') pylab.ylabel('FSM2 X ($\mu$m)') pylab.subplot(5,2,9, sharex=ax1) #pylab.plot(mjd, FSM2Y, '-k', markersize=8, linestyle='steps') pylab.plot(xmjd, interpByStep(xmjd, mjd, FSM2Y), 'k-') pylab.ylabel('FSM2 Y ($\mu$m)') pylab.xlabel('MJD') pylab.subplot(5,2,2, sharex = ax1, sharey= ax1) pylab.title(AT+' FSM test:'+a.filename) pylab.plot(1e-6*a.raw['METROLOGY_DATA'].data.field('TIME')/(24*3600)+ astro.tag2mjd(a.raw[0].header['ESO PCR ACQ START']), (a.raw['METROLOGY_DATA'].data.field('DELTAL')- a.raw['METROLOGY_DATA'].data.field('DELTAL').mean())*1e6, 'b-') pylab.subplot(5,2,4, sharex=ax1) pylab.plot(p.mjd, p.data['VCM1X[um]'], '-k') pylab.ylabel('VCM1 X ($\mu$m)') pylab.subplot(5,2,6, sharex=ax1) pylab.plot(p.mjd, p.data['VCM1Y[um]'], '-k') pylab.ylabel('VCM1 Y ($\mu$m)') pylab.subplot(5,2,8, sharex=ax1) pylab.plot(p.mjd, p.data['VCM2X[um]'], '-k') pylab.ylabel('VCM2 X ($\mu$m)') pylab.subplot(5,2,10, sharex=ax1) pylab.plot(p.mjd, p.data['VCM2Y[um]'], '-k') pylab.ylabel('VCM2 Y ($\mu$m)') pylab.xlim(min_mjd, max_mjd) del a return
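# interpByStep() is used above but not defined in this document; a minimal
# sketch of the zero-order-hold ("step") interpolation it appears to
# perform, assuming xp is sorted and each value is held until the next
# sample. The name interp_by_step and the behaviour are assumptions.
import numpy as np

def interp_by_step(x, xp, fp):
    """Previous-neighbour interpolation: fp[i] for xp[i] <= x < xp[i+1]."""
    xp = np.asarray(xp)
    fp = np.asarray(fp)
    idx = np.clip(np.searchsorted(xp, x, side="right") - 1, 0, len(xp) - 1)
    return fp[idx]

print(interp_by_step([0.5, 1.5, 2.5], [0., 1., 2.], [10., 20., 30.]))
# -> [10. 20. 30.]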
plt.title('Please select two corners of the ROI. Top left, bottom right')
options.ROI = plt.ginput(2)

# re-prompt if the user clicked the corners in the wrong order (right/left
# instead of left/right); the coordinates are not swapped automatically.
if options.ROI[0][0] > options.ROI[1][0]:
    plt.title('Select top left, then bottom right!')
    options.ROI = plt.ginput(2)
    if options.ROI[0][0] > options.ROI[1][0]:
        plt.title('TOP LEFT, BOTTOM RIGHT!')
        options.ROI = plt.ginput(2)

if options.ROI[0][0] > 0:
    plt.subplot(211)
    plt.imshow(Image)
    plt.title('Original')
    plt.hlines(options.ROI[0][1], options.ROI[0][0], options.ROI[1][0],
               'r', linewidth=3)
    plt.hlines(options.ROI[1][1], options.ROI[0][0], options.ROI[1][0],
               'r', linewidth=3)
    plt.vlines(options.ROI[0][0], options.ROI[0][1], options.ROI[1][1],
               'r', linewidth=3)
    plt.vlines(options.ROI[1][0], options.ROI[0][1], options.ROI[1][1],
               'r', linewidth=3)
    # slice with integer pixel indices (ginput returns floats)
    Image = Image[int(options.ROI[0][1]):int(options.ROI[1][1]),
                  int(options.ROI[0][0]):int(options.ROI[1][0])]
    plt.subplot(212)
    plt.imshow(Image)
    plt.title('ROI: ' + str(int(numpy.round(options.ROI[0][0]))) + ':' +
              str(int(numpy.round(options.ROI[0][1]))) + ' to ' +
              str(int(numpy.round(options.ROI[1][0]))) + ':' +
              str(int(numpy.round(options.ROI[1][1]))))
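# The retry loop above re-prompts when the two ROI corners are clicked in
# the wrong order; a common alternative is to sort the corners so that any
# click order yields a valid top-left/bottom-right pair. Hypothetical
# helper, not part of the original script:
def normalise_roi(p0, p1):
    (x0, y0), (x1, y1) = p0, p1
    return (min(x0, x1), min(y0, y1)), (max(x0, x1), max(y0, y1))

print(normalise_roi((300, 50), (100, 200)))  # -> ((100, 50), (300, 200))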