def fit_result(obj, model, data, noise, mask=False, regrid=False): halo = obj.halo ra = halo.ra.value dec = halo.dec.value bmin = halo.bmin bmaj = halo.bmaj scale = 1. xlabel = 'RA [Deg]' ylabel = 'DEC [Deg]' scale = 1. #if mask: image_mask = obj.image_mask if regrid: data = utils.regridding(obj.halo,data, decrease_fov=True) model = utils.regridding(obj.halo,model) #if mask: image_mask = utils.regridding(obj.halo, obj.image_mask*u.Jy, mask= not obj.halo.cropped).value noise = utils.findrms(data.value)*u.Jy scale = (np.array((bmin.value,bmaj.value))/halo.pix_size).value bmin = bmin/(scale[0]*halo.pix_size) bmaj = bmaj/(scale[1]*halo.pix_size) ra = np.arange(0,data.shape[1])#halo.ra.value dec = np.arange(0,data.shape[0])#halo.dec.value xlabel = 'Pixels' ylabel = 'Pixels' #plt.imshow(image_mask) #plt.show() fig, axes = plt.subplots(ncols=3, nrows=1, sharey=True) for axi in axes.flat: axi.xaxis.set_major_locator(plt.MaxNLocator(5)) axi.xaxis.set_major_formatter(ScalarFormatter(useOffset=False)) axi.yaxis.set_major_formatter(ScalarFormatter(useOffset=False)) fig.set_size_inches(3.2*5,5.1) draw_sizebar(halo,axes[0], scale, regrid) draw_ellipse(halo,axes[0], bmin, bmaj, regrid) data = (data/halo.pix_area).to(uJyarcsec2).value noise = (noise/halo.pix_area).to(uJyarcsec2).value model = (model/halo.pix_area).to(uJyarcsec2).value masked_data = np.copy(data) #if mask: if regrid: masked_data[image_mask > obj.mask_treshold*image_mask.max()] =-10000. else: masked_data[image_mask==1]= -10000. if regrid: NORMres = mplc.Normalize(vmin=-2.*noise, vmax=1.*masked_data.max()) else: NORMres = mplc.Normalize(vmin=-2.*noise, vmax=1.*masked_data.max()) #Trying two different functions since names were changed in recent matplotlib 3.3 update. try: Normdiv = mplc.TwoSlopeNorm(vcenter=0., vmin=0.8*(data-model).min(), vmax=0.8*(data-model).max()) except: Normdiv = mplc.DivergingNorm(vcenter=0., vmin=0.8*(data-model).min(), vmax=0.8*(data-model).max()) im1 = axes[0].imshow(masked_data,cmap='inferno', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres) LEVEL = np.array([1,2,4,8,16,32,64,128,256,512,1024,2048,4096])*noise cont1 = axes[0].contour(model,colors='white', levels=LEVEL, alpha=0.6, extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.) 
cont2 = axes[0].contour(masked_data,colors='lightgreen', levels=np.array([-9999.8]), alpha=0.6, linestyles='-',extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5) axes[0].annotate('$V(x,y)$',xy=(0.5, 1), xycoords='axes fraction', fontsize=titlesize, xytext=(0, -9), textcoords='offset points', ha='center', va='top', color='white') axes[0].set_title("Radio data", fontsize=titlesize) axes[0].set_xlabel(xlabel, fontsize=labelsize) axes[0].set_ylabel(ylabel, fontsize=labelsize) axes[0].grid(color='white', linestyle='-', alpha=0.25) plt.tight_layout() im2 = axes[1].imshow(model,cmap='inferno', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres) axes[1].annotate('$I(x,y)$',xy=(0.5, 1), xycoords='axes fraction', fontsize=titlesize, xytext=(0, -9), textcoords='offset points', ha='center', va='top', color='white') axes[1].set_title(obj.modelName.replace('_',' ')+" model", fontsize=titlesize) axes[1].set_xlabel(xlabel, fontsize=labelsize) axes[1].grid(color='white', linestyle='-', alpha=0.25) cbar = fig.colorbar(im2,ax=axes[1]) cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$',fontsize=labelsize) #cbar.formatter = ScalarFormatter(useMathText=False) #cbar.formatter = ticker.LogFormatter(base=10.,labelOnlyBase=True) #cbar.formatter = ticker.StrMethodFormatter('%.2f') plt.tight_layout() im3 = axes[2].imshow(data-model, cmap='PuOr_r', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = Normdiv) cont4 = axes[2].contour(masked_data, colors='red', levels=np.array([-9999.8]), alpha=0.6, linestyles='-', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5) try: cont3 = axes[2].contour(model, alpha=0.7, colors='black', levels=[2*noise], extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm=NORMres) axes[2].clabel(cont3, fontsize=12, inline=1, fmt='2$\\sigma_{\\mathrm{rms}}$',colors='black') except: pass axes[2].annotate('$V(x,y)-I(x,y)$',xy=(0.5, 1), xycoords='axes fraction', fontsize=titlesize, xytext=(0, -9), textcoords='offset points', ha='center', va='top', color='black') axes[2].set_title("Residual image", fontsize=titlesize) axes[2].set_xlabel(xlabel, fontsize=labelsize) axes[2].grid(color='black', linestyle='-', alpha=0.25) plt.tight_layout() import matplotlib.ticker as ticker cbar = fig.colorbar(im3,ax=axes[2]) cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$',fontsize=labelsize) #cbar.formatter = ScalarFormatter(useMathText=False) #cbar.formatter = ticker.LogFormatter(base=10.,labelOnlyBase=True) #cbar.formatter = ticker.StrMethodFormatter('%.2f') if regrid: plt.savefig(halo.plotPath +halo.file.replace('.fits','')+'_mcmc_model'+obj.filename_append+'_REGRID.pdf') else: plt.savefig(halo.plotPath +halo.file.replace('.fits','')+'_mcmc_model'+obj.filename_append+'.pdf') #plt.show() plt.clf() plt.close(fig)
def make_plots(df, plot_folder, area_unit, unit_colors, data_type, plot_scope,
               name_dict, do_all):
    max_value = df.values.max()
    N = 12
    last_file = None

    # replace area unit numeric codes with actual names
    # this is for USA counties only
    if isinstance(name_dict, pd.DataFrame):
        for c in df.columns:
            if c not in name_dict.index.tolist():
                # if FIPS codes are not in the population data
                # we don't represent them on the map anyway
                # so let's delete them from the plots as well
                df.drop(columns=[c], inplace=True)
                continue
            name = name_dict.loc[c, 'name']
            df.rename(columns={c: name}, inplace=True)
            unit_colors[name] = unit_colors.pop(c)

    if do_all:
        day_list = df.index.tolist()
    else:
        day_list = df.index.tolist()[-1:]

    for day in day_list:
        # make it look like YYYY/MM/DD
        daystring = lfill_date(day)
        plot_file = 'frame-' + daystring.replace('/', '') + '.png'
        plot_full_path = os.path.join(plot_folder, plot_file)

        # if the file exists, skip this frame
        if os.path.exists(plot_full_path):
            continue

        plt.rcParams["figure.dpi"] = 192
        fig = plt.figure(figsize=(10, 5.625))
        plt.title(plot_scope + ' COVID-19 daily cases - ' + data_type +
                  ' - linear scale - top ' + str(N) + ' ' + area_unit)

        # x ticks get too crowded, limit their number
        plt.gca().xaxis.set_major_locator(plt.MaxNLocator(nbins=7))
        plt.gca().spines['right'].set_visible(False)
        plt.gca().spines['top'].set_visible(False)

        # extract one day from the dataframe:
        # put area units in one column and values in the other column
        daydf = df.loc[day].to_frame()
        daydf.index.name = area_unit
        daydf.reset_index(inplace=True)

        topN = daydf.sort_values(by=[day], ascending=False).head(N)
        topNunits = topN[area_unit].tolist()
        all_champs = df.loc[:day, topNunits]
        #max_value = all_champs.values.max()
        plt.gca().set_ylim(bottom=0, top=max_value)

        for u in topNunits:
            champ = df.loc[:day, u].to_frame()
            p = plt.plot(champ.index.tolist(), champ[u].tolist(),
                         color=unit_colors[u])

        leg = plt.legend(topNunits, loc='upper left', frameon=False)
        for line, text in zip(leg.get_lines(), leg.get_texts()):
            line.set_color(unit_colors[text.get_text()])

        fig.subplots_adjust(left=0.07, right=0.99, bottom=0.065, top=0.94)
        fig.savefig(plot_full_path)
        last_file = plot_full_path
        plt.close()

    curr_snap = plot_scope + '_' + data_type + '_top.png'
    curr_snap = curr_snap.replace(' ', '_')
    curr_snap = curr_snap.lower()
    if last_file is not None:
        shutil.copyfile(last_file, curr_snap)
np.random.seed(42)
x0 = [-4.0, 5.0]

s = SQUARE_FIGSIZE
s[0] *= 2
s[1] *= 0.8
fig, axes = pl.subplots(1, 3, sharex=True, sharey=True, figsize=s)

for n, ax in zip([0.0, -1.0, 2.0], axes):
    chain, _ = run_mcmc(log_p_gauss, np.array(x0), nsteps=2e3,
                        prop_sigma=10**n)
    ax.plot(chain[:, 0], chain[:, 1], "o-", color=COLORS["DATA"], ms=2)
    ax.plot(x0[0], x0[1], "o", color=COLORS["MODEL_1"])
    ax.set_xlim(-6.3, 6.3)
    ax.set_ylim(-6.3, 6.3)
    ax.set_xlabel("$x$")
    ax.annotate(r"$\sigma_q = 10^{{{0:.0f}}}$".format(n), (1, 0),
                xycoords="axes fraction", xytext=(-5, 5),
                textcoords="offset points", ha="right", va="bottom")
    ax.yaxis.set_major_locator(pl.MaxNLocator(5))
    ax.xaxis.set_major_locator(pl.MaxNLocator(5))

axes[0].set_ylabel("$y$")
savefig(fig, "MH_sigma.pdf")
cut_wav = signa[int(begs[i] * samp_rate):int(ends[i] * samp_rate)]
wavfile.write('{}/silaba_{}.wav'.format(templates_folder, syl_type),
              samp_rate, cut_wav)

ncol = syl_classes[syl_type]
ax[0][ncol].plot(cut_wav)
ax[0][ncol].set_xticklabels([])
ax[0][ncol].set_yticklabels([])
ax[0][ncol].set_title(syl_type)
ax[1][ncol].specgram(cut_wav, NFFT=NFFT, Fs=samp_rate,
                     noverlap=int(NFFT * p_overlap), cmap='Greys')
ax[1][ncol].set_yticklabels([])
ax[1][ncol].xaxis.set_major_locator(plt.MaxNLocator(2))
ax[1][ncol].set_ylim(min_freq, max_freq)

# conversion from times to 'bins' vector coordinates
# 'bins' = center times of spec
time_beg_coord = (np.abs(t_bins - begs[i])).argmin()
time_end_coord = (np.abs(t_bins - ends[i])).argmin()
for j in range(time_beg_coord, time_end_coord):
    # Take the frame from the spectrogram
    training_frame = signa_relevant[j - half_window:j + 1 + half_window]
    training_set.append(training_frame)
    frame_class.append(syl_id)

all_training_set += training_set
fig.tight_layout()

# %%
# Shuffle the training list (we have to shuffle things together)
# Assemble the parameter vector (use a float array so the parameter values
# are not silently truncated to integers)
solution = np.zeros(len(gp))
solution[0] = v0[0]
solution[1] = v1[0]
solution[2] = v2[0]
solution[3:] = aa[:, 0]
gp.set_parameter_vector(solution)

# Make the maximum likelihood prediction
t = np.linspace(min(x), max(x), 10000)
mu, var = gp.predict(y, t, return_var=True)
std = np.sqrt(var)

# Plot the data
plt.figure()
color = "#ff7f0e"
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
plt.plot(t, mu, color=color)
plt.fill_between(t, mu + std, mu - std, color=color, alpha=0.3,
                 edgecolor="none")
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("maximum likelihood prediction")
plt.savefig('HD85390-5-prediction.png')
os.chdir('..')
print " - omega_0 = %.10g" % omega_best # do a fit to the first 4 Fourier components mtf = MultiTermFit(omega_best, 4) mtf.fit(t, y, dy) phase_fit, y_fit, phased_t = mtf.predict(1000, return_phased_times=True) # plot the phased data and best-fit curves ax = fig.add_subplot(321 + i) ax.errorbar(phased_t, y, dy, fmt='.k', ecolor='gray', lw=1, ms=4, capsize=1.5) ax.plot(phase_fit, y_fit, '-b', lw=2) ax.set_xlim(0, 1) ax.set_ylim(plt.ylim()[::-1]) ax.yaxis.set_major_locator(plt.MaxNLocator(4)) ax.text(0.03, 0.04, "ID = %i" % ids[i], ha='left', va='bottom', transform=ax.transAxes) ax.text(0.03, 0.96, "P = %.2f hr" % (2 * np.pi / omega_best * 24.), ha='left', va='top', transform=ax.transAxes) ylim = ax.get_ylim() ax.set_ylim(ylim[0], ylim[0] + 1.1 * (ylim[1] - ylim[0])) if i < 4: ax.xaxis.set_major_formatter(plt.NullFormatter()) if i % 2 == 0: ax.set_ylabel('mag')
def create(self,): if(any([item.updated for item in self.parent.custom_metrics])): if(len(self.parent.custom_metrics)!=self.parent.n_custom_plots): warnings.warn("Data provided does not match the number of custom plots") self.parent.total_plots=len(self.parent.custom_metrics) self.parent.n_custom_plots=len(self.parent.custom_metrics) else: self.parent.total_plots=self.parent.n_custom_plots if((self.parent.top_rows)*(self.parent.top_cols)<self.parent.total_plots): warnings.warn("Total number of plots does not match the number of rows, number of rows has been increased to \ account for the difference") while(((self.parent.top_rows)*(self.parent.top_cols)<self.parent.total_plots)): self.parent.top_rows+=1 n_splits= self.parent.n_splits # len(cm_df_overall.index)//2 self.parent.figure = plt.figure(figsize=self.parent.figsize) self.parent.main_grid = gridspec.GridSpec(self.parent.nrows, self.parent.ncols, hspace=self.main_grid_hspace, wspace=self.main_grid_wspace) self.parent.top_cell = self.parent.main_grid[0,0:] self.parent.bottom_cell = self.parent.main_grid[1,0:] self.parent.inner_grid_top = gridspec.GridSpecFromSubplotSpec( (self.parent.top_rows*self.parent.plot_width)+(self.parent.top_rows-1), (self.parent.top_cols*self.parent.plot_height)+(self.parent.top_cols-1), self.parent.top_cell,hspace=1 ) self.parent.bottom_cell = self.parent.main_grid[1,0:] self.parent.inner_grid_bottom = gridspec.GridSpecFromSubplotSpec(n_splits,3, self.parent.bottom_cell) self.parent.top_axes=[] for j,k in enumerate(range(0,self.parent.top_rows*self.parent.plot_width,self.parent.plot_width)): for l,m in enumerate(range(0,self.parent.top_cols*self.parent.plot_height,self.parent.plot_height)): temp=plt.subplot(self.parent.inner_grid_top[k+j:k+self.parent.plot_height+j, l+m:(l-1)+m+self.parent.plot_width]) self.parent.top_axes.append(temp) self.parent._avg_axes=[] for i,item in enumerate(self.parent.top_axes): if(i<self.parent.total_plots): if(self.parent.custom_metrics[i].average): ax_avg=item.twiny().twinx() self.parent._avg_axes.append(ax_avg) # self.parent.middle_cell = self.parent.main_grid[1,0:] # self.parent.inner_grid_middle = gridspec.GridSpecFromSubplotSpec(n_splits,3, self.parent.middle_cell) # middle_axes=[] # #adds the main plots for i in range(0,self.parent.n_custom_plots): try: if(self.parent.custom_metrics[i].updated): custom_data=self.parent.custom_metrics[i].window() if(custom_data.empty): custom_data=pd.DataFrame([0,0,0,0],columns=['No Data Available Yet']) #This can be optimized later # top_axes[i].clear() self.parent.top_axes[i].plot(custom_data.iloc[-1*self.parent.custom_metrics[i].w_size:,:]) self.parent.top_axes[i].legend(self.parent.custom_metrics[i].window().columns) self.parent.top_axes[i].set_title(self.parent.custom_metrics[i].name) self.parent.top_axes[i].set_ylabel('') self.parent.top_axes[i].set_xlabel('') if(self.parent.custom_metrics[i].xaxis_int): self.parent.top_axes[i].xaxis.set_major_locator(MaxNLocator(integer=True)) #limit the number of ticks if(self.parent.custom_metrics[i].n_ticks): _k,_l=self.parent.custom_metrics[i].n_ticks self.parent.top_axes[i].xaxis.set_major_locator(plt.MaxNLocator(_k)) self.parent.top_axes[i].yaxis.set_major_locator(plt.MaxNLocator(_l)) # self.parent.top_axes[i].set_xticks(self.parent.top_axes[i].get_xticks()[::2]) if(self.parent.custom_metrics[i].show_grid): item.grid() if(self.parent.custom_metrics[i].average): self.parent._avg_axes[i].clear() avg=self.parent.custom_metrics[i].means self.parent._avg_axes[i].plot(avg,linestyle='--',alpha=0.6) 
except Exception as error: print(error, 'Happened while adding main plots.') # Adds table split_df=[] bottom_axes=[] self.tail() if(self.parent.show_table): if(not self.parent.main_results.empty): for i,item in enumerate(self.chunks_df(self.parent.main_results.round(6).T, math.ceil(len(self.parent.main_results.columns)/2),'r')): if((i+1)%2==0 and (i>0)): loc='bottom right' else: loc='bottom' if(not item.empty): split_df.append(item) temp=plt.subplot(self.parent.inner_grid_bottom[i]) temp.axis('off') temp.axis('tight') table=temp.table(cellText=item.values, loc=loc, cellLoc='left', rowLabels=item.index, # colLabels=item.columns, ) bottom_axes.append(table) table.scale(1, 2) self.parent.split_df=split_df self.parent.bottom_axes=bottom_axes #moves legends to the bottom of the plots for i,item in enumerate(self.parent.top_axes[:self.parent.n_custom_plots]): box = item.get_position() item.set_position([box.x0, box.y0 + box.height * 0.1,box.width, box.height * 0.9]) lines=item.get_lines() if(custom_data.empty): custom_data=pd.DataFrame([0,0,0,0],columns=['No Data Available Yet']) lines[0].get_xydata() item.legend(self.parent.custom_metrics[i].window().columns,loc='upper center', bbox_to_anchor=(0.5, -0.1),fancybox=True, shadow=True, ncol=5) #add scientific formatting # item.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e')) #Removes empty plots for i,ax in enumerate(self.parent.top_axes): lines=ax.get_lines() try: lines[0].get_xydata() except Exception as error: self.parent.top_axes[i].axis('off') plt.show() for item in self.parent.custom_metrics: if(item.updated): item.updated=False # plt.close(self.parent.figure) clear_output(wait=True) gc.collect() self.parent.counter+=1
def getMLPlotExt(y_ext, y_predict_ext, X_ext, flag, nFeatures): dffExt = pd.read_csv(myConfig.featurePathExt) dffExt = dffExt.copy().dropna(axis=0, how='any').reset_index() getTrendPlot1(dffExt, y_predict_ext, 'fracNa') # Pb bandgap graph flagComp = False if flagComp == True: #ctuple = [c[k] for k in key] compounds = [] for index, row in dffExt.iterrows(): #row['counts'] counter = row['counts'] counter = counter.split('(')[1].split(')')[0] counter = literal_eval(counter) A_t = '' B_t = '' C_t = '' for i in counter: if i == 'Cs' or i == 'Rb' or i == 'Na' or i == 'K': A = i Ai = str(counter[i]) if counter[i] == 1: Ai = '' A_t += A + '$_{' + Ai + '}$' if i == 'Sn' or i == 'Ge' or i == 'Pb': B = i Bi = str(counter[i]) if counter[i] == 1: Bi = '' B_t += B + '$_{' + Bi + '}$' if i == 'Br' or i == 'Cl' or i == 'I': C = i Ci = str(counter[i]) if counter[i] == 1: Ci = '' C_t += C + '$_{' + Ci + '}$' compounds += [A_t + B_t + C_t] fig, ax1 = plt.subplots() plt.title('Pb Bandgap Predictions by Composition') plt.ylabel('Bandgap (eV)') x = range(len(compounds)) plt.xticks(x, compounds, rotation = 90) #plt.xlabel('CsSnI$_{3}$') fig.set_size_inches(8, 5) #ax1 = plt.axes() ax1.scatter(x, y_predict_ext, marker = 'o', color = 'xkcd:blue', label='Predicted') ax1.scatter(x, y_ext, marker = 'o', color = 'xkcd:red', label = 'DFT') plt.legend() ax1.plot(x[1:7], y_predict_ext[1:7], linestyle = '--', color = 'xkcd:blue') ax1.plot(x[1:7], y_ext[1:7], linestyle = '--', color = 'xkcd:red') ax1.tick_params(direction='in', top=True, right=True) ax1.yaxis.set_major_locator(plt.MaxNLocator(5)) plt.tight_layout() path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/' plt.savefig(path + 'predictPb1.png', dpi = 400, bbox_inches="tight") #plt.show() #print(dffExt['counts']) #cols = [x for x in list(X_ext) if "frac" in x] #print(cols) flagComp = False if flagComp == True: savePath = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/' #plt.scatter(y_ext, y_predict_ext, alpha=0.8, color='#00ccff', # marker='o', s=80) my_dpi = 500 fig = plt.figure(figsize=(5, 5), dpi=my_dpi) ymin = 1.08*min(y_ext) if min(y_ext) <=0 else 0.92*min(y_ext) ymax = 1.08*max(y_ext) if max(y_ext) >=0 else 0.92*max(y_ext) xmax = ymax print(len(y_ext)) plt.ylabel('$E_{g}$ Prediction (eV)') plt.xlabel('$E_{g}$ (eV)') plt.title('Bandgap Prediction', y=1.04) bandgapCs = np.array([0.213, ]) #plt.ylabel('$\Delta H_{f}$ Prediction (eV/atom)') #plt.xlabel('$\Delta H_{f}$ (eV/atom)') #plt.title('$\Delta H_{f}$ Prediction', y=1.04) #plt.ylabel('ML Prediction (eV)') #plt.xlabel('Bandgap (eV)') #err = round(sqrt(mean_squared_error(y_ext, y_predict_ext)),3) #plt.title('Bandgap Prediction') plt.ylim(ymin, ymax) plt.xlim(ymin, xmax) #plt.legend(['Train', 'Test']) plt.ylim(ymin, ymax) plt.xlim(ymin, xmax) ax = plt.axes() xy = [ymin, ymax] #ax.grid() #plt.xticks(np.arange(ymin, ymax, 0.25)) #plt.yticks(np.arange(round(ymin, 2), # round(ymax + 0.1, 2) + 0.25, 0.25)) #ax.xaxis.set_major_locator(plt.MaxNLocator(12)) #ax.yaxis.set_major_locator(plt.MaxNLocator(12)) ax.tick_params(direction='in', top=True, right=True) ax.xaxis.set_major_locator(plt.MaxNLocator(6)) ax.yaxis.set_major_locator(plt.MaxNLocator(6)) p0 = plt.plot(xy, xy, 'k', zorder=1) p1 = plt.scatter(y_ext, y_predict_ext, color='#00ccff', marker='o', s=80, label='Train', zorder=2) #plt.legend(['Train', 'Test']) #plt.legend(handles=[p1]) #xy = np.arange(0, 3.2, .005) #plt.plot(xy, xy, 'k', alpha = 0.75) #plt.show() plt.savefig(savePath + 'paper_predict_noCsnoRb_form.png', dpi=500, bbox_inches="tight")
def test_contourf_symmetric_locator():
    # github issue 7271
    z = np.arange(12).reshape((3, 4))
    locator = plt.MaxNLocator(nbins=4, symmetric=True)
    cs = plt.contourf(z, locator=locator)
    assert_array_almost_equal(cs.levels, np.linspace(-12, 12, 5))
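# A minimal sketch (not from the original test suite; the data below are made
# up) of the same symmetric=True behaviour applied to axis ticks instead of
# contour levels: the locator computes tick positions symmetric about zero,
# and ticks falling outside the view limits are simply not drawn.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 1, 50), np.linspace(-3, 9, 50))
ax.yaxis.set_major_locator(plt.MaxNLocator(nbins=4, symmetric=True))
fig.savefig("symmetric_locator.png")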
def plotElements(self):
    '''Plot elements and convective regions'''

    # Initialize parameters
    xLimit = None
    yLimit = None
    lines = []

    # Plot preprocess
    if self.ax is None:
        self.ax = self.fig.add_subplot(1, 1, 1)

    self.ax.set_xlabel("M/M$_\odot$")
    self.ax.set_ylabel("Mass fraction")
    self.ax.set_yscale("log")

    # Manage plot attributes beforehand
    if self.loadedModels:
        xLimit = self.pltAtrb["xrange"]
        if xLimit is None:
            xLimit = [self.plotMasses[0], self.plotMasses[-1]]

        yLimit = self.pltAtrb["yrange"]
        if yLimit is None:
            yLimit = [1e-24, 1]

        # Plot convective regions
        if self.showConve:
            for region in self.plotConvRegions:
                self.ax.fill_between(region, yLimit[0], yLimit[1],
                                     facecolor="none", hatch="/",
                                     edgecolor="k")

        for ii in range(len(self.pltAtrb["elements"])):
            name = self.pltAtrb["elements"][ii]
            capel = name[0].upper() + name[1:]
            if self.dashElements:
                lines.append(
                    self.ax.plot(self.plotMasses, self.plotListsOfData[ii],
                                 "--", label=capel, lw=2))
            else:
                lines.append(
                    self.ax.plot(self.plotMasses, self.plotListsOfData[ii],
                                 label=capel, lw=2))

        # Plot mesh
        if self.showMesh:
            constLevel = [
                math.sqrt(yLimit[0] * yLimit[1]) for x in self.plotBordMass
            ]
            lines.append(
                self.ax.plot(self.plotBordMass, constLevel, "r.",
                             label="Mesh", lw=2))

        # Plot grid
        if self.showGrid:
            nHz = int(math.log10(yLimit[1] / yLimit[0]))
            for ii in range(nHz):
                val = 10**(int(math.log10(yLimit[0])) + ii)
                self.ax.plot(xLimit, (val, val), "r-")

                val = xLimit[0] + ii * (xLimit[1] - xLimit[0]) / (nHz - 1)
                self.ax.plot((val, val), yLimit, "r-")

        # Plot temperature
        if self.showTemp:
            if self.ax2 is None:
                self.ax2 = self.ax.twinx()
                self.ax2.set_ylabel("Temperature (K)")
                self.ax2.set_yscale("log")
                self.ax2.set_ylim([1e5, 1e9])
            lines.append(
                self.ax2.plot(self.plotMasses, self.plotTemp, "k--",
                              label="Temperature", lw=2))

        # Plot neutron density
        elif self.showRho:
            if self.ax2 is None:
                self.ax2 = self.ax.twinx()
                self.ax2.set_ylabel("Neutron density (n/cm$^3$)")
                self.ax2.set_yscale("log")
            lines.append(
                self.ax2.plot(self.plotMasses, self.plotRho, "k--",
                              label="Neutron density", lw=2))

    # Unpack lines
    lins = []
    for line in lines:
        lins += line
    lines = lins

    labs = [l.get_label() for l in lines]
    self.ax.legend(lines, labs, prop={"size": 10})

    # Limits
    self.ax.set_xlim(xLimit)
    self.ax.set_ylim(yLimit)
    self.ax.xaxis.set_major_locator(plt.MaxNLocator(nbins=5))
    self.ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
def getMLPlot(train_y, test_y, y_predict_train, y_predict_test, flag,
              nFeatures):
    flag = True

    # convert eV/atom to meV/atom
    train_y = 1000 * train_y
    test_y = 1000 * test_y
    y_predict_train = 1000 * y_predict_train
    y_predict_test = 1000 * y_predict_test

    if flag == True:
        savePath = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
        my_dpi = 500
        fig = plt.figure(figsize=(5, 5), dpi=my_dpi)

        #ymax = 3.2
        #xmax = ymax
        ymin = 1.08 * min(train_y) if min(train_y) <= 0 else 0.92 * min(train_y)
        ymax = 1.08 * max(train_y) if max(train_y) >= 0 else 0.92 * max(train_y)
        xmax = ymax

        plt.ylabel('$E_{g}$ Prediction (eV)')
        plt.xlabel('$E_{g}$ (eV)')
        plt.title('Bandgap Prediction', y=1.04)

        # note: the hull-energy labels below override the bandgap labels above
        plt.ylabel('$\Delta E_{hull}$ Prediction (meV/atom)')
        plt.xlabel('$\Delta E_{hull}$ (meV/atom)')
        plt.title('$\Delta E_{hull}$ Prediction', y=1.04)

        #plt.ylabel('$\Delta H_{f}$ Prediction (eV/atom)')
        #plt.xlabel('$\Delta H_{f}$ (eV/atom)')
        #plt.title('$\Delta H_{f}$ Prediction', y=1.04)

        plt.ylim(ymin, ymax)
        plt.xlim(ymin, xmax)

        xy = [ymin, ymax]  #np.arange(ymin, ymax, .0005)
        ax = plt.axes()
        #ax.grid()
        #plt.xticks(np.arange(ymin, ymax, 0.25))
        #plt.yticks(np.arange(round(ymin, 2), round(ymax + 0.1, 2) + 0.25, 0.25))
        ax.xaxis.set_major_locator(plt.MaxNLocator(6))
        ax.yaxis.set_major_locator(plt.MaxNLocator(6))
        ax.tick_params(direction='in', top=True, right=True)

        p0 = plt.plot(xy, xy, 'k', zorder=1)
        p1 = plt.scatter(train_y, y_predict_train, color='#00ccff',
                         marker='o', s=80, label='Train', zorder=2)
        p2 = plt.scatter(test_y, y_predict_test, color='#ffb31a',
                         marker='o', s=80, label='Test', zorder=3)
        #plt.legend(['Train', 'Test'])
        plt.legend(handles=[p1, p2])
        #plt.plot(xy, xy, 'k', alpha=1.0)

        plt.savefig(savePath + 'paper_ML_example_d2_junk.png',
                    bbox_inches="tight")
        flag = False
        #plt.savefig(savePath + 'ML_example_d1.png')

    plt.show()
for i in range(10, 21):
    for f in range(1, 13):
        month = str(f)
        year = str(i)
        date = month + "." + year
        dates.append(date)

fig, ax = plt.subplots()
plt.tight_layout()
ax.plot(dates, monthlyDev, color='xkcd:emerald')
ax.axhline(y=0, color='orange', linestyle='solid')
ax.xaxis.set_major_locator(plt.MaxNLocator(12))
#ax.set_xticklabels(dates, rotation=90)
plt.xlabel("Month and Year")
plt.ylabel("Monthly Deviation (NDVI)")
plt.title("Monthly deviation of NDVI - Sichuan Province from 2010 to 2021")

plt.style.use("seaborn-dark")
for param in ['figure.facecolor', 'axes.facecolor', 'savefig.facecolor']:
    plt.rcParams[param] = '#212946'  # bluish dark grey
for param in ['text.color', 'axes.labelcolor', 'xtick.color', 'ytick.color']:
    plt.rcParams[param] = '0.9'  # very light grey

ax.grid(color='#2A3459')  # bluish dark grey, slightly lighter than background
plt.tight_layout()
def summarize_results(model_path, dest_path=None, fig_formats=['pdf']):
    if dest_path is None:
        dest_path = op.split(model_path)[0]

    results = np.load(model_path)
    y_pred = results['y_pred']
    y = results['y']
    fs = results['samplerate']
    model = results['estimator'].item()
    cv_results = results['cv_results'].item()

    if 'frequency' in results:
        freqs = results['frequency']
    else:
        feature_extractor = STFTFeatureExtractor(nfft=2 * 256, shift=1,
                                                 samplerate=fs, f_lower=0,
                                                 f_upper=10., winlen=2 * 256)
        freqs = feature_extractor.get_frequencies()

    if 'n_channels' in results:
        n_channels = results['n_channels']
    else:
        n_channels = 3

    all_labels = np.unique(list(MOUSE_GROUPS_SIMPLE.keys()))

    # ----- show in-sample predictions -----
    fig, ax = plt.subplots()
    t = np.arange(y.shape[0]) / float(fs)
    ax.plot(t, y, 'r-', lw=1, label='Label')
    ax.plot(t, y_pred, '-', color=3 * [.25], lw=.5, label='Predicted')
    ax.set_xlabel('Time (s)')
    ax.set_xlim(t[0], t[-1])
    ax.legend(loc='best', fontsize=8)
    ax.set_yticks(all_labels)
    ax.set_yticklabels([MOUSE_GROUPS_SIMPLE[k] for k in all_labels])
    llutil.set_font_axes(ax, add_size=2)
    llutil.simple_xy_axes(ax)
    fig.set_size_inches(10, 3)
    fig.tight_layout()

    if dest_path is not None:
        for ff in fig_formats:
            fig.savefig(op.join(dest_path, 'prediction_insample.' + ff),
                        format=ff)

    # ----- show separating hyperplanes -----
    n_classes = len(model.classes_)
    colors = [
        llutil.get_nice_color('blue'),
        llutil.get_nice_color('red'),
        llutil.get_nice_color('gray')
    ]

    if n_classes > 2:
        # one hyperplane per class for OVR classifier
        W = model.coef_
    else:
        # single hyperplane for binary classifier
        W = np.vstack((model.coef_, model.coef_))

    fig, axarr = plt.subplots(nrows=n_classes, ncols=1, sharex=True,
                              sharey=True)
    for i, cls in enumerate(model.classes_):
        ax = axarr[i]
        w = W[i, :]
        # integer division so the slice indices stay ints
        n = w.shape[0] // n_channels
        for j in range(n_channels):
            ax.plot(freqs, w[j * n:(j + 1) * n], '-', color=colors[j], lw=1.5)
        ax.set_xlabel('Frequency (Hz)')
        ax.set_ylabel('Weight')
        ax.set_title('class {} ({})'.format(int(cls),
                                            MOUSE_GROUPS_SIMPLE[cls]))
        llutil.set_font_axes(ax, add_size=2)
        llutil.simple_xy_axes(ax)
        ax.yaxis.set_major_locator(plt.MaxNLocator(3))
        ax.set_ylim(W.min(), W.max())

    fig.set_size_inches(5, 1.1 * n_classes)
    fig.tight_layout()

    if dest_path is not None:
        for ff in fig_formats:
            fig.savefig(op.join(dest_path, 'classifier_weights.' + ff),
                        format=ff)

    # ----- error rates -----
    fig, ax = plt.subplots()
    xticklabels = [
        MOUSE_GROUPS_SIMPLE[k].replace(' ', '\n') for k in all_labels
    ]

    tpr = np.mean(cv_results['tpr'], axis=1)
    fpr = np.mean(cv_results['fpr'], axis=1)

    ax.bar(all_labels, tpr, width=0.35, color=3 * [.2], label='TPR')
    ax.bar(all_labels + 0.35, fpr, width=0.35, color=3 * [.75], label='FPR')
    ax.set_xticks(all_labels + .35)
    ax.set_yticks([0, .25, .5, .75, 1])
    # ax.set_xticklabels([groups[k] for k in all_labels])
    ax.set_xticklabels(xticklabels)
    ax.set_ylabel('Rate')
    ax.set_xlim(0, all_labels.max() + 1)
    ax.set_ylim(0, 1.2)
    ax.axhline(1, linestyle='-', color=3 * [0], linewidth=1.5)
    ax.axhline(.9, linestyle='--', color=3 * [.5])
    ax.axhline(.1, linestyle='--', color=3 * [.5])
    ax.legend(loc='best', fontsize=8)
    llutil.set_font_axes(ax, add_size=3)
    llutil.simple_xy_axes(ax)
    fig.set_size_inches(7, 3)
    fig.tight_layout()

    if dest_path is not None:
        for ff in fig_formats:
            fig.savefig(op.join(dest_path, 'error_rates.' + ff), format=ff)

    plt.show()
def train_classifier(rec_paths, run_cv=True, samplerate=100., dest_path=None, algorithm='SVM', use_cluster=False, f_upper=10.): # ----- parameters ----- n_folds = 4 feature_extractor = STFTFeatureExtractor(nfft=2 * 256, shift=1, samplerate=samplerate, f_lower=0, f_upper=f_upper, winlen=2 * 256) if dest_path is None: dest_path = op.join(op.expanduser('~'), 'research', 'data', 'experiments', 'BehavioralScoring') llutil.makedirs_save(dest_path) fig_formats = ['pdf', 'png'] # ----- load and convert data ----- X, y, fs, n_channels, groups = load_data(rec_paths, feature_extractor, samplerate=samplerate) assert fs == samplerate freqs = feature_extractor.get_frequencies() all_labels = np.asarray(groups.keys()) n_labels = len(all_labels) print "total data size: {} observations x {} features".format( X.shape[0], X.shape[1]) # ----- set up estimation/validation methods ----- if algorithm.upper() == 'SVM': if use_cluster: tmp_path = op.join(dest_path, 'tmp') model = ParallelLinearSVC(tmp_path=tmp_path, local=False) else: cv = cross_validation.StratifiedKFold(y, n_folds=n_folds, shuffle=True, random_state=0) minor_version = get_sklearn_minor_version() if minor_version <= 16: class_weight = 'auto' else: class_weight = 'balanced' C_values = 2.**np.linspace(-3, 5, 10) model = LinearSVM(n_folds=n_folds, verbose=False, class_weight=class_weight, dual=False, penalty='l2', max_iter=1000, C_values=C_values) elif algorithm.upper() == 'LDA': priors = [] for i, label in enumerate(all_labels): n = np.sum(y == label) if n > 0: priors.append(n / float(y.shape[0])) model = lda.LDA(priors=np.asarray(priors)) # ----- cross-validation ----- if run_cv: # cross-validation cv = cross_validation.StratifiedKFold(y, n_folds=n_folds, shuffle=True, random_state=0) cv_results = { 'score': np.zeros((n_folds, )), 'tpr': np.zeros((n_labels, n_folds)), 'fpr': np.zeros(( n_labels, n_folds, )) } for i, (train_ind, test_ind) in enumerate(cv): print("Fold {}/{}".format(i + 1, n_folds)) model.fit(X[train_ind, :], y[train_ind]) cv_results['score'][i] = model.score(X[test_ind], y[test_ind]) y_hat = model.predict(X[test_ind, :]) tpr, fpr = compute_error_rates(y[test_ind], y_hat, all_labels) cv_results['tpr'][:, i] = tpr cv_results['fpr'][:, i] = fpr print "Cross-validation results:" for k in ['score', 'tpr', 'fpr']: print "{}: {:.2f} += {:.2f}".format(k, np.mean(cv_results[k]), np.std(cv_results[k])) else: cv_results = None # ----- fit model using all data (and do in-sample prediction) ----- model.fit(X, y) # clf = model.best_estimator_ y_pred = model.predict(X) if isinstance(model, LinearSVM): print "Best misclassification cost:", model.best_param_ tp_rate, fp_rate = compute_error_rates(y, y_pred, all_labels) print "in-sample prediction error rate:", np.mean(y_pred != y) for i, g in enumerate(all_labels): print "label {} ({}): TPR={:.2f}, FPR={:.2f}".format( g, groups[g], tp_rate[i], fp_rate[i]) # ----- save results ----- result_file = op.join(dest_path, 'classification_results.npz') np.savez(result_file, rec_paths=rec_paths, run_cv=run_cv, samplerate=samplerate, cv_results=cv_results, tp_rate=tp_rate, fp_rate=fp_rate, estimator=model, groups=groups, y=y.astype(np.uint8), y_pred=y_pred.astype(np.uint8), frequency=freqs, n_channels=n_channels) # ----- show in-sample predictions ----- fig, axarr = plt.subplots(nrows=2, ncols=1, sharex=True) ax = axarr[0] t = np.arange(y.shape[0]) / float(fs) ax.plot(t, y, 'r-', lw=2, label='label') ax.plot(t, y_pred, '--', color=3 * [.25], lw=1, label='pred') ax.set_xlabel('Time (s)') ax.set_ylabel('Label') 
ax.set_xlim(t[0], t[-1]) ax.legend(loc='best') ax = axarr[1] ax.plot(t, y_pred != y, 'k-', lw=1) ax.set_xlabel('Time (s)') ax.set_ylabel('Misclassified') ax.set_xlim(t[0], t[-1]) ax.set_ylim(0, 1.2) for ax in axarr.flat: # Hide the right and top spines ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.tick_params(axis='both', which='major', labelsize=8) ax.xaxis.label.set_fontsize(9) ax.xaxis.label.set_fontname('Arial') ax.yaxis.label.set_fontsize(9) ax.xaxis.label.set_fontname('Arial') ax.xaxis.set_major_locator(plt.MaxNLocator(4)) ax.yaxis.set_major_locator(plt.MaxNLocator(4)) fig.set_size_inches(7, 3.5) fig.subplots_adjust() if dest_path is not None: for ff in fig_formats: fig.savefig(op.join(dest_path, 'prediction_insample.' + ff), format=ff) # ----- show separating hyperplanes ----- n_classes = len(model.classes_) colors = [ llutil.get_nice_color('blue'), llutil.get_nice_color('red'), llutil.get_nice_color('gray') ] if n_classes > 2: # one hyperplane per class for OVR classifier W = model.coef_ else: # single hyperplance for binary classifier W = np.vstack((model.coef_, model.coef_)) fig, axarr = plt.subplots(nrows=n_classes, ncols=1, sharex=True, sharey=True) for i, cls in enumerate(model.classes_): ax = axarr[i] w = W[i, :] n = w.shape[0] / n_channels for j in range(n_channels): ax.plot(freqs, w[j * n:(j + 1) * n], '-', color=colors[j]) ax.set_xlabel('Frequency (Hz)') ax.set_ylabel('Weight') ax.set_title('class {} ({})'.format(int(cls), groups[cls])) llutil.set_font_axes(ax, add_size=2) llutil.simple_xy_axes(ax) fig.set_size_inches(7, 2 * n_classes) fig.tight_layout() if dest_path is not None: for ff in fig_formats: fig.savefig(op.join(dest_path, 'classifier_weights.' + ff), format=ff) # ----- error rates ----- n_rows = 1 + int(run_cv) fig, axarr = plt.subplots(nrows=n_rows, ncols=1, sharex=True, sharey=True) axarr = np.atleast_1d(axarr) xticklabels = [groups[k].replace(' ', '\n') for k in all_labels] for i in range(n_rows): if i == 0: tpr = tp_rate fpr = fp_rate title = 'in-sample' else: tpr = np.mean(cv_results['tpr'], axis=1) fpr = np.mean(cv_results['fpr'], axis=1) title = 'cross-validated' ax = axarr[i] ax.set_title(title) ax.bar(all_labels, tpr, width=0.35, color=3 * [.25], label='TPR') ax.bar(all_labels + 0.35, fpr, width=0.35, color=3 * [.75], label='FPR') ax.set_xticks(all_labels + .35) ax.set_xticklabels(xticklabels) ax.set_ylabel('Rate') ax.set_xlim(0, all_labels.max() + 1) ax.set_ylim(0, 1.2) ax.axhline(1, linestyle='-', color=3 * [0], linewidth=1.5) ax.axhline(.9, linestyle='--', color=3 * [.5]) ax.axhline(.1, linestyle='--', color=3 * [.5]) ax.legend(loc='best') llutil.set_font_axes(ax, add_size=2) llutil.simple_xy_axes(ax) fig.set_size_inches(7, 5) fig.tight_layout() if dest_path is not None: for ff in fig_formats: fig.savefig(op.join(dest_path, 'error_rates.' + ff), format=ff) plt.show()
def plot_mcmc( samples, labels=None, priors=None, ptrue=None, precision=None, nbins=30, s=1.0 ): """Plots a Giant Triangle Confusogram Parameters ---------- samples: 2-D array, shape (N, ndim) Samples from ndim variables to be plotted in the GTC labels: list of strings, optional List of names for each variable (size ndim) priors: list of callables, optional List of prior functions for the variables distributions (size ndim) ptrue: list of floats, optional List of true estimates for each parameter precision: list of ints, optional List of decimal places to write down for each parameter. Defaults to 2 nbins: int, optional Number of bins to be used in 1D and 2D histograms. Defaults to 30 s: float, optional Standard deviation of Gaussian filter applied to smooth histograms. Defaults to 1.0 """ p = map( lambda v: (v[1], v[1] - v[0], v[2] - v[1]), zip(*np.percentile(samples, [16, 50, 84], axis=0)), ) p = list(p) ndim = samples.shape[-1] fig = plt.figure(figsize=(8, 8)) fig.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05) # TODO: pyplot style context grid = plt.GridSpec(ndim, ndim, wspace=0.0, hspace=0.0) handles = [] if precision is None: precision = 2 * np.ones(ndim, dtype=int) if ptrue is None: ptrue = np.array([None for _ in range(ndim)]) # PLOT 1D for i in range(ndim): ax = fig.add_subplot(grid[i, i]) H, edges = np.histogram(samples[:, i], bins=nbins, density=True) centers = (edges[1:] + edges[:-1]) / 2 data = ndimage.gaussian_filter1d((centers, H), sigma=s) data[1] /= data[1].sum() (l1,) = ax.plot(data[0], data[1], "b-", lw=1, label="posterior") if priors is not None: pr = priors[i](centers) pr /= pr.sum() (l2,) = ax.plot(centers, pr, "k-", lw=1, label="prior") l3 = ax.axvline(p[i][0], color="k", ls="--", label="median") mask = np.logical_and( centers - p[i][0] <= p[i][2], p[i][0] - centers <= p[i][1] ) ax.fill_between( centers[mask], np.zeros(mask.sum()), data[1][mask], color="b", alpha=0.3 ) if ptrue[i] is not None: l4 = ax.axvline(ptrue[i], color="gray", lw=1.5, label="true") if i < ndim - 1: ax.set_xticks([]) else: ax.tick_params(rotation=45) ax.set_yticks([]) ax.get_xaxis().set_major_locator(plt.MaxNLocator(3)) ax.set_ylim(0) if labels is not None: ax.set_title( "{0} = {1:.{4}f}$^{{+{2:.{4}f}}}_{{-{3:.{4}f}}}$".format( labels[i], p[i][0], p[i][2], p[i][1], precision[i] ) ) handles.append(l1) try: handles.append(l2) except UnboundLocalError: pass try: handles.append(l3) except UnboundLocalError: pass try: handles.append(l4) except UnboundLocalError: pass # PLOT 2D nbins_flat = np.linspace(0, nbins ** 2, nbins ** 2) for i in range(ndim): for j in range(i): ax = fig.add_subplot(grid[i, j]) H, xi, yi = np.histogram2d(samples[:, j], samples[:, i], bins=nbins) extents = [xi[0], xi[-1], yi[0], yi[-1]] H /= H.sum() H_order = np.sort(H.flat) H_cumul = np.cumsum(H_order) tmp = np.interp([0.0455, 0.3173, 1.0], H_cumul, nbins_flat) chainlevels = np.interp(tmp, nbins_flat, H_order) data = ndimage.gaussian_filter(H.T, sigma=s) xbins = (xi[1:] + xi[:-1]) / 2 ybins = (yi[1:] + yi[:-1]) / 2 ax.contourf( xbins, ybins, data, levels=chainlevels, colors=["#1f77b4", "#52aae7", "#85ddff"], alpha=0.3, ) ax.contour(data, chainlevels, extent=extents, colors="b") ax.get_xaxis().set_major_locator(plt.MaxNLocator(3)) ax.get_yaxis().set_major_locator(plt.MaxNLocator(3)) if ptrue[i] is not None: ax.axhline(ptrue[i], color="gray", lw=1.5) if ptrue[j] is not None: ax.axvline(ptrue[j], color="gray", lw=1.5) if i < ndim - 1: ax.set_xticks([]) else: ax.tick_params(rotation=45) if j > 0: ax.set_yticks([]) 
            else:
                ax.tick_params(rotation=45)

    fig.legend(handles=handles)
    return fig
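# Hypothetical usage sketch for plot_mcmc: the samples, labels and "true"
# values below are invented for illustration; only the call signature comes
# from the function above, and plot_mcmc is assumed to be importable from its
# module.
import numpy as np

rng = np.random.default_rng(0)
# Fake posterior samples for two correlated parameters, shape (N, ndim).
samples = rng.multivariate_normal([0.0, 1.0], [[1.0, 0.5], [0.5, 2.0]],
                                  size=5000)
fig = plot_mcmc(samples, labels=["$a$", "$b$"], ptrue=[0.0, 1.0], nbins=40)
fig.savefig("triangle.png")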
def plot_wind(data, specs, outputpath):
    '''
    Routine plots vertical profiles of wind speed and direction and saves the
    plot as .png.

    INPUT:
    - data: dictionary with data (e.g. filled by read_ncfile())
    - specs: dictionary with filename specifications (filled by read_ncfile())
    - outputpath: path where the png will be stored.

    OUTPUT: .png file stored in outputpath
    '''
    logging.info('now plotting wind speed and direction sounding.........')

    # define outputname of .png-file:
    variable = 'wind'
    outputname = '{platform}_{instrument}{direction}_{variable}_{date}_{tempres}.png'.format(
        platform=specs['platform_short'],
        instrument=specs['type'].replace(' ', '').replace('_', ''),
        direction=specs['direction'],
        variable=variable,
        date=specs['date'] + '_' + specs['time'],
        tempres=specs['tempres'].replace(' ', ''))

    fig, ax = plt.subplots(1, 2, sharey=True, figsize=(8, 6))

    # plot the data into subpanels:
    ax[0].plot(data['windSpeed'], data['altitude'], '.-k', markersize=1)
    ax[1].plot(data['windDirection'], data['altitude'], '.-k', markersize=1)

    # general cosmetics:
    for i in range(2):
        ax[i].spines['top'].set_visible(False)
        ax[i].spines['right'].set_visible(False)
        ax[i].spines['left'].set_visible(False)
        ax[i].grid(axis='y', linestyle='-', color='gray')
        ax[i].set_ylim(0, ax[i].get_ylim()[-1])
        ax[i].xaxis.set_minor_locator(AutoMinorLocator())
        ax[i].yaxis.set_minor_locator(AutoMinorLocator())
        ax[i].xaxis.set_major_locator(plt.MaxNLocator(4))
        ax[i].tick_params(top=False, right=False)
        # axis labels:
        ax[i].set_ylabel('Altitude [m]', fontsize=14)
        # switch off minor ticks for top axis:
        ax[i].tick_params(axis='x', which='minor', top=False)
        # make labels larger for all ticks:
        ax[i].tick_params(axis='both', labelsize=14)

    # switch off some ticks, labels and spines manually:
    ax[0].spines['left'].set_visible(True)
    ax[1].tick_params(left=False)
    ax[0].tick_params(right=False, which='minor', axis='y')
    ax[1].spines['right'].set_visible(True)
    ax[1].yaxis.set_ticks_position('right')
    ax[1].yaxis.set_label_position('right')

    # set wind direction axis to valid range:
    ax[1].set_xlim(0, 360)

    ax[0].set_xlabel('Wind Speed [m s$^{-1}$]', fontsize=14)
    ax[1].set_xlabel('Wind Direction [$^\circ$]', fontsize=14)

    plt.subplots_adjust(top=0.9, right=0.85, left=0.15)
    fig.suptitle(
        '%s, %s %sUTC' % (specs['location'], specs['date'],
                          data['time_of_launch_HHmmss'][:-2]),
        fontsize=18)

    fig.savefig(outputpath + outputname)
    logging.info('Wind profile saved at {}'.format(outputpath + outputname))
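# Hypothetical call sketch for plot_wind: every value below is invented; only
# the dictionary key names are taken from the function body above.
import numpy as np

data = {
    'altitude': np.linspace(0, 10000, 200),            # m
    'windSpeed': np.random.uniform(0, 30, 200),        # m/s
    'windDirection': np.random.uniform(0, 360, 200),   # deg
    'time_of_launch_HHmmss': '120000',
}
specs = {
    'platform_short': 'SHIP', 'type': 'RS41', 'direction': 'ascent',
    'date': '20200101', 'time': '1200', 'tempres': '1s',
    'location': 'Somewhere',
}
plot_wind(data, specs, outputpath='./')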
def plotter(fdict):
    """ Go """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FormatStrFormatter

    pgconn = psycopg2.connect(database='postgis', host='iemdb',
                              user='******')
    station = fdict.get('station', 'DMX')[:4]
    phenomena = fdict.get('phenomena', 'SV')
    significance = fdict.get('significance', 'W')
    split = fdict.get('split', 'jan1')
    nt = NetworkTable('WFO')

    if split == 'jan1':
        sql = """SELECT extract(year from issue)::int as year,
        min(issue at time zone 'UTC') as min_issue,
        max(issue at time zone 'UTC') as max_issue,
        count(distinct eventid)
        from warnings where wfo = %s and phenomena = %s and significance = %s
        and issue is not null
        GROUP by year ORDER by year ASC"""
    else:
        sql = """SELECT
        extract(year from issue - '6 months'::interval)::int as year,
        min(issue at time zone 'UTC') as min_issue,
        max(issue at time zone 'UTC') as max_issue,
        count(distinct eventid)
        from warnings where wfo = %s and phenomena = %s and significance = %s
        and issue is not null
        GROUP by year ORDER by year ASC"""

    df = read_sql(sql, pgconn, params=(station, phenomena, significance),
                  index_col=None)
    # Since many VTEC events start in 2005, we should not trust any
    # data that has its first year in 2005
    if df['year'].min() == 2005:
        df = df[df['year'] > 2005]

    def myfunc(row):
        year = row[0]
        valid = row[1]
        if year == valid.year:
            return int(valid.strftime("%j"))
        else:
            days = (datetime.date(year + 1, 1, 1) -
                    datetime.date(year, 1, 1)).days
            return int(valid.strftime("%j")) + days

    df['startdoy'] = df[['year', 'min_issue']].apply(myfunc, axis=1)
    df['enddoy'] = df[['year', 'max_issue']].apply(myfunc, axis=1)
    df.set_index('year', inplace=True)

    ends = df['enddoy'].values
    starts = df['startdoy'].values
    years = df.index.values

    fig = plt.Figure()
    ax = plt.axes([0.1, 0.1, 0.7, 0.8])
    ax.barh(years - 0.4, (ends - starts), left=starts, fc='blue')
    ax.axvline(np.average(starts[:-1]), lw=2, color='red')
    ax.axvline(np.average(ends[:-1]), lw=2, color='red')
    ax.set_xlabel(("Avg Start Date: %s, End Date: %s"
                   ) % ((datetime.date(2000, 1, 1) + datetime.timedelta(
                       days=int(np.average(starts[:-1])))).strftime("%-d %b"),
                        (datetime.date(2000, 1, 1) + datetime.timedelta(
                            days=int(np.average(ends[:-1])))).strftime("%-d %b")),
                  color='red')
    ax.set_title(("[%s] NWS %s\nPeriod between First and Last %s %s"
                  ) % (station, nt.sts[station]['name'],
                       vtec._phenDict[phenomena],
                       vtec._sigDict[significance]))
    ax.grid()
    days = [1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
    days = days + [x + 365 for x in days]
    ax.set_xticks(days)
    ax.set_xticklabels(calendar.month_abbr[1:] + calendar.month_abbr[1:])
    ax.set_xlim(df['startdoy'].min() - 10, df['enddoy'].max() + 10)
    ax.set_ylabel("Year")
    ax.set_ylim(years[0] - 0.5, years[-1] + 0.5)
    xFormatter = FormatStrFormatter('%d')
    ax.yaxis.set_major_formatter(xFormatter)

    # smaller panel on the right with the yearly event counts
    ax = plt.axes([0.82, 0.1, 0.13, 0.8])
    ax.barh(years - 0.4, df['count'], fc='blue')
    ax.set_ylim(years[0] - 0.5, years[-1] + 0.5)
    plt.setp(ax.get_yticklabels(), visible=False)
    ax.grid(True)
    ax.set_xlabel("# Events")
    ax.yaxis.set_major_formatter(xFormatter)
    xloc = plt.MaxNLocator(3)
    ax.xaxis.set_major_locator(xloc)

    return fig, df
def plot_ptrh(data, specs, outputpath):
    '''
    Routine plots vertical profiles of temperature, pressure and relative
    humidity and saves the plot as .png.

    INPUT:
    - data: dictionary with data (e.g. filled by read_ncfile())
    - specs: dictionary with filename specifications (filled by read_ncfile())
    - outputpath: path where the png will be stored.

    OUTPUT: .png file stored in outputpath
    '''
    logging.info(
        'now plotting pressure, temperature, rel humidity sounding.........')

    # define outputname of .png-file:
    variable = 'ptrelh'
    outputname = '{platform}_{instrument}{direction}_{variable}_{date}_{tempres}.png'.format(
        platform=specs['platform_short'],
        instrument=specs['type'].replace(' ', '').replace('_', ''),
        direction=specs['direction'],
        variable=variable,
        date=specs['date'] + '_' + specs['time'],
        tempres=specs['tempres'].replace(' ', ''))

    fig, ax = plt.subplots(1, 3, sharey=True, figsize=(8, 6))

    # plot temperature, pressure, humidity in three panels:
    ax[0].plot(data['temperature'], data['altitude'], '.-k', markersize=1)
    ax[1].plot(data['pressure'], data['altitude'], '.-k', markersize=1)
    ax[2].plot(data['humidity'], data['altitude'], '.-k', markersize=1)

    # do some cosmetics regarding the layout, axis labels, etc.:
    for i in range(3):
        # switch off some spines:
        ax[i].spines['top'].set_visible(False)
        ax[i].spines['right'].set_visible(False)
        ax[i].spines['left'].set_visible(False)
        ax[i].grid(axis='y', linestyle='-', color='gray')
        # set height axis to start at 0 m:
        ax[i].set_ylim(0, ax[i].get_ylim()[-1])
        # major and minor ticks:
        ax[i].xaxis.set_minor_locator(AutoMinorLocator())
        ax[i].yaxis.set_minor_locator(AutoMinorLocator())
        ax[i].xaxis.set_major_locator(plt.MaxNLocator(4))
        # switch off major ticks on top and right axis:
        ax[i].tick_params(top=False, right=False)
        # switch off minor ticks for top axis:
        ax[i].tick_params(axis='x', which='minor', top=False)
        # make labels larger for all ticks:
        ax[i].tick_params(axis='both', labelsize=14)

    ax[0].spines['left'].set_visible(True)
    ax[0].tick_params(axis='y', right=False, which='minor')
    ax[1].tick_params(left=False)
    ax[1].tick_params(axis='y', right=False, left=False, which='minor')
    ax[2].tick_params(left=False)
    ax[2].spines['right'].set_visible(True)
    ax[2].yaxis.set_ticks_position('right')
    ax[2].yaxis.set_label_position('right')

    # set the relh panel always to values between 0 and 100:
    ax[2].set_xlim(0, 100)
    # and the pressure to max 1100 hPa:
    ax[1].set_xlim(ax[1].get_xlim()[0], 1100)

    # axis labels:
    ax[0].set_ylabel('Altitude [m]', fontsize=14)
    ax[2].set_ylabel('Altitude [m]', fontsize=14)
    ax[0].set_xlabel('Temperature [$^\circ$C]', fontsize=14)
    ax[1].set_xlabel('Pressure [hPa]', fontsize=14)
    ax[2].set_xlabel('Rel Humidity [%]', fontsize=14)

    plt.subplots_adjust(top=0.9, right=0.85, left=0.15)
    fig.suptitle(
        '%s, %s %sUTC' % (specs['location'], specs['date'],
                          data['time_of_launch_HHmmss'][:-2]),
        fontsize=18)

    fig.savefig(outputpath + outputname)
    logging.info('{} profiles saved at {}'.format(variable,
                                                  outputpath + outputname))
def generate_compare_plot(self, tran, ambig, min_read, max_read, master_filepath_dict, lite, offset_dict, ribocoverage, organism, normalize, short_code, background_col, hili_start, hili_stop, comp_uag_col, comp_uga_col, comp_uaa_col, title_size, subheading_size, axis_label_size, marker_size, cds_marker_size, cds_marker_colour, legend_size, transcriptome): labels = [] start_visible = [] line_collections = [] all_stops = ["TAG", "TAA", "TGA"] returnstr = "Position," y_max = 50 if normalize == True: y_max = 0 connection = sqlite3.connect( '/home/DATA/www/tripsviz/tripsviz/trips.sqlite') connection.text_factory = str cursor = connection.cursor() cursor.execute( "SELECT owner FROM organisms WHERE organism_name = '{}' and transcriptome_list = '{}';" .format(organism, transcriptome)) owner = (cursor.fetchone())[0] if owner == 1: if os.path.isfile("{0}/{1}/{2}/{2}.{3}.sqlite".format( config.SCRIPT_LOC, config.ANNOTATION_DIR, organism, transcriptome)): transhelve = sqlite3.connect("{0}/{1}/{2}/{2}.{3}.sqlite".format( config.SCRIPT_LOC, config.ANNOTATION_DIR, organism, transcriptome)) else: return_str = "Cannot find annotation file {}.{}.sqlite".format( organism, transcriptome) return { 'current': 400, 'total': 100, 'status': 'return_str', 'result': return_str } else: transhelve = sqlite3.connect( "{0}transcriptomes/{1}/{2}/{3}/{2}_{3}.sqlite".format( config.UPLOADS_DIR, owner, organism, transcriptome)) cursor = transhelve.cursor() cursor.execute( "SELECT * from transcripts WHERE transcript = '{}'".format(tran)) result = cursor.fetchone() traninfo = { "transcript": result[0], "gene": result[1], "length": result[2], "cds_start": result[3], "cds_stop": result[4], "seq": result[5], "strand": result[6], "stop_list": result[7].split(","), "start_list": result[8].split(","), "exon_junctions": result[9].split(","), "tran_type": result[10], "principal": result[11] } traninfo["stop_list"] = [int(x) for x in traninfo["stop_list"]] traninfo["start_list"] = [int(x) for x in traninfo["start_list"]] if str(traninfo["exon_junctions"][0]) != "": traninfo["exon_junctions"] = [ int(x) for x in traninfo["exon_junctions"] ] else: traninfo["exon_junctions"] = [] transhelve.close() gene = traninfo["gene"] tranlen = traninfo["length"] cds_start = traninfo["cds_start"] cds_stop = traninfo["cds_stop"] strand = traninfo["strand"] if cds_start == 'NULL' or cds_start == None: cds_start = 0 if cds_stop == 'NULL' or cds_stop == None: cds_stop = 0 all_starts = traninfo["start_list"] all_stops = {"TAG": [], "TAA": [], "TGA": []} seq = traninfo["seq"].upper() for i in range(0, len(seq)): if seq[i:i + 3] in all_stops: all_stops[seq[i:i + 3]].append(i + 1) start_stop_dict = { 1: { "starts": [0], "stops": { "TGA": [0], "TAG": [0], "TAA": [0] } }, 2: { "starts": [0], "stops": { "TGA": [0], "TAG": [0], "TAA": [0] } }, 3: { "starts": [0], "stops": { "TGA": [0], "TAG": [0], "TAA": [0] } } } for start in all_starts: rem = ((start - 1) % 3) + 1 start_stop_dict[rem]["starts"].append(start - 1) for stop in all_stops: for stop_pos in all_stops[stop]: rem = ((stop_pos - 1) % 3) + 1 start_stop_dict[rem]["stops"][stop].append(stop_pos - 1) fig = plt.figure(figsize=(23, 12)) ax_main = plt.subplot2grid((30, 1), (0, 0), rowspan=22) if normalize != True: label = 'Read count' else: label = 'Normalized read count' ax_main.set_ylabel(label, fontsize=axis_label_size, labelpad=30) label = 'Position (nucleotides)' ax_main.set_xlabel(label, fontsize=axis_label_size, labelpad=10) #if normalize is true work out the factors for each colour if normalize == 
True: all_mapped_reads = [] for color in master_filepath_dict: all_mapped_reads.append( master_filepath_dict[color]["mapped_reads"]) min_reads = float(min(all_mapped_reads)) for color in master_filepath_dict: factor = min_reads / float( master_filepath_dict[color]["mapped_reads"]) master_filepath_dict[color]["factor"] = factor # So items can be plotted alphabetically unsorted_list = [] for color in master_filepath_dict: input_list = [ color, master_filepath_dict[color]["file_names"], master_filepath_dict[color]["file_descs"], master_filepath_dict[color]["file_ids"], master_filepath_dict[color]["filepaths"], master_filepath_dict[color]["file_type"], master_filepath_dict[color]["minread"], master_filepath_dict[color]["maxread"] ] if "factor" in master_filepath_dict[color]: input_list.append(master_filepath_dict[color]["factor"]) unsorted_list.append(input_list) sorted_list = sorted(unsorted_list, key=lambda x: x[1][0]) returndict = {} for item in sorted_list: # needed to make get_reads accept file_paths file_paths = {"riboseq": {}} for i in range(0, len(item[3])): file_paths["riboseq"][item[3][i]] = item[4][i] file_names = item[1][0] file_descs = item[2] if item[5] == "riboseq": filename_reads, seqvar_dict = get_reads(ambig, item[6], item[7], tran, file_paths, tranlen, ribocoverage, organism, False, False, "fiveprime", "riboseq", 1) else: filename_reads, seqvar_dict = get_reads(ambig, item[6], item[7], tran, file_paths, tranlen, True, organism, False, False, "fiveprime", "riboseq", 1) if normalize == False: try: max_val = max(filename_reads.values()) * 1.1 if max_val > y_max: y_max = max_val except Exception as e: pass labels.append(file_names) start_visible.append(True) plot_filename = ax_main.plot(filename_reads.keys(), filename_reads.values(), alpha=1, label=labels, zorder=1, color=item[0], linewidth=3) line_collections.append(plot_filename) returndict[file_names] = {} for pos in filename_reads: returndict[file_names][pos] = filename_reads[pos] else: normalized_reads = {} for pos in filename_reads: normalized_reads[pos] = filename_reads[pos] * item[8] try: max_val = max(normalized_reads.values()) * 1.1 if max_val > y_max: y_max = max_val except Exception as e: pass labels.append(file_names) start_visible.append(True) plot_filename = ax_main.plot(normalized_reads.keys(), normalized_reads.values(), alpha=1, label=labels, zorder=1, color=item[0], linewidth=3) line_collections.append(plot_filename) returndict[file_names] = {} for pos in filename_reads: returndict[file_names][pos] = normalized_reads[pos] for plot_filename in returndict: returnstr += "{},".format(plot_filename) returnstr += "\n" for i in range(0, tranlen): returnstr += "{},".format(i) for plot_filename in returndict: returnstr += "{},".format(returndict[plot_filename][i]) returnstr += "\n" ax_main.set_ylim(0, y_max) # draw cds start #plt.plot((cds_start,cds_start), (0, y_max), cds_marker_colour,linestyle = ':',linewidth=cds_marker_size) # draw cds end #plt.plot((cds_stop, cds_stop), (0, y_max), cds_marker_colour,linestyle = ':',linewidth=cds_marker_size) cds_markers = ax_main.plot((cds_start, cds_start), (0, y_max * 0.97), color=cds_marker_colour, linestyle='solid', linewidth=cds_marker_size) ax_main.text(cds_start, y_max * 0.97, "CDS start", fontsize=18, color="black", ha="center") #ax_main.annotate('axes fraction',xy=(3, 1), xycoords='data',xytext=(0.8, 0.95), textcoords='axes fraction',arrowprops=dict(facecolor='black', shrink=0.05),horizontalalignment='right', verticalalignment='top') #trans = 
blended_transform_factory(ax_main.transData, ax_main.transAxes) #ax_main.annotate('CDS RELATIVE START',(100,100),transform=trans) #tform = blended_transform_factory(ax_main.transData, ax_main.transAxes) #r=10 #ax_main.text(cds_start, 0.9, "CDS START OR WHATEVER", fontsize='xx-large', color='r', transform=tform) cds_markers += ax_main.plot((cds_stop + 1, cds_stop + 1), (0, y_max * 0.97), color=cds_marker_colour, linestyle='solid', linewidth=cds_marker_size) ax_main.text(cds_stop, y_max * 0.97, "CDS stop", fontsize=18, color="black", ha="center") line_collections.append(cds_markers) start_visible.append(True) labels.append("CDS Markers") ax_f1 = plt.subplot2grid((30, 1), (27, 0), rowspan=1, sharex=ax_main) ax_f1.set_axis_bgcolor('lightgray') ax_f2 = plt.subplot2grid((30, 1), (28, 0), rowspan=1, sharex=ax_main) ax_f2.set_axis_bgcolor('lightgray') ax_f6 = plt.subplot2grid((30, 1), (29, 0), rowspan=1, sharex=ax_main) ax_f6.set_axis_bgcolor('lightgray') ax_f6.set_xlabel('Transcript: {} Length: {} nt'.format(tran, tranlen), fontsize=subheading_size, labelpad=10) for axis, frame in ((ax_f1, 1), (ax_f2, 2), (ax_f6, 3)): color = color_dict['frames'][frame - 1] axis.set_xlim(0, tranlen) starts = [(item, 1) for item in start_stop_dict[frame]['starts']] axis.broken_barh(starts, (0.5, 1), color='white', zorder=5, linewidth=2) stops = [(item, 1) for item in start_stop_dict[frame]['stops']] uag_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAG']] uaa_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAA']] uga_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TGA']] axis.broken_barh(uag_stops, (0, 1), color=comp_uag_col, zorder=2, linewidth=2) axis.broken_barh(uaa_stops, (0, 1), color=comp_uaa_col, zorder=2, linewidth=2) axis.broken_barh(uga_stops, (0, 1), color=comp_uga_col, zorder=2, linewidth=2) axis.set_ylabel('{}'.format(frame), rotation='horizontal', labelpad=10, verticalalignment='center') axis.set_ylim(0, 1) axis.tick_params(top=False, left=False, right=False, bottom=False, labeltop=False, labelleft=False, labelright=False, labelbottom=False) ax_f6.axes.get_yaxis().set_ticks([]) ax_f2.axes.get_yaxis().set_ticks([]) ax_f1.axes.get_yaxis().set_ticks([]) title_str = '{} ({})'.format(gene, short_code) plt.title(title_str, fontsize=title_size, y=36) if not (hili_start == 0 and hili_stop == 0): hili_start = int(hili_start) hili_stop = int(hili_stop) hili = ax_main.fill_between([hili_start, hili_stop], [y_max, y_max], zorder=0, alpha=0.75, color="#fffbaf") labels.append("Highligter") start_visible.append(True) line_collections.append(hili) leg_offset = (legend_size - 17) * 5 if leg_offset < 0: leg_offset = 0 leg_offset += 230 ilp = plugins.InteractiveLegendPlugin(line_collections, labels, alpha_unsel=0, alpha_sel=0.85, xoffset=leg_offset, yoffset=20, start_visible=start_visible, fontsize=legend_size) plugins.connect(fig, ilp, plugins.TopToolbar(yoffset=100), plugins.DownloadProfile(returnstr=returnstr), plugins.DownloadPNG(returnstr=title_str)) ax_main.set_axis_bgcolor(background_col) # This changes the size of the tick markers, works on both firefox and chrome. 
ax_main.tick_params('both', labelsize=marker_size) ax_main.xaxis.set_major_locator(plt.MaxNLocator(3)) ax_main.yaxis.set_major_locator(plt.MaxNLocator(3)) ax_main.grid(color="white", linewidth=20, linestyle="solid") graph = "<div style='padding-left: 55px;padding-top: 22px;'> <a href='https://trips.ucc.ie/short/{0}' target='_blank' ><button class='button centerbutton' type='submit'><b>Direct link to this plot</b></button></a> </div>".format( short_code) tot_prog = 100 graph += mpld3.fig_to_html(fig) return { 'current': 400, 'total': tot_prog, 'status': 'Complete', 'result': graph }
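# Aside: a minimal, self-contained sketch of the depth-normalisation idea used in the
# function above: every track is scaled by min(mapped_reads) / its own mapped-read
# count, so the most shallowly sequenced sample sets the common scale. The
# dictionaries below are toy stand-ins, not the real master_filepath_dict.
def normalise_tracks(mapped_reads, raw_counts):
    min_reads = float(min(mapped_reads.values()))
    normalised = {}
    for colour, counts in raw_counts.items():
        factor = min_reads / float(mapped_reads[colour])
        normalised[colour] = {pos: count * factor for pos, count in counts.items()}
    return normalised

# Example: 'blue' is half as deep as 'red', so the 'red' counts are halved.
print(normalise_tracks({"red": 2000000, "blue": 1000000},
                       {"red": {10: 40}, "blue": {10: 25}}))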
def generate_plot(self, tran, ambig, min_read, max_read, lite, ribocoverage, organism, readscore, noisered, primetype, minfiles, nucseq, user_hili_starts, user_hili_stops, uga_diff, file_paths_dict, short_code, color_readlen_dist, background_col, uga_col, uag_col, uaa_col, advanced, seqhili, seq_rules, title_size, subheading_size, axis_label_size, marker_size, transcriptome, trips_uploads_location, cds_marker_size, cds_marker_colour, legend_size, ribo_linewidth, secondary_readscore, pcr, mismatches, hili_start, hili_stop): #self.update_state(state='PROGRESS',meta={'current': 0, 'total': 100,'status': "Generate plot called"}) if lite == "n" and ribocoverage == True: return_str = "Error: Cannot display Ribo-Seq Coverage when 'Line Graph' is turned off" return { 'current': 100, 'total': 100, 'status': 'Complete', 'result': return_str } labels = [ "Frame 1 profiles", "Frame 2 profiles", "Frame 3 profiles", "RNA", "Exon Junctions" ] start_visible = [True, True, True, True, False] if mismatches == True: labels.append("Mismatches A") labels.append("Mismatches T") labels.append("Mismatches G") labels.append("Mismatches C") start_visible.append(False) start_visible.append(False) start_visible.append(False) start_visible.append(False) start_visible.append(True) labels.append("CDS markers") #This is a list of booleans that decide if the interactive legends boxes are filled in or not.Needs to be same length as labels stop_codons = ["TAG", "TAA", "TGA"] frame_orfs = {1: [], 2: [], 3: []} connection = sqlite3.connect('{}/trips.sqlite'.format(config.SCRIPT_LOC)) connection.text_factory = str cursor = connection.cursor() cursor.execute( "SELECT owner FROM organisms WHERE organism_name = '{}' and transcriptome_list = '{}';" .format(organism, transcriptome)) owner = (cursor.fetchone())[0] if owner == 1: if os.path.isfile("{0}/{1}/{2}/{2}.{3}.sqlite".format( config.SCRIPT_LOC, config.ANNOTATION_DIR, organism, transcriptome)): transhelve = sqlite3.connect("{0}/{1}/{2}/{2}.{3}.sqlite".format( config.SCRIPT_LOC, config.ANNOTATION_DIR, organism, transcriptome)) else: return_str = "Cannot find annotation file {}.{}.sqlite".format( organism, transcriptome) return { 'current': 100, 'total': 100, 'status': 'Complete', 'result': return_str } else: transhelve = sqlite3.connect( "{0}transcriptomes/{1}/{2}/{3}/{2}_{3}.sqlite".format( trips_uploads_location, owner, organism, transcriptome)) connection.close() cursor = transhelve.cursor() cursor.execute( "SELECT * from transcripts WHERE transcript = '{}'".format(tran)) result = cursor.fetchone() traninfo = { "transcript": result[0], "gene": result[1], "length": result[2], "cds_start": result[3], "cds_stop": result[4], "seq": result[5], "strand": result[6], "stop_list": result[7].split(","), "start_list": result[8].split(","), "exon_junctions": result[9].split(","), "tran_type": result[10], "principal": result[11] } try: traninfo["stop_list"] = [int(x) for x in traninfo["stop_list"]] except: traninfo["stop_list"] = [] try: traninfo["start_list"] = [int(x) for x in traninfo["start_list"]] except: traninfo["start_list"] = [] if str(traninfo["exon_junctions"][0]) != "": traninfo["exon_junctions"] = [ int(x) for x in traninfo["exon_junctions"] ] else: traninfo["exon_junctions"] = [] all_cds_regions = [] # Check if the 'coding_regions' table exists cursor.execute( "SELECT name FROM sqlite_master WHERE type='table' AND name='coding_regions';" ) result = cursor.fetchone() if result != None: cursor.execute( "SELECT * from coding_regions WHERE transcript = '{}'".format( tran)) 
result = cursor.fetchall() for row in result: all_cds_regions.append((row[1], row[2])) transhelve.close() gene = traninfo["gene"] tranlen = traninfo["length"] cds_start = traninfo["cds_start"] cds_stop = traninfo["cds_stop"] if cds_start == "NULL" or cds_start == None: cds_start = 0 if cds_stop == "NULL" or cds_stop == None: cds_stop = 0 all_starts = traninfo["start_list"] all_stops = {"TAG": [], "TAA": [], "TGA": []} exon_junctions = traninfo["exon_junctions"] seq = traninfo["seq"].upper() for i in range(0, len(seq)): if seq[i:i + 3] in stop_codons: all_stops[seq[i:i + 3]].append(i + 1) # Error occurs if one of the frames is empty for any given start/stop, so we initialise with -5 as this won't be seen by user and will prevent the error start_stop_dict = { 1: { "starts": [-5], "stops": { "TGA": [-5], "TAG": [-5], "TAA": [-5] } }, 2: { "starts": [-5], "stops": { "TGA": [-5], "TAG": [-5], "TAA": [-5] } }, 3: { "starts": [-5], "stops": { "TGA": [-5], "TAG": [-5], "TAA": [-5] } } } for start in all_starts: rem = ((start - 1) % 3) + 1 start_stop_dict[rem]["starts"].append(start) for stop in all_stops: for stop_pos in all_stops[stop]: rem = ((stop_pos - 1) % 3) + 1 start_stop_dict[rem]["stops"][stop].append(stop_pos) #find all open reading frames for frame in [1, 2, 3]: for start in start_stop_dict[frame]["starts"]: best_stop_pos = 10000000 for stop in start_stop_dict[frame]["stops"]: for stop_pos in start_stop_dict[frame]["stops"][stop]: if stop_pos > start and stop_pos < best_stop_pos: best_stop_pos = stop_pos if best_stop_pos != 10000000: frame_orfs[frame].append((start, best_stop_pos)) #self.update_state(state='PROGRESS',meta={'current': 100, 'total': 100,'status': "Fetching RNA-Seq Reads"}) all_rna_reads, rna_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict, tranlen, True, organism, False, noisered, primetype, "rnaseq", readscore, pcr, get_mismatches=mismatches, self_obj=self) #self.update_state(state='PROGRESS',meta={'current': 100, 'total': 100,'status': "Fetching Ribo-Seq Reads"}) all_subcodon_reads, ribo_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict, tranlen, ribocoverage, organism, True, noisered, primetype, "riboseq", readscore, secondary_readscore, pcr, get_mismatches=mismatches, self_obj=self) seq_var_dict = merge_dicts(ribo_seqvar_dict, rna_seqvar_dict) try: rnamax = max(all_rna_reads.values()) except: rnamax = 0 try: subcodonmax = max(all_subcodon_reads.values()) except: subcodonmax = 0 y_max = max(1, rnamax, subcodonmax) * 1.1 fig = plt.figure(figsize=(22, 13)) ax_main = plt.subplot2grid((30, 1), (0, 0), rowspan=22) ax_main.spines['bottom'].set_visible(False) for s in ['bottom', 'left', 'top', 'right']: ax_main.spines[s].set_linewidth(15) ax_main.spines[s].set_color("red") alt_seq_type_vars = [] # Plot any alternative sequence types if there are any #self.update_state(state='PROGRESS',meta={'current': 100, 'total': 100,'status': "Fetching alternative Seq Types"}) for seq_type in file_paths_dict: if seq_type != "riboseq" and seq_type != "rnaseq": if file_paths_dict[seq_type] == {}: continue if seq_rules[seq_type]["frame_breakdown"] == 1: frame_breakdown = True else: frame_breakdown = False alt_sequence_reads, empty_seqvar_dict = get_reads(ambig, min_read, max_read, tran, file_paths_dict, tranlen, True, organism, frame_breakdown, noisered, primetype, seq_type, readscore, self_obj=self) if frame_breakdown == False: alt_seq_plot = ax_main.plot(alt_sequence_reads.keys(), alt_sequence_reads.values(), alpha=1, label=seq_type, zorder=2, 
color='#5c5c5c', linewidth=2) labels.append(seq_type) start_visible.append(True) alt_seq_type_vars.append(alt_seq_plot) else: alt_frame_counts = { 0: collections.OrderedDict(), 1: collections.OrderedDict(), 2: collections.OrderedDict() } for key in alt_sequence_reads: start = key rem = start % 3 if rem == 1: # frame 1 frame = 2 elif rem == 2: # frame 2 frame = 0 elif rem == 0: # frame 3 frame = 1 alt_frame_counts[frame][key] = alt_sequence_reads[key] frame0_altseqplot = ax_main.plot(alt_frame_counts[0].keys(), alt_frame_counts[0].values(), alpha=0.75, label=seq_type + "frame0", zorder=2, color="#FF4A45", linewidth=2) frame1_altseqplot = ax_main.plot(alt_frame_counts[1].keys(), alt_frame_counts[1].values(), alpha=0.75, label=seq_type + "frame1", zorder=2, color="#64FC44", linewidth=2) frame2_altseqplot = ax_main.plot(alt_frame_counts[2].keys(), alt_frame_counts[2].values(), alpha=0.75, label=seq_type + "frame2*", zorder=2, color="#5687F9", linewidth=2) labels.append(seq_type + "frame 1") labels.append(seq_type + "frame 2") labels.append(seq_type + "frame 3") start_visible.append(True) start_visible.append(True) start_visible.append(True) alt_seq_type_vars.append(frame0_altseqplot) alt_seq_type_vars.append(frame1_altseqplot) alt_seq_type_vars.append(frame2_altseqplot) if max(alt_sequence_reads.values()) > y_max: y_max = max(alt_sequence_reads.values()) label = 'Reads' ax_main.set_ylabel(label, fontsize=axis_label_size, labelpad=30) label = 'Position (nucleotides)' ax_main.set_xlabel(label, fontsize=axis_label_size, labelpad=5) ax_main.set_ylim(0, y_max) if lite == "n": rna_bars = ax_main.bar(all_rna_reads.keys(), all_rna_reads.values(), alpha=1, label=labels, zorder=1, color='lightgray', linewidth=0, width=1) else: rna_bars = ax_main.plot(all_rna_reads.keys(), all_rna_reads.values(), alpha=1, label=labels, zorder=1, color='#a7adb7', linewidth=4) cds_markers = ax_main.plot((cds_start, cds_start), (0, y_max * 0.97), color=cds_marker_colour, linestyle='solid', linewidth=cds_marker_size) ax_main.text(cds_start, y_max * 0.97, "CDS start", fontsize=18, color="black", ha="center") #ax_main.annotate('axes fraction',xy=(3, 1), xycoords='data',xytext=(0.8, 0.95), textcoords='axes fraction',arrowprops=dict(facecolor='black', shrink=0.05),horizontalalignment='right', verticalalignment='top') #trans = blended_transform_factory(ax_main.transData, ax_main.transAxes) #ax_main.annotate('CDS RELATIVE START',(100,100),transform=trans) #tform = blended_transform_factory(ax_main.transData, ax_main.transAxes) #r=10 #ax_main.text(cds_start, 0.9, "CDS START OR WHATEVER", fontsize='xx-large', color='r', transform=tform) cds_markers += ax_main.plot((cds_stop + 1, cds_stop + 1), (0, y_max * 0.97), color=cds_marker_colour, linestyle='solid', linewidth=cds_marker_size) ax_main.text(cds_stop, y_max * 0.97, "CDS stop", fontsize=18, color="black", ha="center") ax_cds = plt.subplot2grid((31, 1), (26, 0), rowspan=1, sharex=ax_main) ax_cds.set_axis_bgcolor("white") ax_cds.set_ylabel('Merged CDS', labelpad=4, verticalalignment='center', horizontalalignment="right", rotation="horizontal", color="black", fontsize=(axis_label_size / 1.5)) ax_f1 = plt.subplot2grid((31, 1), (27, 0), rowspan=1, sharex=ax_main) ax_f1.set_axis_bgcolor(color_dict['frames'][0]) ax_f2 = plt.subplot2grid((31, 1), (28, 0), rowspan=1, sharex=ax_main) ax_f2.set_axis_bgcolor(color_dict['frames'][1]) ax_f3 = plt.subplot2grid((31, 1), (29, 0), rowspan=1, sharex=ax_main) ax_f3.set_axis_bgcolor(color_dict['frames'][2]) ax_nucseq = plt.subplot2grid((31, 1), (30, 
0), rowspan=1, sharex=ax_main) ax_nucseq.set_xlabel('Transcript: {} Length: {} nt'.format(tran, tranlen), fontsize=subheading_size) for tup in all_cds_regions: ax_cds.fill_between([tup[0], tup[1]], [1, 1], zorder=0, alpha=1, color="#001285") #plot a dummy exon junction at postion -1, needed in cases there are no exon junctions, this wont be seen allexons = ax_main.plot((-1, -1), (0, 1), alpha=0.01, color='black', linestyle='-.', linewidth=2) for exon in exon_junctions: allexons += ax_main.plot((exon, exon), (0, y_max), alpha=0.01, color='black', linestyle='-.', linewidth=3) #dictionary for each frame in which the keys are the posistions and the values are the counts frame_counts = { 0: collections.OrderedDict(), 1: collections.OrderedDict(), 2: collections.OrderedDict() } for key in all_subcodon_reads: rem = key % 3 if rem == 1: # frame 1 frame = 0 elif rem == 2: # frame 2 frame = 1 elif rem == 0: # frame 3 frame = 2 frame_counts[frame][key] = all_subcodon_reads[key] if lite == "n": frame_counts[frame][key + 1] = 0 frame_counts[frame][key + 2] = 0 if lite == "n": frame0subpro = ax_main.bar(frame_counts[0].keys(), frame_counts[0].values(), alpha=0.75, label=labels, zorder=2, color="#FF4A45", edgecolor="#FF4A45", width=1, linewidth=4) frame1subpro = ax_main.bar(frame_counts[1].keys(), frame_counts[1].values(), alpha=0.75, label=labels, zorder=2, color="#64FC44", edgecolor="#64FC44", width=1, linewidth=4) frame2subpro = ax_main.bar(frame_counts[2].keys(), frame_counts[2].values(), alpha=0.75, label=labels, zorder=2, color="#5687F9", edgecolor="#5687F9", width=1, linewidth=4) else: frame0subpro = ax_main.plot(frame_counts[0].keys(), frame_counts[0].values(), alpha=0.75, label=labels, zorder=2, color="#FF4A45", linewidth=ribo_linewidth) frame1subpro = ax_main.plot(frame_counts[1].keys(), frame_counts[1].values(), alpha=0.75, label=labels, zorder=2, color="#64FC44", linewidth=ribo_linewidth) frame2subpro = ax_main.plot(frame_counts[2].keys(), frame_counts[2].values(), alpha=0.75, label=labels, zorder=2, color="#5687F9", linewidth=ribo_linewidth) if mismatches == True: a_mismatches = ax_main.plot(seq_var_dict["A"].keys(), seq_var_dict["A"].values(), alpha=0.01, label=labels, zorder=2, color="purple", linewidth=2) t_mismatches = ax_main.plot(seq_var_dict["T"].keys(), seq_var_dict["T"].values(), alpha=0.01, label=labels, zorder=2, color="yellow", linewidth=2) g_mismatches = ax_main.plot(seq_var_dict["G"].keys(), seq_var_dict["G"].values(), alpha=0.01, label=labels, zorder=2, color="orange", linewidth=2) c_mismatches = ax_main.plot(seq_var_dict["C"].keys(), seq_var_dict["C"].values(), alpha=0.01, label=labels, zorder=2, color="pink", linewidth=2) xy = 0 if nucseq == True: ax_nucseq.set_axis_bgcolor(background_col) mrnaseq = seq.replace("T", "U") color_list = ["#FF4A45", "#64FC44", "#5687F9"] char_frame = 0 for char in mrnaseq: ax_nucseq.text((xy + 1) - 0.1, 0.2, mrnaseq[xy], fontsize=20, color=color_list[char_frame % 3]) xy += 1 char_frame += 1 # If the user passed a list of sequences to highlight, find and plot them here. 
if seqhili != ['']: near_cog_starts, signalhtml = get_user_defined_seqs(seq, seqhili) for slip in near_cog_starts[0]: try: hili_sequences += ax_f1.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) except Exception as e: hili_sequences = ax_f1.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) for slip in near_cog_starts[1]: try: hili_sequences += ax_f2.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) except: hili_sequences = ax_f2.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) for slip in near_cog_starts[2]: try: hili_sequences += ax_f3.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) except: hili_sequences = ax_f3.plot((slip, slip), (0, 0.5), alpha=1, label=labels, zorder=4, color='black', linewidth=5) #Plot sequence identifiers which will create a popup telling user what the subsequence is (useful if they have passed multiple subsequences) frame1_subsequences = ax_f1.plot(near_cog_starts[0], [0.25] * len(near_cog_starts[0]), 'o', color='b', mec='k', ms=12, mew=1, alpha=0, zorder=4) frame2_subsequences = ax_f2.plot(near_cog_starts[1], [0.25] * len(near_cog_starts[1]), 'o', color='b', mec='k', ms=12, mew=1, alpha=0, zorder=4) frame3_subsequences = ax_f3.plot(near_cog_starts[2], [0.25] * len(near_cog_starts[2]), 'o', color='b', mec='k', ms=12, mew=1, alpha=0, zorder=4) #Attach the labels to the subsequences plotted above signaltooltip1 = PointHTMLTooltip(frame1_subsequences[0], signalhtml[0], voffset=10, hoffset=10, css=point_tooltip_css) signaltooltip2 = PointHTMLTooltip(frame2_subsequences[0], signalhtml[1], voffset=10, hoffset=10, css=point_tooltip_css) signaltooltip3 = PointHTMLTooltip(frame3_subsequences[0], signalhtml[2], voffset=10, hoffset=10, css=point_tooltip_css) for axisname in (ax_f1, ax_f2, ax_f3, ax_nucseq, ax_cds): axisname.tick_params(top=False, bottom=False, labelleft=False, labelright=False, labelbottom=False) for label in ax_main.xaxis.get_majorticklabels(): label.set_fontsize(36) for axis, frame in ((ax_f1, 1), (ax_f2, 2), (ax_f3, 3)): axis.set_xlim(1, tranlen) starts = [(item, 1) for item in start_stop_dict[frame]['starts']] uag_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAG']] uaa_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TAA']] uga_stops = [(item, 1) for item in start_stop_dict[frame]['stops']['TGA']] #Plot start positions axis.broken_barh(starts, (0.30, 1), color="white", zorder=2, linewidth=7) #Plot stop positions axis.broken_barh(uag_stops, (0, 1), color=uag_col, zorder=2, linewidth=4) axis.broken_barh(uaa_stops, (0, 1), color=uaa_col, zorder=2, linewidth=4) axis.broken_barh(uga_stops, (0, 1), color=uga_col, zorder=2, linewidth=4) axis.set_ylim(0, 1) axis.set_ylabel('Frame {}'.format(frame), labelpad=4, verticalalignment='center', horizontalalignment="right", rotation="horizontal", color="black", fontsize=(axis_label_size / 1.5)) title_str = '{} ({})'.format(gene, short_code) plt.title(title_str, fontsize=title_size, y=38) line_collections = [ frame0subpro, frame1subpro, frame2subpro, rna_bars, allexons ] if mismatches == True: line_collections.append(a_mismatches) line_collections.append(t_mismatches) line_collections.append(g_mismatches) line_collections.append(c_mismatches) line_collections.append(cds_markers) if not (hili_start == 0 and hili_stop == 0): hili_start = int(hili_start) hili_stop = int(hili_stop) hili = 
ax_main.fill_between([hili_start, hili_stop], [y_max, y_max], zorder=0, alpha=0.75, color="#fffbaf") labels.append("Highligted region") start_visible.append(True) line_collections.append(hili) for alt_plot in alt_seq_type_vars: line_collections.append(alt_plot) if 'hili_sequences' in locals(): labels.append("Highligted sequences") start_visible.append(True) line_collections.append(hili_sequences) if user_hili_starts != [] and user_hili_stops != []: for i in range(0, len(user_hili_starts)): user_hili_start = int(user_hili_starts[i]) user_hili_stop = int(user_hili_stops[i]) try: hili += ax_main.fill_between([user_hili_start, user_hili_stop], [y_max, y_max], alpha=0.75, color="#fffbaf") except: hili = ax_main.fill_between([user_hili_start, user_hili_stop], [y_max, y_max], alpha=0.75, color="#fffbaf") labels.append("Highligter") start_visible.append(True) line_collections.append(hili) leg_offset = (legend_size - 17) * 5 if leg_offset < 0: leg_offset = 0 ilp = InteractiveLegendPlugin(line_collections, labels, alpha_unsel=0, alpha_sel=0.85, start_visible=start_visible, fontsize=legend_size, xoffset=leg_offset) htmllabels = {1: [], 2: [], 3: []} all_start_points = {1: [], 2: [], 3: []} try: con_scores = SqliteDict( "{0}/{1}/homo_sapiens/score_dict.sqlite".format( config.SCRIPT_LOC, config.ANNOTATION_DIR)) except Exception as e: con_scores = [] for frame in [1, 2, 3]: orf_list = frame_orfs[frame] for tup in orf_list: orf_ribo = 0.0 outframe_ribo = 0.0 orf_rna = 0.0001 start = tup[0] try: context = (seq[start - 7:start + 4].upper()).replace("T", "U") except Exception as e: con_score = "?" if len(context) != 11 or context[6:9] != "AUG": con_score = "?" else: try: con_score = con_scores[context.upper()] except Exception as e: con_score = "?" all_start_points[frame].append(start - 1) stop = tup[1] other_ribo = 0.0 otherother_ribo = 0.0 for i in range(start + 2, stop, 3): for subframe in [0, 1, 2]: if i in frame_counts[subframe]: orf_ribo += frame_counts[subframe][i] for i in range(start, stop, 3): for subframe in [0, 1, 2]: if i in frame_counts[subframe]: outframe_ribo += frame_counts[subframe][i] for i in range(start + 1, stop, 3): for subframe in [0, 1, 2]: if i in frame_counts[subframe]: outframe_ribo += frame_counts[subframe][i] for i in range(start, stop + 1): if i in all_rna_reads: orf_rna += all_rna_reads[i] orf_te = float(orf_ribo) / float(orf_rna) orf_len = int(stop - start) try: in_out_ratio = orf_ribo / outframe_ribo except: in_out_ratio = "Null" datadict = { 'inframe ribo': [orf_ribo], 'outframe ribo': [outframe_ribo], 'in/out ratio': [in_out_ratio], 'rna': [orf_rna], 'te': [orf_te], 'len': [orf_len], 'context_score': [str(con_score) + "/150"] } df = pd.DataFrame(datadict, columns=([ "inframe ribo", "outframe ribo", "in/out ratio", "rna", "te", "len", "context_score" ])) label = df.ix[[0], :].T label.columns = ["Start pos: {}".format(start - 1)] htmllabels[frame].append(str(label.to_html())) points1 = ax_f1.plot(all_start_points[1], [0.75] * len(all_start_points[1]), 'o', color='b', mec='k', ms=13, mew=1, alpha=0, zorder=3) points2 = ax_f2.plot(all_start_points[2], [0.75] * len(all_start_points[2]), 'o', color='b', mec='k', ms=13, mew=1, alpha=0, zorder=3) points3 = ax_f3.plot(all_start_points[3], [0.75] * len(all_start_points[3]), 'o', color='b', mec='k', ms=13, mew=1, alpha=0, zorder=3) tooltip1 = PointHTMLTooltip(points1[0], htmllabels[1], voffset=10, hoffset=10, css=point_tooltip_css) tooltip2 = PointHTMLTooltip(points2[0], htmllabels[2], voffset=10, hoffset=10, css=point_tooltip_css) 
tooltip3 = PointHTMLTooltip(points3[0], htmllabels[3], voffset=10, hoffset=10, css=point_tooltip_css) ax_f3.axes.get_yaxis().set_ticks([]) ax_f2.axes.get_yaxis().set_ticks([]) ax_f1.axes.get_yaxis().set_ticks([]) returnstr = "Position,Sequence,Frame 1,Frame 2,Frame 3,RNA-Seq\n" for i in range(0, len(seq)): f1_count = 0 f2_count = 0 f3_count = 0 rna_count = 0 if i + 1 in frame_counts[0]: f1_count = frame_counts[0][i + 1] elif i + 1 in frame_counts[1]: f2_count = frame_counts[1][i + 1] elif i + 1 in frame_counts[2]: f3_count = frame_counts[2][i + 1] if i + 1 in all_rna_reads: rna_count = all_rna_reads[i + 1] returnstr += "{},{},{},{},{},{}\n".format(i + 1, seq[i], f1_count, f2_count, f3_count, rna_count) if seqhili == ['']: plugins.connect(fig, ilp, tooltip1, tooltip2, tooltip3, TopToolbar(yoffset=180, xoffset=-70), DownloadProfile(returnstr=returnstr), DownloadPNG(returnstr=title_str)) else: plugins.connect(fig, ilp, tooltip1, tooltip2, tooltip3, signaltooltip1, signaltooltip2, signaltooltip3, TopToolbar(yoffset=100), DownloadProfile(returnstr=returnstr), DownloadPNG(returnstr=title_str)) ax_main.set_axis_bgcolor(background_col) # This changes the size of the tick markers, works on both firefox and chrome. ax_main.tick_params('both', labelsize=marker_size) ax_main.xaxis.set_major_locator(plt.MaxNLocator(3)) ax_main.yaxis.set_major_locator(plt.MaxNLocator(3)) ax_main.grid(True, color="white", linewidth=30, linestyle="solid") #Without this style tag the markers sizes will appear correct on browser but be original size when downloaded via png graph = "<style>.mpld3-xaxis {{font-size: {0}px;}} .mpld3-yaxis {{font-size: {0}px;}}</style>".format( marker_size) graph += "<div style='padding-left: 55px;padding-top: 22px;'> <a href='https://trips.ucc.ie/short/{0}' target='_blank' ><button class='button centerbutton' type='submit'><b>Direct link to this plot</b></button></a> </div>".format( short_code) graph += mpld3.fig_to_html(fig) tot_prog = 100 return { 'current': 400, 'total': tot_prog, 'status': 'Complete', 'result': graph }
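# Aside: a stand-alone sketch of the ORF search performed in generate_plot above.
# Positions are binned into reading frames with ((pos - 1) % 3) + 1 and every start
# is paired with the nearest downstream in-frame stop. The toy sequence is illustrative.
seq = "AAATGGCCTAGGATGAAATAA"
stop_codons = ("TAG", "TAA", "TGA")
starts = [i + 1 for i in range(len(seq) - 2) if seq[i:i + 3] == "ATG"]
stops = [i + 1 for i in range(len(seq) - 2) if seq[i:i + 3] in stop_codons]

frame_orfs = {1: [], 2: [], 3: []}
for start in starts:
    frame = ((start - 1) % 3) + 1
    downstream = [s for s in stops if s > start and ((s - 1) % 3) + 1 == frame]
    if downstream:
        frame_orfs[frame].append((start, min(downstream)))
print(frame_orfs)   # e.g. {1: [(13, 19)], 2: [], 3: [(3, 9)]}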
ax[0][n].set_title(syl_types[n]) fu, tu, Sxx = signal.spectrogram(temp, samp, nperseg=NN, noverlap=NN * overlap, window=signal.get_window( ('gaussian', sigma), NN), scaling='spectrum') Sxx = np.clip(Sxx, a_min=np.amax(Sxx) * 0.000001, a_max=np.amax(Sxx)) ax[1][n].pcolormesh(tu, fu, np.log(Sxx), cmap=plt.get_cmap('Greys'), rasterized=True) ax[1][n].set_ylim(0, 8000) ax[1][n].yaxis.set_major_locator(plt.MaxNLocator(2)) ax[1][n].xaxis.set_major_locator(plt.MaxNLocator(2)) fig.tight_layout() # From now on, 0 will mean silence # syl_types.append('s') num_syl = len(syl_types) glyphs_to = [x for x in syl_types] orden = 1 combinations = list(it.product(glyphs_to, repeat=orden)) glyphs_from = [''.join(list(x)) for x in combinations] num_glyphs_to = len(glyphs_to) num_glyphs_from = len(glyphs_from) # ------------------# # Transition matrix, numbers n_tr_matrix = np.zeros((num_files, num_glyphs_from, num_glyphs_to)) n_tr_matrix_norep = np.zeros((num_files, num_glyphs_from, num_glyphs_to))
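# Aside: a self-contained version of the spectrogram recipe above (Gaussian window,
# power-spectrum scaling, clipping to avoid log(0), grey-scale pcolormesh). The tone,
# sample rate and window parameters are made up for illustration.
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

samp = 44100
t = np.arange(0, 0.5, 1.0 / samp)
temp = np.sin(2 * np.pi * 1000 * t)                 # toy 1 kHz tone
NN, overlap, sigma = 1024, 0.5, 100.0

fu, tu, Sxx = signal.spectrogram(temp, samp, nperseg=NN,
                                 noverlap=int(NN * overlap),
                                 window=signal.get_window(('gaussian', sigma), NN),
                                 scaling='spectrum')
Sxx = np.clip(Sxx, a_min=np.amax(Sxx) * 1e-6, a_max=np.amax(Sxx))
fig, ax = plt.subplots()
ax.pcolormesh(tu, fu, np.log(Sxx), cmap='Greys', rasterized=True)
ax.set_ylim(0, 8000)
ax.yaxis.set_major_locator(plt.MaxNLocator(2))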
list_files = np.array_split(runs, 5) list_dist = np.array_split(dist, 5) fig, ax = plt.subplots(2, 3, subplot_kw={'projection': 'polar'}, figsize=(60, 60)) for i in range(len(ax)): for r, j, d in zip(list_files[i], range(3), list_dist[i]): df, _ = access_file(direc + r) angle, fit = angle_theta_binning(df, col) #ax[i, j].scatter(angle['theta'], angle['signal'], s=0.01) ax[i, j].plot(fit['theta'], fit['signal'], color='red') ax[i, j].set_rlabel_position(90) ax[i, j].set_theta_zero_location("N") # theta=0 at the top ax[i, j].set_theta_direction(-1) ax[i, j].yaxis.set_major_locator(plt.MaxNLocator(3)) if col == 'Couple': ax[i, j].set_rticks([7.2, 7.4, 7.6]) # Less radial ticks elif col == 'Force': ax[i, j].set_rticks([169, 172, 176]) # Less radial ticks ax[i, j].tick_params(axis='y', which='major', labelsize=9) ax[i, j].set_title('d = {:.2f}'.format(float(d) / 724)) plt.tight_layout() plt.savefig(out_direc + col + '_1.pdf') fig, ax = plt.subplots(2, 3, subplot_kw={'projection': 'polar'}, figsize=(60, 60))
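# Aside: a minimal polar-axes sketch mirroring the settings above: theta = 0 at the
# top, clockwise angles, fixed radial ticks and small radial labels. The curve is a
# toy signal, not data from access_file/angle_theta_binning.
import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 200)
r = 7.4 + 0.2 * np.cos(3 * theta)

fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(theta, r, color='red')
ax.set_rlabel_position(90)
ax.set_theta_zero_location("N")    # theta = 0 at the top
ax.set_theta_direction(-1)         # angles increase clockwise
ax.set_rticks([7.2, 7.4, 7.6])     # fewer radial ticks
ax.tick_params(axis='y', which='major', labelsize=9)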
counts = Counter(match) print(counts) x_axis = [] y_axis = [] for eachPixcel in counts: #print eachPixcel x_axis.append(eachPixcel) y_axis.append(counts[eachPixcel]) #print counts[eachPixcel] fig = plt.figure() axis1 = fig.add_subplot(211) axis2 = fig.add_subplot(212) #axis1.legend("Given Image") #axis2.legend("OCR Prediction") #plt.ylim(400) x_points = plt.MaxNLocator(12) axis2.xaxis.set_major_locator(x_points) axis1.imshow(test_iar, label="Given Image") axis2.bar(x_axis, y_axis, label="OCR Prediction") axis2.set_ylim(350) axis1.legend() axis2.legend() plt.show() #a =one[1].split("],") #print one[0] #print len(eachPixcel)
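# Aside: a tiny self-contained version of the Counter -> bar-chart step above, using
# toy pixel values instead of the OCR match/test_iar arrays.
from collections import Counter
import matplotlib.pyplot as plt

counts = Counter([0, 0, 255, 255, 255, 128])
fig, ax = plt.subplots()
ax.bar(list(counts.keys()), list(counts.values()), label="OCR Prediction")
ax.xaxis.set_major_locator(plt.MaxNLocator(12))
ax.legend()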
# Ingredient 3 - Define your axis labels plt.xlabel("Amigos") plt.ylabel("ECDF") plt.margins(0.02) # Ingredient 5 - Define the number format for the x (or y) axis def major_formatter(a, pos): return "%.f" % (a) ax = plt.axes() # Ingredient 6 - Define how many ticks appear on the y axis ax.yaxis.set_major_locator(plt.MaxNLocator(9)) # Ingredient 7 - Define how many ticks appear on the x axis ax.xaxis.set_major_locator(plt.MaxNLocator(11)) # Ingredient 8 - Apply the formatter from Ingredient 5 to the x axis ax.xaxis.set_major_formatter(plt.FuncFormatter(major_formatter)) plt.legend() #################################################### # Recipe to zoom in on the filling of the cake ## ####################################################
def plotErrorRate(res): halpha = 6562.80 hbeta = 4861.325 o2 = 3728.5 lya = 1215.67 mg2 = 2798.75 c3 = 1908.73 o3d = 5006.91 c4 = 1549.06 qop4 = res[res[:, 0] >= 3] thresh = 0.01 autozNum = 1.0 * (np.abs(qop4[:, 3] - qop4[:, 1]) < thresh).sum() marzNum = 1.0 * (np.abs(qop4[:, 2] - qop4[:, 1]) < thresh).sum() runzxNum = 1.0 * (np.abs(qop4[:, 4] - qop4[:, 1]) < thresh).sum() runzeNum = 1.0 * (np.abs(qop4[:, 5] - qop4[:, 1]) < thresh).sum() autoz = ((1 + qop4[:, 3]) / (1 + qop4[:, 1]))[np.abs(qop4[:, 3] - qop4[:, 1]) > thresh] marz = ((1 + qop4[:, 2]) / (1 + qop4[:, 1]))[np.abs(qop4[:, 2] - qop4[:, 1]) > thresh] runzx = ((1 + qop4[:, 4]) / (1 + qop4[:, 1]))[np.abs(qop4[:, 4] - qop4[:, 1]) > thresh] runze = ((1 + qop4[:, 5]) / (1 + qop4[:, 1]))[np.abs(qop4[:, 5] - qop4[:, 1]) > thresh] msr = (100.0 * (1.0 - (1.0 * marz.size / qop4[:, 0].size))) asr = (100.0 * (1.0 - (1.0 * autoz.size / qop4[:, 0].size))) rxsr = (100.0 * (1.0 - (1.0 * runzx.size / qop4[:, 0].size))) resr = (100.0 * (1.0 - (1.0 * runze.size / qop4[:, 0].size))) print("marz %0.3f%% success rate, %0.3f%% fail rate" % (msr, 100 - msr)) print("autoz %0.3f%% success rate, %0.3f%% fail rate" % (asr, 100 - asr)) print("runzx %0.3f%% success rate, %0.3f%% fail rate" % (rxsr, 100 - rxsr)) print("runze %0.3f%% success rate, %0.3f%% fail rate" % (resr, 100 - resr)) fig = plt.figure(figsize=(7, 7), dpi=300) matplotlib.rcParams.update({'font.size': 12}) #matplotlib.rcParams['axes.labelsize'] = 20 rc('text', usetex=False) matplotlib.rcParams['xtick.labelsize'] = 12 matplotlib.rcParams['ytick.labelsize'] = 12 gs = gridspec.GridSpec(4, 1) gs.update(wspace=0.0, hspace=0.0) ax0 = fig.add_subplot(gs[0]) ax1 = fig.add_subplot(gs[1], sharex=ax0) ax2 = fig.add_subplot(gs[2], sharex=ax0) ax3 = fig.add_subplot(gs[3], sharex=ax0) bins = np.linspace(0.2, 1.5, 200) width = 1 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 hist0 = 100 * np.histogram(runzx, bins=bins)[0] / autozNum hist1 = 100 * np.histogram(runze, bins=bins)[0] / marzNum hist2 = 100 * np.histogram(autoz, bins=bins)[0] / runzxNum hist3 = 100 * np.histogram(marz, bins=bins)[0] / runzeNum maxes = max([max(hist0), max(hist1), max(hist2), max(hist3)]) ax0.plot([halpha / o3d, halpha / o3d], [1.1 * maxes, 0], ':', color='#555555') ax1.plot([halpha / o3d, halpha / o3d], [1.1 * maxes, 0], ':', color='#555555') ax2.plot([halpha / o3d, halpha / o3d], [1.1 * maxes, 0], ':', color='#555555') ax3.plot([halpha / o3d, halpha / o3d], [1.1 * maxes, 0], ':', color='#555555') ax2.text(1.23, 2, r'$\mathrm{H}\alpha\mathrm{/O[III]}$', fontsize=14) ax0.plot([o2 / o3d, o2 / o3d], [1.1 * maxes, 0], ':', color='#555555') ax1.plot([o2 / o3d, o2 / o3d], [1.1 * maxes, 0], ':', color='#555555') ax2.plot([o2 / o3d, o2 / o3d], [1.1 * maxes, 0], ':', color='#555555') ax3.plot([o2 / o3d, o2 / o3d], [1.1 * maxes, 0], ':', color='#555555') ax0.text(0.63, 0.92, r'$\mathrm{O[II]/O[III]}$', fontsize=14) ax0.plot([mg2 / halpha, mg2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax1.plot([mg2 / halpha, mg2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax2.plot([mg2 / halpha, mg2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax3.plot([mg2 / halpha, mg2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax0.text(0.31, 0.92, r'$\mathrm{MgII/H}\alpha$', fontsize=14) ax0.plot([o2 / halpha, o2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax1.plot([o2 / halpha, o2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax2.plot([o2 / halpha, o2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax3.plot([o2 / 
halpha, o2 / halpha], [1.1 * maxes, 0], ':', color='#555555') ax3.text(0.455, 1.5, r'$\mathrm{O[II]/H}\alpha$', fontsize=14) a0 = ax0.bar(center, hist0, align='center', width=width, edgecolor="none", facecolor="#E53935", label="Runz xcor") a1 = ax1.bar(center, hist1, align='center', width=width, edgecolor="none", facecolor="#AB47BC", label="Runz ELM") a2 = ax2.bar(center, hist2, align='center', width=width, edgecolor="none", facecolor="#4CAF50", label="Autoz") a3 = ax3.bar(center, hist3, align='center', width=width, edgecolor="none", facecolor="#2196F3", label="Marz") ''' ax0.plot(center, hist0, linewidth=2, color="#E53935") ax1.plot(center, hist1, linewidth=2, color="#AB47BC") ax2.plot(center, hist2, linewidth=2, color="#4CAF50") ax3.plot(center, hist3, linewidth=2, color="#2196F3") ''' ax0.yaxis.set_major_locator(plt.MaxNLocator(4)) ax1.yaxis.set_major_locator(plt.MaxNLocator(4)) ax2.yaxis.set_major_locator(plt.MaxNLocator(4)) ax3.yaxis.set_major_locator(plt.MaxNLocator(4)) ax0.yaxis.get_major_ticks()[0].label1.set_visible(False) ax1.yaxis.get_major_ticks()[0].label1.set_visible(False) ax2.yaxis.get_major_ticks()[0].label1.set_visible(False) ax3.set_xlabel(r"$(1 + z_A)/(1 + z_M)$", fontsize=18) ax0.get_xaxis().set_visible(False) ax1.get_xaxis().set_visible(False) ax2.get_xaxis().set_visible(False) xmax = 1.6 ax0.set_ylim(0, 1.1 * max(hist0)) ax1.set_ylim(0, 1.1 * max(hist1)) ax2.set_ylim(0, 1.1 * max(hist2)) ax3.set_ylim(0, 1.1 * max(hist3)) ax0.set_xlim(0.2, xmax) ax1.set_xlim(0.2, xmax) ax2.set_xlim(0.2, xmax) ax3.set_xlim(0.2, xmax) ax0.text(0.98 * xmax, 0.9 * max(hist0), 'Runz xcor', fontsize=14, horizontalalignment='right') ax1.text(0.98 * xmax, 0.9 * max(hist1), 'Runz ELM', fontsize=14, horizontalalignment='right') ax2.text(0.98 * xmax, 0.9 * max(hist2), 'Autoz', fontsize=14, horizontalalignment='right') ax3.text(0.98 * xmax, 0.9 * max(hist3), 'Marz', fontsize=14, horizontalalignment='right') figtext(0.03, 0.7, r"probability to misidentify [%]", fontdict={'fontsize': 15}, rotation=90) #fig.savefig("errorRateqop3.png", bbox_inches='tight', dpi=600, transparent=True) fig.savefig("errorRateqop3.pdf", bbox_inches='tight', transparent=True)
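# Aside: a sketch of the stacked-panel layout used in plotErrorRate above: a GridSpec
# with hspace=0, a shared x-axis, hidden inner x tick labels, and histograms expressed
# as percentages of their own sample size. The data here are random placeholders.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

samples = [np.random.uniform(0.2, 1.5, n) for n in (300, 400, 500, 600)]
bins = np.linspace(0.2, 1.5, 200)
center = (bins[:-1] + bins[1:]) / 2
width = bins[1] - bins[0]

fig = plt.figure(figsize=(7, 7))
gs = gridspec.GridSpec(4, 1)
gs.update(wspace=0.0, hspace=0.0)
axes = [fig.add_subplot(gs[0])]
for i in range(1, 4):
    axes.append(fig.add_subplot(gs[i], sharex=axes[0]))
for ax, sample in zip(axes, samples):
    hist = 100 * np.histogram(sample, bins=bins)[0] / sample.size
    ax.bar(center, hist, align='center', width=width, edgecolor="none")
    ax.yaxis.set_major_locator(plt.MaxNLocator(4))
for ax in axes[:-1]:
    ax.get_xaxis().set_visible(False)
axes[-1].set_xlabel(r"$(1 + z_A)/(1 + z_M)$", fontsize=18)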
def plot(self, t, dynamic=False): """Produces a quick plot with the internal states at time t. Parameters ---------- t : float Dimensional time (in 'time_units') at which to plot. """ import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib import cm, colors t_in_seconds = t * self.time_scaling_factor self.fig = plt.figure(figsize=self.figsize) self.gridspec = gridspec.GridSpec(self.n_rows, self.n_cols) self.plots = {} self.time_lines = {} self.colorbars = {} self.axes = [] # initialize empty handles, to be created only if the appropriate plots are made solution_handles = [] for k, (key, variable_lists) in enumerate(self.variables.items()): ax = self.fig.add_subplot(self.gridspec[k]) self.axes.append(ax) x_min, x_max, y_min, y_max = self.axis_limits[key] ax.set_xlim(x_min, x_max) if y_min is not None and y_max is not None: ax.set_ylim(y_min, y_max) ax.xaxis.set_major_locator(plt.MaxNLocator(3)) self.plots[key] = defaultdict(dict) variable_handles = [] # Set labels for the first subplot only (avoid repetition) if variable_lists[0][0].dimensions == 0: # 0D plot: plot as a function of time, indicating time t with a line ax.set_xlabel("Time [{}]".format(self.time_unit)) for i, variable_list in enumerate(variable_lists): for j, variable in enumerate(variable_list): if len(variable_list) == 1: # single variable -> use linestyle to differentiate model linestyle = self.linestyles[i] else: # multiple variables -> use linestyle to differentiate # variables (color differentiates models) linestyle = self.linestyles[j] full_t = self.ts_seconds[i] (self.plots[key][i][j], ) = ax.plot( full_t / self.time_scaling_factor, variable(full_t, warn=False), # color=self.colors[i], linestyle=linestyle, ) variable_handles.append(self.plots[key][0][j]) solution_handles.append(self.plots[key][i][0]) y_min, y_max = ax.get_ylim() ax.set_ylim(y_min, y_max) (self.time_lines[key], ) = ax.plot( [ t_in_seconds / self.time_scaling_factor, t_in_seconds / self.time_scaling_factor, ], [y_min, y_max], "k--", lw=1.5, ) elif variable_lists[0][0].dimensions == 1: # 1D plot: plot as a function of x at time t # Read dictionary of spatial variables spatial_vars = self.spatial_variable_dict[key] spatial_var_name = list(spatial_vars.keys())[0] ax.set_xlabel( "{} [{}]".format(spatial_var_name, self.spatial_unit), ) for i, variable_list in enumerate(variable_lists): for j, variable in enumerate(variable_list): if len(variable_list) == 1: # single variable -> use linestyle to differentiate model linestyle = self.linestyles[i] else: # multiple variables -> use linestyle to differentiate # variables (color differentiates models) linestyle = self.linestyles[j] (self.plots[key][i][j], ) = ax.plot( self.first_dimensional_spatial_variable[key], variable(t_in_seconds, **spatial_vars, warn=False), # color=self.colors[i], linestyle=linestyle, zorder=10, ) variable_handles.append(self.plots[key][0][j]) solution_handles.append(self.plots[key][i][0]) # add lines for boundaries between subdomains for boundary in variable_lists[0][0].internal_boundaries: boundary_scaled = boundary * self.spatial_factor ax.axvline(boundary_scaled, color="0.5", lw=1, zorder=0) elif variable_lists[0][0].dimensions == 2: # Read dictionary of spatial variables spatial_vars = self.spatial_variable_dict[key] # there can only be one entry in the variable list variable = variable_lists[0][0] # different order based on whether the domains are x-r, x-z or y-z if self.is_x_r[key] is True: x_name = list(spatial_vars.keys())[1][0] y_name = 
list(spatial_vars.keys())[0][0] x = self.second_dimensional_spatial_variable[key] y = self.first_dimensional_spatial_variable[key] var = variable(t_in_seconds, **spatial_vars, warn=False) else: x_name = list(spatial_vars.keys())[0][0] y_name = list(spatial_vars.keys())[1][0] x = self.first_dimensional_spatial_variable[key] y = self.second_dimensional_spatial_variable[key] # need to transpose if domain is x-z if self.is_y_z[key] is True: var = variable(t_in_seconds, **spatial_vars, warn=False) else: var = variable(t_in_seconds, **spatial_vars, warn=False).T ax.set_xlabel("{} [{}]".format(x_name, self.spatial_unit)) ax.set_ylabel("{} [{}]".format(y_name, self.spatial_unit)) vmin, vmax = self.variable_limits[key] # store the plot and the var data (for testing) as cant access # z data from QuadMesh or QuadContourSet object if self.is_y_z[key] is True: self.plots[key][0][0] = ax.pcolormesh( x, y, var, vmin=vmin, vmax=vmax, ) else: self.plots[key][0][0] = ax.contourf(x, y, var, levels=100, vmin=vmin, vmax=vmax) self.plots[key][0][1] = var if vmin is None and vmax is None: vmin = ax_min(var) vmax = ax_max(var) self.colorbars[key] = self.fig.colorbar( cm.ScalarMappable(colors.Normalize(vmin=vmin, vmax=vmax)), ax=ax, ) # Set either y label or legend entries if len(key) == 1: title = split_long_string(key[0]) ax.set_title(title) else: ax.legend( variable_handles, [split_long_string(s, 6) for s in key], bbox_to_anchor=(0.5, 1), loc="lower center", ) # Set global legend if len(self.labels) > 1: fig_legend = self.fig.legend(solution_handles, self.labels, loc="lower right") # Get the position of the top of the legend in relative figure units # There may be a better way ... try: legend_top_inches = fig_legend.get_window_extent( renderer=self.fig.canvas.get_renderer()).get_points()[1, 1] fig_height_inches = (self.fig.get_size_inches() * self.fig.dpi)[1] legend_top = legend_top_inches / fig_height_inches except AttributeError: # pragma: no cover # When testing the examples we set the matplotlib backend to "Template" # which means that the above code doesn't work. Since this is just for # that particular test we can just skip it legend_top = 0 else: legend_top = 0 # Fix layout if dynamic: slider_top = 0.05 else: slider_top = 0 bottom = max(legend_top, slider_top) self.gridspec.tight_layout(self.fig, rect=[0, bottom, 1, 1])
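# Aside: a sketch of the 2D-plot pattern used above. When vmin/vmax are imposed on
# contourf, the colorbar is built from an explicit ScalarMappable so its range matches
# the imposed limits rather than the drawn contour levels. The field is synthetic.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors

x = np.linspace(0, 1, 50)
y = np.linspace(0, 1, 40)
var = np.sin(4 * np.outer(y, x))
vmin, vmax = -1.0, 1.0

fig, ax = plt.subplots()
ax.contourf(x, y, var, levels=100, vmin=vmin, vmax=vmax)
fig.colorbar(cm.ScalarMappable(colors.Normalize(vmin=vmin, vmax=vmax)), ax=ax)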
def createContourPlot(self, title, ax, x, y, z, rangey, rangez, startTime, endTime, sdt_count): tmin = time.mktime(startTime.timetuple()) tmax = time.mktime(endTime.timetuple()) tgrid_max = 1000 # Reasonable maximum width for time-depth-flot plot is about 1000 pixels dgrid_max = 200 # Height of time-depth-flot plot area is 200 pixels dinc = 0.5 # Average vertical resolution of AUV Dorado nlevels = 255 # Number of color filled contour levels zmin = rangez[0] zmax = rangez[1] dmin = rangey[0] dmax = rangey[1] scale_factor = 1 # 2 points define a line, take half the number of simpledepthtime points sdt_count = int(max(sdt_count, 2) / 2) if sdt_count > tgrid_max: sdt_count = tgrid_max xi = np.linspace(tmin, tmax, sdt_count) #print 'xi = %s' % xi # Make depth spacing dinc m, limit to time-depth-flot resolution (dgrid_max) y_count = int((dmax - dmin) / dinc) if y_count > dgrid_max: y_count = dgrid_max yi = np.linspace(dmin, dmax, y_count) #print 'yi = %s' %yi try: scale_factor = float(tmax - tmin) / (dmax - dmin) except ZeroDivisionError as e: logger.warn( 'Not setting scale_factor. Scatter plots will still work.') contour_flag = False scale_factor = 1 else: logger.warn('self.scale_factor = %f', scale_factor) xi = xi / scale_factor xg = [xe / scale_factor for xe in x] contour_flag = True zi = [] cs = None # Register the signal function handler signal.signal(signal.SIGALRM, self.handler) # Define a timeout of 90 seconds for gridding functions signal.alarm(90) if not self.data: logger.warn('no data found to plot') signal.alarm(0) raise Exception('no data') if contour_flag: try: logger.warn( 'Gridding data with sdt_count = %d, and y_count = %d', sdt_count, y_count) zi = self.gridData(xg, y, z, xi, yi) signal.alarm(0) except KeyError: logger.warn('Got KeyError. Could not grid the data') contour_flag = False scale_factor = 1 try: # use RBF logger.warn('Trying radial basis function') xi, yi, zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z) contour_flag = True signal.alarm(0) except Exception as e: logger.warn('Could not grid the data' + str(e)) except Exception as e: logger.warn('Could not grid the data' + str(e)) contour_flag = False try: # use RBF logger.warn('Trying radial basis function') xi, yi, zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z) contour_flag = True signal.alarm(0) except Exception as e: logger.warn('Could not grid the data' + str(e)) try: if scale_factor > 1 and contour_flag: ax.set_xlim(tmin / scale_factor, tmax / scale_factor) else: ax.set_xlim(tmin, tmax) self.scale_factor = scale_factor ax.set_ylim([dmax, dmin]) ax.set_ylabel('depth (m)', fontsize=8) ax.tick_params(axis='both', which='major', labelsize=8) ax.tick_params(axis='both', which='minor', labelsize=8) if contour_flag: logger.debug('Contouring the data') cs = ax.contourf(xi, yi, zi, levels=np.linspace(zmin, zmax, nlevels), cmap=self.cm_jetplus, extend='both') # this will show the points where the contouring occurs #ax.scatter(x,y,marker='.',s=2,c='k',lw=0) else: logger.debug('Plotting the data') cs = ax.scatter(x, y, c=z, s=20, marker='.', vmin=zmin, vmax=zmax, lw=0, alpha=1.0, cmap=self.cm_jetplus) # limit the number of ticks max_yticks = 5 yloc = plt.MaxNLocator(max_yticks) ax.yaxis.set_major_locator(yloc) except Exception as e: logger.debug('Error - trying to plot the data') cs = ax.scatter(x, y, c=z, s=20, marker='.', vmin=zmin, vmax=zmax, lw=0, alpha=1.0, cmap=self.cm_jetplus) return cs, zi
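# Aside: a stand-alone sketch of the SIGALRM time-out used above to bound slow
# gridding calls. Unix-only; slow_gridding() is a stand-in for the real
# gridData/gridDataRbf methods.
import signal

def handler(signum, frame):
    raise TimeoutError("gridding took too long")

def slow_gridding():
    return "gridded"

signal.signal(signal.SIGALRM, handler)
signal.alarm(90)            # handler fires if 90 s elapse before alarm(0)
try:
    result = slow_gridding()
finally:
    signal.alarm(0)         # always cancel the pending alarm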
data[0][0] + (data[0][1] - data[0][0]) * i for i in range(len(data[0])) ] #magic number comes from experiments width = int(len(data[0]) / 29) length = 4 if width * 100 > pow(2, 16): width = int(pow(2, 16)) print('Reached max width for plot: ' + str(width) + ' pixels') fig, ax = plt.subplots(figsize=(width, length)) ax.grid(True) ax.yaxis.set_major_locator(plt.MaxNLocator(5)) for i in range(len(data) - 1): ax.plot(data[0], data[i + 1], label=labels[i + 1]) start, end = ax.get_xlim() # formatting x axis marking hours = mdates.HourLocator(interval=1) h_fmt = mdates.DateFormatter('%H:%M:%S') # formatting density of x axis markings ax.xaxis.set_major_locator(hours) ax.xaxis.set_major_formatter(h_fmt) ax.xaxis.set_ticks(np.arange(start, end, 0.00035)) ax.tick_params(axis='x', rotation=45)
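# Aside: a small sketch of the date-axis formatting above: hourly major ticks rendered
# as HH:MM:SS and rotated 45 degrees. The timestamps are toy data.
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

times = [dt.datetime(2021, 1, 1) + dt.timedelta(minutes=30 * i) for i in range(10)]
values = list(range(10))

fig, ax = plt.subplots()
ax.plot(times, values)
ax.xaxis.set_major_locator(mdates.HourLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax.tick_params(axis='x', rotation=45)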
fig, ax1 = plt.subplots() color = "blue" ax1.set_xlabel('date') for tick in ax1.get_xticklabels(): tick.set_rotation(45) ax1.set_ylabel('Number of Positive Cases', color=color) ax1.plot(date, num_pos, color=color) #ax1.plot(date, ave_num, color=color) ax1.tick_params(axis='y', labelcolor=color) ax2 = ax1.twinx() color = 'red' ax2.set_ylabel('Positive Rate', color=color) ax2.plot(date, rate_pos, color=color) #ax2.plot(date, ave_rate, color=color) ax2.tick_params(axis='y', labelcolor=color) ax1.xaxis.set_major_locator(plt.MaxNLocator(25)) fig.tight_layout() #plt.show() #7-Day Rolling Average fig,ax3 = plt.subplots() color = "blue" ax3.set_xlabel('date') for tick in ax3.get_xticklabels(): tick.set_rotation(45) ax3.set_ylabel('Number of Positive Cases', color=color) ax3.plot(date, ave_num, color=color) ax3.tick_params(axis='y', labelcolor=color) ax4 = ax3.twinx() color = 'red' ax4.set_ylabel('Positive Rate', color=color)
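# Aside: a minimal twin-axis sketch matching the case/positivity plot above: one
# shared x axis, two y axes, each coloured like its line. The values are invented.
import matplotlib.pyplot as plt

dates = ["06-01", "06-02", "06-03", "06-04"]
num_pos = [10, 14, 9, 12]
rate_pos = [0.05, 0.07, 0.04, 0.06]

fig, ax1 = plt.subplots()
ax1.plot(dates, num_pos, color="blue")
ax1.set_ylabel("Number of Positive Cases", color="blue")
ax1.tick_params(axis='y', labelcolor="blue")

ax2 = ax1.twinx()
ax2.plot(dates, rate_pos, color="red")
ax2.set_ylabel("Positive Rate", color="red")
ax2.tick_params(axis='y', labelcolor="red")
fig.tight_layout()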
def model_comparisson(halo, mask=False): fig, axes = plt.subplots(ncols=3, nrows=1, sharey=True) bmin = halo.bmin bmaj = halo.bmaj scale = 1. model4 = halo.result4.model model6 = halo.result6.model model8 = halo.result8.model ra = halo.ra.value dec = halo.dec.value for axi in axes.flat: axi.xaxis.set_major_locator(plt.MaxNLocator(5)) axi.xaxis.set_major_formatter(ScalarFormatter(useOffset=False)) axi.yaxis.set_major_formatter(ScalarFormatter(useOffset=False)) fig.set_size_inches(3.2*5,5.1) vmin=-2*(halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value vmax=4*(halo.result4.params_units[0]) data = (np.copy(halo.result4.data)/halo.pix_area).to(uJyarcsec2).value noise = (halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value masked_data = data.copy() #if mask: masked_data[halo.result4.image_mask==1]= -10000. LEVEL = np.arange(1,7)*(halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value #NORM = mplc.LogNorm(vmin=0.4*(halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value, # vmax=20*(halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value) #NORM = SymLogNorm(2.*halo.result4.params_units[0] , linscale=1.0, vmin=vmin, vmax=vmax) #NORMres = mplc.Normalize(vmin=-2.*(halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value, # vmax=1.*(data/halo.pix_area).to(uJyarcsec2).value.max()) NORMres = mplc.Normalize(vmin=-2.*noise, vmax=2.*masked_data.max()) im1 = axes[0].imshow(masked_data, cmap='inferno', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres) try: cont1 = axes[0].contour((model4/halo.pix_area).to(uJyarcsec2).value, colors='white', levels=LEVEL, alpha=0.6, extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.) cont2 = axes[0].contour(masked_data, colors='lightgreen', levels=np.array([-999.8]), alpha=0.9, linestyles='-', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5) except: print('PROCESSING: Failed making contours') pass axes[0].set_title('Circular\n $S_{\\mathrm{1.5 GHz}}=%.1f\\pm%.1f$ mJy' % (halo.result4.flux_val.value, halo.result4.flux_err.value), fontsize=15) axes[0].set_xlabel('RA [deg]', fontsize=labelsize) axes[0].set_ylabel('DEC [deg]', fontsize=labelsize) axes[0].grid(color='white', linestyle='-', alpha=0.25) draw_sizebar(halo,axes[0], scale) draw_ellipse(halo,axes[0], bmin, bmaj) plt.tight_layout() im2 = axes[1].imshow(masked_data, cmap='inferno', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres) try: cont3 = axes[1].contour((model6/halo.pix_area).to(uJyarcsec2).value, colors='white', levels=LEVEL, alpha=0.6, extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.) 
cont4 = axes[1].contour(masked_data, colors='lightgreen', levels=np.array([-999.8]), alpha=0.9, linestyles='-', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5) except: print('PROCESSING: Failed making contours') pass axes[1].set_title('Elliptical\n $S_{\\mathrm{1.5 GHz}}=%.1f\\pm%.1f$ mJy' % (halo.result6.flux_val.value, halo.result6.flux_err.value), fontsize=15) axes[1].set_xlabel('RA [deg]', fontsize=labelsize) axes[1].set_ylabel('DEC [deg]', fontsize=labelsize) axes[1].grid(color='white', linestyle='-', alpha=0.25) draw_sizebar(halo,axes[1], scale) draw_ellipse(halo,axes[1], bmin, bmaj) plt.tight_layout() im3 = axes[2].imshow(masked_data, cmap='inferno', origin='lower', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres) try: cont5 = axes[2].contour((model8/halo.pix_area).to(uJyarcsec2).value, colors='white', levels=LEVEL, alpha=0.6, extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.) cont6 = axes[2].contour(masked_data, colors='lightgreen', levels=np.array([-999.8]), alpha=0.9, linestyles='-', extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5) except: print('PROCESSING: Failed making contours') pass axes[2].set_title('Skewed \n $S_{\\mathrm{1.5 GHz}}=%.1f\\pm%.1f$ mJy' % (halo.result8.flux_val.value, halo.result8.flux_err.value), fontsize=15) axes[2].set_xlabel('RA [deg]', fontsize=labelsize) axes[2].set_ylabel('DEC [deg]', fontsize=labelsize) axes[2].grid(color='white', linestyle='-', alpha=0.25) draw_sizebar(halo,axes[2], scale) draw_ellipse(halo,axes[2], bmin, bmaj) plt.tight_layout() import matplotlib.ticker as ticker cbar = fig.colorbar(im3) cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$',fontsize=labelsize) #cbar.formatter = ScalarFormatter(useMathText=False) #cbar.formatter = ticker.LogFormatter(base=10.,labelOnlyBase=True) cbar.formatter = ticker.FormatStrFormatter('%.2f') plt.savefig(halo.plotPath +halo.file.replace('.fits','')+'_mcmc_model_ALL.pdf') #plt.show() plt.clf() plt.close(fig)
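# Aside: a quick sketch of the two colorbar tick-formatter flavours used above:
# FormatStrFormatter takes old-style '%' templates, while StrMethodFormatter takes
# str.format templates with the tick value named 'x'. The images are random data.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

fig, (ax1, ax2) = plt.subplots(ncols=2)
im1 = ax1.imshow(np.random.rand(8, 8), origin='lower')
im2 = ax2.imshow(np.random.rand(8, 8), origin='lower')
fig.colorbar(im1, ax=ax1, format=ticker.FormatStrFormatter('%.2f'))
fig.colorbar(im2, ax=ax2, format=ticker.StrMethodFormatter('{x:.2f}'))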