import numpy.ma as ma


def printstuff(vals, vals_sum, file_count):
    # report statistics for the current values and for the running sum of values
    print(file_count)
    print('----------------')
    print('sum', ma.sum(vals))
    print('total sum', ma.sum(vals_sum))
    print('perc masked:', ma.count_masked(vals_sum) / vals_sum.size * 100., '%')
    print('----------------')
    print('max', ma.amax(vals))
    print('min', ma.amin(vals))
    print('\n----------------')
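# Minimal usage sketch (illustrative only; the array values are invented).
# Masked entries are ignored by ma.sum/ma.amax/ma.amin and counted by
# ma.count_masked:
vals = ma.masked_less(ma.array([1., 2., -999., 4.]), 0.)
printstuff(vals, vals, file_count=1)   # reports 25% masked for this example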
def normalizer(data_bank, special_input, data_bank_structure): # TODO: do sth with special_input # data_bank: dimensions are in 'Rows' for row_index in range(size(data_bank, 0) - 1): current_dimension = data_bank[row_index, :] current_dimension = (current_dimension - mean(current_dimension)) / \ (amax(current_dimension) - amin(current_dimension)) data_bank[row_index, :] = current_dimension return data_bank
def privImshow(img, noise, extent, ax=None, **kwargs):
    args = {'cmap': colormap, 'sigma': 1}   # 'colormap' is a module-level default
    args.update(kwargs)
    ax = ax or plt.gca()
    level0 = noise * args['sigma']
    lev = [level0 * 2**i for i in range(10)]
    col = ax.imshow(img, cmap=args['cmap'],
                    norm=mpl.colors.SymLogNorm(linthresh=level0, linscale=0.5,
                                               vmin=lev[0], vmax=0.5 * ma.amax(img),
                                               base=10),   # base is required explicitly in matplotlib >= 3.2
                    extent=extent)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad='3%')
    cbar = plt.colorbar(col, cax=cax)
    cbar.set_label('Flux Density [Jy/beam]')
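# Hypothetical usage sketch (the random test image is invented; assumes the
# module-level 'colormap', mpl, ma and make_axes_locatable used by privImshow
# are in scope):
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
img = rng.normal(0., 1e-3, (128, 128))
img[60:68, 60:68] += 0.5   # inject a bright fake source
privImshow(img, noise=1e-3, extent=(1, -1, -1, 1), sigma=3)
plt.show()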
def plot_aligned_maps(maps, masked_shift=True, beam='max', fig_size='aanda*', **kwargs):
    '''Derive shifts and plot images.

    All angles are in rad and converted if needed.
    If the beam is given explicitly, please give it in mas (PA in degree).
    '''
    sigma = kwargs.get('sigma', 1)
    files = [eh.image.load_fits(m, aipscc=True) for m in maps]
    fovx = np.array([m.fovx() for m in files])
    fovy = np.array([m.fovy() for m in files])
    header = [read_header(m) for m in maps]
    freq1 = np.round(header[0]['CRVAL3'] * 1e-9, 1)
    freq2 = np.round(header[1]['CRVAL3'] * 1e-9, 1)
    maps_beam = [[h['BMAJ'] * np.pi / 180, h['BMIN'] * np.pi / 180, h['BPA'] * np.pi / 180]
                 for h in header]
    maps_ps = np.array([dm.psize for dm in files])
    naxis1 = (fovx / maps_ps).astype(int)
    naxis2 = (fovy / maps_ps).astype(int)
    ppb = [PXPERBEAM(bp[0], bp[1], pxi) for bp, pxi in zip(maps_beam, maps_ps)]
    noise1 = header[0]['NOISE']
    noise2 = header[1]['NOISE']
    r2m = 180 / np.pi * 3.6e6   # rad -> mas

    if beam == 'mean':
        _maj = np.mean([maps_beam[0][0], maps_beam[1][0]])
        _min = np.mean([maps_beam[0][1], maps_beam[1][1]])
        _pos = np.mean([maps_beam[0][2], maps_beam[1][2]])
        sys.stdout.write(' Will use mean beam.\n')
    if beam == 'max':
        if maps_beam[0][0] > maps_beam[1][0] and maps_beam[0][1] > maps_beam[1][1]:
            _maj = maps_beam[0][0]
            _min = maps_beam[0][1]
            _pos = maps_beam[0][2]
        elif maps_beam[0][0] < maps_beam[1][0] and maps_beam[0][1] < maps_beam[1][1]:
            _maj = maps_beam[1][0]
            _min = maps_beam[1][1]
            _pos = maps_beam[1][2]
        else:
            print('could not derive max beam.')
            return
        sys.stdout.write(' Will use max beam.\n')
    if beam == 'median':
        _maj = np.median([maps_beam[0][0], maps_beam[1][0]])
        _min = np.median([maps_beam[0][1], maps_beam[1][1]])
        _pos = np.median([maps_beam[0][2], maps_beam[1][2]])
        sys.stdout.write(' Will use median beam.\n')
    if isinstance(beam, list):
        _maj, _min, _pos = beam
        _maj /= r2m                    # mas -> rad
        _min /= r2m                    # mas -> rad
        _pos = _pos * np.pi / 180      # PA in degree -> rad
    common_beam = [_maj, _min, _pos]
    sys.stdout.write('Will use beam of ({} , {} ) mas at PA {} degree.\n'.format(
        _maj * r2m, _min * r2m, _pos * 180 / np.pi))

    # derive common parameters
    common_ps = min(maps_ps)
    common_fov = min([min(x, y) for x, y in zip(fovx, fovy)])
    common_naxis = int(common_fov / common_ps)
    common_ppb = PXPERBEAM(common_beam[0], common_beam[1], common_ps)
    noise1_r = noise1 / ppb[0] * common_ppb
    noise2_r = noise2 / ppb[1] * common_ppb

    # regrid and blur the clean maps onto the common grid; the grids only match
    # if both pixel size and fov agree
    if maps_ps[0] != maps_ps[1] or fovx[0] != fovx[1]:
        file1regrid = files[0].regrid_image(common_fov, common_naxis, interp='linear')
        file2regrid = files[1].regrid_image(common_fov, common_naxis, interp='linear')
    else:
        file1regrid, file2regrid = files[0], files[1]
    file1regridblur = file1regrid.blur_gauss(common_beam, frac=1)
    file2regridblur = file2regrid.blur_gauss(common_beam, frac=1)
    file1rb = file1regridblur.imarr(pol='I').copy()
    file2rb = file2regridblur.imarr(pol='I').copy()

    mask1 = np.zeros_like(file1rb, dtype=bool)
    mask2 = np.zeros_like(file2rb, dtype=bool)

    # cut out inner, optically thick part of the image
    if 'npix_x' in kwargs:
        npix_x = kwargs.get('npix_x', False)
        npix_y = kwargs.get('npix_y', False)
        px_min_x = int(common_naxis / 2 - npix_x)
        px_max_x = int(common_naxis / 2 + npix_x)
        px_min_y = int(common_naxis / 2 - npix_y)
        px_max_y = int(common_naxis / 2 + npix_y)
        px_range_x = np.arange(px_min_x, px_max_x + 1, 1)
        px_range_y = np.arange(px_min_y, px_max_y + 1, 1)
        index = np.meshgrid(px_range_y, px_range_x)
        mask1[tuple(index)] = True
        mask2[tuple(index)] = True
    if 'cut_left' in kwargs:
        cut_left = kwargs.get('cut_left', False)
        px_max = int(common_naxis / 2. + cut_left)
        px_range_x = np.arange(0, px_max, 1)
        mask1[:, px_range_x] = True
        mask2[:, px_range_x] = True
    if 'cut_right' in kwargs:
        cut_right = kwargs.get('cut_right', False)
        px_max = int(common_naxis / 2 - cut_right)
        px_range_x = np.arange(px_max, common_naxis, 1)
        mask1[:, px_range_x] = True
        mask2[:, px_range_x] = True
    if 'radius' in kwargs:
        radius = kwargs.get('radius', False)
        # skimage.draw.disk((row, col), radius) replaces the removed skimage.draw.circle
        rr, cc = disk((int(len(file1rb) / 2), int(len(file1rb) / 2)), radius)
        mask1[rr, cc] = True
        mask2[rr, cc] = True
    if 'e_maj' in kwargs:
        e_maj = kwargs.get('e_maj', False)
        e_min = kwargs.get('e_min', False)
        e_pa = kwargs.get('e_pa', False)
        e_xoffset = kwargs.get('e_xoffset', False)
        if e_xoffset is not False:
            x, y = int(len(file1rb) / 2) + e_xoffset, int(len(file1rb) / 2)
        else:
            x, y = int(len(file1rb) / 2), int(len(file1rb) / 2)
        if e_pa is not False:
            e_pa = 90 - e_pa
        else:
            e_pa = maps_beam[0][2]
        rr, cc = ellipse(y, x, e_maj, e_min, rotation=e_pa * np.pi / 180)
        mask1[rr, cc] = True
        mask2[rr, cc] = True
    if 'flux_cut' in kwargs:
        flux_cut = kwargs.get('flux_cut', False)
        mask1[file1rb > flux_cut * ma.amax(file1rb)] = True
        mask2[file2rb > flux_cut * ma.amax(file2rb)] = True

    file1rbm = file1rb.copy()
    file2rbm = file2rb.copy()
    file1rbm[mask1] = 0
    file2rbm[mask2] = 0

    # align images; ps converted from rad/px to mas/px
    if masked_shift:
        sys.stdout.write('Will derive the shift using the mask during cross-correlation\n')
        file2rb_shift = align(file1rb, file2rb, common_ps * r2m, common_ps * r2m,
                              mask1=~mask1, mask2=~mask2)
    else:
        sys.stdout.write('Will derive the shift using already masked images\n')
        file2rb_shift = align(file1rbm, file2rbm, common_ps * r2m, common_ps * r2m)

    file1_plt = files[0].regrid_image(fovx[0], naxis1[0]).blur_gauss(maps_beam[0], frac=1).imarr(pol='I')
    file2_plt = files[1].regrid_image(fovx[1], naxis1[1]).blur_gauss(maps_beam[1], frac=1).imarr(pol='I')
    file1_plt = file1_plt * ppb[0]
    file2_plt = file2_plt * ppb[1]
    file1rb_plt = file1rb * common_ppb
    file2rb_plt = file2rb * common_ppb
    file1rbm_plt = file1rbm * common_ppb
    file2rbm_plt = file2rbm * common_ppb
    file2rb_shift_plt = apply_shift(file2rb, file2rb_shift['shift']) * common_ppb

    ra = file2regridblur.fovx() / eh.RADPERUAS / 1e3 / 2
    dec = file2regridblur.fovy() / eh.RADPERUAS / 1e3 / 3   # dec range cropped more tightly (factor 3)
    ra_min, ra_max = -ra, ra
    dec_min, dec_max = -dec, dec
    scale1 = maps_ps[0] * r2m
    scale2 = maps_ps[1] * r2m
    x1 = np.linspace(-naxis1[0] * 0.5 * scale1, (naxis1[0] * 0.5 - 1) * scale1, naxis1[0])
    y1 = np.linspace(naxis2[0] * 0.5 * scale1, -(naxis2[0] * 0.5 - 1) * scale1, naxis2[0])
    x2 = np.linspace(-naxis1[1] * 0.5 * scale2, (naxis1[1] * 0.5 - 1) * scale2, naxis1[1])
    y2 = np.linspace(naxis2[1] * 0.5 * scale2, -(naxis2[1] * 0.5 - 1) * scale2, naxis2[1])
    extent1 = np.max(x1), np.min(x1), np.min(y1), np.max(y1)
    extent2 = np.max(x2), np.min(x2), np.min(y2), np.max(y2)

    f, ax = plt.subplots(2, 2)
    axe_ratio = 'scaled'
    ax[0, 0].set_title('{} GHz original'.format(freq1))
    ax[0, 1].set_title('{} GHz original'.format(freq2))
    ax[1, 0].set_title('{} GHz regrid $+$ blur'.format(freq1))
    ax[1, 1].set_title('{} GHz regrid $+$ blur'.format(freq2))

    level0 = min([noise1, noise2, noise1_r, noise2_r]) * sigma
    lev = [level0 * 2**i for i in range(10)]
    level1r = noise1_r * sigma
    lev1_r = [level1r * 2**i for i in range(10)]
    level2r = noise2_r * sigma
    lev2_r = [level2r * 2**i for i in range(10)]

    imax = max([ma.amax(ii) for ii in [file1_plt, file2_plt, file1rb_plt, file2rb_plt]])
    norm = mpl.colors.SymLogNorm(linthresh=level0, linscale=0.5, vmin=level0, vmax=0.5 * imax, base=10)
    im1 = ax[0, 0].imshow(file1_plt, cmap=colormap, norm=norm, extent=extent1)
    im2 = ax[0, 1].imshow(file2_plt, cmap=colormap, norm=norm, extent=extent2)
    im3 = ax[1, 0].imshow(file1rb_plt, cmap=colormap, norm=norm, extent=extent2)
    im4 = ax[1, 1].imshow(file2rb_plt, cmap=colormap, norm=norm, extent=extent2)
    divider = make_axes_locatable(ax[0, 1])
    cax = divider.append_axes('right', size='5%', pad='3%')
    cbar = f.colorbar(im2, cax=cax)
    cbar.set_label('Flux Density [Jy/beam]')
    divider = make_axes_locatable(ax[1, 1])
    cax = divider.append_axes('right', size='5%', pad='3%')
    cbar = f.colorbar(im4, cax=cax)
    cbar.set_label('Flux Density [Jy/beam]')

    for aa in ax.flat:
        aa.set(xlabel='RA [mas]', ylabel='Relative DEC [mas]')
        aa.xaxis.set_minor_locator(AutoMinorLocator())
        aa.yaxis.set_minor_locator(AutoMinorLocator())
        aa.axis(axe_ratio)
        aa.set_xlim(ra_max, ra_min)
        aa.set_ylim(dec_min, dec_max)

    figsize = set_size(fig_size, subplots=(2, 2))
    set_corrected_size(f, figsize)
    plt.tight_layout(pad=0.4)
    f.savefig(plotDir + '{:d}GHz_convolved_with_{:d}GHz.pdf'.format(int(freq2), int(freq1)))

    ######################################################
    f, ax = plt.subplots(2, 2)
    ax[0, 0].set_title('{}GHz regrid $+$ blur'.format(freq1))
    ax[0, 1].set_title('{}GHz regrid $+$ blur'.format(freq2))
    ax[1, 0].set_title('{0}GHz/ {1}GHz not shifted'.format(freq1, freq2))
    ax[1, 1].set_title('{0}GHz/ {1}GHz shifted'.format(freq1, freq2))
    imax = max([ma.amax(ii) for ii in [file1rbm_plt, file2rbm_plt]])
    norm = mpl.colors.SymLogNorm(linthresh=level0, linscale=0.5, vmin=level0, vmax=0.5 * imax, base=10)
    im = ax[0, 0].imshow(file1rbm_plt, cmap=colormap, norm=norm, extent=extent2)
    im = ax[0, 1].imshow(file2rbm_plt, cmap=colormap, norm=norm, extent=extent2)
    divider = make_axes_locatable(ax[0, 1])
    cax = divider.append_axes('right', size='5%', pad=0.05)
    cbar = f.colorbar(im, cax=cax)
    cbar.set_label('Flux Density [Jy/beam]')

    cntr1 = ax[1, 0].contour(np.flipud(file1rb_plt), linewidths=0.5, levels=lev1_r,
                             colors='grey', extent=extent2, alpha=1)
    cntr2 = ax[1, 0].contour(np.flipud(file2rb_plt), linewidths=0.5, levels=lev2_r,
                             colors='darkblue', extent=extent2, alpha=0.6)
    h1, _ = cntr1.legend_elements()
    h2, _ = cntr2.legend_elements()
    ax[1, 0].legend([h1[0], h2[0]], ['{}GHz'.format(freq1), '{}GHz'.format(freq2)],
                    loc='upper right')

    cntr1 = ax[1, 1].contour(np.flipud(file1rb_plt), linewidths=0.5, levels=lev1_r,
                             colors='grey', extent=extent2)
    cntr2 = ax[1, 1].contour(np.flipud(file2rb_shift_plt), linewidths=0.5, levels=lev2_r,
                             colors='darkblue', extent=extent2, alpha=0.6)
    h1, _ = cntr1.legend_elements()
    h2, _ = cntr2.legend_elements()
    ax[1, 1].legend([h1[0], h2[0]], ['{}GHz'.format(freq1), '{}GHz'.format(freq2)],
                    loc='upper right')

    for aa in ax.flat:
        aa.set(xlabel='RA [mas]', ylabel='Relative Declination [mas]')
        aa.axis(axe_ratio)
        aa.xaxis.set_minor_locator(AutoMinorLocator())
        aa.yaxis.set_minor_locator(AutoMinorLocator())
        aa.set_xlim(ra_max, ra_min)
        aa.set_ylim(dec_min, dec_max)

    figsize = set_size(fig_size, subplots=(2, 2))
    set_corrected_size(f, figsize)
    plt.tight_layout(pad=0.4)
    f.savefig(plotDir + 'shifted_maps_{:d}GHz_{:d}GHz.pdf'.format(int(freq1), int(freq2)))

    #########
    plt.cla()
    shift_export = file2rb_shift['shift'].copy()
    sys.stdout.write('final shift: {}'.format(shift_export))
    shift_export[0] *= common_ps * r2m
    shift_export[1] *= common_ps * r2m
    if masked_shift:
        sys.stdout.write('shift in mas: {}'.format(shift_export))
    else:
        error_export = file2rb_shift['error'].copy()
        error_export *= common_ps * r2m
        sys.stdout.write(r'shift in mas: {}\pm{}'.format(shift_export, error_export))

    #########################
    # plot spix map
    file1rb_plt = np.flipud(file1rb_plt)
    file2rb_shift_plt = np.flipud(file2rb_shift_plt)
    spix1 = file1rb_plt * (file1rb_plt > noise1 * sigma)   # zero where below the noise cut
    spix2 = file2rb_shift_plt * (file2rb_shift_plt > noise2 * sigma)
    spix1[spix1 == 0] = noise1 * sigma
    spix2[spix2 == 0] = noise2 * sigma
    a = np.log10(spix2 / spix1) / np.log10(freq2 / freq1)
    spix_vmin, spix_vmax = -3, 5
    sys.stdout.write('\nSpectral index max(alpha)={} - min(alpha)={}\nCutoff {}<alpha<{}\n'.format(
        ma.amax(a), ma.amin(a), spix_vmin, spix_vmax))
    a[a < spix_vmin] = spix_vmin
    a[a > spix_vmax] = spix_vmax
    a[spix2 == noise2 * sigma] = spix_vmin

    level10 = noise1 * sigma
    level20 = noise2 * sigma
    lev1 = [level10 * 2**i for i in range(10)]
    lev2 = [level20 * 2**i for i in range(10)]

    f, ax = plt.subplots()
    if fig_size == 'aanda*':
        fig_size = 'aanda'
    cset = ax.contour(spix1, linewidths=[0.5], levels=lev1_r, colors=['grey'],
                      extent=extent2, origin='lower', alpha=0.7)
    im = ax.imshow(a, cmap='hot_r', origin='lower', extent=extent2,
                   vmin=spix_vmin, vmax=spix_vmax)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    cbar = f.colorbar(im, use_gridspec=True, cax=cax)
    cbar.set_label(r'Spectral index $\alpha$')
    h1, _ = cset.legend_elements()
    ax.legend([h1[0]], ['{}GHz'.format(freq1)], loc='upper right')
    ax.axis('scaled')
    ax.set_xlabel('RA [mas]')
    ax.set_ylabel('Relative Dec [mas]')
    ax.set_xlim(ra_max, ra_min)
    ax.set_ylim(dec_min, dec_max)
    ax.minorticks_on()

    figsize = set_size(fig_size)
    set_corrected_size(f, figsize)
    plt.savefig(plotDir + 'spectral_index_between_{:d}_{:d}.pdf'.format(int(freq1), int(freq2)),
                bbox_inches='tight')
    plt.close('all')

    ############################
    if masked_shift:
        return {'file1': file1regridblur, 'file2': file2regridblur, 'shift': shift_export,
                'increment_dec': common_ps * 3.6e6, 'increment_ra': common_ps * 3.6e6}
    else:
        return {'file1': file1regridblur, 'file2': file2regridblur, 'shift': shift_export,
                'increment_dec': common_ps * 3.6e6, 'increment_ra': common_ps * 3.6e6,
                'error': error_export, 'diffphase': file2rb_shift['diffphase']}
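# Hypothetical call (the FITS file names are invented; plotDir, colormap and the
# helpers read_header, PXPERBEAM, align, apply_shift, set_size and
# set_corrected_size must be in scope):
# result = plot_aligned_maps(['map_15GHz.fits', 'map_43GHz.fits'],
#                            masked_shift=True, beam='max', sigma=3,
#                            npix_x=20, npix_y=20)
# PXPERBEAM is presumably the Gaussian beam area in pixels,
# pi*bmaj*bmin/(4*ln(2))/psize**2, but that helper is not shown here.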
def BHOSS2fits(filename, freq, source, date, history, user, plot=1, show=False):
    # some definitions
    DEGREE = 3.141592653589 / 180.0
    HOUR = 15.0 * DEGREE
    RADPERAS = DEGREE / 3600.0
    RADPERUAS = RADPERAS * 1.e-6

    # get file name
    filename_load = filename
    tmp = filename.split('_')

    # get source position
    loc = SkyCoord.from_name(source)
    # get RA and DEC in degree
    ra = loc.ra.deg
    dec = loc.dec.deg

    # convert date to mjd (modified julian date)
    modjuldate = (aTime(date)).mjd

    # get BHOSS GRRT file based on Z. Younsi's script
    # --> needs to be updated once the BHOSS header is modified!!!!
    #
    # First header line: [image width, offset, resolution, # of observational frequencies]
    header_1 = np.genfromtxt(filename, max_rows=1)
    # Second header line: [observational time, inclination, BH spin parameter,
    # luminosity F_nu correction to erg Hz, Jansky correction to Jy Hz]
    header_2 = np.genfromtxt(filename, skip_header=1, max_rows=1)
    # Third header line: observational frequencies of interest
    header_3 = np.genfromtxt(filename, skip_header=2, max_rows=1)

    width = header_1[0]
    offset = header_1[1]
    M = int(header_1[2])
    s1 = width + offset
    s2 = 2 * width / (M - 1)
    N_obs_freqs = header_1[3]
    time = header_2[0]
    inclination = header_2[1]
    phi = header_2[2]
    spin = header_2[3]
    L_corr = header_2[4]
    Jansky_corr = header_2[5]

    if source == 'SgrA*':
        Micro_Arcsecond_Corr = 5.04975            # new value based on Boehle et al. 2016
    if source == 'M 87':
        Micro_Arcsecond_Corr = 3.622197344489511  # new value from Yosuke for M 87
    width_scaled = Micro_Arcsecond_Corr * width   # scale from r_g to micro-arcseconds

    # find the selected frequency within the computed freqs
    ind = np.where(header_3 == freq)
    if len(ind[0]) < 1:
        sys.exit('EXIT: freq not in GRRT file. Available freqs are %s' % str(header_3))
    else:
        freq_ID = ind[0][0] + 3
    print(header_3)
    print(header_3[freq_ID - 3])

    # now read in all image data
    ascii2 = np.loadtxt(filename_load, skiprows=3, usecols=(0, 1, freq_ID))
    data2 = ascii2.reshape([M, M, 3])

    # convert from indices to (alpha, beta), in units of r_g, on the image plane
    x = -s1 + s2 * (data2[:, :, 0] - 1)
    y = -s1 + s2 * (data2[:, :, 1] - 1)

    # convert (alpha, beta) into micro-arcseconds
    x = Micro_Arcsecond_Corr * x
    y = Micro_Arcsecond_Corr * y
    xmax = np.amax(x)
    xmin = np.amin(x)
    ymax = np.amax(y)
    ymin = np.amin(y)

    # flux in Jansky
    jansky = data2[:, :, 2] * Jansky_corr

    # create pixel size
    dxorg = (xmax - xmin) / x.shape[0]
    dyorg = (ymax - ymin) / y.shape[0]
    dim = x.shape[0]
    print('image resolution %f' % dxorg)
    print('org res. total flux:', ma.sum(jansky), 'max flux:', ma.amax(jansky))

    # create fits file
    # create header and fill in some values
    header = fits.Header()
    header['AUTHOR'] = user
    header['OBJECT'] = source
    header['CTYPE1'] = 'RA---SIN'
    header['CTYPE2'] = 'DEC--SIN'
    header['CDELT1'] = -dxorg * RADPERUAS / DEGREE
    header['CDELT2'] = dxorg * RADPERUAS / DEGREE
    header['OBSRA'] = ra
    header['OBSDEC'] = dec
    header['FREQ'] = freq
    header['MJD'] = float(modjuldate)
    header['TELESCOP'] = 'VLBI'
    header['BUNIT'] = 'JY/PIXEL'
    header['STOKES'] = 'I'
    header['HISTORY'] = history
    hdu = fits.PrimaryHDU(jansky, header=header)
    hdulist = fits.HDUList([hdu])

    # save fits
    tmp = filename.split('/')
    outname = tmp[-1]
    hdulist.writeto('FITS/%s_%i_%iGHz.fits' % (outname[:-4], dim, freq / 1e9), overwrite=True)

    if plot == 1:
        # create image (log10 flux scale)
        fonts = 12
        cmap = 'cubehelix'
        fig = plt.figure(figsize=(7, 8))
        plt.subplots_adjust(left=0.15, bottom=0.1, right=0.95, top=0.85,
                            wspace=0.00001, hspace=0.00001)
        # 'axisbg' was removed in matplotlib 2.x; use facecolor
        ax = fig.add_subplot(111, frame_on=True, aspect='equal', facecolor='k')
        i1 = ax.imshow(ma.log10(jansky), origin='lower', vmin=-10, vmax=ma.log10(0.05),
                       extent=[xmax, xmin, ymin, ymax], interpolation='bicubic', cmap=cmap)
        ax.annotate(r'$\mathrm{%s}$' % source, xy=(0.1, 0.91), xycoords='axes fraction',
                    fontsize=18, horizontalalignment='left', verticalalignment='bottom',
                    color='w')
        ax.annotate(r'$\mathrm{{\nu=%i\,GHz}}$' % (freq / 1e9), xy=(0.1, 0.81),
                    xycoords='axes fraction', fontsize=18, horizontalalignment='left',
                    verticalalignment='bottom', color='w')

        # set axis labels
        plt.xlabel(r'relative R.A [$\mu$as]', fontsize=fonts)
        plt.ylabel(r'relative DEC [$\mu$as]', fontsize=fonts)

        # set position of colorbar
        p1 = ax.get_position().get_points().flatten()
        ax11 = fig.add_axes([p1[0], p1[3], p1[2] - p1[0], 0.02])
        t1 = plt.colorbar(i1, cax=ax11, orientation='horizontal', format='%1.1f')
        t1.set_label(r'$\log_{10}(S)\,\mathrm{[Jy/pixel]}$', fontsize=fonts + 4, labelpad=-65)
        t1.ax.xaxis.set_ticks_position('top')
        t1.ax.tick_params(labelsize=fonts)

        # set axis limits and ticks
        ax.set_xlim(xmax, xmin)
        ax.set_ylim(ymin, ymax)
        ax.yaxis.set_major_locator(plt.MultipleLocator(20))
        ax.yaxis.set_minor_locator(plt.MultipleLocator(5))
        ax.xaxis.set_major_locator(plt.MultipleLocator(20))
        ax.xaxis.set_minor_locator(plt.MultipleLocator(5))
        for spine in ax.spines.values():
            spine.set_edgecolor('w')

        # settings for axes and tickmarks
        for label in ax.xaxis.get_ticklabels():
            label.set_fontsize(fonts)
        for label in ax.yaxis.get_ticklabels():
            label.set_fontsize(fonts)
        for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(14)
            tick.tick2line.set_markersize(14)
        for tick in ax.xaxis.get_minor_ticks() + ax.yaxis.get_minor_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(10)
            tick.tick2line.set_markersize(10)

        # save image
        plt.savefig('IMAGE/%s_%i_%iGHz.pdf' % (source, dim, freq / 1e9),
                    dpi=150, bbox_inches='tight', pad_inches=0.04)
        if show:
            plt.show()

    return {'image': jansky, 'dx': dxorg, 'dy': dyorg}
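# Hypothetical call (the GRRT file name and date are invented; the FITS/ and
# IMAGE/ output directories must already exist):
# res = BHOSS2fits('BHOSS_GRRT_M87.dat', freq=230e9, source='M 87',
#                  date='2017-04-05', history='BHOSS GRRT image',
#                  user='me', plot=1, show=False)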
def validationExterne():
    """
    Performs an external validation between the image produced by the
    downscaling and a land surface temperature image computed from bands 10
    and 11 of Landsat 8 (available on EarthData).

    The results of the external validation are quality metrics comparing the
    downscaling results with the surface temperature computed at 100 m. These
    results are printed to the console (lines 129 to 144).
    """
    # Match prediction result extent
    landsat_b10 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B10.TIF')
    landsat_b10.reprojectMatch(
        r'data/MOD11_L2.clipped_test2.tif'.split(".")[0] + '_subdivided_100m.tif',
        False)
    landsat_b10.setNewFile(
        landsat_b10.filename.replace(".TIF", "_reproject.tif"))

    # Get TOA radiance
    b10_array = landsat_b10.getArray(masked=True,
                                     lower_valid_range=1,
                                     upper_valid_range=65535)
    b10_array_radiance = ma.add(ma.multiply(b10_array, 0.00033420), 0.10000)

    # Get brightness temperature
    b10_array_brightness_temp = (1321.0789 / (ma.log(
        (774.8853 / b10_array_radiance) + 1))) - 273.15

    # Get NDVI
    landsat_b4 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B4_reproject.tif')
    b4_DN = landsat_b4.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b4 = np.add(np.multiply(b4_DN, float(0.00002)), float(-0.10))

    landsat_b5 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B5_reproject.tif')
    b5_DN = landsat_b5.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b5 = np.add(np.multiply(b5_DN, float(0.00002)), float(-0.10))

    ndvi = np.divide(np.subtract(b5, b4), np.add(b5, b4),
                     where=((np.add(b5, b4)) != 0))

    # Get proportion of vegetation
    min_ndvi = ma.amin(ndvi)
    max_ndvi = ma.amax(ndvi)
    pv = ma.power(
        ma.divide(ma.subtract(ndvi, min_ndvi),
                  (ma.subtract(max_ndvi, min_ndvi)),
                  where=(ma.subtract(max_ndvi, min_ndvi)) != 0), 2)

    # Get emissivity
    emissivity = 0.004 * pv + 0.986

    # Get Landsat 8 LST
    landsat_lst = b10_array_brightness_temp / (
        1 + (0.00115 * b10_array_brightness_temp / 1.4388) * ma.log(emissivity))

    # Save LST image for visualization
    landsat_b10.save_band(landsat_lst, r'data/landsat_lst.tif')

    # Validation between both arrays
    predicted_lst = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m.tif').getArray())
    predicted_lst_with_residuals = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m_avec_residus.tif').getArray())
    predicted_lst = ma.filled(predicted_lst, 0)
    predicted_lst_with_residuals = ma.filled(predicted_lst_with_residuals, 0)

    # Without residuals
    print('Without residual correction')
    print('Mean Absolute Error (MAE):',
          metrics.mean_absolute_error(predicted_lst, landsat_lst))
    print('Mean Squared Error:',
          metrics.mean_squared_error(predicted_lst, landsat_lst))
    print('Root Mean Squared Error:',
          np.sqrt(metrics.mean_squared_error(predicted_lst, landsat_lst)), "°C")
    print('Accuracy:',
          100 - np.mean(100 * ((abs(predicted_lst - landsat_lst)) / landsat_lst)), "%")
    print('Explained variance score (EVS):',
          metrics.explained_variance_score(predicted_lst, landsat_lst))

    # With residuals
    print("\n")
    print('With residual correction')
    print('Mean Absolute Error (MAE):',
          metrics.mean_absolute_error(predicted_lst_with_residuals, landsat_lst))
    print('Mean Squared Error:',
          metrics.mean_squared_error(predicted_lst_with_residuals, landsat_lst))
    print('Root Mean Squared Error:',
          np.sqrt(metrics.mean_squared_error(predicted_lst_with_residuals, landsat_lst)),
          "°C")
    print('Accuracy:',
          100 - np.mean(100 * ((abs(predicted_lst_with_residuals - landsat_lst)) / landsat_lst)),
          "%")
    print('Explained variance score (EVS):',
          metrics.explained_variance_score(predicted_lst_with_residuals, landsat_lst))
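# Worked example of the band-10 radiance -> brightness temperature step above
# (the DN value is invented; K1 = 774.8853 and K2 = 1321.0789 are the TIRS
# band-10 constants used in the function):
#   DN = 30000  ->  L = 0.0003342 * 30000 + 0.1 ≈ 10.13 W m-2 sr-1 um-1
#   T_B = 1321.0789 / ln(774.8853 / 10.13 + 1) - 273.15 ≈ 30.5 °C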
if i >= BATS_lon_min and i <= BATS_lon_max:
    lon_index.add(index2)   # must use .add() to insert into a set
index2 += 1

print(lon_index)

lat_index_min = min(lat_index)
lat_index_max = max(lat_index)
lon_index_min = min(lon_index)
lon_index_max = max(lon_index)

BATSadt = adt[:, lat_index_min:lat_index_max, lon_index_min:lon_index_max]

# creating colorbar scale:
high = ma.amax(adt)
low = ma.amin(adt)
colorbar_scale = ["%.2E" % low]
i = 0.16
while i < 1:
    value = (high - low) * i
    colorbar_scale.append("%.2E" % value)
    i += 0.16
## ^^ ignore this but when you make your colorbar you can add the
## scale by: cbar.ax.set_yticklabels(colorbar_scale)

# write code for global ocean here:

# write code for BATS part here:
print(BATSadt.shape)
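# Minimal sketch of the colorbar-labeling step described above (the pcolormesh
# call and its data slice are invented):
# im = plt.pcolormesh(BATSadt[0])
# cbar = plt.colorbar(im)
# cbar.ax.set_yticklabels(colorbar_scale)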
def make_closea_masks(config=None, domcfg_file=None, mask=None):

    #=========================
    # 1. Read in domcfg file
    #=========================
    if config is None:
        raise Exception('configuration must be specified')
    if domcfg_file is None:
        raise Exception('domain_cfg file must be specified')
    if mask is None:
        mask = False

    domcfg = nc.Dataset(domcfg_file, 'r+')
    lon = domcfg.variables['nav_lon'][:]
    lat = domcfg.variables['nav_lat'][:]
    top_level = domcfg.variables['top_level'][0][:]
    nx = top_level.shape[1]
    ny = top_level.shape[0]

    # Generate 2D "i" and "j" fields for use in "where" statements.
    # These are the Fortran indices, counting from 1, so we have to
    # add 1 to np.arange because python counts from 0.
    ones_2d = np.ones((ny, nx))
    ii1d = np.arange(nx) + 1
    jj1d = np.arange(ny) + 1
    ii2d = ii1d * ones_2d
    jj2d = np.transpose(jj1d * np.transpose(ones_2d))

    #=====================================
    # 2. Closea definitions (old style)
    #=====================================
    # NB. The model i and j indices defined here are Fortran indices,
    #     ie. counting from 1 as in the NEMO code. Also the indices
    #     of the arrays (ncsi1 etc) count from 1 in order to match
    #     the Fortran code.
    #     This means that you can cut and paste the definitions from
    #     the NEMO code and change round brackets to square brackets.
    #     But BEWARE: Fortran array(a:b) == Python array[a:b+1] !!!
    #
    # If use_runoff_box = True then specify the runoff area as all sea points
    # within a rectangular area. If use_runoff_box = False then specify a list
    # of points as in the old NEMO code. Defaults to False.

    use_runoff_box = False

    #================================================================
    if config == 'ORCA2':
        num_closea = 4
        max_runoff_points = 4

        # use builtin int: np.int was removed in numpy 1.24
        ncsnr = np.zeros(num_closea+1, dtype=int) ; ncstt = np.zeros(num_closea+1, dtype=int)
        ncsi1 = np.zeros(num_closea+1, dtype=int) ; ncsj1 = np.zeros(num_closea+1, dtype=int)
        ncsi2 = np.zeros(num_closea+1, dtype=int) ; ncsj2 = np.zeros(num_closea+1, dtype=int)
        ncsir = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)
        ncsjr = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)

        # Caspian Sea (spread over globe)
        ncsnr[1] = 1   ; ncstt[1] = 0
        ncsi1[1] = 11  ; ncsj1[1] = 103
        ncsi2[1] = 17  ; ncsj2[1] = 112
        # Great Lakes - North America - put at St Laurent mouth
        ncsnr[2] = 1   ; ncstt[2] = 2
        ncsi1[2] = 97  ; ncsj1[2] = 107
        ncsi2[2] = 103 ; ncsj2[2] = 111
        ncsir[2,1] = 110 ; ncsjr[2,1] = 111
        # Black Sea (crossed by the cyclic boundary condition)
        # put in Med Sea (north of Aegean Sea)
        ncsnr[3:5] = 4 ; ncstt[3:5] = 2
        ncsir[3:5,1] = 171 ; ncsjr[3:5,1] = 106
        ncsir[3:5,2] = 170 ; ncsjr[3:5,2] = 106
        ncsir[3:5,3] = 171 ; ncsjr[3:5,3] = 105
        ncsir[3:5,4] = 170 ; ncsjr[3:5,4] = 105
        # west part of the Black Sea
        ncsi1[3] = 174 ; ncsj1[3] = 107
        ncsi2[3] = 181 ; ncsj2[3] = 112
        # east part of the Black Sea
        ncsi1[4] = 2   ; ncsj1[4] = 107
        ncsi2[4] = 6   ; ncsj2[4] = 112

    #================================================================
    elif config == 'eORCA1':
        num_closea = 1
        max_runoff_points = 1

        ncsnr = np.zeros(num_closea+1, dtype=int) ; ncstt = np.zeros(num_closea+1, dtype=int)
        ncsi1 = np.zeros(num_closea+1, dtype=int) ; ncsj1 = np.zeros(num_closea+1, dtype=int)
        ncsi2 = np.zeros(num_closea+1, dtype=int) ; ncsj2 = np.zeros(num_closea+1, dtype=int)
        ncsir = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)
        ncsjr = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)

        # Caspian Sea (spread over the globe)
        ncsnr[1] = 1   ; ncstt[1] = 0
        ncsi1[1] = 332 ; ncsj1[1] = 243
        ncsi2[1] = 344 ; ncsj2[1] = 275

    #================================================================
    elif config == 'eORCA025_UK':
        num_closea = 10
        max_runoff_points = 1

        ncsnr = np.zeros(num_closea+1, dtype=int) ; ncstt = np.zeros(num_closea+1, dtype=int)
        ncsi1 = np.zeros(num_closea+1, dtype=int) ; ncsj1 = np.zeros(num_closea+1, dtype=int)
        ncsi2 = np.zeros(num_closea+1, dtype=int) ; ncsj2 = np.zeros(num_closea+1, dtype=int)
        ncsir = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)
        ncsjr = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)

        # Caspian Sea
        ncsnr[1] = 1    ; ncstt[1] = 0
        ncsi1[1] = 1330 ; ncsj1[1] = 831
        ncsi2[1] = 1375 ; ncsj2[1] = 981
        # Aral Sea
        ncsnr[2] = 1    ; ncstt[2] = 0
        ncsi1[2] = 1376 ; ncsj1[2] = 900
        ncsi2[2] = 1400 ; ncsj2[2] = 981
        # Azov Sea
        ncsnr[3] = 1    ; ncstt[3] = 0
        ncsi1[3] = 1284 ; ncsj1[3] = 908
        ncsi2[3] = 1304 ; ncsj2[3] = 933
        # Lake Superior
        ncsnr[4] = 1    ; ncstt[4] = 0
        ncsi1[4] = 781  ; ncsj1[4] = 905
        ncsi2[4] = 815  ; ncsj2[4] = 926
        # Lake Michigan
        ncsnr[5] = 1    ; ncstt[5] = 0
        ncsi1[5] = 795  ; ncsj1[5] = 871
        ncsi2[5] = 813  ; ncsj2[5] = 905
        # Lake Huron part 1
        ncsnr[6] = 1    ; ncstt[6] = 0
        ncsi1[6] = 814  ; ncsj1[6] = 882
        ncsi2[6] = 825  ; ncsj2[6] = 905
        # Lake Huron part 2
        ncsnr[7] = 1    ; ncstt[7] = 0
        ncsi1[7] = 826  ; ncsj1[7] = 889
        ncsi2[7] = 833  ; ncsj2[7] = 905
        # Lake Erie
        ncsnr[8] = 1    ; ncstt[8] = 0
        ncsi1[8] = 816  ; ncsj1[8] = 871
        ncsi2[8] = 837  ; ncsj2[8] = 881
        # Lake Ontario
        ncsnr[9] = 1    ; ncstt[9] = 0
        ncsi1[9] = 831  ; ncsj1[9] = 882
        ncsi2[9] = 847  ; ncsj2[9] = 889
        # Lake Victoria
        ncsnr[10] = 1    ; ncstt[10] = 0
        ncsi1[10] = 1274 ; ncsj1[10] = 672
        ncsi2[10] = 1289 ; ncsj2[10] = 687

    #================================================================
    elif config == 'eORCA025_UK_rnf':
        num_closea = 10
        max_runoff_points = 1
        use_runoff_box = True

        ncsnr = np.zeros(num_closea+1, dtype=int) ; ncstt = np.zeros(num_closea+1, dtype=int)
        ncsi1 = np.zeros(num_closea+1, dtype=int) ; ncsj1 = np.zeros(num_closea+1, dtype=int)
        ncsi2 = np.zeros(num_closea+1, dtype=int) ; ncsj2 = np.zeros(num_closea+1, dtype=int)
        ncsir1 = np.zeros(num_closea+1, dtype=int) ; ncsjr1 = np.zeros(num_closea+1, dtype=int)
        ncsir2 = np.zeros(num_closea+1, dtype=int) ; ncsjr2 = np.zeros(num_closea+1, dtype=int)
        ncsir = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)
        ncsjr = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)

        # Caspian Sea
        ncsnr[1] = 1    ; ncstt[1] = 0
        ncsi1[1] = 1330 ; ncsj1[1] = 831
        ncsi2[1] = 1375 ; ncsj2[1] = 981
        # Aral Sea
        ncsnr[2] = 1    ; ncstt[2] = 0
        ncsi1[2] = 1376 ; ncsj1[2] = 900
        ncsi2[2] = 1400 ; ncsj2[2] = 981
        # Azov Sea
        ncsnr[3] = 1    ; ncstt[3] = 0
        ncsi1[3] = 1284 ; ncsj1[3] = 908
        ncsi2[3] = 1304 ; ncsj2[3] = 933
        # Lake Superior
        ncsnr[4] = 1    ; ncstt[4] = 1
        ncsi1[4] = 781  ; ncsj1[4] = 905
        ncsi2[4] = 815  ; ncsj2[4] = 926
        # runoff points in the St Lawrence Seaway for all Great Lakes
        ncsir1[4:10] = 873 ; ncsjr1[4:10] = 909
        ncsir2[4:10] = 884 ; ncsjr2[4:10] = 920
        # Lake Michigan
        ncsnr[5] = 1    ; ncstt[5] = 1
        ncsi1[5] = 795  ; ncsj1[5] = 871
        ncsi2[5] = 813  ; ncsj2[5] = 905
        # Lake Huron part 1
        ncsnr[6] = 1    ; ncstt[6] = 1
        ncsi1[6] = 814  ; ncsj1[6] = 882
        ncsi2[6] = 825  ; ncsj2[6] = 905
        # Lake Huron part 2
        ncsnr[7] = 1    ; ncstt[7] = 1
        ncsi1[7] = 826  ; ncsj1[7] = 889
        ncsi2[7] = 833  ; ncsj2[7] = 905
        # Lake Erie
        ncsnr[8] = 1    ; ncstt[8] = 1
        ncsi1[8] = 816  ; ncsj1[8] = 871
        ncsi2[8] = 837  ; ncsj2[8] = 881
        # Lake Ontario
        ncsnr[9] = 1    ; ncstt[9] = 1
        ncsi1[9] = 831  ; ncsj1[9] = 882
        ncsi2[9] = 847  ; ncsj2[9] = 889
        # Lake Victoria
        ncsnr[10] = 1    ; ncstt[10] = 0
        ncsi1[10] = 1274 ; ncsj1[10] = 672
        ncsi2[10] = 1289 ; ncsj2[10] = 687

    #================================================================
    elif config == 'eORCA025_UK_empmr':
        num_closea = 10
        max_runoff_points = 1
        use_runoff_box = True

        ncsnr = np.zeros(num_closea+1, dtype=int) ; ncstt = np.zeros(num_closea+1, dtype=int)
        ncsi1 = np.zeros(num_closea+1, dtype=int) ; ncsj1 = np.zeros(num_closea+1, dtype=int)
        ncsi2 = np.zeros(num_closea+1, dtype=int) ; ncsj2 = np.zeros(num_closea+1, dtype=int)
        ncsir1 = np.zeros(num_closea+1, dtype=int) ; ncsjr1 = np.zeros(num_closea+1, dtype=int)
        ncsir2 = np.zeros(num_closea+1, dtype=int) ; ncsjr2 = np.zeros(num_closea+1, dtype=int)
        ncsir = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)
        ncsjr = np.zeros((num_closea+1, max_runoff_points+1), dtype=int)

        # Caspian Sea
        ncsnr[1] = 1    ; ncstt[1] = 0
        ncsi1[1] = 1330 ; ncsj1[1] = 831
        ncsi2[1] = 1375 ; ncsj2[1] = 981
        # Aral Sea
        ncsnr[2] = 1    ; ncstt[2] = 0
        ncsi1[2] = 1376 ; ncsj1[2] = 900
        ncsi2[2] = 1400 ; ncsj2[2] = 981
        # Azov Sea
        ncsnr[3] = 1    ; ncstt[3] = 0
        ncsi1[3] = 1284 ; ncsj1[3] = 908
        ncsi2[3] = 1304 ; ncsj2[3] = 933
        # Lake Superior
        ncsnr[4] = 1    ; ncstt[4] = 2
        ncsi1[4] = 781  ; ncsj1[4] = 905
        ncsi2[4] = 815  ; ncsj2[4] = 926
        # runoff points in the St Lawrence Seaway for all Great Lakes
        ncsir1[4:10] = 873 ; ncsjr1[4:10] = 909
        ncsir2[4:10] = 884 ; ncsjr2[4:10] = 920
        # Lake Michigan
        ncsnr[5] = 1    ; ncstt[5] = 2
        ncsi1[5] = 795  ; ncsj1[5] = 871
        ncsi2[5] = 813  ; ncsj2[5] = 905
        # Lake Huron part 1
        ncsnr[6] = 1    ; ncstt[6] = 2
        ncsi1[6] = 814  ; ncsj1[6] = 882
        ncsi2[6] = 825  ; ncsj2[6] = 905
        # Lake Huron part 2
        ncsnr[7] = 1    ; ncstt[7] = 2
        ncsi1[7] = 826  ; ncsj1[7] = 889
        ncsi2[7] = 833  ; ncsj2[7] = 905
        # Lake Erie
        ncsnr[8] = 1    ; ncstt[8] = 2
        ncsi1[8] = 816  ; ncsj1[8] = 871
        ncsi2[8] = 837  ; ncsj2[8] = 881
        # Lake Ontario
        ncsnr[9] = 1    ; ncstt[9] = 2
        ncsi1[9] = 831  ; ncsj1[9] = 882
        ncsi2[9] = 847  ; ncsj2[9] = 889
        # Lake Victoria
        ncsnr[10] = 1    ; ncstt[10] = 0
        ncsi1[10] = 1274 ; ncsj1[10] = 672
        ncsi2[10] = 1289 ; ncsj2[10] = 687

    #=====================================
    # 3. Generate mask fields
    #=====================================
    rnf_count = 0
    empmr_count = 0

    closea_mask = ma.zeros(top_level.shape, dtype=int)
    temp_mask_rnf = ma.zeros(top_level.shape, dtype=int)
    temp_mask_empmr = ma.zeros(top_level.shape, dtype=int)
    closea_mask_rnf = ma.zeros(top_level.shape, dtype=int)
    closea_mask_empmr = ma.zeros(top_level.shape, dtype=int)

    for ics in range(num_closea):
        closea_mask = ma.where( ( ii2d[:] >= ncsi1[ics+1] ) & ( ii2d[:] <= ncsi2[ics+1] ) &
                                ( jj2d[:] >= ncsj1[ics+1] ) & ( jj2d[:] <= ncsj2[ics+1] ) &
                                ( top_level == 1 ), ics+1, closea_mask)

        if ncstt[ics+1] == 1:
            rnf_count = rnf_count + 1
            temp_mask_rnf[:] = 0
            if use_runoff_box:
                temp_mask_rnf = ma.where( ( ii2d[:] >= ncsir1[ics+1] ) & ( ii2d[:] <= ncsir2[ics+1] ) &
                                          ( jj2d[:] >= ncsjr1[ics+1] ) & ( jj2d[:] <= ncsjr2[ics+1] ) &
                                          ( top_level == 1 ), rnf_count, 0)
            else:
                for ir in range(ncsnr[ics+1]):
                    # runoff points are indexed [j, i]
                    temp_mask_rnf[ncsjr[ics+1, ir+1], ncsir[ics+1, ir+1]] = rnf_count

            temp_mask_rnf = ma.where( closea_mask_rnf > 0,
                                      ma.minimum(temp_mask_rnf, closea_mask_rnf),
                                      temp_mask_rnf)
            min_rnf = ma.amin(temp_mask_rnf[ma.where(temp_mask_rnf > 0)])
            max_rnf = ma.amax(temp_mask_rnf[ma.where(temp_mask_rnf > 0)])
            if min_rnf != max_rnf:
                print('min_rnf, max_rnf : ', min_rnf, max_rnf)
                raise Exception('Partially overlapping target rnf areas for two closed seas.')
            else:
                # source area:
                closea_mask_rnf[ma.where(closea_mask == ics+1)] = min_rnf
                # target area:
                closea_mask_rnf[ma.where(temp_mask_rnf > 0)] = min_rnf
                # reset rnf_count:
                rnf_count = min_rnf

        if ncstt[ics+1] == 2:
            empmr_count = empmr_count + 1
            temp_mask_empmr[:] = 0
            if use_runoff_box:
                temp_mask_empmr = ma.where( ( ii2d[:] >= ncsir1[ics+1] ) & ( ii2d[:] <= ncsir2[ics+1] ) &
                                            ( jj2d[:] >= ncsjr1[ics+1] ) & ( jj2d[:] <= ncsjr2[ics+1] ) &
                                            ( top_level == 1 ), empmr_count, 0)
            else:
                for ir in range(ncsnr[ics+1]):
                    # runoff points are indexed [j, i]
                    temp_mask_empmr[ncsjr[ics+1, ir+1], ncsir[ics+1, ir+1]] = empmr_count

            temp_mask_empmr = ma.where( closea_mask_empmr > 0,
                                        ma.minimum(temp_mask_empmr, closea_mask_empmr),
                                        temp_mask_empmr)
            min_empmr = ma.amin(temp_mask_empmr[ma.where(temp_mask_empmr > 0)])
            max_empmr = ma.amax(temp_mask_empmr[ma.where(temp_mask_empmr > 0)])
            if min_empmr != max_empmr:
                raise Exception('Partially overlapping target empmr areas for two closed seas.')
            else:
                # source area:
                closea_mask_empmr[ma.where(closea_mask == ics+1)] = min_empmr
                # target area:
                closea_mask_empmr[ma.where(temp_mask_empmr > 0)] = min_empmr
                # reset empmr_count:
                empmr_count = min_empmr

    if mask:
        # apply land-sea mask if required
        closea_mask.mask = np.where(top_level == 0, True, False)
        closea_mask_rnf.mask = np.where(top_level == 0, True, False)
        closea_mask_empmr.mask = np.where(top_level == 0, True, False)

    #=====================================
    # 4. Append masks to domain_cfg file.
    #=====================================
    domcfg.createVariable('closea_mask', datatype='i', dimensions=('y','x'),
                          fill_value=closea_mask.fill_value, chunksizes=(1000,1000))
    domcfg.variables['closea_mask'][:] = closea_mask
    if rnf_count > 0:
        domcfg.createVariable('closea_mask_rnf', datatype='i', dimensions=('y','x'),
                              fill_value=closea_mask_rnf.fill_value, chunksizes=(1000,1000))
        domcfg.variables['closea_mask_rnf'][:] = closea_mask_rnf
    if empmr_count > 0:
        domcfg.createVariable('closea_mask_empmr', datatype='i', dimensions=('y','x'),
                              fill_value=closea_mask_empmr.fill_value, chunksizes=(1000,1000))
        domcfg.variables['closea_mask_empmr'][:] = closea_mask_empmr

    domcfg.close()
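# Hypothetical invocation (the domain_cfg file name is invented). Note that the
# function appends the mask variables to the netCDF file in place:
# make_closea_masks(config='eORCA1', domcfg_file='domain_cfg.nc', mask=True)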
def __init__(self, imgFiles, shiftFile=None):
    self.header = [read_header(m) for m in imgFiles]
    self.shift_ra = None
    self.shift_dec = None
    self.shift_date = None
    if shiftFile is not None:
        shift_ra, shift_dec, shift_date = read_shift(shiftFile)
        self.date_im = np.array([parse(h['DATE-OBS']) for h in self.header])
        date_shift = np.array([parse(d) for d in shift_date])
        intersecS = np.in1d(date_shift, self.date_im)
        intersecI = np.in1d(self.date_im, date_shift)
        if np.array_equal(intersecI, intersecS):
            sys.stdout.write('Shifts for all epochs are there. Will continue.\n')
            self.shift_ra = np.array(shift_ra)
            self.shift_dec = np.array(shift_dec)
            self.shift_date = np.array(shift_date)
        else:
            sys.stdout.write('Dates/number of shifts are not equal to the given image observing dates.\n')
            input('Do you want to continue by only using the images for which shifts are there?\n'
                  ' Press Enter to continue...')
            self.shift_ra = np.array(shift_ra)[intersecS]
            self.shift_dec = np.array(shift_dec)[intersecS]
            self.shift_date = np.array(date_shift)[intersecS]
            imgFiles = np.array(imgFiles)[intersecI]
            self.header = [read_header(m) for m in imgFiles]

    self.ehtFiles = [eh.image.load_fits(m, aipscc=True) for m in imgFiles]
    self.beam = [[h['BMAJ']*np.pi/180, h['BMIN']*np.pi/180, h['BPA']*np.pi/180]
                 for h in self.header]
    self.fovx = np.array([m.fovx() for m in self.ehtFiles])
    self.fovy = np.array([m.fovy() for m in self.ehtFiles])
    self.naxis1 = np.array([h['NAXIS1'] for h in self.header])
    self.naxis2 = np.array([h['NAXIS2'] for h in self.header])
    self.px_inc = np.array([h['cdelt2'] for h in self.header])
    self.noise_difmap = np.array([h['noise'] for h in self.header])
    self.ppb = [PXPERBEAM(b[0], b[1], pxi*np.pi/180) for b, pxi in zip(self.beam, self.px_inc)]
    self.median_beam = np.array([np.sqrt(b[0]*b[1]) for b in self.beam])
    self.stacked_beam = np.median(self.median_beam)
    self.stacked_naxis = ma.amax([self.naxis1, self.naxis2])
    self.stacked_fov = ma.amax([self.fovx, self.fovy])
    self.stacked_px_inc = self.stacked_fov/self.stacked_naxis*180/np.pi
    self.stacked_ppb = PXPERBEAM(self.stacked_beam, self.stacked_beam,
                                 self.stacked_px_inc*np.pi/180)
    self.date_im = None
    self.stackedImg = None
    self.blurImg = None
    self.stacked_noise_diff = None

    # check whether all images have the same field of view; if not, regrid
    # them to the common (largest) FOV
    if len(np.unique(np.concatenate([self.fovx, self.fovy]))) > 1:
        sys.stdout.write('Regrid images as they do not have the same FOV.\n')
        maxfov = [max(x, y) for x, y in zip(self.fovx, self.fovy)]
        self.ehtFiles = [f.pad(mf, mf) for f, mf in zip(self.ehtFiles, maxfov)]
        self.ehtFiles = [f.regrid_image(self.stacked_fov, self.stacked_naxis, interp='linear')
                         for f in self.ehtFiles]

    # blur with a circular Gaussian
    blurFiles = [im.blur_circ(self.stacked_beam) for im in self.ehtFiles]
    self.blurImg = np.array([im.imarr(pol='I') for im in blurFiles])

    ######################
    if shiftFile is not None:
        print(self.shift_ra)
        # convert shifts from mas to pixels
        self.shift_ra = self.shift_ra/self.stacked_px_inc/3.6e6
        self.shift_dec = self.shift_dec/self.stacked_px_inc/3.6e6
        print(self.shift_ra)
        self.blurImg = np.array([apply_shift(f, [sd, sr]) for f, sd, sr
                                 in zip(self.blurImg, self.shift_dec, self.shift_ra)])

    self.stackedImg = self.blurImg.sum(axis=0)/len(self.blurImg)*self.stacked_ppb
    print(np.sum(self.noise_difmap)/len(self.noise_difmap))   # mean difmap noise
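# Hypothetical use of the class this __init__ belongs to (the class name
# 'ImageStacker' and the file names are invented):
# stack = ImageStacker(['epoch1.fits', 'epoch2.fits'], shiftFile='shifts.dat')
# plt.imshow(stack.stackedImg, origin='lower')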