def __call__(self, value, clip=None):
    if clip is None:
        clip = self.clip
    if cbook.iterable(value):
        vtype = 'array'
        val = ma.asarray(value).astype(float)
    else:
        vtype = 'scalar'
        val = ma.array([value]).astype(float)
    self.autoscale_None(val)
    vmin, vmax = self.vmin, self.vmax
    cmin, cmax = self.cmin * vmin, self.cmax * vmax
    if vmin > vmax:
        raise ValueError("minvalue must be less than or equal to maxvalue")
    elif vmin == vmax:
        result = 0.0 * val
    else:
        if clip:
            mask = ma.getmask(val)
            val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
        # two-sided log normalization: values inside [cmin, cmax] map to 0.5,
        # values outside are log-scaled into [0, 0.5) and (0.5, 1]
        result = 0. * val + 0.5
        result[val > cmax] = (ma.log10(val[val > cmax]) - ma.log10(cmax)) \
            / (np.log10(vmax) - np.log10(cmax)) / 2. + 0.5
        result[val < cmin] = -(ma.log10(-val[val < cmin]) - ma.log10(-cmin)) \
            / (np.log10(-vmin) - np.log10(-cmin)) / 2. + 0.5
    if vtype == 'scalar':
        result = result[0]
    return result
def make_qq_plot(qq, ci=True, ylim=7.3, xlim=7.3):
    hv_logp = np.array(qq['hv_logp']).astype(float)
    data_logpvec = np.array(qq['data_logpvec']).astype(float)
    model_logpvec = np.array(qq['model_logpvec']).astype(float)
    ylim_data = max(hv_logp[np.isfinite(data_logpvec)])
    model_logpvec[hv_logp > ylim_data] = np.nan
    if ci:
        q = 10**-data_logpvec
        dq = 1.96 * np.sqrt(q * (1 - q) / qq['sum_data_weights'])
        y1 = hv_logp
        x1 = ma.filled(-ma.log10(q + dq), np.nan)  # left CI bound
        x2 = ma.filled(-ma.log10(q - dq), np.nan)  # right CI bound
        if True:
            y2 = np.empty(hv_logp.shape)
            y2[:] = np.nan
            y2[x2 < np.nanmax(x1)] = interp1d(x1, y1)(x2[x2 < np.nanmax(x1)])  # upper CI bound
            y2[np.isnan(y2)] = ylim_data
            plt.fill_between(x2, y1, y2, color=(0.1843, 0.3098, 0.3098), alpha=0.25)
        else:
            plt.plot(x1, hv_logp, x2, hv_logp)
    hData = plt.plot(data_logpvec, hv_logp)
    hModel = plt.plot(model_logpvec, hv_logp)
    hNull = plt.plot(hv_logp, hv_logp, 'k--')
    plt.ylim(0, ylim)
    plt.xlim(0, xlim)
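# A minimal sketch (not part of the original function): the CI band above is
# the normal approximation to the binomial, q +/- 1.96*sqrt(q*(1-q)/N), applied
# to each empirical p-value quantile q. `qq_ci_bounds` and `n_weights` are
# hypothetical names introduced here for illustration.
import numpy as np

def qq_ci_bounds(data_logpvec, n_weights):
    """Return -log10 of the approximate 95% CI bounds around each quantile."""
    q = 10.0 ** (-np.asarray(data_logpvec, dtype=float))
    dq = 1.96 * np.sqrt(q * (1.0 - q) / n_weights)
    with np.errstate(divide='ignore', invalid='ignore'):  # q - dq can go negative near q ~ 0
        lower = -np.log10(q + dq)   # left bound (larger p)
        upper = -np.log10(q - dq)   # right bound (smaller p), nan where q - dq <= 0
    return lower, upper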
def components(prof, p):
    '''
    Interpolates the given data to calculate the U and V components
    at a given pressure.

    Parameters
    ----------
    prof : profile object
        Profile object
    p : number, numpy array
        Pressure (hPa) of a level

    Returns
    -------
    U and V components at the given pressure (kts) : number, numpy array

    '''
    # Note: numpy's interpolation routine expects the x-coordinate array
    # to be in ascending order. Because pressure decreases in the
    # vertical, we reverse the order of the two arrays to satisfy
    # this requirement.
    if prof.wdir.count() == 0:
        # JTS - Fixed a bug where clicking "Interpolate Focused Profile"
        # throws an error for NUCAPS.
        return ma.masked_where(ma.ones(np.shape(p)), p), \
               ma.masked_where(ma.ones(np.shape(p)), p)
    U = generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.u[::-1])
    V = generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.v[::-1])
    return U, V
def degree_distribution(A, networkName, directed=True):
    binNum = 30
    if directed:
        (kin, kout) = get_degree(A)

        # bin the in-degree statistics on a log10 scale
        bins = np.linspace(0, np.log10(np.max(kin)), num=binNum)
        digitized = np.digitize(np.log10(kin), bins)
        bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0, len(bins))])
        bin_counts = ma.log10(bin_counts)

        # fit the line
        a, b = ma.polyfit(bins, bin_counts, 1, full=False)
        print('best fit in degree line:\ny = {:.2f} + {:.2f}x'.format(b, a))
        yfit = [b + a * xi for xi in bins]

        fig, axs = plt.subplots(2, 1)
        axs[0].scatter(bins, bin_counts)
        axs[0].plot(bins, yfit, color="orange")
        axs[0].set_title('in-degree distribution')
        axs[0].set_xlabel('Degree (d) log base 10', fontsize="small")
        axs[0].set_ylabel('Frequency log base 10', fontsize="small")
        axs[0].set_ylim(bottom=0)

        # bin the out-degree statistics
        bins = np.linspace(0, np.log10(np.max(kout)), num=binNum)
        digitized = np.digitize(np.log10(kout), bins)
        bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0, len(bins))])
        bin_counts = ma.log10(bin_counts)

        # fit the line for the out-degree data
        a, b = ma.polyfit(bins, bin_counts, 1, full=False)
        print('best fit out degree line:\ny = {:.2f} + {:.2f}x'.format(b, a))
        yfit = [b + a * xi for xi in bins]

        axs[1].scatter(bins, bin_counts)
        axs[1].plot(bins, yfit, color="orange")
        axs[1].set_title('out-degree distribution')
        axs[1].set_xlabel('Degree (d) log base 10', fontsize="small")
        axs[1].set_ylabel('Frequency log base 10', fontsize="small")
        plt.subplots_adjust(hspace=0.01)
        plt.tight_layout()
        plt.savefig(networkName + 'degree.pdf')
        plt.close()
    if not directed:
        (kin, kout) = get_degree(A)
        print(kin.shape)

        # bin the statistics
        bins = np.linspace(0, np.log10(np.max(kin)), num=binNum)
        digitized = np.digitize(np.log10(kin), bins)
        bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0, len(bins))])
        bin_counts = ma.log10(bin_counts)

        # fit the line
        a, b = ma.polyfit(bins, bin_counts, 1, full=False)
        print('best fit line:\ny = {:.2f} + {:.2f}x'.format(b, a))
        yfit = [b + a * xi for xi in bins]

        plt.scatter(bins, bin_counts)
        plt.plot(bins, yfit, color="orange")
        plt.title('degree distribution')
        plt.xlabel('Degree (d) log base 10', fontsize="small")
        plt.ylabel('Frequency log base 10', fontsize="small")
        plt.ylim(bottom=0)
        # plt.xscale('log')
        # plt.yscale('log')
        plt.tight_layout()
        plt.savefig(networkName + 'degree.pdf')
        plt.close()
def components(prof, p):
    '''
    Interpolates the given data to calculate the U and V components
    at a given pressure.

    Parameters
    ----------
    prof : profile object
        Profile object
    p : number, numpy array
        Pressure (hPa) of a level

    Returns
    -------
    U and V components at the given pressure (kts) : number, numpy array

    '''
    # Note: numpy's interpolation routine expects the x-coordinate array
    # to be in ascending order. Because pressure decreases in the
    # vertical, we reverse the order of the two arrays to satisfy
    # this requirement.
    if prof.wdir.count() == 0:
        return ma.masked, ma.masked
    U = generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.u[::-1])
    V = generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.v[::-1])
    return U, V
def plot(self, ax=None, fig=None, xlim=(-1.5, .5), ylim=(-1.2, 1.5), **kwargs):
    if fig is None and ax is None:
        fig = plt.figure(1, figsize=(6, 6))
    if ax is None:
        ax = fig.add_subplot(111)
    self.ax = ax
    ax.set_xlabel(r'$\log_{10}$ [N II]/H$\alpha$')
    ax.set_ylabel(r'$\log_{10}$ [O III]/H$\beta$')
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    self.x = ma.log10(self.n2 / self.ha)
    self.y = ma.log10(self.o3 / self.hb)
    ax.scatter(self.x, self.y, **kwargs)
    self.kauffmann2003()
    return
def __init__(self, wha, flux_ha, flux_n2):
    # promote each input to a masked array if it is not one already
    for line in ('wha', 'flux_ha', 'flux_n2'):
        if not isinstance(eval(line), ma.masked_array):
            self.__dict__.update({line: ma.masked_array(eval(line))})
        else:
            self.__dict__[line] = eval(line)
    self.x = ma.log10(self.flux_n2 / self.flux_ha)
    self.y = ma.log10(self.wha)
def plot_cov():
    # see http://stackoverflow.com/questions/13784201/matplotlib-2-subplots-1-colorbar
    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 14),
                             sharex=True, sharey=True)
    imshow_kwargs = {
        'cmap': 'Purples',
        'origin': 'upper',
        'interpolation': 'none',
        'vmin': -13,
        'vmax': -7
    }
    s = slice(None, None)

    I, J, V = cov_dr11_raw.T
    cov = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr().toarray()
    print('min(cov): {}'.format(np.min(cov)))
    print('max(cov): {}'.format(np.max(cov)))

    ax = axes[0, 0]
    im = ax.imshow(ma.log10(cov[s, s]), **imshow_kwargs)
    ax.tick_params(direction='in')
    ax.set_ylabel('log(+C)', fontsize=18)
    # fig.colorbar(im, ax=ax)

    ax = axes[1, 0]
    im = ax.imshow(ma.log10(-cov[s, s]), **imshow_kwargs)
    ax.tick_params(direction='in')
    ax.set_ylabel('log(-C)', fontsize=18)
    ax.set_xlabel('DR11', fontsize=18)
    # fig.colorbar(im, ax=ax)

    # I, J, V = cov_mock000_raw.T
    # cov = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr().toarray()
    cov = np.tril(smo_cov)
    print('min(cov): {}'.format(np.min(cov)))
    print('max(cov): {}'.format(np.max(cov)))

    ax = axes[0, 1]
    im = ax.imshow(ma.log10(cov[s, s]), **imshow_kwargs)
    ax.tick_params(direction='in')
    # fig.colorbar(im, ax=ax)

    ax = axes[1, 1]
    im = ax.imshow(ma.log10(-cov[s, s]), **imshow_kwargs)
    ax.tick_params(direction='in')
    ax.set_xlabel('DM', fontsize=18)
    # fig.colorbar(im, ax=ax)

    plt.tight_layout()
def __call__(self, value, clip=None):
    method = self.stretch
    exponent = self.exponent
    midpoint = self.midpoint
    if clip is None:
        clip = self.clip
    if cbook.iterable(value):
        vtype = 'array'
        val = ma.asarray(value).astype(float)
    else:
        vtype = 'scalar'
        val = ma.array([value]).astype(float)
    self.autoscale_None(val)
    vmin, vmax = self.vmin, self.vmax
    if vmin > vmax:
        raise ValueError("minvalue must be less than or equal to maxvalue")
    elif vmin == vmax:
        return 0.0 * val
    else:
        if clip:
            mask = ma.getmask(val)
            val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
        result = (val - vmin) * (1.0 / (vmax - vmin))
        # keep track of negative values so they can be flagged afterwards
        negative = result < 0.
        if self.stretch == 'linear':
            pass
        elif self.stretch == 'log':
            result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                / ma.log10(self.midpoint)
        elif self.stretch == 'sqrt':
            result = ma.sqrt(result)
        elif self.stretch == 'arcsinh':
            result = ma.arcsinh(result / self.midpoint) \
                / ma.arcsinh(1. / self.midpoint)
        elif self.stretch == 'power':
            result = ma.power(result, exponent)
        else:
            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)
        result[negative] = -np.inf
    if vtype == 'scalar':
        result = result[0]
    return result
def test_testUfuncs1(self):
    # Test various functions such as sin, cos.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    assert_(eq(np.cos(x), cos(xm)))
    assert_(eq(np.cosh(x), cosh(xm)))
    assert_(eq(np.sin(x), sin(xm)))
    assert_(eq(np.sinh(x), sinh(xm)))
    assert_(eq(np.tan(x), tan(xm)))
    assert_(eq(np.tanh(x), tanh(xm)))
    with np.errstate(divide='ignore', invalid='ignore'):
        assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
        assert_(eq(np.log(abs(x)), log(xm)))
        assert_(eq(np.log10(abs(x)), log10(xm)))
    assert_(eq(np.exp(x), exp(xm)))
    assert_(eq(np.arcsin(z), arcsin(zm)))
    assert_(eq(np.arccos(z), arccos(zm)))
    assert_(eq(np.arctan(z), arctan(zm)))
    assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
    assert_(eq(np.absolute(x), absolute(xm)))
    assert_(eq(np.equal(x, y), equal(xm, ym)))
    assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
    assert_(eq(np.less(x, y), less(xm, ym)))
    assert_(eq(np.greater(x, y), greater(xm, ym)))
    assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
    assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
    assert_(eq(np.conjugate(x), conjugate(xm)))
    assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
    assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
    assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
    assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def inverse(self, value):
    if not self.scaled():
        raise ValueError("Not invertible until scaled")
    vmin, vmax = self.vmin, self.vmax
    if cbook.iterable(value):
        val = ma.asarray(value)
    else:
        val = value
    if self.stretch == 'linear':
        pass
    elif self.stretch == 'log':
        val = (ma.power(10., val * ma.log10(self.midpoint)) - 1.) / (self.midpoint - 1.)
    elif self.stretch == 'sqrt':
        val = val * val
    elif self.stretch == 'arcsinh':
        val = self.midpoint * \
            ma.sinh(val * ma.arcsinh(1. / self.midpoint))
    elif self.stretch == 'power':
        val = ma.power(val, (1. / self.exponent))
    else:
        raise Exception("Unknown stretch in APLpyNormalize: %s" %
                        self.stretch)
    return vmin + val * (vmax - vmin)
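# Round-trip sketch for the 'log' stretch used by __call__/inverse above,
# written as standalone functions (an illustration, not the original class);
# `m` is the midpoint parameter and is assumed to be > 1.
import numpy as np

def log_stretch(r, m):
    return np.log10(r * (m - 1.0) + 1.0) / np.log10(m)

def log_stretch_inv(v, m):
    return (np.power(10.0, v * np.log10(m)) - 1.0) / (m - 1.0)

r = np.linspace(0.0, 1.0, 5)
assert np.allclose(log_stretch_inv(log_stretch(r, m=10.0), m=10.0), r)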
def average_in_flux(mag, dmag, axis=None):
    # convert magnitudes to fluxes and propagate the uncertainties
    flux = 10**(mag / -2.5)
    dflux = np.log(10) / 2.5 * flux * dmag
    # inverse-variance weighted mean in flux space
    avg_dflux = np.power(np.sum(np.power(dflux, -2), axis), -0.5)
    avg_flux = np.sum(flux * np.power(dflux, -2), axis) * avg_dflux**2
    # convert back to magnitudes
    avg_mag = -2.5 * np.log10(avg_flux)
    avg_dmag = 2.5 / np.log(10) * np.divide(avg_dflux, avg_flux)
    return avg_mag, avg_dmag
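# Usage sketch with made-up values: combine two magnitude measurements of the
# same source by inverse-variance weighting in flux space, as above.
import numpy as np

mags = np.array([19.8, 20.1])
dmags = np.array([0.10, 0.15])
avg_mag, avg_dmag = average_in_flux(mags, dmags)
# the combined uncertainty is smaller than either input uncertainty
assert avg_dmag < dmags.min()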
def transform_non_affine(self, a):
    oneminus = 1 - a
    oneminus[oneminus <= 0.0] = 1e-300
    thres = 1.0 - 10**(-self.nines - 1)
    mask = a > thres
    masked = ma.masked_where(mask, a)
    if masked.mask.any():
        return -ma.log10(oneminus)
    else:
        return -np.log10(oneminus)
def inverse(self, value):
    # ORIGINAL MATPLOTLIB CODE
    if not self.scaled():
        raise ValueError("Not invertible until scaled")
    vmin, vmax = self.vmin, self.vmax

    # CUSTOM APLPY CODE
    if cbook.iterable(value):
        val = ma.asarray(value)
    else:
        val = value
    if self.stretch == 'Linear':
        pass
    elif self.stretch == 'Log':
        val = (ma.power(10., val * ma.log10(self.midpoint)) - 1.) / (self.midpoint - 1.)
    elif self.stretch == 'Sqrt':
        val = val * val
    elif self.stretch == 'Arcsinh':
        val = self.midpoint * \
            ma.sinh(val * ma.arcsinh(1. / self.midpoint))
    elif self.stretch == 'Arccosh':
        val = self.midpoint * \
            ma.cosh(val * ma.arccosh(1. / self.midpoint))
    elif self.stretch == 'Power':
        val = ma.power(val, (1. / self.exponent))
    elif self.stretch == 'Exp':
        val = 1. / np.exp(val)
    else:
        raise Exception("Unknown stretch in APLpyNormalize: %s" %
                        self.stretch)
    return vmin + val * (vmax - vmin)
def get_noise_levels(ncfile):
    # ----------------
    # Open NetCDF file
    # ----------------
    print('Opening NetCDF file ' + ncfile)
    dataset = nc4.Dataset(ncfile, 'r+', format='NETCDF3_CLASSIC')

    nray = len(dataset.dimensions['time'])
    ngate = len(dataset.dimensions['range'])

    elv = np.transpose(np.tile(dataset.variables['elevation'][:], (ngate, 1)))
    rng = np.tile(dataset.variables['range'][:], (nray, 1))
    height = rng * np.sin(elv * np.pi / 180.)

    zh = dataset.variables['ZED_H'][:]
    zed = ma.masked_where(height < 14000, zh)
    rngkm = ma.masked_where(rng <= 0.0, rng / 1000.)
    range2 = 20. * ma.log10(rngkm)

    zh[:] = zed - range2
    zv = zh.copy()
    zv[:] = zh[:] - dataset.variables['ZDR'][:]
    zx = zh.copy()
    zx[:] = zh[:] + dataset.variables['LDR'][:]

    nezharr = ma.mean(zh, axis=1)
    nezherr = ma.std(zh, axis=1)
    nezvarr = ma.mean(zv, axis=1)
    nezverr = ma.std(zv, axis=1)
    nezxarr = ma.mean(zx, axis=1)
    nezxerr = ma.std(zx, axis=1)

    nezharr = ma.masked_where(nezherr > MAX_ERR, nezharr)
    nezvarr = ma.masked_where(nezverr > MAX_ERR, nezvarr)
    nezxarr = ma.masked_where(nezxerr > MAX_ERR, nezxarr)

    nezh = ma.median(nezharr)
    nezv = ma.median(nezvarr)
    nezx = ma.median(nezxarr)

    dataset.close()
    return np.round(nezh, 2), np.round(nezv, 2), np.round(nezx, 2)
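# Illustrative sketch (synthetic, made-up numbers): the 20*ma.log10(rngkm)
# term above removes the 1/r^2 spreading loss, so gates that contain only
# noise collapse to a single range-independent noise level.
import numpy as np
import numpy.ma as ma

rng_km = ma.masked_less_equal(np.array([0.0, 1.0, 10.0, 100.0]), 0.0)
zh_measured = ma.array([0.0, -40.0, -20.0, 0.0], mask=rng_km.mask)
noise_dbz = zh_measured - 20.0 * ma.log10(rng_km)
# every unmasked gate yields the same -40 dB noise estimate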
def inverse(self, value):
    # ORIGINAL MATPLOTLIB CODE
    if not self.scaled():
        raise ValueError("Not invertible until scaled")
    vmin, vmax = self.vmin, self.vmax

    # CUSTOM APLPY CODE
    if cbook.iterable(value):
        val = ma.asarray(value)
    else:
        val = value
    if self.stretch == 'linear':
        pass
    elif self.stretch == 'log':
        val = (ma.power(10., val * ma.log10(self.midpoint)) - 1.) / (self.midpoint - 1.)
    elif self.stretch == 'sqrt':
        val = val * val
    elif self.stretch == 'arcsinh':
        val = self.midpoint * \
            ma.sinh(val * ma.arcsinh(1. / self.midpoint))
    elif self.stretch == 'square':
        val = ma.power(val, (1. / 2))
    elif self.stretch == 'power':
        val = ma.power(val, (1. / self.exponent))
    else:
        raise Exception("Unknown stretch in APLpyNormalize: %s" %
                        self.stretch)
    return vmin + val * (vmax - vmin)
def transform_non_affine(self, a):
    """
    This transform takes an Nx1 ``numpy`` array and returns a
    transformed copy. Since the range of the Mercator scale is limited
    by the user-specified threshold, the input array must be masked to
    contain only valid values. ``matplotlib`` will handle masked arrays
    and remove the out-of-range data from the plot. Importantly, the
    ``transform`` method *must* return an array that is the same shape
    as the input array, since these values need to remain synchronized
    with values in the other dimension.
    """
    masked = ma.masked_where(((a - self.zero) * self.sign <= 0.0),
                             (a - self.zero) * self.sign)
    if masked.mask.any():
        return ma.log10(masked) / np.log10(self.base)
    else:
        return np.log10((a - self.zero) * self.sign) / np.log10(self.base)
def vtmp(prof, p):
    '''
    Interpolates the given data to calculate a virtual temperature
    at a given pressure.

    Parameters
    ----------
    prof : profile object
        Profile object
    p : number, numpy array
        Pressure (hPa) of the level for which virtual temperature is desired

    Returns
    -------
    Virtual temperature (C) at the given pressure : number, numpy array

    '''
    return generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.vtmp[::-1])
def transform_non_affine(self, a):
    lower = a[np.where(a <= change)]
    greater = a[np.where(a > change)]
    if lower.size:
        lower = self._handle_nonpos(lower * 10.0) / 10.0
        if isinstance(lower, ma.MaskedArray):
            lower = ma.log10(lower)
        else:
            lower = np.log10(lower)
        lower = factor * lower
    if greater.size:
        greater = (factor * np.log10(change) + (greater - change))
    # Only low
    if not greater.size:
        return lower
    # Only high
    if not lower.size:
        return greater
    return np.concatenate((lower, greater))
def output_peaks_mvalue_2wig_file(pks1_uni, pks2_uni, merged_pks,
                                  comparison_name):
    """Output peaks with normed M values and P values as wig files."""
    print('output wig files ... ')
    peaks = _add_peaks(_add_peaks(pks1_uni, merged_pks), pks2_uni)

    f_2write = open('_'.join([comparison_name, 'peaks_Mvalues.wig']), 'w')
    f_2write.write('browser position chr11:5220000-5330000\n')
    f_2write.write('track type=wiggle_0 name=%s' % comparison_name +
                   ' visibility=full autoScale=on color=255,0,0 ' +
                   ' yLineMark=0 yLineOnOff=on priority=10\n')
    for chr_id in peaks.keys():
        f_2write.write('variableStep chrom=' + chr_id + ' span=100\n')
        pks_chr = peaks[chr_id]
        sorted_pks_chr = _sort_peaks_list(pks_chr, 'summit')
        # write sorted peak summit and M value to file
        for pk in sorted_pks_chr:
            f_2write.write('\t'.join(['%d' % pk.summit,
                                      '%s\n' % str(pk.normed_mvalue)]))
    f_2write.close()

    f_2write = open('_'.join([comparison_name, 'peaks_Pvalues.wig']), 'w')
    f_2write.write('browser position chr11:5220000-5330000\n')
    f_2write.write('track type=wiggle_0 name=%s(-log10(p-value))' % comparison_name +
                   ' visibility=full autoScale=on color=255,0,0 ' +
                   ' yLineMark=0 yLineOnOff=on priority=10\n')
    for chr_id in peaks.keys():
        f_2write.write('variableStep chrom=' + chr_id + ' span=100\n')
        pks_chr = peaks[chr_id]
        sorted_pks_chr = _sort_peaks_list(pks_chr, 'summit')
        # write sorted peak summit and -log10(P value) to file
        for pk in sorted_pks_chr:
            f_2write.write('\t'.join(['%d' % pk.summit,
                                      '%s\n' % str(-log10(pk.pvalue))]))
    f_2write.close()
def temp(prof, p):
    '''
    Interpolates the given data to calculate a temperature
    at a given pressure.

    Parameters
    ----------
    prof : profile object
        Profile object
    p : number, numpy array
        Pressure (hPa) of the level for which temperature is desired

    Returns
    -------
    Temperature (C) at the given pressure : number, numpy array

    '''
    # Note: numpy's interpolation routine expects the x-coordinate array
    # to be in ascending order. Because pressure decreases in the
    # vertical, we reverse the order of the two arrays to satisfy
    # this requirement.
    return generic_interp_pres(ma.log10(p), prof.logp[::-1], prof.tmpc[::-1])
def lin2db(array: np.ndarray, scale: int = 10) -> np.ndarray:
    """Linear to dB conversion."""
    if ma.isMaskedArray(array):
        return scale * ma.log10(array)
    return scale * np.log10(array)
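# Usage sketch: masked elements stay masked through the conversion, while a
# plain ndarray takes the np.log10 path.
import numpy as np
import numpy.ma as ma

power = ma.masked_invalid(np.array([1.0, 100.0, np.nan]))
print(lin2db(power))             # [0.0 20.0 --]
print(lin2db(np.array([2.0])))   # ~[3.01]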
def transform_non_affine(self, a):
    a = self._handle_nonpos(a * 10.0)
    if isinstance(a, ma.MaskedArray):
        return ma.log10(a)
    return np.log10(a)
def main():
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # targets to fit
    parser.add_argument("--name", type=str, default=None,
                        help="base name of combined skim file")
    parser.add_argument("--subsample-step", type=int, default=1000,
                        help="step size used for subsampling observations")
    parser.add_argument("--dont-save", action="store_true",
                        help="dont save delta field (just do all the preprocessing)")
    args = parser.parse_args()

    # import data
    skim = h5py.File(args.name + '.hdf5', 'r')
    norm = skim['norm'][:][:, np.newaxis]
    loglam = skim['loglam'][:]
    wave = np.power(10.0, loglam)
    quasar_redshifts = skim['z'][:]

    linear_continuum = h5py.File(args.name + '-linear-continuum.hdf5', 'r')
    params_a = linear_continuum['params_a'][:]
    params_b = linear_continuum['params_b'][:]
    continuum = linear_continuum['continuum'][:]
    continuum_wave = linear_continuum['continuum_wave'][:]
    continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave,
                                                          continuum, s=0, ext=1)
    wave_lya = linear_continuum.attrs['wave_lya']
    abs_alpha = linear_continuum.attrs['abs_alpha']
    abs_beta = linear_continuum.attrs['abs_beta']
    forest_wave_ref = linear_continuum.attrs['forest_wave_ref']

    print('Adjusting weights for pipeline variance and LSS variance...')

    forest_min_z = linear_continuum.attrs['forest_min_z']
    forest_max_z = linear_continuum.attrs['forest_max_z']
    forest_dz = 0.1
    forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)

    var_lss = scipy.interpolate.UnivariateSpline(
        forest_z_bins, 0.05 + 0.06 * (forest_z_bins - 2.0)**2, s=0)
    var_pipe_scale = scipy.interpolate.UnivariateSpline(
        forest_z_bins, 0.7 + 0.2 * (forest_z_bins - 2.0)**2, s=0)

    forest_pixel_redshifts = wave / wave_lya - 1
    abs_coefs = abs_alpha * np.power(1 + forest_pixel_redshifts, abs_beta)
    forest_wave_refs = forest_wave_ref * (1 + quasar_redshifts)

    def model_flux(a, b):
        return a * np.power(wave / forest_wave_refs[:, np.newaxis], b) \
            * continuum_interp(wave / (1 + quasar_redshifts[:, np.newaxis])) \
            * np.exp(-abs_coefs)

    mflux = model_flux(params_a[:, np.newaxis], params_b[:, np.newaxis])

    # (1.0 + quasar_redshifts[:,np.newaxis])*forest_wave/args.wave_lya - 1.0
    print(forest_pixel_redshifts.shape)

    pixel_mask = skim['mask'][:]
    print(pixel_mask.shape)

    flux = np.ma.MaskedArray(skim['flux'][:], mask=pixel_mask)
    ivar = np.ma.MaskedArray(skim['ivar'][:], mask=pixel_mask)

    delta_flux = flux / mflux - 1.0
    delta_ivar = ivar * mflux * mflux

    delta_weight = delta_ivar * var_pipe_scale(forest_pixel_redshifts)
    delta_weight = delta_weight / (1 + delta_weight * var_lss(forest_pixel_redshifts))

    redshift_order = np.argsort(quasar_redshifts)
    export_exact_image(args.name + '-delta-flux.png',
                       delta_flux[redshift_order][::args.subsample_step],
                       dpi=100, vmin=-5, vmax=5,
                       cmap=plt.get_cmap('bwr'), origin='lower')
    export_exact_image(args.name + '-delta-weight.png',
                       ma.log10(delta_flux[redshift_order][::args.subsample_step]),
                       dpi=100, vmin=-5, vmax=2,
                       cmap=plt.get_cmap('Purples'), origin='lower')
    export_exact_image(args.name + '-delta-mask.png',
                       pixel_mask[redshift_order][::args.subsample_step],
                       dpi=100, origin='lower')

    print('Computing mean delta...')

    mask_params = (params_a > .1) & (params_a < 10) & (params_b > -10) & (params_b < 10)

    delta_mean = ma.average(delta_flux[mask_params], axis=0)
    delta_mean_weighted = ma.average(delta_flux[mask_params],
                                     weights=delta_weight[mask_params], axis=0)
    delta_mean_ivar_weighted = ma.average(delta_flux[mask_params],
                                          weights=delta_ivar[mask_params], axis=0)

    plt.figure(figsize=(12, 9))
    plt.plot(wave, delta_mean, label='Unweighted Mean')
    plt.plot(wave, delta_mean_weighted, label='LSS weighted Mean')
    plt.plot(wave, delta_mean_ivar_weighted, label='Ivar weighted Mean')
    # plt.ylim(0.06*np.array([-1,1]))
    plt.xlabel(r'Observed Wavelength ($\AA$)')
    plt.ylabel(r'Delta Mean')
    plt.grid()
    plt.legend()
    plt.savefig(args.name + '-lssweighted-delta-mean.png', dpi=100,
                bbox_inches='tight')
    plt.close()

    if args.dont_save:
        return -1

    outfile = h5py.File(args.name + '-delta.hdf5', 'w')

    # copy attributes from input files
    for attr_key in skim.attrs:
        outfile.attrs[attr_key] = skim.attrs[attr_key]
    # it's okay to overwrite the few that were already copied, I added a few
    # attrs to the combined skim file and dont want to run the whole chain just yet
    for attr_key in linear_continuum.attrs:
        outfile.attrs[attr_key] = linear_continuum.attrs[attr_key]

    # create los group
    lines_of_sight = outfile.create_group('lines_of_sight')

    outfile.create_dataset('delta_mean', data=delta_mean.data)

    # loop over targets
    progress_bar = ProgressBar(widgets=[Percentage(), Bar()],
                               maxval=len(quasar_redshifts)).start()
    for i, z in enumerate(quasar_redshifts):
        progress_bar.update(i)
        if not mask_params[i]:
            # print('fit param outside nominal range')
            continue
        z = quasar_redshifts[i]
        a = params_a[i]
        b = params_b[i]
        norm_i = norm[i]
        meta = skim['meta'][i]

        assert norm_i > 0

        ra = float(meta['ra'])
        dec = float(meta['dec'])
        thing_id = meta['thing_id']
        plate = meta['plate']
        mjd = meta['mjd']
        fiber = meta['fiber']

        # save to hdf5 file
        los = lines_of_sight.create_group(str(thing_id))
        los.attrs['plate'] = plate
        los.attrs['mjd'] = mjd
        los.attrs['fiber'] = fiber
        los.attrs['ra'] = ra
        los.attrs['dec'] = dec
        los.attrs['z'] = z
        los.attrs['p0'] = a
        los.attrs['p1'] = b
        los.create_dataset('loglam', data=loglam, dtype='f4')
        los.create_dataset('delta', data=(delta_flux[i] - delta_mean_weighted),
                           dtype='f8')
        los.create_dataset('weight', data=delta_weight[i], dtype='f8')
        los.create_dataset('r_comov', data=np.zeros_like(loglam), dtype='f4')
        # los.create_dataset('ivar', data=ivar[i]/(norm_i*norm_i), dtype='f4')
        los.create_dataset('ivar', data=delta_ivar[i], dtype='f4')

    outfile.close()
    progress_bar.finish()
def transform(self, a):
    a = _mask_non_positives(a * 10.0)
    if isinstance(a, MaskedArray):
        return ma.log10(a)
    return np.log10(a)
def _lin2log(*args: ndarray) -> list:
    return [ma.log10(x) for x in args]
# mask land values
Land = ma.getmask(varname)
Landdt = rt.AddDepthTime(RomsFile, Land)

Xratio = ma.array(x_rat, mask=Land).flatten()
Yratio = ma.array(y_rat, mask=Land).flatten()

# remove mask
Xratio = Xratio[~Xratio.mask]
Yratio = Yratio[~Yratio.mask]

X_good = Xratio != 0
Y_good = Yratio != 0

xlog = ma.log10(Xratio[X_good])
ylog = ma.log10(Yratio[Y_good])

# depth
romsvars = {'h': RomsNC.variables['h'][:],
            'zeta': RomsNC.variables['zeta'][:]}

# compute depth at rho points
depth = dep._set_depth_T(RomsFile, None, 'rho', romsvars['h'], romsvars['zeta'])
depth = ma.array(depth, mask=Land).flatten()

_dep = depth[X_good]
depthLR = _dep[xlog > 1]
depthLR = depthLR[~depthLR.mask]
where_are_NaNs = isnan(img_masked1)
img_masked1[where_are_NaNs] = 0

plt.imshow(img_masked1, origin='lower', aspect='auto')
plt.pcolor(img_masked1, norm=LogNorm())
plt.set_cmap('seismic')
cbar = plt.colorbar()

with np.errstate(divide='ignore', invalid='ignore'):
    ratio = np.true_divide(img_masked, img_masked1)
    ratio[ratio == np.inf] = 0
    ratio = np.nan_to_num(ratio)

ratio_norm = ratio / 2.76
balmer = 2.5 * (ma.log10(ratio_norm))

plt.imshow(balmer, origin='lower', aspect='auto')
plt.pcolor(balmer, norm=LogNorm())
plt.set_cmap('jet')
cbar = plt.colorbar()

ratio_norm = ratio / 2.76
balmer = 2.5 * (ma.log(ratio_norm))
print(balmer.filled(0))

outfile = 'balmer.fits.gz'
hdu = fits.PrimaryHDU(balmer)
hdu.writeto(outfile, overwrite=True)
def BHOSS2fits(filename, freq, source, date, history, user, plot=1, show=False):
    # some definitions
    DEGREE = 3.141592653589 / 180.0
    HOUR = 15.0 * DEGREE
    RADPERAS = DEGREE / 3600.0
    RADPERUAS = RADPERAS * 1.e-6

    # get file name
    filename_load = filename
    tmp = filename.split('_')

    # get source position
    loc = SkyCoord.from_name(source)

    # get RA and DEC in degree
    ra = loc.ra.deg
    dec = loc.dec.deg

    # convert date to mjd (modified julian date)
    modjuldate = (aTime(date)).mjd

    # get BHOSS GRRT file based on Z. Younsi script
    # --> needs to be updated once the BHOSS header is modified!!!!
    #
    # First header line: [image width, offset, resolution, # of observational frequencies]
    header_1 = np.genfromtxt(filename, max_rows=1)
    # Second header line: [observational time, inclination, BH spin parameter,
    # luminosity F_nu correction to erg Hz, Jansky correction to Jy Hz]
    header_2 = np.genfromtxt(filename, skip_header=1, max_rows=1)
    # Third header line: observational frequencies of interest
    header_3 = np.genfromtxt(filename, skip_header=2, max_rows=1)

    width = header_1[0]
    offset = header_1[1]
    M = int(header_1[2])
    s1 = width + offset
    s2 = 2 * width / (M - 1)
    N_obs_freqs = header_1[3]

    time = header_2[0]
    inclination = header_2[1]
    phi = header_2[2]
    spin = header_2[3]
    L_corr = header_2[4]
    Jansky_corr = header_2[5]

    if source == 'SgrA*':
        Micro_Arcsecond_Corr = 5.04975  # New value based on Boehle et al. 2016
    if source == 'M 87':
        Micro_Arcsecond_Corr = 3.622197344489511  # New value from Yosuke for M87

    width_scaled = Micro_Arcsecond_Corr * width  # Scale from r_g to micro-arcseconds

    # find the selected frequency within the computed freqs
    ind = np.where(header_3 == freq)
    if len(ind[0]) < 1:
        sys.exit('EXIT: freq not in GRRT file. Available freqs are %s' % str(header_3))
    else:
        freq_ID = ind[0][0] + 3
    print(header_3)
    print(header_3[freq_ID - 3])

    # Now read in all image data
    ascii2 = np.loadtxt(filename_load, skiprows=3, usecols=(0, 1, freq_ID))
    data2 = ascii2.reshape([M, M, 3])

    # Convert from indices to (alpha, beta), in units of r_g, on the image plane
    x = -s1 + s2 * (data2[:, :, 0] - 1)
    y = -s1 + s2 * (data2[:, :, 1] - 1)

    # Convert (alpha, beta) into micro-arcseconds
    x = Micro_Arcsecond_Corr * x
    y = Micro_Arcsecond_Corr * y

    xmax = np.amax(x)
    xmin = np.amin(x)
    ymax = np.amax(y)
    ymin = np.amin(y)

    # flux in Jansky
    jansky = (data2[:, :, 2] * Jansky_corr)

    # create pixel size
    dxorg = (xmax - xmin) / x.shape[0]
    dyorg = (ymax - ymin) / y.shape[0]
    dim = x.shape[0]
    print('image resolution %f' % (dxorg))
    print('org res. total flux:', ma.sum(jansky), 'max flux:', ma.amax(jansky))

    # create fits file
    # Create header and fill in some values
    header = fits.Header()
    header['AUTHOR'] = user
    header['OBJECT'] = source
    header['CTYPE1'] = 'RA---SIN'
    header['CTYPE2'] = 'DEC--SIN'
    header['CDELT1'] = -dxorg * RADPERUAS / DEGREE
    header['CDELT2'] = dxorg * RADPERUAS / DEGREE
    header['OBSRA'] = ra
    header['OBSDEC'] = dec
    header['FREQ'] = freq
    header['MJD'] = float(modjuldate)
    header['TELESCOP'] = 'VLBI'
    header['BUNIT'] = 'JY/PIXEL'
    header['STOKES'] = 'I'
    header['HISTORY'] = history

    hdu = fits.PrimaryHDU(jansky, header=header)
    hdulist = fits.HDUList([hdu])

    # Save fits
    tmp = filename.split('/')
    outname = tmp[-1]
    hdulist.writeto('FITS/%s_%i_%iGHz.fits' % (outname[:-4], dim, (freq / 1e9)),
                    overwrite=True)

    if plot == 1:
        # create image (normalised)
        fonts = 12
        cmap = 'cubehelix'
        fig = plt.figure(figsize=(7, 8))
        plt.subplots_adjust(left=0.15, bottom=0.1, right=0.95, top=0.85,
                            wspace=0.00001, hspace=0.00001)
        ax = fig.add_subplot(111, frame_on=True, aspect='equal', facecolor='k')
        # i1 = ax.imshow(jansky/ma.amax(jansky), origin='lower', vmin=0, vmax=1,
        #                extent=[xmax, xmin, ymin, ymax], interpolation="bicubic", cmap=cmap)
        i1 = ax.imshow(ma.log10(jansky), origin='lower', vmin=-10, vmax=ma.log10(0.05),
                       extent=[xmax, xmin, ymin, ymax], interpolation="bicubic", cmap=cmap)
        ax.annotate(r'$\mathrm{%s}$' % (source), xy=(0.1, 0.91),
                    xycoords='axes fraction', fontsize=18,
                    horizontalalignment='left', verticalalignment='bottom', color='w')
        ax.annotate(r'$\mathrm{{\nu=%i\,GHz}}$' % (freq / 1e9), xy=(0.1, 0.81),
                    xycoords='axes fraction', fontsize=18,
                    horizontalalignment='left', verticalalignment='bottom', color='w')

        # set axis
        plt.xlabel('relative R.A [$\mu$as]', fontsize=fonts)
        plt.ylabel('relative DEC [$\mu$as]', fontsize=fonts)

        # set position of colorbar
        p1 = ax.get_position().get_points().flatten()
        ax11 = fig.add_axes([p1[0], p1[3], p1[2] - p1[0], 0.02])
        t1 = plt.colorbar(i1, cax=ax11, orientation='horizontal', format='%1.1f')
        # t1.set_label(r'$\rm{S/S}_{\rm{max}}$', fontsize=fonts+4, labelpad=-65)
        t1.set_label(r'$\log_{10}(S)\,\mathrm{[Jy/pixel]}$', fontsize=fonts + 4,
                     labelpad=-65)
        t1.ax.xaxis.set_ticks_position('top')
        t1.ax.tick_params(labelsize=fonts)

        # set xmax left and ticks
        ax.set_xlim(xmax, xmin)
        ax.set_ylim(ymin, ymax)
        ax.yaxis.set_major_locator(plt.MultipleLocator(20))
        ax.yaxis.set_minor_locator(plt.MultipleLocator(5))
        ax.xaxis.set_major_locator(plt.MultipleLocator(20))
        ax.xaxis.set_minor_locator(plt.MultipleLocator(5))
        for spine in ax.spines.values():
            spine.set_edgecolor('w')

        # settings for axes and tickmarks
        for label in ax.xaxis.get_ticklabels():
            label.set_fontsize(fonts)
        for label in ax.yaxis.get_ticklabels():
            label.set_fontsize(fonts)
        for tick in ax.xaxis.get_major_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(14)
            tick.tick2line.set_markersize(14)
        for tick in ax.xaxis.get_minor_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(10)
            tick.tick2line.set_markersize(10)
        for tick in ax.yaxis.get_major_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(14)
            tick.tick2line.set_markersize(14)
        for tick in ax.yaxis.get_minor_ticks():
            tick.tick1line.set_color('w')
            tick.tick2line.set_color('w')
            tick.tick1line.set_markersize(10)
            tick.tick2line.set_markersize(10)

        # save image
        plt.savefig('IMAGE/%s_%i_%iGHz.pdf' % (source, dim, (freq / 1e9)),
                    dpi=150, bbox_inches='tight', pad_inches=0.04)
        if show:
            plt.show()

    return {'image': jansky, 'dx': dxorg, 'dy': dyorg}
# Inner step of the Levinson-Durbin recursion (fragment: assumes an enclosing
# loop over the model order m, with Rx, a, k, P built up by earlier iterations).
for i in range(1, m):
    ka += a[m-2, i-1] * Rx[m-i]
ka += Rx[m]
kb = -P[m-1]
k.append(ka / kb)
a[m-1, m-1] = ka / kb
for i in range(1, m):
    a[m-1, i-1] = a[m-2, i-1] + k[m-1] * a[m-2, m-i-1]
P.append(P[m-1] * (1 - k[m-1]**2))

hb = [1]
for i in range(0, p):
    hb.append(a[p-1, i])
# remove the 0th-order minimum prediction-error power from P
P.remove(P[0])
ha = [1]

w, H = signal.freqz(ha, hb)
G_2 = P[p-1]
PSD = G_2 * ((np.abs(H))**2)
PSD1 = 10 * log10(PSD)

plt.figure(figsize=(10, 28))
plt.subplot(511)
plt.plot(x_n)
plt.grid(True, linestyle="--", color='gray', linewidth='0.5', axis='both')
plt.subplot(512)
plt.plot(w / (2 * pi), abs(PSD1))
plt.grid(True, linestyle="--", color='gray', linewidth='0.5', axis='both')

f = findpeak(w, PSD1, N, f1, f2)
print(f)
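# Standalone sketch of the spectrum-evaluation step above, with assumed AR
# coefficients: for an all-pole model with innovation power G^2, the PSD is
# G^2 / |A(e^jw)|^2, which signal.freqz([1], A) evaluates as H = 1/A.
import numpy as np
from scipy import signal

A = np.array([1.0, -1.5, 0.9])   # hypothetical stable AR(2) polynomial A(z)
G_2 = 1.0                        # assumed innovation (white-noise) power
w, H = signal.freqz([1.0], A)    # H(e^jw) = 1 / A(e^jw)
PSD_dB = 10.0 * np.log10(G_2 * np.abs(H) ** 2)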
def draw_figs_to_show_data(pks1_uni, pks2_uni, merged_pks, pks1_name,
                           pks2_name, ma_fit, reads1_name, reads2_name):
    """Draw four figures to show data before and after rescaling."""
    pks_3set = [pks1_uni, pks2_uni, merged_pks]
    pks1_name = ' '.join([pks1_name, 'unique'])
    pks2_name = ' '.join([pks2_name, 'unique'])
    merged_pks_name = 'merged common peaks'
    pks_names = [pks1_name, pks2_name, merged_pks_name]
    colors = 'bgr'

    a_max = 0
    a_min = 10000
    plt.figure(1).set_size_inches(16, 12)
    for (idx, pks) in enumerate(pks_3set):
        mvalues, avalues = get_peaks_mavalues(pks)
        if len(avalues) != 0:
            a_max = max(max(avalues), a_max)
            a_min = min(min(avalues), a_min)
        plt.scatter(avalues, mvalues, s=10, c=colors[idx])
    plt.xlabel('A value')
    plt.ylabel('M value')
    plt.grid(axis='y')
    plt.legend(pks_names, loc='best')
    plt.title('before rescale')

    # plot the fitting model into figure 1
    x = np.arange(a_min, a_max, 0.01)
    y = ma_fit[1] * x + ma_fit[0]
    plt.plot(x, y, '-', color='k')
    plt.savefig('before_rescale.png')

    # plot the scatter plot of read counts in merged common peaks
    # between the two ChIP-seq sets
    plt.figure(2).set_size_inches(16, 12)
    rd_min = 1000
    rd_max = 0
    rds_density1, rds_density2 = [], []
    for key in merged_pks.keys():
        for pk in merged_pks[key]:
            rds_density1.append(pk.read_density1)
            rds_density2.append(pk.read_density2)
    rd_max = max(max(log2(rds_density1)), rd_max)
    rd_min = min(min(log2(rds_density1)), rd_min)
    plt.scatter(log2(rds_density1), log2(rds_density2), s=10, c='r',
                label=merged_pks_name, alpha=0.5)
    plt.xlabel(' log2 read density' + ' by ' + '"' + reads1_name + '" reads')
    plt.ylabel(' log2 read density' + ' by ' + '"' + reads2_name + '" reads')
    plt.grid(axis='y')
    plt.legend(loc='upper left')
    plt.title('Fitting Model via common peaks')
    rx = np.arange(rd_min, rd_max, 0.01)
    ry = (2 - ma_fit[1]) * rx / (2 + ma_fit[1]) - 2 * ma_fit[0] / (2 + ma_fit[1])
    plt.plot(rx, ry, '-', color='k')
    plt.savefig('log2_read_density.png')

    # plot the MA plot after rescale
    plt.figure(3).set_size_inches(16, 12)
    for (idx, pks) in enumerate(pks_3set):
        normed_mvalues, normed_avalues = get_peaks_normed_mavalues(pks)
        plt.scatter(normed_avalues, normed_mvalues, s=10, c=colors[idx])
    plt.xlabel('A value')
    plt.ylabel('M value')
    plt.grid(axis='y')
    plt.legend(pks_names, loc='best')
    plt.title('after rescale')
    plt.savefig('after_rescale.png')

    # generate MA plot for this set of peaks together with p-value
    plt.figure(4).set_size_inches(16, 12)
    for (idx, pks) in enumerate(pks_3set):
        normed_mvalues, normed_avalues = get_peaks_normed_mavalues(pks)
        colors = -log10(get_peaks_pvalues(pks))
        for i, c in enumerate(colors):
            if c > 50:
                colors[i] = 50
        plt.scatter(normed_avalues, normed_mvalues, s=10, c=colors, cmap='jet')
    plt.colorbar()
    plt.grid(axis='y')
    plt.xlabel('A value')
    plt.ylabel('M value')
    plt.title('-log10(P-value)')
    plt.savefig('-log10_P-value.png')
    plt.close()
def _lin2log(*args):
    return [ma.log10(x) for x in args]
def transform_non_affine(self, a):
    masked = ma.masked_where(a > 1-10**(-1-self.nines), a)
    if masked.mask.any():
        return -ma.log10(1-a)
    else:
        return -np.log10(1-a)
def transform_non_affine(self, a):
    masked = ma.masked_where(a > 1 - 10**(-1 - self.nines), a)
    if masked.mask.any():
        return -ma.log10(1 - a)
    else:
        return -np.log10(1 - a)
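# Behavioral sketch of the "nines" transform variants above, as a standalone
# function; unlike the originals it applies the mask it computes, so values
# too close to 1 propagate as masked rather than relying on the ma code path.
import numpy as np
import numpy.ma as ma

def nines_transform(a, nines=2):
    a = np.asarray(a, dtype=float)
    masked = ma.masked_where(a > 1 - 10**(-1 - nines), a)
    if masked.mask.any():
        return -ma.log10(1 - masked)
    return -np.log10(1 - a)

print(nines_transform([0.5, 0.9, 0.9999]))  # last value comes back masked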
def __call__(self, value, clip=None):
    # read in parameters
    method = self.stretch
    exponent = self.exponent
    midpoint = self.midpoint

    # ORIGINAL MATPLOTLIB CODE
    if clip is None:
        clip = self.clip
    if cbook.iterable(value):
        vtype = 'array'
        val = ma.asarray(value).astype(float)
    else:
        vtype = 'scalar'
        val = ma.array([value]).astype(float)
    self.autoscale_None(val)
    vmin, vmax = self.vmin, self.vmax
    if vmin > vmax:
        raise ValueError("minvalue must be less than or equal to maxvalue")
    elif vmin == vmax:
        return 0.0 * val
    else:
        if clip:
            mask = ma.getmask(val)
            val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
        result = (val - vmin) * (1.0 / (vmax - vmin))

        # CUSTOM APLPY CODE
        # Keep track of negative values
        negative = result < 0.
        if self.stretch == 'linear':
            pass
        elif self.stretch == 'log':
            result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                / ma.log10(self.midpoint)
        elif self.stretch == 'sqrt':
            result = ma.sqrt(result)
        elif self.stretch == 'arcsinh':
            result = ma.arcsinh(result / self.midpoint) \
                / ma.arcsinh(1. / self.midpoint)
        elif self.stretch == 'power':
            result = ma.power(result, exponent)
        else:
            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)

        # Now set previously negative values to 0, as these are
        # different from true NaN values in the FITS image
        result[negative] = -np.inf

    if vtype == 'scalar':
        result = result[0]
    return result
def find_max(array_like):
    no_zeros = array_like[nonzero(array_like)]
    with errstate(all='ignore'):
        max_value = nanmax(10 ** log10(no_zeros))
    return max_value
def __call__(self, value, clip=None):
    # read in parameters
    method = self.stretch
    exponent = self.exponent
    midpoint = self.midpoint

    # ORIGINAL MATPLOTLIB CODE
    if clip is None:
        clip = self.clip
    if cbook.iterable(value):
        vtype = 'array'
        val = ma.asarray(value).astype(float)
    else:
        vtype = 'scalar'
        val = ma.array([value]).astype(float)
    self.autoscale_None(val)
    vmin, vmax = self.vmin, self.vmax
    if vmin > vmax:
        raise ValueError("minvalue must be less than or equal to maxvalue")
    elif vmin == vmax:
        return 0.0 * val
    else:
        if clip:
            mask = ma.getmask(val)
            val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
        result = (val - vmin) * (1.0 / (vmax - vmin))

        # CUSTOM APLPY CODE
        # Keep track of negative values
        negative = result < 0.
        if self.stretch == 'linear':
            pass
        elif self.stretch == 'log':
            result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                / ma.log10(self.midpoint)
        elif self.stretch == 'sqrt':
            result = ma.sqrt(result)
        elif self.stretch == 'arcsinh':
            result = ma.arcsinh(result / self.midpoint) \
                / ma.arcsinh(1. / self.midpoint)
        elif self.stretch == 'power':
            result = ma.power(result, exponent)
        else:
            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)

        # Now set previously negative values to 0, as these are
        # different from true NaN values in the FITS image
        result[negative] = -np.inf

    if vtype == 'scalar':
        result = result[0]
    return result