def main(spec, out, freqf):
    """
    Converts ASCII spectra into frequency-brightness tables written with a
    commented header.
    """

    specs = glob.glob(spec)

    # Sort the SBs
    crrls.natural_sort(specs)

    for s in specs:

        data = np.loadtxt(s, comments='#')
        freq = data[:, 0] * freqf
        tb = data[:, 1]

        # Write the processed spectrum
        tbtable = Table([freq, tb], names=['FREQ MHz', 'Tb Jy/BEAM'])
        ascii.write(tbtable, out, format='commented_header')
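# Usage sketch for main() above; the file names and the frequency scaling
# factor are hypothetical placeholders, not values from the source:
# main('spec_SB120.ascii', 'spec_SB120_tb.ascii', freqf=1e-6)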
def find_good_lines(spec, basename, transition, transs, z, vel_shift, x_col):
    """
    Logs the spectra of `transition` lines that are separated by more than
    `vel_shift` km/s from every line of the transitions in `transs`.
    """

    specs = glob.glob(spec)
    crrls.natural_sort(specs)

    with open('{0}_good_lines.log'.format(transition), 'w') as log:

        for s in specs:

            # Determine the subband name
            sb = re.findall(r'SB\d+', s)[0]

            data = np.loadtxt(s)
            x = data[:, x_col]

            qns, freqs = crrls.find_lines_sb(x, transition, z)

            # Frequencies of the other transitions inside this subband
            of = []
            for t in transs:
                n, f = crrls.find_lines_sb(x, t, z)
                of.append(list(f))
            of = np.array(list(_f for of_ in of for _f in of_))

            for i, freq in enumerate(freqs):
                vel = crrls.freq2vel(freq * 1e6, freq * 1e6) / 1e3
                ovel = crrls.freq2vel(freq * 1e6, of * 1e6) / 1e3
                diff = [abs(v - vel) for v in ovel]
                if all(d > vel_shift for d in diff):
                    log.write('{0}_{1}_n{2:.0f}.ascii\n'.format(basename, sb,
                                                                qns[i]))
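# Usage sketch for find_good_lines(); the file pattern, basename, transition
# names, redshift and minimum separation below are assumed placeholders:
# find_good_lines('spec_SB*.ascii', 'lba_spec', 'CIalpha',
#                 ['CIbeta', 'CIgamma'], z=0.0, vel_shift=50., x_col=0)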
def load_itau_all_norad(trans='alpha', n_max=1000):
    """
    Loads all the available models without an external radiation field.
    """

    models = glob.glob('{0}/bbn/*_dat_bn_beta'.format(LOCALDIR))
    natural_sort(models)

    Te = np.zeros(len(models))
    ne = np.zeros(len(models))
    other = np.zeros(len(models), dtype='|S20')
    data = np.zeros((len(models), 2, n_max))

    for i, model in enumerate(models):
        st = model[model.index('T') + 2:model.index('T') + 5]
        Te[i] = str2val(st)
        sn = model[model.index('ne') + 3:model.index('ne') + 7].split('_')[0]
        ne[i] = str2val(sn)
        other[i] = model.split('bn_beta')[-1]
        n, int_tau = itau(st, sn, trans, n_max=n_max, other=other[i])
        data[i, 0] = n
        data[i, 1] = int_tau

    return [Te, ne, other, data]
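# Usage sketch: load the full grid of no-radiation-field models for the alpha
# transition (the arguments shown are the function defaults).
# Te, ne, other, data = load_itau_all_norad(trans='alpha', n_max=1000)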
def spec2vel(spec, basename, transition, z, f_col, sb_id):
    """
    Converts the frequency axis of each subband spectrum to velocity for every
    `transition` line found, and saves one spectrum per line.
    """

    specs = glob.glob(spec)

    # Sort the SBs
    crrls.natural_sort(specs)

    for s in specs:

        # Determine the subband name
        try:
            sb = re.findall(r'{0}\d+'.format(sb_id), s)[0]
        except IndexError:
            print("Could not find SB number.")
            print("Will use SB???")
            sb = 'SB???'

        # Load the data
        data = np.loadtxt(s)
        x = data[:, f_col]
        y = data[:, f_col + 1:]

        qns, freqs = crrls.find_lines_sb(x, transition, z)

        for i, n in enumerate(qns):
            # Convert the frequency axis to velocity
            vel = crrls.freq2vel(freqs[i] * (1 + z), x) / 1e3
            # Save the spectrum with a velocity column
            np.savetxt('{0}_{1}_n{2}.ascii'.format(basename, sb, int(n)),
                       np.c_[x, vel, y])
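# Usage sketch for spec2vel(); the file pattern, basename, transition name,
# redshift and frequency column index are hypothetical placeholders:
# spec2vel('spec_SB*.ascii', 'lba_spec', 'CIalpha', z=0.0, f_col=0, sb_id='SB')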
def load_itau_all_hydrogen(trans='alpha', n_max=1000, verbose=False,
                           value='itau'):
    """
    Loads all the available models for Hydrogen.
    """

    models = glob.glob('{0}/bbn2_RRL_HI{1}/*'.format(LOCALDIR, trans))
    natural_sort(models)
    models = np.asarray(models)
    models = sorted(models, key=lambda x: (str2val(x.split('_')[5]),
                                           float(x.split('_')[7]),
                                           str2val(x.split('_')[11])
                                           if len(x.split('_')) > 17 else 0))

    Te = np.zeros(len(models))
    ne = np.zeros(len(models))
    other = np.zeros(len(models), dtype=object)
    data = np.zeros((len(models), 2, n_max))

    for i, model in enumerate(models):
        if verbose:
            print(model)
        st = model.split('_')[5]
        Te[i] = str2val(st)
        sn = model.split('_')[7]
        ne[i] = float(sn)
        if len(model.split('_')) <= 18:
            other[i] = '-'
        else:
            other[i] = '_'.join(model.split('_')[10:13])
        if verbose:
            print("Trying to load model: ne={0}, te={1}, tr={2}".format(
                ne[i], Te[i], other[i]))
        n, int_tau = itau_h(st, sn, trans, n_max=n_max, other=other[i],
                            verbose=verbose, value=value)
        data[i, 0] = n
        data[i, 1] = int_tau

    return [Te, ne, other, data]
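# Usage sketch: load the hydrogen alpha model grid, printing progress.
# Te, ne, other, data = load_itau_all_hydrogen(trans='alpha', n_max=1000,
#                                              verbose=True, value='itau')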
def make_rms_list(spec, output, transitions, z, dv, mode, f_col, y_col):
    """
    Writes a list of spectra with their weights, derived from the line-blanked
    rms, and their mean frequencies.
    """

    specs = glob.glob(spec)
    crrls.natural_sort(specs)

    with open(output, 'w') as log:

        for s in specs:

            data = np.loadtxt(s)
            x = data[:, f_col]
            y = data[:, y_col]

            # Catch NaNs and invalid values:
            mask_x0 = np.ma.masked_equal(x, 1.0).mask
            mask_x1 = np.isnan(x)
            mask_y = np.isnan(y)
            mask = np.array(reduce(np.logical_or, [mask_x0, mask_x1, mask_y]))

            # Remove NaNs and invalid values
            mx = x[~mask]
            my = y[~mask]

            # Skip empty spectra, otherwise y_lf is undefined below.
            if len(x) == 0:
                continue

            trans = transitions.split(',')
            bf = []
            for o, t in enumerate(trans):
                n, f = crrls.find_lines_sb(mx, t, z)
                bf.append(list(f))

            if len(bf) > 0:
                bf = np.array(list(_f for _bf in bf for _f in _bf))
                x_lf, y_lf = crrls.blank_lines2(mx, my, bf, dv)
            else:
                x_lf, y_lf = mx, my

            rms = crrls.get_rms(y_lf)
            mrms = set_weight(mode, rms)

            # Get the SB frequency
            freq = np.mean(mx)

            log.write("{0} {1} {2}\n".format(s, mrms, freq))
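# Usage sketch for make_rms_list(); the file pattern, transition list,
# redshift, blanking width and weighting mode are assumed placeholders:
# make_rms_list('spec_SB*.ascii', 'sb_weights.log', 'CIalpha,CIbeta',
#               z=0.0, dv=50., mode='rms', f_col=0, y_col=1)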
def load_bn_all(n_min=5, n_max=1000, verbose=False, location=LOCALDIR):
    """
    Loads all the available bn models.
    """

    models = glob.glob('{0}/bn2/*_dat'.format(location))
    natural_sort(models)
    models = np.asarray(models)
    models_tr = sorted(models, key=lambda x: (str2val(x.split('_')[3]),
                                              float(x.split('_')[5]),
                                              str2val(x.split('_')[10])
                                              if len(x.split('_')) > 17 else 0))
    models = models_tr

    Te = np.zeros(len(models))
    ne = np.zeros(len(models))
    Tr = np.zeros(len(models), dtype='|S20')
    data = np.zeros((len(models), 5, n_max - n_min))

    for i, model in enumerate(models):
        if verbose:
            print(model)
        st = model.split('_')[3]
        Te[i] = str2val(st)
        sn = model.split('_')[5].rstrip('0')
        ne[i] = float(sn)
        if len(model.split('_')) <= 17:
            Tr[i] = '-'
        else:
            Tr[i] = '_'.join(model.split('_')[8:11])
        if verbose:
            print("Trying to load model: ne={0}, te={1}, tr={2}".format(
                ne[i], Te[i], Tr[i]))
        bn = load_bn(st, sn, Tr=Tr[i], n_min=n_min, n_max=n_max,
                     verbose=verbose)
        data[i, 0] = bn[:, 0]
        data[i, 1] = bn[:, 1]
        data[i, 2] = bn[:, 2]
        data[i, 3] = bn[:, 3]
        data[i, 4] = bn[:, 4]

    return [Te, ne, Tr, data]
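# Usage sketch: load every bn table between n=5 and n=1000 from the default
# location (the arguments shown are the function defaults).
# Te, ne, Tr, data = load_bn_all(n_min=5, n_max=1000, verbose=False)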
def load_itau_nelim(temp, dens, trad, trans, n_max=1000, verbose=False,
                    value='itau'):
    """
    Loads models given a temperature, radiation field and an upper limit for
    the electron density.
    """

    models = glob.glob('{0}/bbn2_{1}/*_T_{2}_*_{3}_*'.format(
        LOCALDIR, trans, temp, trad))
    natural_sort(models)
    models = np.asarray(models)
    models_len = np.asarray([len(model.split('_')) for model in models])
    models = sorted(models, key=lambda x: (str2val(x.split('_')[4]),
                                           float(x.split('_')[6]),
                                           str2val(x.split('_')[11])
                                           if len(x.split('_')) > 17 else 0))
    models = np.asarray(models)
    nes = np.asarray([float(model.split('_')[6].rstrip('0'))
                      for model in models])

    # Only select the models with a density equal to or lower than the
    # specified value, dens.
    models = models[nes <= dens]

    return load_models(models, trans, n_max=n_max, verbose=verbose,
                       value=value)
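# Usage sketch; the temperature and radiation-field strings must follow the
# model file naming convention and the values below are only placeholders:
# models = load_itau_nelim(temp='1d2', dens=0.1, trad='2d3',
#                          trans='RRL_CIalpha', n_max=1000)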
def valid_ne(line):
    """
    Checks all the available models and lists the available ne values.
    """

    models = glob.glob('{0}/bbn2_{1}/*'.format(LOCALDIR, line))
    natural_sort(models)
    models = np.asarray(models)
    models_len = np.asarray([len(model.split('_')) for model in models])
    models = sorted(models, key=lambda x: (str2val(x.split('_')[4]),
                                           float(x.split('_')[6]),
                                           str2val(x.split('_')[11])
                                           if len(x.split('_')) > 17 else 0))

    ne = np.asarray([float(model.split('_')[6].rstrip('0'))
                     for model in models])

    return np.unique(ne)
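# Usage sketch: list the electron densities covered by the CIalpha model grid.
# print(valid_ne('RRL_CIalpha'))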
table = Table(rows=data,
              names=('n', 'f0 (MHz)',
                     'center (km/s)', 'center_err (km/s)',
                     'itau (Hz)', 'itau_err (Hz)',
                     'FWHM (km/s)', 'FWHM_err (km/s)',
                     'tau', 'tau_err',
                     'FWHM_gauss (km/s)', 'FWHM_gauss_err (km/s)',
                     'FWHM_lorentz (km/s)', 'FWHM_lorentz_err (km/s)',
                     'residuals'),
              dtype=('i3', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4',
                     'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4'))
table.write(log, format='ascii.fixed_width')

if __name__ == '__main__':

    dD_fix0 = 3.4 / 2.  # crrls.sigma2FWHM(1.4456523-0*0.0267016)/2.
    trans = 'alpha'
    frng = 'all'

    stacks = glob.glob('CI{0}_only_n*.ascii'.format(trans))
    crrls.natural_sort(stacks)

    prop = {'n': ['812-863', '760-806', '713-748',
                  '668-709', '623-665', '580-621'],
            'ns': [37, 36, 36, 36, 37, 37]}

    vel = []
    tau = []
    wei = []
    fit3 = []
    res3 = []
    n = []
    f0 = []
def sbsplot(spec, output, show_lines, transitions, z, x_axis, x_col, x_min,
            x_max, y_axis, y_col, identifier):
    """
    Plots each spectrum on its own page of a PDF, optionally marking the
    expected line positions of the requested transitions.
    """

    pdf = PdfPages(output)

    specs = glob.glob(spec)
    crrls.natural_sort(specs)

    # If only one file is passed, it probably contains a list
    if len(specs) == 1:
        specs = np.genfromtxt(specs[0], dtype=str)
        try:
            specs.shape[1]
            specs = glob.glob(spec)
        # Or a single file is to be plotted
        except IndexError:
            pass

    for s in specs:

        data = np.loadtxt(s)
        x = data[:, x_col]
        y = data[:, y_col]

        # Determine the subband name
        try:
            sb = re.findall(r'{0}\d+'.format(identifier), s)[0]
        except IndexError:
            print("Could not find SB number.")
            print("Will use the file name.")
            sb = s

        # Begin plotting
        fig = plt.figure(frameon=False)
        fig.suptitle(sb)
        ax = fig.add_subplot(1, 1, 1, adjustable='datalim')

        ax.step(x, y, 'k-', lw=1, where='mid')

        # Mark the transitions?
        if show_lines:
            trans = transitions.split(',')
            for o, t in enumerate(trans):
                if x[~np.isnan(x)][0] > x[~np.isnan(x)][1]:
                    r = -1
                else:
                    r = 1
                qns, freqs = crrls.find_lines_sb(x[~np.isnan(x)][::r], t, z)
                ylbl = np.ma.masked_invalid(y).mean()
                for label, i, j in zip(qns, freqs, [ylbl] * len(freqs)):
                    plt.annotate(label, xy=(i, j), xytext=(-10, 15 * o + 5),
                                 size='x-small', textcoords='offset points',
                                 ha='right', va='bottom',
                                 bbox=dict(boxstyle='round,pad=0.5',
                                           fc='yellow', alpha=0.5),
                                 arrowprops=dict(arrowstyle='->',
                                                 connectionstyle='arc3,rad=0'))
                    #if len(qns) > 0:
                    plt.annotate(tprops[t][0], xy=(i, j), xytext=(-4, 0),
                                 textcoords='offset points', size='xx-small')
                plt.plot(freqs, [ylbl] * len(freqs), marker='|', ls='none',
                         ms=25, c=tprops[t][1], mew=8, alpha=0.8)

        ax.set_xlabel(x_axis)
        ax.set_ylabel(y_axis)

        if x_max:
            ax.set_xlim(x_min, x_max)

        pdf.savefig(fig)
        plt.close(fig)

    pdf.close()
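# Usage sketch for sbsplot(); every argument value below is a hypothetical
# placeholder (file pattern, transitions, redshift, column indices, labels):
# sbsplot('spec_SB*.ascii', 'subbands.pdf', show_lines=True,
#         transitions='CIalpha,CIbeta', z=0.0, x_axis='Frequency (MHz)',
#         x_col=0, x_min=None, x_max=None, y_axis='Optical depth', y_col=1,
#         identifier='SB')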
parser.add_argument('--clobber',
                    help="Overwrite existing fits files?",
                    action='store_true')
parser.add_argument('-v', '--verbose',
                    action='store_true',
                    help="Verbose output?")
parser.add_argument('-l', '--logfile', type=str, default=None,
                    help="Where to store the logs.\n"
                         "(string, Default: output to console)")
args = parser.parse_args()

if args.verbose:
    loglev = logging.DEBUG
else:
    loglev = logging.ERROR

# Prepare the logger
formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=args.logfile, level=loglev, format=formatter)
logger = logging.getLogger(__name__)

logger.info('Will extract a spectrum from cube: {0}'.format(args.cube))
logger.info('Will extract region: {0}'.format(args.region))

outfits = args.outfits
fitslist = args.fitslist
natural_sort(fitslist)

main(outfits, fitslist, args.stokeslast, args.chan_id, args.nzeros,
     args.clobber)
def remove_stack(spec, model, basename, transition, z, x_col, y_col, freq,
                 plot, plot_file, mode):
    """
    Removes a stacked line model from each spectrum, either by subtracting it
    or by dividing it out, and saves the result.
    """

    specs = glob.glob(spec)
    crrls.natural_sort(specs)

    # If only one file is passed, it probably contains a list
    if len(specs) == 1:
        specs = np.genfromtxt(specs[0], dtype=str)
        try:
            specs.shape[1]
            specs = glob.glob(spec)
        # Or a single file is to be processed
        except IndexError:
            pass

    if plot:
        pdf = PdfPages(plot_file)

    for s in specs:

        # Determine the subband name
        try:
            sb = re.findall(r'SB\d+', s)[0]
        except IndexError:
            print("Could not find SB number.")
            print("Will use SB???")
            sb = 'SB???'

        data = np.loadtxt(s)
        p = data[:, x_col].argsort()
        x = np.copy(data[p, x_col])
        y = np.copy(data[p, y_col])
        mask = np.isnan(y)

        # Load model
        mod = np.loadtxt(model)
        p = mod[:, 0].argsort()
        xm = mod[p, 0]
        ym = mod[p, 1]
        # Remove NaNs
        ym = ym[~np.isnan(xm)]
        xm = xm[~np.isnan(xm)]

        qns, freqs = crrls.find_lines_sb(x[~np.isnan(x)], transition, z)

        y_mod = np.zeros(len(y))
        ys = np.copy(y)
        ys[mask] = 0
        x[mask] = -9999  # This way it should be outside the boundaries

        if not freq:
            for i, n in enumerate(qns):
                # Convert the model velocity axis to frequency
                fm = crrls.vel2freq(freqs[i] * (1. + z), xm * 1e3)
                p = fm.argsort()
                ymod = ym[p]
                fm = fm[p]
                # Interpolate the model axis to the spectrum grid
                interp_ym = interpolate.interp1d(fm, ymod, kind='linear',
                                                 bounds_error=False,
                                                 fill_value=0.0)
                y_mod += interp_ym(x)
        else:
            # Interpolate the model axis to the spectrum grid
            interp_ym = interpolate.interp1d(xm, ym, kind='linear',
                                             bounds_error=False,
                                             fill_value=0.0)
            y_mod += interp_ym(x)

        # Remove the model
        if 'sub' in mode.lower():
            ys = ys - y_mod
            off = 0.
        elif 'div' in mode.lower():
            ys = ((ys + 10.) / y_mod - 1.) * 10.
            off = 10.

        # Return the masked values to their NaN values
        ys[mask] = np.nan
        x[mask] = np.nan

        if plot:
            fig = plt.figure(frameon=False)
            fig.suptitle(sb)
            ax = fig.add_subplot(1, 1, 1, adjustable='datalim')
            ax.step(x, y_mod - off, 'r-', drawstyle='steps', lw=1,
                    where='pre', label='model')
            ax.step(x, y, 'b-', drawstyle='steps', lw=1, where='pre',
                    label='in')
            ax.step(x, ys, 'g-', drawstyle='steps', lw=1, where='pre',
                    label='out')
            ax.legend(loc=0, numpoints=1, frameon=False)
            pdf.savefig(fig)
            plt.close(fig)

        data[:, y_col] = ys
        np.savetxt('{0}_{1}.ascii'.format(basename, sb), data)

    if plot:
        pdf.close()
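# Usage sketch for remove_stack(); file names, transition, redshift and column
# indices are assumed placeholders. mode='sub' subtracts the model, 'div'
# divides it out.
# remove_stack('spec_SB*.ascii', 'stack_model.ascii', 'lba_spec_nostack',
#              'CIalpha', z=0.0, x_col=0, y_col=1, freq=False, plot=True,
#              plot_file='remove_stack_check.pdf', mode='sub')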
                    '--verbose',
                    action='store_true',
                    help="Verbose output?")
parser.add_argument('-l', '--logfile', type=str, default=None,
                    help="Where to store the logs.\n"
                         "(string, Default: output to console)")
args = parser.parse_args()

if args.verbose:
    loglev = logging.DEBUG
else:
    loglev = logging.ERROR

# Prepare the logger
formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=args.logfile, level=loglev, format=formatter)
logger = logging.getLogger(__name__)

outfits = args.outfits
fitslist = args.fitslist
natural_sort(fitslist)
logger.debug(fitslist)

main(outfits, fitslist, args.stokeslast, args.chan_id, args.chan_end,
     args.nzeros, args.overwrite)
def load_itau_all(line='RRL_CIalpha', n_min=5, n_max=1000, verbose=False,
                  value='itau'):
    """
    Loads all the available models for Carbon.

    :param line: Which models should be loaded.
    :type line: string
    :param n_min: Minimum n number to include in the output.
    :type n_min: int
    :param n_max: Maximum n number to include in the output.
    :type n_max: int
    :param verbose: Verbose output?
    :type verbose: bool
    :param value: ['itau'\|'bbnMdn'\|None] Which value should be in the output.
    :type value: string
    """

    models = glob.glob('{0}/bbn2_{1}/*'.format(LOCALDIR, line))
    natural_sort(models)
    models = np.asarray(models)
    models_len = np.asarray([len(model.split('_')) for model in models])
    models_tr = sorted(models, key=lambda x: (str2val(x.split('_')[4]),
                                              float(x.split('_')[6]),
                                              str2val(x.split('_')[11])
                                              if len(x.split('_')) > 17 else 0))
    models = models_tr

    Te = np.zeros(len(models))
    ne = np.zeros(len(models))
    other = np.zeros(len(models), dtype='|S20')
    data = np.zeros((len(models), 2, n_max - n_min))

    for i, model in enumerate(models):
        if verbose:
            print(model)
        st = model.split('_')[4]
        Te[i] = str2val(st)
        sn = model.split('_')[6].rstrip('0')
        ne[i] = float(sn)
        if len(model.split('_')) <= 17:
            other[i] = '-'
        else:
            other[i] = '_'.join(model.split('_')[9:12])
        if verbose:
            print("Trying to load model: ne={0}, te={1}, tr={2}".format(
                ne[i], Te[i], other[i]))
        n, int_tau = itau(st, ne[i], line, n_min=n_min, n_max=n_max,
                          other=other[i], verbose=verbose, value=value)
        data[i, 0] = n
        data[i, 1] = int_tau

    return [Te, ne, other, data]
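# Usage sketch: load the carbon alpha model grid between n=5 and n=1000
# (the arguments shown are the function defaults).
# Te, ne, other, data = load_itau_all(line='RRL_CIalpha', n_min=5, n_max=1000)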
    data0 = np.empty((len(stacks), 15))