def do_cocube(outname='NGC4047', gallist=['NGC4047'], seq='smo7',
              lines=['12', '13'], linelbl=['co', '13co'],
              colm=['data3d', 'rms3d', 'dilmsk3d', 'smomsk3d'],
              unit=['K', 'K', '', ''],
              colmlbl=['msk.K', '_dil.ecube', '_dil.mask', '_smo.mask'],
              hexgrid=False, allpix=False, fitsdir='fitsdata',
              ortpar='edge_leda.csv'):
    """
    Extract 3D molecular line data into an HDF5 database.

    This script assumes standardized naming conventions, for example:
        UGC10710.co.smo7_dil.mask.fits.gz = ${galaxy}.${linelbl}.${seq}${colmlbl}.fits.gz
    The data cube is assumed to be uncompressed; the error and mask cubes
    should be gzip-compressed.

    Parameters
    ----------
    outname : str
        Prefix of the output filename
    gallist : list of str
        List of galaxy names
    seq : str
        Identifier, generally to indicate smoothing resolution
    lines : list of str
        How different lines will be identified in the database
    linelbl : list of str
        How different lines are identified in the FITS file names
    colm : list of str
        How different file types will be identified in the database
    unit : list of str
        The brightness unit for each member of colm
    colmlbl : list of str
        How different file types are identified in the FITS file names
    hexgrid : boolean
        True to sample on a hexagonal grid (experimental)
    allpix : boolean
        True to dump every pixel, otherwise every 3rd pixel in x and y is used.
    fitsdir : str
        Path to the directory where FITS files reside
    ortpar : filename
        Name of the EdgeTable which has LEDA orientation parameters for the sample
    """
    if allpix:
        stride = [1, 1, 1]
    else:
        stride = [3, 3, 1]

    # Get the orientation parameters from LEDA
    orttbl = EdgeTable(ortpar)
    orttbl.add_index('Name')

    tablelist = []
    for gal in gallist:
        file0 = os.path.join(
            fitsdir,
            gal + '.' + linelbl[0] + '.' + seq + colmlbl[0] + '.fits')
        if not os.path.exists(file0):
            print('####### Cannot find', file0)
            continue
        for i_line, line in enumerate(lines):
            for i_col in range(len(colm)):
                # --- Read the first image (main cube data)
                if i_line == 0 and i_col == 0:
                    print('Reading', file0)
                    galtab = fitsextract(file0, bunit=unit[i_col],
                                         col_lbl=colm[i_col] + '_' + line,
                                         keepnan=True, stride=stride,
                                         ra_gc=15 * orttbl.loc[gal]['ledaRA'],
                                         dec_gc=orttbl.loc[gal]['ledaDE'],
                                         pa=orttbl.loc[gal]['ledaPA'],
                                         inc=orttbl.loc[gal]['ledaAxIncl'],
                                         ortlabel='LEDA', first=True,
                                         use_hexgrid=hexgrid)
                    gname = Column([np.string_(gal)] * len(galtab),
                                   name='Name', description='Galaxy Name')
                    galtab.add_column(gname, index=0)
                    print(galtab[20:50])
                # --- Read the subsequent images (assumed to be gzipped if i_col>0)
                else:
                    if i_col == 0:
                        getfile = os.path.join(
                            fitsdir, gal + '.' + linelbl[i_line] + '.'
                            + seq + colmlbl[i_col] + '.fits')
                    else:
                        getfile = os.path.join(
                            fitsdir, gal + '.' + linelbl[i_line] + '.'
                            + seq + colmlbl[i_col] + '.fits.gz')
                    if os.path.exists(getfile):
                        print('Reading', getfile)
                        addtb = fitsextract(getfile, bunit=unit[i_col],
                                            col_lbl=colm[i_col] + '_' + line,
                                            keepnan=True, stride=stride,
                                            use_hexgrid=hexgrid)
                        jointb = join(galtab, addtb, keys=['ix', 'iy', 'iz'])
                        galtab = jointb
                    else:
                        print('####### Cannot find', getfile)
                        newcol = Column(data=[np.nan] * len(galtab),
                                        name=colm[i_col] + '_' + line,
                                        unit=unit[i_col], dtype='f4')
                        galtab.add_column(newcol)
        tablelist.append(galtab)

    if len(tablelist) > 0:
        t_merge = vstack(tablelist)
        print(t_merge[20:50])
        for i_line, line in enumerate(lines):
            t_merge[colm[0] + '_' + line].description = linelbl[
                i_line] + ' brightness temperature in cube'
            t_merge[colm[1] + '_' + line].description = linelbl[
                i_line] + ' estimated 1-sigma channel noise'
            t_merge[colm[2] + '_' + line].description = linelbl[
                i_line] + ' mask value for dilated mask'
            t_merge[colm[3] + '_' + line].description = linelbl[
                i_line] + ' mask value for smoothed mask'
        t_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
        t_merge.meta['comments'] = 'Sampled CO and 13CO data cubes from EDGE-125'
        t_merge.write(outname + '.cocube_' + seq + '.hdf5', path='data',
                      overwrite=True, serialize_meta=True, compression=True)
    return
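
# Illustrative call (a sketch, not part of the pipeline): with the default
# arguments this reads the ${galaxy}.${linelbl}.${seq}${colmlbl} products for
# NGC4047 from 'fitsdata/' (main cube uncompressed, error and mask cubes
# gzipped) and writes the sampled table to NGC4047.cocube_smo7.hdf5.
#
# do_cocube(outname='NGC4047', gallist=['NGC4047'], seq='smo7',
#           fitsdir='fitsdata', ortpar='edge_leda.csv')
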
def do_comom(outname='NGC4047', gallist=['NGC4047'], seq='smo7',
             lines=['12', '13'], linelbl=['co', '13co'],
             msktyp=['str', 'dil', 'smo'], hexgrid=False, allpix=False,
             fitsdir='fitsdata', ortpar='edge_leda.csv'):
    """
    Extract 2D molecular line data into an HDF5 database.

    This script assumes standardized naming conventions, for example:
        UGC10710.co.smo7_smo.emom2.fits.gz = ${galaxy}.${linelbl}.${seq}_${msktyp}.${ftype}.fits.gz
    The possible values for ${ftype} need to be defined within 'dotypes'.

    Parameters
    ----------
    outname : str
        Prefix of the output filename
    gallist : list of str
        List of galaxy names
    seq : str
        Identifier, generally to indicate smoothing resolution
    lines : list of str
        How different lines will be identified in the database
    linelbl : list of str
        How different lines are identified in the FITS file names
    msktyp : list of str
        The types of masks to include.  Each mask is a separate path in
        the HDF5 file.
    hexgrid : boolean
        True to sample on a hexagonal grid (experimental)
    allpix : boolean
        True to dump every pixel, otherwise every 3rd pixel in x and y is used.
    fitsdir : str
        Path to the directory where FITS files reside
    ortpar : filename
        Name of the EdgeTable which has LEDA orientation parameters for the sample
    """
    if allpix:
        stride = [1, 1, 1]
    else:
        stride = [3, 3, 1]

    # Get the orientation parameters from LEDA
    orttbl = EdgeTable(ortpar)
    orttbl.add_index('Name')

    for i_msk, msk in enumerate(msktyp):
        tablelist = []
        if msk == 'str':
            dotypes = ['mom0', 'e_mom0']
            unit = ['K km/s', 'K km/s']
        if msk == 'dil':
            dotypes = [
                'snrpk', 'mom0', 'e_mom0', 'mom1', 'e_mom1', 'mom2', 'e_mom2'
            ]
            unit = ['', 'K km/s', 'K km/s', 'km/s', 'km/s', 'km/s', 'km/s']
        if msk == 'smo':
            dotypes = ['mom0', 'e_mom0', 'mom1', 'e_mom1', 'mom2', 'e_mom2']
            unit = ['K km/s', 'K km/s', 'km/s', 'km/s', 'km/s', 'km/s']

        for gal in gallist:
            file0 = os.path.join(
                fitsdir, gal + '.' + linelbl[0] + '.' + seq + '_' + msk
                + '.' + dotypes[0] + '.fits.gz')
            print(file0)
            if not os.path.exists(file0):
                continue
            adopt_incl = orttbl.loc[gal]['ledaAxIncl']
            print('Adopted inclination is {} deg'.format(adopt_incl))
            for i_line, line in enumerate(lines):
                for i_mtype, mtype in enumerate(dotypes):
                    # --- Read the first image (should be snrpk or mom0)
                    if i_line == 0 and i_mtype == 0:
                        print('Reading', file0)
                        galtab = fitsextract(file0, bunit=unit[0],
                                             col_lbl=dotypes[0] + '_' + line,
                                             keepnan=True, stride=stride,
                                             ra_gc=15 * orttbl.loc[gal]['ledaRA'],
                                             dec_gc=orttbl.loc[gal]['ledaDE'],
                                             pa=orttbl.loc[gal]['ledaPA'],
                                             inc=adopt_incl,
                                             ortlabel='LEDA', first=True,
                                             use_hexgrid=hexgrid)
                        gname = Column([np.string_(gal)] * len(galtab),
                                       name='Name', description='Galaxy Name')
                        galtab.add_column(gname, index=0)
                        print(galtab[20:50])
                    # --- Read the subsequent images
                    else:
                        ftype = mtype.replace('e_m', 'em', 1)
                        if line != '13':
                            getfile = os.path.join(
                                fitsdir, gal + '.' + linelbl[i_line] + '.'
                                + seq + '_' + msk + '.' + ftype + '.fits.gz')
                        elif msk == 'str' or mtype == 'snrpk':
                            getfile = os.path.join(
                                fitsdir, gal + '.' + linelbl[i_line] + '.'
                                + seq + '_' + msk + '.' + ftype + '.fits.gz')
                        else:
                            getfile = os.path.join(
                                fitsdir, gal + '.' + linelbl[i_line] + '.'
                                + seq + '_mk12_' + msk + '.'
                                + ftype + '.fits.gz')
                        if os.path.exists(getfile):
                            print('Reading', getfile)
                            addtb = fitsextract(getfile, bunit=unit[i_mtype],
                                                col_lbl=mtype + '_' + line,
                                                keepnan=True, stride=stride,
                                                use_hexgrid=hexgrid)
                            jointb = join(galtab, addtb, keys=['ix', 'iy'])
                            galtab = jointb
                        else:
                            newcol = Column(data=[np.nan] * len(galtab),
                                            name=mtype + '_' + line,
                                            unit=unit[i_mtype], dtype='f4')
                            galtab.add_column(newcol)
                # Add the H2 column density, with and without deprojection
                if line == '12':
                    sigmol = msd_co(galtab['mom0_12'], name='sigmol')
                    e_sigmol = msd_co(galtab['e_mom0_12'], name='e_sigmol')
                    sigmol_fo = msd_co(galtab['mom0_12']
                                       * np.cos(np.radians(adopt_incl)),
                                       name='sigmol_fo')
                    e_sigmol_fo = msd_co(galtab['e_mom0_12']
                                         * np.cos(np.radians(adopt_incl)),
                                         name='e_sigmol_fo')
                    galtab.add_columns(
                        [sigmol, e_sigmol, sigmol_fo, e_sigmol_fo])
            tablelist.append(galtab)

        if len(tablelist) > 0:
            t_merge = vstack(tablelist)
            for i_line, line in enumerate(lines):
                if 'snrpk' in dotypes:
                    t_merge['snrpk_' + line].description = linelbl[
                        i_line] + ' peak signal to noise ratio'
                t_merge['mom0_' + line].description = linelbl[
                    i_line] + ' integrated intensity using {} mask'.format(msk)
                t_merge['e_mom0_' + line].description = linelbl[
                    i_line] + ' error in mom0 assuming {} mask'.format(msk)
                if msk != 'str':
                    t_merge['mom1_' + line].description = linelbl[
                        i_line] + ' intensity wgtd mean velocity using {} mask'.format(msk)
                    t_merge['e_mom1_' + line].description = linelbl[
                        i_line] + ' error in mom1 assuming {} mask'.format(msk)
                    t_merge['mom2_' + line].description = linelbl[
                        i_line] + ' intensity wgtd vel disp using {} mask'.format(msk)
                    t_merge['e_mom2_' + line].description = linelbl[
                        i_line] + ' error in mom2 assuming {} mask'.format(msk)
                if line == '12':
                    t_merge[
                        'sigmol'].description = 'apparent H2+He surf density not deprojected'
                    t_merge[
                        'e_sigmol'].description = 'error in sigmol not deprojected'
                    t_merge[
                        'sigmol_fo'].description = 'H2+He surf density deprojected to face-on using ledaAxIncl'
                    t_merge[
                        'e_sigmol_fo'].description = 'error in sigmol deprojected to face-on'
            t_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
            print(t_merge[20:50])

            if i_msk == 0:
                t_merge.write(outname + '.comom_' + seq + '.hdf5', path=msk,
                              overwrite=True, serialize_meta=True,
                              compression=True)
            else:
                t_merge.write(outname + '.comom_' + seq + '.hdf5', path=msk,
                              append=True, serialize_meta=True,
                              compression=True)
    return
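
# Illustrative call (a sketch, not part of the pipeline): the galaxy list can
# be built from whichever peak-SNR maps are present, mirroring the glob used
# in the script version below, and each mask type becomes a separate path in
# NGC4047.comom_smo7.hdf5.
#
# import glob, os
# gallist = [os.path.basename(f).split('.')[0] for f in
#            sorted(glob.glob('fitsdata/*.co.smo7_dil.snrpk.fits.gz'))]
# do_comom(outname='NGC4047', gallist=gallist, seq='smo7',
#          fitsdir='fitsdata', ortpar='edge_leda.csv')
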
from datetime import datetime
import glob
import os
import numpy as np
from astropy.table import Table, Column, join, vstack
from edge_pydb import EdgeTable
from edge_pydb.conversion import msd_co
from edge_pydb.fitsextract import fitsextract

seq = 'smo7'
#seq = 'de20'
msktyp = ['str', 'dil', 'smo']
lines = ['12', '13']

# Get the orientation parameters from LEDA
ort = EdgeTable('edge_leda.csv',
                cols=['Name', 'ledaRA', 'ledaDE', 'ledaPA', 'ledaAxIncl'])
ort.add_index('Name')

for imsk, msk in enumerate(msktyp):
    gallist = [
        os.path.basename(file).split('.')[0] for file in sorted(
            glob.glob('fitsdata/*.co.' + seq + '_dil.snrpk.fits.gz'))
    ]
    tablelist = []
    if msk == 'str':
        dotypes = ['mom0', 'emom0']
        unit = ['K km/s', 'K km/s']
    if msk == 'dil':
        dotypes = ['snrpk', 'mom0', 'emom0', 'mom1', 'emom1', 'mom2', 'emom2']
        unit = ['', 'K km/s', 'K km/s', 'km/s', 'km/s', 'km/s', 'km/s']
    if msk == 'smo':

from datetime import datetime
import glob
import os
from astropy.table import Table, Column, join, vstack
from astropy import units as u
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from reproject import reproject_interp
from edge_pydb import EdgeTable
from edge_pydb.conversion import stmass_pc2, sfr_ha, ZOH_M13, bpt_type
from edge_pydb.fitsextract import fitsextract, getlabels

# Get the orientation parameters from LEDA
ort = EdgeTable('edge_leda.csv',
                cols=['Name', 'ledaRA', 'ledaDE', 'ledaPA', 'ledaIncl'])
#ort = EdgeTable('edge_rfpars.csv', cols=['Name', 'rfPA', 'rfInc', 'rfKinRA', 'rfKinDecl'])
ort.add_index('Name')

# Get the distance from the CALIFA table
dist = EdgeTable('edge_califa.csv', cols=['Name', 'caDistP3d'])
dist.add_index('Name')

# Read the FITS data
codir = '../img_comom/fitsdata/'
cadir = 'fitsdata/'
prodtype = ['ELINES', 'SFH', 'SSP', 'indices', 'flux_elines']
for prod in prodtype:
    zsel, labels, units, nsel = getlabels(prod)
    filelist = [

def do_califa(outname='NGC4047', gallist=['NGC4047'], linelbl='co', seq='smo7',
              hexgrid=False, allpix=False, debug=False,
              califa_natv='fitsdata', califa_smo='fitsdata',
              comom='../img_comom/fitsdata', nfiles=5, astrom='x',
              ortpar='edge_leda.csv', distpar='edge_califa.csv',
              distcol='caDistP3d', discard_cdmatrix=False):
    """
    Extract Pipe3D products into an HDF5 database.

    By default this script assumes there are 5 native resolution and 5
    smoothed resolution files per galaxy; set nfiles=1 for the packed
    single-file format.

    Parameters
    ----------
    outname : str
        Prefix of the output filename
    gallist : list of str
        List of galaxy names
    linelbl : str
        Identifier for reference line in the CO FITS filenames
    seq : str
        Identifier, generally to indicate smoothing resolution for CO
    hexgrid : boolean
        True to sample on a hexagonal grid (experimental)
    allpix : boolean
        True to dump every pixel, otherwise every 3rd pixel in x and y is used.
    debug : boolean
        True to generate some additional output
    califa_natv : str
        Path to the directory where native res CALIFA FITS files reside
    califa_smo : str
        Path to the directory where smoothed res CALIFA FITS files reside
    comom : str
        Path to the directory where CO moments FITS files reside
    nfiles : int
        Number of Pipe3D files per galaxy.  Should be 5 (old) or 1 (packed).
    astrom : str
        String at start of filename for native resolution images with
        astrometry.  This is ignored if nfiles=1.
    ortpar : filename
        Name of the EdgeTable which has LEDA orientation parameters for
        the sample
    distpar : filename
        Name of the EdgeTable which has distances for converting \Sigma_*.
    distcol : str
        Name of the distance column in 'distpar' to use.  Default is
        'caDistP3d' taken from 'DL' column in get_proc_elines_CALIFA.csv.
    discard_cdmatrix : boolean
        True to discard CD matrix in CALIFA files.  Use with care since
        this relies on the CDELT1 and CDELT2 being correct.
    """
    if allpix:
        stride = [1, 1, 1]
    else:
        stride = [3, 3, 1]

    # cuts for when to apply BD correction
    hacut = 0.06   # 1e-16 erg / (cm2 s)
    hbcut = 0.04   # 1e-16 erg / (cm2 s)
    ahalo = 0      # mag
    ahahi = 6      # mag

    # FITS keywords important for astrometry
    wcskeys = [
        'CTYPE1', 'CTYPE2', 'CRVAL1', 'CRVAL2', 'CRPIX1', 'CRPIX2',
        'CDELT1', 'CDELT2'
    ]
    cdkeys = [
        'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2', 'CD1_3', 'CD2_3', 'CD3_1',
        'CD3_2', 'CD3_3'
    ]
    dimkeys = ['NAXIS1', 'NAXIS2']

    # Get the orientation parameters from LEDA
    orttbl = EdgeTable(ortpar)
    orttbl.add_index('Name')

    # Get the distance from the CALIFA table
    disttbl = EdgeTable(distpar)
    disttbl.add_index('Name')

    # Read the FITS data
    # The columns to save are defined in fitsextract.py
    prodtype = ['ELINES', 'SFH', 'SSP', 'indices', 'flux_elines']
    leadstr = ['', '', '', 'indices.CS.', 'flux_elines.']
    tailstr = ['.ELINES', '.SFH', '.SSP', '', '']
    tailstr = [s + '.cube.fits.gz' for s in tailstr]

    for i_prod, prod in enumerate(prodtype):
        zsel, labels, units, nsel = getlabels(prod)
        default_len = len(zsel)
        rglist = []
        smlist = []
        if len(gallist) == 0:
            raise RuntimeError('Error: gallist is empty!')

        for gal in gallist:
            print('\nWorking on galaxy {} product {} nsel={}'.format(
                gal, prod, nsel))

            # Generate output header using CO astrometry
            cofile = os.path.join(
                comom, gal + '.' + linelbl + '.'
                + seq + '_dil.snrpk.fits.gz')
            if not os.path.exists(cofile):
                print('####### Cannot find', cofile)
                continue
            cohd = fits.getheader(cofile)

            # CALIFA files with x in name have optical astrometry
            if nfiles == 5:
                cafile = os.path.join(
                    califa_natv,
                    astrom + leadstr[i_prod] + gal + tailstr[i_prod])
            else:
                cafile = os.path.join(califa_natv,
                                      gal + '.Pipe3D.cube.fits.gz')
            if not os.path.exists(cafile):
                print('####### Cannot find', cafile)
                continue
            if nfiles == 5:
                hdus = fits.open(cafile, ignore_missing_end=True)
                cahd = hdus[0].header
                #cahd = fits.getheader(cafile, ignore_missing_end=True)
            else:
                hdus = fits.open(cafile)
                # The header for the selected extension
                cahd = hdus[hdus.index_of(prod)].header
                # Blanking of CTYPE3 so that fitsextract treats as pseudocube
                cahd['CTYPE3'] = ''
                # Use HDU 0 'ORG_HDR' when possible
                cahd0 = hdus[0].header
                for key in cdkeys + wcskeys:
                    if key in cahd0.keys():
                        cahd[key] = cahd0[key]
                # Set CDELT3 to 1 since this will be its value in template
                for key in ['CDELT3', 'CD3_3']:
                    if key in cahd.keys():
                        cahd[key] = 1.

            # Copy the CALIFA header and replace wcskeys with CO values
            outhd = cahd.copy()
            for key in dimkeys + wcskeys:
                if key in cohd.keys():
                    outhd[key] = cohd[key]
            # Need to discard CD matrix which would override the new wcskeys
            if 'CDELT1' in cohd.keys() and 'CDELT2' in cohd.keys():
                for key in cdkeys:
                    if key in outhd.keys():
                        del outhd[key]
            # Optionally discard CD matrix in CALIFA files and fall back on CDELTs
            if discard_cdmatrix:
                for key in cdkeys:
                    if key in cahd.keys():
                        del cahd[key]

            # First process the native resolution file (tab0) with astrometry
            if nfiles == 5:
                #hdu = fits.open(cafile, ignore_missing_end=True)[0]
                hdu = hdus[0]
            else:
                hdu = hdus[hdus.index_of(prod)]
            if debug:
                print('\nINPUT', WCS(hdu.header))
                print('\nOUTPUT', WCS(outhd))
            newim = reproject_interp(hdu, outhd, order=0,
                                     return_footprint=False)
            nz = newim.shape[0]
            if debug:
                print('nz=', nz)
                #fits.writeto(base.replace('fits','rg.fits'), newim, outhd, overwrite=True)
            rglabels = [s + '_rg' for s in labels]
            # Add smoothed Ha and Hb columns for extinction estimates
            if prod == 'ELINES' or prod == 'flux_elines':
                kernel = Gaussian2DKernel(3)
                if prod == 'ELINES':
                    hb_idx = 5
                    ha_idx = 6
                    rglabels += ['Hbeta_sm3_rg', 'Halpha_sm3_rg']
                    outhd['DESC_20'] = ' Hbeta after 3as smooth'
                    outhd['DESC_21'] = ' Halpha after 3as smooth'
                else:
                    hb_idx = 28
                    ha_idx = 45
                    rglabels += ['flux_Hbeta_sm3_rg', 'flux_Halpha_sm3_rg']
                    outhd['NAME408'] = ' Hbeta after 3as smooth'
                    outhd['NAME409'] = ' Halpha after 3as smooth'
                hb_conv = convolve(newim[hb_idx, :, :], kernel,
                                   preserve_nan=True)
                ha_conv = convolve(newim[ha_idx, :, :], kernel,
                                   preserve_nan=True)
                newim = np.concatenate(
                    (newim, hb_conv[np.newaxis], ha_conv[np.newaxis]))
                if len(zsel) == default_len:
                    zsel = list(zsel) + [nz, nz + 1]
                if len(units) == default_len:
                    units += ['10^-16 erg cm^-2 s^-1',
                              '10^-16 erg cm^-2 s^-1']
            tab0 = fitsextract(newim, header=outhd, keepnan=True,
                               stride=stride, bunit=units, col_lbl=rglabels,
                               zselect=zsel,
                               ra_gc=15 * orttbl.loc[gal]['ledaRA'],
                               dec_gc=orttbl.loc[gal]['ledaDE'],
                               pa=orttbl.loc[gal]['ledaPA'],
                               inc=orttbl.loc[gal]['ledaAxIncl'],
                               ortlabel='LEDA', first=True,
                               use_hexgrid=hexgrid)
            gname = Column([np.string_(gal)] * len(tab0),
                           name='Name', description='Galaxy Name')
            tab0.add_column(gname, index=0)
            rglist.append(tab0)

            # Then process the smoothed file (tab1)
            smofile = os.path.join(califa_smo,
                                   leadstr[i_prod] + gal + tailstr[i_prod])
            hdu = fits.open(smofile, ignore_missing_end=True)[0]
            hdu.header = cahd
            newim = reproject_interp(hdu, outhd, order=0,
                                     return_footprint=False)
            # if debug:
            #     fits.writeto(base.replace('fits','sm.fits'), newim, outhd, overwrite=True)
            smlabels = [s + '_sm' for s in labels]
            # Add smoothed Ha and Hb for extinction estimates
            if prod == 'ELINES' or prod == 'flux_elines':
                kernel = Gaussian2DKernel(5)
                if prod == 'ELINES':
                    hb_idx = 5
                    ha_idx = 6
                    smlabels += ['Hbeta_sm5_sm', 'Halpha_sm5_sm']
                    outhd['DESC_20'] = ' Hbeta after 5as smooth'
                    outhd['DESC_21'] = ' Halpha after 5as smooth'
                else:
                    hb_idx = 28
                    ha_idx = 45
                    smlabels += ['flux_Hbeta_sm5_sm', 'flux_Halpha_sm5_sm']
                    outhd['NAME408'] = ' Hbeta after 5as smooth'
                    outhd['NAME409'] = ' Halpha after 5as smooth'
                hb_conv = convolve(newim[hb_idx, :, :], kernel,
                                   preserve_nan=True)
                ha_conv = convolve(newim[ha_idx, :, :], kernel,
                                   preserve_nan=True)
                newim = np.concatenate(
                    (newim, hb_conv[np.newaxis], ha_conv[np.newaxis]))
            tab1 = fitsextract(newim, header=outhd, keepnan=True,
                               stride=stride, bunit=units, col_lbl=smlabels,
                               zselect=zsel,
                               ra_gc=15 * orttbl.loc[gal]['ledaRA'],
                               dec_gc=orttbl.loc[gal]['ledaDE'],
                               pa=orttbl.loc[gal]['ledaPA'],
                               inc=orttbl.loc[gal]['ledaAxIncl'],
                               ortlabel='LEDA', first=True,
                               use_hexgrid=hexgrid)
            gname = Column([np.string_(gal)] * len(tab1),
                           name='Name', description='Galaxy Name')
            tab1.add_column(gname, index=0)
            smlist.append(tab1)

            # Add additional columns
            if prod == 'ELINES' or prod == 'flux_elines':
                if prod == 'ELINES':
                    prfx = ''
                else:
                    prfx = 'flux_'
                #
                # Native resolution
                # sfr0 is SFR from Halpha without extinction correction
                sfr0_rg = sfr_ha(tab0[prfx + 'Halpha_rg'], imf='salpeter',
                                 name=prfx + 'sigsfr0_rg')
                e_sfr0_rg = Column(
                    sfr0_rg * abs(tab0['e_' + prfx + 'Halpha_rg']
                                  / tab0[prfx + 'Halpha_rg']),
                    name='e_' + prfx + 'sigsfr0_rg', dtype='f4',
                    unit=sfr0_rg.unit,
                    description='error of uncorrected SFR surface density')
                tab0.add_columns([sfr0_rg, e_sfr0_rg])
                # Balmer decrement corrected SFR
                sfr_rg, sfrext_rg, e_sfr_rg, e_sfrext_rg = sfr_ha(
                    tab0[prfx + 'Halpha_rg'],
                    flux_hb=tab0[prfx + 'Hbeta_rg'],
                    e_flux_ha=tab0['e_' + prfx + 'Halpha_rg'],
                    e_flux_hb=tab0['e_' + prfx + 'Hbeta_rg'],
                    imf='salpeter', name=prfx + 'sigsfr_corr_rg')
                tab0.add_columns([sfr_rg, e_sfr_rg, sfrext_rg, e_sfrext_rg])
                # Halpha extinction and SFR after 3" smoothing and clipping
                A_Ha3_rg = Column(get_AHa(tab0[prfx + 'Halpha_sm3_rg'],
                                          tab0[prfx + 'Hbeta_sm3_rg'],
                                          np.log10),
                                  name=prfx + 'AHa_smooth3_rg', dtype='f4',
                                  unit='mag',
                                  description='Ha extinction after 3as smooth')
                clip = ((tab0[prfx + 'Halpha_sm3_rg'] < hacut)
                        | (tab0[prfx + 'Hbeta_sm3_rg'] < hbcut)
                        | (A_Ha3_rg > ahahi) | (A_Ha3_rg < ahalo))
                sfr3_rg = Column(
                    sfr0_rg * 10**(0.4 * A_Ha3_rg),
                    name=prfx + 'sigsfr_adopt_rg', dtype='f4',
                    unit=sfr0_rg.unit,
                    description='smooth+clip BD corrected SFR surface density')
                sfr3_rg[clip] = sfr0_rg[clip]
                # A_Ha3_rg[clip] = np.nan
                tab0.add_columns([A_Ha3_rg, sfr3_rg])
                #
                # Smoothed resolution
                # sfr0 is SFR from Halpha without extinction correction
                sfr0_sm = sfr_ha(tab1[prfx + 'Halpha_sm'], imf='salpeter',
                                 name=prfx + 'sigsfr0_sm')
                e_sfr0_sm = Column(
                    sfr0_sm * abs(tab1['e_' + prfx + 'Halpha_sm']
                                  / tab1[prfx + 'Halpha_sm']),
                    name='e_' + prfx + 'sigsfr0_sm', dtype='f4',
                    unit=sfr0_sm.unit,
                    description='error of uncorrected SFR surface density')
                tab1.add_columns([sfr0_sm, e_sfr0_sm])
                # Balmer decrement corrected SFR
                sfr_sm, sfrext_sm, e_sfr_sm, e_sfrext_sm = sfr_ha(
                    tab1[prfx + 'Halpha_sm'],
                    flux_hb=tab1[prfx + 'Hbeta_sm'],
                    e_flux_ha=tab1['e_' + prfx + 'Halpha_sm'],
                    e_flux_hb=tab1['e_' + prfx + 'Hbeta_sm'],
                    imf='salpeter', name=prfx + 'sigsfr_corr_sm')
                tab1.add_columns([sfr_sm, e_sfr_sm, sfrext_sm, e_sfrext_sm])
                # Halpha extinction and SFR after 5" smoothing and clipping
                A_Ha5_sm = Column(get_AHa(tab1[prfx + 'Halpha_sm5_sm'],
                                          tab1[prfx + 'Hbeta_sm5_sm'],
                                          np.log10),
                                  name=prfx + 'AHa_smooth5_sm', dtype='f4',
                                  unit='mag',
                                  description='Ha extinction after 5as smooth')
                clip = ((tab1[prfx + 'Halpha_sm5_sm'] < hacut)
                        | (tab1[prfx + 'Hbeta_sm5_sm'] < hbcut)
                        | (A_Ha5_sm > ahahi) | (A_Ha5_sm < ahalo))
                sfr5_sm = Column(
                    sfr0_sm * 10**(0.4 * A_Ha5_sm),
                    name=prfx + 'sigsfr_adopt_sm', dtype='f4',
                    unit=sfr0_rg.unit,
                    description='smooth+clip BD corrected SFR surface density')
                sfr5_sm[clip] = sfr0_sm[clip]
                # A_Ha5_sm[clip] = np.nan
                tab1.add_columns([A_Ha5_sm, sfr5_sm])
                #
                # BPT requires flux_elines since EW(Ha) is part of classification
                if prod == 'flux_elines':
                    BPT0, BPT0sf, p_BPT0 = bpt_type(tab0, ext='_rg',
                                                    name='BPT_rg', prob=True)
                    tab0.add_columns([BPT0, p_BPT0, BPT0sf])
                    BPT1, BPT1sf, p_BPT1 = bpt_type(tab1, ext='_sm',
                                                    name='BPT_sm', prob=True)
                    tab1.add_columns([BPT1, p_BPT1, BPT1sf])
                    #
                    zoh0, zoherr0 = ZOH_M13(tab0, ext='_rg', name='ZOH_rg',
                                            err=True)
                    tab0.add_columns([zoh0, zoherr0])
                    zoh1, zoherr1 = ZOH_M13(tab1, ext='_sm', name='ZOH_sm',
                                            err=True)
                    tab1.add_columns([zoh1, zoherr1])
            elif prod == 'SSP':
                # For stellar surface density we need distance
                star0 = stmass_pc2(tab0['mass_ssp_rg'],
                                   dz=tab0['cont_dezon_rg'],
                                   dist=disttbl.loc[gal][distcol],
                                   name='sigstar_rg')
                star1 = stmass_pc2(tab1['mass_ssp_sm'],
                                   dz=tab1['cont_dezon_sm'],
                                   dist=disttbl.loc[gal][distcol],
                                   name='sigstar_sm')
                avstar0 = stmass_pc2(tab0['mass_Avcor_ssp_rg'],
                                     dz=tab0['cont_dezon_rg'],
                                     dist=disttbl.loc[gal][distcol],
                                     name='sigstar_Avcor_rg')
                avstar0.description += ' dust corrected'
                avstar1 = stmass_pc2(tab1['mass_Avcor_ssp_sm'],
                                     dz=tab1['cont_dezon_sm'],
                                     dist=disttbl.loc[gal][distcol],
                                     name='sigstar_Avcor_sm')
                avstar1.description += ' dust corrected'
                ferr0 = Column(
                    abs(tab0['e_medflx_ssp_rg'] / tab0['medflx_ssp_rg']),
                    name='fe_medflx_rg', dtype='f4', unit='fraction',
                    description='fractional error in continuum flux')
                ferr1 = Column(
                    abs(tab1['e_medflx_ssp_sm'] / tab1['medflx_ssp_sm']),
                    name='fe_medflx_sm', dtype='f4', unit='fraction',
                    description='fractional error in continuum flux')
                tab0.add_columns([star0, avstar0, ferr0])
                tab1.add_columns([star1, avstar1, ferr1])

        if len(rglist) > 0:
            rg_merge = vstack(rglist)
            rg_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
            if debug:
                print(rg_merge.colnames)
                print('There are', len(rg_merge), 'rows in native table')
        if len(smlist) > 0:
            sm_merge = vstack(smlist)
            sm_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')
            if debug:
                print(sm_merge.colnames)
                print('There are', len(sm_merge), 'rows in smoothed table')

        if prod == prodtype[0]:
            rg_merge.write(outname + '.pipe3d.hdf5', path=prod + '_rg',
                           overwrite=True, serialize_meta=True,
                           compression=True)
        else:
            rg_merge.write(outname + '.pipe3d.hdf5', path=prod + '_rg',
                           append=True, serialize_meta=True, compression=True)
        sm_merge.write(outname + '.pipe3d.hdf5', path=prod + '_sm',
                       append=True, serialize_meta=True, compression=True)
    return
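
# Illustrative call (a sketch, not part of the pipeline): this assumes the
# packed single-file Pipe3D products (nfiles=1) live under 'fitsdata/' and the
# CO peak-SNR maps under '../img_comom/fitsdata/'; each Pipe3D product type is
# written to its own path in NGC4047.pipe3d.hdf5.
#
# do_califa(outname='NGC4047', gallist=['NGC4047'], seq='smo7', nfiles=1,
#           califa_natv='fitsdata', califa_smo='fitsdata',
#           comom='../img_comom/fitsdata')
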
    # --- Use default values for distance and thickness = 100 pc
    dmpc = db['caDistMpc'][i]
    z0 = 206265 * 100 / (dmpc * 1e6)  # 100 pc thickness, fixed
    print(' Assumed INC, PA, Z0: {:.2f} {:.2f} {:.2f}'.format(inc, pa, z0))
    gal_param = paramlist % (fitsin, nrad, vsys, xpos, ypos, vrot, inc, pa,
                             z0, free, mask)
    file = open(run + '/param_' + gal + '.par', 'w')
    file.write(gal_param)
    file.close()
    print(run + ' Done')
    return


# CALIFA table: source for DISTANCE
db = EdgeTable('edge_califa.csv', cols=['Name', 'caDistMpc'])
# NED table: source for CENTER RA & DEC
ned = EdgeTable('edge_ned.csv', cols=['Name', 'nedRA', 'nedDE'])
db.join(ned)
# LEDA table: source for INC, CENTER RA & DEC
leda = EdgeTable('edge_leda.csv',
                 cols=['Name', 'ledaRA', 'ledaDE', 'ledaPA', 'ledaAxIncl'])
leda['ledaRA'].convert_unit_to('deg')
leda['ledaRA'].format = '.5f'
db.join(leda)
# CO observations table: source for VSYS
coobs = EdgeTable('edge_coobs_DE.csv', cols=['Name', 'coVsys', 'coTpk_10'])
db.join(coobs)
# Becca's fits: source for PA
rfpars = EdgeTable('edge_rfpars.csv',
                   cols=['Name', 'rfKinRA', 'rfKinDecl', 'rfPA', 'rfInc'])
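
# Worked example of the fixed 100 pc scale-height conversion above (a sketch,
# not used by the pipeline): z0 is 100 pc expressed in arcsec at the galaxy
# distance, z0 = 206265 * (100 pc / D).  For a hypothetical galaxy at
# D = 20 Mpc:
#
#   z0 = 206265 * 100 / (20 * 1e6)   # ~1.03 arcsec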