import astropy.units as u
from astropy.io import ascii
from astropy.table import QTable

# unit_check_convert is a small unit-validation helper defined elsewhere
# in this module.


def read_and_filter_exo_archive(fname='data/exo_archive_query.txt',
                                rho_min=0.1*u.arcsec, rho_max=1.5*u.arcsec,
                                st_v_min=8.*u.mag, mp_min=0.25*u.jupiterMass):
    # make sure there are no conflicting units
    rho_min = unit_check_convert(rho_min, u.arcsec)
    rho_max = unit_check_convert(rho_max, u.arcsec)
    st_v_min = unit_check_convert(st_v_min, u.mag)
    mp_min = unit_check_convert(mp_min, u.jupiterMass)

    dat = ascii.read(fname)

    # remove rows with missing important values
    filter_cols = ['st_dist', 'st_optmag', 'pl_orbsmax', 'pl_bmassj']
    for f in filter_cols:
        dat = dat[~dat[f].mask]

    # manually assign the unit because the table units were the
    # (non-standard) "mags" rather than "mag"; must be done before
    # converting to QTable
    dat['st_optmag'].unit = u.mag
    dat = QTable(dat)

    # Remove the metadata that held the column descriptions, since it makes
    # it a pain to write out data later if we change columns. Better to add
    # column descriptions later by hand if needed...
    dat.meta = {}

    dat.rename_column('pl_orbsmax', 'sma_au')
    dat.rename_column('pl_orbincl', 'orb_incl')
    dat.rename_column('pl_orbeccen', 'eccen')
    dat.rename_column('pl_bmassj', 'pl_massj')

    # replace spaces with _ in star names
    dat['pl_hostname'] = [x.replace(' ', '_') for x in dat['pl_hostname']]

    # keep only stars brighter than st_v_min in V
    dat = dat[dat['st_optmag'] < st_v_min]

    # keep only planets more massive than mp_min
    dat = dat[dat['pl_massj'] > mp_min]

    # keep only planets with a maximum elongation between rho_min and rho_max
    tmp = (dat['sma_au'] / dat['st_dist']).decompose() * u.radian
    dat['sma_arcsec'] = tmp.to(u.arcsec)
    dat['sma_arcsec'].info.format = '%.2f'
    dat = dat[dat['sma_arcsec'] >= rho_min]
    dat = dat[dat['sma_arcsec'] < rho_max]

    dat.sort('sma_arcsec')
    return dat
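# Usage sketch (added for illustration; the cut values below are arbitrary
# and assume the archive query file exists at the default path):
if __name__ == '__main__':
    targets = read_and_filter_exo_archive(rho_min=0.2 * u.arcsec,
                                          rho_max=1.0 * u.arcsec,
                                          st_v_min=7. * u.mag,
                                          mp_min=0.5 * u.jupiterMass)
    # surviving systems, sorted by projected separation
    print(targets['pl_hostname', 'st_optmag', 'sma_arcsec'])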
from io import StringIO

import astropy.units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.table import QTable
from astropy.time import Time

# _get_time, compare_time and compare_coord are helpers from the
# surrounding test module.


def test_ecsv_astropy_objects_in_meta():
    """
    Test that astropy core objects in ``meta`` are serialized.
    """
    t = QTable([[1, 2] * u.m, [4, 5]], names=['a', 'b'])
    tm = _get_time()
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]],
                 unit='deg', frame='fk4',
                 obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))
    unit = u.m / u.s
    t.meta = {'tm': tm, 'c': c, 'unit': unit}

    out = StringIO()
    t.write(out, format='ascii.ecsv')
    t2 = QTable.read(out.getvalue(), format='ascii.ecsv')

    compare_time(tm, t2.meta['tm'])
    compare_coord(c, t2.meta['c'])
    assert t2.meta['unit'] == unit
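# A minimal standalone sketch of the behaviour the test above exercises:
# astropy objects stored in ``meta`` survive an ECSV round trip. This uses
# only public astropy APIs (no test-suite helpers).
def demo_meta_roundtrip():
    t = QTable([[1, 2] * u.m], names=['a'])
    t.meta['obstime'] = Time('2016-01-02')  # astropy object in meta
    out = StringIO()
    t.write(out, format='ascii.ecsv')
    t2 = QTable.read(out.getvalue(), format='ascii.ecsv')
    assert t2.meta['obstime'] == t.meta['obstime']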
import os

import astropy.units as u
from astropy.io import ascii
from astropy.table import QTable

# _data_directory and dataset_index are module-level globals defined
# elsewhere in this package.


def load_dataset(filename):
    """Load a dataset.

    Returns
    -------
    result : QTable
    """
    datafile_path = os.path.join(_data_directory, filename)
    result = QTable(ascii.read(datafile_path, format='csv',
                               fast_reader=False))

    # add the metadata for this file from the dataset index
    this_row = dataset_index.loc[filename]
    meta_dict = dict(zip(this_row.colnames, this_row))
    result.meta = meta_dict

    if dataset_index.loc[filename]['col_units'] != "None":
        units_list = dataset_index.loc[filename]['col_units'].split(',')
        for this_unit, this_col in zip(units_list, result.columns):
            result[this_col].unit = u.Unit(this_unit)

    return result
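# Usage sketch: 'dem_quiet_sun.csv' is one file known to appear in
# dataset_index; any indexed filename works the same way.
if __name__ == '__main__':
    dem = load_dataset('dem_quiet_sun.csv')
    print(dem.meta)
    print({name: dem[name].unit for name in dem.columns})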
import time

import astropy.units as u
import numpy as np
from astropy.table import QTable
from astropy.time import Time

# DispersionMeasure comes from elsewhere in this package.


def streamsearch(stream1D, splittab, cutoff, banddir, savedir, datestr,
                 timestr, Nmax=False, Nmin=1024, dm=DispersionMeasure(56.7),
                 output=False):
    '''
    Searches the corrected 1D stream for signals stronger than ``cutoff``
    sigma.
    '''
    POS = []
    SNR = []
    snr_search = stream1D * 1  # work on a copy so the input stays intact

    start_time = Time(splittab.meta['T_START'], format='isot', precision=9)
    n_frames = splittab.meta['NFRAMES']
    samples_per_frame = splittab.meta['FRAMELEN']
    binning = splittab.meta['I_BIN']
    s_per_sample = splittab.meta['TBIN'] * binning
    nsamples = n_frames * samples_per_frame

    # find the strongest sample, blank it, and estimate its S/N against the
    # surrounding (now peak-free) noise
    pos = np.nanargmax(snr_search)
    signal = snr_search[pos]
    snr_search[pos - 30:pos + 30] = np.nan
    snr = (signal - np.nanmean(snr_search[pos - 150:pos + 150])) / np.nanstd(
        snr_search[pos - 150:pos + 150])

    i = 0
    t0 = time.time()
    # mask the first 1.11 s of samples (2.56 us native resolution, binned
    # by 100)
    snr_search[:int(1.11 / 2.56e-6 / 100)] = 0

    while (snr > cutoff) or (len(POS) < Nmin):
        if (len(POS) < Nmax) or (not Nmax):
            POS += [pos]
            SNR += [snr]
            snr_search[pos - 30:pos + 30] = 0
            pos = np.nanargmax(snr_search)
            signal = snr_search[pos]
            snr_search[pos - 30:pos + 30] = np.nan
            snr = (signal - np.nanmean(snr_search[pos - 150:pos + 150])
                   ) / np.nanstd(snr_search[pos - 150:pos + 150])
            i += 1
            t = time.time() - t0
            m, s = divmod(t, 60)
            h, m = divmod(m, 60)
            # print(f'Intensity stream searched for pulses: {len(POS)} pulses'
            #       f' found -- S/N: {snr:.3f} -- POS: {pos*100*2.56e-6:.3f}'
            #       f' -- Time elapsed:'
            #       f' {int(h):02d}:{int(m):02d}:{int(s):02d}', end=' \r')
            print(f'Intensity stream searched for pulses: {len(POS)} pulses'
                  ' found')
        else:
            # Nmax pulses collected; stop instead of looping forever
            break

    POS = np.array(POS)
    TIME_S = POS * s_per_sample
    SNR = np.array(SNR)
    MJD = start_time + TIME_S * u.s

    # Create Table of GPs to be saved
    tab = QTable()
    tab.meta = splittab.meta
    tab['time'] = MJD.isot
    tab['off_s'] = TIME_S * u.s
    tab['pos'] = POS
    tab['snr'] = SNR
    tab.sort('pos')

    tab.meta['DM'] = dm.value
    tab.meta['binning'] = 100
    tab.meta['sigma'] = cutoff
    tab.meta['start'] = start_time.isot
    tab.meta['nsamples'] = nsamples
    tab.meta['history'] = [
        'Intensity stream i_stream.npy saved from ChannelSplit'
        f' on vdif files {banddir}*/{datestr}T{timestr}'
        'Z_chime_psr_vdif/*',
        'i_stream.npy dedispersed and searched for giant pulses']

    tab.write(savedir + 'search_tab.fits', overwrite=True)

    if output:
        return tab
    return
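# Synthetic end-to-end sketch (everything below is illustrative: the meta
# values mimic what ChannelSplit would write, and the injected spike stands
# in for a real giant pulse):
def demo_streamsearch(savedir='./'):
    rng = np.random.default_rng(0)
    fake_stream = rng.normal(size=1024 * 1024)
    fake_stream[500_000] = 50.  # inject one strong fake pulse
    fake_meta = {'T_START': '2018-10-19T00:00:00.000000000',
                 'NFRAMES': 1024, 'FRAMELEN': 1024,
                 'I_BIN': 100, 'TBIN': 2.56e-6}
    fake_tab = QTable(meta=fake_meta)
    return streamsearch(fake_stream, fake_tab, cutoff=6., banddir='',
                        savedir=savedir, datestr='20181019',
                        timestr='000000', Nmax=16, Nmin=1, output=True)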
# Method of sbpy.data.DataClass; it relies on the module-level imports of
# that package (numpy's array/hstack/ndarray, astropy's QTable and Column,
# astropy.units as u, and DataClassError).
def apply(self, data, name, unit=None):
    """Apply an arbitrarily shaped sequence as additional column to a
    `~sbpy.data.DataClass` object and reshape it accordingly.

    Parameters
    ----------
    data : list or iterable `~astropy.units.Quantity` object
        Data to be added in a new column in form of a one-dimensional
        list or a two-dimensional nested sequence. Each element in
        ``data`` corresponds to one of the rows in the existing data
        table. If an element of ``data`` is a list, the corresponding
        data table row is repeated the same number of times as there
        are elements in this sublist. If ``data`` is provided as a flat
        list and has the same length as the current data table,
        ``data`` will simply be added as a column to the data table and
        the length of the data table will not change. If ``data`` is
        provided as a `~astropy.units.Quantity` object (only possible
        for flat lists), its unit is adopted, unless ``unit`` is
        specified (not None).
    name : str
        Name of the new data column.
    unit : `~astropy.units` object or str, optional
        Unit to be applied to the new column. Default: `None`

    Returns
    -------
    None

    Notes
    -----
    As a result of this method, the length of the underlying data table
    will be the same as the length of the flattened ``data`` parameter.

    Examples
    --------
    Imagine the following scenario: you obtain photometric measurements
    of the same asteroid over a number of nights. The following
    `~sbpy.data.Ephem` object summarizes the observations:

    >>> from sbpy.data import Ephem
    >>> import astropy.units as u
    >>> obs = Ephem.from_columns([[2451223, 2451224, 2451226]*u.d,
    ...                           [120.1, 121.3, 124.9]*u.deg,
    ...                           [12.4, 12.2, 10.8]*u.deg],
    ...                          names=('JD', 'RA', 'DEC'))
    >>> obs
    <QTable length=3>
        JD       RA     DEC
        d       deg     deg
     float64  float64 float64
    --------- ------- -------
    2451223.0   120.1    12.4
    2451224.0   121.3    12.2
    2451226.0   124.9    10.8

    After analyzing the observations, you would like to add the
    measured apparent V-band magnitudes to this object. You have one
    observation from the first night, two from the second night, and
    three from the third night. Instead of re-creating ``obs``,
    `~sbpy.data.DataClass.apply` offers a convenient way to supplement
    ``obs``:

    >>> obs.apply([[12.1], [12.5, 12.6], [13.5, 13.4, 13.5]],
    ...           name='V', unit='mag')
    >>> obs
    <QTable length=6>
        JD       RA     DEC      V
        d       deg     deg     mag
     float64  float64 float64 float64
    --------- ------- ------- -------
    2451223.0   120.1    12.4    12.1
    2451224.0   121.3    12.2    12.5
    2451224.0   121.3    12.2    12.6
    2451226.0   124.9    10.8    13.5
    2451226.0   124.9    10.8    13.4
    2451226.0   124.9    10.8    13.5

    Note how the data table has been re-arranged and rows have been
    duplicated in order to provide the expected shape.
    """
    new_table = None

    # strip units off Quantity objects; an explicitly provided ``unit``
    # takes precedence, as documented above
    if isinstance(data, u.Quantity):
        if unit is None:
            unit = data.unit
        data = data.value

    if len(data) != len(self.table):
        raise DataClassError('Data parameter must have '
                             'same length as self._table')

    _newcolumn = array([])
    for i, val in enumerate(data):
        if not isinstance(val, (list, tuple, ndarray)):
            val = [val]
        _newcolumn = hstack([_newcolumn, val])
        # add corresponding row from _table for each element in val
        for j in range(len(val)):
            # initialize new QTable object on the very first element
            if new_table is None:
                new_table = QTable(self.table[0])
                continue
            new_table.add_row(self.table[i])

    # add new column
    new_table.add_column(Column(_newcolumn, name=name, unit=unit))
    # restore meta data
    new_table.meta = self.meta
    self.table = new_table
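# Sketch of the flat-Quantity branch described in the docstring above: a
# Quantity with one scalar per existing row keeps the table length and
# contributes its own unit (names and values here are illustrative):
def demo_apply_flat_quantity():
    import astropy.units as u
    from sbpy.data import Ephem

    eph = Ephem.from_columns([[2451223, 2451224] * u.d,
                              [120.1, 121.3] * u.deg],
                             names=('JD', 'RA'))
    eph.apply([12.1, 12.5] * u.mag, name='V')  # unit taken from Quantity
    return eph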
import os

import numpy as np
import yaml
from astropy import units as apu
from astropy.table import QTable, Table
from astropy.time import Time

# rms and e_rs are phase-statistics helpers from the pyoof package.


def table_pyoof_out(path_pyoof_out, order):
    """
    Auxiliary function to tabulate all data from a series of observations
    gathered in a common ``pyoof_out/`` directory.

    Note: piston and tilt are not used in the error calculations; they are
    only included in the phase calculations if manually changed in
    ``core.py``.

    Parameters
    ----------
    path_pyoof_out : `list`
        Set of paths to the directory ``pyoof_out/`` or wherever the output
        from the `~pyoof` package is located.
    order : `int`
        Order used for the Zernike circle polynomial, :math:`n`.

    Returns
    -------
    qt : `~astropy.table.table.QTable`
        `~astropy.table.table.QTable` with units for the most important
        quantities from the `~pyoof` package.
    """
    # plain str/float dtypes; np.string_ and np.float are gone from
    # modern NumPy
    qt = QTable(names=[
        'name', 'tel_name', 'obs-object', 'obs-date', 'meanel', 'i_amp',
        'c_dB', 'q', 'phase-rms', 'e_rs', 'beam-snr-out-l', 'beam-snr-in',
        'beam-snr-out-r'
        ], dtype=[str] * 4 + [float] * 9)

    for p, pyoof_out in enumerate(path_pyoof_out):
        with open(os.path.join(pyoof_out, 'pyoof_info.yml'), 'r') as inputfile:
            pyoof_info = yaml.load(inputfile, Loader=yaml.Loader)

        _phase = np.genfromtxt(
            os.path.join(pyoof_out, f'phase_n{order}.csv')) * apu.rad
        phase_rms = rms(_phase, circ=True)
        # random-surface-error efficiency
        phase_e_rs = e_rs(_phase, circ=True)

        # cov = np.genfromtxt(os.path.join(pyoof_out, f'cov_n{order}.csv'))
        # idx = np.argwhere(cov[0, :].astype(int) > 5)

        params = Table.read(os.path.join(pyoof_out, f'fitpar_n{order}.csv'),
                            format='ascii')
        I_coeff = params['parfit'][:5]

        qt.add_row([
            pyoof_info['name'], pyoof_info['tel_name'],
            pyoof_info['obs_object'], pyoof_info['obs_date'],
            pyoof_info['meanel'], I_coeff[0], I_coeff[1], I_coeff[2],
            phase_rms, phase_e_rs
            ] + pyoof_info['snr'])

    # updating units
    qt['phase-rms'] *= apu.rad
    qt['meanel'] *= apu.deg
    qt['obs-date'] = Time(qt['obs-date'], format='isot', scale='utc')
    qt['c_dB'] *= apu.dB
    qt.meta = {'order': order}

    return qt
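# Usage sketch (the glob pattern and order below are assumptions; every
# matched directory needs pyoof_info.yml, phase_n{order}.csv and
# fitpar_n{order}.csv from a completed pyoof run):
def demo_table_pyoof_out(pattern='pyoof_out*', order=5):
    import glob
    qt = table_pyoof_out(sorted(glob.glob(pattern)), order=order)
    print(qt['name', 'meanel', 'phase-rms', 'e_rs'])
    return qt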
# (continues from earlier in the module, where _data_directory and the
# ``elements`` table are set up)
elements['i'].unit = u.eV
elements['ionization energy'].unit = u.eV
elements['atomic mass'] = elements['z'] / elements['zovera'] * u.u
elements.add_index('z')

compounds_file = os.path.join(_data_directory, 'compounds_mixtures.csv')
compounds = QTable(ascii.read(compounds_file, format='csv',
                              fast_reader=False))
compounds['density'].unit = u.g / (u.cm**3)
compounds.add_index('symbol')

notation_translation = Table(
    ascii.read(os.path.join(_data_directory, 'siegbahn_to_iupac.csv'),
               format='csv', fast_reader=False))

emission_lines = QTable(
    ascii.read(os.path.join(_data_directory, 'emission_lines.csv'),
               format='csv', fast_reader=False))
# the CSV starts with a UTF-8 byte-order mark, so the first column name
# comes through as '\ufeffenergy'; rename it to plain 'energy'
emission_lines.rename_column(emission_lines.colnames[0], 'energy')
emission_lines[emission_lines.colnames[0]].unit = u.eV
emission_lines.add_index(emission_lines.colnames[0])
emission_lines.add_index(emission_lines.colnames[1])
emission_lines.meta = {
    "source": "Center for X-ray Optics and Advanced Light Source, "
              "X-Ray Data Booklet Table 1-2",
    "publication date": "2009 October",
    "url": "https://xdb.lbl.gov/Section1/Table_1-3.pdf"
}
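# Lookup sketch using the indices built above; the keys are assumptions
# about the CSV contents (e.g. that atomic numbers are complete):
def demo_lookups():
    silicon = elements.loc[14]  # indexed on 'z', so z == 14
    print(silicon['atomic mass'])
    # compounds are indexed on 'symbol'; substitute any symbol present in
    # compounds_mixtures.csv:
    # print(compounds.loc['<symbol>']['density'])
    return silicon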