def array():
    # composite index
    col0 = np.array([x % 2 for x in range(1, 11)])
    col1 = np.array([x for x in range(1, 11)])
    t = Table([col0, col1])
    t = t[t.argsort()]
    return SortedArray(t, t['col1'].copy())
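# A standalone sketch of the same composite-key idea through the public
# astropy API (the table literal below is an assumption mirroring the
# fixture above; SortedArray itself is normally an internal index engine
# from astropy.table.sorted_array).
import numpy as np
from astropy.table import Table

t = Table({'col0': np.arange(1, 11) % 2, 'col1': np.arange(1, 11)})
t = t[t.argsort(['col0', 'col1'])]   # rows ordered by the composite key
t.add_index(['col0', 'col1'])        # composite index over both columns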
def write_absid_file(self, outfil=None):
    from astropy.table import Column
    from astropy.table.table import Table

    wrest = sorted(self.lines.keys())  # dict views cannot be sorted in place
    if outfil is None:
        outfil = self.absid_file
    # Columns
    cols = [Column(np.array(wrest), name='WREST')]
    clm_nms = self.lines[wrest[0]].analy.keys()
    for clm_nm in clm_nms:
        clist = [self.lines[iwrest].analy[clm_nm] for iwrest in wrest]
        cols.append(Column(np.array(clist), name=clm_nm))
    cols.append(Column(np.ones(len(cols[0])) * self.zabs, name='ZABS'))

    table = Table(cols)
    prihdr = fits.Header()
    prihdr['COMMENT'] = "Above are the data sources"
    prihdu = fits.PrimaryHDU(header=prihdr)
    table_hdu = fits.BinTableHDU.from_columns(np.array(table.filled()))
    thdulist = fits.HDUList([prihdu, table_hdu])
    thdulist.writeto(outfil, overwrite=True)  # 'clobber' is deprecated
    print('Wrote AbsID file: {:s}'.format(outfil))
def test_write_jsviewer_options(tmpdir):
    t = Table()
    t['a'] = [1, 2, 3, 4, 5]
    t['b'] = ['<b>a</b>', 'b', 'c', 'd', 'e']
    t['a'].unit = 'm'

    tmpfile = tmpdir.join('test.html').strpath
    t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3,
            jskwargs={'display_length': 5}, table_class='display hover',
            htmldict=dict(raw_html_cols='b'))

    ref = REFERENCE % dict(
        lines=format_lines(t['a'][:3], t['b'][:3]),
        table_class='display hover',
        table_id='test',
        length='5',
        display_length='5, 10, 25, 50, 100, 500, 1000',
        datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
        datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
        jquery_url='https://code.jquery.com/jquery-3.1.1.min.js'
    )
    with open(tmpfile) as f:
        assert f.read().strip() == ref.strip()
def createMultipleGaussian(self,
                           stddevXRange=[2., 3],
                           stddevYRange=None,
                           fluxInPhotons=[1000., 10000],
                           nStars=100,
                           withSeed=False):
    if withSeed:
        np.random.seed(seed=12345)
    xMean = np.random.uniform(1, self._shape[1] - 1, nStars)
    yMean = np.random.uniform(1, self._shape[0] - 1, nStars)
    sx = np.random.uniform(stddevXRange[0], stddevXRange[1], nStars)
    if stddevYRange is None:
        sy = sx
    else:
        sy = np.random.uniform(stddevYRange[0], stddevYRange[1], nStars)
    theta = np.arctan2(yMean - 0.5 * self._shape[0],
                       xMean - 0.5 * self._shape[1]) - np.pi / 2
    flux = np.random.uniform(fluxInPhotons[0], fluxInPhotons[1], nStars)
    self._table = Table()
    self._table['x_mean'] = xMean
    self._table['y_mean'] = yMean
    self._table['x_stddev'] = sx
    self._table['y_stddev'] = sy
    self._table['theta'] = theta
    self._table['flux'] = flux
    ima = self.createImage()
    return ima
def test_write_jsviewer_local(tmpdir):
    t = Table()
    t['a'] = [1, 2, 3, 4, 5]
    t['b'] = ['a', 'b', 'c', 'd', 'e']
    t['a'].unit = 'm'

    tmpfile = tmpdir.join('test.html').strpath
    t.write(tmpfile, format='jsviewer', table_id='test',
            jskwargs={'use_local_files': True})

    ref = REFERENCE % dict(
        lines=format_lines(t['a'], t['b']),
        table_class='display compact',
        table_id='test',
        length='50',
        display_length='10, 25, 50, 100, 500, 1000',
        datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'),
        datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'),
        jquery_url='file://' + join(EXTERN_DIR, 'js', 'jquery-3.1.1.min.js')
    )
    with open(tmpfile) as f:
        assert f.read().strip() == ref.strip()
def createMultipleGaussianIntegerCentroids(self,
                                           stddevXRange=[2., 3],
                                           stddevYRange=None,
                                           fluxInPhotons=[1000., 10000],
                                           nStars=100):
    xMean = np.random.randint(1, self._shape[1] - 1, nStars)
    yMean = np.random.randint(1, self._shape[0] - 1, nStars)
    sx = np.random.uniform(stddevXRange[0], stddevXRange[1], nStars)
    if stddevYRange is None:
        sy = sx
    else:
        sy = np.random.uniform(stddevYRange[0], stddevYRange[1], nStars)
    theta = np.arctan2(yMean - 0.5 * self._shape[0],
                       xMean - 0.5 * self._shape[1]) - np.pi / 2
    flux = np.random.uniform(fluxInPhotons[0], fluxInPhotons[1], nStars)
    self._table = Table()
    self._table['x_mean'] = xMean
    self._table['y_mean'] = yMean
    self._table['x_stddev'] = sx
    self._table['y_stddev'] = sy
    self._table['theta'] = theta
    self._table['flux'] = flux
    ima = self.createImage()
    return ima
def test_table_index_time_warning(engine):
    # Make sure that no ERFA warnings are emitted when indexing a table by
    # a Time column with a non-default time scale
    tab = Table()
    tab['a'] = Time([1, 2, 3], format='jyear', scale='tai')
    tab['b'] = [4, 3, 2]
    with warnings.catch_warnings(record=True) as wlist:
        tab.add_index(('a', 'b'), engine=engine)
    assert len(wlist) == 0
def project(self, cube, cutoff):
    # TODO: do all this with astropy units from the call functions
    table = Table(names=("Line Code", "Mol", "Ch Name", "Rest Freq",
                         "Obs Freq", "Intensity"),
                  dtype=('S80', 'S40', 'S40', 'f8', 'f8', 'f8'))
    dba = db.lineDB(self.dbpath)  # Maybe we can have an always-open DB
    dba.connect()
    fwin = axis_range(cube.data, cube.wcs, axis=2)
    cor_fwin = np.array(fwin / (1 + self.z)) * u.Hz
    cor_fwin = cor_fwin.to(u.MHz).value
    counter = 0
    used = False
    for mol in self.mol_list:
        # For each molecule specified in the dictionary, load its
        # spectral lines within the corrected frequency window.
        linlist = dba.getSpeciesLines(mol, cor_fwin[0], cor_fwin[1])
        abun = random.uniform(self.mol_list[mol][0],
                              self.mol_list[mol][1]) * u.Jy / u.beam
        for lin in linlist:
            counter += 1
            trans_temp = lin[5] * u.K
            inten = lin[4]
            if inten != 0:
                inten = 10 ** inten
            flux = np.exp(-abs(trans_temp - self.temp) / trans_temp) * inten * abun
            flux = flux.value * u.Jy / u.beam
            freq = (1 + self.z) * lin[3] * u.MHz  # TODO: astropy
            if flux < cutoff:  # TODO: astropy units!
                log.info(' - Discarding ' + str(lin[1]) + ' at freq='
                         + str(freq) + ' (' + str(lin[3] * u.MHz)
                         + ') because I=' + str(flux) + ' < ' + str(cutoff))
                continue
            if not self._draw(cube, flux, freq, cutoff):
                log.info(' - Discarding ' + str(lin[1]) + ' at freq='
                         + str(freq) + ' (' + str(lin[3] * u.MHz)
                         + ') because it is too thin for the resolution')
                continue
            log.info(' - Projecting ' + str(lin[2]) + ' (' + str(lin[1])
                     + ') at freq=' + str(freq) + ' (' + str(lin[3] * u.MHz)
                     + ') intens=' + str(flux))
            used = True
            # Add the line to the table.
            # TODO: revise the last value, which corresponds to the intensity.
            table.add_row((self.comp_name + "-l" + str(counter), mol,
                           str(lin[2]), str(lin[3]), freq, flux))
    dba.disconnect()
    if not used:
        return None
    return table
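# Hedged usage sketch for project(): `component`, `cube` and the cutoff
# unit are assumptions, not confirmed API; only the None-on-empty return
# behaviour is taken from the code above.
lines_table = component.project(cube, cutoff=1e-3 * u.Jy / u.beam)
if lines_table is None:
    log.info('No spectral line survived the cutoff')
else:
    lines_table.pprint()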
def from_tm(cls, tmfile):
    """Process the given SOCPacketFile and yield LevelB products.

    Parameters
    ----------
    tmfile : `SOCPacketFile`
        The input data file.
    """
    packet_data = defaultdict(list)
    for packet_no, binary in tmfile.get_packet_binaries():
        try:
            packet = TMPacket(binary)
        except Exception:
            logger.error('Error parsing %s, %d', tmfile.name, packet_no,
                         exc_info=True)
            return
        packet.source = (tmfile.file.name, packet_no)
        packet_data[packet.key].append(packet)

    for prod_key, packets in packet_data.items():
        headers = []
        hex_data = []
        for packet in packets:
            sh = vars(packet.source_packet_header)
            bs = sh.pop('bitstream')
            hex_data.append(bs.hex)
            dh = vars(packet.data_header)
            dh.pop('datetime')
            headers.append({**sh, **dh, 'raw_file': packet.source[0],
                            'packet': packet.source[1]})
        if len(headers) == 0 or len(hex_data) == 0:
            return None

        control = Table(headers)
        control['index'] = np.arange(len(control), dtype=np.int64)

        data = Table()
        data['control_index'] = np.array(control['index'], dtype=np.int64)
        data['data'] = hex_data

        control = unique(control, keys=['scet_coarse', 'scet_fine',
                                        'sequence_count'])
        # Only keep data that is in the control table via index
        data = data[np.nonzero(control['index'][:, None]
                               == data['control_index'])[1]]
        # Now reindex both data and control
        control['index'] = range(len(control))
        data['control_index'] = control['index']

        service_type, service_subtype, ssid = prod_key
        if ssid is not None:
            control['ssid'] = ssid
        product = LevelB(service_type=service_type,
                         service_subtype=service_subtype,
                         ssid=ssid, control=control, data=data)
        yield product
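# Hedged usage sketch: from_tm is a generator classmethod, so products are
# consumed lazily; SOCPacketFile and the file path are assumptions from the
# docstring, and the product attributes mirror the constructor kwargs.
tmfile = SOCPacketFile('/data/tm/packets.bin')
for product in LevelB.from_tm(tmfile):
    logger.info('LevelB product for service %s-%s',
                product.service_type, product.service_subtype)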
def generate_transmission_tables():
    from datetime import datetime
    cur_date = datetime.now().strftime('%Y%m%d')

    trans = Transmission()
    energies = np.linspace(2, 150, 1001) * u.keV

    norm_sci_energies = trans.get_transmission()
    norm_sci_energies.write(f'stix_transmission_sci_energies_{cur_date}.csv')
    norm_high_res = trans.get_transmission(energies=energies)
    norm_high_res.write(f'stix_transmission_highres_{cur_date}.csv')

    comps = trans.get_transmission_by_component()

    comps_sci_energies = Table(
        [c.transmission(trans.energies) for c in comps.values()],
        names=list(comps.keys()))
    comps_sci_energies['energy'] = trans.energies
    comps_sci_energies.write(
        f'stix_transmission_by_component_sci_energies_{cur_date}.csv')

    comps_highres = Table([c.transmission(energies) for c in comps.values()],
                          names=list(comps.keys()))
    comps_highres['energy'] = energies
    comps_highres.write(
        f'stix_transmission_by_component_highres_{cur_date}.csv')
def load_table(filename):
    """
    Load a table from a given file.

    Supports csv, tab, tex, vo, vot, xml, fits, and hdf5.

    Parameters
    ----------
    filename : str
        File to read

    Returns
    -------
    table : Table
        Table of data.
    """
    supported = get_table_formats()
    fmt = os.path.splitext(filename)[-1][1:].lower()  # extension sans '.'
    if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = ascii.read(filename)
    elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = Table.read(filename)
    else:
        log.error("Table format not recognized or supported")
        log.error("{0} [{1}]".format(filename, fmt))
        raise Exception("Table format not recognized or supported")
    return t
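# Usage sketch (hypothetical file names): the reader is selected purely by
# the file extension.
tab = load_table('sources.fits')  # dispatched to Table.read
tab = load_table('sources.csv')   # dispatched to ascii.read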
def join(self, datatable):
    """
    Add data to the current catalogue.

    This is based on the astropy Table join() method: "The join() method
    allows one to merge these two tables into a single table based on
    matching values in the 'key columns'." We use join_type='outer'
    (http://docs.astropy.org/en/stable/table/operations.html).
    """
    from astropy.table import join

    # ---------------------
    # - Input test
    if not isinstance(datatable, Table):
        try:
            datatable = Table(datatable)
        except Exception:
            raise TypeError("the given datatable is not an astropy Table "
                            "and cannot be converted into one.")

    self._properties["data"] = join(self.data, datatable, join_type='outer')
    self._update_fovmask_()
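# For reference, the method above delegates to a plain astropy outer join;
# this standalone, runnable sketch shows the underlying operation on two
# assumed toy tables.
from astropy.table import Table, join

left = Table({'id': [1, 2, 3], 'mag': [19.2, 20.1, 18.7]})
right = Table({'id': [2, 3, 4], 'z': [0.1, 0.3, 0.5]})
merged = join(left, right, join_type='outer')  # keeps all ids; missing values are masked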
def get_transmission(self, energies=None, attenuator=False):
    base_comps = [self.components[name] for name in
                  ['front_window', 'rear_window', 'dem', 'mli',
                   'calibration_foil', 'dead_layer']]

    if energies is None:
        energies = self.energies
    if attenuator:
        base_comps.append(self.components['attenuator'])

    base = Compound(base_comps)
    base_trans = base.transmission(energies[:-1] + 0.5 * np.diff(energies))

    fine = Compound(base_comps + [self.components['grid_covers']])
    fine_trans = fine.transmission(energies[:-1] + 0.5 * np.diff(energies))

    fine_grids = np.array([11, 13, 18, 12, 19, 17]) - 1
    transmission = Table()
    # transmission['sci_channel'] = range(1, 31)
    for i in range(33):
        name = f'det-{i}'
        if np.isin(i, fine_grids):
            transmission[name] = fine_trans
        else:
            transmission[name] = base_trans
    return transmission
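# Standalone illustration of the bin-midpoint pattern used above:
# transmission is evaluated at the centre of each energy bin, so the
# result has one value per bin (the edge values here are assumptions).
import numpy as np

energies = np.linspace(4, 150, 33)                   # assumed bin edges in keV
midpoints = energies[:-1] + 0.5 * np.diff(energies)  # one midpoint per bin
assert len(midpoints) == len(energies) - 1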
def main2():
    """
    As per main() but we operate on columns instead of rows.
    So much faster!
    """
    global colnames
    if len(sys.argv) != 3:
        print("Usage", __file__, "inputcatalog outputcatalog")
        sys.exit()

    infile = sys.argv[-2]
    outfile = sys.argv[-1]

    print('read')
    master = Table.read(infile)
    print('colnames')
    colnames = get_colnames(master)
    print('filtering')
    # don't have to worry about rows where the first col has the source we want
    second = np.where(master['local_rms_2'] <= master['local_rms_1'])
    master = shuffle_left_rows(master, second, 2)
    print("strip cols")
    master = strip_cols(master)
    print('write')
    if os.path.exists(outfile):
        os.remove(outfile)
    master.write(outfile)
def test_updating_row_byindex(self, main_col, table_types, engine):
    self._setup(main_col, table_types)
    t = Table([['a', 'b', 'c', 'd'], [2, 3, 4, 5], [3, 4, 5, 6]],
              names=('a', 'b', 'c'), meta={'name': 'first table'})

    t.add_index('a', engine=engine)
    t.add_index('b', engine=engine)

    t.loc['c'] = ['g', 40, 50]  # single label, with primary key 'a'
    t2 = t[2]
    assert list(t2) == ['g', 40, 50]

    # list search
    t.loc[['a', 'd', 'b']] = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
    t2 = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
    for i, p in zip(t2, [1, 4, 2]):  # same order as input list
        assert list(t[p - 1]) == i
def _extractStars(self):
    self._findStars()
    self._starsTab = Table()
    self._starsTab['x'] = self._selectedStars['xcentroid']
    self._starsTab['y'] = self._selectedStars['ycentroid']
    self._starsCut = extract_stars(NDData(data=self._image),
                                   self._starsTab, self._size)
def test_get_observation_type_invalid_obs_id_valueerror(self, mock_query):
    with pytest.raises(ValueError):
        arr = {'a': np.array([], dtype=np.int32), 'b': [], 'obs_type': []}
        data_table = Table(arr)
        ehst = ESAHubbleClass(self.get_dummy_tap_handler())
        mock_query.return_value = data_table
        dummy_obs_id = '1234'
        ehst.get_observation_type(dummy_obs_id)
def createMultipleIntegratedGaussianPRFImage(self,
                                             stddevRange=[2., 3],
                                             fluxInPhotons=[1000., 10000],
                                             nStars=100):
    xMean = np.random.uniform(1, self._shape[1] - 1, nStars)
    yMean = np.random.uniform(1, self._shape[0] - 1, nStars)
    sx = np.random.uniform(stddevRange[0], stddevRange[1], nStars)
    flux = np.random.uniform(fluxInPhotons[0], fluxInPhotons[1], nStars)
    self._table = Table()
    self._table['x_0'] = xMean
    self._table['y_0'] = yMean
    self._table['sigma'] = sx
    self._table['flux'] = flux
    ima = make_model_sources_image(self._shape,
                                   IntegratedGaussianPRF(),
                                   self._table)
    return ima
def fits_to_packets(file):
    logger.info(f'Processing fits file {file}')
    parser = StixTCTMParser()
    control = Table.read(str(file), hdu=1)
    data = Table.read(str(file), hdu=2)
    binary_packets = [ConstBitArray(hex=hex).tobytes()
                      for hex in data['data']][-2:]
    packets = [parser.parse_binary(bd)[0] for bd in binary_packets][-2:]
    if np.abs([((len(data['data'][i]) // 2) - (control['data_len'][i] + 7))
               for i in range(len(data))]).sum() > 0:
        raise ValueError('Packet size and expected length do not match')
    # packets = list(chain.from_iterable(packets))
    # Filter keeping only TM packets
    # packets = list(filter(lambda x: x['header']['TMTC'] == 'TM', packets))
    # Packet ordering is not guaranteed so sort by coarse time then seq count
    # packets.sort(key=lambda x: (x['header']['coarse_time'],
    #                             x['header']['seq_count']))
    return packets
def createMoffatImage(self, posX, posY, gamma, alpha, peak):
    table = Table()
    table['x_0'] = [posX]
    table['y_0'] = [posY]
    table['gamma'] = [gamma]
    table['alpha'] = [alpha]
    table['amplitude'] = [peak]
    self._table = table
    return self.createImage()
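# The parameter table above matches astropy's Moffat2D model. A runnable
# sketch of evaluating one such source on a pixel grid without photutils
# (the grid shape and parameter values are assumptions):
import numpy as np
from astropy.modeling.functional_models import Moffat2D

yy, xx = np.mgrid[0:64, 0:64]
moffat = Moffat2D(amplitude=1000., x_0=32., y_0=32., gamma=3., alpha=2.5)
image = moffat(xx, yy)  # 64x64 array with the Moffat profile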
def pickleCatalogue(catalogue, filename, **kwargs):
    """Write a catalogue to a pickle file."""
    verbose = kwargs.pop('verbose', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    with open(filename, 'wb') as outfile:
        pickle.dump(Table(catalogue, masked=False), outfile,
                    pickle.HIGHEST_PROTOCOL)
    if verbose:
        print("Catalogue pickled.")
    return
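# A hypothetical counterpart for reading the file back, assuming the
# catalogue was written by pickleCatalogue above (this function name is
# ours, not from the source).
import pickle

def unpickleCatalogue(filename):
    """Load a pickled astropy Table written by pickleCatalogue."""
    with open(filename, 'rb') as infile:
        return pickle.load(infile)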
def createIntegratedGaussianPRFImage(self, sigma, flux, x0, y0):
    table = Table()
    table['sigma'] = [sigma]
    table['flux'] = [flux]
    table['x_0'] = [x0]
    table['y_0'] = [y0]
    self._table = table
    ima = make_model_sources_image(self._shape,
                                   IntegratedGaussianPRF(),
                                   table)
    return ima
def alignCoordsOnMeanNGS(self):
    disp = self._getDisplacementsFromMeanNGS()
    dx = disp[:, 0]
    dy = disp[:, 1]
    self.starsTabsNew = []
    for i in range(len(self._starsTabs)):
        tab = Table()
        tab['x_fit'] = self._starsTabs[i]['x_fit'] - dx[i]
        tab['y_fit'] = self._starsTabs[i]['y_fit'] - dy[i]
        tab['flux_fit'] = self._starsTabs[i]['flux_fit']
        self.starsTabsNew.append(tab)
def _gen_sources_table(self):
    """
    Generate a summary table of each component.

    :return: an astropy Table.
    """
    table = Table(names=("Source Name", "Comp ID", "Model", "Alpha",
                         "Delta", "Redshift", "Radial Vel"),
                  dtype=('S80', 'S80', 'S40', 'f8', 'f8', 'f8', 'f8'))
    for source in self.sources:
        for component in self.sources[source].comp:
            table.add_row((self.sources[source].name,
                           component.comp_name,
                           component.get_model_name(),
                           component.pos[0].value + component.offset[1].value,
                           component.pos[1].value + component.offset[0].value,
                           component.get_redshift().value,
                           component.get_velocity().value))
    return table
def process_tmtc_file(tmfile, basedir):
    fits_processor = FitsL0Processor(basedir)
    tree = Et.parse(tmfile)
    root = tree.getroot()
    packet_data = defaultdict(list)
    for i, node in enumerate(root.iter('Packet')):
        packet_binary = unhexlify(node.text)
        # Not sure why; guess an extra MOC header
        header, packet_hex = process_tm_packet(packet_binary[76:])
        key = f"{header['service_type']}-{header['service_subtype']}"
        packet_data[key].append((header, packet_hex))

    for product, packet in packet_data.items():
        # header and hex for the same type
        header, hex_data = zip(*packet)
        control = Table(header)
        control['index'] = np.arange(len(control))

        data = Table()
        data['control_index'] = control['index']
        data['data'] = hex_data

        if 'ssid' in control.colnames:
            ssids = np.unique(control['ssid'])
            for ssid in ssids:
                index = np.nonzero(control['ssid'] == ssid)
                if len(index[0]) > 0:
                    cur_control = control[index]
                    cur_data = data[index]
                    cur_control['index'] = np.arange(len(cur_control))
                    cur_data['control_index'] = np.arange(len(cur_control))
                    prod = SciLevel0(control=cur_control, data=cur_data)
                    fits_processor.write_fits(prod)
        else:
            prod = level0(control=control, data=data)
            fits_processor.write_fits(prod)
def test_write_jsviewer_default(tmpdir):
    t = Table()
    t['a'] = [1, 2, 3, 4, 5]
    t['b'] = ['a', 'b', 'c', 'd', 'e']
    t['a'].unit = 'm'

    tmpfile = tmpdir.join('test.html').strpath

    t.write(tmpfile, format='jsviewer')
    ref = REFERENCE % dict(
        lines=format_lines(t['a'], t['b']),
        table_class='display compact',
        table_id='table%s' % id(t),
        length='50',
        display_length='10, 25, 50, 100, 500, 1000',
        datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
        datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
        jquery_url='https://code.jquery.com/jquery-3.1.1.min.js'
    )
    with open(tmpfile) as f:
        assert f.read().strip() == ref.strip()
def test_get_observation_type(self, mock_query):
    arr = {'a': np.array([1, 4], dtype=np.int32),
           'b': [2.0, 5.0],
           'obs_type': ['HST Test', 'y']}
    data_table = Table(arr)
    ehst = ESAHubbleClass(self.get_dummy_tap_handler())
    mock_query.return_value = data_table
    dummy_obs_id = "1234"
    oids = ehst.get_observation_type(dummy_obs_id)
    assert oids == 'HST Test'
def test__select_related_members(self, mock_query):
    arr = {'a': np.array([1, 4], dtype=np.int32),
           'b': [2.0, 5.0],
           'members': ['caom:HST/test', 'y']}
    data_table = Table(arr)
    ehst = ESAHubbleClass(self.get_dummy_tap_handler())
    mock_query.return_value = data_table
    dummy_obs_id = "1234"
    oids = ehst._select_related_members(dummy_obs_id)
    assert oids == ['test']
def test_show_in_notebook():
    t = Table()
    t['a'] = [1, 2, 3, 4, 5]
    t['b'] = ['b', 'c', 'a', 'd', 'e']

    htmlstr_windx = t.show_in_notebook().data  # should default to 'idx'
    htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data
    htmlstr_woindx = t.show_in_notebook(show_row_index=False).data

    assert (textwrap.dedent("""
    <thead><tr><th>idx</th><th>a</th><th>b</th></tr></thead>
    <tr><td>0</td><td>1</td><td>b</td></tr>
    <tr><td>1</td><td>2</td><td>c</td></tr>
    <tr><td>2</td><td>3</td><td>a</td></tr>
    <tr><td>3</td><td>4</td><td>d</td></tr>
    <tr><td>4</td><td>5</td><td>e</td></tr>
    """).strip() in htmlstr_windx)

    assert ('<thead><tr><th>realidx</th><th>a</th><th>b</th></tr></thead>'
            in htmlstr_windx_named)
    assert '<thead><tr><th>a</th><th>b</th></tr></thead>' in htmlstr_woindx
def test__select_related_composite(self, mock_query):
    arr = {'a': np.array([1, 4], dtype=np.int32),
           'b': [2.0, 5.0],
           'observation_id': ['x', 'y']}
    data_table = Table(arr)
    ehst = ESAHubbleClass(self.get_dummy_tap_handler())
    mock_query.return_value = data_table
    dummy_obs_id = "1234"
    oids = ehst._select_related_composite(dummy_obs_id)
    assert oids == ['x', 'y']
def writer(filename, catalog, fmt=None):
    """
    Construct a dict of the data and write it out.

    This method preserves the data types in the VOTable.
    """
    tab_dict = {}
    name_list = []
    for name in catalog[0].names:
        col_name = name
        if catalog[0].galactic:
            if name.startswith('ra'):
                col_name = 'lon' + name[2:]
            elif name.endswith('ra'):
                col_name = name[:-2] + 'lon'
            elif name.startswith('dec'):
                col_name = 'lat' + name[3:]
            elif name.endswith('dec'):
                col_name = name[:-3] + 'lat'
        col_name = pre + col_name  # `pre` and `meta` come from enclosing scope
        tab_dict[col_name] = [getattr(c, name, None) for c in catalog]
        name_list.append(col_name)
    t = Table(tab_dict, meta=meta)
    # re-order the columns
    t = t[[n for n in name_list]]

    if fmt is not None:
        if fmt in ["vot", "vo", "xml"]:
            vot = from_table(t)
            # description of this votable
            vot.description = repr(meta)
            writetoVO(vot, filename)
        elif fmt in ['hdf5']:
            t.write(filename, path='data', overwrite=True)
        elif fmt in ['fits']:
            writeFITSTable(filename, t)
        else:
            ascii.write(t, filename, fmt, overwrite=True)
    else:
        ascii.write(t, filename, overwrite=True)
    return
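# Hedged usage sketch: `catalog` is assumed to be a non-empty list of
# source objects exposing .names and .galactic as the code above expects.
writer('out.xml', catalog, fmt='vot')    # VOTable path keeps column dtypes
writer('out.fits', catalog, fmt='fits')
writer('out.tab', catalog)               # falls back to plain ascii.write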
def test_invalid_updates(self, main_col, table_types, engine):
    # using .loc and .loc_indices with a value not present should raise an exception
    self._setup(main_col, table_types)
    t = Table([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]],
              names=('a', 'b', 'c'), meta={'name': 'first table'})
    t.add_index('a')
    with pytest.raises(ValueError):
        t.loc[3] = [[1, 2, 3]]
    with pytest.raises(ValueError):
        t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
    with pytest.raises(ValueError):
        t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
    with pytest.raises(ValueError):
        t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]
def sdss_check(x, y):
    """
    Check whether stars are in the SDSS catalogue.

    This function accepts either a single x and y coordinate, or an
    array of each.
    """
    w = WCS('a100.fits')
    sfilt = []
    # Check which format x and y are given in.
    if not (isinstance(x, (np.ndarray, list, float, int))
            & isinstance(y, (np.ndarray, list, float, int))
            & (np.shape(x) == np.shape(y))):
        print('Error: Need a set of pixel coordinates.')
        print('       X and Y must have same non-zero size.')
        raise TypeError
    x = [x] if (np.shape(x) == ()) else x
    y = [y] if (np.shape(y) == ()) else y
    lon, lat = w.all_pix2world(x, y, 1)
    pos = coords.SkyCoord(lon, lat, unit="deg")
    if len(pos) == 1:
        pos = [pos]
    table_fields = ['RA', 'Dec', 'psfMag_r', 'psfMagErr_r', 'psffwhm_r',
                    'nDetect', 'X_pixel', 'Y_pixel']
    sfilt = AstroTable(names=table_fields)
    for index, position in enumerate(pos):
        sfull = SDSS.query_region(position, radius='1arcsec',
                                  data_release=13,
                                  photoobj_fields=table_fields[:-2])
        try:
            sline = (sfull[np.where((sfull['nDetect'] > 0)
                                    & (sfull['psfMag_r'] > -99))[0]][0])
            slist = [sl for sl in sline]
            slist.append(x[index])
            slist.append(y[index])
            sfilt.add_row(slist)
        except (TypeError, IndexError):
            print("Star at " + str(position)[39:-1] + " not found :-(.")
            slist = np.zeros(len(table_fields))
            slist[-2:] = x[index], y[index]
            sfilt.add_row(slist)
    return sfilt
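# Usage sketch: scalars or equal-shaped arrays are both accepted; the
# pixel coordinates here are hypothetical positions on a100.fits.
single = sdss_check(512.3, 1024.7)
several = sdss_check(np.array([10., 20.]), np.array([30., 40.]))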
def writer(filename, catalog, fmt=None):
    # construct a dict of the data
    # this method preserves the data types in the VOTable
    tab_dict = {}
    for name in catalog[0].names:
        tab_dict[name] = [getattr(c, name, None) for c in catalog]
    t = Table(tab_dict, meta=meta)
    # re-order the columns
    t = t[[n for n in catalog[0].names]]

    if fmt is not None:
        if fmt in ["vot", "vo", "xml"]:
            vot = from_table(t)
            # description of this votable
            vot.description = repr(meta)
            writetoVO(vot, filename)
        elif fmt in ['hdf5']:
            t.write(filename, path='data', overwrite=True)
        elif fmt in ['fits']:
            writeFITSTable(filename, t)
        else:
            ascii.write(t, filename, fmt)
    else:
        ascii.write(t, filename)
    return
def write_absid_file(self, outfil=None):
    '''TO BE DEPRECATED (probably)
    Writes portions of the LLS lines to a FITS table.
    '''
    from astropy.table import Column
    from astropy.table.table import Table

    wrest = [line.wrest.value for line in self.lines]
    if outfil is None:
        outfil = self.absid_file
    # Columns
    cols = [Column(np.array(wrest), name='WREST')]
    clm_nms = self.lines[0].analy.keys()
    for clm_nm in clm_nms:
        if clm_nm == 'spec':
            continue
        clist = [line.analy[clm_nm] for line in self.lines]
        cols.append(Column(np.array(clist), name=clm_nm))
    cols.append(Column(np.ones(len(cols[0])) * self.zabs, name='ZABS'))

    table = Table(cols)
    prihdr = fits.Header()
    prihdr['COMMENT'] = "Above are the data sources"
    prihdu = fits.PrimaryHDU(header=prihdr)
    table_hdu = fits.BinTableHDU.from_columns(np.array(table.filled()))
    thdulist = fits.HDUList([prihdu, table_hdu])
    thdulist.writeto(outfil, overwrite=True)  # 'clobber' is deprecated
    print('Wrote AbsID file: {:s}'.format(outfil))
def load_table(filename):
    """
    Load a table from a given file; the reader is chosen from the file
    extension.

    :param filename: path of the file to read
    :return: an astropy Table, or None if the format is not supported
    """
    supported = get_table_formats()
    fmt = os.path.splitext(filename)[-1][1:].lower()  # extension sans '.'
    if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = ascii.read(filename)
    elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = Table.read(filename)
    else:
        log.error("Table format not recognized or supported")
        log.error("{0} [{1}]".format(filename, fmt))
        t = None
    return t
def load(self, catalogue_file, **kwargs):
    """
    Load the given file and create the object.
    kwargs can have any build option like key_ra, key_mag, etc.
    """
    # ---------------------
    # - Parsing the input
    if catalogue_file.endswith(".fits"):
        # loading from fits file
        fits = pf.open(catalogue_file)
        header = fits[self._build_properties["data_index"]].header
        data = fits[self._build_properties["data_index"]].data
        if type(data) == pf.fitsrec.FITS_rec:
            from astrobject.utils.tools import fitsrec_to_dict
            data = TableColumns(fitsrec_to_dict(data))
    elif catalogue_file.endswith(".pkl"):
        # loading from pkl
        fits = None
        header = None
        data = load_pkl(catalogue_file)
        if not type(data) is Table:
            try:
                data = Table(data)
            except Exception:
                warnings.warn("Conversion of 'data' into an astropy Table failed")
    else:
        fits = None
        header = None
        format_ = kwargs.pop("format", "ascii")
        data = Table.read(catalogue_file, format=format_, **kwargs)

    # ---------------------
    # - Calling Creates
    self.create(data, header, **kwargs)
    self._properties["filename"] = catalogue_file
    self._derived_properties["fits"] = fits
if table is None:
    if args.short:
        print("Failed")
    else:
        print("---")
        print("\033[91mNot able to access data for source in archive %s\033[0m" % (cat))
        print("---")
    sys.exit(1)
if args.short:
    print("OK")
    sys.exit(0)
if len(cols) > 0:
    tab = table
    table = Table()
    for c in cols:
        table.add_column(tab.columns[c])
if outfile:
    table.write(outfile, format='ascii', delimiter=',')
print("---")
print(" Table retrieved:")
table.pprint(max_width=-1)
print("---")
sys.exit(0)