def _to_table(self, vo_table):
    '''
    Return the current table as a VOT object
    '''

    table = VOTable(vo_table)

    # Add keywords
    for key in self.keywords:
        if isinstance(self.keywords[key], basestring):
            arraysize = '*'
        else:
            arraysize = None
        param = Param(table, name=key, ID=key, value=self.keywords[key],
                      arraysize=arraysize)
        table.params.append(param)

    # Define some fields
    n_rows = len(self)

    fields = []
    for i, name in enumerate(self.names):

        data = self.data[name]
        unit = self.columns[name].unit
        description = self.columns[name].description
        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        if data.ndim > 1:
            arraysize = str(data.shape[1])
        else:
            arraysize = None

        if column_type in type_dict:
            datatype = type_dict[column_type]
        elif column_type == np.int8:
            warnings.warn("int8 unsupported - converting to int16")
            datatype = type_dict[np.int16]
        elif column_type == np.uint16:
            warnings.warn("uint16 unsupported - converting to int32")
            datatype = type_dict[np.int32]
        elif column_type == np.uint32:
            warnings.warn("uint32 unsupported - converting to int64")
            datatype = type_dict[np.int64]
        elif column_type == np.uint64:
            raise Exception("uint64 unsupported")
        else:
            raise Exception("cannot use numpy type " + str(column_type))

        if column_type == np.float32:
            precision = 'E9'
        elif column_type == np.float64:
            precision = 'E17'
        else:
            precision = None

        if datatype == 'char':
            if arraysize is None:
                arraysize = '*'
            else:
                raise ValueError("Cannot write vector string columns to VO files")

        field = Field(vo_table, ID=name, name=name, datatype=datatype,
                      unit=unit, arraysize=arraysize, precision=precision)

        field.description = description
        fields.append(field)

    table.fields.extend(fields)

    table.create_arrays(n_rows)

    # Character columns are stored as object columns in the vo_table
    # instance. Leaving the type as string should work, but causes
    # a segmentation fault on MacOS X with Python 2.6 64-bit so
    # we force the conversion to object type columns.
    for name in self.names:

        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        # Add data to the table
        # At the moment, null values in VO table are dealt with via a
        # 'mask' record array
        if column_type == np.string_:
            table.array[name] = self.data[name].astype(np.object_)
            if self._masked:
                table.array.mask[name] = self.data[name].mask.astype(np.object_)
            else:
                if (self.data[name].dtype.type == np.bytes_ and
                        type(self.columns[name].null) != bytes):
                    table.array.mask[name] = (self.data[name] ==
                                              self.columns[name].null.encode('utf-8')).astype(np.object_)
                else:
                    table.array.mask[name] = (self.data[name] ==
                                              self.columns[name].null).astype(np.object_)
        else:
            table.array[name] = self.data[name]
            if self._masked:
                table.array.mask[name] = self.data[name].mask
            else:
                table.array.mask[name] = (self.data[name] ==
                                          self.columns[name].null)

    table.name = self.table_name

    return table
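# The method above assumes several module-level names that are not shown in
# this snippet: `warnings` and `numpy` (as `np`), the `VOTable`, `Param`, and
# `Field` classes (Param and Field match astropy.io.votable.tree; `VOTable` is
# presumably an alias for its table class, and `basestring` a Python 2/3
# compatibility alias), plus a `type_dict` mapping and a `smart_dtype` helper.
# The sketch below is a plausible reconstruction based on how those helpers
# are used above, not their original definitions.

import warnings

import numpy as np

# Map numpy scalar types to VOTable datatype strings (assumed mapping).
type_dict = {
    np.bool_: 'boolean',
    np.uint8: 'unsignedByte',
    np.int16: 'short',
    np.int32: 'int',
    np.int64: 'long',
    np.float32: 'float',
    np.float64: 'double',
    np.string_: 'char',
}


def smart_dtype(dtype):
    # Reduce a column dtype to its scalar numpy type, stripping any subarray
    # shape so that e.g. dtype('(3,)float64') and dtype('float64') both map
    # to np.float64.
    if dtype.subdtype is not None:
        return dtype.subdtype[0].type
    return dtype.type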
fits_pixel_type = ''
if hdr['BITPIX'] == -64:
    fits_pixel_type = 'double'
elif hdr['BITPIX'] == -32:
    fits_pixel_type = 'float'
else:
    raise RuntimeError('Unrecognized BITPIX value for file conversion')

# Create the FIELD elements
f1 = Field(t, name='wavenumber', datatype=fits_pixel_type,
           ucd='em.wavenumber;em.MIR', unit='cm-1')
f1.description = 'wavenumber merged over orders'
f2 = Field(t, name='intensity', datatype=fits_pixel_type,
           ucd='phot.flux.density;em.MIR', unit='erg.s-1.cm-1.sr-1')
f3 = Field(t, name='intensity_err', datatype=fits_pixel_type,
           ucd='stat.error;phot.flux.density;em.MIR', unit='erg.s-1.cm-1.sr-1')
f3.description = 'error (standard deviation)'
f4 = Field(t,
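# A minimal, self-contained sketch (an assumption, not part of the original
# script) of the astropy.io.votable pattern the snippet above follows: create
# a VOTableFile, hang a Resource and a Table off it, attach Field objects such
# as f1-f3, allocate the rows, fill the columns, and write the XML. In the
# snippet, `t` is presumably the VOTableFile instance; the variable names,
# sample values, and output filename below are illustrative only.

import numpy as np
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)

table = Table(votable)  # named TableElement in newer astropy releases
resource.tables.append(table)

# FIELDs are constructed against the top-level VOTableFile, just as f1-f3
# above are constructed against `t`.
table.fields.extend([
    Field(votable, name='wavenumber', datatype='double', unit='cm-1'),
    Field(votable, name='intensity', datatype='double',
          unit='erg.s-1.cm-1.sr-1'),
])

n_rows = 3
table.create_arrays(n_rows)
table.array['wavenumber'] = np.array([100.0, 101.0, 102.0])
table.array['intensity'] = np.array([1.0e-6, 1.1e-6, 0.9e-6])

votable.to_xml('spectrum.vot')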