def write(self, dbtype, *args, **kwargs):
    '''
    Write the table to a SQL database.

    Parameters
    ----------
    dbtype : str
        The database backend identifier, passed through to the ``sql``
        helper module.
    *args, **kwargs
        Connection arguments forwarded to ``sql.connect_database``.
        The keyword ``overwrite`` (default False) is consumed here: if
        True and a table with the same name already exists, it is
        dropped before being re-created.

    Raises
    ------
    Exception
        If the table name is not set.
    ExistingTableException
        If the table already exists and ``overwrite`` is False.
    '''
    #self._raise_vector_columns()

    # Consume the 'overwrite' keyword before connecting so it is not
    # forwarded to the database driver
    overwrite = kwargs.pop('overwrite', False)

    # Open the connection
    connection, cursor = sql.connect_database(dbtype, *args, **kwargs)

    # Check that table name is set
    if not self.table_name:
        raise Exception("Table name is not set")
    table_name = str(self.table_name)

    # Check that table name is ok
    # todo

    # Check if table already exists; also test the lowercased name
    # because pgsql automatically converts table names to lower case
    existing_tables = sql.list_tables(cursor, dbtype).values()
    if table_name in existing_tables or \
            table_name.lower() in existing_tables:
        if overwrite:
            sql.drop_table(cursor, table_name)
        else:
            raise ExistingTableException()

    # Create table
    columns = [(name, smart_dtype(self.columns[name].dtype))
               for name in self.names]
    shapes = [self.data[name].shape for name in self.names]
    sql.create_table(cursor, dbtype, table_name, columns, shapes,
                     primary_key=self._primary_key)

    mapper = sql.get_sql_row_mapper(shapes)

    # Insert the rows one at a time
    # (the original computed an unused 'float_column' list here; removed)
    for i in range(len(self)):
        row = self.row(i, python_types=True)
        sql.insert_row(cursor, dbtype, table_name, mapper(row),
                       fixnan=not self._masked)

    # Commit and close
    connection.commit()
    cursor.close()
def _to_table(self, VOTable):
    ''' Return the current table as a VOT object '''

    votable = Table(VOTable)

    # Build one Field descriptor per column
    n_rows = len(self)
    field_list = []
    for index, colname in enumerate(self.names):

        col_data = self.data[colname]
        col_unit = self.columns[colname].unit
        col_dtype = self.columns[colname].dtype
        kind = smart_dtype(col_dtype)

        # Vector columns record their element count; string columns get
        # a fixed width
        arraysize = str(col_data.shape[1]) if col_data.ndim > 1 else None
        if kind == np.string_:
            arraysize = "1024"

        if kind in type_dict:
            datatype = type_dict[kind]
        elif kind == np.int8:
            warnings.warn("int8 unsupported - converting to int16")
            datatype = type_dict[np.int16]
        elif kind == np.uint16:
            warnings.warn("uint16 unsupported - converting to int32")
            datatype = type_dict[np.int32]
        elif kind == np.uint32:
            warnings.warn("uint32 unsupported - converting to int64")
            datatype = type_dict[np.int64]
        elif kind == np.uint64:
            raise Exception("uint64 unsupported")
        else:
            raise Exception("cannot use numpy type " + str(kind))

        if kind == np.float32:
            precision = 'F9'
        elif kind == np.float64:
            precision = 'F17'
        else:
            precision = None

        field_list.append(Field(VOTable, ID="col" + str(index),
                                name=colname, datatype=datatype,
                                unit=col_unit, arraysize=arraysize,
                                precision=precision))

    votable.fields.extend(field_list)
    votable.create_arrays(n_rows)

    # Character columns are stored as object columns in the VOTable
    # instance. Leaving the type as string should work, but causes
    # a segmentation fault on MacOS X with Python 2.6 64-bit so
    # we force the conversion to object type columns.
    for colname in self.names:

        kind = smart_dtype(self.columns[colname].dtype)

        # Null values are communicated to the VO table via a 'mask'
        # record array
        votable.array[colname] = self.data[colname]
        if self._masked:
            mask = self.data[colname].mask
        else:
            mask = self.data[colname] == self.columns[colname].null
        if kind == np.string_:
            mask = mask.astype(np.object_)
        votable.mask[colname] = mask

    votable.name = self.table_name

    return votable
def _to_hdu(self):
    '''
    Return the current table as a pyfits HDU object.

    Masked columns are filled with their fill value before being
    written, FITS keywords longer than eight characters are written
    using the HIERARCH convention, and table comments are appended to
    the header.

    Raises
    ------
    Exception
        If a column dtype is not supported (e.g. uint64).
    '''
    columns = []

    for name in self.names:

        if self._masked:
            data = self.data[name].filled()
            null = self.data[name].fill_value
            if data.ndim > 1:
                null = null[0]
            # pyfits cannot handle numpy boolean scalars as null values;
            # was `type(null) in [np.bool_, np.bool]`, but np.bool is a
            # removed alias in modern NumPy (AttributeError) — use
            # isinstance with the builtin instead
            if isinstance(null, (bool, np.bool_)):
                null = bool(null)
        else:
            data = self.data[name]
            null = self.columns[name].null

        unit = self.columns[name].unit
        dtype = self.columns[name].dtype

        elemwidth = None

        if unit is None:  # was `unit == None` — identity test is the idiom
            unit = ''

        if data.ndim > 1:
            elemwidth = str(data.shape[1])

        column_type = smart_dtype(dtype)

        if column_type == np.string_:
            elemwidth = dtype.itemsize

        if column_type in type_dict:
            if elemwidth:
                col_format = str(elemwidth) + type_dict[column_type]
            else:
                col_format = type_dict[column_type]
        else:
            raise Exception("cannot use numpy type " + str(column_type))

        # FITS has no unsigned integer types, so shift via BZERO
        if column_type == np.uint16:
            bzero = - np.iinfo(np.int16).min
        elif column_type == np.uint32:
            bzero = - np.iinfo(np.int32).min
        elif column_type == np.uint64:
            raise Exception("uint64 unsupported")
        elif column_type == np.int8:
            bzero = -128
        else:
            bzero = None

        columns.append(pyfits.Column(name=name, format=col_format,
                                     unit=unit, null=null, array=data,
                                     bzero=bzero))

    hdu = pyfits.new_table(pyfits.ColDefs(columns))
    hdu.name = self.table_name

    for key in self.keywords:
        # Keywords longer than 8 characters need the HIERARCH convention
        if len(key) > 8:
            keyname = "hierarch " + key
        else:
            keyname = key
        hdu.header.update(keyname, self.keywords[key])

    for comment in self.comments:
        hdu.header.add_comment(comment)

    return hdu