def create_btable(self, tablename, csv, crosscat_column_types):
    """Upload a csv table to the predictive db.

    Crosscat_column_types must be a dictionary mapping column names to
    either 'ignore', 'continuous', or 'multinomial'. Not every column
    name must be present in the dictionary: default is continuous.
    """
    # Refuse to clobber an existing btable with the same name.
    if self.persistence_layer.check_if_table_exists(tablename):
        raise Exception('Error: btable with that name already exists.')

    # Persist the raw csv; the persistence layer hands back its path.
    csv_path = self.persistence_layer.write_csv(tablename, csv)

    # Pull the column names straight off the csv header row
    # (carriage returns stripped first so '\r\n' files parse too).
    normalized = csv.replace('\r', '')
    colnames = normalized.split('\n')[0].split(',')

    # Re-read via the csv utility, guess a schema, and create the table.
    header, values = du.read_csv(csv_path, has_header=True)
    postgres_coltypes, cctypes = self._guess_schema(
        header, values, crosscat_column_types, colnames)
    self.persistence_layer.create_btable_from_csv(
        tablename, csv_path, cctypes, postgres_coltypes, colnames)

    return dict(
        columns=colnames,
        data=[cctypes],
        message='Created btable %s. Inferred schema:' % tablename)
def parse_timing_file(filename):
    """Read a timing csv and return each data row as a named tuple.

    The namedtuple's fields are taken from the file's header row;
    every cell is passed through do_strip before construction.
    """
    header, rows = du.read_csv(filename)
    _timing_row = namedtuple('timing_row', ' '.join(header))
    return [_timing_row(*map(do_strip, row)) for row in rows]