Example #1
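    # Assumed module-level imports, not shown in the snippet:
    #   from collections import OrderedDict
    #   from pyexcel_io import save_data
    #   from pyexcel_io.constants import DB_SQL
    #   from pyexcel_io.database import common as sql
    # `params` and `BookStream` come from pyexcel's internal modules.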
    def write_data(self, thebook):
        from pyexcel.book import to_book
        book = thebook
        if isinstance(thebook, BookStream):
            # materialise streamed input so its sheets expose colnames
            book = to_book(thebook)
        initializers = self.__keywords.get(params.INITIALIZERS, None)
        if initializers is None:
            initializers = [None] * len(self.__tables)
        mapdicts = self.__keywords.get(params.MAPDICTS, None)
        if mapdicts is None:
            mapdicts = [None] * len(self.__tables)
        for sheet in book:
            if len(sheet.colnames) == 0:
                sheet.name_columns_by_row(0)
        colnames_array = [sheet.colnames for sheet in book]
        scattered = zip(self.__tables, colnames_array, mapdicts, initializers)

        importer = sql.SQLTableImporter(self.__session)
        # one adapter per destination table, carrying that table's column
        # names, name-mapping dict and row initializer
        for table, colnames, mapdict, initializer in scattered:
            adapter = sql.SQLTableImportAdapter(table)
            adapter.column_names = colnames
            adapter.column_name_mapping_dict = mapdict
            adapter.row_initializer = initializer
            importer.append(adapter)
        to_store = OrderedDict()
        for sheet_name in book.sheet_names():
            # because book.to_dict() would bring in column_names,
            # which corrupts the data
            to_store[sheet_name] = book[sheet_name].get_internal_array()
        save_data(importer, to_store, file_type=DB_SQL, **self.__keywords)
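The params.INITIALIZERS and params.MAPDICTS keywords above are per-table lists that get zipped against self.__tables. A minimal sketch of what a single entry can look like, following pyexcel's documented dest_initializer/dest_mapdict usage; the Person class, sheet headers and attribute names are illustrative assumptions:

class Person:  # hypothetical stand-in for a SQLAlchemy model
    def __init__(self, name=None, age=None):
        self.name = name
        self.age = age

def person_initializer(row):
    # receives one row as a dict and returns the object to persist
    return Person(name=row['name'], age=row['age'])

person_mapdict = {
    'Full Name': 'name',      # sheet header -> model attribute
    'Age (years)': 'age',
}

# these would arrive through self.__keywords, one entry per table, e.g.
#   initializers=[person_initializer], mapdicts=[person_mapdict]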
Example #2
    def render_book_to_stream(self, file_stream, book,
                              inits=None, mapdicts=None, **keywords):
        from pyexcel.book import to_book
        session, tables = file_stream
        thebook = book
        if isinstance(book, BookStream):
            thebook = to_book(book)
        initializers = inits
        if initializers is None:
            initializers = [None] * len(tables)
        if mapdicts is None:
            mapdicts = [None] * len(tables)
        for sheet in thebook:
            if len(sheet.colnames) == 0:
                sheet.name_columns_by_row(0)
        colnames_array = [sheet.colnames for sheet in thebook]
        scattered = zip(tables, colnames_array, mapdicts, initializers)

        importer = sql.SQLTableImporter(session)
        for table, colnames, mapdict, initializer in scattered:
            adapter = sql.SQLTableImportAdapter(table)
            adapter.column_names = colnames
            adapter.column_name_mapping_dict = mapdict
            adapter.row_initializer = initializer
            importer.append(adapter)
        to_store = OrderedDict()
        for sheet_name in thebook.sheet_names():
            # because book.to_dict() would bring in column_names,
            # which corrupts the data
            to_store[sheet_name] = thebook[sheet_name].get_internal_array()
        save_data(importer, to_store, file_type=self._file_type, **keywords)
Example #3
    def render_sheet_to_stream(self, file_stream, sheet,
                               init=None, mapdict=None, **keywords):
        headers = sheet.colnames
        if len(headers) == 0:
            raise Exception(NO_COLUMN_NAMES)
        # file_stream is a (session, table) pair, as in Example #2
        session, table = file_stream
        importer = sql.SQLTableImporter(session)
        adapter = sql.SQLTableImportAdapter(table)
        adapter.column_names = headers
        adapter.row_initializer = init
        adapter.column_name_mapping_dict = mapdict
        importer.append(adapter)
        save_data(importer, {adapter.get_name(): sheet.get_internal_array()},
                  file_type=self._file_type, **keywords)
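render_sheet_to_stream() raises as soon as sheet.colnames is empty, so a caller has to promote a header row first. A minimal sketch using pyexcel's Sheet API, assuming pyexcel is installed:

import pyexcel

sheet = pyexcel.Sheet([['name', 'age'], ['Alice', 30], ['Bob', 25]])
sheet.name_columns_by_row(0)       # first row becomes the column names
print(sheet.colnames)              # ['name', 'age']
print(sheet.get_internal_array())  # the raw rows the writers consume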
Example #4
    def write_data(self, sheet):
        headers = sheet.colnames
        if len(headers) == 0:
            # fall back to row names for column-oriented sheets
            headers = sheet.rownames
        importer = sql.SQLTableImporter(self.__session)
        adapter = sql.SQLTableImportAdapter(self.__table)
        adapter.column_names = headers
        adapter.row_initializer = self.__keywords.get(params.INITIALIZER, None)
        adapter.column_name_mapping_dict = self.__keywords.get(
            params.MAPDICT, None)
        importer.append(adapter)
        save_data(importer, {adapter.get_name(): sheet.get_internal_array()},
                  file_type=DB_SQL,
                  **self.__keywords)
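For context, the importer/adapter pipeline shared by all four methods can also be driven directly, mirroring pyexcel-io's documented SQLAlchemy example. A minimal sketch; the Person model and the in-memory SQLite engine are illustrative assumptions:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

from pyexcel_io import save_data
from pyexcel_io.constants import DB_SQL
from pyexcel_io.database.common import (
    SQLTableImporter, SQLTableImportAdapter)

Base = declarative_base()


class Person(Base):  # hypothetical model, for illustration only
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = create_engine('sqlite://')  # throw-away in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# one adapter per destination table, exactly as write_data() builds them
importer = SQLTableImporter(session)
adapter = SQLTableImportAdapter(Person)
adapter.column_names = ['name']
importer.append(adapter)
save_data(importer, {adapter.get_name(): [['Alice'], ['Bob']]},
          file_type=DB_SQL)
session.commit()  # harmless if save_data already committed

print(session.query(Person).count())  # 2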