def write_data(self, thebook):
    """Import *thebook* into the bound SQL tables via pyexcel-io.

    A :class:`BookStream` input is materialised into a real Book first
    so that column names can be read off each sheet.
    """
    from pyexcel.book import to_book

    book = to_book(thebook) if isinstance(thebook, BookStream) else thebook

    table_count = len(self.__tables)
    initializers = self.__keywords.get(params.INITIALIZERS, None)
    if initializers is None:
        initializers = [None] * table_count
    mapdicts = self.__keywords.get(params.MAPDICTS, None)
    if mapdicts is None:
        mapdicts = [None] * table_count

    # Make sure every sheet has named columns (taken from row 0 when absent).
    for sheet in book:
        if not sheet.colnames:
            sheet.name_columns_by_row(0)
    colnames_array = [sheet.colnames for sheet in book]

    importer = sql.SQLTableImporter(self.__session)
    for table, colnames, mapdict, initializer in zip(
            self.__tables, colnames_array, mapdicts, initializers):
        adapter = sql.SQLTableImportAdapter(table)
        adapter.column_names = colnames
        adapter.column_name_mapping_dict = mapdict
        adapter.row_initializer = initializer
        importer.append(adapter)

    to_store = OrderedDict()
    for sheet_name in book.sheet_names():
        # book.to_dict() would bring in column_names, which corrupts
        # the data, hence the internal array is used directly.
        to_store[sheet_name] = book[sheet_name].get_internal_array()
    save_data(importer, to_store, file_type=DB_SQL, **self.__keywords)
def write_data(self, thebook):
    """Import *thebook* into the bound django models via pyexcel-io.

    A :class:`BookStream` input is materialised into a real Book first
    so that column names can be read off each sheet.
    """
    from pyexcel.book import to_book

    book = to_book(thebook) if isinstance(thebook, BookStream) else thebook

    new_models = [model for model in self.__models if model is not None]
    batch_size = self.__keywords.get(params.BATCH_SIZE, None)
    initializers = self.__keywords.get(params.INITIALIZERS, None)
    if initializers is None:
        initializers = [None] * len(new_models)
    mapdicts = self.__keywords.get(params.MAPDICTS, None)
    if mapdicts is None:
        mapdicts = [None] * len(new_models)

    # Make sure every sheet has named columns (taken from row 0 when absent).
    for sheet in book:
        if not sheet.colnames:
            sheet.name_columns_by_row(0)
    colnames_array = [sheet.colnames for sheet in book]

    importer = django.DjangoModelImporter()
    for model, colnames, mapdict, initializer in zip(
            new_models, colnames_array, mapdicts, initializers):
        adapter = django.DjangoModelImportAdapter(model)
        adapter.column_names = colnames
        adapter.column_name_mapping_dict = mapdict
        adapter.row_initializer = initializer
        importer.append(adapter)

    to_store = OrderedDict()
    for sheet_name in book.sheet_names():
        # book.to_dict() would bring in column_names, which corrupts
        # the data, hence the internal array is used directly.
        to_store[sheet_name] = book[sheet_name].get_internal_array()
    save_data(importer, to_store, file_type=DB_DJANGO, batch_size=batch_size)
def render_book_to_stream(self, models, thebook, inits=None,
                          mapdicts=None, batch_size=None, **keywords):
    """Render *thebook* into the given django *models* via pyexcel-io.

    :param models: list of django models; ``None`` entries are dropped
    :param thebook: a Book or BookStream to import
    :param inits: optional per-model row initializer functions
    :param mapdicts: optional per-model column mapping dictionaries
    :param batch_size: django bulk-create batch size
    """
    from pyexcel.book import to_book

    book = to_book(thebook) if isinstance(thebook, BookStream) else thebook

    new_models = [model for model in models if model is not None]
    initializers = inits
    if initializers is None:
        initializers = [None] * len(new_models)
    if mapdicts is None:
        mapdicts = [None] * len(new_models)

    # Make sure every sheet has named columns (taken from row 0 when absent).
    for sheet in book:
        if not sheet.colnames:
            sheet.name_columns_by_row(0)
    colnames_array = [sheet.colnames for sheet in book]

    importer = django.DjangoModelImporter()
    for model, colnames, mapdict, initializer in zip(
            new_models, colnames_array, mapdicts, initializers):
        adapter = django.DjangoModelImportAdapter(model)
        adapter.column_names = colnames
        adapter.column_name_mapping_dict = mapdict
        adapter.row_initializer = initializer
        importer.append(adapter)

    to_store = OrderedDict()
    for sheet_name in book.sheet_names():
        # book.to_dict() would bring in column_names, which corrupts
        # the data, hence the internal array is used directly.
        to_store[sheet_name] = book[sheet_name].get_internal_array()
    save_data(importer, to_store, file_type=self._file_type,
              batch_size=batch_size, **keywords)
def render_book_to_stream(self, file_stream, book, inits=None,
                          mapdicts=None, **keywords):
    """Render *book* into the SQL tables described by *file_stream*.

    :param file_stream: a ``(session, tables)`` pair naming the target
                        database session and table classes
    :param book: a Book or BookStream to import
    :param inits: optional per-table row initializer functions
    :param mapdicts: optional per-table column mapping dictionaries
    """
    from pyexcel.book import to_book
    session, tables = file_stream
    thebook = book
    if isinstance(book, BookStream):
        thebook = to_book(book)
    initializers = inits
    if initializers is None:
        initializers = [None] * len(tables)
    if mapdicts is None:
        mapdicts = [None] * len(tables)
    for sheet in thebook:
        if len(sheet.colnames) == 0:
            sheet.name_columns_by_row(0)
    # BUG FIX: read colnames from the converted book, not the raw
    # parameter — a BookStream's sheets have no colnames attribute.
    colnames_array = [sheet.colnames for sheet in thebook]
    scattered = zip(tables, colnames_array, mapdicts, initializers)
    importer = sql.SQLTableImporter(session)
    for each_table in scattered:
        adapter = sql.SQLTableImportAdapter(each_table[0])
        adapter.column_names = each_table[1]
        adapter.column_name_mapping_dict = each_table[2]
        adapter.row_initializer = each_table[3]
        importer.append(adapter)
    to_store = OrderedDict()
    for sheet_name in thebook.sheet_names():
        # due book.to_dict() brings in column_names
        # which corrupts the data
        # BUG FIX: index the converted book — a raw BookStream does not
        # support random access by sheet name.
        to_store[sheet_name] = thebook[sheet_name].get_internal_array()
    save_data(importer, to_store, file_type=self._file_type, **keywords)
def save_book_as(**keywords):
    """Save a book from a data source to another one."""
    dest_keywords, source_keywords = _split_keywords(**keywords)
    stream = sources.get_book_stream(**source_keywords)
    return sources.save_book(to_book(stream), **dest_keywords)
def save_book_as(**keywords):
    """Save a book from a data source to another one."""
    dest_keywords, source_keywords = _split_keywords(**keywords)
    incoming = sources.get_book_stream(**source_keywords)
    outgoing = to_book(incoming)
    result = sources.save_book(outgoing, **dest_keywords)
    return result
def render_book(self, book, title=DEFAULT_TITLE,
                x_in_column=0, y_in_column=1, **keywords):
    """Render every sheet of *book* into one pygal chart.

    :param title: the chart title
    :param x_in_column: index of the column holding x values
    :param y_in_column: index of the column holding y values
    :returns: the rendered chart content
    """
    from pyexcel.book import to_book

    chart_cls = getattr(pygal, self._chart_class)
    chart = chart_cls(title=title, **keywords)
    for sheet in to_book(book):
        self._render_a_sheet(
            chart, sheet,
            x_in_column=x_in_column,
            y_in_column=y_in_column)
    return chart.render()
def render_book(self, book, x_in_column=0, y_in_column=1, **keywords):
    """Plot one labelled line per sheet of *book* and save the figure
    into ``self._image_stream`` in ``self._file_type`` format.

    :param x_in_column: index of the column holding x values
    :param y_in_column: index of the column holding y values
    :param keywords: forwarded to :meth:`_set_axis_labels`
    """
    from pyexcel.book import to_book
    line_handles = []
    fig = plt.figure()
    ax = fig.add_subplot(111)
    try:
        for sheet in to_book(book):
            line_handle, = ax.plot(
                sheet.column[x_in_column],
                sheet.column[y_in_column],
                label=sheet.name)
            line_handles.append(line_handle)
        ax.legend(handles=line_handles)
        self._set_axis_labels(ax, **keywords)
        fig.savefig(self._image_stream, format=self._file_type)
    finally:
        # BUG FIX: pyplot's figure manager holds a global reference to
        # every figure created via plt.figure(); close it so repeated
        # renders do not leak memory.
        plt.close(fig)
def save_book_as(**keywords):
    """Save a book from a data source to another one.

    :param file_name: a file with supported file extension
    :param file_content: the file content
    :param file_stream: the file stream
    :param file_type: the file type in *content*
    :param session: database session
    :param tables: a list of database table
    :param models: a list of django models
    :param bookdict: a dictionary of two dimensional arrays
    :param url: a download http url for your excel file
    :param dest_file_name: another file name. **out_file** is
                           deprecated though is still accepted.
    :param dest_file_type: this is needed if you want to save to memory
    :param dest_session: the target database session
    :param dest_tables: the list of target destination tables
    :param dest_models: the list of target destination django models
    :param dest_mapdicts: a list of mapping dictionaries
    :param dest_initializers: table initialization functions
    :param dest_mapdicts: to nominate a model or table fields. Optional
    :param dest_batch_size: batch creation size. Optional
    :param keywords: additional keywords can be found at
                     :meth:`pyexcel.get_book`
    :returns: IO stream if saving to memory. None otherwise

    see also :ref:`a-list-of-data-structures`

    Here is a table of parameters:

    ========================== ===============================
    source                     parameters
    ========================== ===============================
    loading from file          file_name, keywords
    loading from string        file_content, file_type, keywords
    loading from stream        file_stream, file_type, keywords
    loading from sql           session, tables
    loading from django models models
    loading from dictionary    bookdict
    loading from an url        url
    ========================== ===============================

    Where the dictionary should have text as keys and two dimensional
    array as values.

    ================ ============================================
    Saving to source parameters
    ================ ============================================
    file             dest_file_name, dest_sheet_name,
                     keywords with prefix 'dest'
    memory           dest_file_type, dest_content,
                     dest_sheet_name,
                     keywords with prefix 'dest'
    sql              dest_session, dest_tables,
                     dest_table_init_func, dest_mapdict
    django model     dest_models, dest_initializers,
                     dest_mapdict, dest_batch_size
    ================ ============================================
    """
    dest_keywords, source_keywords = _split_keywords(**keywords)
    book_stream = sources.get_book_stream(**source_keywords)
    return sources.save_book(to_book(book_stream), **dest_keywords)