def test_load_ods_data():
    """Reading an .ods file without a plugin raises a helpful install hint."""
    expected_msg = (
        "Please install one of these plugins for read data in 'ods': "
        "pyexcel-ods,pyexcel-ods3"
    )
    try:
        get_data("test.ods")
    except manager.SupportingPluginAvailableButNotInstalled as exc:
        eq_(str(exc), expected_msg)
def test_load_ods_data():
    """Reading an .ods file without a plugin raises a helpful install hint."""
    expected_msg = (
        "Please install one of these plugins for read data in 'ods': "
        "pyexcel-ods,pyexcel-ods3"
    )
    try:
        get_data("test.ods")
    except exceptions.SupportingPluginAvailableButNotInstalled as exc:
        eq_(str(exc), expected_msg)
def test_load_ods_data_from_memory():
    """Reading ods from an in-memory stream without a plugin raises the hint."""
    stream = BytesIO()  # renamed from `io` to avoid shadowing the stdlib module
    expected_msg = (
        "Please install one of these plugins for read data in 'ods': "
        "pyexcel-ods,pyexcel-ods3"
    )
    try:
        get_data(stream, file_type="ods")
    except manager.SupportingPluginAvailableButNotInstalled as exc:
        eq_(str(exc), expected_msg)
def test_load_ods_data_from_memory():
    """Reading ods from an in-memory stream without a plugin raises the hint."""
    stream = BytesIO()  # renamed from `io` to avoid shadowing the stdlib module
    expected_msg = (
        "Please install one of these plugins for read data in 'ods': "
        "pyexcel-ods,pyexcel-ods3"
    )
    try:
        get_data(stream, file_type="ods")
    except exceptions.SupportingPluginAvailableButNotInstalled as exc:
        eq_(str(exc), expected_msg)
def test_generator_can_be_written():
    """A streaming (generator-backed) read can be saved back out intact."""
    test_filename = "generator.csv"
    test_fixture = os.path.join("tests", "fixtures", "test.csv")
    streamed = get_data(test_fixture, streaming=True)
    save_data(test_filename, streamed)
    assert os.path.exists(test_filename)
    round_tripped = get_data(test_filename)
    expected = get_data(test_fixture)
    assert round_tripped[test_filename] == expected["test.csv"]
    os.unlink(test_filename)
def get_data(self):
    """Read the source's single sheet and return it as a (name, rows) tuple."""
    # Prefer the open stream; fall back to raw content.
    source = self.file_stream if self.file_stream is not None else self.file_content
    sheets = get_data(source, file_type=self.file_type, **self.keywords)
    return one_sheet_tuple(sheets.items())
def get_data(self):
    """Read all sheets and return them tagged as an in-memory source."""
    # Prefer the open stream; fall back to raw content.
    source = self.file_stream if self.file_stream is not None else self.file_content
    sheets = get_data(source, file_type=self.file_type, **self.keywords)
    return sheets, KEYWORD_MEMORY, None
def get_data(self):
    """Read and return all sheets from the stream or the raw content."""
    # Prefer the open stream; fall back to raw content.
    source = self.file_stream if self.file_stream is not None else self.file_content
    return get_data(source, file_type=self.file_type, **self.keywords)
def test_generator_can_be_written():
    """A streaming (generator-backed) read can be saved back out intact."""
    test_filename = "generator.csv"
    test_fixture = os.path.join("tests", "fixtures", "test.csv")
    streamed = get_data(test_fixture, streaming=True)
    save_data(test_filename, streamed)
    assert os.path.exists(test_filename)
    round_tripped = get_data(test_filename)
    expected = get_data(test_fixture)
    assert round_tripped[test_filename] == expected["test.csv"]
    os.unlink(test_filename)
def get_data(self):
    """Read all sheets lazily (streaming) from the stream or raw content."""
    # Prefer the open stream; fall back to raw content.
    source = self.file_stream if self.file_stream is not None else self.file_content
    return get_data(source, file_type=self.file_type, streaming=True, **self.keywords)
def get_data(self):
    """Export every registered django model and parse the result into sheets."""
    exporter = DjangoModelExporter()
    for model in self.models:
        exporter.append(DjangoModelExportAdapter(model))
    return get_data(exporter, file_type=DB_DJANGO, **self.keywords)
def usufyToTextExport(d, fPath=None):
    """Workaround to export results to a .txt file (or to the terminal).

    :param d: Data to export.
    :param fPath: File path. If None was provided, it will assume that it
        has to print it, and the rendered sheet is returned.
    :return: The rendered sheet when writing to *fPath* is not possible
        (e.g. no path given); otherwise None.

    Fixes: ``fPath == None`` replaced with ``fPath is None``; the two bare
    ``except:`` clauses are narrowed to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    import pyexcel as pe
    import pyexcel.ext.text as text

    # NOTE(review): isTerminal is computed but not used in this body —
    # kept for backward compatibility; confirm whether it can be dropped.
    isTerminal = fPath is None
    try:
        oldData = get_data(fPath)
    except Exception:
        # No information has been recovered
        oldData = {"OSRFramework": []}
    # Generating the new tabular data
    tabularData = _generateTabularData(
        d, {"OSRFramework": [[]]}, True, canUnicode=False
    )
    # The tabular data contains a dict representing the whole book and we
    # need only the sheet!!
    sheet = pe.Sheet(tabularData["OSRFramework"])
    sheet.name = "Profiles recovered (" + getCurrentStrDatetime() + ")."
    # Defining the headers
    sheet.name_columns_by_row(0)
    text.TABLEFMT = "grid"
    try:
        with open(fPath, "w") as oF:
            oF.write(str(sheet))
    except Exception:
        # If a fPath was not provided... We will only print the info:
        return sheet
def test_filter_both_ways_2(self):
    """Row and column windows combine to select a single cell (ods)."""
    window = dict(start_column=1, column_limit=1, start_row=3, row_limit=1)
    result = get_data(self.test_file, library="pyexcel-ods", **window)
    eq_(result[self.sheet_name], [[24]])
def get_data(self):
    """Export every registered sql table and parse the result into sheets."""
    exporter = SQLTableExporter(self.session)
    for table in self.tables:
        exporter.append(SQLTableExportAdapter(table))
    return get_data(exporter, file_type=DB_SQL, **self.keywords)
def test_force_file_type():
    """force_file_type makes a .txt fixture parse as csv."""
    test_file = "force_file_type.txt"
    fixture_path = os.path.join("tests", "fixtures", test_file)
    loaded = get_data(fixture_path, force_file_type="csv")
    eq_([[1, 2, 3]], loaded[test_file])
def test_filter_row_2(self):
    """start_row plus row_limit select a single interior row (xlsx)."""
    window = dict(start_row=3, row_limit=1)
    result = get_data(self.test_file, library="pyexcel-xlsx", **window)
    eq_(result[self.sheet_name], [[4, 24, 34]])
def test_force_file_type_on_write():
    """Round-trip: write and read csv content under a .txt name."""
    test_file = "force_file_type_on_write.txt"
    save_data(test_file, {"sheet 1": [[1, 2]]}, force_file_type="csv")
    loaded = get_data(test_file, force_file_type="csv")
    eq_([[1, 2]], loaded[test_file])
    os.unlink(test_file)
def test_pyexcel_issue_138():
    """Underscore-containing strings must stay text, not numbers (issue 138)."""
    rows = [["123_122", "123_1.", "123_1.0"]]
    save_data("test.csv", rows)
    loaded = get_data("test.csv")
    eq_(loaded["test.csv"], [["123_122", "123_1.", "123_1.0"]])
    os.unlink("test.csv")
def get_data(self):
    """Export every registered django model and parse the result into sheets."""
    exporter = django.DjangoModelExporter()
    for model in self.__models:
        exporter.append(django.DjangoModelExportAdapter(model))
    return get_data(exporter, file_type=DB_DJANGO, **self.__keywords)
def test_filter_both_ways(self):
    """start_column and start_row together crop away the top-left corner."""
    result = get_data(
        self.test_file, start_column=1, start_row=3, library="pyexcel-xlsx"
    )
    eq_(result[self.sheet_name], [[24, 34], [25, 35], [26, 36]])
def test_pyexcel_issue_138():
    """Underscore-containing strings must stay text, not numbers (issue 138)."""
    rows = [["123_122", "123_1.", "123_1.0"]]
    save_data("test.csv", rows)
    loaded = get_data("test.csv")
    eq_(loaded["test.csv"], [["123_122", "123_1.", "123_1.0"]])
    os.unlink("test.csv")
def get_data(self):
    """Export every registered sql table and parse the result into sheets."""
    exporter = sql.SQLTableExporter(self.__session)
    for table in self.__tables:
        exporter.append(sql.SQLTableExportAdapter(table))
    return get_data(exporter, file_type=DB_SQL, **self.__keywords)
def test_issue_23():
    """Huge integers and inf-like words survive csv parsing (issue 23)."""
    test_file = os.path.join("tests", "fixtures", "issue23.csv")
    loaded = get_data(test_file)
    eq_(
        loaded["issue23.csv"],
        [
            [8204235414504252, u"inf"],
            [82042354145042521, u"-inf"],
            [820423541450425216, 0],
            [820423541450425247, 1],
            [8204235414504252490, 1.1],
        ],
    )
def test_issue_8():
    """Empty rows are preserved when skip_empty_rows=False (issue 8)."""
    test_file = "test_issue_8.csv"
    rows = [[1, 2], [], [], [], [3, 4]]
    save_data(test_file, rows)
    reloaded = get_data(test_file, skip_empty_rows=False)
    eq_(rows, reloaded[test_file])
    os.unlink(test_file)
def get_data(self):
    """Export every registered sql table and parse the result into sheets."""
    exporter = SQLTableExporter(self.session)
    for table in self.tables:
        exporter.append(SQLTableExportAdapter(table))
    return get_data(exporter, file_type=DB_SQL)
def test_filter_column_2(self):
    """start_column plus column_limit select a single column (ods3)."""
    window = dict(start_column=1, column_limit=1)
    result = get_data(self.test_file, library="pyexcel-ods3", **window)
    eq_(result[self.sheet_name], [[21], [22], [23], [24], [25], [26]])
def get_data(self):
    """Export every registered django model and parse the result into sheets."""
    exporter = DjangoModelExporter()
    for model in self.models:
        exporter.append(DjangoModelExportAdapter(model))
    return get_data(exporter, file_type=DB_DJANGO)
def test_issue_20():
    """Words like 'infinity' stay text rather than becoming floats (issue 20)."""
    test_file = os.path.join("tests", "fixtures", "issue20.csv")
    loaded = get_data(test_file)
    eq_(loaded["issue20.csv"], [[u"to", u"infinity", u"and", u"beyond"]])
def test_conversion_from_bytes_to_text():
    """Raw csv bytes are decoded and parsed into rows.

    Fixed: the original asserted ``result == data``, comparing the sheet
    dict returned by get_data against a plain list (always False), and
    ignored that csv parsing converts "1" to 1. Assert on the parsed
    sheet instead, as the sibling variants of this test do.
    """
    data = [["1", "2", "3"]]
    save_data("conversion.csv", data)
    with open("conversion.csv", "rb") as f:
        content = f.read()
    result = get_data(content, "csv")
    # get_data returns {sheet_name: rows}; numeric-looking text parses to ints.
    assert result["csv"] == [[1, 2, 3]]
    os.unlink("conversion.csv")
def test_file_handle_as_input():
    """An open text file handle can be passed directly to get_data.

    Fixed: the temporary file was never removed, leaking it into the
    working directory; sibling tests all clean up with os.unlink.
    """
    test_file = "file_handle.csv"
    with open(test_file, "w") as f:
        f.write("1,2,3")
    with open(test_file, "r") as f:
        data = get_data(f, "csv")
    eq_(data["csv"], [[1, 2, 3]])
    os.unlink(test_file)
def test_file_type_case_insensitivity():
    """A mixed-case .CSv extension is still handled as csv.

    Fixed: the temporary file was never removed, leaking it into the
    working directory; sibling tests all clean up with os.unlink.
    """
    test_file = "file_handle.CSv"
    with open(test_file, "w") as f:
        f.write("1,2,3")
    with open(test_file, "r") as f:
        data = get_data(f, "csv")
    eq_(data["csv"], [[1, 2, 3]])
    os.unlink(test_file)
def test_file_handle_as_input():
    """An open text file handle can be passed directly to get_data.

    Fixed: the temporary file was never removed, leaking it into the
    working directory; sibling tests all clean up with os.unlink.
    """
    test_file = "file_handle.csv"
    with open(test_file, "w") as f:
        f.write("1,2,3")
    with open(test_file, "r") as f:
        data = get_data(f, "csv")
    eq_(data["csv"], [[1, 2, 3]])
    os.unlink(test_file)
def test_file_type_case_insensitivity():
    """A mixed-case .CSv extension is still handled as csv.

    Fixed: the temporary file was never removed, leaking it into the
    working directory; sibling tests all clean up with os.unlink.
    """
    test_file = "file_handle.CSv"
    with open(test_file, "w") as f:
        f.write("1,2,3")
    with open(test_file, "r") as f:
        data = get_data(f, "csv")
    eq_(data["csv"], [[1, 2, 3]])
    os.unlink(test_file)
def test_filter_row(self):
    """row_renderer is applied to every row while reading."""
    def stringify(row):
        # Render each cell as text.
        return [str(cell) for cell in row]
    rendered = get_data(self.test_file, row_renderer=stringify)
    eq_(rendered[self.test_file], [["1", "21", "31"], ["2", "22", "32"]])
def test_filter_both_ways_2(self):
    """Row and column windows combine to select a single cell."""
    window = dict(start_column=1, column_limit=1, start_row=3, row_limit=1)
    result = get_data(self.test_file, **window)
    eq_(result[self.test_file], [[24]])
def test_filter_row(self):
    """row_renderer is applied to every row while reading."""
    def stringify(row):
        # Render each cell as text.
        return [str(cell) for cell in row]
    rendered = get_data(self.test_file, row_renderer=stringify)
    eq_(rendered[self.test_file], [["1", "21", "31"], ["2", "22", "32"]])
def test_conversion_from_bytes_to_text():
    """Raw csv bytes are decoded and parsed into rows."""
    test_file = "conversion.csv"
    save_data(test_file, [["1", "2", "3"]])
    with open(test_file, "rb") as source:
        raw = source.read()
    parsed = get_data(raw, "csv")
    assert parsed["csv"] == [[1, 2, 3]]
    os.unlink(test_file)
def get_data(self):
    """Export one sql table (optionally restricted columns) and parse it."""
    exporter = sql.SQLTableExporter(self.__session)
    exporter.append(sql.SQLTableExportAdapter(self.__table, self.__export_columns))
    data = get_data(exporter, file_type=DB_SQL)
    if self.__sheet_name is not None:
        # Caller requested a custom sheet name; rekey the single sheet.
        _set_dictionary_key(data, self.__sheet_name)
    return data
def test_case_insentivity():
    """File type lookup is case-insensitive ("CSV" resolves to csv)."""
    rows = [["1", "2", "3"]]
    stream = manager.get_io("CSV")
    # test default format for saving is 'csv'
    save_data(stream, rows)
    stream.seek(0)
    # test default format for reading is 'csv'
    result = get_data(stream)
    assert result["csv"] == [[1, 2, 3]]
def test_case_insentivity():
    """File type lookup is case-insensitive ("CSV" resolves to csv)."""
    rows = [["1", "2", "3"]]
    stream = manager.get_io("CSV")
    # test default format for saving is 'csv'
    save_data(stream, rows)
    stream.seek(0)
    # test default format for reading is 'csv'
    result = get_data(stream)
    assert result["csv"] == [[1, 2, 3]]
def test_conversion_from_bytes_to_text():
    """Raw csv bytes are decoded and parsed into rows."""
    test_file = "conversion.csv"
    save_data(test_file, [["1", "2", "3"]])
    with open(test_file, "rb") as source:
        raw = source.read()
    parsed = get_data(raw, "csv")
    assert parsed["csv"] == [[1, 2, 3]]
    os.unlink(test_file)
def get_data(self):
    """Export one django model (optionally restricted columns) and parse it."""
    exporter = django.DjangoModelExporter()
    exporter.append(
        django.DjangoModelExportAdapter(self.__model, self.__export_columns)
    )
    data = get_data(exporter, file_type=DB_DJANGO, **self.__keywords)
    if self.__sheet_name is not None:
        # Caller requested a custom sheet name; rekey the single sheet.
        _set_dictionary_key(data, self.__sheet_name)
    return data
def test_filter_column_2(self):
    """start_column plus column_limit select a single column (ods)."""
    window = dict(start_column=1, column_limit=1)
    result = get_data(self.test_file, library="pyexcel-ods", **window)
    eq_(result[self.sheet_name], [[21], [22], [23], [24], [25], [26]])
def test_filter_row(self):
    """row_renderer is applied to every row while reading."""
    def stringify(row):
        # Render each cell as text.
        return [str(cell) for cell in row]
    rendered = get_data(self.test_file, row_renderer=stringify)
    eq_(rendered[self.test_file], [["1", "21", "31"], ["2", "22", "32"]])
def test_filter_both_ways_2(self):
    """Row and column windows combine to select a single cell (ods3)."""
    window = dict(start_column=1, column_limit=1, start_row=3, row_limit=1)
    result = get_data(self.test_file, library="pyexcel-ods3", **window)
    eq_(result[self.sheet_name], [[24]])
def test_issue_30_utf8_BOM_header():
    """utf-8-sig writes a BOM and reads back transparently (issue 30)."""
    content = [[u"人有悲歡離合", u"月有陰晴圓缺"]]
    test_file = "test-utf8-BOM.csv"
    save_data(test_file, content, encoding="utf-8-sig", lineterminator="\n")
    round_tripped = get_data(test_file, encoding="utf-8-sig")
    assert round_tripped[test_file] == content
    with open(test_file, "rb") as f:
        raw = f.read()
    # The file must begin with the UTF-8 byte order mark.
    assert raw[0:3] == b"\xef\xbb\xbf"
    os.unlink(test_file)
def test_filter_both_ways_2(self):
    """Row and column windows combine to select a single cell."""
    window = dict(start_column=1, column_limit=1, start_row=3, row_limit=1)
    result = get_data(self.test_file, **window)
    eq_(result[self.test_file], [[24]])
def test_issue_23():
    """Huge integers and inf-like words survive csv parsing (issue 23)."""
    loaded = get_data(get_fixture("issue23.csv"))
    expected_rows = [
        [8204235414504252, u"inf"],
        [82042354145042521, u"-inf"],
        [820423541450425216, 0],
        [820423541450425247, 1],
        [8204235414504252490, 1.1],
    ]
    eq_(loaded["issue23.csv"], expected_rows)
def test_issue_23():
    """Huge integers and inf-like words survive csv parsing (issue 23)."""
    test_file = os.path.join("tests", "fixtures", "issue23.csv")
    loaded = get_data(test_file)
    expected_rows = [
        [8204235414504252, u"inf"],
        [82042354145042521, u"-inf"],
        [820423541450425216, 0],
        [820423541450425247, 1],
        [8204235414504252490, 1.1],
    ]
    eq_(loaded["issue23.csv"], expected_rows)
def test_issue_33_34():
    """mmap objects are accepted as csv input (issues 33/34).

    Fixed: replaced the ``if PY26: pass / else:`` pyramid with a guard
    clause, and the mmap handle is now closed instead of being leaked.
    """
    if PY26:
        return  # mmap usage here is not exercised on Python 2.6
    import mmap
    test_file = get_fixture("issue20.csv")
    with open(test_file, "r+b") as f:
        memory_mapped_file = mmap.mmap(
            f.fileno(), 0, access=mmap.ACCESS_READ
        )
        try:
            data = get_data(memory_mapped_file, file_type="csv")
            eq_(data["csv"], [[u"to", u"infinity", u"and", u"beyond"]])
        finally:
            memory_mapped_file.close()
def test_issue_35_encoding_for_file_content():
    """utf-16 encoded byte content is decoded correctly (issue 35)."""
    encoding = "utf-16"
    content = [
        [u"Äkkilähdöt", u"Matkakirjoituksia", u"Matkatoimistot"],
        [u"Äkkilähdöt", u"Matkakirjoituksia", u"Matkatoimistot"],
    ]
    test_file = "test-%s-encoding-in-mmap-file.csv" % encoding
    save_data(test_file, content, encoding=encoding)
    with open(test_file, "r+b") as f:
        raw_bytes = f.read()
    parsed = get_data(raw_bytes, file_type="csv", encoding=encoding)
    eq_(parsed["csv"], content)
    os.unlink(test_file)
def get_data(self):
    """Download the resource at self.url and parse its content into sheets."""
    response = request.urlopen(self.url)
    info = response.info()
    # Py2's mimetools result and Py3's email.message expose the
    # content type through different attributes.
    mime_type = info.type if PY2 else info.get_content_type()
    file_type = FILE_TYPE_MIME_TABLE.get(mime_type, None)
    if file_type is None:
        # Unknown mime type: fall back to guessing from the url suffix.
        file_type = get_file_type_from_url(self.url)
    content = response.read()
    return get_data(content, file_type=file_type, **self.keywords)
def check_mmap_encoding(encoding):
    """Round-trip *encoding* through an mmap-backed csv read (shared helper).

    Fixed: the mmap handle is now closed instead of being leaked.
    """
    import mmap
    content = [
        [u"Äkkilähdöt", u"Matkakirjoituksia", u"Matkatoimistot"],
        [u"Äkkilähdöt", u"Matkakirjoituksia", u"Matkatoimistot"],
    ]
    test_file = "test-%s-encoding-in-mmap-file.csv" % encoding
    save_data(test_file, content, encoding=encoding)
    with open(test_file, "r+b") as f:
        memory_mapped_file = mmap.mmap(
            f.fileno(), 0, access=mmap.ACCESS_READ
        )
        try:
            data = get_data(
                memory_mapped_file, file_type="csv", encoding=encoding
            )
            eq_(data["csv"], content)
        finally:
            memory_mapped_file.close()
    os.unlink(test_file)
def parse_db(self, argument, export_columns_list=None, on_demand=False, **keywords):
    """Export the given (session, tables) pair and parse it into sheets.

    When on_demand is true, sheets are produced lazily via iget_data;
    otherwise the whole book is materialized via get_data.
    """
    session, tables = argument
    exporter = sql.SQLTableExporter(session)
    if export_columns_list is None:
        # No column restriction for any table.
        export_columns_list = [None] * len(tables)
    for table, export_columns in zip(tables, export_columns_list):
        exporter.append(sql.SQLTableExportAdapter(table, export_columns))
    if on_demand:
        sheets, _ = iget_data(exporter, file_type=self._file_type, **keywords)
        return sheets
    return get_data(exporter, file_type=self._file_type, **keywords)