def export_to_excel(self, filename: str) -> None:
    """
    Export in XLSX format.

    Preprocesses the collected keys/values, chunks the flat value list
    into rows of ``len(self.keys)`` columns, prepends the header row,
    and writes the result to *filename* with pyexcelerate.
    """
    from pyexcelerate import Workbook  # type: ignore

    self.__preprocess()
    sub = len(self.keys)
    if sub < 1:
        # No columns at all: the query produced nothing usable.
        # (Fixed typo "occured" -> "occurred".)
        typer.secho(
            "An error occurred please check your query.",
            fg=typer.colors.RED,
            bold=True,
        )
        # Exit non-zero so callers/shells can detect the failure;
        # a bare sys.exit() reports success (status 0).
        sys.exit(1)

    # Split the flat value list into rows of `sub` columns each.
    data = [
        self.values[ctr:ctr + sub]
        for ctr in range(0, len(self.values), sub)
    ]
    data.insert(0, self.keys)
    wb = Workbook()
    wb.new_sheet("Analytics", data=data)
    wb.save(filename)
    typer.secho(
        "\nAnalytics successfully created in XLSX format ✅",
        bold=True,
    )
def to_xlsx(self, filename, submissions):
    """
    Write parsed *submissions* to XLSX file *filename*, one sheet per
    section.

    Sheets are created lazily the first time a section appears. The
    header row (row 1) is written once at sheet creation — the original
    rewrote it for every chunk, which was redundant work with identical
    output. A per-sheet cursor tracks the next free data row.
    """
    workbook = Workbook()
    sheets = {}
    for chunk in self.parse_submissions(submissions):
        for section_name, rows in chunk.items():
            try:
                cursor = sheets[section_name]
                current_sheet = cursor['sheet']
            except KeyError:
                # First occurrence of this section: create the sheet,
                # write its header row once, start data at row 2.
                current_sheet = workbook.new_sheet(section_name)
                cursor = sheets[section_name] = {
                    "sheet": current_sheet,
                    "row": 2,
                }
                for i, label in enumerate(self.labels[section_name], 1):
                    current_sheet.set_cell_value(1, i, label)
            for row in rows:
                y = cursor["row"]
                for i, cell in enumerate(row, 1):
                    current_sheet.set_cell_value(y, i, cell)
                cursor["row"] += 1
    workbook.save(filename)
def __call__(self, data, wrapped=True):
    """Render *data* as an XLSX workbook and return it as an in-memory file."""
    buffer = BytesIO()

    workbook = Workbook()
    sheet = workbook.new_sheet("data")

    # Header row: field names from the reference major version ('1').
    headers = self.fields_by_major_version['1'].keys()
    last_col = get_column_letter(len(headers))
    sheet.range("A1", last_col + "1").value = [headers]

    # One spreadsheet row per object, using the accessor set matching
    # that object's major version.
    get_major_version = self.get_major_version
    for row_idx, obj in enumerate(data.items, start=2):
        accessors = self.fields_by_major_version[get_major_version(obj)]
        row = [accessor(obj) for accessor in accessors.values()]
        sheet.range("A" + str(row_idx),
                    last_col + str(row_idx)).value = [row]

    workbook.save(buffer)
    buffer.seek(0)
    return {'file': buffer, 'client_filename': self.client_filename}
def make_excel(self):
    """Build the exchange/given workbook and save it to the fixed info path.

    Removed a large block of commented-out dead code (six disabled
    per-area sheets) that obscured the two sheets actually written.
    """

    def set_style(the_ws):
        # Merge the A1:E1 title row; centre columns 1-4 at size 15 and
        # give the wider column 5 size 30.
        ws_style = Style(size=15,
                         alignment=Alignment(horizontal="center",
                                             vertical="center"))
        the_ws.range("A1", "E1").merge()
        for i in range(1, 5):
            the_ws.set_col_style(i, ws_style)
        the_ws.set_col_style(
            5,
            Style(size=30,
                  alignment=Alignment(horizontal="center",
                                      vertical="center")))

    wb = Workbook()
    ws = wb.new_sheet(u'交换表', data=self.exchange_data)
    set_style(ws)
    ws = wb.new_sheet(u'赠与表', data=self.given_data)
    set_style(ws)
    wb.save('stucampus/christmas/info/1.xlsx')
def row_height_width():
    """Demonstrate pyexcelerate column-width and row-height styling."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime
    wb = Workbook()
    ws = wb.new_sheet("sheet name 1")
    ws[1][1].value = "this is long string 1"
    ws[1][2].value = "this is long string 2"
    ws[1][3].value = "this is long string 3"
    ws.set_col_style(1, Style(size=-1))  # auto-fit column 1
    ws.set_col_style(2, Style(size=0))  # hidden column 2
    ws.set_col_style(3, Style(size=100))  # width=100 column 3
    # -----------------
    ws = wb.new_sheet("sheet name 2")
    ws[1][1].value = "this is long string 1"
    ws[2][1].value = "this is long string 2"
    ws[3][1].value = "this is long string 3"
    # (Comments fixed: these style ROWS, not columns.)
    ws.set_row_style(1, Style(size=-1))  # auto-fit row 1
    ws.set_row_style(2, Style(size=0))  # hidden row 2
    ws.set_row_style(3, Style(size=100))  # height=100 row 3
    wb.save("row_height_width.xlsx")
def _to_excel(self, data, path, header=None, sheet_name='Data'):
    """
    Write *data* (a DataFrame or a list of rows) to an XLSX file at
    *path*, optionally prepending a *header* row.

    Retries on PermissionError (target file open in Excel) until the
    save succeeds or the user cancels.

    Returns:
        bool: True when the file was saved, False when cancelled.
    """
    # Fixed the mutable-default-argument pitfall (header=[]); None is
    # falsy exactly like [], so the truthiness check below is unchanged.
    values = data.values.tolist() if self._is_dataframe(data) else data
    sheet_data = [header] + values if header else values
    wb = Workbook()
    wb.new_sheet(sheet_name, data=sheet_data)
    saved = False
    cancel = False
    while not saved and not cancel:
        try:
            wb.save(path)
            saved = True
        except PermissionError:
            retry = msg.askretrycancel(
                constants.APP_NAME,
                f'Could not save file to ({path}) because there is '
                'a file with that name currently open. To continue, '
                'close the file and retry.')
            if not retry:
                cancel = True
    return saved
def export_to_xlsx(data, temp_dir):
    """
    Render *data* into an XLSX workbook and return its bytes as a
    BytesIO, or None on failure.

    Bolds the header row. Writes to a temporary on-disk file, then
    copies the bytes into memory and deletes the file.
    """
    start_time = datetime.now()
    # NOTE(review): temp_dir is unused; the path is hard-coded —
    # presumably intended to use temp_dir, confirm with callers.
    file_path = "/tmp/temp.xlsx"
    try:
        wb = Workbook()
        ws = wb.new_sheet("Policies", data=data)
        ws.set_row_style(1, Style(font=Font(bold=True)))  # bold the header row
        # Save the xlsx file, then read it back as binary.  The context
        # manager guarantees the handle is closed even if read() raises
        # (the original leaked the handle on that path).
        wb.save(file_path)
        output = io.BytesIO()
        with open(file_path, 'rb') as xlsx_file:
            output.write(xlsx_file.read())
        os.remove(file_path)
    except Exception as ex:
        m.logger.fatal("\tunable to export to file: {}".format(ex,))
        return None
    m.logger.info("\tfile export took {}".format(datetime.now() - start_time))
    return output
def save_excel_fast(df, path, sheet_name="Sheet1", skiprows=0):
    """Dump *df* (header + values) to *path*, leaving *skiprows* blank rows on top."""
    blank_rows = [[] for _ in range(skiprows)]
    rows = blank_rows + [df.columns.tolist()] + df.values.tolist()
    workbook = Workbook()
    workbook.new_sheet(sheet_name, data=rows)
    workbook.save(path)
def click(self):
    """
    Button handler: read a grammar from the text edit and a sentence
    from the line edit, normalise the grammar (eliminate left recursion
    and left factors), run LL(1) predictive parsing, show each parsing
    step in the table widget, and dump the steps to output.xlsx.
    """
    gram=self.textEdit.toPlainText().split('\n')
    sentence=self.lineEdit.text()
    grammar=Grammar()
    grammar.insert_from_arr(gram)
    if grammar.have_left_recursion()==True:
        grammar.eliminating_left_recursion()
    if grammar.have_left_factor()==True:
        grammar.left_factoring()
    follow=grammar.follow()
    if grammar.is_LL1()==False:
        # Message (kept verbatim) means "not an LL(1) grammar".
        print('不是LL(1)文法')
        return
    table=grammar.consuct_predictive_parsing_table(follow)
    step = grammar.predictive_parsing(table, sentence)
    # Insert one widget row per parsing step, then fill every cell.
    for i in range(len(step)):
        self.tableWidget.insertRow(self.tableWidget.rowCount())
    for i in range(self.tableWidget.rowCount()):
        for j in range(self.tableWidget.columnCount()):
            self.tableWidget.setItem(i,j,QTableWidgetItem(step[i][j]))
    # Write the parsing steps to an Excel file
    from pyexcelerate import Workbook
    data = step  # data is a 2D array
    wb = Workbook()
    wb.new_sheet("step", data=data)
    wb.save("output.xlsx")
def df_to_excel(df, path, sheet_name='Sheet 1'):
    """Save *df* (header row followed by its values) as one XLSX sheet at *path*."""
    rows = [df.columns.tolist()]
    rows.extend(df.values.tolist())
    workbook = Workbook()
    workbook.new_sheet(sheet_name, data=rows)
    workbook.save(path)
def save_to_excel(filename, rows):
    """Write *rows* to '<filename>.xlsx' while a background thread reports progress."""
    workbook = Workbook()
    # Progress reporter runs in the background while the (slow) save happens.
    progress = threading.Thread(target=print_time, args=(len(rows), ))
    workbook.new_sheet(sheet_name='Untitled', data=rows)
    progress.start()
    workbook.save(filename + '.xlsx')
    print('Your data is saved in ' + filename + '.xlsx')
def generate_excel(self, size=5):
    '''
    Generates Excel file and uploads to S3 bucket.
    Updates task state as it generates.
    '''
    rows = []
    for current in range(size):
        rows.append([current])
        # Report progress after each (deliberately slow) row.
        self.update_state(state='PROGRESS',
                          meta={'current': current, 'total': size})
        time.sleep(1)
    buffer = BytesIO()
    workbook = Workbook()
    workbook.new_sheet("sheet name", data=rows)
    workbook.save(buffer)
    buffer.seek(0)
    # Random suffix keeps concurrent uploads from colliding on the key.
    key_name = 'small_excel_' + str(random.randint(1, 10000)) + '.xlsx'
    s3_client.upload_fileobj(buffer, S3_BUCKET_NAME, key_name)
    url = s3_client.generate_presigned_url('get_object',
                                           Params={
                                               'Bucket': S3_BUCKET_NAME,
                                               'Key': key_name
                                           },
                                           ExpiresIn=3600)
    return {
        'current': size,
        'total': size,
        'status': 'COMPLETED',
        'result': 42,
        'url': url
    }
def _write_xlsx_output(self, in_both_data=None):
    """
    Write comparison results to self.output_path.

    If *in_both_data* is provided (non-empty), write a single 'in_both'
    sheet; otherwise write one 'file_<name>' sheet per included
    DataFrame. Retries on PermissionError while the target file is open
    elsewhere.
    """
    # Fixed the mutable-default-argument pitfall (was in_both_data=[]);
    # None is falsy like [], so the truthiness branch is unchanged.
    wb = Workbook()
    if in_both_data:
        wb.new_sheet('in_both', data=in_both_data)
    else:
        for name, df in self.dfs.items():
            if self.include_options[name]:
                sheet_name = f'file_{name}'
                column_names = list(df)
                data = [column_names] + df.values.tolist()
                wb.new_sheet(sheet_name, data=data)
    try_to_save = True
    while try_to_save:
        try:
            wb.save(self.output_path)
            self.output_saved = True
            try_to_save = False
        except PermissionError:
            # askretrycancel returns False on cancel, ending the loop.
            try_to_save = msg.askretrycancel(
                self.title,
                f'Could not save file to ({self.output_path}) '
                'because there is a file with that name currently '
                'open. Close that file to allow for this one to be '
                'saved.')
def to_xlsx(self, filename, submissions):
    """
    Write parsed *submissions* to XLSX file *filename*, one sheet per
    section.

    Sheets are created lazily the first time a section appears. The
    header row (row 1) is written once at sheet creation — the original
    rewrote it for every chunk, which was redundant work with identical
    output. A per-sheet cursor tracks the next free data row.
    """
    workbook = Workbook()
    sheets = {}
    for chunk in self.parse_submissions(submissions):
        for section_name, rows in chunk.items():
            try:
                cursor = sheets[section_name]
                current_sheet = cursor['sheet']
            except KeyError:
                # First occurrence of this section: create the sheet,
                # write its header row once, start data at row 2.
                current_sheet = workbook.new_sheet(section_name)
                cursor = sheets[section_name] = {
                    "sheet": current_sheet,
                    "row": 2,
                }
                for i, label in enumerate(self.labels[section_name], 1):
                    current_sheet.set_cell_value(1, i, label)
            for row in rows:
                y = cursor["row"]
                for i, cell in enumerate(row, 1):
                    current_sheet.set_cell_value(y, i, cell)
                cursor["row"] += 1
    workbook.save(filename)
def make_xlsx(self):
    """
    Write stock records on the odd sheet rows (1, 3, 5, ...) and
    keyword records interleaved on the even rows (2, 4, 6, ...), then
    save the workbook to self.path.
    """
    wb = Workbook()
    ws = wb.new_sheet("sheet1")
    # Stock data: one record every other row, starting at row 1.
    for idx, record in enumerate(self.stock_list):
        target_row = 1 + 2 * idx
        for col, value in enumerate(record, start=1):
            ws.set_cell_value(target_row, col, value)
    # Keyword data: one record every other row, starting at row 2.
    for idx, record in enumerate(self.keywords_list):
        target_row = 2 + 2 * idx
        for col, value in enumerate(record, start=1):
            ws.set_cell_value(target_row, col, value)
    wb.save(self.path)
def make_excel(self):
    """Build the exchange/given workbook and save it to the fixed info path.

    Removed a large block of commented-out dead code (six disabled
    per-area sheets) that obscured the two sheets actually written.
    """

    def set_style(the_ws):
        # Merge the A1:E1 title row; centre columns 1-4 at size 15 and
        # give the wider column 5 size 30.
        ws_style = Style(size=15,
                         alignment=Alignment(horizontal="center",
                                             vertical="center"))
        the_ws.range("A1", "E1").merge()
        for i in range(1, 5):
            the_ws.set_col_style(i, ws_style)
        the_ws.set_col_style(5,
                             Style(size=30,
                                   alignment=Alignment(horizontal="center",
                                                       vertical="center")))

    wb = Workbook()
    ws = wb.new_sheet(u'交换表', data=self.exchange_data)
    set_style(ws)
    ws = wb.new_sheet(u'赠与表', data=self.given_data)
    set_style(ws)
    wb.save('stucampus/christmas/info/1.xlsx')
async def report(task_id: int):
    """
    Build an XLSX report for a task — one row per resource listing its
    domain, found keywords, and any HTTP/HTTPS problems — and return it
    as a FileResponse.
    """
    task = await Task.filter(id=task_id).first()
    # Header row (Russian): link / keywords found / were there problems.
    result = [['Ссылка', 'Найденные ключевые слова', 'Были ли проблемы']]
    resource_items = await ResourceItem.filter(
        resource__task_id=task.id).order_by('resource__order').values(
            'resource_id', 'resource__domain', 'resource__error_https',
            'resource__error_http', 'keywords_found', 'done', 'error')
    # NOTE(review): itertools.groupby only groups *consecutive* items —
    # this relies on ordering by resource__order keeping all rows of a
    # given resource_id adjacent; confirm that assumption holds.
    resource_with_grouper = groupby(resource_items, itemgetter('resource_id'))
    for resource_id, items in resource_with_grouper:
        keywords = set()
        problems = set()
        domain = ''
        for item in items:
            for x in item['keywords_found']:
                keywords.add(x)
            # Problems are recorded only when BOTH protocols failed —
            # presumably "reachable via either protocol is OK"; verify.
            if item['resource__error_https'] and item['resource__error_http']:
                problems.add(item['resource__error_https'])
                problems.add(item['resource__error_http'])
            domain = item['resource__domain']
        problems.discard(None)
        # 'Нет' (kept verbatim) means "No" — no problems found.
        result.append([
            domain, ', '.join(str(s) for s in keywords),
            ', '.join(str(e) for e in problems) if problems else 'Нет'
        ])
    wb = Workbook()
    wb.new_sheet("sheet name", data=result)
    wb.save('/tmp/result.xlsx')
    return FileResponse('/tmp/result.xlsx')
def write_cell_data_faster():
    """Demonstrate set_cell_value() with a number, a formula and a date string."""
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet.set_cell_value(1, 1, 15)  # a number
    sheet.set_cell_value(1, 2, 20)
    sheet.set_cell_value(1, 3, "=SUM(A1,B1)")  # a formula
    sheet.set_cell_value(1, 4, str(datetime.now()))  # a date
    workbook.save("write_cell_data_faster.xlsx")
def write_cell_data_fast():
    """Demonstrate cell-indexing assignment with a number, a formula and a date string."""
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 15  # a number
    sheet[1][2].value = 20
    sheet[1][3].value = "=SUM(A1,B1)"  # a formula
    sheet[1][4].value = str(datetime.now())  # a date
    workbook.save("write_cell_data_fast.xlsx")
def output(self, records):
    """
    Write *records* (an iterable of rows of cells) out to the XLSX file
    whose path is derived from the configured JSON file name.

    Sheet creation is delegated to self.set_sheets(wb) and per-cell
    output to self.write(cell) — presumably self.write targets the
    sheet(s) created on this workbook; confirm in those helpers.
    """
    excel_file = self.get_file_path(self.json.file, '.xlsx')
    wb = Workbook()
    self.set_sheets(wb)
    for record in records:
        for cell in record:
            self.write(cell)
    wb.save(excel_file)
def Make_stock_title_xlsx(self, dir_path, file_name):
    """Write self.title_list, one title per row, to '<dir_path>/<file_name>.xlsx'."""
    print("Make method")
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet1")
    for row, title in enumerate(self.title_list, start=1):
        sheet.set_cell_value(row, 1, title)
    workbook.save(dir_path + '/' + file_name + '.xlsx')
def styling_columns_fastest():
    """Demonstrate applying a background fill to an entire column."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 123456
    red_fill = Style(fill=Fill(background=Color(255, 0, 0, 0)))
    sheet.set_col_style(1, red_fill)
    workbook.save("styling_columns_fastest.xlsx")
def download_make_fts_data(output_dir):
    """Download FTS data into *output_dir*, then write xlsx/contributions.xlsx."""
    # Fetch the raw FTS data first.
    download_fts_data(output_dir)
    # Render the contributions worksheet and save it under xlsx/.
    contributions = make_fts_contributions_data(output_dir)
    workbook = Workbook()
    workbook.new_sheet("Data", data=contributions)
    workbook.save(os.path.join(output_dir, "xlsx", "contributions.xlsx"))
def styling_rows_fast():
    """Demonstrate applying a background fill to a whole row."""
    from pyexcelerate import Workbook, Color
    from datetime import datetime
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 123456
    sheet[1].style.fill.background = Color(255, 0, 0)
    workbook.save("styling_rows_fast.xlsx")
def df_to_excel(df, excelfilename):
    """Split *df* into sheets of at most 1,000,000 rows each and save to *excelfilename*."""
    SHEET_LENGTH = 1000000
    print(df)
    workbook = Workbook()
    for start in range(0, len(df), SHEET_LENGTH):
        print(start)
        chunk = df.iloc[start:start + SHEET_LENGTH, ]
        rows = [chunk.columns] + list(chunk.values)
        # Sheet is named after the chunk's starting row index.
        workbook.new_sheet(sheet_name='Row {}'.format(start), data=rows)
    workbook.save(excelfilename)
def _write_output_file(self):
    """Write self.file_df (header row + values) to the GUI-selected output path."""
    rows = [list(self.file_df)]
    rows.extend(self.file_df.values.tolist())
    workbook = Workbook()
    workbook.new_sheet('parsed addresses', data=rows)
    destination = self.gui.output_file.get()
    workbook.save(destination)
def pasarchivo(ruta, datb, tablas, tipo):
    """copy to csv files tables from query results

    Reads table names from the CSV *tablas* (column *tipo*), dumps each
    table of the SQLite database *datb* (under directory *ruta*) into
    one sheet of a Param<yymmdd>.xlsx workbook — or into a gzipped CSV
    under <ruta>/csv when the table exceeds Excel's row limit — and
    prints timing information.
    """
    import sqlite3
    import pandas as pd
    import timeit
    from pyexcelerate import Workbook
    from pathlib import Path
    from datetime import date
    dat_dir = Path(ruta)
    db_path1 = dat_dir / datb
    start_time = timeit.default_timer()
    conn = sqlite3.connect(db_path1)  # database connection
    c = conn.cursor()
    today = date.today()
    df1 = pd.read_csv(tablas)
    xls_file = "Param" + today.strftime("%y%m%d") + ".xlsx"
    xls_path = dat_dir / xls_file  # xls file path-name
    csv_path = dat_dir / "csv"  # csv path to store big data
    wb = Workbook()  # excelerator file init
    i = 0  # count of tables actually written to the workbook
    for index, row in df1.iterrows():  # panda row iteration tablas file by tipo column
        line = row[tipo]
        if not pd.isna(row[tipo]):  # nan null values validation
            try:
                # NOTE(review): table name is concatenated into the SQL —
                # acceptable for a trusted config file, unsafe for
                # untrusted input.
                df = pd.read_sql_query("select * from " + line + ";", conn)  # pandas dataframe from sqlite
                if len(df) > 1000000:  # excel not supported
                    csv_loc = line + today.strftime("%y%m%d") + '.csv.gz'  # compressed csv file name
                    print('Table {} saved in {}'.format(line, csv_loc))
                    df.to_csv(csv_path / csv_loc, compression='gzip')  # pandas dataframe saved to csv
                else:
                    data = [df.columns.tolist()] + df.values.tolist()
                    # NOTE(review): zipping df.index against `data`
                    # pairs index[0] with the HEADER row, shifting every
                    # index label one row off its data (and dropping the
                    # last pairing) — confirm this offset is intended.
                    data = [[index] + row for index, row in zip(df.index, data)]
                    wb.new_sheet(line, data=data)
                    print('Table {} stored in xlsx sheet'.format(line))
                    i += 1
            except sqlite3.Error as error:  # sqlite error handling
                print('SQLite error: %s' % (' '.join(error.args)))
    end_time = timeit.default_timer()
    delta = round(end_time - start_time, 2)
    print("Data proc took " + str(delta) + " secs")
    deltas = 0
    if i == 0:
        print('No tables to excel')
    else:
        print("Saving tables in {} workbook".format(xls_path))
        start_time = timeit.default_timer()
        wb.save(xls_path)
        end_time = timeit.default_timer()
        deltas = round(end_time - start_time, 2)
        print("xlsx save took " + str(deltas) + " secs")
    print("Total time " + str(delta + deltas) + " secs")
    c.close()
    conn.close()
def makeExcel(result):
    """
    Export genetic-algorithm runs to an Excel workbook.

    result[0] holds one record per run:
    (GAME, MUTATION_RATE, LAYERS, MAX_GENERATIONS, POPULATION_COUNT,
     ELAPSED_TIME, COL_TOTALS, INFO, GENERATIONS).
    One sheet is written per run; the workbook is saved once with a
    timestamped filename.
    """
    wb = Workbook()
    for run_idx in range(len(result[0])):
        run = result[0][run_idx]
        GAME = run[0]
        MUTATION_RATE = run[1]
        LAYERS = run[2]
        MAX_GENERATIONS = run[3]
        POPULATION_COUNT = run[4]
        ELAPSED_TIME = run[5]
        COL_TOTALS = run[6]
        INFO = run[7]
        GENERATIONS = run[8]
        sheetname = f"Genetics_{run_idx+1}_{MUTATION_RATE}"
        ws = wb.new_sheet(f"{run_idx+1}")
        # Auto-fit the first six columns.  (Distinct loop variable: the
        # original reused `i` here and below, shadowing the run index.)
        for col in range(6):
            ws.set_col_style(col + 1, Style(size=-1))
        ws.cell("A1").value = GAME
        ws.cell("A2").value = "Genetic Alg"
        ws.cell("A3").value = sheetname
        ws.cell("B1").value = "Mutation rate:"
        ws.cell("B2").value = MUTATION_RATE
        ws.cell("C1").value = "Generations:"
        ws.cell("C2").value = MAX_GENERATIONS
        ws.cell("D1").value = "Population count:"
        ws.cell("D2").value = POPULATION_COUNT
        ws.cell("E1").value = "Hidden Layers:"
        lay = ''
        for layer in LAYERS:
            lay += f"{layer}, "
        ws.cell("E2").value = lay
        ws.cell("F1").value = "Time(ms):"
        ws.cell("F2").value = ELAPSED_TIME
        ws.cell('G1').value = INFO
        startrow = 4
        startcol = 2
        ws[startrow - 1][startcol].value = "Generation"
        ws[startrow - 1][startcol + 1].value = "Average Fitness"
        ws[startrow - 1][startcol + 2].value = "Max Fitness"
        for r in range(len(GENERATIONS)):
            for col in range(len(GENERATIONS[0])):
                ws[startrow + r][startcol + col].value = int(GENERATIONS[r][col])
        total_row = startrow + len(GENERATIONS)
        ws[total_row][startcol].value = "Total:"
        # Bug fix: the original assigned to the cell object itself
        # (`ws[...][...] = value`) instead of its `.value` attribute,
        # unlike every sibling write in this function.
        ws[total_row][startcol + 1].value = int(COL_TOTALS[1])
        ws[total_row][startcol + 2].value = int(COL_TOTALS[2])
    # NOTE(review): saved once after all sheets; the flattened original
    # was ambiguous about whether save sat inside the loop — confirm.
    wb.save(
        f"C:\\Users\\dosha\\Desktop\\ExcelFiles\\GA_{GAME}_{dt.datetime.now().strftime('%f')}.xlsx"
    )
    print("..........Excel file generated..............")
def make_excel(self):
    """Build the exchange/given workbook (styled, title row merged) and save it."""

    def apply_style(sheet):
        # Centre the first four columns at size 15 and merge the A1:D1
        # title row.
        centred = Style(size=15,
                        alignment=Alignment(horizontal="center",
                                            vertical="center"))
        sheet.range("A1", "D1").merge()
        for col in range(1, 5):
            sheet.set_col_style(col, centred)

    wb = Workbook()
    exchange_sheet = wb.new_sheet(u'交换表', data=self.exchange_data)
    apply_style(exchange_sheet)
    given_sheet = wb.new_sheet(u'赠与表', data=self.given_data)
    apply_style(given_sheet)
    wb.save('stucampus/christmas/info/3.xlsx')
def styling_ranges():
    """Demonstrate styling rectangular ranges: bold, italic, fill, strikethrough."""
    from pyexcelerate import Workbook, Color
    from datetime import datetime
    workbook = Workbook()
    sheet = workbook.new_sheet("test")
    sheet.range("A1", "C3").value = 1
    sheet.range("A1", "C1").style.font.bold = True
    sheet.range("A2", "C3").style.font.italic = True
    sheet.range("A3", "C3").style.fill.background = Color(255, 0, 0, 0)
    sheet.range("C1", "C3").style.font.strikethrough = True
    workbook.save("styling_ranges.xlsx")
def write_excel(file_name, data, engine='pyexcelerate'):
    """
    Write multiple sheets to one excel and save to disk.
    Ignore df's index and forbidden MultiIndex columns.
    Notes:
        1. `pyexcelerate` can be nearly three times faster than `pd.to_excel`.
        2. save as csv much faster than excel
    :param file_name:
    :param data: [(sheet_name, df), ...]
    :param engine: pyexcelerate: modify headers' style, display date properly and don't display nan
                   pd.to_excel:
                   pd.to_csv: file_name/sheet_name.csv
    :return: None
    """
    if engine == 'pyexcelerate':
        wb = Workbook()
        for sheet_name, df in data:
            cols = df.columns.tolist()
            if len(df) > 0:
                # don't display nan
                df = df.fillna('')
                # display date properly: stringify any column whose
                # first value is a date (assumes homogeneous columns —
                # TODO confirm).
                for col in cols:
                    if isinstance(df[col].iloc[0], datetime.date):
                        df[col] = df[col].astype(str)
            ws = wb.new_sheet(sheet_name, [cols] + df.values.tolist())
            # modify headers' style: grey bold centred header row with
            # underline-style borders.  num2title is a project helper —
            # presumably converts a column count to an Excel letter.
            h, w = df.shape
            right = num2title(w) + '1'
            ws.range("A1", right).style.fill.background = Color(210, 210, 210, 0)
            ws.range("A1", right).style.font.bold = True
            ws.range("A1", right).style.alignment.horizontal = 'center'
            ws.range("A1", right).style.borders.right.style = '_'
            ws.range("A1", right).style.borders.bottom.style = '_'
        wb.save(file_name)
    elif engine == 'pd.to_excel':
        writer = pd.ExcelWriter(file_name)
        for sheet_name, df in data:
            df.to_excel(writer, sheet_name, index=False)
        writer.save()
        writer.close()
    elif engine == 'pd.to_csv':
        # One CSV per sheet inside a directory named after the file.
        dir_name = file_name.replace('.xlsx', '')
        makedirs(dir_name)
        for sheet_name, df in data:
            df.to_csv(os.path.join(dir_name, sheet_name + '.csv'), index=False)
    else:
        # Unknown engine: silently do nothing (deliberate no-op).
        pass
def bulk_prepare_basis(self, start_date, end_date, dart='Day Ahead', market='All', to_database_option=False, to_excel=None):
    """
    Build basis results for every power-plant entity (optionally
    filtered by *market*) over [start_date, end_date].

    Returns (basis_df, basis_hourly_detail_df).  When *to_excel* is a
    filename, also writes a 'basis' workbook plus a companion
    '<name>_hourly_detail.xlsx' workbook.
    """
    powerplant_list = [entity for entity in self.entities if entity.type == 'plant']
    if market != 'All':
        powerplant_list = [
            powerplant for powerplant in powerplant_list if powerplant.market == market]
    basis_df = pd.DataFrame()
    basis_hourly_detail_df = pd.DataFrame()
    # NOTE(review): DataFrame.append is deprecated/removed in modern
    # pandas; pd.concat would be the forward-compatible call.
    for powerplant in powerplant_list:
        powerplant_basis_df, powerplant_basis_details_df = powerplant.build_basis(start_date, end_date, dart)
        basis_df = basis_df.append(powerplant_basis_df)
        basis_hourly_detail_df = basis_hourly_detail_df.append(powerplant_basis_details_df)
    # # basis_df.to_csv("basis_df.csv")
    basis_df = basis_df.reset_index()
    # print (basis_df.columns)
    # basis_df = pd.read_csv("basis_df.csv")
    # Long format: one row per (month, peak_info, plant, instrument).
    portfolio_basis_result_df = pd.melt(basis_df, id_vars=['month','peak_info','plant'],
                                        value_vars=['basis_$','basis_%'],
                                        var_name='instrument', value_name='value')
    # Instrument id like "<plant> basis - <peak_info>_$" / "..._%".
    portfolio_basis_result_df['instrument_id'] = portfolio_basis_result_df.apply(lambda row: row['plant'] + ' basis - ' + row['peak_info'] + "_" + row['instrument'].split("_")[1], axis=1)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # Wide format: months down, instrument ids across.
    portfolio_basis_result_df = pd.pivot_table(portfolio_basis_result_df, index=['month'],
                                               columns=['instrument_id'], values='value',
                                               aggfunc=np.sum)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # portfolio_basis_result_df.to_csv("portfolio_basis_result_df.csv")
    if to_excel is not None:
        # basis_df.to_excel(to_excel, sheet_name='basis')
        # basis_df.to_excel(to_excel, sheet_name='detail')
        basis_values = [portfolio_basis_result_df.columns] + list(portfolio_basis_result_df.values)
        wb = Workbook()
        wb.new_sheet('basis', data=basis_values)
        wb.save(to_excel)
        wb = Workbook()
        basis_detail_values = [basis_hourly_detail_df.columns] + list(basis_hourly_detail_df.values)
        wb.new_sheet('basis_details', data=basis_detail_values)
        wb.save(to_excel.split('.')[0] +
                "_hourly_detail.xlsx")
    return basis_df, basis_hourly_detail_df
def make_XlsxFile(similarity_2D_array, search_names, save_dir='C:/data/2018_09_09_test/2D_array', save_name='2D_array.xlsx'):
    """Write a labelled similarity matrix to '<save_dir>/<save_name>'."""
    workbook = Workbook()
    sheet = workbook.new_sheet('new sheet')
    n = len(search_names)
    # Column headers along row 1 (starting at column 2).
    for offset in range(2, n + 2):
        sheet[1][offset].value = search_names[offset - 2]
    # Row headers down column 1 (starting at row 2).
    for offset in range(2, n + 2):
        sheet[offset][1].value = search_names[offset - 2]
    # Matrix body, offset by one row/column for the headers.
    for r in range(n):
        for c in range(n):
            sheet[r + 2][c + 2].value = similarity_2D_array[r][c]
    workbook.save(save_dir + '/' + save_name)
def print_excel_report(query, database, header, gq, filename): wb = Workbook() #Print Cover material ws = wb.new_sheet("Cover") ws.set_cell_value(1, 1, "Date Generated:") ws.set_cell_value(1, 2, datetime.datetime.now()) ws.set_cell_value(2, 1, "GEMINI Query:") ws.set_cell_value(2, 2, query) ws.set_cell_value(3, 1, "GEMINI Database:") ws.set_cell_value(3, 2, database) ws2 = wb.new_sheet("Variants", data=header) row = 2 for row in gq: cell = 1 row = row + 1 wb.save(filename)
def make_excel(self):
    """Save the exchange table as a single-sheet workbook under the info path."""
    workbook = Workbook()
    workbook.new_sheet(u'交换表', data=self.exchange_data)
    workbook.save('stucampus/christmas/info/2.xlsx')
    return out  # NOTE(review): tail of a function whose definition precedes this chunk


# --- Top-level scraping script (Python 2: print statements; `g` is the
# --- grab/browser object and `data` the scrape config from above) ---
itemsCollection = []
# Build the table header row from the configured item names
row = []
for item in data['items']:
    row.append(item['name'])
row.append('Original URL')
itemsCollection.append(row)
# The scraped data itself: one row per link
totalLinks = len(data['links'])
i = 0
for link in data['links']:
    i+=1
    g.go(link)
    row = []
    for item in data['items']:
        content = parse(item['xpath'], item['type'])
        row.append(content)
    row.append(link)
    itemsCollection.append(row)
    # Progress indicator, e.g. [3/120]
    print '['+ str(i) +'/'+ str(totalLinks) +']'

# Dump everything to output.xlsx with a bold header row
wb = Workbook()
ws = wb.new_sheet("sheet name", data=itemsCollection)
ws.set_row_style(1, Style(font=Font(bold=True)))
wb.save("output.xlsx")