async def report(task_id: int):
    """Build an .xlsx report for one task — one row per resource with the
    keywords found on it and any HTTP/HTTPS errors — and return it as a
    FileResponse.
    """
    task = await Task.filter(id=task_id).first()
    # Header row (Russian): link / keywords found / were there problems.
    result = [['Ссылка', 'Найденные ключевые слова', 'Были ли проблемы']]
    resource_items = await ResourceItem.filter(
        resource__task_id=task.id).order_by('resource__order').values(
        'resource_id', 'resource__domain', 'resource__error_https',
        'resource__error_http', 'keywords_found', 'done', 'error')
    # NOTE(review): itertools.groupby only groups *adjacent* rows; this relies
    # on ordering by 'resource__order' keeping items of the same resource
    # contiguous — confirm order values do not interleave across resources.
    resource_with_grouper = groupby(resource_items, itemgetter('resource_id'))
    for resource_id, items in resource_with_grouper:
        keywords = set()
        problems = set()
        domain = ''
        for item in items:
            for x in item['keywords_found']:
                keywords.add(x)
            # NOTE(review): errors are collected only when BOTH protocols
            # failed — verify `and` (vs `or`) is the intended rule.
            if item['resource__error_https'] and item['resource__error_http']:
                problems.add(item['resource__error_https'])
                problems.add(item['resource__error_http'])
            domain = item['resource__domain']
        problems.discard(None)
        result.append([
            domain,
            ', '.join(str(s) for s in keywords),
            # 'Нет' = "No" when no problems were recorded.
            ', '.join(str(e) for e in problems) if problems else 'Нет'
        ])
    wb = Workbook()
    wb.new_sheet("sheet name", data=result)
    # Fixed path: concurrent requests would clobber each other's file.
    wb.save('/tmp/result.xlsx')
    return FileResponse('/tmp/result.xlsx')
def df_to_excel(df, path, sheet_name='Sheet 1'):
    """Write DataFrame *df* (header + values) to an .xlsx file at *path*."""
    rows = df.values.tolist()
    rows.insert(0, df.columns.tolist())
    workbook = Workbook()
    workbook.new_sheet(sheet_name, data=rows)
    workbook.save(path)
def generate_excel(self, size=5):
    """Generates Excel file and uploads to S3 bucket.

    Updates task state as it generates; returns a dict with progress info
    and a presigned download URL.
    """
    rows = []
    for index in range(size):
        rows.append([index])
        # report progress back to the task broker, one tick per row
        self.update_state(state='PROGRESS',
                          meta={'current': index, 'total': size})
        time.sleep(1)
    buffer = BytesIO()
    workbook = Workbook()
    workbook.new_sheet("sheet name", data=rows)
    workbook.save(buffer)
    buffer.seek(0)
    key_name = 'small_excel_' + str(random.randint(1, 10000)) + '.xlsx'
    s3_client.upload_fileobj(buffer, S3_BUCKET_NAME, key_name)
    # presigned URL valid for one hour
    url = s3_client.generate_presigned_url(
        'get_object',
        Params={'Bucket': S3_BUCKET_NAME, 'Key': key_name},
        ExpiresIn=3600)
    return {
        'current': size,
        'total': size,
        'status': 'COMPLETED',
        'result': 42,
        'url': url
    }
def make_excel(self):
    """Write the exchange and gift tables to a styled workbook on disk."""
    def style_sheet(sheet):
        # merged title row spanning A1:E1
        sheet.range("A1", "E1").merge()
        body_style = Style(size=15,
                           alignment=Alignment(horizontal="center",
                                               vertical="center"))
        for column in (1, 2, 3, 4):
            sheet.set_col_style(column, body_style)
        # the last column is wider to fit longer text
        sheet.set_col_style(
            5,
            Style(size=30,
                  alignment=Alignment(horizontal="center",
                                      vertical="center")))

    workbook = Workbook()
    style_sheet(workbook.new_sheet(u'交换表', data=self.exchange_data))
    style_sheet(workbook.new_sheet(u'赠与表', data=self.given_data))
    workbook.save('stucampus/christmas/info/1.xlsx')
def _to_excel(self, data, path, header=None, sheet_name='Data'):
    """Save *data* (DataFrame or list of rows) as an .xlsx file at *path*.

    An optional *header* row is prepended when given. If the destination file
    is locked (PermissionError), the user is prompted to retry or cancel.
    Returns True when the file was saved, False when the user cancelled.

    Fix: the default for *header* was a mutable list ([]); replaced with the
    None sentinel (same falsy behavior, avoids the shared-default pitfall).
    """
    if header is None:
        header = []
    values = data.values.tolist() if self._is_dataframe(data) else data
    sheet_data = [header] + values if header else values
    wb = Workbook()
    wb.new_sheet(sheet_name, data=sheet_data)
    saved = False
    cancel = False
    while not saved and not cancel:
        try:
            wb.save(path)
            saved = True
        except PermissionError:
            # file is open elsewhere — ask the user whether to retry
            retry = msg.askretrycancel(
                constants.APP_NAME,
                f'Could not save file to ({path}) because there is '
                'a file with that name currently open. To continue, '
                'close the file and retry.')
            if not retry:
                cancel = True
    return saved
def make_excel(self):
    """Build the christmas exchange/gift workbook and save it to disk."""
    def decorate(sheet):
        # title banner across the first five columns
        sheet.range("A1", "E1").merge()
        for idx in range(1, 5):
            sheet.set_col_style(
                idx,
                Style(size=15,
                      alignment=Alignment(horizontal="center",
                                          vertical="center")))
        # wider last column
        sheet.set_col_style(
            5,
            Style(size=30,
                  alignment=Alignment(horizontal="center",
                                      vertical="center")))

    book = Workbook()
    decorate(book.new_sheet(u'交换表', data=self.exchange_data))
    decorate(book.new_sheet(u'赠与表', data=self.given_data))
    book.save('stucampus/christmas/info/1.xlsx')
def save_to_excel(filename, rows):
    """Dump *rows* into '<filename>.xlsx' while a timer thread prints progress."""
    workbook = Workbook()
    # background thread that reports elapsed time for len(rows) rows
    timer = threading.Thread(target=print_time, args=(len(rows),))
    workbook.new_sheet(sheet_name='Untitled', data=rows)
    timer.start()
    workbook.save(filename + '.xlsx')
    print('Your data is saved in ' + filename + '.xlsx')
def click(self):
    """Button handler: parse the grammar from the text edit, run LL(1)
    predictive parsing on the sentence from the line edit, display the
    parsing steps in the table widget, and dump them to output.xlsx.
    """
    gram = self.textEdit.toPlainText().split('\n')
    sentence = self.lineEdit.text()
    grammar = Grammar()
    grammar.insert_from_arr(gram)
    # normalize the grammar so it has a chance of being LL(1)
    if grammar.have_left_recursion() == True:
        grammar.eliminating_left_recursion()
    if grammar.have_left_factor() == True:
        grammar.left_factoring()
    follow = grammar.follow()
    if grammar.is_LL1() == False:
        print('不是LL(1)文法')  # "not an LL(1) grammar"
        return
    table = grammar.consuct_predictive_parsing_table(follow)
    step = grammar.predictive_parsing(table, sentence)
    # grow the table widget: one row per parsing step
    for i in range(len(step)):
        self.tableWidget.insertRow(self.tableWidget.rowCount())
    for i in range(self.tableWidget.rowCount()):
        for j in range(self.tableWidget.columnCount()):
            self.tableWidget.setItem(i, j, QTableWidgetItem(step[i][j]))
    # write the parsing steps to an Excel file
    from pyexcelerate import Workbook
    data = step  # data is a 2D array
    wb = Workbook()
    wb.new_sheet("step", data=data)
    wb.save("output.xlsx")
def _write_xlsx_output(self, in_both_data=None):
    """Write comparison results to self.output_path, retrying on PermissionError.

    When *in_both_data* is given, a single 'in_both' sheet is written;
    otherwise one sheet per DataFrame in self.dfs whose include option is
    enabled. Sets self.output_saved = True on success.

    Fix: the default for *in_both_data* was a mutable list ([]); replaced
    with the None sentinel (identical falsy behavior).
    """
    wb = Workbook()
    if in_both_data:
        wb.new_sheet('in_both', data=in_both_data)
    else:
        for name, df in self.dfs.items():
            if self.include_options[name]:
                sheet_name = f'file_{name}'
                column_names = list(df)  # list(df) yields the column labels
                data = [column_names] + df.values.tolist()
                wb.new_sheet(sheet_name, data=data)
    try_to_save = True
    while try_to_save:
        try:
            wb.save(self.output_path)
            self.output_saved = True
            try_to_save = False
        except PermissionError:
            # destination open elsewhere — keep looping while the user retries
            try_to_save = msg.askretrycancel(
                self.title,
                f'Could not save file to ({self.output_path}) '
                'because there is a file with that name currently '
                'open. Close that file to allow for this one to be '
                'saved.')
def row_height_width():
    """Demonstrate pyexcelerate column and row sizing: auto-fit, hidden, fixed."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime

    wb = Workbook()

    cols = wb.new_sheet("sheet name 1")
    for col in (1, 2, 3):
        cols[1][col].value = "this is long string %d" % col
    cols.set_col_style(1, Style(size=-1))   # size=-1 -> auto-fit column 1
    cols.set_col_style(2, Style(size=0))    # size=0  -> hidden column 2
    cols.set_col_style(3, Style(size=100))  # fixed width 100 for column 3
    # -----------------
    rows = wb.new_sheet("sheet name 2")
    for row in (1, 2, 3):
        rows[row][1].value = "this is long string %d" % row
    rows.set_row_style(1, Style(size=-1))   # auto-fit row 1
    rows.set_row_style(2, Style(size=0))    # hidden row 2
    rows.set_row_style(3, Style(size=100))  # fixed height 100 for row 3

    wb.save("row_height_width.xlsx")
def rows_to_xlsx(data):
    """
    Can pass in either a 2D array ...
    or a list of dicts with the keys "rows" and "name"
    that will be turned into individual worksheets.

    Returns the workbook contents as bytes.
    """
    from pyexcelerate import Workbook
    import io
    import datetime
    from decimal import Decimal

    # Hoisted out of the sheet loop: the original redefined this closure on
    # every iteration for no benefit.
    def _fixup_value(v):
        """Coerce a cell value into something pyexcelerate can serialize."""
        if v is None:
            return ''
        if isinstance(v, datetime.datetime):
            return str(v)
        if isinstance(v, Decimal):
            return float(v)
        if isinstance(v, bool):
            return int(v)
        return v

    if not data:
        sheets = [{'rows': [[]]}]
    elif not isinstance(data[0], dict):
        sheets = [{'rows': data}]
    else:
        sheets = data

    wb = Workbook()
    for j, sheet in enumerate(sheets):
        rows = [[_fixup_value(v) for v in row] for row in sheet['rows']]
        wb.new_sheet(sheet.get('name', 'Sheet%d' % (j + 1)), data=rows)

    f = io.BytesIO()
    # _save writes to a stream; the public save() expects a file path
    wb._save(f)
    return f.getvalue()
def save_excel_fast(df, path, sheet_name="Sheet1", skiprows=0):
    """Save DataFrame *df* to *path*, preceded by *skiprows* blank rows.

    Fix: `[[]] * skiprows` repeats the SAME list object skiprows times — a
    classic aliasing pitfall (harmless here only because the lists stay
    empty). Build distinct lists instead.
    """
    data = ([[] for _ in range(skiprows)]
            + [df.columns.tolist()]
            + df.values.tolist())
    wb = Workbook()
    wb.new_sheet(sheet_name, data=data)
    wb.save(path)
def download_make_fts_data(output_dir):
    """Download FTS data into *output_dir* and write contributions.xlsx from it."""
    # download the raw FTS data
    download_fts_data(output_dir)
    # build the contributions worksheet and save it under <output_dir>/xlsx
    contributions = make_fts_contributions_data(output_dir)
    workbook = Workbook()
    workbook.new_sheet("Data", data=contributions)
    workbook.save(os.path.join(output_dir, "xlsx", "contributions.xlsx"))
def df_to_excel(df, excelfilename):
    """Split *df* into 1,000,000-row chunks, one sheet per chunk."""
    SHEET_LENGTH = 1000000
    print(df)
    wbk = Workbook()
    for start in range(0, len(df), SHEET_LENGTH):
        print(start)
        chunk = df.iloc[start:start + SHEET_LENGTH, ]
        # header row followed by the chunk's values
        sheet_rows = [chunk.columns] + list(chunk.values)
        wbk.new_sheet(sheet_name='Row {}'.format(start), data=sheet_rows)
    wbk.save(excelfilename)
def _write_output_file(self):
    """Save self.file_df (header + rows) to the path chosen in the GUI."""
    rows = [list(self.file_df)] + self.file_df.values.tolist()
    workbook = Workbook()
    workbook.new_sheet('parsed addresses', data=rows)
    workbook.save(self.gui.output_file.get())
def pasarchivo(ruta, datb, tablas, tipo):
    """Export SQLite tables listed in a CSV manifest to Excel/CSV files.

    Reads the table names from column *tipo* of the CSV file *tablas*.
    Tables up to 1M rows become sheets of one dated .xlsx under *ruta*;
    larger tables are written as gzip-compressed CSV files under <ruta>/csv.
    """
    import sqlite3
    import pandas as pd
    import timeit
    from pyexcelerate import Workbook
    from pathlib import Path
    from datetime import date
    dat_dir = Path(ruta)
    db_path1 = dat_dir / datb
    start_time = timeit.default_timer()
    conn = sqlite3.connect(db_path1)  # database connection
    c = conn.cursor()
    today = date.today()
    df1 = pd.read_csv(tablas)  # manifest of table names
    xls_file = "Param" + today.strftime("%y%m%d") + ".xlsx"
    xls_path = dat_dir / xls_file  # xls file path-name
    csv_path = dat_dir / "csv"  # csv path to store big data
    wb = Workbook()  # excelerator file init
    i = 0  # count of tables actually written to the workbook
    for index, row in df1.iterrows():  # iterate manifest rows by tipo column
        line = row[tipo]
        if not pd.isna(row[tipo]):  # skip nan/null entries
            try:
                # NOTE(review): table name interpolated into SQL — only safe
                # for trusted manifest contents.
                df = pd.read_sql_query("select * from " + line + ";", conn)  # pandas dataframe from sqlite
                if len(df) > 1000000:  # too big for an Excel sheet
                    csv_loc = line + today.strftime("%y%m%d") + '.csv.gz'  # compressed csv file name
                    print('Table {} saved in {}'.format(line, csv_loc))
                    df.to_csv(csv_path / csv_loc, compression='gzip')  # dataframe saved to csv
                else:
                    data = [df.columns.tolist()] + df.values.tolist()
                    # NOTE(review): zipping df.index (len == len(df)) against
                    # `data` (header + len(df) rows) pairs the header row with
                    # index[0] and drops the last data row's pairing — confirm
                    # this one-row offset is intentional.
                    data = [[index] + row for index, row in zip(df.index, data)]
                    wb.new_sheet(line, data=data)
                    print('Table {} stored in xlsx sheet'.format(line))
                    i += 1
            except sqlite3.Error as error:  # sqlite error handling
                print('SQLite error: %s' % (' '.join(error.args)))
    end_time = timeit.default_timer()
    delta = round(end_time - start_time, 2)
    print("Data proc took " + str(delta) + " secs")
    deltas = 0
    if i == 0:
        print('No tables to excel')
    else:
        print("Saving tables in {} workbook".format(xls_path))
        start_time = timeit.default_timer()
        wb.save(xls_path)
        end_time = timeit.default_timer()
        deltas = round(end_time - start_time, 2)
        print("xlsx save took " + str(deltas) + " secs")
    print("Total time " + str(delta + deltas) + " secs")
    c.close()
    conn.close()
def make_excel(self):
    """Render the exchange and gift tables into a styled workbook (3.xlsx)."""
    def apply_style(sheet):
        # merge the title row and center-align the first four columns
        sheet.range("A1", "D1").merge()
        column_style = Style(size=15,
                             alignment=Alignment(horizontal="center",
                                                 vertical="center"))
        for col in (1, 2, 3, 4):
            sheet.set_col_style(col, column_style)

    workbook = Workbook()
    apply_style(workbook.new_sheet(u'交换表', data=self.exchange_data))
    apply_style(workbook.new_sheet(u'赠与表', data=self.given_data))
    workbook.save('stucampus/christmas/info/3.xlsx')
def bulk_prepare_basis(self, start_date, end_date, dart='Day Ahead', market='All', to_database_option=False, to_excel=None):
    """Build per-plant basis results over [start_date, end_date].

    Collects basis data from every 'plant' entity (optionally filtered by
    *market*), melts/pivots it into a month-by-instrument table, and — when
    *to_excel* is given — writes a 'basis' workbook plus a separate
    '<name>_hourly_detail.xlsx' workbook.

    Returns (basis_df, basis_hourly_detail_df).

    NOTE(review): *to_database_option* is accepted but never used in this
    body — confirm whether it is dead or handled by an override.
    """
    powerplant_list = [entity for entity in self.entities if entity.type == 'plant']
    if market != 'All':
        powerplant_list = [
            powerplant for powerplant in powerplant_list if powerplant.market == market]
    basis_df = pd.DataFrame()
    basis_hourly_detail_df = pd.DataFrame()
    for powerplant in powerplant_list:
        powerplant_basis_df, powerplant_basis_details_df = powerplant.build_basis(start_date, end_date, dart)
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0; this
        # code presumably targets an older pandas.
        basis_df = basis_df.append(powerplant_basis_df)
        basis_hourly_detail_df = basis_hourly_detail_df.append(powerplant_basis_details_df)
    # # basis_df.to_csv("basis_df.csv")
    basis_df = basis_df.reset_index()
    # print (basis_df.columns)
    # basis_df = pd.read_csv("basis_df.csv")
    # long format: one row per (month, peak_info, plant, instrument)
    portfolio_basis_result_df = pd.melt(basis_df, id_vars=['month','peak_info','plant'], value_vars=['basis_$','basis_%'], var_name='instrument', value_name='value')
    # e.g. "PlantA basis - OnPeak_$"
    portfolio_basis_result_df['instrument_id'] = portfolio_basis_result_df.apply(lambda row: row['plant'] + ' basis - ' + row['peak_info'] + "_" + row['instrument'].split("_")[1], axis=1)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # wide format: months as rows, instrument ids as columns
    portfolio_basis_result_df = pd.pivot_table(portfolio_basis_result_df, index=['month'], columns=['instrument_id'], values='value', aggfunc=np.sum)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # portfolio_basis_result_df.to_csv("portfolio_basis_result_df.csv")
    if to_excel is not None:
        # basis_df.to_excel(to_excel, sheet_name='basis')
        # basis_df.to_excel(to_excel, sheet_name='detail')
        basis_values = [portfolio_basis_result_df.columns] + list(portfolio_basis_result_df.values)
        wb = Workbook()
        wb.new_sheet('basis', data=basis_values)
        wb.save(to_excel)
        # hourly detail goes into its own workbook next to the main file
        wb = Workbook()
        basis_detail_values = [basis_hourly_detail_df.columns] + list(basis_hourly_detail_df.values)
        wb.new_sheet('basis_details', data=basis_detail_values)
        wb.save(to_excel.split('.')[0] +
                "_hourly_detail.xlsx")
    return basis_df, basis_hourly_detail_df
def export_to_xlsx(data, temp_dir):
    """Render *data* as an xlsx and return its bytes in a BytesIO buffer.

    Returns None (after logging) on any failure. Note: *temp_dir* is
    currently unused — the workbook is staged at a fixed /tmp path.

    Fix: the staged file is now read via a `with` block, so the descriptor
    is closed even if the read raises (the original leaked it on error).
    """
    start_time = datetime.now()
    # full_file_path = "{0}{1}temp.xlsx".format(file_path, os.sep)
    file_path = "/tmp/temp.xlsx"
    try:
        wb = Workbook()
        ws = wb.new_sheet("Policies", data=data)
        ws.set_row_style(1, Style(font=Font(bold=True)))  # bold the header row
        # save xlsx file, then copy its bytes into an in-memory buffer
        wb.save(file_path)
        output = io.BytesIO()
        with open(file_path, 'rb') as xlsx_file:
            output.write(xlsx_file.read())
        # delete staging file
        os.remove(file_path)
    except Exception as ex:
        m.logger.fatal("\tunable to export to file: {}".format(ex,))
        return None
    m.logger.info("\tfile export took {}".format(datetime.now() - start_time))
    return output
def export_to_excel(self, filename: str) -> None:
    """ Export in XLSX format. """
    from pyexcelerate import Workbook  # type: ignore

    self.__preprocess()
    width = len(self.keys)
    if width < 1:
        # no columns means the query produced nothing usable
        typer.secho(
            "An error occured please check your query.",
            fg=typer.colors.RED,
            bold=True,
        )
        sys.exit()
    # reshape the flat value list into rows of `width` columns
    rows = [
        self.values[start:start + width]
        for start in range(0, len(self.values), width)
    ]
    rows.insert(0, self.keys)
    workbook = Workbook()
    workbook.new_sheet("Analytics", data=rows)
    workbook.save(filename)
    typer.secho(
        "\nAnalytics successfully created in XLSX format ✅",
        bold=True,
    )
def to_xlsx(self, filename, submissions):
    """Write parsed submissions to *filename*, one worksheet per section.

    Sheets are created lazily the first time a section appears; each keeps
    its own next-row cursor so rows from later chunks append correctly.
    """
    workbook = Workbook()
    sheets = {}
    for chunk in self.parse_submissions(submissions):
        for section_name, rows in chunk.items():
            cursor = sheets.get(section_name)
            if cursor is None:
                # first time we see this section: create sheet + cursor
                cursor = sheets[section_name] = {
                    "sheet": workbook.new_sheet(section_name),
                    "row": 2,
                }
            sheet = cursor["sheet"]
            # (re)write the header labels on row 1
            for col, label in enumerate(self.labels[section_name], 1):
                sheet.set_cell_value(1, col, label)
            for row in rows:
                for col, cell in enumerate(row, 1):
                    sheet.set_cell_value(cursor["row"], col, cell)
                cursor["row"] += 1
    workbook.save(filename)
def Convert(ans):
    """Convert the CSV file at *ans* to an .xlsx file alongside it.

    Sets the module-level ``status`` flag: False on success, True on failure
    (after showing an info dialog).

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception``.
    """
    global status
    csv_file = ans
    excel_file = csv_file.replace('.csv', '.xlsx')
    try:
        df = read_csv(csv_file, sep=',', error_bad_lines=False,
                      index_col=False, dtype='unicode')
        df.head()
        data = [df.columns.tolist()] + df.values.tolist()
        wb = Workbook()
        wb.new_sheet('Sheet 1', data=data)
        wb.save(excel_file)
        status = False
    except Exception:
        messagebox.showinfo(message="Please Select CSV Correctly")
        status = True
def __call__(self, data, wrapped=True):
    """Serialize data.items into an in-memory xlsx and return it with the
    client-facing filename. Column accessors are looked up per object by
    its major version.
    """
    outfile = BytesIO()
    wb = Workbook()
    ws = wb.new_sheet("data")

    # header row spans A1..<last column>1
    headers = self.fields_by_major_version['1'].keys()
    final_column = get_column_letter(len(headers))
    ws.range("A1", final_column + "1").value = [headers]

    # one row per object, starting at row 2
    get_major_version = self.get_major_version
    for row_count, obj in enumerate(data.items, 2):
        accessors = self.fields_by_major_version[get_major_version(obj)].values()
        row = [accessor(obj) for accessor in accessors]
        ws.range("A" + str(row_count),
                 final_column + str(row_count)).value = [row]

    wb.save(outfile)
    outfile.seek(0)
    return {'file': outfile, 'client_filename': self.client_filename}
def make_xlsx(self):
    """Write stock rows and keyword rows interleaved into one sheet.

    Stock rows land on odd rows (1, 3, 5, ...) and keyword rows on even
    rows (2, 4, 6, ...), matching the original row-stepping logic.

    Fix: replaced manual `range(0, len(x), 1)` loops and hand-maintained
    row/column counters with enumerate (idiomatic, same cell layout).
    """
    wb = Workbook()
    ws = wb.new_sheet("sheet1")
    # stock rows -> rows 1, 3, 5, ...
    for i, stock_row in enumerate(self.stock_list):
        for j, value in enumerate(stock_row):
            ws.set_cell_value(1 + 2 * i, j + 1, value)
    # keyword rows -> rows 2, 4, 6, ...
    for i, keyword_row in enumerate(self.keywords_list):
        for j, value in enumerate(keyword_row):
            ws.set_cell_value(2 + 2 * i, j + 1, value)
    wb.save(self.path)
def write_cell_data_fast():
    """Populate cells via the ws[row][col] indexing API and save."""
    book = Workbook()
    sheet = book.new_sheet("sheet name")
    sheet[1][1].value = 15                   # a number
    sheet[1][2].value = 20
    sheet[1][3].value = "=SUM(A1,B1)"        # a formula
    sheet[1][4].value = str(datetime.now())  # a date
    book.save("write_cell_data_fast.xlsx")
def sqltabexport(conn1, tabs1, filenam, datab):
    """Export each table named in *tabs1* to its own sheet of a date-stamped
    workbook saved under <datab.parent>/xlsx.
    """
    today = date.today()
    xls_file = filenam + '_' + today.strftime("%y%m%d") + ".xlsx"
    xls_path = datab.parent / 'xlsx' / xls_file  # destination path
    wb = Workbook()
    for table in tabs1:
        try:
            # NOTE: the table name is interpolated into the SQL string —
            # only pass trusted table names.
            df = pd.read_sql_query("select * from " + table + ";", conn1)
            wb.new_sheet(table,
                         data=[df.columns.tolist()] + df.values.tolist())
        except sqlite3.Error as error:
            print('SQLite error: %s' % (' '.join(error.args)))
    wb.save(xls_path)
    return
def write_cell_data_faster():
    """Populate cells via set_cell_value (the faster pyexcelerate API) and save."""
    book = Workbook()
    sheet = book.new_sheet("sheet name")
    sheet.set_cell_value(1, 1, 15)                   # a number
    sheet.set_cell_value(1, 2, 20)
    sheet.set_cell_value(1, 3, "=SUM(A1,B1)")        # a formula
    sheet.set_cell_value(1, 4, str(datetime.now()))  # a date
    book.save("write_cell_data_faster.xlsx")
def Make_stock_title_xlsx(self, dir_path, file_name):
    """Write each entry of self.title_list into column A of a new workbook
    saved as <dir_path>/<file_name>.xlsx.

    Fix: replaced the manual `range(0, len(x), 1)` index loop with
    enumerate (idiomatic, identical cell layout).
    """
    print("Make method")
    wb1 = Workbook()
    ws1 = wb1.new_sheet("sheet1")
    for row, title in enumerate(self.title_list, start=1):
        ws1.set_cell_value(row, 1, title)
    wb1.save(dir_path + '/' + file_name + '.xlsx')
def styling_columns_fastest():
    """Apply a fill style to an entire column in one set_col_style call."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime

    book = Workbook()
    sheet = book.new_sheet("sheet name")
    sheet[1][1].value = 123456
    red_fill = Fill(background=Color(255, 0, 0, 0))
    sheet.set_col_style(1, Style(fill=red_fill))
    book.save("styling_columns_fastest.xlsx")
def styling_rows_fast():
    """Style a whole row by mutating its style object in place."""
    from pyexcelerate import Workbook, Color
    from datetime import datetime

    book = Workbook()
    sheet = book.new_sheet("sheet name")
    sheet[1][1].value = 123456
    sheet[1].style.fill.background = Color(255, 0, 0)
    book.save("styling_rows_fast.xlsx")
def sqltabexport(datsq, tabs1, filenam):
    """Export every table named in *tabs1* from the year-stamped SQLite
    database into one date-stamped .xlsx next to the database file.
    """
    datab = Path('C:/sqlite/2020' + str(datsq) + '_sqlite.db')
    today = date.today()
    xls_file = filenam + '_' + today.strftime("%y%m%d") + ".xlsx"
    xls_path = datab.parent / xls_file  # destination path
    connection = sqlite3.connect(datab)
    cursor = connection.cursor()
    workbook = Workbook()
    for table in tabs1:
        try:
            # table name interpolated into SQL — trusted names only
            frame = pd.read_sql_query("select * from " + table + ";", connection)
            workbook.new_sheet(table,
                               data=[frame.columns.tolist()] + frame.values.tolist())
        except sqlite3.Error as error:
            print('SQLite error: %s' % (' '.join(error.args)))
    cursor.close()
    connection.close()
    workbook.save(xls_path)
    return
def print_excel_report(query, database, header, gq, filename): wb = Workbook() #Print Cover material ws = wb.new_sheet("Cover") ws.set_cell_value(1, 1, "Date Generated:") ws.set_cell_value(1, 2, datetime.datetime.now()) ws.set_cell_value(2, 1, "GEMINI Query:") ws.set_cell_value(2, 2, query) ws.set_cell_value(3, 1, "GEMINI Database:") ws.set_cell_value(3, 2, database) ws2 = wb.new_sheet("Variants", data=header) row = 2 for row in gq: cell = 1 row = row + 1 wb.save(filename)
'''
from openpyxl import load_workbook
wb = load_workbook(filename = '/home/ductu/Downloads/3g.xlsx',read_only=True)
ws = wb['Database 3G']
d = ws.cell(row = 4, column = 2)
print d.value
count=1
for row in ws.iter_rows('A2:AJ3'):
    print 'number of row ', count
    count+=1
    for cell in row:
        print cell.value
'''
from pyexcelerate import Workbook

# Demonstrate bulk-assigning a 2x2 block of values to a cell range.
workbook = Workbook()
sheet = workbook.new_sheet("test")
sheet.range("B2", "C3").value = [[1, 2], [3, 4]]
workbook.save("output.xlsx")
# NOTE(review): fragment — `return out` below is the tail of a function whose
# definition starts outside this view; the rest is module-level Python 2
# scraper code (bare `print` statements).
    return out

itemsCollection = []
# build the table header row (translated from Russian: "create the table header")
row = []
for item in data['items']:
    row.append(item['name'])
row.append('Original URL')
itemsCollection.append(row)
# the scraped data itself (translated from Russian: "the parsing data itself")
totalLinks = len(data['links'])
i = 0
for link in data['links']:
    i += 1
    g.go(link)  # presumably a Grab request to the target page — confirm
    row = []
    for item in data['items']:
        content = parse(item['xpath'], item['type'])
        row.append(content)
    row.append(link)
    itemsCollection.append(row)
    # progress indicator, e.g. [3/120]
    print '[' + str(i) + '/' + str(totalLinks) + ']'
wb = Workbook()
ws = wb.new_sheet("sheet name", data=itemsCollection)
ws.set_row_style(1, Style(font=Font(bold=True)))  # bold the header row
wb.save("output.xlsx")
def make_excel(self):
    """Export self.exchange_data to the fixed xlsx path (2.xlsx)."""
    workbook = Workbook()
    workbook.new_sheet(u'交换表', data=self.exchange_data)
    workbook.save('stucampus/christmas/info/2.xlsx')