def bulk_prepare_basis(self, start_date, end_date, dart='Day Ahead', market='All', to_database_option=False, to_excel=None):
    """Build basis results for every plant entity in the portfolio.

    Parameters
    ----------
    start_date, end_date : date-like
        Range passed through to each plant's ``build_basis``.
    dart : str
        Market run to evaluate, e.g. 'Day Ahead'.
    market : str
        Restrict to plants whose ``market`` matches; 'All' keeps every plant.
    to_database_option : bool
        Currently unused; kept for interface compatibility.
    to_excel : str or None
        When given, write the pivoted basis table to this .xlsx path and the
        hourly detail to a ``*_hourly_detail.xlsx`` companion file.

    Returns
    -------
    tuple of (basis_df, basis_hourly_detail_df) DataFrames.
    """
    powerplant_list = [entity for entity in self.entities if entity.type == 'plant']
    if market != 'All':
        powerplant_list = [powerplant for powerplant in powerplant_list
                           if powerplant.market == market]

    # Collect per-plant frames and concatenate once: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and repeated appends are
    # quadratic anyway.
    basis_frames = []
    detail_frames = []
    for powerplant in powerplant_list:
        powerplant_basis_df, powerplant_basis_details_df = powerplant.build_basis(start_date, end_date, dart)
        basis_frames.append(powerplant_basis_df)
        detail_frames.append(powerplant_basis_details_df)
    basis_df = pd.concat(basis_frames) if basis_frames else pd.DataFrame()
    basis_hourly_detail_df = pd.concat(detail_frames) if detail_frames else pd.DataFrame()

    basis_df = basis_df.reset_index()

    # Long form: one row per (month, peak_info, plant, instrument).
    portfolio_basis_result_df = pd.melt(
        basis_df,
        id_vars=['month', 'peak_info', 'plant'],
        value_vars=['basis_$', 'basis_%'],
        var_name='instrument',
        value_name='value')
    # e.g. "<plant> basis - <peak_info>_$" / "..._%"
    portfolio_basis_result_df['instrument_id'] = portfolio_basis_result_df.apply(
        lambda row: row['plant'] + ' basis - ' + row['peak_info'] + "_" + row['instrument'].split("_")[1],
        axis=1)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # Wide form: months down the side, one column per instrument_id.
    portfolio_basis_result_df = pd.pivot_table(
        portfolio_basis_result_df, index=['month'], columns=['instrument_id'],
        values='value', aggfunc=np.sum)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()

    if to_excel is not None:
        basis_values = [portfolio_basis_result_df.columns] + list(portfolio_basis_result_df.values)
        wb = Workbook()
        wb.new_sheet('basis', data=basis_values)
        wb.save(to_excel)
        wb = Workbook()
        basis_detail_values = [basis_hourly_detail_df.columns] + list(basis_hourly_detail_df.values)
        wb.new_sheet('basis_details', data=basis_detail_values)
        wb.save(to_excel.split('.')[0] + "_hourly_detail.xlsx")

    return basis_df, basis_hourly_detail_df
def save_to_excel(filename, rows):
    """Save *rows* to '<filename>.xlsx' while a progress thread reports
    on the row count in the background."""
    progress_thread = threading.Thread(target=print_time, args=(len(rows), ))
    workbook = Workbook()
    workbook.new_sheet(sheet_name='Untitled', data=rows)
    progress_thread.start()
    workbook.save(filename + '.xlsx')
    print('Your data is saved in ' + filename + '.xlsx')
def _write_xlsx_output(self, in_both_data=None):
    """Write comparison results to ``self.output_path`` as xlsx.

    Parameters
    ----------
    in_both_data : list or None
        When non-empty, write a single 'in_both' sheet with this data;
        otherwise write one sheet per included input DataFrame.
        (Fix: the default was a shared mutable ``[]``; replaced with a
        ``None`` sentinel — truthiness-equivalent, so callers are unaffected.)

    Retries on PermissionError (file open in Excel) until it succeeds or
    the user cancels; sets ``self.output_saved`` on success.
    """
    wb = Workbook()
    if in_both_data:
        wb.new_sheet('in_both', data=in_both_data)
    else:
        for name, df in self.dfs.items():
            if self.include_options[name]:
                sheet_name = f'file_{name}'
                column_names = list(df)
                data = [
                    column_names,
                ] + df.values.tolist()
                wb.new_sheet(sheet_name, data=data)
    try_to_save = True
    while try_to_save:
        try:
            wb.save(self.output_path)
            self.output_saved = True
            try_to_save = False
        except PermissionError:
            # File is locked (typically open in Excel) — ask the user.
            try_to_save = msg.askretrycancel(
                self.title,
                f'Could not save file to ({self.output_path}) '
                'because there is a file with that name currently '
                'open. Close that file to allow for this one to be '
                'saved.')
def to_xlsx(self, filename, submissions):
    """Write parsed submissions to *filename*, one sheet per section.

    Each sheet gets a header row from ``self.labels[section_name]`` and
    data rows appended in chunk order starting at row 2.
    """
    workbook = Workbook()
    sheets = {}
    for chunk in self.parse_submissions(submissions):
        for section_name, rows in chunk.items():
            try:
                cursor = sheets[section_name]
                current_sheet = cursor['sheet']
            except KeyError:
                current_sheet = workbook.new_sheet(section_name)
                cursor = sheets[section_name] = {
                    "sheet": current_sheet,
                    "row": 2,
                }
                # Write the header row exactly once, when the sheet is
                # created (the original rewrote identical labels for
                # every chunk touching the section).
                for i, label in enumerate(self.labels[section_name], 1):
                    current_sheet.set_cell_value(1, i, label)
            for row in rows:
                y = cursor["row"]
                for i, cell in enumerate(row, 1):
                    current_sheet.set_cell_value(y, i, cell)
                cursor["row"] += 1
    workbook.save(filename)
def export_to_xlsx(data, temp_dir):
    """Render *data* to an xlsx file under *temp_dir* and return it as BytesIO.

    Returns None (after logging) on any failure. The temp file is removed
    once its bytes have been copied into memory.
    """
    start_time = datetime.now()
    # Honor the temp_dir parameter: the old code hard-coded /tmp/temp.xlsx,
    # which ignored temp_dir entirely and raced concurrent exports writing
    # the same path.
    file_path = os.path.join(temp_dir, "temp.xlsx")
    try:
        wb = Workbook()
        ws = wb.new_sheet("Policies", data=data)
        ws.set_row_style(1, Style(font=Font(bold=True)))  # bold the header row
        # save xlsx file, then slurp it into an in-memory buffer
        wb.save(file_path)
        output = io.BytesIO()
        with open(file_path, 'rb') as xlsx_file:  # context manager: no leaked handle
            output.write(xlsx_file.read())
        os.remove(file_path)
    except Exception as ex:
        m.logger.fatal("\tunable to export to file: {}".format(ex,))
        return None
    m.logger.info("\tfile export took {}".format(datetime.now() - start_time))
    return output
def click(self):
    """Parse the sentence from lineEdit with the grammar from textEdit,
    display every predictive-parsing step in the table widget, and dump
    the steps to output.xlsx.
    """
    productions = self.textEdit.toPlainText().split('\n')
    sentence = self.lineEdit.text()
    grammar = Grammar()
    grammar.insert_from_arr(productions)
    # Normalize the grammar before the LL(1) check.
    if grammar.have_left_recursion() == True:
        grammar.eliminating_left_recursion()
    if grammar.have_left_factor() == True:
        grammar.left_factoring()
    follow = grammar.follow()
    if grammar.is_LL1() == False:
        print('不是LL(1)文法')
        return
    table = grammar.consuct_predictive_parsing_table(follow)
    step = grammar.predictive_parsing(table, sentence)
    # Grow the table by one row per parsing step, then fill every cell.
    for _ in step:
        self.tableWidget.insertRow(self.tableWidget.rowCount())
    for r in range(self.tableWidget.rowCount()):
        for c in range(self.tableWidget.columnCount()):
            self.tableWidget.setItem(r, c, QTableWidgetItem(step[r][c]))
    # Write the parsing trace to an Excel file.
    from pyexcelerate import Workbook
    wb = Workbook()
    wb.new_sheet("step", data=step)
    wb.save("output.xlsx")
def df_to_excel(df, path, sheet_name='Sheet 1'):
    """Write DataFrame *df* (header row + values) to *path* as one sheet."""
    rows = [df.columns.tolist()]
    rows.extend(df.values.tolist())
    workbook = Workbook()
    workbook.new_sheet(sheet_name, data=rows)
    workbook.save(path)
def __call__(self, data, wrapped=True):
    """Serialize ``data.items`` to an in-memory xlsx workbook.

    Returns a dict with the seeked BytesIO under 'file' and the configured
    client filename. Field accessors are chosen per-object by major version;
    the header row always uses the version-'1' field names.
    """
    outfile = BytesIO()
    wb = Workbook()
    ws = wb.new_sheet("data")
    # Header row spans A1..<final>1.
    headers = self.fields_by_major_version['1'].keys()
    final_column = get_column_letter(len(headers))
    ws.range("A1", final_column + "1").value = [headers]
    # Data rows start at 2; hoist the bound method out of the loop.
    row_idx = 2
    get_major_version = self.get_major_version
    for obj in data.items:
        accessors = self.fields_by_major_version[get_major_version(obj)].values()
        row = [accessor(obj) for accessor in accessors]
        ws.range("A" + str(row_idx), final_column + str(row_idx)).value = [row]
        row_idx += 1
    wb.save(outfile)
    outfile.seek(0)
    return {'file': outfile, 'client_filename': self.client_filename}
def save_excel_fast(df, path, sheet_name="Sheet1", skiprows=0):
    """Write *df* to *path*, leaving *skiprows* blank rows above the header.

    Fix: ``[[]] * skiprows`` produced *skiprows* references to one shared
    list object (the classic mutable-aliasing pitfall); independent empty
    rows are built with a comprehension instead.
    """
    data = [[] for _ in range(skiprows)] + [
        df.columns.tolist(),
    ] + df.values.tolist()
    wb = Workbook()
    wb.new_sheet(sheet_name, data=data)
    wb.save(path)
def generate_excel(self, size=5):
    """Generate an Excel file, upload it to the S3 bucket, and return a
    result dict containing a presigned download URL.

    Task state is updated once per generated row (with a 1s pause, so
    progress is observable).
    """
    data = []
    for i in range(size):
        data.append([i])
        self.update_state(state='PROGRESS', meta={'current': i, 'total': size})
        time.sleep(1)
    stream = BytesIO()
    wb = Workbook()
    wb.new_sheet("sheet name", data=data)
    wb.save(stream)
    stream.seek(0)
    key_name = 'small_excel_' + str(random.randint(1, 10000)) + '.xlsx'
    s3_client.upload_fileobj(stream, S3_BUCKET_NAME, key_name)
    url = s3_client.generate_presigned_url(
        'get_object',
        Params={'Bucket': S3_BUCKET_NAME, 'Key': key_name},
        ExpiresIn=3600)
    return {
        'current': size,
        'total': size,
        'status': 'COMPLETED',
        'result': 42,
        'url': url,
    }
def row_height_width():
    """Demo: column-width styles on one sheet, row-height styles on another."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime

    wb = Workbook()

    sheet = wb.new_sheet("sheet name 1")
    for col in (1, 2, 3):
        sheet[1][col].value = "this is long string %d" % col
    sheet.set_col_style(1, Style(size=-1))   # auto-fit column 1
    sheet.set_col_style(2, Style(size=0))    # hidden column 2
    sheet.set_col_style(3, Style(size=100))  # width=100 column 3

    # -----------------
    sheet = wb.new_sheet("sheet name 2")
    for row in (1, 2, 3):
        sheet[row][1].value = "this is long string %d" % row
    sheet.set_row_style(1, Style(size=-1))   # auto-fit row 1
    sheet.set_row_style(2, Style(size=0))    # hidden row 2
    sheet.set_row_style(3, Style(size=100))  # height=100 row 3

    wb.save("row_height_width.xlsx")
def rows_to_xlsx(data):
    """
    Can pass in either a 2D array ... or a list of dicts with the keys
    "rows" and "name" that will be turned into individual worksheets.

    Returns the xlsx file contents as bytes.
    """
    from pyexcelerate import Workbook
    import io
    import datetime
    from decimal import Decimal

    # Hoisted out of the sheet loop — it was redefined on every iteration.
    def fixup_value(v):
        # Coerce values pyexcelerate can't serialize directly.
        if v is None:
            return ''
        if isinstance(v, datetime.datetime):
            return str(v)
        if isinstance(v, Decimal):
            return float(v)
        if isinstance(v, bool):
            return int(v)
        return v

    if not data:
        sheets = [{'rows': [[]]}]
    elif not isinstance(data[0], dict):
        sheets = [{'rows': data}]
    else:
        sheets = data

    wb = Workbook()
    for j, sheet in enumerate(sheets):
        rows = [list(map(fixup_value, row)) for row in sheet['rows']]
        wb.new_sheet(sheet.get('name', 'Sheet%d' % (j + 1)), data=rows)

    f = io.BytesIO()
    # NOTE(review): uses the private Workbook._save; recent pyexcelerate
    # accepts a file object through the public save() — confirm version
    # before switching.
    wb._save(f)
    return f.getvalue()
def make_xlsx(self):
    """Write stock rows and keyword rows to ``self.path``, interleaved:
    stock entries on odd sheet rows (1, 3, 5, ...) and keyword entries
    on even sheet rows (2, 4, 6, ...).
    """
    wb = Workbook()
    ws = wb.new_sheet("sheet1")
    # The two original copy-pasted loops differed only in source list and
    # starting row — factored into one helper.
    self._write_interleaved(ws, self.stock_list, 1)
    self._write_interleaved(ws, self.keywords_list, 2)
    wb.save(self.path)

def _write_interleaved(self, ws, rows, start_row):
    """Write each list in *rows* across a sheet row, advancing two rows
    per list so two datasets can interleave."""
    row = start_row
    for values in rows:
        for col, value in enumerate(values, 1):
            ws.set_cell_value(row, col, value)
        row += 2
def create_workbook(self, data, structure):
    """Build ``self.wb`` from *data* using the sheet/field layout in
    *structure*, then save it to ``self.wb_name``.

    Works through ``self.pending_processing`` as a work queue: processing
    one sheet (``process_and_write``) may enqueue further sheets, so the
    loop restarts from a fresh snapshot after each item until everything
    is marked processed. Always returns False.
    """
    # given the data as json and the structure as json too, create a workbook with this data
    self.wb = Workbook()
    # order the fields for proper display
    for sheet_name, sheet_fields in list(structure.items()):
        self.sorted_sheet_fields[sheet_name] = self.order_fields(
            sheet_fields)
    # Seed the work queue with the top-level data.
    self.pending_processing['main'] = {'data': data, 'is_processed': False}
    # from the docs: https://docs.python.org/2.7/tutorial/datastructures.html#dictionaries
    # It is sometimes tempting to change a list while you are looping over it;
    # however, it is often simpler and safer to create a new list instead
    # so lets not change the list while iterating
    while len(list(self.pending_processing.keys())) != 0:
        all_processed = True
        # Iterate over a snapshot because process_and_write may add new
        # entries to pending_processing mid-loop.
        for sheet_name, data in list(self.pending_processing.items()):
            if data['is_processed'] == False:
                terminal.tprint('Processing ' + sheet_name, 'okblue')
                all_processed = False
                self.process_and_write(data['data'], sheet_name)
                # mark it as processed
                self.pending_processing[sheet_name]['is_processed'] = True
                # Restart from a fresh snapshot after each processed item.
                break
        if all_processed == True:
            break
    self.wb.save(self.wb_name)
    return False
def _to_excel(self, data, path, header=None, sheet_name='Data'):
    """Write *data* (DataFrame or list of rows) to *path* as one xlsx sheet.

    Parameters
    ----------
    header : list or None
        Optional column-name row to prepend. (Fix: the default was a
        shared mutable ``[]``; replaced with a ``None`` sentinel —
        truthiness-equivalent, so callers are unaffected.)

    Returns
    -------
    bool
        True when the file was saved; False if the user cancelled after
        a PermissionError (file open elsewhere).
    """
    values = data.values.tolist() if self._is_dataframe(data) else data
    sheet_data = [
        header,
    ] + values if header else values
    wb = Workbook()
    wb.new_sheet(sheet_name, data=sheet_data)
    saved = False
    cancel = False
    while not saved and not cancel:
        try:
            wb.save(path)
            saved = True
        except PermissionError:
            # File is locked (typically open in Excel) — ask the user.
            retry = msg.askretrycancel(
                constants.APP_NAME,
                f'Could not save file to ({path}) because there is '
                'a file with that name currently open. To continue, '
                'close the file and retry.')
            if not retry:
                cancel = True
    return saved
def specaud():
    """Run the 'specific audit': validate parameters against the SQLite
    database, render per-parameter graphs, bundle them into a PDF plus an
    xlsx report, and drive the GUI progress widgets along the way.
    """
    from datetime import date
    from pathlib import Path
    from rfpack.validatabc import validatab
    from rfpack.customparamc import customparam
    from rfpack.pntopdc import pntopd
    from rfpack.graffullc import graffull
    from rfpack.csvfrmxlsxc import xlsxfmcsv
    proglabel2.config(text="")  # label init
    datab = Path('C:/SQLite/20200522_sqlite.db')
    pdf_file = date.today().strftime("%y%m%d") + '_Feat1ParAudit.pdf'
    pdf_path = datab.parent / pdf_file
    xls_file = Path(pdf_path.with_suffix('.xlsx'))
    # NOTE: removed an unused `wb = Workbook()` (and its pyexcelerate
    # import) — nothing in this function ever wrote to or saved it.
    fndtbl = datab.parent / Path('findtable.csv')
    tbcstm = datab.parent / Path('tabcustom.csv')
    validatab(datab, fndtbl, tbcstm)  # locate input tab/parameters in dbabase
    pnglist, sheetsdic = customparam(datab, 'tab_par', 5, root, my_progress,
                                     proglabel2)  # generates png files
    # print Total info in 4 pages, 3 regions per page, bar starts at 60%
    pnglist1 = graffull(xls_file, 'Total', 4, 60, root, my_progress, proglabel2)
    pnglist1.extend(pnglist)  # review png at the beginning
    pntopd(pdf_path, pnglist1, 50, 550, 500, 500)  # png to pdf
    xlsxfmcsv(xls_file, sheetsdic, 75, root, my_progress, proglabel2)
    my_progress[
        'value'] = 100  # prog bar increase a cording to i steps in loop
    proglabel2.config(text=my_progress['value'])
    response = messagebox.showinfo("Specific Audit", "Process Finished")
    proglabel3 = Label(root, text=response)
    my_progress['value'] = 0  # prog bar increase according to i steps in loop
    proglabel2.config(text=" ")
    root.update_idletasks()
def export_to_excel(self, filename: str) -> None:
    """
    Export in XLSX format.

    Chunks ``self.values`` into rows of ``len(self.keys)`` columns,
    prepends the key row as the header, and writes one 'Analytics' sheet
    to *filename*. Exits the process with an error message when there
    are no keys to shape the rows by.
    """
    from pyexcelerate import Workbook  # type: ignore

    self.__preprocess()
    sub = len(self.keys)
    if sub >= 1:
        # Reshape the flat value list into rows of `sub` columns.
        data = [
            self.values[ctr:ctr + sub]
            for ctr in range(0, len(self.values), sub)
        ]
    else:
        typer.secho(
            # Fixed typo in the user-facing message: "occured" -> "occurred".
            "An error occurred please check your query.",
            fg=typer.colors.RED,
            bold=True,
        )
        sys.exit()
    data.insert(0, self.keys)
    wb = Workbook()
    # Dropped the unused `ws =` binding of new_sheet's return value.
    wb.new_sheet("Analytics", data=data)
    wb.save(filename)
    typer.secho(
        "\nAnalytics successfully created in XLSX format ✅",
        bold=True,
    )
def make_excel(self):
    """Build the exchange/gift workbook (one sheet each) with a merged
    title row and centered, sized columns, then save it."""

    def apply_sheet_style(sheet):
        # Merge the title row, center columns 1-4 at size 15, and give
        # column 5 a wider size-30 centered style.
        centered = Style(size=15,
                         alignment=Alignment(horizontal="center",
                                             vertical="center"))
        sheet.range("A1", "E1").merge()
        for col in range(1, 5):
            sheet.set_col_style(col, centered)
        sheet.set_col_style(
            5,
            Style(size=30,
                  alignment=Alignment(horizontal="center",
                                      vertical="center")))

    wb = Workbook()
    for title, rows in ((u'交换表', self.exchange_data),
                        (u'赠与表', self.given_data)):
        apply_sheet_style(wb.new_sheet(title, data=rows))
    wb.save('stucampus/christmas/info/1.xlsx')
async def report(task_id: int):
    """Build an xlsx report for every resource of the given task and
    return it as a FileResponse.

    Columns (Russian): link, found keywords, whether there were problems.
    """
    task = await Task.filter(id=task_id).first()
    # NOTE(review): if no task matches, `task` is None and `task.id` below
    # raises AttributeError — confirm callers guarantee existence.
    result = [['Ссылка', 'Найденные ключевые слова', 'Были ли проблемы']]
    resource_items = await ResourceItem.filter(
        resource__task_id=task.id).order_by('resource__order').values(
        'resource_id', 'resource__domain', 'resource__error_https',
        'resource__error_http', 'keywords_found', 'done', 'error')
    # itertools.groupby only merges ADJACENT rows: this relies on ordering
    # by resource__order keeping rows of the same resource_id contiguous —
    # presumably order is unique per resource; verify against the schema.
    resource_with_grouper = groupby(resource_items, itemgetter('resource_id'))
    for resource_id, items in resource_with_grouper:
        keywords = set()
        problems = set()
        domain = ''
        for item in items:
            # Accumulate all keywords found across the resource's items.
            for x in item['keywords_found']:
                keywords.add(x)
            # A resource counts as problematic only when BOTH the https
            # and http checks recorded an error.
            if item['resource__error_https'] and item['resource__error_http']:
                problems.add(item['resource__error_https'])
                problems.add(item['resource__error_http'])
            domain = item['resource__domain']
        problems.discard(None)
        result.append([
            domain,
            ', '.join(str(s) for s in keywords),
            ', '.join(str(e) for e in problems) if problems else 'Нет'
        ])
    wb = Workbook()
    wb.new_sheet("sheet name", data=result)
    # NOTE(review): fixed /tmp path — concurrent report() calls overwrite
    # each other's file before the response is streamed.
    wb.save('/tmp/result.xlsx')
    return FileResponse('/tmp/result.xlsx')
def output(self, records):
    """Write every cell of every record, then save the workbook to the
    xlsx path derived from the configured json file."""
    excel_path = self.get_file_path(self.json.file, '.xlsx')
    workbook = Workbook()
    self.set_sheets(workbook)
    for record in records:
        for cell in record:
            self.write(cell)
    workbook.save(excel_path)
def write_cell_data_fast():
    """Demo: populate cells through the ws[row][col].value interface."""
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 15                   # a number
    sheet[1][2].value = 20
    sheet[1][3].value = "=SUM(A1,B1)"        # a formula
    sheet[1][4].value = str(datetime.now())  # a date
    workbook.save("write_cell_data_fast.xlsx")
def write_cell_data_faster():
    """Demo: populate cells with set_cell_value (faster than indexing)."""
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    # number, number, formula, date — same cells as the indexing demo.
    for col, value in enumerate((15, 20, "=SUM(A1,B1)", str(datetime.now())), 1):
        sheet.set_cell_value(1, col, value)
    workbook.save("write_cell_data_faster.xlsx")
def download_make_fts_data(output_dir):
    """Download FTS data into *output_dir*, then write the contributions
    workbook under its xlsx/ subdirectory."""
    # Fetch the raw data first.
    download_fts_data(output_dir)
    # Build and save the Excel output.
    contributions = make_fts_contributions_data(output_dir)
    workbook = Workbook()
    workbook.new_sheet("Data", data=contributions)
    workbook.save(os.path.join(output_dir, "xlsx", "contributions.xlsx"))
def Make_stock_title_xlsx(self, dir_path, file_name):
    """Write ``self.title_list`` down column 1 of a new workbook saved
    as ``<dir_path>/<file_name>.xlsx``."""
    print("Make method")
    wb1 = Workbook()
    ws1 = wb1.new_sheet("sheet1")
    # enumerate replaces the manual range(0, len(...), 1) index loop.
    for row, title in enumerate(self.title_list, 1):
        ws1.set_cell_value(row, 1, title)
    wb1.save(dir_path + '/' + file_name + '.xlsx')
def styling_rows_fast():
    """Demo: set a row-level background fill after writing a value."""
    from pyexcelerate import Workbook, Color
    from datetime import datetime
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 123456
    # Whole-row style: red background on row 1.
    sheet[1].style.fill.background = Color(255, 0, 0)
    workbook.save("styling_rows_fast.xlsx")
def __init__(self, filename_or_stream, sheet_name='Sheet1'):
    """Create a writer backed by a fresh Workbook.

    When *sheet_name* is None no sheet is created up front (and the
    ``_sheet`` attribute is left unset, matching the original contract).
    """
    self._stream = filename_or_stream
    self._workbook = Workbook()
    if sheet_name is not None:
        self._sheet = self._workbook.new_sheet(sheet_name)
    self._default_style = None
    self._rowcount = 0
def styling_columns_fastest():
    """Demo: apply a column-level fill style via set_col_style."""
    from pyexcelerate import Workbook, Color, Style, Fill
    from datetime import datetime
    workbook = Workbook()
    sheet = workbook.new_sheet("sheet name")
    sheet[1][1].value = 123456
    # Whole-column style: filled background on column 1.
    sheet.set_col_style(1, Style(fill=Fill(background=Color(255, 0, 0, 0))))
    workbook.save("styling_columns_fastest.xlsx")
def df_to_excel(df, excelfilename):
    """Split *df* into sheets of at most 1,000,000 rows (Excel's row
    limit) and save them all to *excelfilename*."""
    SHEET_LENGTH = 1000000
    print(df)
    wbk = Workbook()
    for start in range(0, len(df), SHEET_LENGTH):
        print(start)
        chunk = df.iloc[start:start + SHEET_LENGTH, ]
        rows = [chunk.columns] + list(chunk.values)
        wbk.new_sheet(sheet_name='Row {}'.format(start), data=rows)
    wbk.save(excelfilename)
def pasarchivo(ruta, datb, tablas, tipo):
    """copy to csv files tables from query results

    For each table name in the *tipo* column of the *tablas* csv, dump
    the table: tables over 1M rows go to compressed csv under ``csv/``
    (Excel can't hold them); the rest become sheets of a single dated
    xlsx workbook in *ruta*.
    """
    import sqlite3
    import pandas as pd
    import timeit
    from pyexcelerate import Workbook
    from pathlib import Path
    from datetime import date

    dat_dir = Path(ruta)
    db_path1 = dat_dir / datb
    start_time = timeit.default_timer()
    conn = sqlite3.connect(db_path1)  # database connection
    c = conn.cursor()
    today = date.today()
    df1 = pd.read_csv(tablas)
    xls_file = "Param" + today.strftime("%y%m%d") + ".xlsx"
    xls_path = dat_dir / xls_file  # xls file path-name
    csv_path = dat_dir / "csv"  # csv path to store big data
    wb = Workbook()  # excelerator file init
    i = 0
    for index, row in df1.iterrows():  # panda row iteration tablas file by tipo column
        line = row[tipo]
        if not pd.isna(row[tipo]):  # nan null values validation
            try:
                df = pd.read_sql_query("select * from " + line + ";", conn)
                if len(df) > 1000000:  # excel not supported
                    csv_loc = line + today.strftime("%y%m%d") + '.csv.gz'  # compressed csv file name
                    print('Table {} saved in {}'.format(line, csv_loc))
                    df.to_csv(csv_path / csv_loc, compression='gzip')
                else:
                    # BUG FIX: the old zip(df.index, [header] + rows) paired
                    # the header row with the first index value and silently
                    # DROPPED the last data row (zip stops at the shorter
                    # sequence). Prepend a blank cell to the header and the
                    # index value to each data row instead.
                    data = [[''] + df.columns.tolist()] + [
                        [idx] + vals
                        for idx, vals in zip(df.index, df.values.tolist())
                    ]
                    wb.new_sheet(line, data=data)
                    print('Table {} stored in xlsx sheet'.format(line))
                    i += 1
            except sqlite3.Error as error:  # sqlite error handling
                print('SQLite error: %s' % (' '.join(error.args)))
    end_time = timeit.default_timer()
    delta = round(end_time - start_time, 2)
    print("Data proc took " + str(delta) + " secs")
    deltas = 0
    if i == 0:
        print('No tables to excel')
    else:
        print("Saving tables in {} workbook".format(xls_path))
        start_time = timeit.default_timer()
        wb.save(xls_path)
        end_time = timeit.default_timer()
        deltas = round(end_time - start_time, 2)
        print("xlsx save took " + str(deltas) + " secs")
    print("Total time " + str(delta + deltas) + " secs")
    c.close()
    conn.close()
def _write_output_file(self):
    """Save the parsed-address DataFrame (header + rows) to the output
    path selected in the GUI."""
    rows = [list(self.file_df)] + self.file_df.values.tolist()
    workbook = Workbook()
    workbook.new_sheet('parsed addresses', data=rows)
    workbook.save(self.gui.output_file.get())