def read_write(df1):
    """Round-trip *df1* through an openpyxl worksheet.

    Writes the frame (with index and header) to the active sheet, rebuilds
    an equivalent DataFrame by reading the sheet's values back, writes the
    rebuilt frame to a second sheet, and saves the workbook.
    """
    from itertools import islice

    workbook = Workbook()
    sheet = workbook.active
    for record in dataframe_to_rows(df1, index=True, header=True):
        sheet.append(record)

    # Read the sheet back: first row -> column labels (minus the index
    # placeholder), first cell of each body row -> index, rest -> data.
    values = sheet.values
    column_labels = next(values)[1:]
    body = list(values)
    row_index = [record[0] for record in body]
    cell_stream = (islice(record, 1, None) for record in body)
    df2 = DataFrame(cell_stream, index=row_index, columns=column_labels)

    sheet = workbook.create_sheet()
    for record in dataframe_to_rows(df2, index=True, header=True):
        sheet.append(record)
    workbook.save("read-write.xlsx")
def csvToExcel3(books_data):
    """Write *books_data* and a small fixed fitness table to an Excel file,
    embedding one cover image per book title, then read the file back and
    print it.

    NOTE(review): `nums = len(books_data)` — if `books_data` is a dict of
    columns this is the number of COLUMNS, not rows; confirm against the
    caller.  The image loop also starts at 1, skipping `titles[0]` —
    presumably intentional (header offset?) but worth verifying.
    """
    df1 = pd.DataFrame(books_data)
    # Fixed demo table: code / height / weight.
    data = { '代号': ['A', 'B', 'C', 'D'], '身高': [178, 177, 180, 175], '体重': [65, 70, 64, 67] }
    df2 = pd.DataFrame(data)
    wb = Workbook()
    # Sheet 0: Douban books; sheet 1: fitness data.  The default 'Sheet'
    # created by Workbook() is left in place (trailing empty sheet).
    ws1 = wb.create_sheet("豆瓣图书", 0)
    ws2 = wb.create_sheet("体测数据", 1)
    nums = len(books_data)
    titles = books_data['titles']
    for i in range(1, nums):
        # Expects a '<title>.jpg' file in the working directory for each book.
        img = openpyxl.drawing.image.Image(titles[i] + '.jpg')
        # Anchor images in column B, offset two rows down for the header.
        ws1.add_image(img, 'B' + str(i + 2))
    for r in dataframe_to_rows(df1, index=True, header=True):
        ws1.append(r)
    for r in dataframe_to_rows(df2, index=True, header=True):
        ws2.append(r)
    wb.save("pandas_openpyxl.xlsx")
    # Read-back sanity check: print the first sheet as a DataFrame.
    df = pd.read_excel('pandas_openpyxl.xlsx')
    print(df)
def export_2_excel(input_dataframe, output_dataframe, towings_dataframe):
    """Write the three dataframes to the 'Inputs', 'Outputs' and 'Towings'
    sheets of a new workbook and save it as ./outputs/Bay Assignment.xlsx.

    :param input_dataframe:   frame written to the 'Inputs' sheet
    :param output_dataframe:  frame written to the 'Outputs' sheet
    :param towings_dataframe: frame written to the 'Towings' sheet
    """
    wb = Workbook()
    # Drop the default 'Sheet' first, then create the three named sheets in
    # order (the original passed arbitrary index= values, which openpyxl
    # simply clamps; explicit ordering is equivalent and clearer).
    wb.remove(wb['Sheet'])  # wb.remove_sheet() is deprecated
    for title, frame in (('Inputs', input_dataframe),
                         ('Outputs', output_dataframe),
                         ('Towings', towings_dataframe)):
        ws = wb.create_sheet(title=title)
        for row in dataframe_to_rows(frame, index=False):
            ws.append(row)

    save_as = './outputs/Bay Assignment.xlsx'
    wb.save(filename=save_as)
    print(' ---> Saved as "' + save_as + '"')
def drawTosheet(sheetName, pos, png):
    """Append five SQL-statistics tables to a new sheet of output.xlsx and
    anchor the given chart images below/next to them.

    :param sheetName: title of the sheet to create
    :param pos: list of anchor cells (e.g. 'H2'); consumed one per image
    :param png: iterable of image file paths
    """
    book = load_workbook('output.xlsx')
    newSheet = book.create_sheet(sheetName)
    # Each table gets a one-cell caption row followed by its header + data.
    newSheet.append(["Sql Order By CPU"])
    for r in dataframe_to_rows(SqlStats.cpuDF, index=False, header=True):
        newSheet.append(r)
    newSheet.append(["Sql Order By Elapsed Time"])
    for r in dataframe_to_rows(SqlStats.esDF, index=False, header=True):
        newSheet.append(r)
    newSheet.append(["Sql Order By User I/O Wait"])
    for r in dataframe_to_rows(SqlStats.ioDF, index=False, header=True):
        newSheet.append(r)
    newSheet.append(["Sql Order By Gets"])
    for r in dataframe_to_rows(SqlStats.getsDF, index=False, header=True):
        newSheet.append(r)
    newSheet.append(["Sql Order By Physical Reads"])
    for r in dataframe_to_rows(SqlStats.prDF, index=False, header=True):
        newSheet.append(r)
    # Pair images with anchor positions: `count` carries the cursor into
    # `pos` across images, and the inner `break` ensures exactly one
    # position is consumed per image (a hand-rolled zip(png, pos)).
    count = 0
    for fig in png:
        for val in range(count, len(pos)):
            position = pos[val]
            count = count + 1
            newSheet.add_image(openpyxl.drawing.image.Image(fig), position)
            break
    book.save("output.xlsx")
def handle_upload_file(request, f):
    """Django view helper: validate an uploaded ABC/XYZ Excel file, run both
    analyses, and stream the resulting workbook back as an attachment.

    :param request: current Django request (used for flash messages)
    :param f: file name under the 'media/' directory
    :return: HttpResponse with the .xlsx payload, or the rendered home page
             when the header row does not match the expected template.
    """
    with BytesIO() as b:
        with pd.ExcelFile("media/"+f) as reader:
            now = datetime.datetime.now().strftime("%m%d%y_%H%M%S")
            filename = 'result_'+str(now)
            # Expected first-sheet header (Vietnamese drug-procurement columns).
            header = ['STT', 'MÃ HÀNG', 'MÃ MẶT HÀNG', 'TÊN THUỐC', 'HOẠT CHẤT', 'HÀM LƯỢNG', 'ĐVT', 'TCKT', 'VEN', 'HÃNG SX', 'NƯỚC SX', 'NHÀ CUNG ỨNG', 'ĐƠN GIÁ', 'SỐ LƯỢNG', 'THÀNH TIỀN', 'NHÓM THUỐC', 'HỆ ĐIỀU TRỊ', 'NĂM']
            index = pd.read_excel(reader, sheet_name = 0).columns
            head = []
            for data in index:
                head.append(data)
            if(header == head):
                # Sheet 0 feeds the ABC/VEN analysis, sheet 1 the XYZ analysis.
                dataABC = pd.read_excel(reader, sheet_name=0)
                dataXYZ = pd.read_excel(reader, sheet_name=1)
                dataResultsABC = ABCVEN(dataABC)
                dataResultsXYZ = AnalysisXYZ(dataXYZ)
                wb = Workbook()
                ws = wb.active
                ws.title = "ABC_VEN_Data"
                for r in dataframe_to_rows(pd.concat(dataResultsABC), index=False, header=True):
                    ws.append(r)
                ABCAnalysisDisplay(wb, pd.concat(dataResultsABC))
                ws = wb.create_sheet(title="XYZ_Data")
                for r in dataframe_to_rows(dataResultsXYZ, index=False, header=True):
                    ws.append(r)
                XYZAnalysisDisplay(wb, dataResultsXYZ)
                # wb.save(b) writes the whole workbook into the buffer;
                # getvalue() below reads it regardless of stream position.
                b.seek(0)
                wb.save(b)
                messages.success(request, 'successfully uploaded!')
                response = HttpResponse(b.getvalue(), content_type='application/vnd.ms-excel')
                response['Content-Disposition'] = 'attachment; filename={}.xlsx'.format(filename)
                return response
            else:
                print("Fail")
                messages.error(request, 'Please insert correct format!')
                return render(request, 'blog/home.html', {'title': 'Home'})
def join_Database():
    """Join the QuickBooks export with the Google-sheet price check, write
    both result tables to a workbook, then re-open it to apply bold/green
    formatting, and return the path of the final report.

    Relies on project helpers `pandas_df_join`, `finial_file_creation` and
    `qb_Import_File_delete_everything`.
    """
    xlsx_to_formated_xlsx = 'media/qbreports/uploads/xlsx_to_formated_xlsx.xlsx'
    downlaod_file_name = 'media/qbreports/uploads/custom_qb_report.xlsx'
    joined_table_file_name = pandas_df_join()
    csv_to_xlsx, google_sheet = finial_file_creation(joined_table_file_name)
    dfUpdated = pd.read_csv(csv_to_xlsx, encoding='latin-1')
    dfgdoc = pd.read_csv(google_sheet, encoding='latin-1')
    print("creating xlsx files")
    wb = Workbook()
    ws = wb.active
    ws.title = 'Matching_Updated'
    ws2 = wb.create_sheet()
    ws2.title = 'Vendor Price Check Gdoc'
    # Register a reusable bold style (the WriteOnlyCell is only a vehicle to
    # attach the style; its value is never written).
    bold = NamedStyle(name='bold')
    bold.font = Font(bold=True)
    wb.add_named_style(bold)
    cell = WriteOnlyCell(ws)
    cell.style = 'bold'
    greenFill = PatternFill(start_color='32CD32', end_color='32CD32', fill_type='solid')
    for row in dataframe_to_rows(dfUpdated, index=False, header=True):
        ws.append(row)
    for row in dataframe_to_rows(dfgdoc, index=False, header=True):
        ws2.append(row)
    wb.save(xlsx_to_formated_xlsx)
    # Second pass: reload and decorate based on column B of each row.
    wb = load_workbook(filename=xlsx_to_formated_xlsx)
    ws = wb.active
    for row in ws.iter_rows():
        # Header rows ("Vendor Name" in column B) and any row with a vendor
        # name get bolded; rows with an empty column B get a green fill on
        # their non-empty cells.  NOTE(review): `== None` should ideally be
        # `is None`, kept as-is to avoid touching behavior here.
        if row[1].value == "Vendor Name":
            for cell in row:
                ws[str(cell.coordinate)].font = Font(bold=True)
        if row[1].value == None:
            for cell in row:
                if cell.value != None:
                    ws[str(cell.coordinate)].fill = greenFill
        if row[1].value != None:
            for cell in row:
                ws[str(cell.coordinate)].font = Font(bold=True)
    wb.save(downlaod_file_name)
    print("created xlsx files")
    # Clean up the intermediate import files.
    qb_Import_File_delete_everything()
    return downlaod_file_name
def write_to_workbook(folder, ticker, cur_metrics, five_year_metrics):
    """Save current and five-year metric frames for *ticker* into
    '<folder>/<ticker>.xlsx', applying number formats to key cells.

    :param folder: destination directory
    :param ticker: stock symbol (used as file name)
    :param cur_metrics: frame for the 'To Date Fundamentals-Ratios' sheet
    :param five_year_metrics: frame for the 'Five Year Fundamentals' sheet
    """
    wb = Workbook()
    try:
        ws = wb.active
        ws.title = 'To Date Fundamentals-Ratios'
        ws2 = wb.create_sheet("Five Year Fundamentals")
        for r in dataframe_to_rows(cur_metrics, index=True, header=True):
            ws.append(r)
        # Assumes the single data row of cur_metrics lands on row 3
        # (header + index-name rows precede it) — TODO confirm.
        ws['B3'].number_format = FORMAT_NUMBER_COMMA_SEPARATED1
        ws['C3'].number_format = '0.000'
        ws['D3'].number_format = '0.000'
        for r in dataframe_to_rows(five_year_metrics, index=True, header=True):
            ws2.append(r)
        # NOTE(review): setting number_format on ColumnDimension objects may
        # not propagate to already-written cells in openpyxl — verify the
        # saved file actually shows these formats.
        for dim in ['B', 'C', 'D']:
            col = ws2.column_dimensions[dim]
            col.number_format = FORMAT_CURRENCY_USD
        for dim in ['E']:
            col = ws2.column_dimensions[dim]
            col.number_format = '0.00%'
        for dim in ['F']:
            col = ws2.column_dimensions[dim]
            col.number_format = '0.000'
        wb.save(f"{folder}/{ticker}.xlsx")
    finally:
        wb.close()
def main():
    """Read the ABC (sheet 0) and XYZ (sheet 1) tables from the Excel file
    named on the command line, run both analyses, and save the formatted
    results to Test.xlsx."""
    if len(sys.argv) < 2:
        return ('Please insert file!')

    source = sys.argv[1]
    abc_input = pd.read_excel(source, sheet_name=0)
    xyz_input = pd.read_excel(source, sheet_name=1)
    abc_results = ABCVEN(abc_input)
    xyz_results = AnalysisXYZ(xyz_input)

    workbook = Workbook()
    abc_sheet = workbook.active
    abc_sheet.title = "ABC_VEN_Data"
    combined_abc = pd.concat(abc_results)
    for record in dataframe_to_rows(combined_abc, index=False, header=True):
        abc_sheet.append(record)
    ABCAnalysisDisplay(workbook, combined_abc)

    xyz_sheet = workbook.create_sheet(title="XYZ_Data")
    for record in dataframe_to_rows(xyz_results, index=False, header=True):
        xyz_sheet.append(record)
    XYZAnalysisDisplay(workbook, xyz_results)
    workbook.save('Test.xlsx')
def write_to_excel(df_list, event_list, start_string):
    """Write each dataframe in *df_list* to an Excel file/sheet derived from
    the matching entry of *event_list*, appending to existing sheets.

    :param df_list: list of pandas DataFrames
    :param event_list: parallel list; entry [1] selects the file, entry [2]
                       the sheet (slashes are sanitized to underscores)
    :param start_string: file-name prefix
    """
    from openpyxl.utils import get_column_letter

    print(event_list)
    # '/' is illegal in both file and sheet names.
    event_list = [
        list(map(lambda x: x.replace("/", "_"), event)) for event in event_list
    ]
    for i, df in enumerate(df_list):
        # Shift index by 1 so data starts at Excel row 2.
        df.index += 1
        # Max string width per column (for column sizing below).
        max_list = [
            max(len(col), df[col].map(str).map(len).max())
            for col in df.columns.values
        ]
        path = ('../total_part/' + start_string + '_' +
                str(event_list[i][1]) + '.xlsx')
        if not os.path.exists(path):
            writer = pd.ExcelWriter(path, engine='openpyxl')
            df.to_excel(writer, sheet_name=str(event_list[i][2]))
            workbook = writer.book
            worksheet = writer.sheets[str(event_list[i][2])]
        else:
            workbook = load_workbook(path)
            # workbook.sheetnames replaces get_sheet_names(), which was
            # removed from openpyxl 3.x.
            if str(event_list[i][2]) in workbook.sheetnames:
                # Append below existing data; skip the header row emitted
                # by dataframe_to_rows.
                worksheet = workbook[str(event_list[i][2])]
                df.index += worksheet.max_row - 1
                dataframe_rows = list(dataframe_to_rows(df))
                for row_count in range(1, len(dataframe_rows)):
                    worksheet.append(dataframe_rows[row_count])
            else:
                worksheet = workbook.create_sheet(str(event_list[i][2]))
                dataframe_rows = list(dataframe_to_rows(df))
                for row_count in range(0, len(dataframe_rows)):
                    worksheet.append(dataframe_rows[row_count])
        worksheet = add_formatting(worksheet)
        # Size data columns (skip column A, which holds the index).
        # get_column_letter is version-stable, unlike cell.column which
        # changed from a letter to an int in openpyxl 2.6.
        cols = list(worksheet.columns)
        for j in range(1, len(cols)):
            worksheet.column_dimensions[get_column_letter(j + 1)].width = max_list[j - 1]
        workbook.save(path)
        workbook.close()
def wash_data(filename):
    """Clean a raw class-schedule export and write a front-desk timetable
    workbook, one sheet per weekday slot (or per term type).

    :param filename: path of the exported Excel file (sheet 'Sheet0')
    :return: path of the generated timetable file
    """
    wb = Workbook()
    data = pd.read_excel(filename, sheet_name='Sheet0', usecols=[4, 6, 7, 9, 11, 14, 17, 18, 20, 21, 22, 24, 29])  # read source table
    weekdays_all = ['周五', '周六上午', '周六中午', '周六下午', '周六晚上', '周日上午', '周日中午', '周日下午', '周日晚上', '周二', '周三', '周四']
    weekdays_all2 = ['上午', '中午', '下午', '晚上']
    # Output file name: "<campus>__<term>".
    finish_excel = data.loc[2, '教学点'] + '__' + data.loc[2, '学期']
    # Strip trailing digits from teacher names.  regex=True is required:
    # pandas >= 2.0 treats str.replace patterns as literals by default,
    # which silently broke these two calls.
    data.教师 = data.教师.str.replace('[0-9]*$', '', regex=True)
    data_fudao = data.辅导老师.str.replace('[0-9]*$', '', regex=True)
    # Replace the name portion with a '辅导:' prefix, then re-attach the name.
    data_fudao2 = data_fudao.str.replace('.*[\u4e00-\u9fa5]', '辅导:', regex=True)
    data.辅导老师 = data_fudao2 + data_fudao
    # Merge teacher and assistant-teacher into one column.
    data.教师 = data.教师.str.cat(data.辅导老师, join='left', sep=' ')
    # Drop parenthesized/bracketed annotations from room names.
    data.教室 = data.教室.str.replace('[(].*?[)]|[【].*?[】]', '', regex=True)
    data.rename(columns={'已缴人数': '人数'}, inplace=True)
    data.sort_values(by='上课时间', axis=0, ascending=True, inplace=True)  # sort by class time
    data = data.drop(columns=['辅导老师', '教学点'], axis=1)

    if data.学期.iloc[1] == '春季班' or data.学期.iloc[1] == '秋季班':
        # Spring/autumn term: one sheet per weekday time slot.
        for week in weekdays_all:
            data_num = data.loc[data.上课时间.str.contains(week)]
            if week == '周二' or week == '周三' or week == '周四' or week == '周五':
                data_num.sort_values(by='教室', axis=0, ascending=True, inplace=True)
            else:
                data_num.sort_values(by=['上课时间', '教室'], axis=0, ascending=True, inplace=True)
            ws = wb.create_sheet(week + '(' + str(len(data_num.index)) + '个课)', -1)
            for r in dataframe_to_rows(data_num, index=False, header=True):
                ws.append(r)
            cell_style0(ws, len(data_num.index), week, finish_excel)
    elif data.学期.iloc[1] == '短期班' or data.学期.iloc[1] == '活动类' or data.学期.iloc[1] == '诊断类':
        # Short-term / activity / diagnostic classes: single sheet, sorted
        # by course end date.
        week = data.学期.iloc[1]
        ws = wb.create_sheet(week, -1)
        data.sort_values(by='结课日期', axis=0, ascending=True, inplace=True)
        for r in dataframe_to_rows(data, index=False, header=True):
            ws.append(r)
        cell_style0(ws, len(data.index), week, finish_excel)
    else:
        # Summer/winter term ('暑假班' / '寒假班'): normalize Chinese numerals
        # in the time strings so lexicographic sort orders correctly.
        data['上课时间'] = data['上课时间'].str.replace('一', '1')
        data['上课时间'] = data['上课时间'].str.replace('二', '2')
        data['上课时间'] = data['上课时间'].str.replace('三', '3')
        data['上课时间'] = data['上课时间'].str.replace('四', '4')
        data['上课时间'] = data['上课时间'].str.replace('零', '0')
        for week in weekdays_all2:
            data_num = data.loc[data.上课时间.str.contains(week)]
            data_num.sort_values(by=['上课时间', '教室'], axis=0, ascending=True, inplace=True)
            ws = wb.create_sheet(week + '(' + str(len(data_num.index)) + '个课)', -1)
            for r in dataframe_to_rows(data_num, index=False, header=True):
                ws.append(r)
            cell_style0(ws, len(data_num.index), week, finish_excel)

    finish_excel = finish_excel + '__' + '前台课表.xlsx'
    wb.save(finish_excel)
    return str(finish_excel)
def cvod_26_covid(a):
    """Build the daily '26 COVID' sampling-point summary workbook.

    Pulls the current list from the Parus Oracle DB, merges it with the
    previous hand-edited Excel file (if present), de-duplicates the union,
    and pours all three frames into a copy of the 26_COVID_19_svod.xlsx
    template.

    :param a: unused; kept for the task-dispatch call convention.
    :return: path of the generated workbook.
    """
    # Context manager closes the SQL file (was open(...).read() — leaked handle).
    with open('sql/parus/covid_26_svod.sql', 'r') as sql_file:
        sql = sql_file.read()
    with cx_Oracle.connect(userName, password, userbase, encoding="UTF-8") as con:
        df = pd.read_sql(sql, con)
    df['type'] = 'parus'

    old_file = get_dir('punct_zabor') + '/' + datetime.datetime.now().strftime(
        '%d.%m.%Y') + ' Пункты отбора.xlsx'
    # Defaults for the numeric columns of the hand-edited file.
    values = {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0}
    try:
        old = pd.read_excel(old_file, skiprows=3, header=None, sheet_name='Соединение')
    except Exception:  # narrowed from a bare except: missing file -> empty frame
        old = pd.DataFrame()
    else:
        # Drop fully-empty rows, fill numeric gaps, strip the two layout columns.
        old = old.loc[~(old[2].isnull() & old[3].isnull() & old[5].isnull())].fillna(value=values)
        del old[0]
        del old[14]
        old['type'] = 'file'
    if len(old.columns) == len(df.columns):
        old.columns = df.columns

    new_df = pd.concat([df, old], ignore_index=True).drop_duplicates(
        subset=['LAB_UTR_MO', 'ADDR_PZ', 'LAB_UTR_02'])

    date = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%d_%m_%Y')
    new_name = date + '_26_COVID_19_cvod.xlsx'
    shablon_path = get_dir('help')
    shutil.copyfile(shablon_path + '/26_COVID_19_svod.xlsx',
                    shablon_path + '/' + new_name)
    wb = openpyxl.load_workbook(shablon_path + '/' + new_name)

    def _pour(frame, sheet_name):
        # Write `frame` values into the template sheet starting at row 5, col B.
        ws = wb[sheet_name]
        rows = dataframe_to_rows(frame, index=False, header=False)
        for r_idx, row in enumerate(rows, 5):
            for c_idx, value in enumerate(row, 2):
                ws.cell(row=r_idx, column=c_idx, value=value)

    _pour(df, 'Из паруса')
    _pour(old, 'Из файла')
    _pour(new_df, 'Соединение')
    wb.save(shablon_path + '/' + new_name)
    return shablon_path + '/' + new_name
def compile_pm_rm_excel(excel_dir_store):
    """Collect per-horizon PM and RM blocks from several result workbooks
    and write them side by side into ./results/master_pm_rd.xlsx.

    Each source workbook contributes 5 sheets (one per horizon 1/3/6/12/24);
    rows 1-9 of each sheet hold the PM block, rows 12+ column 0 the RM block.

    :param excel_dir_store: list of source workbook paths
    """
    # One accumulator list per horizon.
    master_pm = [[] for x in range(5)]
    master_rm = [[] for x in range(5)]
    for excel_dir in excel_dir_store:
        xls = pd.ExcelFile(excel_dir)
        # Skip the first sheet; the remaining 5 map onto the horizons.
        sheet_names = xls.sheet_names[1:]
        for sheet, pm_store, rm_store in zip(sheet_names, master_pm, master_rm):
            df = pd.read_excel(excel_dir, sheet_name=sheet, index_col=None).values
            pm_store.append(df[1:10, :])
            # RM is a single column; [..., None] keeps it 2-D for concat.
            rm_store.append(df[11:, 0][..., None])
    # Horizontally concatenate the blocks gathered from every workbook.
    for idx, pm_h in enumerate(master_pm):
        pm = pm_h[0]
        for pm_hh in pm_h[1:]:
            pm = np.concatenate((pm, pm_hh), axis=1)
        master_pm[idx] = pm
    for idx, pm_h in enumerate(master_rm):
        rm = pm_h[0]
        for pm_hh in pm_h[1:]:
            rm = np.concatenate((rm, pm_hh), axis=1)
        master_rm[idx] = rm
    excel_dir = create_excel_file('./results/master_pm_rd.xlsx')
    wb = openpyxl.load_workbook(excel_dir)
    for idx, (pm, rm) in enumerate(zip(master_pm, master_rm)):
        pm_name = 'pm_h{}'.format([1, 3, 6, 12, 24][idx])
        rm_name = 'rm_h{}'.format([1, 3, 6, 12, 24][idx])
        wb.create_sheet(pm_name)
        wb.create_sheet(rm_name)
        ws = wb[pm_name]
        # Two columns ('m', 'p') per source workbook.
        pm_df = pd.DataFrame(data=pm, columns=['m', 'p'] * len(excel_dir_store))
        rows = dataframe_to_rows(pm_df, index=False)
        for r_idx, row in enumerate(rows, 1):
            for c_idx, value in enumerate(row, 1):
                # +1 leaves row 1 free above the table.
                ws.cell(row=r_idx + 1, column=c_idx, value=value)
        ws = wb[rm_name]
        rm_df = pd.DataFrame(data=rm, columns=['Relative RMSE'] * len(excel_dir_store))
        rows = dataframe_to_rows(rm_df, index=False)
        for r_idx, row in enumerate(rows, 1):
            for c_idx, value in enumerate(row, 1):
                ws.cell(row=r_idx + 1, column=c_idx, value=value)
    wb.save(excel_dir)
    pass
def cvod_27_regiz(a):
    """Build the '27 COVID regiz' report: test counts from the REGIZ web
    service, Parus DB figures, and an NSI reference table, poured into a
    copy of the 27_COVID_19_regiz.xlsx template.

    :param a: unused; kept for the task-dispatch call convention.
    :return: path of the generated workbook.
    """
    # The 870 endpoint is derived from the 837 one configured in the env.
    url = os.getenv('url837').replace('837', '870')
    data = requests.get(url).json()
    regiz = pd.DataFrame.from_dict(data)
    # Keep only the columns the template expects, in template order.
    columns = [
        'orderresponse_assign_organization_level1_key', 'ShortNameMO', 'Кол-во тестов', 'Кол-во ПЦР тестов', 'Кол-во положительных ПЦР тестов', 'Кол-во тестов на антитела', 'Кол-во положительных тестов на антитела', 'Кол-во тестов на антитела после вакцинации', 'Кол-во положительных тестов на антитела после вакцинации'
    ]
    regiz = regiz[columns]
    sql = open('sql/parus/covid_27_regiz.sql', 'r').read()
    with cx_Oracle.connect(userName, password, userbase, encoding="UTF-8") as con:
        df = pd.read_sql(sql, con)
    sql = open('sql/covid/nsi_27.sql', 'r').read()
    with sqlalchemy.create_engine(
            f"mssql+pymssql://{user}:{passwd}@miacbase3/NsiBase",
            pool_pre_ping=True).connect() as con:
        nsi = pd.read_sql(sql, con)
    # Report is dated two days back.
    date = (datetime.datetime.now() - datetime.timedelta(days=2)).strftime('%d_%m_%Y')
    new_name = date + '_27_COVID_19_regiz.xlsx'
    shablon_path = get_dir('help')
    shutil.copyfile(shablon_path + '/27_COVID_19_regiz.xlsx', shablon_path + '/' + new_name)
    wb = openpyxl.load_workbook(shablon_path + '/' + new_name)
    # Each frame lands on its own template sheet at a sheet-specific offset.
    ws = wb['parus']
    rows = dataframe_to_rows(df, index=False, header=False)
    for r_idx, row in enumerate(rows, 4):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    ws = wb['regiz']
    rows = dataframe_to_rows(regiz, index=False, header=True)
    for r_idx, row in enumerate(rows, 2):
        for c_idx, value in enumerate(row, 2):
            ws.cell(row=r_idx, column=c_idx, value=value)
    ws = wb['nsi']
    rows = dataframe_to_rows(nsi, index=False, header=True)
    for r_idx, row in enumerate(rows, 1):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    wb.save(shablon_path + '/' + new_name)
    return shablon_path + '/' + new_name
def cvod_52_covid(a):
    """Build the two daily '52 COVID' workbooks (preliminary and main).

    The preliminary file gets today's data plus yesterday's for comparison;
    the main file gets today's data only.  Both are filled from copies of
    their respective templates.

    :param a: unused; kept for the task-dispatch call convention.
    :return: both generated paths joined with ';'.
    """
    sql1 = open('sql/parus/covid_52_svod.sql', 'r').read()
    sql2 = open('sql/parus/covid_52_svod_old.sql', 'r').read()
    with cx_Oracle.connect(userName, password, userbase, encoding="UTF-8") as con:
        df = pd.read_sql(sql1, con)
    with cx_Oracle.connect(userName, password, userbase, encoding="UTF-8") as con:
        df_old = pd.read_sql(sql2, con)
    # Report date comes from the data itself (assumes a single DAY value).
    date = str(df['DAY'].unique()[0])
    # Drop bookkeeping columns not present in the template layout.
    del df['ORGANIZATION']
    del df_old['ORGANIZATION']
    del df['DAY']
    del df_old['DAY']
    new_name = date + '_52_COVID_19_pred.xlsx'
    new_name_2 = date + '_52_COVID_19_osn.xlsx'
    shablon_path = get_dir('help')
    shutil.copyfile(shablon_path + '/52_COVID_19_pred.xlsx', shablon_path + '/' + new_name)
    shutil.copyfile(shablon_path + '/52_COVID_19_osn.xlsx', shablon_path + '/' + new_name_2)
    # Preliminary workbook: today's data + yesterday's comparison sheet.
    wb = openpyxl.load_workbook(shablon_path + '/' + new_name)
    ws = wb['52 COVID']
    rows = dataframe_to_rows(df, index=False, header=False)
    for r_idx, row in enumerate(rows, 11):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    ws = wb['Вчера']
    rows = dataframe_to_rows(df_old, index=False, header=False)
    for r_idx, row in enumerate(rows, 11):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    wb.save(shablon_path + '/' + new_name)
    # Main workbook: today's data only.
    wb = openpyxl.load_workbook(shablon_path + '/' + new_name_2)
    ws = wb['52 COVID']
    rows = dataframe_to_rows(df, index=False, header=False)
    for r_idx, row in enumerate(rows, 11):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    wb.save(shablon_path + '/' + new_name_2)
    return shablon_path + '/' + new_name + ';' + shablon_path + '/' + new_name_2
def wash_data(filename):
    """Clean a raw class-schedule export and write the materials-handout
    workbook, one sheet per weekday slot.

    :param filename: path of the exported Excel file (sheet 'Sheet0')
    :return: path of the generated file
    """
    wb = Workbook()
    data = pd.read_excel(filename, sheet_name='Sheet0', usecols=[4, 6, 7, 9, 11, 14, 17, 18, 22, 25, 29])  # read source table
    # Lesson counter is 0-based in the export; display it 1-based.
    data['已上课次'] = data['已上课次'] + 1
    weekdays_all = ['周五', '周六上午', '周六中午', '周六下午', '周六晚上'
        , '周日上午', '周日中午', '周日下午', '周日晚上', '周二', '周三', '周四']
    weekdays_all2 = ['上午', '中午', '下午', '晚上']
    # Output file name: "<campus>__<term>".
    finish_excel = data.loc[2, '教学点'] + '__' + data.loc[2, '学期']
    # Strip trailing digits from assistant-teacher names.  regex=True is
    # required: pandas >= 2.0 treats str.replace patterns as literals by
    # default, which silently broke these calls.
    data_fudao = data.辅导老师.str.replace('[0-9]*$', '', regex=True)
    # Replace the name portion with the '辅导' marker, then re-attach the name.
    data_fudao2 = data_fudao.str.replace('.*[\u4e00-\u9fa5]', '辅导', regex=True)
    data.辅导老师 = data_fudao2 + '\n' + data_fudao
    data.教师 = data.教师.str.replace('[0-9]\d*$', '', regex=True)  # strip trailing digits
    data.教师 = data.教师 + '\n' + data.辅导老师
    data.年级 = data['年级'].str.cat(data['班次'], join='left')
    # Drop parenthesized/bracketed annotations from room names.
    data.教室 = data.教室.str.replace('[(].*?[)]|[【].*?[】]', '', regex=True)
    data.rename(columns={'已上课次': '课次', '已缴人数': '人数'}, inplace=True)
    data.sort_values(by='上课时间', axis=0, ascending=True, inplace=True)  # sort by class time
    data = data.drop(columns=['辅导老师'], axis=1)
    data = data.drop(columns=['班次', '教学点'], axis=1)

    if data.学期.iloc[1] == '春季班' or data.学期.iloc[1] == '秋季班':
        # Spring/autumn term: one sheet per weekday time slot.
        for week in weekdays_all:
            data_num = data.loc[data.上课时间.str.contains(week)]
            if week == '周二' or week == '周三' or week == '周四' or week == '周五':
                data_num.sort_values(by='教室', axis=0, ascending=True, inplace=True)
            ws = wb.create_sheet(week + '(' + str(len(data_num.index)) + '个课)', -1)
            ws.column_dimensions.group('A', hidden=True)
            for r in dataframe_to_rows(data_num, index=False, header=False):
                ws.append(r)
            cell_style(ws, len(data_num.index))
    else:
        # Other terms: one sheet per time-of-day slot.
        for week in weekdays_all2:
            data_num = data.loc[data.上课时间.str.contains(week)]
            ws = wb.create_sheet(week + '(' + str(len(data_num.index)) + '个课)', -1)
            ws.column_dimensions.group('A', hidden=True)
            for r in dataframe_to_rows(data_num, index=False, header=False):
                ws.append(r)
            cell_style(ws, len(data_num.index))

    finish_excel = finish_excel + '__' + '随材发放条.xlsx'
    wb.save(finish_excel)
    return str(finish_excel)
def output_to_excel_pivot(dataobj):
    """Refresh the data sheet backing a pivot table and re-point the pivot
    cache at the new data range.

    :param dataobj: object exposing .sourcefile, .datasheet, .pivotsheet,
                    .outputfile and a pandas DataFrame in .data
    """
    wb = openworkbook(dataobj.sourcefile)
    datasheet = dataobj.datasheet
    ws_pvt = wb[dataobj.pivotsheet]
    ws_data = wb[datasheet]

    # Clear the data sheet by recreating it, then write header + rows.
    wb.remove(ws_data)
    wb.create_sheet(datasheet)
    ws_data = wb[datasheet]
    # (The original built this via a no-op w.replace('i', 'i') per label;
    # a plain list of the column labels is equivalent.)
    header = list(dataobj.data.columns)
    ws_data.append(header)
    for r in dataframe_to_rows(dataobj.data, index=False, header=False):
        ws_data.append(r)

    # Re-point the pivot cache at the freshly written range and force a
    # refresh when the file is next opened.
    pivot = ws_pvt._pivots[0]
    pivot.cache.cacheSource.worksheetSource.ref = f'A1:{get_column_letter(len(dataobj.data.columns))}{len(dataobj.data.index)+1}'
    pivot.cache.refreshOnload = True
    try:
        wb.save(filename=dataobj.outputfile)
    except OSError:  # narrowed from a bare except: file locked by Excel
        print('excel file in use. No output file made')
        logging.warning(f'file in use {dataobj.outputfile}')
def get_pipe_export(gu_list, type, start_date, end_date, tot_num, year):
    """Generate the requested pipe graphs/maps, then bundle the chart image
    and the summary table into the templates/df<tot_num>.xlsx workbook.

    :param gu_list: districts to report on
    :param type: 'total' | 'add' | 'total_add' (which chart set to build)
    :param start_date, end_date: reporting window
    :param tot_num: numeric suffix shared by the intermediate files
    :param year: only used by the 'total_add' map
    """
    if type == "total":
        get_pipe_total_graph_gus(gu_list, start_date, end_date, tot_num)
        get_pipe_total_map_by_gu(gu_list, start_date, end_date, tot_num)
    elif type == "add":
        get_pipe_add_graph_gus(gu_list, start_date, end_date, tot_num)
        get_pipe_add_map_by_gu(gu_list, start_date, end_date, tot_num)
    elif type == "total_add":
        get_pipe_total_add_graph_gus(gu_list, start_date, end_date, tot_num)
        get_pipe_total_add_map_by_gu(gu_list, year, tot_num, start_date, end_date)

    df_sum = pd.read_excel('templates/df_sum{}.xlsx'.format(tot_num))
    wb = openpyxl.load_workbook("templates/df{}.xlsx".format(tot_num))  # open workbook

    # "Graph" sheet: embed the rendered chart image at A1.
    sheet_last = wb.create_sheet("Graph")  # append sheet at the end
    ws = wb.worksheets[1]
    # (Removed a dead `PIL.Image.open` whose result was immediately
    # overwritten by the openpyxl Image below — it only leaked a handle.)
    img = openpyxl.drawing.image.Image("templates/df{}.jpg".format(tot_num))
    ws.add_image(img, 'A1')

    # "Sum" sheet: append the summary table, skipping dataframe_to_rows'
    # single-element separator rows.
    sheet_last = wb.create_sheet("Sum")
    ws = wb.worksheets[2]
    for row in dataframe_to_rows(df_sum, index=True, header=True):
        if len(row) > 1:
            ws.append(row)
    wb.save("templates/df{}.xlsx".format(tot_num))  # save workbook
def CreateExcelSvod(nameSheetShablon, startRowsShablons, nameSheetSvod, startRowsSvod, pathSvod, i):
    """Concatenate the given sheet from every MO workbook in the upload
    folder into the summary workbook at *pathSvod*.

    :param nameSheetShablon: sheet name to read from each source file
    :param startRowsShablons: rows to skip in each source sheet
    :param nameSheetSvod: destination sheet in the summary workbook
    :param startRowsSvod: first destination row
    :param pathSvod: path of the summary workbook
    :param i: when 1, stamp tomorrow's date into Q2
    :return: 1 on completion
    """
    _list.clear()  # module-level accumulator, reused across calls
    for excel in glob.glob(_path_folder_files_mo):
        try:
            # TODO(review): `head=None` looks like a typo for `header=None`;
            # confirm against the pandas version in use before changing —
            # with the except below any per-file error is skipped silently.
            df = pd1.read_excel(excel, sheet_name=nameSheetShablon, dtype=str, skiprows=startRowsShablons, head=None)
        except Exception:  # narrowed from a bare except: skip unreadable files
            pass
        else:
            _list.append(df)
    svod = pd.concat(_list)
    # Sort by the numeric row-id column, drop blanks and duplicates.
    svod["Unnamed: 0"] = pd.to_numeric(svod["Unnamed: 0"])
    svod = svod.sort_values(["Unnamed: 0"])
    svod = svod.loc[svod["Unnamed: 0"].notnull()]
    svod = svod.drop_duplicates()
    wb = openpyxl.load_workbook(pathSvod)
    ws = wb[nameSheetSvod]
    rows = dataframe_to_rows(svod, index=False, header=False)
    for r_idx, row in enumerate(rows, startRowsSvod):
        for c_idx, value in enumerate(row, 1):
            ws.cell(row=r_idx, column=c_idx, value=value)
    if (i == 1):
        # Stamp the report date (tomorrow) on the first sheet only.
        ws['Q2'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%d.%m.%Y')
    wb.save(pathSvod)
    return 1
def write_output(df):
    """Write the GC-MS analysis workbook.

    Input: a pandas DataFrame of GC-MS peak areas, calculated masses and
    relative concentrations.  Output: output.xlsx with worksheets for
    (a) experiment parameters, (b) raw GC-MS peak areas, (c) averaged
    yields, (d) activities/selectivities, (e) a polished summary.
    Only the GCMS_areas sheet is populated here; the others are placeholders
    for user input and Excel formulas.
    """
    from openpyxl import Workbook
    from openpyxl.utils.dataframe import dataframe_to_rows

    print("writing output Excel: data = " + str(df))
    wb = Workbook()
    ws0 = wb.active
    ws0.title = "exp_parameters"  # user enters Cr amt, nonane amt, Cr:L ratio, reaction time, PE masses
    ws1 = wb.create_sheet()
    ws1.title = "GCMS_areas"
    for r in dataframe_to_rows(df, index=True, header=True):
        ws1.append(r)
    ws2 = wb.create_sheet()
    ws2.title = "yields_avg"  # Excel equations from response factors, ws1 areas, nonane amt
    ws3 = wb.create_sheet()
    ws3.title = "output"  # Excel equations: activities/selectivities from ws2 + ws0 inputs
    # FIX: was wb.create(), which is not a Workbook method and raised
    # AttributeError before the file was ever saved.
    ws4 = wb.create_sheet()
    ws4.title = "polished"  # reformatted view of ws3
    wb.save("output.xlsx")
def main_process(odir):
    """Build the dated production report: one sheet per non-empty Trello
    list described by the module-level `list_pd` frame.

    :param odir: output directory for Production_Report_<YYYYMMDD>.xlsx
    """
    today = datetime.datetime.now().strftime('%Y%m%d')
    ofile = os.path.join(odir, 'Production_Report_%s.xlsx' % today)
    wb = openpyxl.Workbook()
    col_as_letter = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
    for i, row in list_pd.iterrows():
        # Sheet title comes from the 'name' column; Excel limits titles to
        # 31 chars, so long archive names are compressed.
        list_name = row['name']
        if len(list_name) > 30:
            list_name = list_name.replace('Trello card to be archived (m', 'M').replace(')', '')
        # NOTE: row.name (the index label) is deliberately different from
        # row['name'] — it is the list id passed to the fetch helper.
        cards_table = organise_cards_in_list(row.name)
        if not cards_table.empty:
            ws = wb.create_sheet(list_name)
            # Column letter -> width (max of longest value and header label).
            dims = {}
            for b, c in enumerate(cards_table.columns):
                max_len = cards_table[c].str.len().max()
                col_label_len = len(c)
                letter = col_as_letter[b]
                dims[letter] = max(max_len, col_label_len)
            for r in dataframe_to_rows(cards_table, index=False, header=True):
                ws.append(r)
            # Style the first column and header row with the built-in
            # 'Pandas' named style.
            for cell in ws['A'] + ws[1]:
                cell.style = 'Pandas'
            for col, value in dims.items():
                ws.column_dimensions[col].width = value
        else:
            print('Nothing in column "%s"...' % list_name)
    # Drop the default empty sheet created by Workbook().
    del wb['Sheet']
    wb.save(ofile)
    print('Output saved to %s' % ofile)
def toexcel(self, directory):
    """Export the synthesized result table to an Excel file.

    Writes self.text_result to the single sheet "Liste détaillée", drops
    the index column and the spurious second row emitted by
    dataframe_to_rows, and sizes the columns to their header labels.

    :param directory: destination file path passed to Workbook.save
    """
    self.__synthesis()
    wb = op.Workbook()
    ws = wb.active
    for r in dataframe_to_rows(self.text_result, index=True, header=True):
        ws.append(r)
    # Remove the index column and the index-name row added by the
    # index=True export above.
    ws.delete_cols(1, amount=1)
    ws.delete_rows(2, amount=1)
    ws.title = "Liste détaillée"
    ws.column_dimensions["A"].width = 45
    # Width = header length, floored at 15, for all but the first two columns.
    cols = self.text_result.columns.tolist()
    for i, val in enumerate(cols):
        if i not in [0, 1]:
            ws.column_dimensions[op.utils.get_column_letter(i + 1)].width = max(len(val), 15)
    # (Removed a leftover debug print and dead commented-out code.)
    wb.save(directory)
def industryMTDReport(self): """[Prints Industry MTD attribution Report.] """ # Create a folder to save the report if it doesn't exist. if os.path.isdir(self.dstPath) is False: os.makedirs(self.dstPath) print('Folder created') else: print('Folder already exists') # Create the data data = self.industryMTDAttribution() # Open the Excel template. wb = pyxl.load_workbook(self.srcPath) #Print data to Excel template sheet = wb['IndustryAttribution'] sheet['A1'] = self.date_obj sheet['A1'].number_format = '[$-409]YYY-mm-dd;@' rows = dataframe_to_rows(data, header=False) for r_idx, row in enumerate(rows, 2): for c_idx, value in enumerate(row, 1): sheet.cell(row=r_idx, column=c_idx, value=value) # Save the Excel workbook in the folder savePath = f'{self.dstPath}Industry_MTD_Attribution_{self.date}.xlsx' wb.save(savePath) print(f'file saved to {savePath}')
def avg_tab():
    """Build the 'Past Data' sheet: historical averages plus a 'Target'
    reference row, with borders, a highlighted header, and column widths.

    Uses the module-level workbook `wb` and helper `set_border`.
    """
    df = get_avg()
    df['date'] = pd.to_datetime(df['date'])
    new_row = {'date': 'Target', 'timing': 9, 'numbers_of_regen': ''}
    df['date'] = df['date'].dt.strftime('%b-%y')
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # documented replacement for appending a single row.
    df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)

    ws2 = wb.create_sheet("Past Data", 1)
    ws2.title = "Past Data"
    ws2.sheet_view.showGridLines = False
    for r in dataframe_to_rows(df, index=False, header=True):
        ws2.append(r)

    # Border the whole table and (again) the header row.
    BORDER_LIST = [f'A1:C{ws2.max_row}', 'A1:C1']
    for pos in BORDER_LIST:
        set_border(ws2, pos)

    # Header row: bold, centered, highlighted.
    for cell in ws2[1]:
        cell.font = Font(bold=True)
        cell.alignment = Alignment(horizontal="center")
        cell.fill = PatternFill(start_color="feedc6", end_color="feedc6", fill_type="solid")
    ws2.column_dimensions['A'].width = 11.5
    ws2.column_dimensions['C'].width = 17
def conversion_tab():
    """Add the 'conversion' sheet (at position 4) to the module-level
    workbook and fill it from the conversion report frame."""
    sheet = wb.create_sheet("conversion", 4)
    sheet.sheet_view.showGridLines = False
    sheet.title = "conversion"
    report = conversion.report_convo()
    for record in dataframe_to_rows(report, index=False, header=True):
        sheet.append(record)
def printtofile(arr):
    """Printing to an excel file a given number of solutions in different worksheets
    while formatting them in a readable format such as adjusting column size,
    collapsing cells, styling etc.

    Relies on module-level globals: classes, days, periods, week,
    teacher_list, class_list, filepath.
    """
    # final[class][day][0] = weekday label; [1..periods] = teacher per period.
    final = np.empty((classes, days, periods + 1), dtype=np.dtype(('U', 50)))
    for i in range(classes):
        for j in range(days):
            # NOTE(review): the hard-coded 5 here presumably equals `days`;
            # this inner labelling loop is also re-run for every j — confirm
            # whether that repetition is intentional.
            for jj in range(5):
                final[i][jj][0] = week[jj]
            for k in range(periods):
                # arr holds 1-based teacher indices.
                final[i][j][k + 1] = teacher_list[int(arr[i][j][k]) - 1]
    wb = openpyxl.Workbook()
    ws = wb.active
    # Fixed width for the weekday column plus one column per period.
    for i in range(periods + 1):
        ws.column_dimensions[chr(65 + i)].width = 20
    for i in range(classes):
        # Title banner row (merged across the table width) per class.
        ws.merge_cells(start_row=ws.max_row + 2, start_column=1, end_row=ws.max_row + 2, end_column=periods + 1)
        cell = ws.cell(row=ws.max_row, column=1)
        cell.value = class_list[i]
        cell.style = "Title"
        cell.alignment = Alignment(vertical='center', horizontal='center')
        df = pd.DataFrame(final[i])
        for j in dataframe_to_rows(df, index=False, header=False):
            ws.append(j)
        # Collapse each class's timetable rows under its title.
        ws.row_dimensions.group(ws.max_row - days + 1, ws.max_row, hidden=True)
    wb.save(filepath)
def start(): datetime_bsas = datetime.now(tz_bsas) # guarda la hora de bsas # formatea la hora y la guarda en la variable hora hora = datetime_bsas.strftime("%d/%m/%Y:%H:%M") #AL FORMATO HAY QUE AÑADIRLE LOS SEGUNDOS ASI NO ESTA TODO EL MINUTO ENTERO ENVIANDO EL MENSAJE HACER ESTO ES LO PROXIMO #POR AHORA ESTA ES LA FUNCIÓN QUE ESTOY EDITANDO, ESTA IGUAL A "start", SOLO POR LO COMENTADO ARRIBA. LO QUE FALTA ES #ACTUALIZAR EL EXCEL UNA VEZ QUE HAYA MANDADO EL MENSAJE. LO ESTOY INTENTANDO EN ./test.py #-----------------FIX----------------- #El error de arriba lo arreglé updateando la fecha del excel que valida la equidad por la nueva fecha, entonces #encuentra la equidad y la cambia. print(hora, "hora actual") xls = pd.read_excel("horarios.xlsx", sheet_name="Hoja 1", ) wb = Workbook() ws = wb.active c = -1 for col in xls["Dia"]: c = c+1 print(col) if str(col) == hora: fecha = xls.at[c, "Dia"] fecha_obj = datetime.strptime(fecha, "%d/%m/%Y:%H:%M") xls.at[c, "Dia"] = datetime.strftime( fecha_obj + pd.Timedelta("2 W"), "%d/%m/%Y:%H:%M") xls = pd.DataFrame(xls) for r in dataframe_to_rows(xls, index=False, header=True): ws.append(r) ws.title = "Hoja 1" wb.save(filename='horarios.xlsx') return (200,str(xls.at[c,"Materia"]),str(xls.at[c,"Hora"]))
def generate_yunda_template(cls):
    """Generate the monthly Yunda tracking-number application workbook.

    Writes the fixed header block (client, waybill, origin, flight, entry
    date), the merged section captions on row 6, then appends
    cls.yunda_template and applies cell styling.
    """
    name = '{}.{}韵达单号申请.xlsx'.format(cls.date[4:6], cls.date[6:])
    wb = Workbook()
    ws = wb.active
    # Fixed header key/value rows 1-5.
    ws.cell(1, 1).value = '电商客户名称'
    ws.cell(1, 2).value = '厦门东港国际有限公司'
    ws.cell(2, 1).value = '总运单号/提单号'
    ws.cell(2, 2).value = cls.waybill_num
    ws.cell(3, 1).value = '起运国家'
    ws.cell(3, 2).value = '美国'
    ws.cell(4, 1).value = '航班航次号'
    ws.cell(4, 2).value = 'MF850'
    ws.cell(5, 1).value = '进境日期'
    # Entry date = day before the flight date.
    ws.cell(5, 2).value = datetime.strftime(
        cls.flight_date - timedelta(days=1), '%Y-%m-%d')
    # Row 6: merged section captions over the template columns.
    ws.merge_cells('A6:B6')
    ws.cell(6, 1).value = '运单信息'
    ws.merge_cells('C6:I6')
    ws.cell(6, 3).value = '寄件人信息'
    ws.merge_cells('J6:Q6')
    ws.cell(6, 10).value = '收件人信息'
    ws.merge_cells('R6:Y6')
    ws.cell(6, 18).value = '货物信息'
    # (Removed a dangling `ws.cell(6, 1)` expression — it had no effect.)
    for r in dataframe_to_rows(cls.yunda_template, index=False, header=True):
        ws.append(r)
    # Style the data area (rows >= 6) and any populated header cell.
    for i in range(ws.max_row):
        for j in range(ws.max_column):
            if i >= 5:
                ws = cls.set_style(ws, i, j)
            elif ws.cell(i + 1, j + 1).value is not None:
                ws = cls.set_style(ws, i, j)
    wb.save(name)
def DL_sheet():
    """Append the rows of "DL MADHAVARAM.xlsx" to the DL-parameters sheet
    of 'sample layer 3 format.xlsx' and draw thin borders around the data
    cells (columns A-N, row 2 onward)."""
    ds = pd.read_excel("DL MADHAVARAM.xlsx")
    wb = openpyxl.load_workbook('sample layer 3 format.xlsx')
    # wb[...] replaces the deprecated get_sheet_by_name().
    ws = wb['LAYER 1 PARAMETERS FOR DL']
    thin = Side(style='thin')
    thin_border = Border(left=thin, right=thin, top=thin, bottom=thin)
    for r in dataframe_to_rows(ds, index=False, header=False):
        ws.append(r)
    row_count = ws.max_row
    # One loop replaces fourteen copy-pasted per-column assignments.
    for i in range(2, row_count + 1):
        for col in 'ABCDEFGHIJKLMN':
            ws[col + str(i)].border = thin_border
    wb.save('sample layer 3 format.xlsx')
    print("LAYER 1 PARAMETERS FOR DL Completed")
def to_spreadsheet():
    """Flask endpoint: convert a JSON payload ``{'rows': [...], 'header':
    [...]}`` into an inline xlsx download.

    Responds 400 when the payload is missing, has no ``rows`` list, or the
    data cannot be framed (ValueError).
    """
    payload = request.json
    if not payload or 'rows' not in payload or not isinstance(
            payload['rows'], list):
        return jsonify({'message': 'Bad Request'}), 400
    try:
        use_header = 'header' in payload
        if use_header:
            data_frame = pd.DataFrame(payload['rows'],
                                      columns=payload['header'])
        else:
            data_frame = pd.DataFrame(payload['rows'])
        print(data_frame)
        wb = Workbook()
        ws = wb.active
        for row in dataframe_to_rows(data_frame, index=False,
                                     header=use_header):
            ws.append(row)
        # Serialize the workbook to bytes and hand it back inline.
        response = make_response(save_virtual_workbook(wb))
        response.headers[
            'Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        response.headers[
            'Content-Disposition'] = "inline; filename=spreadsheet.xlsx"
        return response
    except ValueError as error:
        return jsonify({'message': str(error)}), 400
def createWorkbook(self, dframe):
    '''
    Build a Workbook populated from ``dframe``.  Each DataFrame cell is
    copied (after passing through ``self.cleanData``) into the worksheet,
    and the column headers are written on row ``self.topMargin``, above
    the table body.
    :params dframe: The trades and summaries already formatted in the
        correct shape for this new document we are creating.
    :return (wb, ws, nt): The workbook, its worksheet and the original
        DataFrame
    '''
    nt = dframe
    wb = Workbook()
    ws = wb.active

    # Copy every data row (no index, no header row) into the sheet.
    for row in dataframe_to_rows(nt, index=False, header=False):
        ws.append(self.cleanData(row))

    # Write the column names on the header row reserved by topMargin
    # (under the notes and inspire quote).
    for name, cell in zip(nt.columns, ws[self.topMargin]):
        cell.value = name
    return wb, ws, nt
def save_df_to_excel(file_name, sheet_name, df):
    """
    Save a dataframe into one sheet of an Excel workbook.

    :param file_name: target xlsx file (created if it cannot be loaded)
    :param sheet_name: worksheet to write into (created if missing)
    :param df: the DataFrame to persist (written with index and header)
    :return: True on success, False otherwise
    """
    if file_name is None or sheet_name is None or df is None:
        return False
    if 'openpyxl' not in sys.modules:
        print(u'can not import openpyxl', file=sys.stderr)
        return False
    try:
        ws = None
        try:
            # Reuse the existing workbook when the file is readable.
            wb = openpyxl.load_workbook(file_name)
        except Exception:
            # Otherwise start a fresh workbook whose active sheet is ours.
            # (narrowed from a bare except)
            wb = openpyxl.Workbook()
            ws = wb.active
            ws.title = sheet_name
        try:
            # Locate the target worksheet in an existing workbook.
            if ws is None:
                ws = wb[sheet_name]
        except Exception:
            # Sheet missing: create it.
            ws = wb.create_sheet()
            ws.title = sheet_name
        rows = dataframe_to_rows(df)
        for r_idx, row in enumerate(rows, 1):
            for c_idx, value in enumerate(row, 1):
                ws.cell(row=r_idx, column=c_idx, value=value)
        # Save the workbook
        wb.save(file_name)
        wb.close()
        # Fixed: the docstring promises True/False, but the original fell
        # off the end and returned None on success.
        return True
    except Exception as ex:
        import traceback
        print(u'save_df_to_excel exception:{}'.format(str(ex)),
              traceback.format_exc(), file=sys.stderr)
        return False
def tls():
    """Export one worksheet per base in BASE_LOOKUP, filled from
    tls_get_base_df, into the TLS 2017 comparison workbook."""
    wb = Workbook()
    # each base has a sheet, named "<table>_<id_field>"
    # (the original re-checked `base not in BASE_LOOKUP` inside this loop,
    # which can never be true — dead code removed)
    for base in BASE_LOOKUP:
        base_table = BASE_LOOKUP[base][0]
        base_id = BASE_LOOKUP[base][1]
        ws = wb.create_sheet(title='_'.join([base_table, base_id]))
        base_df = tls_get_base_df(base)
        for row in dataframe_to_rows(base_df, index=False):
            ws.append(row)
    wb.save(r'E:\Yichuan\Comparative_analysis_2017\tls_2017_full.xlsx')
def wh():
    """Export one worksheet per base in BASE_LOOKUP, filled from
    wh_get_base_df, into the WH 2016 comparison workbook."""
    wb = Workbook()
    # each base has a sheet, named "<table>_<id_field>"
    # (the original re-checked `base not in BASE_LOOKUP` inside this loop,
    # which can never be true — dead code removed)
    for base in BASE_LOOKUP:
        base_table = BASE_LOOKUP[base][0]
        base_id = BASE_LOOKUP[base][1]
        ws = wb.create_sheet(title='_'.join([base_table, base_id]))
        base_df = wh_get_base_df(base)
        for row in dataframe_to_rows(base_df, index=False):
            ws.append(row)
    wb.save('wh_2016_full.xlsx')
def _fill_bi(self, workbook, data_dict, worksheet_range):
    """Evaluate the pandas snippets declared under each worksheet's
    ``_BI_`` key and write the resulting DataFrame into the target sheet.

    ``data_dict`` maps a sheet name (str) or 1-based sheet index (int) to
    a worksheet spec; a spec may carry ``_BI_`` as a dict of
    ``{anchor_cell: {'df': source_sheet_name, 'oper_code': pandas code}}``.
    NOTE(review): uses ``dict.iteritems()``, so this code targets
    Python 2 — confirm before porting.
    """
    for sheet_name in data_dict:
        worksheet = data_dict[sheet_name]
        # Resolve the destination sheet by name or by 1-based position.
        if isinstance(sheet_name, str):
            st = get_sheet_by_name(workbook, sheet_name)
        elif isinstance(sheet_name, int):
            st = workbook.worksheets[sheet_name - 1]
        if not st:
            raise ValidationError(
                _('Sheet %s not found!') % sheet_name)
        if not worksheet.get('_BI_', False):
            continue
        for rc, bi_dict in worksheet.get('_BI_', {}).iteritems():
            # Each BI entry must supply exactly the two required keys.
            req_field = ['df', 'oper_code']
            key_field = bi_dict.keys()
            if set(req_field) != set(key_field):
                raise ValidationError(
                    _('_BI_ requires \n'
                      ' - df: initial DataFrame from worksheet\n'
                      ' - oper_code: pandas operation code'))
            # Get dataframe: load the source sheet's recorded range.
            src_df = bi_dict['df']
            src_st = get_sheet_by_name(workbook, src_df)
            df = load_workbook_range(worksheet_range[src_df], src_st)
            eval_context = {'df': df, 'pd': pd, 'np': np}
            # Get DF using safe_eval method (exec mode: statements allowed)
            df = safe_eval(bi_dict['oper_code'], eval_context,
                           mode="exec", nocopy=True)
            if 'result' in eval_context:  # use result=...
                df = eval_context['result']
            if df is None:
                # Fallback: treat oper_code as a bare expression.
                # NOTE(review): raw eval of template-provided code — unsafe
                # if the template source is untrusted.
                df = eval(bi_dict['oper_code'], eval_context)
            if df.empty:
                continue
            df = df.reset_index()
            rows = dataframe_to_rows(df, index=False, header=False)
            # Get init cell index: write the frame starting at anchor rc.
            xy = coordinate_from_string(rc)
            c = column_index_from_string(xy[0])
            r = xy[1]
            for r_idx, row in enumerate(rows, r):
                for c_idx, value in enumerate(row, c):
                    st.cell(row=r_idx, column=c_idx, value=value)
def create_file(df, file_name):
    """Write the report frame to ``file_name``, applying the module-level
    per-column formats and a grey header row."""
    # Force integer account ids before export.
    df['linked_account'] = df['linked_account'].astype(int)

    wb = Workbook()
    ws = wb.active
    for record in dataframe_to_rows(df, index=False, header=True):
        ws.append(record)

    # Per-column number format, font, and width from the formats table.
    for col, spec in formats.items():
        for cell in ws[col]:
            cell.number_format = spec['number_format']
            cell.font = spec['font']
        ws.column_dimensions[col].width = spec['width']

    # Header row: bold font on a grey fill.
    for cell in ws['1']:
        cell.font = font_h
        cell.fill = PatternFill(
            fill_type = 'solid', start_color = 'CCCCCC', end_color = 'FFFFFF' )

    wb.save( file_name )
def df_to_xlsx(
    df_map,
    filename=None,
    columns_map=None,
    has_headers=True,
    return_workbook=False
):
    """Write a dict of dataframes to an Excel xlsx file.

    :param df_map: dict ``{sheet_name: DataFrame}``
    :param filename: target path ending in "xlsx" (unused when
        ``return_workbook`` is True)
    :param columns_map: optional ``{sheet_name: [columns]}`` used to
        select/reorder columns per sheet
    :param has_headers: write the DataFrame header row
    :param return_workbook: return the Workbook instead of saving
    :return: the Workbook, a ``{sheet_name: row_count}`` dict, or False
        when ``df_map`` is empty
    """
    if filename:
        assert filename.endswith("xlsx")
        logging.info("Writing dataframe to xlsx file [{}]".format(filename))
    else:
        logging.info("Writing dataframe to in-memory workbook")
    if not df_map:
        # logging.warning replaces the deprecated logging.warn alias
        logging.warning("[df_map] is empty, no xlsx created")
        return False
    for sheet_name, _df in df_map.items():
        logging.info(' Sheet [{}]: {} rows'.format(sheet_name, num_rows(_df)))
    if not isinstance(df_map, dict):
        raise Exception('[df_map] must be a dict {sheetname: df}')
    wb = Workbook()
    # Remove default sheet.  wb.sheetnames / wb[...] / wb.remove replace
    # get_sheet_names / get_sheet_by_name / remove_sheet, which were
    # deprecated and removed in openpyxl 3.1.
    sheet_names = wb.sheetnames
    if len(sheet_names) == 1:
        wb.remove(wb[sheet_names[0]])

    def _remove_bad_xlsx_vals(val):
        # Strip control characters that openpyxl rejects in cell values.
        if is_val_empty(val):
            return None
        if not isinstance(val, str):
            return val
        val = re.sub(r'[\000-\010]|[\013-\014]|[\016-\037]', '', val)
        return val

    for sheetname, df in sorted(df_map.items()):
        df = df.applymap(_remove_bad_xlsx_vals)
        ws = wb.create_sheet(sheetname)
        index_name = df.index.name
        has_index = True if index_name else False
        if columns_map and sheetname in columns_map:
            df = df[columns_map[sheetname]]
        for r in dataframe_to_rows(df, index=has_index, header=has_headers):
            ws.append(r)
        # Style the first column and header row like pandas' default export.
        for cell in ws['A'] + ws[1]:
            cell.style = 'Pandas'
        # Freeze panes just below/right of the header and index, if any.
        freeze_cell = None
        if has_index:
            ws['A1'] = index_name
            freeze_cell = 'B2' if has_headers else 'B1'
        elif has_headers:
            freeze_cell = 'A2'
        if freeze_cell:
            logging.info("Freezing cell {}".format(freeze_cell))
            ws.freeze_panes = freeze_cell
    if return_workbook:
        return wb
    wb.save(filename)
    return {sheet_name: num_rows(_df) for sheet_name, _df in df_map.items()}
def get_jenkins_jobs(server):
    """ Get excel sheet with statistics of jenkins jobs """
    columns = [
        'name', 'Url', 'Type', 'Color', 'Disk', 'description', 'buildable',
        'lastBuild', 'lastBuildDate', 'lastSuccessfulBuild',
        'lastSuccessfulBuildDate', 'lastUnsuccessfulBuild',
        'lastUnsuccessfulBuildDate',
    ]
    ca_certificates = '/etc/ssl/certs/nabla.crt'
    headers = {
        'Accept': 'application/json',
    }
    # Use urllib3 instead of requests, no ssl or auth required for general
    # requests to jenkinsapi
    http = urllib3.PoolManager()
    url_main = urljoin(server, 'api/json')
    r = http.request('GET', url_main)
    print(type(r.data.decode('utf-8')))
    jenkins_content = json.loads(r.data.decode('utf-8'))
    # Basic record per job; entries without a 'color' (folders, views —
    # e.g. "_class" : "hudson.model.ListView") get Color 'none'.
    jobs = {}
    for job in jenkins_content['jobs']:
        jobs[job['name']] = {
            'name': job['name'],
            'Url': job['url'],
            'Type': job['_class'],
            'Color': job.get('color', 'none'),
        }
    # Add disk space data for all jobs (from local text file):
    jobs = append_disk_space(jobs)
    for i, j in jobs.items():
        print('DISK', j['Disk'], 'JOB', i)
    # Update all job data in parallel (one request per job).
    jenkins_jobs = list(jobs.values())
    print(jenkins_jobs)
    add_job_properties_partial = partial(add_job_properties, http=http)
    with Pool(processes=8) as pool:
        jobs_updated = {
            jenkins_job['name']: jenkins_job
            for jenkins_job in pool.map(
                add_job_properties_partial,
                jenkins_jobs,
            )
        }
    # Best-effort debug dump; never fatal (narrowed from a bare except).
    try:
        with open('sample_output.txt', 'w') as f_out:
            f_out.write(json.dumps(jobs_updated))
    except Exception:
        pass
    # Convert urls to excel-style hyperlinks"
    # Convert Disk Blocks to MB
    # Strip Job type to actual job type
    for job in jobs_updated:
        jobs_updated[job]['Disk'] /= 1000
        jobs_updated[job]['Url'] = '=HYPERLINK("{}", "{}")'.format(
            jobs_updated[job]['Url'],
            jobs_updated[job]['Url'],
        )
        jobs_updated[job]['Type'] = jobs_updated[job]['Type'].split('.')[-1]
    # Build the frame in one shot — DataFrame.append (used row-by-row in
    # the original) was deprecated and removed in pandas 2.0.
    records = []
    for job_updated in jobs_updated.values():
        print('job updated', job_updated)
        records.append(job_updated)
    df = pd.DataFrame(records, columns=columns)
    print(df)
    workbook = Workbook()
    excel_sheet = workbook.active
    for row in dataframe_to_rows(df, index=False, header=True):
        excel_sheet.append(row)
    ### Formatting ###
    lengths = [
        len(name) if len(name) > 7 else 7
        for name in columns
    ]
    lengths.extend([5, 5, 5, 5])
    for index, column_cells in enumerate(excel_sheet.columns):
        # column_letter: Cell.column returns an int in openpyxl >= 2.6, so
        # it can no longer be used as a column_dimensions key directly.
        excel_sheet.column_dimensions[
            column_cells[0].column_letter].width = lengths[index]
    for row_cells in excel_sheet.rows:
        excel_sheet.row_dimensions[row_cells[0].row].height = 12
    font_global = Font(name='Arial', size=10)
    font_red = Font(name='Arial', size=10, color='00FF0000')
    font_blue = Font(name='Arial', size=10, color='000000FF')
    font_green = Font(name='Arial', size=10, color='00008000')
    font_yellow = Font(name='Arial', size=10, color='00C9C900')
    font_lgrey = Font(name='Arial', size=10, color='00C0C0C0')
    font_dgrey = Font(name='Arial', size=10, color='00505050')
    font_bold = Font(name='Arial', size=10, bold=True)
    border = Border(bottom=Side(border_style='thin', color='000000'))
    # Fill #f5f5dc
    for row in excel_sheet:
        for cell in row:
            cell.font = font_global
    for cell in excel_sheet['1:1']:
        cell.font = font_bold
        cell.border = border
    # Color the 'Color' column (D); Jenkins reports success as 'blue',
    # shown here as green.  Keyword args replace the positional range
    # string, which openpyxl 3 no longer accepts in iter_rows().
    for row in excel_sheet.iter_rows(
        min_row=excel_sheet.min_row, max_row=excel_sheet.max_row,
        min_col=4, max_col=4,
    ):
        for cell in row:
            if cell.value == 'blue':
                cell.font = font_green
                cell.value = 'green'
            elif cell.value == 'red':
                cell.font = font_red
            elif cell.value == 'yellow':
                cell.font = font_yellow
            elif cell.value == 'disabled':
                cell.font = font_lgrey
    workbook.save('jenkins_test.xlsx')
def create_workbook(df_hash, df_totals, file_name):
    """Write one formatted sheet per entry of ``df_hash`` and save.

    :param df_hash: dict ``{sheet_name: DataFrame}`` of report data
    :param df_totals: dict of totals frames; currently only logged — the
        totals sheet was never implemented (a large commented-out draft
        was removed from the original)
    :param file_name: target xlsx path
    """
    wb = Workbook()
    for name, df in df_hash.items():
        print('Sheet: ' + name)
        # create_sheet returns the new sheet, so the deprecated
        # get_sheet_by_name round-trip is unnecessary.
        ws = wb.create_sheet(title=name)
        # Force integer account ids before export.
        df['linked_account'] = df['linked_account'].astype(int)
        for r in dataframe_to_rows(df, index=False, header=True):
            ws.append(r)
        # Per-column number format, font and width.
        for col in formats.keys():
            for cell in ws[col]:
                cell.number_format = formats[col]['number_format']
                cell.font = formats[col]['font']
            ws.column_dimensions[col].width = formats[col]['width']
        # Header row: bold font on a grey fill.
        for cell in ws['1']:
            cell.font = font_h
            cell.fill = PatternFill(
                fill_type = 'solid', start_color = 'CCCCCC', end_color = 'FFFFFF' )
    # Totals are only logged, matching the original (unimplemented) code.
    for name in df_totals.keys():
        print('Totals: ' + name)
    # Drop the default sheet; wb[...]/wb.remove replace the deprecated
    # get_sheet_by_name / remove_sheet.
    wb.remove(wb['Sheet'])
    wb.save( file_name )