def json_to_excel_reader(file_name):
    """Rebuild a reader-info .xlsx recovery file from a JSON dump.

    Creates two sheets ("读者信息" filled with one row per reader, "借阅记录"
    left with headers only) and saves next to the source file with a
    recovery-hint suffix.  Date fields stored as '%Y-%m-%d %H:%M:%S'
    strings are parsed back into datetime objects.

    Fixes vs. the original: ``is not None`` instead of ``!= None`` and
    ``enumerate`` instead of range(len(...)) index loops.
    """
    wb = Workbook()
    ws0 = wb.worksheets[0]
    ws0.title = u"读者信息"
    ws1 = wb.create_sheet()
    ws1.title = u'借阅记录'
    title = [
        "借书号", "姓名", "性别", "单位", "所借书目", "借书日期", "应还日期",
        "还书日期", "借书记录", "借书权限", "过期次数", "借阅次数"
    ]
    title2 = ["时间", "单位", "姓名", "借书号", "动作", "ISBN", "书名", "书籍位置"]
    # Header rows (openpyxl cells are 1-based).
    for col, header in enumerate(title, start=1):
        ws0.cell(row=1, column=col).value = header
    for col, header in enumerate(title2, start=1):
        ws1.cell(row=1, column=col).value = header
    with open(file_name, "r") as f:
        json_data = json.load(f)
    date_keys = ("借书日期", "应还日期", "还书日期")
    for reader in json_data:
        reader_single = []
        for key in title:
            value = json_data[reader][key]
            if key in date_keys and value is not None:
                # Re-hydrate date strings so Excel stores real dates.
                value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
            reader_single.append(value)
        existed = ws0.max_row
        for col, cell_value in enumerate(reader_single, start=1):
            ws0.cell(row=existed + 1, column=col).value = cell_value
    wb.save(file_name[:file_name.find(".")] +
            "(恢复文件,请将括号连带此提示删除并替换损坏文件即可使用).xlsx")
def json_to_excel_library(file_name):
    """Rebuild the book-information .xlsx recovery file from a JSON dump.

    Sheet "书籍信息" receives one row per book; "书籍丢失信息" only gets its
    header row.  The output name carries a recovery-hint suffix.
    """
    wb = Workbook()
    ws0 = wb.worksheets[0]
    ws0.title = u"书籍信息"
    ws1 = wb.create_sheet()
    ws1.title = u'书籍丢失信息'
    title = [
        "ISBN", "书籍名称", "作者", "出版社", "出版日期", "页数", "价格", "主题",
        "馆藏本数", "索书号", "内容简介", "信息来源", "借阅次数", "借阅记录", "书籍位置"
    ]
    title2 = ["单位", "姓名", "性别", "ISBN", "书名", "价格", "登记时间"]
    # Header rows of both sheets (cells are 1-based).
    for col, header in enumerate(title):
        ws0.cell(row=1, column=col + 1).value = header
    for col, header in enumerate(title2):
        ws1.cell(row=1, column=col + 1).value = header
    with open(file_name, "r") as fh:
        json_data = json.load(fh)
    # One row per book, columns in header order.
    for book in json_data:
        record = json_data[book]
        row_values = [record[key] for key in title]
        next_row = ws0.max_row + 1
        for col, cell_value in enumerate(row_values):
            ws0.cell(row=next_row, column=col + 1).value = cell_value
    wb.save(file_name[:file_name.find(".")] +
            "(恢复文件,请将括号连带此提示删除并替换损坏文件即可使用).xlsx")
def creta_table():
    """Paint hsq.png onto a worksheet, one pixel per cell background fill.

    Fixes vs. the original: raw strings for the Windows paths (a bare
    "C:\\Users" is a Python-3 SyntaxError because of \\U), print() calls
    instead of Python-2 print statements, column widths keyed by column
    letter instead of a bare int, and the cell-size loops hoisted out of
    the per-pixel loop.
    """
    wb = Workbook()
    ws = wb.active
    im = Image.open(r"C:\Users\Administrator\Desktop\hsq.png")
    pix = im.load()
    width, height = im.size
    for x in range(width):
        for y in range(height):
            print(x, y)
            # RGB triple of this pixel (drop any alpha channel).
            R, G, B = pix[x, y][0:3]
            colour_code = Joining(R, G, B)
            # The image is transposed/flipped onto the sheet:
            # pixel column x -> sheet column (width - x), row y -> sheet row y + 1.
            row = int(width) - int(x)
            col = int(y) + 1
            print(row, col)
            ws.cell(row=col, column=row).fill = openpyxl.styles.PatternFill(
                fill_type='solid', fgColor=colour_code)
    # Shrink cells to roughly pixel-sized squares.
    for c in range(1, width + 1):
        ws.column_dimensions[openpyxl.utils.get_column_letter(c)].width = 0.1
    for r in range(1, height + 1):
        ws.row_dimensions[r].height = 1
    wb.save(r'C:\Users\Administrator\Desktop\zm5.xlsx')
def write_to_excel_with_openpyxl(self, records, head_row, save_excel_name="save.xlsx"):
    """Write a header row plus data rows into a new workbook and save it.

    :param records: iterable of row sequences, one per data row
    :param head_row: sequence of column titles written as row 1
    :param save_excel_name: output path for the .xlsx file

    Fixes vs. the original: unconditional ``save_excel_name.decode('utf-8')``
    raised AttributeError on Python-3 str -- decode only a bytes argument;
    commented-out dead code removed.
    """
    wb = Workbook()
    # Decode only if a bytes filename was passed (Python-2-style caller);
    # a str passes through untouched.
    dest_filename = (save_excel_name.decode('utf-8')
                     if isinstance(save_excel_name, bytes) else save_excel_name)
    ws = wb.worksheets[0]
    ws.title = "range names"
    # Header row first, then every data row.
    ws.append(head_row)
    for record in records:
        ws.append(record)
    wb.save(filename=dest_filename)
def combine_twitter_csv(path=None, analysisid=None, analysistype='hashtag',
                        delim=',', quote='"', lookup=None):
    """Combine per-file twitter CSV stats into one multi-sheet workbook.

    Every '*<analysistype>*.csv' file in *path* becomes one sheet; cells
    starting with the marker '_HT_' are replaced, when a lookup CSV was
    given, by 'name (hashtag)'.

    Fixes vs. the original: openpyxl cells are 1-based (the old code wrote
    row 0 / column 0), the lookup file is opened in a valid text mode
    ('rbU' is not a legal mode), create_sheet is called with an index
    keyword, files are closed via context managers, and the workbook is
    saved to a path instead of a text-mode file object.
    """
    if path[len(path) - 1] != '/':
        path += '/'
    o_lookup = {}
    lu = False
    if lookup is not None:
        # Two-column lookup CSV: display name, hashtag.
        lu = True
        with open(path + lookup, 'r') as fin:
            for r in csv.reader(fin):
                o_lookup[r[1]] = r[0]
    workbook = Workbook()
    filelist = [fn for fn in os.listdir(path)
                if fn.find(analysistype) >= 0 and fn.find(".csv") > 0]
    sheet_index = 0
    for fn in filelist:
        sheet = workbook.create_sheet(index=sheet_index)
        title = fn.replace(".csv", "").replace(analysistype, "").strip("_")
        if len(title) > 31:          # Excel caps sheet names at 31 chars
            title = title[0:30]
        sheet.title = title
        with open(path + fn) as csv_in:
            for row_index, row in enumerate(
                    csv.reader(csv_in, delimiter=delim, quotechar=quote)):
                for col_index, col in enumerate(row):
                    if lu and col.find("_HT_") == 0:
                        tag = col.replace("_HT_", "")
                        col = o_lookup[tag] + " (" + tag + ")"
                    sheet.cell(row=row_index + 1, column=col_index + 1).value = col
        sheet_index += 1
    workbook.save(path + (analysistype + "_network_stats" + "_" + analysisid).upper()
                  + ".xlsx")
def create_input_data():
    """Copy range A1:U<max_row> of NSEOptions.xlsx 'Sheet2' into a fresh
    workbook NSEOptions1.xlsx containing a single sheet named 'NSE'.

    Fix vs. the original: the final save was called on wb1 (the workbook
    loaded read-only from the source file), so the values copied into wb2
    were never written to disk -- the destination workbook is saved now.
    Unused locals (workbook_name, dttime) removed.
    """
    readfile = "C:\\NSE\\inputs\\NSEOptions.xlsx"
    writefile = "C:\\NSE\\inputs\\NSEOptions1.xlsx"
    # Create the destination workbook with only an 'NSE' sheet.
    wb = Workbook()
    wb.create_sheet('NSE')
    print(wb.sheetnames)
    wb.remove(wb['Sheet'])          # drop the default sheet
    wb.save(filename=writefile)
    print(wb.sheetnames)
    wb1 = openpyxl.load_workbook(readfile, data_only=True)   # values, not formulas
    wb2 = openpyxl.load_workbook(writefile)
    sheet1 = wb1['Sheet2']
    sheet2 = wb2['NSE']
    max_rows = sheet1.max_row
    # Cell-by-cell copy of columns A..U.
    for row in sheet1['A1':'U' + str(max_rows)]:
        for cell in row:
            sheet2[cell.coordinate].value = cell.value
    wb2.save(writefile)
def process_data(self, **kwargs):
    """Merge the daily ranking files for every day in [day_head, day_tail)
    into one workbook, saved under a name that records the date range and
    the include-Jiangsu / include-Nanjing flags."""
    if ("day_head" not in kwargs) or ("day_tail" not in kwargs):
        print("Arguments must contain: day_head, day_tail")
        return
    day_head = kwargs["day_head"]
    day_tail = kwargs["day_tail"]
    wb = Workbook()
    ws = wb.active
    end_str = day_tail.strftime("%Y-%m-%d")
    current = day_head
    row_cnt = 2                       # data rows start below the header
    while True:
        day_str = current.strftime("%Y-%m-%d")
        if day_str == end_str:        # day_tail itself is excluded
            break
        print("Processing...", day_str)
        daily_file = self.data_path + "\\" + day_str + "--" + day_str + ".xls"
        self.__combine_data(daily_file, row_cnt, day_str, ws)
        current = current + datetime.timedelta(days=1)
        row_cnt += 1
    jiangsu = "包含江苏" if self.include_jiangsu else "剔除江苏"
    nanjing = "包含南京" if self.include_nanjing else "剔除南京"
    t_h = day_head.strftime("%Y-%m-%d")
    t_t = (day_tail - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    wb.save(self.data_path + "\\" + "全市乡村游客源排名" + jiangsu + nanjing
            + t_h + "--" + t_t + ".xlsx")
def print_UP_list(UP_list, UP_meta_info, base):
    """Dump the UP list plus per-UP metadata to UP_print.xlsx.

    Fixes vs. the original: ``Workbook(encoding=...)``, ``Worksheet(wb, ...)``
    and ``wb.add_sheet()`` were removed from openpyxl -- the sheet is created
    with ``create_sheet()``; print is called as a function.
    """
    from openpyxl.workbook import Workbook
    filename = 'UP_print.xlsx'
    wb = Workbook()
    ws = wb.create_sheet(title='UP_list')  # creating a sheet inside the workbook
    ws.freeze_panes = 'A2'                 # keep the header row visible
    header = ['#', 'UP name', 'unit', 'country', 'infrastructure']
    for i in range(6):
        header.append('Category ' + str(i))
    ws.append(header)
    for i, UP in enumerate(UP_list):
        line = [i + base, UP,
                UP_meta_info[UP]['unit'],
                UP_meta_info[UP]['Country'],
                UP_meta_info[UP]['Infrastructure']]
        # Up to six category types; stop at the first missing index.
        for j in range(6):
            try:
                line.append(UP_meta_info[UP]['Category type'][j])
            except IndexError:
                break
        ws.append(line)
    print('saving in excel sheet named: ' + filename)
    wb.save(filename)
def initialSuite(self, path, suitename):
    """Create the suite directory layout and an empty test-case workbook.

    Builds <path>/<suitename>/{files,results}, creates an empty script file,
    and writes a 'Summary' sheet whose header row is bordered and bold with
    every populated column 20 characters wide.  Aborts when the script or
    test-case file already exists.

    Fix vs. the original: Python-2 print statement replaced with print().
    """
    if not os.path.exists(os.path.join(path, suitename)):
        os.mkdir(os.path.join(path, suitename))
    os.chdir(os.path.join(path, suitename))
    os.mkdir("files")
    os.mkdir("results")
    if os.path.exists(self.ScriptName) or os.path.exists(self.TestCaseName):
        print("the testcase has already exists, please check it")
        sys.exit(0)
    os.system("type nul>%s" % self.ScriptName)     # Windows equivalent of 'touch'
    wb = Workbook()
    sheet = wb.active
    sheet.title = "Summary"
    sheet.append(self.SumTitleLine)
    MaxVerColumn = sheet.max_column
    # Uniform 20-char width for every populated column.
    for i in range(MaxVerColumn):
        sheet.column_dimensions[get_column_letter(i + 1)].width = 20
    thin = Side(border_style='thin')
    for i in range(1, MaxVerColumn + 1):
        sheet.cell(column=i, row=1).border = Border(left=thin, right=thin,
                                                    top=thin, bottom=thin)
        sheet.cell(column=i, row=1).font = Font(name='Calibri', size=11, bold=True)
    wb.save(self.TestCaseName)
def process_dir(data_dir_path, output_file_path):
    """Create a six-sheet stock-movement workbook, run every file in
    *data_dir_path* through process_file(), then save the result.

    Fixes vs. the original: Workbook.get_active_sheet() is deprecated
    (use .active), and create_sheet()'s first positional argument is the
    title, not the index -- the old calls passed bare integers.
    """
    wb_new = Workbook()
    ws = wb_new.active
    ws.title = 'Deliveries'
    # Remaining sheets, in display order after 'Deliveries'.
    for idx, sheet_name in enumerate(['Returns', 'Wastage', 'Staff Meals',
                                      'Transfers In', 'Transfers Out'], start=1):
        wb_new.create_sheet(title=sheet_name, index=idx)
    # Only regular files; sub-directories are skipped.
    onlyfiles = [f for f in listdir(data_dir_path)
                 if isfile(join(data_dir_path, f))]
    for f in onlyfiles:
        process_file(data_dir_path + f, wb_new)
    wb_new.save(output_file_path)
def xls2xlsx(filename):
    """Convert a legacy .xls workbook to .xlsx, sheet by sheet, cell by cell.

    The copy is saved alongside the input with an 'x' appended to the
    extension (.xls -> .xlsx).

    Fix vs. the original: get_active_sheet() is deprecated in openpyxl --
    the .active property is used instead.
    """
    book_xls = xlrd.open_workbook(filename)
    book_xlsx = Workbook()
    sheet_names = book_xls.sheet_names()
    for sheet_index, sheet_name in enumerate(sheet_names):
        sheet_xls = book_xls.sheet_by_name(sheet_name)
        if sheet_index == 0:
            # Reuse the default sheet the new workbook starts with.
            sheet_xlsx = book_xlsx.active
            sheet_xlsx.title = sheet_name
        else:
            sheet_xlsx = book_xlsx.create_sheet(title=sheet_name)
        # xlrd is 0-based, openpyxl 1-based.
        for row in range(sheet_xls.nrows):
            for col in range(sheet_xls.ncols):
                sheet_xlsx.cell(row=row + 1, column=col + 1).value = \
                    sheet_xls.cell_value(row, col)
    book_xlsx.save(filename + "x")
def test_dump_sheet_with_styles():
    # Round-trip test for the optimized (write-only) workbook path: append
    # str / int / datetime / formula rows, save, reload, and compare values.
    # NOTE(review): Workbook(optimized_write=True) is the pre-2.4 spelling
    # of write_only=True -- this test targets an old openpyxl release.
    test_filename = _get_test_filename()
    wb = Workbook(optimized_write=True)
    ws = wb.create_sheet()
    letters = [get_column_letter(x + 1) for x in range(20)]
    expected_rows = []
    # 20 rows of cell-reference-like strings ('A1'..'T20').
    for row in range(20):
        expected_rows.append(['%s%d' % (letter, row + 1) for letter in letters])
    # 20 rows of ints: the row number repeated across all 20 columns.
    for row in range(20):
        expected_rows.append([(row + 1) for letter in letters])
    # 10 rows of datetimes: month cycles with the column, day = row + 1.
    for row in range(10):
        expected_rows.append([datetime(2010, ((x % 12) + 1), row + 1)
                              for x in range(len(letters))])
    # 20 rows of formulas referencing the first block.
    for row in range(20):
        expected_rows.append(['=%s%d' % (letter, row + 1) for letter in letters])
    for row in expected_rows:
        ws.append(row)
    wb.save(test_filename)
    wb2 = load_workbook(test_filename)
    ws = wb2.worksheets[0]
    # The last 20 rows (formulas) are excluded from the value comparison.
    for ex_row, ws_row in zip(expected_rows[:-20], ws.rows):
        for ex_cell, ws_cell in zip(ex_row, ws_row):
            assert ex_cell == ws_cell.value
    os.remove(test_filename)
def render_xlsx(self, outfd, data):
    """Render pipe-delimited timeline lines into an .xlsx report.

    Each input line is 'Time|Type|Item|Details|Reason'.  When --highlight
    is set the saved file is reopened, the header row is bolded, and rows
    whose Type/Item/Details value appears in self.suspicious get a colour
    fill plus a canned reason in the following column.

    NOTE(review): written against openpyxl 1.x and Python 2 --
    ws.cell('A1'), get_sheet_by_name(), .style.font.bold, optimized_write
    and xrange are all legacy spellings.
    """
    wb = Workbook(optimized_write = True)
    ws = wb.create_sheet()
    ws.title = 'Timeline Output'
    header = ["Time", "Type", "Item", "Details", "Reason"]
    ws.append(header)
    total = 1                     # header counts as row 1
    for line in data:
        coldata = line.split("|")
        ws.append(coldata)
        total += 1
    wb.save(filename = self._config.OUTPUT_FILE)
    if self._config.HIGHLIGHT != None:
        # Second pass: the optimized writer cannot style cells in place,
        # so reload the saved file to apply styles.
        wb = load_workbook(filename = self._config.OUTPUT_FILE)
        ws = wb.get_sheet_by_name(name = "Timeline Output")
        # Bold the header row.
        for col in xrange(1, len(header) + 1):
            ws.cell("{0}{1}".format(get_column_letter(col), 1)).style.font.bold = True
        for row in xrange(2, total + 1):
            # Scan columns B..D (Type/Item/Details) for suspicious values.
            for col in xrange(2, len(header)):
                if ws.cell("{0}{1}".format(get_column_letter(col), row)).value in self.suspicious.keys():
                    self.fill(ws, row, len(header) + 1, self.suspicious[ws.cell("{0}{1}".format(get_column_letter(col), row)).value]["color"])
                    ws.cell("{0}{1}".format(get_column_letter(col + 1), row)).value = self.suspicious[ws.cell("{0}{1}".format(get_column_letter(col), row)).value]["reason"]
        wb.save(filename = self._config.OUTPUT_FILE)
def test_open_too_many_files():
    """Creating ~200 sheets in an optimized workbook must not exhaust file
    descriptors when saving."""
    test_filename = _get_test_filename()
    wb = Workbook(optimized_write=True)
    # over 200 worksheets should raise an OSError ('too many open files')
    for _ in range(200):
        wb.create_sheet()
    wb.save(test_filename)
    os.remove(test_filename)
def bc_generate_xlsx(fn):
    """Generate the fiwalk file-object Excel report for one disk image.

    Fixes vs. the original: the ws.cell('A1')-style string lookup was
    removed from openpyxl -- the header row is written with 1-based
    row/column keywords, via a loop instead of eleven copy-pasted lines.
    """
    wb = Workbook()
    dest_filename = fn.outdir + "/" + filename_from_path(fn.fiwalk_xmlfile) + ".xlsx"
    print("Generating Excel report ", dest_filename)
    ws = wb.worksheets[0]
    ws.title = "File Object Information"
    headers = ["Partition", "Filename", "Extension", "Filesize", "File format",
               "Change time", "Access time", "Create time", "Modification time",
               "MD5 Hash", "SHA1 Hash"]
    for col, text in enumerate(headers, start=1):
        ws.cell(row=1, column=col).value = text
    # Fill the data rows from the fiwalk XML.
    process_files(fn.fiwalk_xmlfile, ws)
    wb.save(filename=dest_filename)
def process_dir(data_dir_path, output_file_path):
    """Create a six-sheet stock-movement workbook, run every file in
    *data_dir_path* through process_file(), then save the result.

    Fixes vs. the original: Workbook.get_active_sheet() is deprecated
    (use .active), and create_sheet()'s first positional argument is the
    title, not the index -- the old calls passed bare integers.
    """
    wb_new = Workbook()
    ws = wb_new.active
    ws.title = 'Deliveries'
    # Remaining sheets, in display order after 'Deliveries'.
    for idx, sheet_name in enumerate(['Returns', 'Wastage', 'Staff Meals',
                                      'Transfers In', 'Transfers Out'], start=1):
        wb_new.create_sheet(title=sheet_name, index=idx)
    # Only regular files; sub-directories are skipped.
    onlyfiles = [f for f in listdir(data_dir_path)
                 if isfile(join(data_dir_path, f))]
    for f in onlyfiles:
        process_file(data_dir_path + f, wb_new)
    wb_new.save(output_file_path)
def convert_xls_to_xlsx(src_file_path, dst_file_path):
    """
    Purpose: Convert xls file to xlsx. Assumes simple files, no graphics or advanced formatting
    :param src_file_path:
    :param dst_file_path:
    :return:
    """
    source_book = xlrd.open_workbook(src_file_path)
    target_book = Workbook()
    for index, sheet_name in enumerate(source_book.sheet_names()):
        source_sheet = source_book.sheet_by_name(sheet_name)
        if index == 0:
            # Reuse the default sheet for the first source sheet.
            target_sheet = target_book.active
            target_sheet.title = sheet_name
        else:
            target_sheet = target_book.create_sheet(title=sheet_name)
        # xlrd is 0-based, openpyxl 1-based.
        for r in range(source_sheet.nrows):
            for c in range(source_sheet.ncols):
                target_sheet.cell(row=r + 1, column=c + 1).value = \
                    source_sheet.cell_value(r, c)
    target_book.save(dst_file_path)
def saveData():
    # Persist per-person damage totals to fights.xlsx: one row per name with
    # Ignore / DmgDealed / DmgTake totals plus one extra column per battle.
    # NOTE(review): openpyxl-1.x API -- ws.cell('A1') string addressing and
    # the 0-based row=0 / row=i-1 keywords near the bottom are legacy
    # semantics; modern openpyxl rejects row=0.
    name = 'fights'
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        # First run: create the file so the unconditional reload below works.
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    i = 1
    ws.cell('A1').value = 'Name'
    ws.cell('C1').value = 'Dammage'
    ws.cell('D1').value = 'Health'
    ws.cell('B1').value = 'Ignore'
    # `persons` is a module-level dict: name -> {'total': {...}, '<battle#>': {...}}
    for n in persons:
        i = i + 1
        ws.cell('A' + str(i)).value = n
        ws.cell('B' + str(i)).value = persons[n]['total']['Ignore']
        ws.cell('C' + str(i)).value = persons[n]['total']['DmgDealed']
        ws.cell('D' + str(i)).value = persons[n]['total']['DmgTake']
        for b in persons[n]:
            if b == 'total':
                continue
            dd = persons[n][b]['DmgDealed']
            # Per-battle columns start after the fixed A-D block.
            ws.cell(row=0, column=int(b) + 3).value = "battle" + b
            ws.cell(row=i - 1, column=int(b) + 3).value = dd
    wb.save(name + '.xlsx')
class ExcelWriter(object):
    """Helper for building a single-sheet Excel file.

    Attributes:
        filename: base name (without extension) used when saving.
        title: title of the worksheet that receives the content.
    """

    def __init__(self, filename, title):
        self.filename = filename
        self.work = Workbook()
        self.chart = self.work.create_sheet()
        self.chart.title = title

    def set_column_width(self, column_dict):
        """Apply widths from a {column letter: width} mapping."""
        if not isinstance(column_dict, dict):
            raise TypeError('column_dict must be a dictionary')
        for column, width in column_dict.items():
            self.chart.column_dimensions[column].width = width

    def add_content_cell(self, cell, value):
        """Write *value* into the cell addressed like 'B3'."""
        self.chart[cell].value = value

    def add_content_row(self, row):
        """Append one row; accepts a list, tuple, range or generator."""
        if not isgenerator(row) and not isinstance(row, (list, tuple, range)):
            raise TypeError(
                'Value must be a list, tuple, range or a generator')
        self.chart.append(row)

    def save(self):
        """Write the workbook to '<filename>.xlsx'."""
        self.work.save(self.filename + '.xlsx')
def separate_xl_content(src_filepath):
    # Split the rows of the source workbook's "Sheet" across multiple output
    # workbooks: column A of each row names the destination file, the
    # remaining columns become the row written there.  Returns the list of
    # filenames written.
    # NOTE(review): Python-2 / openpyxl-1.x code -- use_iterators,
    # internal_value, dict.has_key() and reading a fresh Workbook()'s sheet
    # via get_sheet_by_name are all legacy APIs.
    src_wb = load_workbook(src_filepath, use_iterators=True)
    src_ws = src_wb.get_sheet_by_name(name="Sheet")
    mytree = {}   # destination filename -> list of row-value lists
    for row in src_ws.iter_rows():
        subxlfilename = row[0].internal_value
        if not mytree.has_key(subxlfilename):
            mytree[subxlfilename] = []
        values = []
        for cell in row[1:]:
            values.append(cell.internal_value)
        mytree[subxlfilename].append(values)
    ret = []
    for subxlfilename in mytree.keys():
        wb = Workbook()
        ws = wb.get_sheet_by_name(name="Sheet")
        for values in mytree[subxlfilename]:
            ws.append(values)
        wb.save(subxlfilename)
        ret.append(subxlfilename)
    return ret
def process_data(self, **kwargs):
    """For every scene, merge its daily .xls files over [day_head, day_tail]
    into one workbook topped by a country-name header row.

    Fix vs. the original: the per-day file name concatenated ``cur_day``
    (a date/datetime object) into a string, which raises TypeError -- the
    already-formatted ``cur_day_str`` is used instead.
    """
    if ("day_head" not in kwargs) or ("day_tail" not in kwargs):
        print("Arguments must contain: day_head, day_tail!")
        return
    day_head = kwargs["day_head"]
    day_tail = kwargs["day_tail"]
    country_to_idx = {}    # country name -> column index, shared across days
    for (scene_id, scene_name) in self.scenes.items():
        print("Processing...", scene_name)
        data_dir = self.work_dir + "\\" + scene_name
        if not os.path.exists(data_dir):
            print("No directory named", scene_name)
            continue
        wb = Workbook()
        ws = wb.active
        cur_day = day_head
        cur_day_str = cur_day.strftime("%Y-%m-%d")
        # Iterate through day_tail inclusive (sentinel is day_tail + 1).
        day_t = day_tail + datetime.timedelta(days=1)
        day_t_str = day_t.strftime("%Y-%m-%d")
        cur_rows = 2      # row 1 is reserved for the country header
        while cur_day_str != day_t_str:
            file_name = data_dir + "\\" + cur_day_str + "--" + cur_day_str + ".xls"
            self.__combine_data(ws, file_name, country_to_idx, cur_rows, cur_day_str)
            cur_rows += 1
            cur_day = cur_day + datetime.timedelta(days=1)
            cur_day_str = cur_day.strftime("%Y-%m-%d")
        file_to_save = data_dir + "\\" + scene_name + day_head.strftime("%Y-%m-%d") \
            + "--" + day_tail.strftime("%Y-%m-%d") + ".xlsx"
        # Header row: one column per country discovered while merging.
        for (country, idx) in country_to_idx.items():
            ws.cell(row=1, column=idx).value = country
        wb.save(file_to_save)
def format_excel_sheet(report, ltp_file):
    """Build the LTP test-report workbook and save it in the working dir.

    Row 1 is a merged, bold yellow title; row 3 carries the column headers;
    the Generator fills in one row per test case below that.
    """
    workbook = Workbook()
    sheet = workbook.active
    # Wide columns for module/test-case names, narrower for results.
    for letter, width in (('A', 30), ('B', 20), ('C', 10)):
        sheet.column_dimensions[letter].width = width
    sheet['A1'].font = Font(bold=True, color=colors.DARKYELLOW, size=20)
    sheet.merge_cells('A1:D1')
    sheet['A1'] = 'LTP Test report'
    for cell, text in (('A3', 'Module'), ('B3', 'Test Case'),
                       ('C3', 'Result'), ('D3', 'Exit Code')):
        sheet[cell] = text
    Generator.append_data_into_cells(sheet, report)
    # Output name derives from the LTP file's base name (Windows path).
    base_name = (ltp_file.split('\\')[-1].split('.'))[0]
    output_file = 'l4b-software___testReport' + '___' + base_name + '.xlsx'
    try:
        workbook.save(filename=output_file)
    except PermissionError as e:
        print("\n\n\n Excel file is open. Please close the excel file !!!")
def open_xls_as_xlsx(filename):
    """Convert the first non-empty sheet of an .xls file to .xlsx and return
    the new file's Path.

    Fixes vs. the original: get_active_sheet() is deprecated in openpyxl
    (use .active); the unused load_workbook import was dropped.

    Raises ImportError when xlrd/openpyxl are missing; IndexError when every
    sheet in the workbook is empty.
    """
    try:
        # noinspection PyUnresolvedReferences
        import xlrd
        # noinspection PyUnresolvedReferences
        from openpyxl.workbook import Workbook
        book = xlrd.open_workbook(filename)
        # Scan for the first sheet that actually contains cells.
        index = 0
        nrows, ncols = 0, 0
        sheet = None
        while nrows * ncols == 0:
            sheet = book.sheet_by_index(index)
            nrows = sheet.nrows
            ncols = sheet.ncols
            index += 1
        book1 = Workbook()
        sheet1 = book1.active
        # xlrd is 0-based, openpyxl 1-based.
        for row in range(0, nrows):
            for col in range(0, ncols):
                sheet1.cell(row=row + 1, column=col + 1).value = sheet.cell_value(row, col)
        filename = filename.replace('.xls', '.xlsx')
        book1.save(filename)
        return Path(filename)
    except ImportError:
        print('xlrd and openpyxl are required in order to convert an xls file to xlsx')
        raise
def appendSSEStocks(self, lastStockNumber):
    """Fetch SSE stock info for codes [self.startStockNumber, lastStockNumber]
    and append one spreadsheet row per company, saving after every row.

    Fixes vs. the original: Python-2 print statements replaced by print()
    calls; len() used instead of calling __len__ directly.
    """
    wb = Workbook()  # load_workbook(filename=self.filePath)
    sheet = wb.active
    # Header row from the configured index names.
    for i in range(len(self.__indexName)):
        _ = sheet.cell(column=i + 1, row=1, value=self.__indexName[i])
    row = sheet.max_row + 1
    for i in range(self.startStockNumber, lastStockNumber + 1):
        a = AchieveSSEStockInfo(i)
        sleep(1)     # throttle requests to the data source
        if not a.getStatus():
            continue
        # a.__public__ lists getter-method names; call each and write the
        # result into the matching column.
        for j in range(len(a.__public__)):
            m = a.__public__[j]
            f = getattr(a, m)
            print(m)
            print(f())
            _ = sheet.cell(column=j + 1, row=row, value="%s" % f())
        row = row + 1
        # Save after each company so a crash loses at most one row.
        wb.save(filename=self.filePath)
def new_user(userid):
    """ Adds a new row to the bottom of column A with the specified value """
    # `c` is a module-level worksheet; `WB`, `wb`, `FILE` are module-level
    # workbook objects / path -- none are visible in this chunk.
    # NOTE(review): len(c['A']) counts allocated cells in column A, not
    # non-empty ones, so formatting gaps inflate the row number -- verify.
    rownum = len(c['A']) + 1
    c[f'A{rownum}'] = str(userid)
    # NOTE(review): WB.save(wb, FILE) reads like an unbound call, i.e.
    # Workbook.save(instance, path) -- confirm WB is the class, not an instance.
    WB.save(wb, FILE)
def output_courses_info_to_xlsx(filepath, courses_base):
    """Write the collected course rows to *filepath* as a one-sheet workbook."""
    workbook = Workbook()
    worksheet = workbook.active
    worksheet.title = "Coursera courses"
    for course_row in courses_base:
        worksheet.append(course_row)
    workbook.save(filepath)
def saveData(data): name = 'top_info' #try: # wb = load_workbook(name+'.xlsx') #except: if True: wb = Workbook() ws = wb.worksheets[0] ws.title = "gettop" wb.save(name + '.xlsx') wb = load_workbook(name + '.xlsx') ws = wb.worksheets[0] ws.title = 'gettop' i = 1 for n in data: i = i + 1 putData(ws, 'A', i, n, 'NO', i - 1) putData(ws, 'B', i, n, 'name', '......') putData(ws, 'C', i, n, 'vk') putData(ws, 'D', i, n, 'rang') putData(ws, 'E', i, n, 'level') putData(ws, 'F', i, n, 'epower') putData(ws, 'G', i, n, 'clan_name') putData(ws, 'H', i, n, 'clan_owner') if n.has_key('adInfo'): n['adInfo'] = json.loads(n['adInfo']) putData(ws, 'I', i, n['adInfo'], 'currency') wb.save(name + '.xlsx')
def set_char(userid, index: str, entry):
    """ Sets all the cell values associated with character info """
    # Find the row in column A whose value matches userid, then write
    # `entry` into (index column, that row) and save.
    #
    # Fixes vs. the original: the while/for scan never terminated when the
    # userid was absent (ctrl stayed False forever) and would address row 0
    # on a miss; the scan is now a single pass that returns when no match
    # exists.
    try:
        target = f'{userid}'
        row = 0
        for cell in c['A']:
            if cell.value is not None and str(cell.value) == target:
                row = cell.row
                break
        if row == 0:
            # No row registered for this user -- nothing to update.
            return
        c[f'{index}{row}'] = entry
        WB.save(wb, FILE)
    except:
        import traceback
        traceback.print_exc()
def write_excel_sheet(data_dict):
    """Write {device: admin_state} to both the record book and the daily
    report book, each in a new front sheet named for the current day
    (module-level `currday`).

    Fixes vs. the original: dict.iteritems() is Python-2 only -- .items()
    works on 2 and 3; the duplicated fill-and-save block is factored into
    a helper.
    """
    def _fill(book, target_path):
        # New sheet inserted at the front, named after `currday`.
        sheet = book.create_sheet(currday, 0)
        sheet.cell(column=1, row=1, value='DEVICE')
        sheet.cell(column=2, row=1, value='ADMIN_STATE')
        row = 2
        for device, state in data_dict.items():
            sheet.cell(column=1, row=row, value=device)
            sheet.cell(column=2, row=row, value=state)
            row += 1
        book.save(target_path)

    try:
        book = load_workbook(rec_book)
    except:
        # Missing/unreadable record book: start a fresh one.
        book = Workbook()
    _fill(book, rec_book)
    try:
        daily_book = load_workbook(daily_report_book)
    except:
        daily_book = Workbook()
    _fill(daily_book, daily_report_book)
def get_xlsx(src_file_path):
    """Convert an .xls in the work dir to .xlsx, then move the original into
    the finish dir.

    Returns the source name on success, an error string when xlrd cannot
    parse the file, or None when the final rename fails.
    """
    try:
        legacy_book = xlrd.open_workbook(f'{work_dicts["work_dir"]}/{src_file_path}')
    except xlrd.biffh.XLRDError:
        return 'file cannot be converted'
    new_book = Workbook()
    for index, sheet_name in enumerate(legacy_book.sheet_names()):
        legacy_sheet = legacy_book.sheet_by_name(sheet_name)
        if index == 0:
            new_sheet = new_book.active      # reuse the default first sheet
            new_sheet.title = sheet_name
        else:
            new_sheet = new_book.create_sheet(title=sheet_name)
        # xlrd is 0-based, openpyxl 1-based.
        for r in range(legacy_sheet.nrows):
            for col in range(legacy_sheet.ncols):
                new_sheet.cell(row=r + 1, column=col + 1).value = legacy_sheet.cell_value(r, col)
    new_book.save(f'{work_dicts["work_dir"]}/{src_file_path}x')
    try:
        os.rename(f'{work_dicts["work_dir"]}/{src_file_path}',
                  f'{work_dicts["finish_dir"]}/{src_file_path}')
        return f'{src_file_path}'
    except Exception as e:
        # проверочный принт, не удалось переименовать
        # print(e)
        return None
def adj_matrix():
    """Build a gene-gene co-mutation adjacency matrix from the mutation
    spreadsheet and write both the matrix (adj_mat.csv) and an upper-triangle
    edge list (gene_mut_value.csv).

    NOTE(review): ``sheetname=`` is the pre-0.21 pandas spelling (modern
    pandas uses sheet_name=); the 24x24 seed assumes exactly 24 genes in the
    sheet's index; saving an openpyxl Workbook to a .csv path produces an
    xlsx (zip) archive with a misleading extension -- verify intent.
    """
    adj_matrix = [[0 for i in range(24)] for i in range(24)]
    dframe = pd.read_excel(
        "D:/mo/9.3/AllPanelMutDis.PASS_Fc_MAF_SNVs.overlap2_hcSNVs.MutNumberunion.xlsx",
        sheetname="AllPanelMutDis.PASS_Fc_MAF_SNVs")
    gene_list = list(dframe.index)
    adj_matrix = pd.DataFrame(adj_matrix, columns=gene_list, index=gene_list)
    # For every patient column, every gene pair that is co-mutated
    # (both values > 0) gets a symmetric +1 in the matrix.
    for patients in dframe.columns:
        for i in range(len(gene_list)):
            for j in range(i + 1, len(gene_list)):
                if dframe[patients].iloc[i] > 0 and dframe[patients].iloc[
                        j] > 0:
                    adj_matrix[gene_list[i]].loc[gene_list[j]] += 1
                    adj_matrix[gene_list[j]].loc[gene_list[i]] += 1
    adj_matrix.to_csv('D:/mo/9.3/adj_mat.csv')
    # Upper-triangle edge list: gene_a, gene_b, co-mutation count.
    wb = Workbook()
    ws0 = wb.active
    for i in range(len(gene_list)):
        for j in range(i + 1, len(gene_list)):
            ws0.append([
                gene_list[i], gene_list[j],
                adj_matrix[gene_list[i]].loc[gene_list[j]]
            ])
    file_path = "D:/mo/9.3/gene_mut_value.csv"
    wb.save(file_path)
def saveData():
    # Persist per-person damage totals to fights.xlsx: one row per name with
    # Ignore / DmgDealed / DmgTake totals plus one extra column per battle.
    # NOTE(review): openpyxl-1.x API -- ws.cell('A1') string addressing and
    # the 0-based row=0 / row=i-1 keywords near the bottom are legacy
    # semantics; modern openpyxl rejects row=0.
    name = 'fights'
    try:
        wb = load_workbook(name+'.xlsx')
    except:
        # First run: create the file so the unconditional reload below works.
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name+'.xlsx')
    wb = load_workbook(name+'.xlsx')
    ws = wb.worksheets[0]
    i = 1
    ws.cell('A1').value = 'Name'
    ws.cell('C1').value = 'Dammage'
    ws.cell('D1').value = 'Health'
    ws.cell('B1').value = 'Ignore'
    # `persons` is a module-level dict: name -> {'total': {...}, '<battle#>': {...}}
    for n in persons:
        i = i + 1
        ws.cell('A'+str(i)).value = n
        ws.cell('B'+str(i)).value = persons[n]['total']['Ignore']
        ws.cell('C'+str(i)).value = persons[n]['total']['DmgDealed']
        ws.cell('D'+str(i)).value = persons[n]['total']['DmgTake']
        for b in persons[n]:
            if b == 'total':
                continue
            dd = persons[n][b]['DmgDealed']
            # Per-battle columns start after the fixed A-D block.
            ws.cell(row = 0, column = int(b)+3).value = "battle"+b
            ws.cell(row = i-1, column = int(b)+3).value = dd
    wb.save(name+'.xlsx')
def test1():
    """Merge the Gallery2 books.xlsx 'Sheet' into a fresh workbook and save
    it as test.xlsx.

    Fix vs. the original: get_sheet_by_name() is deprecated in openpyxl --
    dictionary-style wb["Sheet"] access is the supported spelling.
    """
    wb = Workbook()
    ws = wb["Sheet"]          # the default sheet of a new workbook
    merge_xl_content(ws, '/home/huzhennan/Works/local/Gallery2/books.xlsx',
                     'Sheet')
    wb.save('test.xlsx')
def saveData(cname, data):
    # Write one clan's member stats to clan_info_<cid>.xlsx (`cid` is a
    # module-level global): one row per member with literal filler columns
    # ('1', '.', '-', ',') between the real values.
    # NOTE(review): openpyxl-1.x API -- ws.cell('C1') string addressing was
    # removed in modern releases.
    name = 'clan_info_' + str(cid)
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        # First run: create the file so the unconditional reload succeeds.
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    ws.title = cname
    i = 1
    # Header labels only for the data-bearing columns.
    ws.cell('C1').value = 'Name'
    ws.cell('E1').value = 'Level'
    ws.cell('G1').value = 'Ladder'
    ws.cell('H1').value = 'Win'
    ws.cell('I1').value = 'Kill'
    ws.cell('J1').value = 'Mission'
    for n in data:
        i = i + 1
        ws.cell('A' + str(i)).value = '1'
        ws.cell('B' + str(i)).value = '.'
        ws.cell('C' + str(i)).value = n["Name"]
        ws.cell('D' + str(i)).value = '-'
        ws.cell('E' + str(i)).value = n["Lvl"]
        ws.cell('F' + str(i)).value = ','
        ws.cell('G' + str(i)).value = n["Ladder"]
        ws.cell('H' + str(i)).value = n["WinCount"]
        ws.cell('I' + str(i)).value = n["KillCount"]
        ws.cell('J' + str(i)).value = n["DoMissionCount"]
    wb.save(name + '.xlsx')
def save_to_excel(symbol, contents):
    """Append scraped articles for *symbol* to <symbol>.xlsx, creating the
    file with a header row on first use.

    Fixes vs. the original: Python-2 print statement replaced with print();
    header written via a loop; zip(range(...)) replaced with enumerate.
    """
    filename = symbol + '.xlsx'
    print('save content into ', filename)
    if os.path.exists(filename):
        wb = load_workbook(filename=filename)
        ws = wb[symbol]
        row = ws.max_row          # append below the existing rows
    else:
        wb = Workbook()
        ws = wb.active
        ws.title = symbol
        headers = ['Title', 'PageLink', 'Time', 'Author', 'Summary']
        for col, text in enumerate(headers, start=1):
            ws.cell(row=1, column=col, value=text)
        row = 1
    for i, c in enumerate(contents, start=row + 1):
        ws.cell(row=i, column=1, value=c['title'])
        ws.cell(row=i, column=2, value=c['url'])
        ws.cell(row=i, column=3, value=c['publishedTime'])
        ws.cell(row=i, column=4, value=c['author'])
        # The summary is a sequence; spread it across columns E onward.
        for j, s in enumerate(c['summary']):
            ws.cell(row=i, column=5 + j, value=s)
    wb.save(filename)
def saveClansData(cdata):
    # Dump a list of clan dicts to clan_info_all.xlsx: the first clan's keys
    # become the header row, then one row per clan in the same key order.
    # NOTE(review): row=0/column=0 addressing means openpyxl 1.x (0-based);
    # modern openpyxl raises on row 0.  Also relies on every clan dict
    # sharing one key order -- verify on Python < 3.7 dicts.
    name = 'clan_info_all'
    try:
        wb = load_workbook(name+'.xlsx')
    except:
        # First run: create the file so the unconditional reload succeeds.
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name+'.xlsx')
    wb = load_workbook(name+'.xlsx')
    ws = wb.worksheets[0]
    i = 0
    j = 0
    clansN = []
    # Header row taken from the first clan only (note the `break`).
    for c in cdata:
        for n in c:
            clansN.append(n)
            ws.cell(row=i, column=j).value = n
            j = j + 1
        break
    for c in cdata:
        i = i + 1
        j = 0
        for n in c:
            ws.cell(row=i, column=j).value = c[n]
            j = j + 1
    wb.save(name+'.xlsx')
def bc_generate_xlsx(fn):
    """Generate the fiwalk file-object Excel report for one disk image.

    Fixes vs. the original: the ws.cell('A1')-style string lookup was
    removed from openpyxl -- the header row is written with 1-based
    row/column keywords, via a loop instead of eleven copy-pasted lines.
    """
    wb = Workbook()
    dest_filename = fn.outdir + "/" + filename_from_path(
        fn.fiwalk_xmlfile) + ".xlsx"
    print("Generating Excel report ", dest_filename)
    ws = wb.worksheets[0]
    ws.title = "File Object Information"
    headers = ["Partition", "Filename", "Extension", "Filesize", "File format",
               "Change time", "Access time", "Create time", "Modification time",
               "MD5 Hash", "SHA1 Hash"]
    for col, text in enumerate(headers, start=1):
        ws.cell(row=1, column=col).value = text
    # Fill the data rows from the fiwalk XML.
    process_files(fn.fiwalk_xmlfile, ws)
    # Save the workbook to the open file
    wb.save(filename=dest_filename)
def print_CFs(CF_matrices, EF_list, CF_categories):
    """Dump every method's characterisation-factor matrix to CF_print.xlsx,
    one sheet per method, one row per elementary flow.

    Fixes vs. the original: ``Worksheet(wb, ...)`` / ``wb.add_sheet()`` were
    removed from openpyxl -- sheets are made with create_sheet(); print is
    called as a function.
    """
    from openpyxl.workbook import Workbook
    result_filename = 'CF_print.xlsx'
    wb = Workbook()  # creating a workbook
    for method in CF_matrices:
        ws = wb.create_sheet(title=method)  # one sheet per method
        ws.freeze_panes = 'D2'              # keep the three key columns visible
        header = ['compartment', 'substance', 'subcompartment']
        for category in CF_categories[method]:
            header.append(category)
        ws.append(header)
        for EF in EF_list:
            matrix_column = EF_list.index(EF)
            compartment, substance, subcompartment = EF
            line = [compartment, substance, subcompartment]
            # One CF value per category of this method.
            for category in CF_categories[method]:
                matrix_line = CF_categories[method].index(category)
                line.append(CF_matrices[method][matrix_line, matrix_column])
            ws.append(line)
    print('saving in excel sheet named: ' + result_filename)
    wb.save(result_filename)
def cli():
    """Parse the configured source file and write the collected function and
    version lists to the result workbook (two sheets)."""
    global filenum
    ParseFile(path)
    wb = Workbook()
    ###############################################
    # Sheet 1: one row per PAR number with its function versions.
    func_sheet = wb.active
    func_sheet.title = "FunctionList"
    func_sheet.append(["PAR No.", "functions versions"])
    for par_no in Functionslist.keys():
        data_row = [par_no]
        data_row.extend(Functionslist.get(par_no))
        func_sheet.append(data_row)
    # Sheet 2: the plain list of ICD versions.
    version_sheet = wb.create_sheet()
    version_sheet.title = "VersionList"
    version_sheet.append(["Version"])
    for version in ICDVersionlist:
        version_sheet.append([version])
    print("-->Saving File: " + f_result_file)
    wb.save(f_result_file)
    print("-->Done!")
class XLSXRenderer(Renderer):
    """Volatility renderer that streams a TreeGrid into an .xlsx file.

    Uses openpyxl's optimized workbook: rows can only be appended, never
    revisited.

    NOTE(review): Workbook(optimized_write=True) is the pre-2.4 spelling of
    write_only=True -- consistent with the OpenPyxl 2.1.2 requirement below.
    """
    def __init__(self, renderers_func, config):
        if not has_openpyxl:
            debug.error("You must install OpenPyxl 2.1.2 for xlsx format:\n\thttps://pypi.python.org/pypi/openpyxl")
        self._config = config
        self._columns = None                 # grid columns, set in render()
        self._text_cell_renderers_func = renderers_func
        self._text_cell_renderers = None     # built lazily in render()
        self._wb = Workbook(optimized_write=True)
        self._ws = self._wb.create_sheet()

    def description(self):
        # Header row: one entry per grid column name.
        output = []
        for column in self._columns:
            output.append((column.name))
        return output

    def _add_row(self, node, data):
        # Visitor callback: append the node's values as the next sheet row
        # and record its row number in the accumulator dict.
        accumulator = data
        accumulator[node] = max(accumulator.values()) + 1
        self._ws.append(list(node.values))
        return accumulator

    def render(self, outfd, grid):
        """Renders the TreeGrid in data out to the output file from the config options"""
        if not self._config.OUTPUT_FILE:
            debug.error("Please specify a valid output file using --output-file")
        self._columns = grid.columns
        self._text_cell_renderers = self._text_cell_renderers_func(self._columns)
        self._ws.append(self.description())   # header row first
        grid.visit(None, self._add_row, {None: 0})
        self._wb.save(filename=self._config.OUTPUT_FILE)
def saveData(data): name = 'top_info' #try: # wb = load_workbook(name+'.xlsx') #except: if True: wb = Workbook() ws = wb.worksheets[0] ws.title = "gettop" wb.save(name+'.xlsx') wb = load_workbook(name+'.xlsx') ws = wb.worksheets[0] ws.title = 'gettop' i = 1 for n in data: i = i + 1 putData(ws,'A',i,n,'NO',i-1) putData(ws,'B',i,n,'name','......') putData(ws,'C',i,n,'vk') putData(ws,'D',i,n,'rang') putData(ws,'E',i,n,'level') putData(ws,'F',i,n,'epower') putData(ws,'G',i,n,'clan_name') putData(ws,'H',i,n,'clan_owner') if n.has_key('adInfo'): n['adInfo'] = json.loads(n['adInfo']) putData(ws,'I',i,n['adInfo'],'currency') wb.save(name+'.xlsx')
def createReport(conn, series, dateList):
    """Build the daily price-quote report workbook for one car series.

    One sheet per analysis section, each filled by its dedicated generator;
    the file name embeds the series and the last date in dateList
    (GBK-encoded path parts -- Python-2 code).
    """
    wb = Workbook()
    # The default first sheet carries the report description.
    spec_sheet = wb.worksheets[0]
    spec_sheet.title = u'报表说明'
    createReportSpecification(spec_sheet, conn, series, dateList)
    # Remaining sections: (sheet title, generator) in display order.
    sections = [
        (u'网站经销商数量变化分析', createAnalysisOfWebsiteDealer),
        (u'网站报价均值分析', createAnalysisOfNetworkOffer),
        (u'大区报价分析', createAnalysisOfAreaPrice),
        (u'省份报价分析', createTableAnalysisOfProvincesOffer),
        (u'报价详细', createTableAnalysisOfDetailedQuotation),
    ]
    for position, (sheet_title, generator) in enumerate(sections, start=1):
        section_sheet = wb.create_sheet(sheet_title, position)
        generator(section_sheet, conn, series, dateList)
    file_dir = settings.filePath+'\\report\\'+series.encode('gbk')+'-报价日报_'.encode('gbk')+\
        dateList[len(dateList)-1].strftime('%Y-%m-%d')+'.xlsx'
    wb.save(file_dir)
def print_CFs(CF_matrices, EF_list, CF_categories):
    """Write characterisation factors (CFs) to CF_print.xlsx, one sheet per method.

    CF_matrices   -- dict: method -> 2-D matrix indexed [category_row, EF_column]
    EF_list       -- list of (compartment, substance, subcompartment) tuples;
                     list position is the matrix column
    CF_categories -- dict: method -> ordered list of impact categories;
                     list position is the matrix row
    """
    from openpyxl.workbook import Workbook
    from openpyxl.worksheet import Worksheet
    result_filename = 'CF_print.xlsx'
    wb = Workbook()  # creating a workbook
    for method in CF_matrices:
        ws = Worksheet(wb, title=method)  # creating a sheet inside the workbook
        ws.freeze_panes = 'D2'  # keep the three id columns and the header visible
        header = ['compartment', 'substance', 'subcompartment']
        for category in CF_categories[method]:
            header.append(category)
        ws.append(header)
        # One line per elementary flow: its identifiers, then one CF per category.
        for EF in EF_list:
            matrix_column = EF_list.index(EF)
            compartment, substance, subcompartment = EF
            line = [compartment, substance, subcompartment]
            for category in CF_categories[method]:
                matrix_line = CF_categories[method].index(category)
                CF = CF_matrices[method][matrix_line, matrix_column]
                line.append(CF)
            ws.append(line)
    print 'saving in excel sheet named: ' + result_filename
    # NOTE(review): only the LAST method's sheet is attached here; add_sheet
    # probably belongs inside the loop above — TODO confirm intent.
    wb.add_sheet(ws)
    wb.save(result_filename)
def print_UP_list(UP_list, UP_meta_info, base):
    """Write the unit-process (UP) list with its metadata to UP_print.xlsx.

    UP_list      -- ordered list of UP names; the printed '#' is index + *base*
    UP_meta_info -- dict: UP name -> metadata dict with keys 'unit', 'Country',
                    'Infrastructure' and a 'Category type' sequence (up to 6 used)
    base         -- integer offset added to each UP's list index
    """
    from openpyxl.workbook import Workbook
    from openpyxl.worksheet import Worksheet
    filename = 'UP_print.xlsx'
    # encoding= is a legacy openpyxl constructor argument.
    wb = Workbook(encoding='mac_roman')
    ws = Worksheet(wb, title='UP_list')  # creating a sheet inside the workbook
    ws.freeze_panes = 'A2'  # keep the header row visible while scrolling
    header = ['#', 'UP name', 'unit', 'country', 'infrastructure']
    for i in range(6):
        header.append('Category ' + str(i))
    ws.append(header)
    for i in range(len(UP_list)):
        UP = UP_list[i]
        line = [
            i + base, UP_list[i], UP_meta_info[UP]['unit'],
            UP_meta_info[UP]['Country'], UP_meta_info[UP]['Infrastructure']
        ]
        # Append up to six category entries; stop at the first missing index.
        for j in range(6):
            try:
                line.append(UP_meta_info[UP]['Category type'][j])
            except IndexError:
                break
        ws.append(line)
    print 'saving in excel sheet named: ' + filename
    wb.add_sheet(ws)  # legacy openpyxl: attach the sheet to the workbook
    wb.save(filename)
def saveData(cname, data):
    """Append per-member clan statistics to clan_info_<cid>.xlsx on sheet *cname*.

    cname -- clan name; becomes the worksheet title
    data  -- iterable of member dicts with keys 'Name', 'Lvl', 'Ladder',
             'WinCount', 'KillCount', 'DoMissionCount'
    """
    # NOTE(review): `cid` is neither a parameter nor a local here — it must be a
    # module-level global, or this is a bug (perhaps `cname` was intended). TODO confirm.
    name = 'clan_info_'+str(cid)
    # Create the workbook on first use; afterwards reuse the existing file.
    try:
        wb = load_workbook(name+'.xlsx')
    except:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name+'.xlsx')
    wb = load_workbook(name+'.xlsx')
    ws = wb.worksheets[0]
    ws.title = cname
    i = 1
    # Header row (old openpyxl string-index cell API: ws.cell('C1')).
    ws.cell('C1').value = 'Name'
    ws.cell('E1').value = 'Level'
    ws.cell('G1').value = 'Ladder'
    ws.cell('H1').value = 'Win'
    ws.cell('I1').value = 'Kill'
    ws.cell('J1').value = 'Mission'
    # One row per member starting at row 2; columns A/B/D/F hold constant
    # filler text ('1', '.', '-', ',') between the stats.
    for n in data:
        i = i + 1
        ws.cell('A'+str(i)).value = '1'
        ws.cell('B'+str(i)).value = '.'
        ws.cell('C'+str(i)).value = n["Name"]
        ws.cell('D'+str(i)).value = '-'
        ws.cell('E'+str(i)).value = n["Lvl"]
        ws.cell('F'+str(i)).value = ','
        ws.cell('G'+str(i)).value = n["Ladder"]
        ws.cell('H'+str(i)).value = n["WinCount"]
        ws.cell('I'+str(i)).value = n["KillCount"]
        ws.cell('J'+str(i)).value = n["DoMissionCount"]
    wb.save(name+'.xlsx')
def separate_xl_content(src_filepath):
    """Split the rows of worksheet "Sheet" into one workbook per column-A value.

    Column A of each source row names a target .xlsx file; the remaining cells
    of the row become one row in that file. Each target workbook is saved to
    the current directory under that name.

    Returns the list of filenames written (one per distinct column-A value).
    """
    src_wb = load_workbook(src_filepath, use_iterators=True)
    src_ws = src_wb.get_sheet_by_name(name="Sheet")
    # Group rows by their first cell: filename -> list of remaining-cell rows.
    # setdefault replaces the Python-2-only has_key() check-then-insert.
    mytree = {}
    for row in src_ws.iter_rows():
        subxlfilename = row[0].internal_value
        values = [cell.internal_value for cell in row[1:]]
        mytree.setdefault(subxlfilename, []).append(values)
    ret = []
    for subxlfilename in mytree:
        wb = Workbook()
        ws = wb.get_sheet_by_name(name="Sheet")
        for values in mytree[subxlfilename]:
            ws.append(values)
        wb.save(subxlfilename)
        ret.append(subxlfilename)
    return ret
def saveClansData(cdata):
    """Write all clans' aggregate stats to clan_info_all.xlsx (first worksheet).

    cdata -- iterable of dicts; the first dict supplies the header keys and
             every dict becomes one row (values in its own key order).
    """
    name = 'clan_info_all'
    # Create the workbook on first use; afterwards reuse the existing file.
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    i = 0
    j = 0
    clansN = []
    # Header row taken from the first clan's keys only.
    # NOTE(review): row=0/column=0 only works with the pre-2.0 zero-indexed
    # openpyxl cell API; modern openpyxl requires indices >= 1. TODO confirm version.
    for c in cdata:
        for n in c:
            clansN.append(n)
            ws.cell(row=i, column=j).value = n
            j = j + 1
        break
    # One data row per clan.
    for c in cdata:
        i = i + 1
        j = 0
        for n in c:
            ws.cell(row=i, column=j).value = c[n]
            j = j + 1
    wb.save(name + '.xlsx')
def outputexcel(rowindex, eachfilesummary, filepath):
    """Append one sqlloader-log summary row to SqlloaderLogSummary.xlsx in *filepath*.

    rowindex        -- 1-based worksheet row the values are written into
    eachfilesummary -- '|'-separated string: FileName|LoadNumber|ErrorNumber|ErrorMessage
    filepath        -- directory holding (or receiving) the summary workbook
    """
    # Compute the target path once instead of re-joining it three times.
    summary_path = os.path.join(filepath, 'SqlloaderLogSummary.xlsx')
    # Reuse the existing workbook so earlier rows survive; otherwise start fresh.
    if os.path.exists(summary_path):
        summyexcel = openpyxl.load_workbook(summary_path, data_only=True)
    else:
        summyexcel = Workbook()
    wssummary = summyexcel.active
    wssummary.title = 'SqlloaderLogSummary'
    # (Re)write the header row on every call so the sheet is always labelled.
    columntitle = ['FileName', 'LoadNumber', 'ErrorNumber', 'ErrorMessage']
    for offset, column in enumerate(columntitle):
        wssummary['{0}{1}'.format(chr(ord('A') + offset), 1)] = column
    # Spread the '|'-separated values across columns A, B, C, ... of rowindex.
    # NOTE: chr() arithmetic only covers 26 columns (A-Z) — enough for 4 fields,
    # and values containing '|' would split incorrectly.
    for offset, eachvalue in enumerate(eachfilesummary.split('|')):
        wssummary['{0}{1}'.format(chr(ord('A') + offset), rowindex)].value = eachvalue
    summyexcel.save(summary_path)
def test_dump_sheet():
    """Round-trip strings, ints, datetimes and formulas through a streaming workbook."""
    test_filename = _get_test_filename()
    wb = Workbook(optimized_write=True)
    ws = wb.create_sheet()
    # Column letters A..T (20 columns).
    letters = [get_column_letter(x + 1) for x in xrange(20)]
    expected_rows = []
    # 20 rows of coordinate-like strings ('A1', 'B1', ...).
    for row in xrange(20):
        expected_rows.append(
            ['%s%d' % (letter, row + 1) for letter in letters])
    # 20 rows of ints (same value repeated across the row).
    for row in xrange(20):
        expected_rows.append([(row + 1) for letter in letters])
    # 10 rows of datetimes (month cycles, day = row + 1).
    for row in xrange(10):
        expected_rows.append([
            datetime(2010, ((x % 12) + 1), row + 1)
            for x in range(len(letters))
        ])
    # 20 rows of formulas ('=A1', ...).
    for row in xrange(20):
        expected_rows.append(
            ['=%s%d' % (letter, row + 1) for letter in letters])
    for row in expected_rows:
        ws.append(row)
    wb.save(test_filename)
    wb2 = load_workbook(test_filename)
    ws = wb2.worksheets[0]
    # NOTE(review): the formula rows are excluded from the comparison ([:-20]),
    # presumably because formulas don't read back as their literal text — TODO confirm.
    for ex_row, ws_row in zip(expected_rows[:-20], ws.rows):
        for ex_cell, ws_cell in zip(ex_row, ws_row):
            assert ex_cell == ws_cell.value
    os.remove(test_filename)
def write_location_studies(data: List[Data], path: str):
    """Write an Institution/Country/Study-ID sheet grouped by location.

    Each location occupies one vertical band: its institution and country
    cells are merged over all of its study-id rows, and a blank row
    separates consecutive locations. The workbook is saved to *path*.
    """
    location_country, location_study_id = rev_data(data)
    workbook = Workbook()
    sheet = workbook.active
    # Header row.
    for col, header in enumerate(('Institution', 'Country', 'Study ID'), start=1):
        sheet.cell(row=1, column=col, value=header)
    row = 2
    for location in location_country.keys():
        first_row = row
        sheet.cell(row=row, column=1, value=location)
        sheet.cell(row=row, column=2, value=location_country[location])
        for study_id in location_study_id[location]:
            sheet.cell(row=row, column=3, value=study_id)
            row += 1
        # Merge the institution/country cells only when more than one study row exists.
        if row - 1 > first_row:
            sheet.merge_cells(start_row=first_row, start_column=1,
                              end_row=row - 1, end_column=1)
            sheet.merge_cells(start_row=first_row, start_column=2,
                              end_row=row - 1, end_column=2)
        row += 1  # blank separator row between locations
    workbook.save(path)
def test_save_empty_workbook():
    """An untouched write-only workbook still reloads with exactly one sheet."""
    path = _get_test_filename()
    Workbook(write_only=True).save(path)
    reloaded = load_workbook(path)
    assert len(reloaded.worksheets) == 1
def test_add_local_named_range():
    """A workbook with a sheet-scoped named range saves without error."""
    workbook = Workbook()
    sheet = workbook.create_sheet()
    scoped_range = NamedRange("test_nr", [(sheet, "A1")])
    scoped_range.scope = sheet
    workbook.add_named_range(scoped_range)
    workbook.save(os.path.join(TMPDIR, "local_named_range_book.xlsx"))
def render_xlsx(self, outfd, data):
    """Write pipe-delimited timeline lines into a streaming xlsx workbook.

    Each line of *data* is split on '|' and appended as one worksheet row;
    the result is saved to the configured OUTPUT_FILE.
    """
    workbook = Workbook(optimized_write=True)
    sheet = workbook.create_sheet()
    sheet.title = 'Timeline Output'
    for record in data:
        sheet.append(record.split("|"))
    workbook.save(filename=self._config.OUTPUT_FILE)
def test_add_local_named_range():
    """Saving a workbook that holds a sheet-scoped named range must succeed."""
    workbook = Workbook()
    sheet = workbook.create_sheet()
    local_range = NamedRange('test_nr', [(sheet, 'A1')])
    local_range.scope = sheet
    workbook.add_named_range(local_range)
    workbook.save(osp.join(TMPDIR, 'local_named_range_book.xlsx'))
def FacebookProfiles(path=None, analysisid=None, delim=',', quote='"', num_to_str=False):
    """Export Facebook profile and comment-network csv data into one xlsx workbook.

    path       -- directory containing ua_fb_user.csv and the comment-net csvs
    analysisid -- identifier appended (upper-cased) to the output filename
    delim      -- csv field delimiter
    quote      -- csv quote character
    num_to_str -- unused here; kept for interface compatibility
    """
    IDCOL = 0        # column index holding the user id in every csv
    sheet_index = 0
    # Normalise the path so it ends with '/'.
    if path[len(path)-1] != '/':
        path += '/'
    profile_file_obj = 'ua_fb_user.csv'
    comment_net_files = get_comment_net_csvs(path)
    id_user_map = user_map(path+profile_file_obj)
    # create workbook
    workbook = Workbook()
    # add profiles sheet
    sheet = workbook.create_sheet(sheet_index)
    sheet_index += 1
    sheet.title = "All_Users"
    # Copy the profile csv cell by cell; ids get a '_' suffix,
    # presumably so Excel keeps them as text rather than numbers — TODO confirm.
    for row_index, row in enumerate(csv.reader(open(path+profile_file_obj), delimiter=delim, quotechar=quote)):
        for col_index, col in enumerate(row):
            if col_index == IDCOL:
                col = str(col) + "_"
            sheet.cell(row=row_index, column=col_index).value = col
    # add network metrics sheet(s)
    for netfile in comment_net_files:
        uid = -1
        lastind = 0
        sheet = workbook.create_sheet(sheet_index)
        sheet_index += 1
        # verify function call on sheet object
        sheet.title = netfile.replace(".csv", "").replace("fb_comment_net", "").strip("_")
        reader = csv.reader(open(path+netfile), delimiter=delim, quotechar=quote)
        print 'writing network stats...'
        for row_index, row in enumerate(reader):
            if row_index == 0:
                # Header row: insert an extra 'name' column after node_id and
                # append a final 'link' column.
                sheet.cell(row=row_index, column=0).value = 'node_id'
                sheet.cell(row=row_index, column=1).value = 'name'
                for col_index in range(1, len(row)):
                    sheet.cell(row=row_index, column=col_index + 1).value = row[col_index]
                sheet.cell(row=row_index, column=len(row) + 1).value = 'link'
            else:
                # Data row: id column is written twice (suffixed id, then the
                # mapped user name); all other columns shift right by one.
                for col_index, col in enumerate(row):
                    if col_index == IDCOL:
                        col_ = col + "_"  # (for print out)
                        uid = col
                        sheet.cell(row=row_index, column=col_index).value = col_
                        sheet.cell(row=row_index, column=col_index + 1).value = id_user_map[col]
                    else:
                        sheet.cell(row=row_index, column=col_index + 1).value = col
                    lastind = col_index + 1
                # Trailing 'link' cell pointing at the user's profile page.
                sheet.cell(row=row_index, column=lastind + 1).value = "http://facebook.com/" + uid
    # save workbook
    print 'writing output file ' + path + "fb_comment_network_stats" + "_" + analysisid + ".xlsx"
    workbook.save(open(path + ("fb_comment_network_stats" + "_" + analysisid).upper() + ".xlsx", 'w'))
def test_dump_sheet_title():
    """A sheet title set in optimized-write mode survives a save/load round trip."""
    path = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    workbook.create_sheet(title='Test1')
    workbook.save(path)
    reread = load_workbook(path)
    ws = reread.get_sheet_by_name('Test1')
    assert 'Test1' == ws.title
def make_calenda(task_list, date_from, days, save_as):
    """Build a task calendar spreadsheet spanning *days* days from *date_from*.

    task_list -- task names, one per calendar row
    date_from -- starting date (datetime.date/datetime)
    days      -- number of day columns
    save_as   -- output .xlsx path

    Uses the pre-2.0 openpyxl API: zero-indexed ws.cell(row, column) and the
    mutable cell.style fill/border objects.
    """
    date_to = date_from + datetime.timedelta(days=days)
    print date_to
    from_column = 2   # day columns start after the name/description columns
    row = 0
    # Load the workbook...
    wb = Workbook()
    ws = wb.get_active_sheet()
    ws.cell(row = 0, column = 0).value = u'任务名称'
    ws.cell(row = 0, column = 1).value = u'任务说明'
    month_set = set()
    weekday_column_list = []
    # Row 0: month label at each month boundary; row 1: day-of-month numbers.
    for i in range(days):
        d = date_from + datetime.timedelta(days=i)
        month = d.month
        if month not in month_set:
            cell = ws.cell(row = row, column = i+from_column)
            cell.value = u'%s月' % d.month
            month_set.add(month)
        cell = ws.cell(row = row+1, column = i+from_column)
        cell.value = '%s' % d.day
        # Remember weekend columns for shading below (5 = Sat, 6 = Sun).
        if d.weekday() in (5,6):
            weekday_column_list.append(i+from_column)
    # Narrow day columns.
    for ws_column in range(from_column+1,from_column+days+1):
        col_letter = get_column_letter(ws_column)
        ###~ ws.column_dimensions[col_letter].auto_size = True
        ws.column_dimensions[col_letter].width = 2.7
    # Shade weekend columns grey across all task rows.
    for row in range(1,len(task_list)+2):
        for column in weekday_column_list:
            cell = ws.cell(row=row,column=column)
            cell.style.fill.fill_type = Fill.FILL_SOLID
            cell.style.fill.start_color.index = 'CBCACA'
    # Task names down the first column, starting below the two header rows.
    for i,task in enumerate(task_list):
        cell = ws.cell(row=i+2,column=0)
        cell.value = task
    # Set thin borders on every cell of the grid.
    for row in range(0,len(task_list)+2):
        for col in range(0, days+2):
            cell = ws.cell(row=row,column=col)
            cell.style.borders.top.border_style = Border.BORDER_THIN
            cell.style.borders.right.border_style = Border.BORDER_THIN
            cell.style.borders.bottom.border_style = Border.BORDER_THIN
            cell.style.borders.left.border_style = Border.BORDER_THIN
    # Widen the first (task name) column.
    ws.column_dimensions['A'].width = 30
    wb.save(save_as)
def test_add_local_named_range(tmpdir):
    """Saving a sheet-scoped named range into a temp dir must succeed."""
    tmpdir.chdir()
    workbook = Workbook()
    sheet = workbook.create_sheet()
    scoped_range = NamedRange('test_nr', [(sheet, 'A1')])
    scoped_range.scope = sheet
    workbook.add_named_range(scoped_range)
    workbook.save('local_named_range_book.xlsx')
def test_equal_string():
    """A lone '=' appended in streaming mode must read back as a string cell."""
    path = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    sheet = workbook.create_sheet()
    sheet.append(['', '', None, '='])
    workbook.save(path)
    reread = load_workbook(path, True)
    first_row = list(reread.worksheets[0].iter_rows())[0]
    assert first_row[-1].data_type == 's'
def write_po_content_to_xlsx(dir, language_code):
    """Collect po-file content under *dir* into one component xlsx workbook.

    Walks *dir* with visit_po_file filling the first worksheet, then saves
    the workbook as COMPONET_XLSX_FILENAME inside *dir*.
    """
    workbook = Workbook()
    target = os.path.join(dir, COMPONET_XLSX_FILENAME)
    sheet = workbook.worksheets[0]
    os.path.walk(dir, visit_po_file,
                 {'worksheet': sheet, 'language_code': language_code})
    workbook.save(filename=target)