def saveData(cname, data):
    name = 'clan_info_' + str(cid)  # NOTE: cid is assumed to be a module-level clan id
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    ws.title = cname
    i = 1
    ws.cell('C1').value = 'Name'
    ws.cell('E1').value = 'Level'
    ws.cell('G1').value = 'Ladder'
    ws.cell('H1').value = 'Win'
    ws.cell('I1').value = 'Kill'
    ws.cell('J1').value = 'Mission'
    for n in data:
        i = i + 1
        ws.cell('A' + str(i)).value = '1'
        ws.cell('B' + str(i)).value = '.'
        ws.cell('C' + str(i)).value = n["Name"]
        ws.cell('D' + str(i)).value = '-'
        ws.cell('E' + str(i)).value = n["Lvl"]
        ws.cell('F' + str(i)).value = ','
        ws.cell('G' + str(i)).value = n["Ladder"]
        ws.cell('H' + str(i)).value = n["WinCount"]
        ws.cell('I' + str(i)).value = n["KillCount"]
        ws.cell('J' + str(i)).value = n["DoMissionCount"]
    wb.save(name + '.xlsx')
def saveClansData(cdata):
    name = 'clan_info_all'
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    i = 0
    j = 0
    clansN = []
    for c in cdata:
        for n in c:
            clansN.append(n)
            ws.cell(row=i, column=j).value = n
            j = j + 1
        break
    for c in cdata:
        i = i + 1
        j = 0
        for n in c:
            ws.cell(row=i, column=j).value = c[n]
            j = j + 1
    wb.save(name + '.xlsx')
def testTrialHandlerAndXLSX(self):
    conds = data.importConditions(os.path.join(thisDir, 'trialTypes.xlsx'))
    trials = data.TrialHandler(trialList=conds, seed=100, nReps=2)
    responses = [1, 1, 2, 3, 2, 3, 1, 3, 2, 2, 1, 1]
    rts = numpy.array(responses) / 10.0
    for trialN, trial in enumerate(trials):
        trials.addData('resp', responses[trialN])
        trials.addData('rt', rts[trialN])
    trials.saveAsExcel(self.name)
    # Make sure the file is there
    assert os.path.isfile(self.fullName)
    expBook = load_workbook(os.path.join(thisDir, 'corrXlsx.xlsx'))
    actBook = load_workbook(self.fullName)
    for wsN, expWS in enumerate(expBook.worksheets):
        actWS = actBook.worksheets[wsN]
        for key, expVal in expWS._cells.items():
            actVal = actWS._cells[key]
            try:
                # convert to float if possible and compare with a reasonable
                # (default) precision
                expVal.value = float(expVal.value)
                nose.tools.assert_almost_equals(expVal.value, float(actVal.value))
            except:
                # otherwise do precise comparison
                nose.tools.assert_equal(expVal.value, actVal.value)
def saveData():
    name = 'fights'
    try:
        wb = load_workbook(name + '.xlsx')
    except:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "log"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    i = 1
    ws.cell('A1').value = 'Name'
    ws.cell('C1').value = 'Dammage'
    ws.cell('D1').value = 'Health'
    ws.cell('B1').value = 'Ignore'
    for n in persons:
        i = i + 1
        ws.cell('A' + str(i)).value = n
        ws.cell('B' + str(i)).value = persons[n]['total']['Ignore']
        ws.cell('C' + str(i)).value = persons[n]['total']['DmgDealed']
        ws.cell('D' + str(i)).value = persons[n]['total']['DmgTake']
        for b in persons[n]:
            if b == 'total':
                continue
            dd = persons[n][b]['DmgDealed']
            ws.cell(row=0, column=int(b) + 3).value = "battle" + b
            ws.cell(row=i - 1, column=int(b) + 3).value = dd
    wb.save(name + '.xlsx')
def __init__(self, path_or_buf):
    self.use_xlsx = True
    self.path_or_buf = path_or_buf
    self.tmpfile = None

    if isinstance(path_or_buf, basestring):
        if path_or_buf.endswith('.xls'):
            self.use_xlsx = False
            import xlrd
            self.book = xlrd.open_workbook(path_or_buf)
        else:
            try:
                from openpyxl.reader.excel import load_workbook
                self.book = load_workbook(path_or_buf, use_iterators=True)
            except ImportError:  # pragma: no cover
                raise ImportError(_openpyxl_msg)
    else:
        data = path_or_buf.read()
        try:
            import xlrd
            self.book = xlrd.open_workbook(file_contents=data)
            self.use_xlsx = False
        except Exception:
            from openpyxl.reader.excel import load_workbook
            buf = py3compat.BytesIO(data)
            self.book = load_workbook(buf, use_iterators=True)
def plugin(fname, data=None):
    try:
        if data != None:
            flo = StringIO.StringIO(data)
            wb = load_workbook(flo)
        else:
            wb = load_workbook(filename=fname, use_iterators=True)
    except InvalidFileException:
        logger.Logger().log_ignore(fname, "Invalid Excel file")
        return
    sheetNames = wb.get_sheet_names()
    for name in sheetNames:
        sheet = wb.get_sheet_by_name(name)
        for cell in sheet.get_cell_collection():
            if cell.value != None:
                # scan text for pans and return as a list
                value = str(cell.value)
                pans = panscan.panscan(" " + value + " ")
                # log each pan found - bit of a hack to see if the pans list is empty
                for p in pans:
                    logger.Logger().log_pan(fname, p[1],
                                            name + " Cell " + cell.column + ":" + str(cell.row))
def compareXlsxFiles(pathToActual, pathToCorrect):
    from openpyxl.reader.excel import load_workbook
    # Make sure the file is there
    expBook = load_workbook(pathToCorrect)
    actBook = load_workbook(pathToActual)
    error = None
    for wsN, expWS in enumerate(expBook.worksheets):
        actWS = actBook.worksheets[wsN]
        for key, expVal in expWS._cells.items():
            actVal = actWS._cells[key].value
            expVal = expVal.value
            # determine whether there will be errors
            try:
                # convert to float if possible and compare with a reasonable
                # (default) precision
                expVal = float(expVal)
                isFloatable = True
            except:
                isFloatable = False
            if isFloatable and abs(expVal - float(actVal)) > 0.0001:
                error = "Cell %s: %f != %f" % (key, expVal, actVal)
                break
            elif not isFloatable and expVal != actVal:
                error = "Cell %s: %s != %s" % (key, expVal, actVal)
                break
    if error:
        pathToLocal, ext = os.path.splitext(pathToCorrect)
        pathToLocal = pathToLocal + "_local" + ext
        shutil.copyfile(pathToActual, pathToLocal)
        logging.warning("xlsxActual!=xlsxCorr: Saving local copy to %s" % pathToLocal)
        raise IOError, error
def merge(yearly_file_name, daily_file_name):
    daily = load_workbook(daily_file_name)
    reconciled = load_workbook(yearly_file_name)
    reconciled_sheet = reconciled.worksheets[0]
    deleted_items = []
    daily_values_worksheet = daily.worksheets[0]
    daily_values = set(c.value for c in daily_values_worksheet.columns[DAILY_VALUES_COLUMN - 1][1:])
    old_values = set(c.value for c in reconciled_sheet.columns[RECONCILED_VALUES_COLUMN - 1][1:])
    reconciled_values = set()
    for v in old_values:
        if v in daily_values:
            reconciled_values.add(v)
        else:
            deleted_items.append(v)
    reconciled_values.update(daily_values)
    log_start_row = 1
    while reconciled_sheet.cell(column=DELETED_ITEMS_COLUMN, row=log_start_row).value:
        log_start_row += 1
    copy_to_column(deleted_items, reconciled_sheet, DELETED_ITEMS_COLUMN, log_start_row)
    copy_to_column(reconciled_values, reconciled_sheet, RECONCILED_VALUES_COLUMN, 2)
    copy_to_column([None] * len(deleted_items), reconciled_sheet,
                   RECONCILED_VALUES_COLUMN, 2 + len(reconciled_values))
    reconciled.save(yearly_file_name)
def setup_class(cls):
    mac_wb_path = os.path.join(DATADIR, "reader", "date_1904.xlsx")
    cls.mac_wb = load_workbook(mac_wb_path)
    cls.mac_ws = cls.mac_wb.get_sheet_by_name("Sheet1")
    win_wb_path = os.path.join(DATADIR, "reader", "date_1900.xlsx")
    cls.win_wb = load_workbook(win_wb_path)
    cls.win_ws = cls.win_wb.get_sheet_by_name("Sheet1")
def setup_class(cls):
    mac_wb_path = os.path.join(DATADIR, 'reader', 'date_1904.xlsx')
    cls.mac_wb = load_workbook(mac_wb_path)
    cls.mac_ws = cls.mac_wb.get_sheet_by_name('Sheet1')
    win_wb_path = os.path.join(DATADIR, 'reader', 'date_1900.xlsx')
    cls.win_wb = load_workbook(win_wb_path)
    cls.win_ws = cls.win_wb.get_sheet_by_name('Sheet1')
def test_rewrite_styles(self):
    """Test to verify Bugfix # 46"""
    self.worksheet['A1'].value = 'Value'
    self.worksheet['B2'].value = '14%'
    saved_wb = save_virtual_workbook(self.workbook)
    second_wb = load_workbook(BytesIO(saved_wb))
    assert isinstance(second_wb, Workbook)
    ws = second_wb.get_sheet_by_name('Sheet1')
    assert ws.cell('A1').value == 'Value'
    ws['A2'].value = 'Bar!'
    saved_wb = save_virtual_workbook(second_wb)
    third_wb = load_workbook(BytesIO(saved_wb))
    assert third_wb
def markErrors(errors, excelFile, sheetName, tmpDir, printErrors=False):
    progressBar = Bar('Processing', max=len(errors))
    if os.path.getsize(excelFile) > 10485760:
        print "Log broken cells"
        for error in errors:
            progressBar.next()
            if printErrors.lower() == "true":
                print "Broken Excel cell: " + error[0] + " [ " + ','.join(error[1]) + " ]"
            else:
                print "Broken Excel cell: " + error[0]
        progressBar.finish()
        return
    # open Excel file
    newFile = os.path.join(tmpDir, "errors_" + time.strftime("%Y-%m-%d") + "_" +
                           str(int(time.time())) + "_" + os.path.basename(excelFile))
    fileName, fileExtension = os.path.splitext(excelFile)
    if fileExtension == '.xlsm':
        wb = load_workbook(excelFile, keep_vba=True, data_only=True)
    else:
        wb = load_workbook(excelFile, data_only=True)
    creator = wb.properties.creator
    ws = wb.get_sheet_by_name(sheetName)
    redFill = PatternFill(start_color='FFFF0000', end_color='FFFF0000', fill_type='solid')
    for error in errors:
        progressBar.next()
        print "Broken Excel cell: " + error[0]
        cell = ws[error[0]]
        if printErrors:
            cell.value = ','.join(error[1])
        cell.fill = redFill
    progressBar.finish()
    # save error excel file
    wb.properties.creator = creator
    print "[[Save file: " + newFile + "]]"
    try:
        wb.save(newFile)
    except Exception, e:
        print e
        exit(1)
def load(self):
    """
    Parses the downloaded Excel file and writes it as JSON.
    """
    try:
        book = load_workbook(self._filename, data_only=True)
    except IOError:
        raise CopyException('"%s" does not exist. Have you run "fab update_copy"?' % self._filename)

    for sheet in book:
        columns = []
        rows = []
        for i, row in enumerate(sheet.rows):
            row_data = [c.internal_value for c in row]
            if i == 0:
                columns = row_data
                continue
            # If nothing in a row then it doesn't matter
            if all([c is None for c in row_data]):
                continue
            rows.append(dict(zip(columns, row_data)))
        self._copy[sheet.title] = Sheet(sheet.title, rows, columns)
def test_comments_cell_association():
    path = os.path.join(DATADIR, 'reader', 'comments.xlsx')
    wb = load_workbook(path)
    assert wb.worksheets[0].cell(coordinate="A1").comment.author == "Cuke"
    assert wb.worksheets[0].cell(coordinate="A1").comment.text == "Cuke:\nFirst Comment"
    assert wb.worksheets[1].cell(coordinate="A1").comment is None
    assert wb.worksheets[0].cell(coordinate="D1").comment.text == "Cuke:\nSecond Comment"
def parse():
    filefullname = r'D:\GFT\Test\test_excel.xlsx'
    wb = load_workbook(filename=filefullname)
    ws = wb.get_sheet_by_name("Sheet1")
    ws.cell("A3")
    ws.cell("A4")
    ws.cell("A5")
def load_test_case_for_excel():
    """
    :description: load the test-case Excel files
    :return: the full test data structure
    """
    # list holding a Workbook object per test-case Excel file
    excel_work_book_list = []
    # path of the current TestCase directory (Excel)
    test_case_path = os.path.abspath('TestCase')
    # file listing of the TestCase directory (Excel)
    test_case_excel_list = os.listdir(test_case_path)
    if len(test_case_excel_list) > 0:
        for test_excel in test_case_excel_list:
            # if the suffix is .xlsx, turn the file into a Workbook object and store it
            if test_excel.endswith('.xlsx'):
                excel_absolute_path = test_case_path + "/" + test_excel
                work_book = load_workbook(filename=excel_absolute_path)
                excel_work_book_list.append(work_book)
    else:
        print("No test cases (Excel) to run")
    # return all test data
    total_test_list = ReadExcelData.get_total_test_data(excel_work_book_list)
    # print(total_test_list)
    return total_test_list
def test_parse_dxfs(datadir):
    datadir.chdir()
    reference_file = 'conditional-formatting.xlsx'
    wb = load_workbook(reference_file)
    assert isinstance(wb, Workbook)
    archive = ZipFile(reference_file, 'r', ZIP_DEFLATED)
    read_xml = archive.read(ARC_STYLE)

    # Verify length
    assert '<dxfs count="164">' in str(read_xml)
    assert len(wb.style_properties['dxf_list']) == 164

    # Verify first dxf style
    reference_file = 'dxf_style.xml'
    with open(reference_file) as expected:
        diff = compare_xml(read_xml, expected.read())
        assert diff is None, diff

    cond_styles = wb.style_properties['dxf_list'][0]
    assert cond_styles['font'].color == Color('FF9C0006')
    assert not cond_styles['font'].bold
    assert not cond_styles['font'].italic
    f = PatternFill(end_color=Color('FFFFC7CE'))
    assert cond_styles['fill'] == f

    # Verify that the dxf styles stay the same when they're written and read back in.
    w = StyleWriter(wb)
    w._write_dxfs()
    write_xml = get_xml(w._root)
    read_style_prop = read_style_table(write_xml)
    assert len(read_style_prop['dxf_list']) == len(wb.style_properties['dxf_list'])
    for i, dxf in enumerate(read_style_prop['dxf_list']):
        # compare the styles directly; the original wrapped the comparison in
        # repr(), which made the assert vacuously true
        assert wb.style_properties['dxf_list'][i] == dxf
def populate_model_from_xlsx(model_name, xlsx_file):
    LOGGER.info("Loading data in " + model_name)
    model = classes.my_class_import(model_name)
    workbook = load_workbook(xlsx_file)
    sheet = workbook.get_sheet_by_name(name=model.__name__)
    row_index = 1
    # Reading header
    header = []
    for column_index in range(1, sheet.get_highest_column() + 1):
        value = sheet.cell(row=row_index, column=column_index).value
        if value != None:
            header.append(value if value != '' else header[-1])
        else:
            break
    LOGGER.info('Using header:' + str(header))
    row_index += 1
    while row_index <= sheet.get_highest_row():
        instance = model()
        for i in range(0, len(header)):
            if sheet.cell(row=row_index, column=i + 1).internal_value != None:
                value = sheet.cell(row=row_index, column=i + 1).value
                field_info = Attributes()
                field_info.short_name = header[i]
                field_info.name = header[i]
                instance.set_attribute('excel', field_info, value)
        instance.save()
        row_index += 1
def populate_labels_from_xlsx(model_name, xlsx_file):
    model = classes.my_class_import(model_name)
    workbook = load_workbook(xlsx_file)
    sheet = workbook.get_sheet_by_name(name=model.__name__)
    row_index = 1
    # Reading header
    header = []
    for column_index in range(1, sheet.get_highest_column() + 1):
        value = sheet.cell(row=row_index, column=column_index).value
        if value != None:
            header.append(value if value != '' else header[-1])
        else:
            break
    LOGGER.info('Using header:' + str(header))
    row_index += 1
    while row_index <= sheet.get_highest_row():
        # look up by cell values; the original passed the Cell object itself
        # as the language lookup, which could never match
        identifier = sheet.cell(row=row_index, column=1).value
        language = sheet.cell(row=row_index, column=2).value
        if model.objects.filter(identifier=identifier, language=language).exists():
            instance = model.objects.get(identifier=identifier, language=language)
        else:
            instance = model()
        for i in range(0, len(header)):
            value = sheet.cell(row=row_index, column=i + 1).value
            setattr(instance, header[i], value)
        instance.save()
        row_index += 1
def separate_xl_content(src_filepath):
    src_wb = load_workbook(src_filepath, use_iterators=True)
    src_ws = src_wb.get_sheet_by_name(name="Sheet")
    mytree = {}
    for row in src_ws.iter_rows():
        subxlfilename = row[0].internal_value
        if not mytree.has_key(subxlfilename):
            mytree[subxlfilename] = []
        values = []
        for cell in row[1:]:
            values.append(cell.internal_value)
        mytree[subxlfilename].append(values)
    ret = []
    for subxlfilename in mytree.keys():
        wb = Workbook()
        ws = wb.get_sheet_by_name(name="Sheet")
        for values in mytree[subxlfilename]:
            ws.append(values)
        wb.save(subxlfilename)
        ret.append(subxlfilename)
    return ret
def read_UP_to_analyse(filename, unit_converter, UP_meta_info, infrastructure_rescale):
    from openpyxl.reader.excel import load_workbook
    wb = load_workbook(filename)
    system_counter = 0
    systems = []
    while 1:
        system_counter += 1
        try:
            ws = wb.get_sheet_by_name('system ' + str(system_counter))
            system = {}
            header = 1
            for row in ws.rows:
                if header == 1:
                    header = 0
                else:
                    (UP, quantity, unit) = row
                    UP = str(UP.value)
                    quantity = float(quantity.value)
                    unit = str(unit.value)
                    quantity = quantity * unit_converter[unit][1]
                    if UP_meta_info[UP]['Infrastructure'] == 'Yes':
                        quantity = quantity * infrastructure_rescale
                    system[UP] = quantity
            systems.append(system)
        except AttributeError:
            break
    return systems
def upodate_component_xl_content2pofile(component_xl_file):
    component_dir = os.path.dirname(component_xl_file)
    # TODO: delete all po files.
    po_dict = {}
    src_wb = load_workbook(component_xl_file, use_iterators=True)
    src_ws = src_wb.get_sheet_by_name(name='Sheet')
    for row in src_ws.iter_rows():
        pofilename = row[0].internal_value
        if not po_dict.has_key(pofilename):
            po_dict[pofilename] = []
        values = []
        for cell in row[1:]:
            values.append(cell.internal_value)
        po_dict[pofilename].append(values)
    for pofilename in po_dict.keys():
        pofilepath = os.path.join(component_dir, pofilename)
        contents = po_dict[pofilename]
        catalog = convert_xlsm_content(contents)
        with open(pofilepath, 'w') as f:
            pofile.write_po(f, catalog)
def parseXLSXSource(self):
    wb = load_workbook(filename=PATH_TO_RES + FREQ + FREQUENCIES_XL)
    ws = wb.get_active_sheet()
    frequency_dict = {}
    for i in range(1, ws.get_highest_row()):
        word = ws.cell(row=i, column=0)
        #frequency = ws.cell(row=i, column=5)  # SUBTL(WF) frequency
        frequency = ws.cell(row=i, column=7)  # SUBTL(CD) frequency (0.0 ~ 100.0)
        if isinstance(word.value, unicode):
            if len(word.value) > 1:
                frequency_dict[word.value] = frequency.value
            else:
                print word.value
        i = i + 1
    dump = open(PATH_TO_RES + RESULTING_FREQUENCIES, 'w')
    pickle.dump(frequency_dict, dump)
    del frequency_dict
    dump.close()
def test_comments_cell_association(datadir):
    datadir.chdir()
    wb = load_workbook('comments.xlsx')
    assert wb['Sheet1'].cell(coordinate="A1").comment.author == "Cuke"
    assert wb['Sheet1'].cell(coordinate="A1").comment.text == "Cuke:\nFirst Comment"
    assert wb['Sheet2'].cell(coordinate="A1").comment is None
    assert wb['Sheet1'].cell(coordinate="D1").comment.text == "Cuke:\nSecond Comment"
def test_read_style_iter(tmpdir):
    '''
    Test if cell styles are read properly in iter mode.
    '''
    tmpdir.chdir()
    from openpyxl import Workbook
    from openpyxl.styles import Font

    FONT_NAME = "Times New Roman"
    FONT_SIZE = 15
    ft = Font(name=FONT_NAME, size=FONT_SIZE)

    wb = Workbook()
    ws = wb.worksheets[0]
    cell = ws.cell('A1')
    cell.font = ft

    xlsx_file = "read_only_styles.xlsx"
    wb.save(xlsx_file)

    wb_iter = load_workbook(xlsx_file, read_only=True)
    ws_iter = wb_iter.worksheets[0]
    cell = ws_iter['A1']

    assert cell.font == ft
def test_save_with_vba(datadir):
    datadir.join('reader').chdir()
    fname = 'vba-test.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    buf = save_virtual_workbook(wb)
    files = set(zipfile.ZipFile(BytesIO(buf), 'r').namelist())
    expected = set([
        'xl/drawings/_rels/vmlDrawing1.vml.rels',
        'xl/worksheets/_rels/sheet1.xml.rels',
        '[Content_Types].xml',
        'xl/drawings/vmlDrawing1.vml',
        'xl/ctrlProps/ctrlProp1.xml',
        'xl/vbaProject.bin',
        'docProps/core.xml',
        '_rels/.rels',
        'xl/theme/theme1.xml',
        'xl/_rels/workbook.xml.rels',
        'customUI/customUI.xml',
        'xl/styles.xml',
        'xl/worksheets/sheet1.xml',
        'xl/sharedStrings.xml',
        'docProps/app.xml',
        'xl/ctrlProps/ctrlProp2.xml',
        'xl/workbook.xml',
        'xl/activeX/activeX2.bin',
        'xl/activeX/activeX1.bin',
        'xl/media/image2.emf',
        'xl/activeX/activeX1.xml',
        'xl/activeX/_rels/activeX2.xml.rels',
        'xl/media/image1.emf',
        'xl/activeX/_rels/activeX1.xml.rels',
        'xl/activeX/activeX2.xml',
    ])
    assert files == expected
def saveData(data):
    name = 'top_info'
    #try:
    #    wb = load_workbook(name+'.xlsx')
    #except:
    if True:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "gettop"
        wb.save(name + '.xlsx')
    wb = load_workbook(name + '.xlsx')
    ws = wb.worksheets[0]
    ws.title = 'gettop'
    i = 1
    for n in data:
        i = i + 1
        putData(ws, 'A', i, n, 'NO', i - 1)
        putData(ws, 'B', i, n, 'name', '......')
        putData(ws, 'C', i, n, 'vk')
        putData(ws, 'D', i, n, 'rang')
        putData(ws, 'E', i, n, 'level')
        putData(ws, 'F', i, n, 'epower')
        putData(ws, 'G', i, n, 'clan_name')
        putData(ws, 'H', i, n, 'clan_owner')
        if n.has_key('adInfo'):
            n['adInfo'] = json.loads(n['adInfo'])
            putData(ws, 'I', i, n['adInfo'], 'currency')
    wb.save(name + '.xlsx')
def test_save_without_vba(datadir):
    datadir.join("reader").chdir()
    fname = "vba-test.xlsm"
    vbFiles = set([
        "xl/activeX/activeX2.xml",
        "xl/drawings/_rels/vmlDrawing1.vml.rels",
        "xl/activeX/_rels/activeX1.xml.rels",
        "xl/drawings/vmlDrawing1.vml",
        "xl/activeX/activeX1.bin",
        "xl/media/image1.emf",
        "xl/vbaProject.bin",
        "xl/activeX/_rels/activeX2.xml.rels",
        "xl/worksheets/_rels/sheet1.xml.rels",
        "customUI/customUI.xml",
        "xl/media/image2.emf",
        "xl/ctrlProps/ctrlProp1.xml",
        "xl/activeX/activeX2.bin",
        "xl/activeX/activeX1.xml",
        "xl/ctrlProps/ctrlProp2.xml",
        "xl/drawings/drawing1.xml",
    ])
    wb = load_workbook(fname, keep_vba=False)
    buf = save_virtual_workbook(wb)
    files1 = set(zipfile.ZipFile(fname, "r").namelist())
    files2 = set(zipfile.ZipFile(BytesIO(buf), "r").namelist())
    difference = files1.difference(files2)
    assert difference.issubset(vbFiles), "Missing files: %s" % ", ".join(difference - vbFiles)
def test_read_worksheet():
    path = os.path.join(DATADIR, "genuine", "empty.xlsx")
    wb = load_workbook(path)
    sheet2 = wb.get_sheet_by_name("Sheet2 - Numbers")
    assert isinstance(sheet2, Worksheet)
    eq_("This is cell G5", sheet2.cell("G5").value)
    eq_(18, sheet2.cell("D18").value)
def test_read_single_cell_date(self):
    wb = load_workbook(filename=self.workbook_name, use_iterators=True)
    ws = wb.get_sheet_by_name(name=self.sheet_name)
    eq_(datetime.datetime(1973, 5, 20),
        list(ws.iter_rows('A1'))[0][0].internal_value)
    eq_(datetime.datetime(1973, 5, 20, 9, 15, 2),
        list(ws.iter_rows('C1'))[0][0].internal_value)
def getConfig(tname):
    logger = logging.getLogger("AutoReportMms")
    logger.debug("tname: " + tname)
    wb = load_workbook(tname)
    sheet = wb["配置"]
    config = yaml.load(sheet.cell(row=1, column=1).value)
    kwargs = {}
    # parse the schedule configuration (config keys are Chinese, kept as-is)
    cron = config.get("cron", {})  # cron-style schedule
    logger.debug("cron:" + str(cron))
    # cron
    if cron and cron.get("启用", False):  # "enabled"
        trigger = "cron"
        year = cron.get("年", None)
        if year:
            kwargs["year"] = year
        month = cron.get("月", None)
        if month:
            kwargs["month"] = month
        day = cron.get("日", None)
        if day:
            kwargs["day"] = day
        week = cron.get("周", None)
        if week:
            kwargs["week"] = week
        day_of_week = cron.get("日周", None)
        if day_of_week:
            kwargs["day_of_week"] = day_of_week
        hour = cron.get("时", None)
        if hour:
            kwargs["hour"] = hour
        minute = cron.get("分", None)
        if minute:
            kwargs["minute"] = minute
        second = cron.get("秒", None)
        if second:
            kwargs["second"] = second
        start_date = cron.get("开始日期", None)
        if start_date:
            kwargs["start_date"] = start_date
        end_date = cron.get("结束日期", None)
        if end_date:
            kwargs["end_date"] = end_date
        return trigger, kwargs, config
    time = config.get("时间")  # "time"
    logger.debug("时间:" + str(time))
    start_date = time.get("开始日期", 0)
    end_date = time.get("结束日期", 0)
    run_date = time.get("执行一次", 0)
    # periodic execution
    if start_date:
        trigger = "interval"
        weeks = time.get("周", 0)
        days = time.get("天", 0)
        hours = time.get("时", 0)
        minutes = time.get("分", 0)
        seconds = time.get("秒", 0)
        if weeks == days == hours == minutes == seconds == 0:
            raise Exception("weeks/days/hours/minutes/seconds cannot all be 0")
        if weeks:
            kwargs["weeks"] = weeks
        if days:
            kwargs["days"] = days
        if hours:
            kwargs["hours"] = hours
        if minutes:
            kwargs["minutes"] = minutes
        if seconds:
            kwargs["seconds"] = seconds
        if start_date:
            kwargs["start_date"] = start_date
        if end_date:
            kwargs["end_date"] = end_date
    elif run_date:
        trigger = "date"
        kwargs["run_date"] = run_date
    else:
        raise Exception("invalid schedule configuration")
    logger.debug("trigger:{0}, kwargs:{1}, config:{2}".format(
        trigger, kwargs, config))
    return trigger, kwargs, config
def test_write_virtual_workbook():
    old_wb = Workbook()
    saved_wb = save_virtual_workbook(old_wb)
    new_wb = load_workbook(StringIO(saved_wb))
    assert new_wb
def test_get_highest_row_iter():
    path = os.path.join(DATADIR, 'genuine', 'empty.xlsx')
    wb = load_workbook(filename=path, use_iterators=True)
    sheet2 = wb.get_sheet_by_name('Sheet2 - Numbers')
    max_row = sheet2.get_highest_row()
    eq_(30, max_row)
def setup_class(cls):
    cls.genuine_wb = os.path.join(DATADIR, 'genuine',
                                  'empty-with-styles.xlsx')
    wb = load_workbook(cls.genuine_wb)
    cls.ws = wb.get_sheet_by_name('Sheet1')
def test_read_no_theme(datadir):
    datadir.join("genuine").chdir()
    wb = load_workbook('libreoffice_nrt.xlsx')
    assert wb
def saveAsExcel(self, fileName, sheetName='rawData',
                stimOut=None,
                dataOut=('n', 'all_mean', 'all_std', 'all_raw'),
                matrixOnly=False,
                appendFile=True,
                fileCollisionMethod='rename'):
    """
    Save a summary data file in Excel OpenXML format workbook
    (:term:`xlsx`) for processing in most spreadsheet packages.
    This format is compatible with versions of Excel (2007 or greater)
    and with OpenOffice (>=3.0).

    It has the advantage over the simpler text files
    (see :func:`TrialHandler.saveAsText()` ) that data can be stored
    in multiple named sheets within the file. So you could have a
    single file named after your experiment and then have one
    worksheet for each participant. Or you could have one file for
    each participant and then multiple sheets for repeated sessions
    etc.

    The file extension `.xlsx` will be added if not given already.

    :Parameters:

        fileName: string
            the name of the file to create or append. Can include
            relative or absolute path.

        sheetName: string
            the name of the worksheet within the file

        stimOut: list of strings
            the attributes of the trial characteristics to be output.
            To use this you need to have provided a list of
            dictionaries specifying the trialList parameter of the
            TrialHandler and give here the names of strings specifying
            entries in that dictionary.

        dataOut: list of strings
            specifying the dataType and the analysis to be performed,
            in the form `dataType_analysis`. The data can be any of
            the types that you added using trialHandler.data.add()
            and the analysis can be either 'raw' or most things in
            the numpy library, including 'mean', 'std', 'median',
            'max', 'min'. e.g. `rt_max` will give a column of max
            reaction times across the trials assuming that `rt`
            values have been stored. The default values will output
            the raw, mean and std of all datatypes found.

        appendFile: True or False
            If False any existing file with this name will be
            overwritten. If True then a new worksheet will be
            appended. If a worksheet already exists with that name
            a number will be added to make it unique.

        fileCollisionMethod: string
            Collision method passed to
            :func:`~psychopy.tools.fileerrortools.handleFileCollision`
            This is ignored if ``append`` is ``True``.

    """
    fileName = pathToString(fileName)
    if stimOut is None:
        stimOut = []

    if self.thisTrialN < 1 and self.thisRepN < 1:
        # if both are < 1 we haven't started
        if self.autoLog:
            logging.info('TrialHandler.saveAsExcel called but no '
                         'trials completed. Nothing saved')
        return -1

    # NB this was based on the limited documentation (1 page wiki) for
    # openpyxl v1.0
    if not haveOpenpyxl:
        raise ImportError('openpyxl is required for saving files in'
                          ' Excel (xlsx) format, but was not found.')
        # return -1

    # create the data array to be sent to the Excel file
    dataArray = self._createOutputArray(stimOut=stimOut,
                                        dataOut=dataOut,
                                        matrixOnly=matrixOnly)

    if not fileName.endswith('.xlsx'):
        fileName += '.xlsx'
    # create or load the file
    if appendFile and os.path.isfile(fileName):
        wb = load_workbook(fileName)
        newWorkbook = False
    else:
        if not appendFile:
            # the file exists but we're not appending, will be overwritten
            fileName = handleFileCollision(fileName, fileCollisionMethod)
        wb = Workbook()  # create new workbook
        wb.properties.creator = 'PsychoPy' + psychopy.__version__
        newWorkbook = True

    if newWorkbook:
        ws = wb.worksheets[0]
        ws.title = sheetName
    else:
        ws = wb.create_sheet()
        ws.title = sheetName

    # loop through lines in the data matrix
    for lineN, line in enumerate(dataArray):
        if line is None:
            continue
        for colN, entry in enumerate(line):
            if entry is None:
                entry = ''
            try:
                # if it can convert to a number (from numpy) then do it
                val = float(entry)
            except Exception:
                val = u"{}".format(entry)
            ws.cell(column=colN + 1, row=lineN + 1, value=val)

    wb.save(filename=fileName)
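# A minimal usage sketch for saveAsExcel above, assuming a TrialHandler built
# from an in-line trialList; the condition key 'ori', the data key 'rt', and
# the file/sheet names are illustrative, not taken from the snippet itself.
from psychopy import data

trials = data.TrialHandler(trialList=[{'ori': 0}, {'ori': 90}], nReps=2)
for trial in trials:
    trials.addData('rt', 0.5)  # store one datum per trial
# one file per experiment, one named sheet per participant
trials.saveAsExcel('expt1', sheetName='participant01',
                   dataOut=('n', 'all_mean', 'all_raw'))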
def SalesRec(BOReport, INCReport):
    selectFiles()  # Sets Excel files to reports (bare `selectFiles` in the original; assumed to be a call)
    BO = load_workbook(BOReport)
    INC = load_workbook(INCReport)
    print("See me Sales")
def test_comments_with_iterators(datadir):
    datadir.chdir()
    wb = load_workbook('comments.xlsx', read_only=True)
    ws = wb['Sheet1']
    with pytest.raises(AttributeError):
        assert ws["A1"].comment.author == "Cuke"
    # (fragment: tail of an attention-block helper; its opening lines were cut off)
    a = Dense(TIME_STEPS, activation='softmax')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')
    return output_attention_mul

#########################################
# load content data
pre_XX = []
path = ''
files = os.listdir(path)
for file in files:
    filename = path + '\\' + file
    wb = load_workbook(filename=filename)
    ws = wb.active
    temp1 = []
    for row in ws.rows:
        temp2 = []
        for col in row:
            temp2.append(col.value)
        temp1.append(temp2)
    temp2 = temp1[1:]
    pre_XX += temp2

# load weather data
pre_XX_wea = []
filename = ''
wb = load_workbook(filename=filename)
ws = wb.active
def test_read_empty_archive():
    null_file = os.path.join(DATADIR, 'reader', 'null_archive.xlsx')
    wb = load_workbook(null_file)
def test_read_nostring_workbook():
    genuine_wb = os.path.join(DATADIR, 'genuine', 'empty-no-string.xlsx')
    wb = load_workbook(genuine_wb)
    assert isinstance(wb, Workbook)
def test_read_standard_workbook_from_fileobj():
    path = os.path.join(DATADIR, 'genuine', 'empty.xlsx')
    fo = open(path, mode='rb')
    wb = load_workbook(fo)
    assert isinstance(wb, Workbook)
def importConditions(fileName, returnFieldNames=False, selection=""):
    """Imports a list of conditions from an .xlsx, .csv, or .pkl file

    The output is suitable as an input to :class:`TrialHandler`
    `trialTypes` or to :class:`MultiStairHandler` as a `conditions` list.

    If `fileName` ends with:

        - .csv:  import as a comma-separated-value file (header + row x col)
        - .xlsx: import as Excel 2007 (xlsx) files.
          No support for older (.xls) is planned.
        - .pkl:  import from a pickle file as list of lists (header + row x col)

    The file should contain one row per type of trial needed and one column
    for each parameter that defines the trial type. The first row should give
    parameter names, which should:

        - be unique
        - begin with a letter (upper or lower case)
        - contain no spaces or other punctuation (underscores are permitted)

    `selection` is used to select a subset of condition indices to be used.
    It can be a list/array of indices, a python `slice` object or a string
    to be parsed as either option. e.g.:

        - "1,2,4" or [1,2,4] or (1,2,4) are the same
        - "2:5"                # 2, 3, 4 (doesn't include last whole value)
        - "-10:2:"             # tenth from last to the last in steps of 2
        - slice(-10, 2, None)  # the same as above
        - random(5) * 8        # five random vals 0-8

    """

    def _assertValidVarNames(fieldNames, fileName):
        """screens a list of names as candidate variable names. if all
        names are OK, return silently; else raise with msg
        """
        fileName = pathToString(fileName)
        if not all(fieldNames):
            msg = ('Conditions file %s: Missing parameter name(s); '
                   'empty cell(s) in the first row?')
            raise ValueError(msg % fileName)
        for name in fieldNames:
            OK, msg = isValidVariableName(name)
            if not OK:
                # tailor message to importConditions
                msg = msg.replace('Variables', 'Parameters (column headers)')
                raise ValueError('Conditions file %s: %s%s"%s"' %
                                 (fileName, msg, os.linesep * 2, name))

    if fileName in ['None', 'none', None]:
        if returnFieldNames:
            return [], []
        return []
    if not os.path.isfile(fileName):
        msg = 'Conditions file not found: %s'
        raise ValueError(msg % os.path.abspath(fileName))

    def pandasToDictList(dataframe):
        """Convert a pandas dataframe to a list of dicts.
        This helper function is used by csv or excel imports via pandas
        """
        # convert the resulting dataframe to a numpy recarray
        trialsArr = dataframe.to_records(index=False)
        # Check for new line characters in strings, and replace escaped characters
        for record in trialsArr:
            for idx, element in enumerate(record):
                if isinstance(element, str):
                    record[idx] = element.replace('\\n', '\n')
        if trialsArr.shape == ():
            # convert 0-D to 1-D with one element:
            trialsArr = trialsArr[np.newaxis]
        fieldNames = list(trialsArr.dtype.names)
        _assertValidVarNames(fieldNames, fileName)

        # convert the record array into a list of dicts
        trialList = []
        for trialN, trialType in enumerate(trialsArr):
            thisTrial = OrderedDict()
            for fieldN, fieldName in enumerate(fieldNames):
                val = trialsArr[trialN][fieldN]
                if isinstance(val, basestring):
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif type(val) == np.string_:
                    val = str(val.decode('utf-8-sig'))
                    # if it looks like a list, convert it:
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif np.isnan(val):
                    val = None
                thisTrial[fieldName] = val
            trialList.append(thisTrial)
        return trialList, fieldNames

    if fileName.endswith('.csv') or (fileName.endswith(('.xlsx', '.xls'))
                                     and haveXlrd):
        if fileName.endswith('.csv'):
            trialsArr = pd.read_csv(fileName, encoding='utf-8-sig')
            logging.debug(u"Read csv file with pandas: {}".format(fileName))
        else:
            trialsArr = pd.read_excel(fileName)
            logging.debug(u"Read Excel file with pandas: {}".format(fileName))
        unnamed = trialsArr.columns.to_series().str.contains('^Unnamed: ')
        trialsArr = trialsArr.loc[:, ~unnamed]  # clear unnamed cols
        logging.debug(u"Clearing unnamed columns from {}".format(fileName))
        trialList, fieldNames = pandasToDictList(trialsArr)

    elif fileName.endswith('.xlsx'):
        if not haveOpenpyxl:
            raise ImportError('openpyxl or xlrd is required for loading excel '
                              'files, but neither was found.')
        # data_only was added in 1.8
        if parse_version(openpyxl.__version__) < parse_version('1.8'):
            wb = load_workbook(filename=fileName)
        else:
            wb = load_workbook(filename=fileName, data_only=True)
        ws = wb.worksheets[0]
        logging.debug(u"Read excel file with openpyxl: {}".format(fileName))
        try:
            # in new openpyxl (2.3.4+) get_highest_xx is deprecated
            nCols = ws.max_column
            nRows = ws.max_row
        except Exception:
            # version openpyxl 1.5.8 (in Standalone 1.80) needs this
            nCols = ws.get_highest_column()
            nRows = ws.get_highest_row()

        # get parameter names from the first row header
        fieldNames = []
        for colN in range(nCols):
            fieldName = ws.cell(_getExcelCellName(col=colN, row=0)).value
            fieldNames.append(fieldName)
        _assertValidVarNames(fieldNames, fileName)

        # loop trialTypes
        trialList = []
        for rowN in range(1, nRows):  # skip header first row
            thisTrial = {}
            for colN in range(nCols):
                val = ws.cell(_getExcelCellName(col=colN, row=rowN)).value
                # if it looks like a list or tuple, convert it
                if (isinstance(val, basestring) and
                        (val.startswith('[') and val.endswith(']') or
                         val.startswith('(') and val.endswith(')'))):
                    val = eval(val)
                fieldName = fieldNames[colN]
                thisTrial[fieldName] = val
            trialList.append(thisTrial)

    elif fileName.endswith('.pkl'):
        f = open(fileName, 'rb')
        # Converting newline characters.
        if PY3:
            # 'b' is necessary in Python3 because a byte object is
            # returned when the file is opened in binary mode.
            buffer = f.read().replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        else:
            buffer = f.read().replace('\r\n', '\n').replace('\r', '\n')
        try:
            trialsArr = pickle.loads(buffer)
        except Exception:
            raise IOError('Could not open %s as conditions' % fileName)
        f.close()
        trialList = []
        if PY3:
            # In Python3, strings returned by pickle() are unhashable,
            # so we have to convert them to str.
            trialsArr = [[str(item) if isinstance(item, str) else item
                          for item in row] for row in trialsArr]
        fieldNames = trialsArr[0]  # header line first
        _assertValidVarNames(fieldNames, fileName)
        for row in trialsArr[1:]:
            thisTrial = {}
            for fieldN, fieldName in enumerate(fieldNames):
                # type is correct, being .pkl
                thisTrial[fieldName] = row[fieldN]
            trialList.append(thisTrial)
    else:
        raise IOError('Your conditions file should be an '
                      'xlsx, csv or pkl file')

    # if we have a selection then try to parse it
    if isinstance(selection, basestring) and len(selection) > 0:
        selection = indicesFromString(selection)
        if not isinstance(selection, slice):
            for n in selection:
                try:
                    assert n == int(n)
                except AssertionError:
                    raise TypeError("importConditions() was given some "
                                    "`indices` but could not parse them")

    # the selection might now be a slice or a series of indices
    if isinstance(selection, slice):
        trialList = trialList[selection]
    elif len(selection) > 0:
        allConds = trialList
        trialList = []
        for ii in selection:
            trialList.append(allConds[int(round(ii))])

    logging.exp('Imported %s as conditions, %d conditions, %d params' %
                (fileName, len(trialList), len(fieldNames)))

    if returnFieldNames:
        return (trialList, fieldNames)
    else:
        return trialList
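# A minimal usage sketch for importConditions above, exercising the selection
# syntax its docstring describes (the file name 'conds.xlsx' is hypothetical).
conds = importConditions('conds.xlsx')                         # all rows
firstThree = importConditions('conds.xlsx', selection="0:3")   # rows 0, 1, 2
picked = importConditions('conds.xlsx', selection=[1, 2, 4])   # explicit indices
conds, names = importConditions('conds.xlsx', returnFieldNames=True)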
def test_conditional_formatting_read(datadir):
    datadir.chdir()
    reference_file = 'conditional-formatting.xlsx'
    wb = load_workbook(reference_file)
    ws = wb.active
    rules = ws.conditional_formatting
    assert len(rules) == 30

    # First test the conditional formatting rules read
    rule = rules['A1:A1048576'][0]
    assert dict(rule) == {'priority': '30', 'type': 'colorScale'}

    rule = rules['B1:B10'][0]
    assert dict(rule) == {'priority': '29', 'type': 'colorScale'}

    rule = rules['C1:C10'][0]
    assert dict(rule) == {'priority': '28', 'type': 'colorScale'}

    rule = rules['D1:D10'][0]
    assert dict(rule) == {'priority': '27', 'type': 'colorScale'}

    rule = rules['E1:E10'][0]
    assert dict(rule) == {'priority': '26', 'type': 'colorScale'}

    rule = rules['F1:F10'][0]
    assert dict(rule) == {'priority': '25', 'type': 'colorScale'}

    rule = rules['G1:G10'][0]
    assert dict(rule) == {'priority': '24', 'type': 'colorScale'}

    rule = rules['H1:H10'][0]
    assert dict(rule) == {'priority': '23', 'type': 'colorScale'}

    rule = rules['I1:I10'][0]
    assert dict(rule) == {'priority': '22', 'type': 'colorScale'}

    rule = rules['J1:J10'][0]
    assert dict(rule) == {'priority': '21', 'type': 'colorScale'}

    rule = rules['K1:K10'][0]
    assert dict(rule) == {'priority': '20', 'type': 'dataBar'}

    rule = rules['L1:L10'][0]
    assert dict(rule) == {'priority': '19', 'type': 'dataBar'}

    rule = rules['M1:M10'][0]
    assert dict(rule) == {'priority': '18', 'type': 'dataBar'}

    rule = rules['N1:N10'][0]
    assert dict(rule) == {'priority': '17', 'type': 'iconSet'}

    rule = rules['O1:O10'][0]
    assert dict(rule) == {'priority': '16', 'type': 'iconSet'}

    rule = rules['P1:P10'][0]
    assert dict(rule) == {'priority': '15', 'type': 'iconSet'}

    rule = rules['Q1:Q10'][0]
    assert dict(rule) == {'text': '3', 'priority': '14', 'dxfId': '27',
                          'operator': 'containsText', 'type': 'containsText'}
    assert rule.dxf == DifferentialStyle(font=Font(color='FF9C0006'),
                                         fill=PatternFill(bgColor='FFFFC7CE'))

    rule = rules['R1:R10'][0]
    assert dict(rule) == {'operator': 'between', 'dxfId': '26',
                          'type': 'cellIs', 'priority': '13'}
    assert rule.dxf == DifferentialStyle(font=Font(color='FF9C6500'),
                                         fill=PatternFill(bgColor='FFFFEB9C'))

    rule = rules['S1:S10'][0]
    assert dict(rule) == {'priority': '12', 'dxfId': '25', 'percent': '1',
                          'type': 'top10', 'rank': '10'}

    rule = rules['T1:T10'][0]
    assert dict(rule) == {'priority': '11', 'dxfId': '24', 'type': 'top10',
                          'rank': '4', 'bottom': '1'}

    rule = rules['U1:U10'][0]
    assert dict(rule) == {'priority': '10', 'dxfId': '23',
                          'type': 'aboveAverage'}

    rule = rules['V1:V10'][0]
    assert dict(rule) == {'aboveAverage': '0', 'dxfId': '22',
                          'type': 'aboveAverage', 'priority': '9'}

    rule = rules['W1:W10'][0]
    assert dict(rule) == {'priority': '8', 'dxfId': '21',
                          'type': 'aboveAverage', 'equalAverage': '1'}

    rule = rules['X1:X10'][0]
    assert dict(rule) == {'aboveAverage': '0', 'dxfId': '20', 'priority': '7',
                          'type': 'aboveAverage', 'equalAverage': '1'}

    rule = rules['Y1:Y10'][0]
    assert dict(rule) == {'priority': '6', 'dxfId': '19',
                          'type': 'aboveAverage', 'stdDev': '1'}

    rule = rules['Z1:Z10'][0]
    assert dict(rule) == {'aboveAverage': '0', 'dxfId': '18',
                          'type': 'aboveAverage', 'stdDev': '1',
                          'priority': '5'}
    assert rule.dxf == DifferentialStyle(
        font=Font(b=True, i=True, color='FF9C0006'),
        fill=PatternFill(bgColor='FFFFC7CE'),
        border=Border(left=Side(style='thin', color=Color(theme=5)),
                      right=Side(style='thin', color=Color(theme=5)),
                      top=Side(style='thin', color=Color(theme=5)),
                      bottom=Side(style='thin', color=Color(theme=5))))

    rule = rules['AA1:AA10'][0]
    assert dict(rule) == {'priority': '4', 'dxfId': '17',
                          'type': 'aboveAverage', 'stdDev': '2'}

    rule = rules['AB1:AB10'][0]
    assert dict(rule) == {'priority': '3', 'dxfId': '16',
                          'type': 'duplicateValues'}

    rule = rules['AC1:AC10'][0]
    assert dict(rule) == {'priority': '2', 'dxfId': '15',
                          'type': 'uniqueValues'}

    rule = rules['AD1:AD10'][0]
    assert dict(rule) == {'priority': '1', 'dxfId': '14',
                          'type': 'expression'}
def get_sheet_names(fpath):
    wb = load_workbook(fpath, read_only=True)
    return wb.get_sheet_names()
def main(args: Namespace):
    def get_result_name(file_name: str):
        return file_name.replace('.xlsx', '_new.xlsx')

    def remove_rows(ws, info_list: list):
        to_minus = 0
        for info in info_list:
            ws.delete_rows(info['index'] - to_minus, info['count'])
            to_minus += info['count']

    def find_duplication(ws, col: int = 0, skip_row: int = 1):
        rows_to_remove = []
        count = -1
        start_row = 1 + skip_row
        rows = ws.iter_rows(min_row=start_row)
        for index, row in enumerate(rows):
            if count == -1:
                count = 0
                continue
            last_row = ws[index + skip_row]
            if last_row[col].value != row[col].value:
                if count > 0:
                    # remove duplication
                    rows_to_remove.append({
                        'index': index - count + skip_row,
                        'count': count
                    })
                    # reset count
                    count = 0
                    print(f'Found \'{last_row[col].value}\'')
            else:
                try:
                    last = ws[index + skip_row + 2]
                    count += 1
                except IndexError:
                    if count > 0:
                        # remove duplication
                        rows_to_remove.append({
                            'index': index - count + skip_row,
                            'count': count
                        })
                        # reset count
                        count = 0
                        print(f'Found \'{last_row[col].value}\'')
        return rows_to_remove

    # parse arguments
    file_name = args.file_name
    result_name = get_result_name(file_name) if args.result_name is None else args.result_name
    skip_rows = args.skip_rows
    if len(args.column) == 1:
        # TODO: support for column AA-AZ...
        column = ord(args.column) - ord('A')
    else:
        print('Column name length must be 1')
        return

    # load worksheet
    print(f'Loading file {file_name}...')
    wb = load_workbook(filename=file_name)
    ws = wb.active

    print(f'Finding duplication in column {args.column}...\n')
    rows_to_remove = find_duplication(ws, col=column, skip_row=skip_rows)

    print(f'\nReducing {len(rows_to_remove)} groups...')
    remove_rows(ws, info_list=rows_to_remove)

    print(f'Saving result file to {result_name}...')
    wb.save(result_name)
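# A hypothetical invocation of the dedup script above: a Namespace shaped the
# way an argparse front end would presumably build it (the attribute values
# are assumptions; only the attribute names come from main() itself).
from argparse import Namespace

main(Namespace(file_name='data.xlsx',   # source workbook
               result_name=None,        # default: data_new.xlsx
               skip_rows=1,             # header rows to skip before comparing
               column='A'))             # single-letter column to dedup on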
def test_open_many_sheets():
    src = os.path.join(DATADIR, "reader", "bigfoot.xlsx")
    wb = load_workbook(src, True)  # second positional arg: use_iterators
    assert len(wb.worksheets) == 1024
def test_calculate_dimension(datadir):
    datadir.join("genuine").chdir()
    wb = load_workbook("empty.xlsx", use_iterators=True)
    sheet2 = wb.get_sheet_by_name('Sheet2 - Numbers')
    dimensions = sheet2.calculate_dimension()
    assert '%s%s:%s%s' % ('D', 1, 'AA', 30) == dimensions
def test_read_general_style(datadir, cell, number_format):
    datadir.join("genuine").chdir()
    wb = load_workbook('empty-with-styles.xlsx')
    ws = wb["Sheet1"]
    assert ws[cell].number_format == number_format
def _open_wb(self, data_only=False):
    return load_workbook(filename=self.workbook_name,
                         use_iterators=True,
                         data_only=data_only)
def test_open_many_sheets(datadir):
    datadir.join("reader").chdir()
    wb = load_workbook("bigfoot.xlsx", True)  # second positional arg: use_iterators
    assert len(wb.worksheets) == 1024
def test_read_workbook_with_no_properties():
    genuine_wb = os.path.join(DATADIR, 'genuine',
                              'empty_with_no_properties.xlsx')
    wb = load_workbook(filename=genuine_wb)
def test_get_highest_row(datadir):
    datadir.join("genuine").chdir()
    wb = load_workbook("empty.xlsx", use_iterators=True)
    sheet2 = wb.get_sheet_by_name('Sheet2 - Numbers')
    max_row = sheet2.get_highest_row()
    assert 30 == max_row
# -*- coding: utf-8 -*-
'''
Generate the Python dict from an xlsx file.
Only the first and the second row of the XLSX file are used.
'''
import os
import sys

from openpyxl.reader.excel import load_workbook

from .base_crud import XLSX_FILE, FILTER_COLUMNS, INPUT_ARR

if os.path.exists(XLSX_FILE):
    WORK_BOOK = load_workbook(filename=XLSX_FILE)
else:
    print('There must be at least one XLSX file.')
    sys.exit(0)


def __write_filter_dic(wk_sheet, column):
    '''
    return filter dic for certain column
    :param wk_sheet:
    :param column:
    :return:
    '''
    row1_val = wk_sheet['{0}1'.format(column)].value
    row2_val = wk_sheet['{0}2'.format(column)].value
    if row1_val and row1_val.strip() != '':
        row2_val = row2_val.strip()
        # (fragment: these lines sit inside the try block of a checker
        # function, presumably youtubeChecker, whose opening lines were cut off)
        response = urllib.urlopen(checkUrl)
        status = response.getcode()
        if status == 200:
            return True
        else:
            return False
    except:
        return False


def checkVideoLink(videoLink):
    videoID = videoLink.split('/')[-1]
    return youtubeChecker(videoID)


if __name__ == "__main__":
    wb = load_workbook('enki-video.xlsx')
    ws = wb.active
    for index in range(1, ws.get_highest_row() + 1):
        videoLink = ws.cell(row=index, column=2).value
        ws.cell(row=index, column=3).value = checkVideoLink(videoLink)
        print index, videoLink
    wb.save('enki-video.xlsx')

    # wb = load_workbook('enki-video.xlsx')
    # ws = wb.active
    # f = open('enki-video-error-link.txt', a)
    # for index in range(1, ws.get_highest_row() + 1):
    #     status = ws.cell(row=index, column=3).value
    #     link = ws.cell(row=index, column=1).value
    #     if status == 'False':
    #         f.write('http://www.enkivillage.com/%s.html' % (link))
    # f.close()
def test_calculate_dimension_iter():
    path = os.path.join(DATADIR, 'genuine', 'empty.xlsx')
    wb = load_workbook(filename=path, use_iterators=True)
    sheet2 = wb.get_sheet_by_name('Sheet2 - Numbers')
    dimensions = sheet2.calculate_dimension()
    eq_('%s%s:%s%s' % ('D', 1, 'K', 30), dimensions)
    # (fragment: continues an enclosing function that builds dic_assignee)
    if row[assignee_col] not in dic_assignee.keys():
        dic_assignee[row[assignee_col]] = []
    dic_assignee[row[assignee_col]].append(row[issue_id_col])

# build a list of the stories
for row in data:
    if row == header:
        continue
    dic_story_map[row[issue_key_col]] = row
    if parent_id_col != None:
        list_stories(row[issue_key_col], row[issue_id_col], row[parent_id_col])
    row_id += 1

# generates the output file
output_file = load_workbook('Sprint_load.xlsx')
sheet = output_file.active
row_max = sheet.max_row
col_max = sheet.max_column

# checks for existing lines and updates them (specifically useful for update)
for key in dic_story_map:
    c = 13
    type = dic_story_map[key][issue_type_col]
    if type != 'Sub-task':
        if row_max >= 13:
            for i in range(14, row_max + 2):
                c = i
                issue_key = sheet.cell(row=i, column=1).value
                if issue_key != None:
                    if issue_key == key:
def __init__(self, filename):
    self.workbook = load_workbook(filename=filename)
def test_read_standard_workbook():
    path = os.path.join(DATADIR, 'genuine', 'empty.xlsx')
    wb = load_workbook(path)
    assert isinstance(wb, Workbook)
import openpyxl
from openpyxl.reader.excel import load_workbook

# EXTRACTING
wb = load_workbook('Extract.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
for i in range(1, 4):
    print (sheet['A' + str(i)].value)
print (sheet.cell(row=3, column=1).value)
a = sheet.cell(row=3, column=1).value
print a
b = sheet.cell(row=3, column=1)
print b.value

# PUTTING
book = load_workbook('Put.xlsx')
s = book.get_sheet_by_name('Sheet1')
s['A1'] = 4
s.cell(row=1, column=2).value = 5
s.cell(row=1, column=3).value = b.value
s.cell(row=1, column=5).value = sheet.cell(row=3, column=1).value
for i in range(0, 3):
    s.cell(row=i + 1, column=5).value = sheet.cell(row=i, column=1).value