def carregar_registros(nome_arq):
    """Load records from the first sheet of an ODS file.

    Row 0 supplies the column titles; a trailing ``*`` marks a title as
    principal (the bare title is appended to the module-level PRINCIPAL
    list).  Every following row whose first cell is non-empty becomes a
    dict mapping each title to the cell's plain text.
    """
    planilha = ezodf.opendoc(nome_arq).sheets[0]

    # Read the title row.
    nomes = []
    for celula in planilha.row(0):
        titulo = celula.value
        if not titulo:
            continue
        titulo = titulo.strip()
        if titulo.endswith("*"):
            titulo = titulo[:-1]
            PRINCIPAL.append(titulo)
        nomes.append(titulo)

    # Build one dict per data row, keyed by the titles read above.
    registros = []
    for indice in range(1, planilha.nrows()):
        linha = planilha.row(indice)
        if linha[0].value is None:
            continue
        registro = {}
        for posicao, celula in enumerate(linha):
            if posicao < len(nomes):
                registro[nomes[posicao]] = celula.plaintext()
        registros.append(registro)
    return registros
def _build_df(self, file_pointer):
    """Read the first sheet of the ODS file behind *file_pointer* into a
    pandas DataFrame.

    Raises TooBigTableError when the sheet exceeds MAX_COLS / MAX_ROWS.
    """
    workbook = ezodf.opendoc(file_pointer.name)
    first_sheet = workbook.sheets[0]
    too_wide = first_sheet.ncols() > MAX_COLS
    too_tall = first_sheet.nrows() > MAX_ROWS
    if too_wide or too_tall:
        raise TooBigTableError("Too many columns or rows")
    table = []
    for row in first_sheet.rows():
        table.append([cell.value for cell in row])
    return {"dataframe": pd.DataFrame(table)}
def init_spreadsheet(self):
    """Open the source spreadsheet and consume its header row.

    Stores the row iterator of the first sheet in ``self.rows`` and the
    lower-cased header (first ``self.nr_cols`` cells) in ``self.header``.
    """
    spreadsheet = ezodf.opendoc(self.settings.src)
    sheet = spreadsheet.sheets[0]
    self.rows = sheet.rows()
    # BUG FIX: iterator.next() is Python-2-only; the next() builtin works
    # on Python 2.6+ and 3.x alike.
    self.header = [
        c.value.lower() if hasattr(c.value, "lower") else c.value
        for c in next(self.rows)[: self.nr_cols]
    ]
def openfile(fname):
    """Run search-template queries built from spreadsheet rows 1-4.

    Each line of search.txt is a template whose words containing '*',
    '#', '!' or '<' are replaced by the encoded values of columns 0, 1,
    3 and 4 of the current row; the first query that google() answers
    "Found" stops the search for that row.
    """
    spreadsheet = ezodf.opendoc(fname)
    sheet = spreadsheet.sheets[0]
    # IMPROVED: read the templates once instead of reopening search.txt
    # for every row (behavior-identical as long as the file is static).
    with open('search.txt', 'r') as f:
        templates = f.readlines()
    for row in range(1, 5):  # NOTE(review): row range is hard-coded — confirm
        for line in templates:
            newline = ""
            splitline = line.split(" ")
            # Only query rows that have data in at least one key column.
            # BUG FIX: use `is not None` instead of `!= None`.
            if (sheet[row, 0].value is not None
                    or sheet[row, 1].value is not None
                    or sheet[row, 3].value is not None):
                for word in splitline:
                    if '*' in word:
                        newline = newline + " " + encode(sheet[row, 0].value)
                    elif '#' in word:
                        newline = newline + " " + encode(sheet[row, 1].value)
                    elif '!' in word:
                        newline = newline + " " + encode(sheet[row, 3].value)
                    elif '<' in word:
                        newline = newline + " " + encode(sheet[row, 4].value)
                    else:
                        newline = newline + " " + word
                response = google(newline)
                if response == "Found":
                    break
def write_data_to_ods_template(fname, oname='-1', tname="./spreadsheet_data_template.ods"):
    """Write parsed acceleration/controller data into an ODS template.

    fname: input data file, parsed by parse_data_to_list().
    oname: output .ods path; '-1' (default) means a timestamped name.
    tname: template spreadsheet to fill; result is saved with saveas().

    Column 0 is a row index, columns 1..6 hold the data series, and three
    extra columns carry spreadsheet formulas derived from the data.
    """
    if oname == '-1':
        from time import strftime
        oname = './%s.ods'%(strftime('%Y%m%d-%H%M%S'))
    import ezodf ## requires ezodf (pip install ezodf)
    from string import ascii_uppercase as letters ## requires string to reformat duple to ascii
    data_matrix = parse_data_to_list(fname) ## in format of Ax, Ay, Az, Xo, S2, S4
    data_header = [fname,"Acceleration (x)","Acceleration (y)","Acceleration (z)","PID Output Signal (x)","Speed Controller 2","Speed Controller 4"]
    derived_header = ["PID Output Signal (x) (1000)","Speed Controller 2 (1/10000)","Speed Controller 4 (1/10000)"]
    # ODF formula templates; %s is filled with a cell coordinate like "E2".
    formula_list = ['of:=[.%s]*1000','of:=[.%s]/10000','of:=[.%s]/10000']
    data_range = range(1,len(data_matrix[0])+1) ## Assumes a rectangular matrix of form n x m (returning m value iterator)
    header_range = range(len(data_header))
    derived_header_range = range(len(data_header),len(data_header)+len(derived_header))
    spreadsheet = ezodf.opendoc(tname)
    target_sheet = spreadsheet.sheets['Sheet1']
    # Row 0: plain headers followed by the derived-column headers.
    for column_number in header_range:
        target_sheet[(0,column_number)].set_value(data_header[column_number])
    for column_number in derived_header_range:
        target_sheet[(0,column_number)].set_value(derived_header[column_number-len(data_header)])
    for row_number in data_range:
        # Data columns start at 1; column 0 is reserved for the row index.
        for column_number in header_range[:-1]:
            try:
                target_sheet[(row_number,column_number+1)].set_value(float(data_matrix[column_number][row_number-1]))
            except IndexError:
                # Template ran out of rows: grow the sheet and retry once.
                target_sheet.append_rows()
                target_sheet[(row_number,column_number+1)].set_value(float(data_matrix[column_number][row_number-1]))
        target_sheet[(row_number,0)].set_value(row_number)
        # NOTE(review): relies on column_number keaking out of the loop
        # above (last data column) to position the formula columns.
        for temp_column_number,formula_string in enumerate(formula_list):
            target_sheet[(row_number,column_number+2+temp_column_number)].formula=formula_string%(ascii_coordinate_from_duple((row_number+1,column_number+temp_column_number-1),letters))
    spreadsheet.saveas(oname)
def convert(ods_file, sheets=None):
    """Convert the first sheet of *ods_file* to a sortable HTML table
    (written next to the source with an .html extension)."""
    spreadsheet = ezodf.opendoc(ods_file)
    sheets = [s.name for s in spreadsheet.sheets]
    table = spreadsheet.sheets[sheets[0]]  # temporary, remove
    rows = list(table.rows())
    reader = [[c.value for c in r] for r in rows]
    htmlfile = open(str(ods_file).replace('ods', 'html'), 'w')
    try:
        htmlfile.write('<meta charset="utf-8" />')
        htmlfile.write('<script src="sorttable.js"></script>')
        htmlfile.write('<table class="sortable" border="1" style="width:100%">\n')
        for row in reader:
            # BUG FIX: each row must OPEN with <tr>; it previously wrote a
            # closing </tr> here, producing malformed HTML.
            htmlfile.write('<tr>\n')
            for column in row:
                print(column)
                if column is None:
                    column = str(None)
                elif isinstance(column, float):
                    # BUG FIX: float cells were rendered as "None",
                    # silently discarding all numeric data.
                    column = str(column)
                else:
                    column = column.encode('utf-8')
                htmlfile.write('<th>' + column + '</th>\n')
            htmlfile.write('</tr>\n')
        htmlfile.write('</table>\n')
        print("Done!")
    finally:
        # Ensure the output file is closed even if a write fails.
        htmlfile.close()
def open(self, source, encoding, loader):
    """Close any open book, then load *source* through *loader* and open
    it as an ODF workbook, selecting the configured sheet index."""
    self.close()
    self.__loader = loader
    self.__bytes = loader.load(source, encoding, mode='b', allow_zip=True)
    raw = self.__bytes.read()
    self.__book = ezodf.opendoc(BytesIO(raw))
    self.__sheet = self.__book.sheets[self.__index]
    self.reset()
def array(ods, sheet):
    '''Converts individual .ods spreadsheet sheet to an array.

    Every cell value is stringified; values ending in ".0" (whole-number
    floats) are rendered without the trailing ".0".
    '''
    table = ezodf.opendoc(ods).sheets[sheet]
    result = []
    for row in table.rows():
        cells = []
        for cell in row:
            text = str(cell.value)
            if text.endswith('.0'):
                text = text[:-2]
            cells.append(text)
        result.append(cells)
    return result
def try_parse_ods(file_path):
    """Open an ODS document, flatten its sheet to CSV text and hand it to
    the CSV cleaner for parsing into a DataFrame."""
    document = ezodf.opendoc(file_path)
    sheet = get_sheet(document.sheets)
    csv_text = convert_odf_to_csv(sheet)
    return csv_cleaner.try_to_parse_csv(raw_text=csv_text)
def calculo(self): ext=self.kwargs["filename"].split(".")[-1] if ext=="ods": spreadsheet = ezodf.opendoc(self.kwargs["filename"]) self.sheets=[name for name in spreadsheet.sheets.names()] if self.kwargs["datamap"]: for data in self.kwargs["datamap"]: entity=self.kwargs["project"].getObject(data["entity"]) sheet=spreadsheet.sheets[data["sheet"]] indProp=entity.propertiesTitle().index(data["property"]) if entity.propertiesUnit()[indProp]==str: value=entity.__getattribute__(entity.propertiesAttribute()[indProp]) else: indUnit=entity.propertiesUnit()[indProp].__text__.index(data["unit"]) units=entity.propertiesUnit()[indProp].__units__ value=entity.__getattribute__(entity.propertiesAttribute()[indProp]).__getattribute__(units[indUnit]) #Chequear celda=list(data["cell"]) column=[] while celda[0] in string.uppercase: column.append(celda.pop(0)) base=len(string.uppercase) exponente=0 columna=0 while column: ordinal=ord(column.pop())-64 columna+=ordinal*base**exponente exponente+=1 fila=int("".join(celda)) if fila>sheet.nrows(): sheet.append_rows(fila-sheet.nrows()) if columna>sheet.ncols(): sheet.append_columns(columna-sheet.ncols()) sheet[data["cell"]].set_value(value) spreadsheet.save() elif ext=="xlsx": spreadsheet = openpyxl.load_workbook(self.kwargs["filename"]) self.sheets=spreadsheet.get_sheet_names() if self.kwargs["datamap"]: for data in self.kwargs["datamap"]: entity=self.kwargs["project"].getObject(data["entity"]) sheet=spreadsheet[data["sheet"]] indProp=entity.propertiesTitle().index(data["property"]) if entity.propertiesUnit()[indProp]==str: value=entity.__getattribute__(entity.propertiesAttribute()[indProp]) else: indUnit=entity.propertiesUnit()[indProp].__text__.index(data["unit"]) units=entity.propertiesUnit()[indProp].__units__ value=entity.__getattribute__(entity.propertiesAttribute()[indProp]).__getattribute__(units[indUnit]) sheet[data["cell"]] = value comentario = openpyxl.comments.Comment("{0[entity]}.{0[property]}.{0[unit]} ---> 
{0[sheet]}.{0[cell]}".format(data), 'pychemqt') sheet[data["cell"]].comment=comentario spreadsheet.save(".".join(self.kwargs["filename"].split(".")[:-1])+"-bak"+".xlsx") self.salida=[]
def changeSpreadsheet(self, path):
    """Point the widget at a new spreadsheet file and refresh the list of
    sheet names offered by the datamap delegate."""
    self.datamap.setEnabled(bool(path))
    self.changeParams("filename", str(path))
    # Clear without emitting change signals.
    self.datamap.blockSignals(True)
    self.datamap.clear()
    self.datamap.blockSignals(False)
    workbook = ezodf.opendoc(path)
    sheet_names = list(workbook.sheets.names())
    self.datamap.itemDelegateForRow(0).setItemsByIndex(3, sheet_names)
def __init__(self, file_name, sheet=None):
    """Open *file_name* and select a sheet.

    sheet: index (int) or name (str); None selects the first sheet.
    """
    self.document = opendoc(file_name)
    if sheet is None:
        self.sheet = self.document.sheets[0]
    else:
        self.sheet = self.document.sheets[sheet]
    header_values = [c.value for c in self.sheet.row(0)]
    self.Value = self.generate_value_class(header_values)
def print_headings(filename):
    """ Print all <text:h> elements of an ODF-Text document. """
    doc = ezodf.opendoc(filename)
    if doc.doctype != 'odt':
        print('Need a text document to print headings.\n')
        return
    # Number headings sequentially; '>' repeated per outline level.
    for count, heading in enumerate(doc.body.filter('Heading'), start=1):
        level = heading.outline_level
        print("H {0:03d} {1} {2}".format(count, '>'*level, heading.plaintext()))
    print('done.\n')
def sqlite(ods, sheets=None):
    '''Converts .ods into an sqlite3 database.

    One table per sheet; the first row supplies the column names, the
    remaining rows become records.  Python 2 code (print statement,
    unicode()).
    '''
    db = rename(ods, "db")
    conn = sqlite3.connect(db)
    c = conn.cursor()
    spreadsheet = ezodf.opendoc(ods)
    # Convert every sheet
    if sheets == None or len(sheets) == 0: # len == 0 for command line
        sheets = [s.name for s in spreadsheet.sheets]
    for sheet in sheets:
        table = spreadsheet.sheets[sheet]
        sheet_name = "\'"+table.name+"\'" # Prevents keyword and spacing errors
        rows = list(table.rows())
        # Unnamed header cells fall back to COLUMN_<n>.
        header = [str(column.value) if str(column.value) != 'None' else "COLUMN_"+str(i+1) for i, column in enumerate(rows[0])]
        # NOTE(review): the table name and column names are interpolated
        # directly into the SQL text — safe only for trusted sheet names.
        create_command = '''create table %s %s''' % (sheet_name, tuple(header))
        c.execute(create_command)
        for row in rows[1:]:
            # Floats as PK's going to be a problem?
            # Temporarily converting for quotes app
            # https://docs.python.org/2/library/sqlite3.html#introduction
            # http://pythonhosted.org/ezodf/tableobjects.html#cell-class
            t = tuple([int(cell.value) if cell.value_type=='float' else unicode(cell.value) for cell in row])
            print t[0]
            marks = ', '.join(list(len(header)*"?"))
            command = '''insert into %s values (%s)''' % (sheet_name, marks)
            c.execute(command, t)
    conn.commit()
    c.close()
def open(self, path):
    """Open the workbook at *path*, select the configured worksheet (or
    the first sheet when none is configured) and return its dimensions
    as (row count, column count)."""
    self._path = path
    # Expand all tables while reading, then restore the default strategy.
    ezodf.config.set_table_expand_strategy('all')
    self._workbook = ezodf.opendoc(self._path)
    ezodf.config.reset_table_expand_strategy()
    if not self.settings.worksheet:
        self.settings.worksheet = self._workbook.sheets[0].name
    self._worksheet = self._workbook.sheets[self.settings.worksheet]
    dimensions = (self._worksheet.nrows(), self._worksheet.ncols())
    return dimensions
def ods_ezodf(fp):
    """Read and convert a ods file to JSON format using the ezodf library

    :param fp: File pointer object
    :return: tuple of table headers and data
    """
    workbook = ezodf.opendoc(fp.name)
    first_sheet = workbook.sheets[0]
    list_data = []
    for row in first_sheet.rows():
        list_data.append([cell.value for cell in row])
    header = header_population(list_data[0])
    data = data_population(list_data)
    return header, data
def get_stats():
    """Scrape activity stats from ZenMiner and write them into the
    logbook spreadsheet, always attempting a clean browser shutdown."""
    driver = None
    try:
        driver = webdriver.Chrome()
        logging.basicConfig(filename='zenslogbook.log',
                            format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
        driver.set_page_load_timeout(30)
        ezodf.config.set_table_expand_strategy('all')
        myspreadsheet = ezodf.opendoc(zenlogbook_settings.SPREADSHEETNAME)
        login(driver, hashlette_settings.ZENMINER_USERNAME,
              zenlogbook_settings.ZENMINER_PASSWORD)
        whatstopdate = get_stopdate(zenlogbook_settings.STOPDATE, myspreadsheet,
                                    zenlogbook_settings.SPREADSHEET_KEY)
        scraped_array = get_activitystats(driver, whatstopdate)
        write_stats(scraped_array, whatstopdate, myspreadsheet,
                    zenlogbook_settings.SPREADSHEET_KEY)
        cleanup_exit(driver)
    except Exception:
        # BUG FIX: was a bare `except:` that also called
        # cleanup_exit(driver) unconditionally — if webdriver.Chrome()
        # itself failed, `driver` was unbound and that raised NameError.
        # Log the full traceback and only clean up an actual driver.
        logging.exception("We have a problem jim")
        if driver is not None:
            cleanup_exit(driver)
def test_simple_variables_integeational(self): # {{{2
    """Integration-style check of SimpleVariables: read a declared
    variable, overwrite it with a string and a number, and verify the
    stored value and the inferred type."""
    doc = opendoc("tests/data/variables.odt")
    self.assertTrue(isinstance(doc.body.variables, GenericWrapper))
    self.assertTrue(isinstance(doc.body.variables, SimpleVariables))
    self.assertEqual(doc.body.variables.xmlnode.tag, CN('text:variable-decls'))
    # The fixture declares 'simple1' with its own name as the value.
    self.assertEqual(doc.body.variables['simple1'].value, "simple1")
    # String assignment round-trips unchanged.
    doc.body.variables['simple1'] = 'test123'
    self.assertEqual(doc.body.variables['simple1'].value, "test123")
    # Numeric assignment is stored with type "float".
    doc.body.variables['simple1'] = 1
    self.assertEqual(doc.body.variables['simple1'].value, 1)
    self.assertEqual(doc.body.variables['simple1'].type, "float")
def test_user_fields_integeational(self): # {{{2
    """Integration-style check of UserFields: read a declared user
    field, overwrite it with a string and a number, and verify the
    stored value and the inferred type."""
    doc = opendoc("tests/data/variables.odt")
    self.assertTrue(isinstance(doc.body.userfields, GenericWrapper))
    self.assertTrue(isinstance(doc.body.userfields, UserFields))
    self.assertEqual(doc.body.userfields.xmlnode.tag, CN('text:user-field-decls'))
    # The fixture declares 'user_field1' with a '_copy'-suffixed value.
    self.assertEqual(doc.body.userfields['user_field1'].value, "user_field1_copy")
    # String assignment round-trips unchanged.
    doc.body.userfields['user_field1'] = 'test123'
    self.assertEqual(doc.body.userfields['user_field1'].value, "test123")
    # Numeric assignment is stored with type "float".
    doc.body.userfields['user_field1'] = 1
    self.assertEqual(doc.body.userfields['user_field1'].value, 1)
    self.assertEqual(doc.body.userfields['user_field1'].type, "float")
def main(odf_path):
    """Print every sheet of *odf_path* as a Markdown-style pipe table,
    padding each column to its widest cell."""
    ods = ezodf.opendoc(odf_path)
    for sheet in ods.sheets:
        print('##', sheet.name)
        # Widest display width per column, used to pad cells below.
        column_widths = [max(display_len(display_text(cell)) for cell in column)
                         for column in sheet.columns()]
        for n, row in enumerate(sheet.rows()):
            print('|', end=' ')
            for m, cell in enumerate(row):
                content = display_text(cell)
                # Pad by display width rather than len() — presumably
                # display_len accounts for wide characters; confirm.
                disp_len = column_widths[m] + len(content) - display_len(content)
                print('{0:<{1}}'.format(content, disp_len), end=' | ')
            print()
            if n == 0:
                # Emit the header/body separator after the first row.
                print('|', end='')
                for w in column_widths:
                    print(':', '-' * (w+1), '|', sep='', end='')
                print()
def main(argv):
    """Extract company names and highlight keywords from the ODS file
    named in argv[1] and write them tab-separated under data/Topsectoren/."""
    # open excel file with Trefwoorden --
    doc = ezodf.opendoc(argv[1])
    print('opening excel file and defining column- and row values...')
    for sheet in doc.sheets:
        lines_list = []  # NOTE(review): never used — candidate for removal
        companies = []
        highlights = []
        for row in sheet.rows():
            row_list = []
            for cell in row:
                row_list.append(cell.value)
            # companies come from column 1, highlight text from column 2
            companies.append(row_list[1])
            highlights.append(row_list[2])
        # Drop the header row and the last two rows.
        companies = companies[1:-2]
        highlights = highlights[1:-2]
        highlight = []
        matching = []
        for i in highlights:
            i = i.lower()
            words = i.strip().split()
            words, query_words = remove_punctuation(words)
            highlight.append(words)
            matching.append(query_words)
        combined = list(zip(companies, highlight, matching))
        print(combined[:10])
        # Output name reuses the input name minus its 4-char extension.
        with open('data/Topsectoren/' + '{0}.txt'.format(argv[1][:-4]), 'w') as f:
            for line in combined:
                f.write(str(line[0]) + '\t')
                for word in line[1]:
                    f.write(str(word) + ' ')
                f.write('\t')
                for word2 in line[2]:
                    f.write(str(word2) + ' ')
                f.write('\n')
            print('Done')
            f.close()  # NOTE(review): redundant inside the `with` block
def save_ans(answers, r, ide, division, zonesDict):
    '''Append one answer record as a new row of the 'conteig' sheet.

    ide encodes "<zone>_<dx>_<dy>"; counts come from the *answers* dict.
    '''
    doc = ezodf.opendoc(ods)
    sheet = doc.sheets['conteig']
    nr = sheet.nrows()
    sheet.append_rows(1)
    parts = ide.split('_')
    c_districte = int(parts[0][:-2])
    n_districte = zonesDict[int(parts[0])][1]
    zona = parts[0]
    dx = int(parts[1])
    dy = int(parts[2])
    #sheet[nr, 0].set_value(districte)
    # Columns that are always written.
    always = [
        (1, c_districte), (2, n_districte), (3, zona), (4, dx), (5, dy),
        (6, r), (7, answers.get('homes')), (8, answers.get('dones')),
        (9, answers.get('no se sap')), (10, answers.get('animals')),
    ]
    for col, value in always:
        sheet[nr, col].set_value(value)
    # Columns written only when the answer is present.
    optional = [(11, 'al ras'), (12, 'sota cobert'),
                (13, 'dins cotxe'), (14, 'dins caixer')]
    for col, key in optional:
        value = answers.get(key)
        if value is not None:
            sheet[nr, col].set_value(value)
    doc.save()
    return
def get_symbols_from_spreadsheet():
    """
    Parse the spreadsheet and get symbols topology description. Order of
    iteration is as follows- first sheets, then rows and finally columns.
    For each cell there is a list created that contains two elements-
    symbol name with extension and bare symbol name (which is a temporary
    solution).

    :returns: topology of symbols pages, list of lists, each for one page,
        of lists, each for one row, of symbols or Nones, for an empty
        fields.
    """
    spreadsheet = dirs.HOME_SYMBOLS_SPREADSHEET
    try:
        document = ezodf.opendoc(spreadsheet)
    except OSError:
        msg = "Symbols spreadsheet {} is not a valid file."
        _LOG.warning(msg.format(spreadsheet))
        return
    pages = []
    for sheet in document.sheets:
        page = []
        for row in range(sheet.nrows()):
            cells = []
            for col in range(sheet.ncols()):
                value = sheet[row, col].value
                # Keep only string cells whose symbol image exists.
                if isinstance(value, str) and get_symbol(value + ".png"):
                    cells.append([value + ".png", value])
                else:
                    cells.append(None)
            page.append(cells)
        pages.append(page)
    return pages
def load_from_memory(self, file_content, **keywords):
    """Open an ODS document from in-memory content.

    Relies on a patched ezodf whose opendoc() accepts the content as a
    second argument; with stock ezodf the call fails and we surface that
    as NotImplementedError.
    """
    try:
        return ezodf.opendoc(None, file_content)
    except Exception:
        # IMPROVED: narrowed from a bare `except:` so KeyboardInterrupt
        # and SystemExit propagate instead of being masked.
        raise NotImplementedError("Please use custom version of ezodf")
# Demo: append a bullet list to an existing ODT text document.
import ezodf

documento = ezodf.opendoc('arquivonovo.odt')
# Create a list of fruit names and append it to the document body.
lista = ezodf.ezlist(['Laranja', 'Maçã', 'Banana'])
documento.body.append(lista)
documento.save()
def load_from_file(self, filename, **keywords):
    """Open the ODF document stored at *filename* and return it."""
    document = ezodf.opendoc(filename)
    return document
from bottle import route, run from ezodf import opendoc import json import datetime def calc_time_to_unix(time): tt = datetime.time(int(time[2:4]), int(time[5:7]), int(time[8:10])) return tt.hour * 3600 + tt.minute * 60 + tt.second arkusz = "/home/hafron/dev/pomodoro_planner/plan_tygodnia.ods" ods = opendoc(arkusz) sheets = ods.sheets plan = sheets['Plan'] conf = sheets['Konfiguracja'] export_data = {'config': { 'pomodoro_length': calc_time_to_unix(conf['D6'].value), 'short_break_length': calc_time_to_unix(conf['D7'].value), 'long_break_length': calc_time_to_unix(conf['D9'].value), }, 'pomodoros':{}} week_days = ['pn', 'wt', 'sr', 'czw', 'pt', 'sb', 'nd'] i=2 k=0 last_end_date='' start_date='' while i <= 20: j=1 empty_combo=0 # ile pustych elementów pod rząd export_data['pomodoros'][week_days[k]] = [] while empty_combo < 2:
parser.add_argument('--additional_args', nargs="*", default=[]) parser.add_argument('--magic_prefix', default='EXPERIMENT_OUT=') parser.add_argument('--header_row', default=2, type=int) parser.add_argument('--data_column_offset', default=1, type=int) parser.add_argument('--verbose', action='store_true') parser.add_argument('--force_all', action='store_true') parser.add_argument('--first_only', action='store_true') parser.add_argument('--dry_run', action='store_true') parser.add_argument('--simulate', action='store_true') args = parser.parse_args() if not os.path.exists(args.ods): raise FileNotFoundError(args.ods) ods = ezodf.opendoc(args.ods) print("Spreadsheet contains %d sheet(s)." % len(ods.sheets)) for sheet in ods.sheets: print("-" * 40) print(" Sheet name : '%s'" % sheet.name) print("Size of Sheet : (rows=%d, cols=%d)" % (sheet.nrows(), sheet.ncols())) experiments_sheet = [ sheet for sheet in ods.sheets if sheet.name == 'experiments' ][0] rows = list(experiments_sheet.rows()) params = [cell.value for cell in rows[args.header_row]]
try: dbconn = sqlite3.connect(DATABASE) dbconn.execute("PRAGMA foreign_keys = 1") except: print(f"\n{sys.argv[0]} database error: {DATABASE}") sys.exit(1) crs = dbconn.cursor() crs.execute("DELETE FROM bm41_category") crs.execute("DELETE FROM bm41_subcategory") # # ADMINISZTRATIV védelmi intézkedések # doc = ezodf.opendoc(DATADIR + "BM-41/adminisztratív-védelmi-intézkedések.ods") sheet = doc.sheets[0] for rownum, cellist in enumerate(sheet.rows()): if rownum < 3: continue # skip header x, l1, l2, *rest = cellist[1].value.split('.') rest = [x.strip() for x in rest] desc = cellist[2].value.strip() assert x == '3' if rest[0] == '': # level2 q = "INSERT INTO bm41_category (bml1, bml2, bmdesc) VALUES (?,?,?)" crs.execute(q, (l1, l2, desc)) continue l3 = '.'.join(rest) minlevel = len([x.value for x in cellist[3:8] if x.value != 'X']) + 1 q = """INSERT INTO bm41_subcategory (bml1, bml2, bml3, minlevel1, bmdesc)
def load(cls, path):
    """Load every sheet of the document at *path*.

    Returns a single converted sheet when the document contains exactly
    one, otherwise a list of converted sheets.
    """
    document = ezodf.opendoc(path)
    converted = [cls.from_ods_sheet(s) for s in document.sheets]
    return converted[0] if len(converted) == 1 else converted
def _load_from_memory(self):
    """Parse the in-memory stream into the native ezodf workbook."""
    stream = self.file_stream
    self.native_book = ezodf.opendoc(stream)
def _load_from_file(self):
    """Open the workbook from the stored file name."""
    path = self._file_name
    self._native_book = ezodf.opendoc(path)
def _load_from_memory(self):
    """Parse the stored in-memory stream into the native ezodf workbook."""
    stream = self._file_stream
    self._native_book = ezodf.opendoc(stream)
def _read_ods_file(self, ods_file_path):
    """Open *ods_file_path* as an ODS document.

    Raises:
        FileNotFoundError: when the path does not exist (ezodf surfaces
        a missing file as a KeyError from its archive handling).
    """
    try:
        return ezodf.opendoc(ods_file_path)
    except KeyError as exc:
        # IMPROVED: chain the original error so the real cause remains
        # visible in tracebacks instead of an implicit "During handling".
        raise FileNotFoundError(
            f"Path '{ods_file_path}' does not exist.") from exc
return i return 0 # # # # # # # # # # # # # # # # # # # # Inputfile verarbeiten und Sheets anlegen # # # # # # # # # # # # # # # # # # # input_datei1 = sys.argv[1] input_datei2 = sys.argv[2] from ezodf import opendoc, Sheet doc1 = opendoc(input_datei1) kursliste = doc1.sheets['Kursliste-Settings'] odsName = "klausur1.ods" if platform.system() == 'Linux': #Datei löschen Befehl Linux os.system("copy blanko-klausur.ods " + odsName) if platform.system() == 'Windows': #Datei löschen Befehl Windows #os.system("del " + odsName) os.system("copy blanko-klausur.ods " + odsName) doc2 = opendoc(odsName) klausur = doc2.sheets['Klausur'] # # # # # # # # # # # # # # # # # # #
def load_from_memory(self, file_content, **keywords):
    """Open an ODF document directly from in-memory content."""
    document = ezodf.opendoc(file_content)
    return document
def generate_charts(csvfile, bridges): # Fill predefined ods file with collected data. # csvfile: the .csv file generated by the loopback_test application # bridges: list of targeted bridges global verbose info("\nGenerating charts...") # Build input CSV Files List (depends on target list) try: debug("Targeted bridge(s): {}".format(bridges)) debug("CSV File: {}".format(csvfile)) prefix = os.path.splitext(csvfile)[0] # remove file extension debug("prefix: {}".format(prefix)) csvfiles = {"Aggregated": "{}_agg.csv".format(prefix)} for i in range(len(bridges)): csvfiles[bridges[i]] = "{}_{}.csv".format(prefix, bridges[i]) debug("Input CSV files list: {}".format(csvfiles)) except Exception as e: err("Failed to build input CSV Files List!") if verbose: traceback.print_exc() raise e # Read input CSV Files to retrieve data try: csvfiledata = {} for k in csvfiles.keys(): csvfiledata[k] = readCSVFile(csvfiles[k]) alldata = readCSVFile(csvfile) except Exception as e: err("Failed to read input CSV Files!") debug("Key: {}. 
CSV File: {}".format(k, csvfiles[k])) if verbose: traceback.print_exc() raise e # Clone charts spreadsheet from template, and open it try: odsfilename = "{}_charts.ods".format(prefix) debug(".ods filename: {}".format(odsfilename)) shutil.copy(CHARTS_TEMPLATE, odsfilename) # Retrieve .ods file sheets spreadsheet = ezodf.opendoc(odsfilename) sheets = spreadsheet.sheets except Exception as e: err("Failed to clone charts spreadsheet from template!") if verbose: traceback.print_exc() # Save changes into file spreadsheet.save() raise e # Fill chart tables with respective data try: rowcount = len(csvfiledata["Aggregated"]) colcount = len(csvfiledata["Aggregated"][0]) debug("Row count: {}".format(rowcount)) debug("Column count: {}".format(colcount)) for r in range(1, rowcount): # first row is .csv header for k in csvfiles.keys(): src_row = csvfiledata[k][r] dest_row = r + 1 # +2 row offset in .ods file vs .csv for c in range(colcount): dest_cell = sheets[k][dest_row, c] if c < 4: src_val = src_row[c] else: src_val = int(src_row[c]) dest_cell.set_value(src_val) except Exception as e: err("Failed to fill chart tables with respective data!") debug("Row: {}. Key: {}. Col: {}. Data={}".format(r, k, c, src_row[c])) if verbose: traceback.print_exc() # Save changes into file spreadsheet.save() raise e # Remove unused sheets if len(csvfiles) != len(sheets): for s in sheets: if s.name != "Aggregated" and s.name not in bridges: debug("Removing {} sheet (not used).".format(s.name)) del sheets[s.name] # Copy all data from csvfile into last sheet # NB: add to use this method (create a new sheet and fill it) because of # some weird issue. If the sheet was already completed (like the previous # ones), code would crash, reported list index to be out of range. 
try: rowcount = len(alldata) colcount = len(alldata[0]) debug("All data Row count: {}".format(rowcount)) debug("All data Column count: {}".format(colcount)) sheets += ezodf.Sheet("Data") sheets["Data"].append_rows(rowcount) sheets["Data"].append_columns(colcount) for r in range(rowcount): for c in range(colcount): src_row = alldata[r] dest_cell = sheets["Data"][r, c] if (r == 0) or (c < 4): src_val = src_row[c] else: src_val = int(src_row[c]) dest_cell.set_value(src_val) except Exception as e: err("Failed to copy all data from csvfile into last sheet!") debug("Row: {}. Col: {}. Data={}".format(r, c, src_row[c])) if verbose: traceback.print_exc() # Save changes into file spreadsheet.save() raise e # Save changes into file spreadsheet.save() info("Completed. Charts saved into:\n {} file.".format(odsfilename))
def loadCsv(self, fileName, manualDelimiter=False): self.path = fileName try: ff = open(fileName, 'r') mytext = ff.read() # logging.debug(mytext) ff.close() self.loadCsvStr(mytext, manualDelimiter) except Exception: try: try: xl = pd.ExcelFile(fileName) # Print the sheet names logging.info(xl.sheet_names) # Load a sheet into a DataFrame by name: df1 df1 = xl.parse(xl.sheet_names[0]) except Exception: if ezodf is not None: doc = ezodf.opendoc(fileName) logging.info("Spreadsheet contains %d sheet(s)." % len(doc.sheets)) for sheet in doc.sheets: logging.info("-"*40) logging.info(" Sheet name : '%s'" % sheet.name) logging.info("Size of Sheet : (rows=%d, cols=%d)" % (sheet.nrows(), sheet.ncols())) # convert the first sheet to a pandas.DataFrame sheet = doc.sheets[0] df_dict = {} for i, row in enumerate(sheet.rows()): # row is a list of cells # assume the header is on the first row if i == 0: # columns as lists in a dictionary df_dict = {cell.value: [] for cell in row} # create index for the column headers col_index = {j: cell.value for j, cell in enumerate(row)} continue for j, cell in enumerate(row): # use header instead of column index df_dict[col_index[j]].append(cell.value) # and convert to a DataFrame df1 = pd.DataFrame(df_dict) mytext = df1.to_csv(sep='\t') self.loadCsvStr(mytext) except Exception: try: matlabfile = sio.loadmat(fileName) data = [] logging.info(matlabfile) for k in matlabfile.keys(): if k not in ['__version__', '__header__', '__globals__']: data.append(k) item, ok = pyqtlib.item_message(self, translate('RTOC', 'Matlab Import'), translate('RTOC', 'Please select an element from this file.\n') + matlabfile['__header__'].decode('utf8'), data) if ok: self.path = os.path.splitext(str(self.path))[0].split("/")[-1]+".csv" with open(self.path, 'w', newline='') as myfile: wr = csv.writer(myfile, quoting=csv.QUOTE_NONE, delimiter=' ', quotechar='|') for idx, sig in enumerate(matlabfile[item]): wr.writerow(sig) ff = open(self.path, 'r') mytext = ff.read() 
ff.close() self.loadCsvStr(mytext) except Exception: tb = traceback.format_exc() logging.debug(tb) pyqtlib.info_message(translate('RTOC', "Error"), translate('RTOC', "File {} could not be opened.").format(fileName), translate('RTOC', "This file may be damaged."))
argc = len(argvs) # prameter number if argc != 2: print_help() sys.exit() if not os.path.exists(argvs[1]): print "no source file" sys.exit() #open files infile = open(argvs[1], "r") read_line = infile.readlines() infile.close() fname = argvs[1].split('.') odt = ezodf.opendoc(filename = 'base.odt') print "delete page feed and header and footer" line_wo_lf = [] flag = 0 for line in read_line: if flag == 1: #next line of page feed flag = 0 continue if line.find('\f') > -1: #page feed line_wo_lf.pop() #delete last line of page feed flag = 1 continue line_wo_lf.append(line) print 'lines:' + str(len(line_wo_lf))
#!/usr/bin/python3
# Dump the first sheet of Kartoj_Listo.ods as a JSON array of row arrays.
import ezodf
import json

spreadsheet = ezodf.opendoc('Kartoj_Listo.ods')
sheet = spreadsheet.sheets[0]
data = [[cell.value for cell in row] for row in sheet.rows()]
print(json.dumps(data))
def get_data_app(request): if request.method == "POST" and request.user.is_authenticated( ) and request.user.username == "pisb_credenz": #complete print("Hello1") #try: doc = opendoc('userdata.ods') index = 1 for sheet in doc.sheets: for i in range( 978 ): ###############Number of rows in .ods file read the complete function random_password = random_password_generate() registration_id = sheet['A' + str(index)].value excel_count = ExcelData.objects.get(id=1) excel_count.user_id = excel_count.user_id + 1 excel_count.save() username = excel_count.user_id try: registration_id = int(registration_id) except: print registration_id print sheet['A' + str(index)].value print "suhavan1" if sheet['B' + str( index )].value == "C": ###############################################Consider while integrating event_flag = 1 else: event_flag = 0 print i print registration_id print event_flag temp = User.objects.create_user(username=username, password=random_password) temp_slot = Slot.objects.get(pk=1) temp_user = UserData.objects.create( registration_id=registration_id, event_flag=event_flag, password=random_password, user=temp, slot=temp_slot) try: temp_user.mobile_no1 = int(sheet['D' + str(index)].value) except: print "no mobile number" try: temp_user.mobile_no2 = int(sheet['G' + str(index)].value) except: print "no mobile number" try: temp_user.email1 = sheet['E' + str(index)].value except: print "no email1 id" try: temp_user.email2 = sheet['H' + str(index)].value except: print "no email2 id" try: temp_user.name1 = sheet['C' + str(index)].value except: print "no name1" try: temp_user.name2 = sheet['F' + str(index)].value except: print "no name2" temp_user.save() index = index + 1 print i print index return render( request, 'Adminpage.html', { 'message': 'Data stored in the database and random passwords have been assigned' }) #except: # return render(request,'Adminpage.html',{'message':'There is no file'}) else: return render(request, 'Adminlogin.html', {'message': 'Authentication 
failed'})
def read_ods(filename, sheet_no=0, header=0):
    """Read one sheet of an .ods file into a pandas DataFrame.

    Row *header* supplies the column names; the cells below it become
    the column data.
    """
    sheet = ezodf.opendoc(filename=filename).sheets[sheet_no]
    columns = {}
    for col in sheet.columns():
        columns[col[header].value] = [cell.value for cell in col[header + 1:]]
    return pd.DataFrame(columns)
print 'AMZN:',AMZN GRMN = eval(get_quote('grmn')) print 'GRMN:',GRMN EBAY = eval(get_quote('ebay')) print 'EBAY:',EBAY AAPL = eval(get_quote('aapl')) print 'AAPL:',AAPL IWO = eval(get_quote('iwo')) print 'IWO :',IWO DIA = eval(get_quote('dia')) print 'DIA :',DIA LNKD = eval(get_quote('lnkd')) print 'LNKD:', LNKD WFC = eval(get_quote('WFC')) print 'WFC :' ,WFC stock = ezodf.opendoc("valuation.ods") sheet = stock.sheets[0] sheet['J3'].set_value(TRMB,currency='USD') sheet['J4'].set_value(AAPL,currency='USD') sheet['J5'].set_value(AMZN,currency='USD') sheet['J6'].set_value(FB,currency='USD') sheet['J7'].set_value(CAT,currency='USD') sheet['J8'].set_value(GRMN,currency='USD') sheet['J9'].set_value(GOOG,currency='USD') sheet['J10'].set_value(EBAY,currency='USD') sheet['J11'].set_value(DIA,currency='USD') sheet['j24'].set_value(LNKD,currency='USD') sheet['j28'].set_value(WFC,currency='USD') stock.save()
def _open_odf_spreadsheet(path):
    """Open *path* as an ODF spreadsheet.

    On OSError the error is logged and None is returned (best-effort).
    """
    try:
        return ezodf.opendoc(path)
    except OSError as exc:
        _LOG.error(exc)
        return None
#!/usr/bin/env python #coding:utf-8 # Purpose: swap row/columns of a table # Created: 28.05.2012 # Copyright (C) 2012, Manfred Moitzi # License: MIT license from __future__ import unicode_literals, print_function, division __author__ = "mozman <*****@*****.**>" import ezodf # open spreadsheet document doc = ezodf.opendoc("big-test-table.ods") print("Spreadsheet contains %d sheets.\n" % len(doc.sheets)) for sheet in doc.sheets: print("Sheet name: '%s'" % sheet.name) print("Size of Sheet : (rows=%d, cols=%d)" % (sheet.nrows(), sheet.ncols())) print("-" * 40) doc.save()
def CheckPattern(ownname, FileName):
    """Build a JSON-like descriptor string for *FileName*, listing the
    names of all sheets in the ODS document."""
    doc = ezodf.opendoc(FileName)
    sheet_names = str(list(doc.sheets.names()))
    pieces = [
        '{"ScriptName" :("ods"), "TargetInfo" : ("Сценарий загрузки файла компании (ods)"), ',
        '"Sheets" : ',
        sheet_names,
        '}',
    ]
    return ''.join(pieces)
import ezodf

# Expand tables fully on load so nrows()/ncols() reflect the real extents.
ezodf.config.set_table_expand_strategy('all')
doc = ezodf.opendoc('/home/pi/Desktop/Associations.ods')
associations = doc.sheets['Feuille1']
semis = doc.sheets['Feuille2']
a_row_count = associations.nrows()
a_col_count = associations.ncols()
# Column 0 (below the header row) holds the vegetable names.
names = [associations[i, 0].value for i in range(1, a_row_count)]
s_row_count = semis.nrows()
s_col_count = semis.ncols()
vegetables = {}
# Build {name: {'good': [...], 'bad': [...]}} from the matrix: a cell value
# of 1.0 marks a good companion (column header appended to 'good').
for row in range(1, a_row_count):
    name = ""
    for col in range(a_col_count):
        if col == 0:
            name = associations[row, col].value
            t = {'good': [], 'bad': []}
            vegetables[name] = t
            continue
        val = associations[row, col].value
        if val == 1.0:
            vegetables[name]['good'].append(associations[0, col].value)
        if val == 0.0:
            # NOTE(review): source excerpt is truncated here; presumably the
            # 0.0 case appends to vegetables[name]['bad'] -- confirm.
# Regexes matching amendment phrases in Russian budget documents.
# NOTE(review): pn, cc and dnms are regex fragments, and inputDirectory plus
# the amendParagraph*Re patterns are defined earlier in the file, outside
# this excerpt.
moveDepartmentRe=re.compile(r'\s*'+pn+r' Главного распорядителя "(?P<departmentName1>.*?)" в целевой статье '+cc+r' ".*?" изменить на "(?P<departmentName2>.*?)"')
moveSectionRe=re.compile(r'\s*'+pn+r' Подраздел (?P<sectionCode1>\d{4}) ".*?" в целевой статье '+cc+r' ".*?" '+dnms+r' изменить на (?P<sectionCode2>\d{4}) ".*?"')
# don't do category name-only changes
moveCategoryCodeOnlyRe=re.compile(r'\s*'+pn+r' Код целевой статьи (?P<categoryCode1>\d{7}) ".*?" '+dnms+r' изменить на (?P<categoryCode2>\d{7})')
moveCategoryRe=re.compile(r'\s*'+pn+r' Изложить наименование целевой статьи (?P<categoryCode1>\d{7}) ".*?" '+dnms+r' в следующей редакции: ".*?" с изменением кода целевой статьи на (?P<categoryCode2>\d{7})')
moveCategoryTypeRe=re.compile(r'\s*'+pn+r' Изложить наименование целевой статьи (?P<categoryCode1>\d{7}) ".*?" '+dnms+r' в следующей редакции: ".*?" с изменением кода целевой статьи на (?P<categoryCode2>\d{7}) и с изменением(?: кода)? вида расходов (?P<typeCode1>\d{3}) ".*?" на (?P<typeCode2>\d{3}) ".*?" по (?P<departmentNamesForType>.*?)\.')
moveTypeRe=re.compile(r'\s*'+pn+r' Код вида расходов (?P<typeCode1>\d{3}) ".*?" в целевой статье '+cc+r' ".*?" '+dnms+r' изменить на (?P<typeCode2>\d{3}) ".*?"')
# (document number, appendix numbers ...) tuples driving the document scan.
for documentNumber,appendixNumberDepartmentY1,appendixNumberDepartmentY23,appendixNumberInvestment in ((
    ('3765','3','4','23'),
    ('3781','3','4','23'),
    ('4706','2','14','11'),
    ('4712','2','14','11'),
)):
    filename=inputDirectory+'/assembly/'+documentNumber+'.odt'
    doc=ezodf.opendoc(filename)
    tableWriteWatcher=None
    # Scan every paragraph line for amendment phrases.
    for obj in doc.body:
        if type(obj) is ezodf.text.Paragraph:
            # print('paragraph {')
            for line in obj.plaintext().splitlines():
                # m=paragraphRe.match(line)
                # if m:
                #     print('== paragraph',m.group('paragraphNumber'),'==')
                m=amendParagraphTextRe.match(line)
                if m:
                    # print('== amendment',m.group('paragraphNumber'),'for text ==')
                    tableWriteWatcher=None
                m=amendParagraphAppendixRe.match(line)
                if m:
                    # print('== amendment',m.group('paragraphNumber'),'for appendix',m.group('appendixNumber'),'==')
                    # NOTE(review): source excerpt is truncated here.
def get_fases(oferta):
    # Collect the distinct second elements ('fase') of the (ps, f, d)
    # triples in *oferta*.
    fases = set()
    for ps, f, d in oferta:
        fases.add(f)
    return fases


# NOTE(review): NameMap and Sheet are project classes defined outside this
# excerpt.
prof_map = NameMap()
disc_map = NameMap()
diurno_map = NameMap()
noturno_map = NameMap()
name = 'oferta.ods'
doc = ezodf.opendoc(name)
s1 = Sheet(doc.sheets[0])
s2 = Sheet(doc.sheets[1])
profs1, ofertas1 = s1.professores()
profs2, ofertas2 = s2.professores()
# Emit a commented data header (Portuguese).  NOTE(review): the source
# excerpt is truncated inside this triple-quoted string and its exact line
# layout could not be recovered from the flattened source.
print("""% Nro de dias com aula na semana
% Numero de turnos
% Para cada turno
% Nome do turno
% Numero de periodos
% Para cada periodo
% Numero de horas aula
% Numero de horarios no turno com estas horas aula
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 3 18:40:09 2018

@author: zibski
"""
import ezodf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

doc = ezodf.opendoc('innogy.ods')
sheet = doc.sheets[0]

# Build {header: [values]} from the sheet; the first row holds the headers.
df_dict = {}
for row_number, row in enumerate(sheet.rows()):
    if row_number == 0:
        df_dict = {cell.value: [] for cell in row}
        header_by_index = {position: cell.value for position, cell in enumerate(row)}
        continue
    for position, cell in enumerate(row):
        df_dict[header_by_index[position]].append(cell.value)

df = pd.DataFrame(df_dict)
# Normalize the meter reading: decimal comma -> dot, strip the unit, cast.
df['Odczyt'] = df['Odczyt'].str.replace(r',', '.')
df['Odczyt'] = df['Odczyt'].str.replace(r' kWh', '').astype('float')
# Consumption is the difference between consecutive readings.
df['Poprzedni odczyt'] = df['Odczyt'].shift(-1)
df['Zużycie'] = df['Odczyt'] - df['Poprzedni odczyt']
df['Data odczytu '] = pd.to_datetime(df['Data odczytu '], dayfirst=True)
def calculo(self):
    """Write mapped project-entity property values into a spreadsheet.

    Dispatches on the target file extension from self.kwargs["filename"]:
    'ods' is edited via ezodf and saved in place; 'xlsx' is edited via
    openpyxl and saved as a '-bak' copy; 'xls' is not implemented yet.
    kwargs consumed: filename, datamap, project.
    """
    ext = self.kwargs["filename"].split(".")[-1]
    if ext == "ods":
        self._dependence = "ezodf"
        spreadsheet = ezodf.opendoc(self.kwargs["filename"])
        self.sheets = [name for name in spreadsheet.sheets.names()]
        if self.kwargs["datamap"]:
            for data in self.kwargs["datamap"]:
                entity = self.kwargs["project"].getObject(data["entity"])
                sheet = spreadsheet.sheets[data["sheet"]]
                indProp = entity.propertiesTitle().index(data["property"])
                if entity.propertiesUnit()[indProp] == str:
                    # Plain string property: use the attribute value directly.
                    value = entity.__getattribute__(
                        entity.propertiesAttribute()[indProp])
                else:
                    # Unit-bearing property: pick the magnitude in the unit
                    # requested by the datamap.  NOTE(review): __text__ and
                    # __units__ appear to be attributes of the project's
                    # unit classes -- confirm against their definitions.
                    indUnit = entity.propertiesUnit()[indProp].__text__.index(data["unit"])
                    units = entity.propertiesUnit()[indProp].__units__
                    value = entity.__getattribute__(entity.propertiesAttribute()[indProp]).__getattribute__(units[indUnit])
                # TODO: check
                # Split the cell reference (e.g. 'AB12') into column letters
                # and row digits.
                celda = list(data["cell"])
                column = []
                while celda[0] in string.ascii_uppercase:
                    column.append(celda.pop(0))
                # Convert the column letters to a 1-based column number
                # (base-26 with A=1).
                base = len(string.ascii_uppercase)
                exponente = 0
                columna = 0
                while column:
                    ordinal = ord(column.pop())-64
                    columna += ordinal*base**exponente
                    exponente += 1
                fila = int("".join(celda))
                # Grow the sheet if the target cell lies outside it.
                if fila > sheet.nrows():
                    sheet.append_rows(fila-sheet.nrows())
                if columna > sheet.ncols():
                    sheet.append_columns(columna-sheet.ncols())
                sheet[data["cell"]].set_value(value)
            spreadsheet.save()
    elif ext == "xlsx":
        self._dependence = "openpyxl"
        spreadsheet = openpyxl.load_workbook(self.kwargs["filename"])
        self.sheets = spreadsheet.get_sheet_names()
        if self.kwargs["datamap"]:
            for data in self.kwargs["datamap"]:
                entity = self.kwargs["project"].getObject(data["entity"])
                sheet = spreadsheet[data["sheet"]]
                indProp = entity.propertiesTitle().index(data["property"])
                if entity.propertiesUnit()[indProp] == str:
                    value = entity.__getattribute__(entity.propertiesAttribute()[indProp])
                else:
                    indUnit = entity.propertiesUnit()[indProp].__text__.index(data["unit"])
                    units = entity.propertiesUnit()[indProp].__units__
                    value = entity.__getattribute__(entity.propertiesAttribute()[indProp]).__getattribute__(units[indUnit])
                sheet[data["cell"]] = value
                # Annotate the cell with the datamap entry that produced it.
                comentario = openpyxl.comments.Comment("{0[entity]}.{0[property]}.{0[unit]} ---> {0[sheet]}.{0[cell]}".format(data), 'pychemqt')
                sheet[data["cell"]].comment = comentario
            spreadsheet.save(".".join(self.kwargs["filename"].split(".")[:-1])+"-bak"+".xlsx")
    elif ext == "xls":
        # TODO: Implement old office support
        pass
    self.salida = []
import pandas as pd
import ezodf
import numpy as np

doc = ezodf.opendoc('Lista_Principal.ods')
# convert the first sheet to a pandas.DataFrame
sheet = doc.sheets[0]
df_dict = {}
for i, row in enumerate(sheet.rows()):
    # row is a list of cells
    # assume the header is on the first row
    if i == 0:
        # columns as lists in a dictionary
        df_dict = {cell.value: [] for cell in row}
        # create index for the column headers
        col_index = {j: cell.value for j, cell in enumerate(row)}
        continue
    for j, cell in enumerate(row):
        # use header instead of column index
        df_dict[col_index[j]].append(cell.value)
# and convert to a DataFrame
df = pd.DataFrame(df_dict)
# Filter for the 'Folha' and 'Senhas' rows.
filtro = df[df['Folha'] == '002']
senhas = df[df['Folha'] == 'Senhas >>>']
if len(filtro.index) > 0:
    print('Senha ', 'Valor')
    for i in filtro.index:
        # NOTE(review): source excerpt is truncated here.
def extract(source_path, sheet_no=0, header=0):
    """Read sheet *sheet_no* of the ODS file at *source_path* into a
    DataFrame, using row *header* as the column labels and the cells
    below it as the column values."""
    sheet = ezodf.opendoc(filename=source_path).sheets[sheet_no]
    data = {
        column[header].value: [cell.value for cell in column[header + 1:]]
        for column in sheet.columns()
    }
    return pd.DataFrame(data)
import numpy as np
from sklearn import preprocessing, neighbors, svm
from sklearn.model_selection import train_test_split
import pandas as pd
import ezodf

# Load the first sheet of the ODS workbook into a pandas DataFrame
# (read via ezodf because pandas has no built-in .ods reader here).
doc = ezodf.opendoc('svm_test_1.ods')
sheet = doc.sheets[0]
df_dict = {}
for i, row in enumerate(sheet.rows()):
    # The first row holds the column headers.
    if i == 0:
        # columns as lists in a dictionary
        df_dict = {cell.value: [] for cell in row}
        # index -> header name, used to address columns while filling
        col_index = {j: cell.value for j, cell in enumerate(row)}
        continue
    for j, cell in enumerate(row):
        df_dict[col_index[j]].append(cell.value)
df = pd.DataFrame(df_dict)

# Drop identifier/non-feature columns.
# FIX: pass axis as a keyword -- positional `axis` in DataFrame.drop was
# deprecated in pandas 1.4 and removed in pandas 2.0, where the original
# `df.drop([...], 1, inplace=True)` raises TypeError.
df.drop(['S.No', 'Engg Group'], axis=1, inplace=True)
import ezodf

documento = ezodf.opendoc(
    'Newswest_2018b_Submission_Deadlines_and_Publishing_Dates.odt')

# Print the text of every non-empty paragraph in the document body.
# The exact-type check (rather than isinstance) excludes Paragraph
# subclasses -- presumably intentional; confirm before changing.
for elemento in documento.body:
    es_parrafo = type(elemento) is ezodf.text.Paragraph
    if es_parrafo and elemento.text is not None:
        print(elemento.text)
import methods
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import ezodf
import pandas as pd
import re
import glob
import sys

FileName = "./optical/BeatingRate.ods"
# Try reading the workbook as Excel first; fall back to ODS via ezodf.
# NOTE(review): the bare `except:` swallows every error (including
# KeyboardInterrupt) -- narrow it to the exception pd.ExcelFile actually
# raises if possible.
try:
    xlsx = pd.ExcelFile(FileName)
    allDrugs = xlsx.sheet_names
except:
    ods = ezodf.opendoc(FileName)
    allDrugs = [a.name for a in ods.sheets]
# Row-index ranges per concentration group.
# NOTE(review): meaning inferred from the name CONC_ID -- confirm.
CONC_ID = [(1, 5), (6, 10), (11, 15), (16, 20)]
plotchange = not True
for (i, drug) in enumerate(allDrugs):
    # Columns: 0 = index, 1 = control, 2 = applied; read through pandas if
    # the Excel path succeeded, otherwise straight from the ODS cells.
    try:
        sheet = xlsx.parse(drug)
        idx = np.array(sheet)[:, 0]
        control = np.array(sheet)[:, 1]
        applied = np.array(sheet)[:, 2]
    except:
        sheet = ods.sheets[drug]
        idx = np.array([int(i[0].value) for i in sheet.rows()])
        control = np.array([i[1].value for i in sheet.rows()])
        # NOTE(review): source excerpt is truncated here.
def process_doc(self, path):
    # Parse an ODT file into (newspaper, date, title, text) articles and
    # either persist each as a model.Event or print it for review.
    # NOTE(review): legacy Python 2 code; `debug`, `persist` and `model`
    # come from module scope outside this excerpt.
    self.pre_tag = '{urn:oasis:names:tc:opendocument:xmlns:text:1.0}'
    self.p_tag = ''.join([self.pre_tag,'p'])
    self.s_tag = ''.join([self.pre_tag,'span'])
    self.page_break_tag = ''.join([self.pre_tag,'soft-page-break'])
    doc = ezodf.opendoc(path)
    if doc.doctype == 'odt':
        nodes = self._preprocess(doc)
        next_is_title = True
        count = 0  # consecutive empty paragraph/span nodes seen
        title = ''
        text = ''
        newspaper = ''
        date = None
        old_date = None
        id = None
        start_string = None
        for n in nodes:
            if debug:
                print n.tag, n.text, n.tail
            elif self.p_tag in n.tag or self.s_tag in n.tag:
                if n.text is not None and next_is_title:
                    # Heading line: try "<paper> <Weekday> <d> <Month> <Y>",
                    # then "<paper> <d> <Month> <Y>", else treat it as the
                    # article title.
                    try:
                        date = strptime(' '.join(n.text.rsplit(' ', 4)[1:]), '%A %d %B %Y')
                        newspaper = ' '.join(n.text.rsplit(' ', 4)[:1])
                    except ValueError:
                        try:
                            date = strptime(' '.join(n.text.rsplit(' ', 3)[1:]), '%d %B %Y')
                            newspaper = ' '.join(n.text.rsplit(' ', 3)[:1])
                        except ValueError:
                            if n.text not in self.ignored_headings:
                                title = n.text
                                next_is_title = False
                            else:
                                text += n.text
                                count = 0
                elif n.text is None:
                    # Empty node: two in a row terminate the current article.
                    count+=1
                    if count == 2:
                        if persist:
                            id = uuid.uuid1()
                            if date:
                                # Clamp seconds to 59 for datetime().
                                start = datetime(*date[:5]+(min(date[5], 59),))
                            else:
                                # No date parsed for this article: fall back
                                # to the previous article's date.
                                start = datetime.fromtimestamp(mktime(old_date))
                            text_field = newspaper + "\r\n" + text
                            start_string = start.strftime('%Y-%m-%d %H:%M:%S')
                            #add to db
                            ev = model.Event(id, start_string, None, title, text_field)
                            self.db.add(ev)
                        else:
                            # Review mode: print the article instead of
                            # persisting it.
                            print "#########################################################"
                            if date:
                                print newspaper, datetime(*date[:5]+(min(date[5], 59),)).strftime('%A %-d %B %Y')
                            print title
                            print text
                            print "#########################################################"
                        # Reset accumulators for the next article.
                        next_is_title = True
                        title = ''
                        text = ''
                        newspaper = ''
                        if date:
                            old_date = date
                            date = None
                    elif count == 1:
                        text += "\r\n\r\n"
                elif n.text is not None:
                    text += n.text
                    count = 0
def compare(filein1, filein2, fileout):
    """Compare two benchmark spreadsheets and produce a comparison workbook.

    1/ Copy data from input spreadsheets into comparison spreadsheet
    2/ Analyze performance differences to detect regression(s)
    3/ Return regression count

    Raises whatever exception interrupted a stage, after logging it via
    err() (and a traceback when the module-level `verbose` flag is set).
    """
    global verbose
    # FIX: the second placeholder previously printed filein1 twice.
    debug('Input Files:\n {}\n {}'.format(filein1, filein2))
    debug('Output File:\n {}'.format(fileout))
    # Open input files
    try:
        spreadsheets = {}
        sheets = {}
        spreadsheets['in1'] = ezodf.opendoc(filein1)
        sheets['in1'] = spreadsheets['in1'].sheets
        spreadsheets['in2'] = ezodf.opendoc(filein2)
        sheets['in2'] = spreadsheets['in2'].sheets
    except Exception as e:
        err("Failed to read input files!")
        if verbose:
            traceback.print_exc()
        raise e
    # Get some info from input spreadsheets and make sure we compare the same
    try:
        if len(sheets['in1']) != len(sheets['in2']):
            err('Input sheet count do not match! ({}) ({})'.format(
                len(sheets['in1']), len(sheets['in2'])))
        sheetcount = len(sheets['in1']) - 1  # discard last sheet titled 'Data'
        debug('Sheet count: {}'.format(sheetcount))
        # Count contiguous loopback-operation rows from FIRST_SPEED_ROW in
        # each input; both inputs must agree.
        speedcount = {'in1': 0, 'in2': 0}
        for i in ['in1', 'in2']:
            for r in range(FIRST_SPEED_ROW, sheets[i][0].nrows()):
                operation_cell = sheets[i][0][r, OPERATION_COL]
                if operation_cell.value not in LOOPBACK_OPS:
                    break
                else:
                    speedcount[i] += 1
        if speedcount['in1'] != speedcount['in2']:
            err('Input sheet speed count do not match! ({}) ({})'.format(
                speedcount['in1'], speedcount['in2']))
        speedcount = speedcount['in1']
        debug('Speed count: {}'.format(speedcount))
    except Exception as e:
        err("Failed to retrieve infos from input files!")
        if verbose:
            traceback.print_exc()
        raise e
    # Clone output spreadsheet from template, and open it
    try:
        shutil.copy(TEMPLATE_ODS_FILE, fileout)
        # Retrieve .ods file sheets
        spreadsheets['out'] = ezodf.opendoc(fileout)
        sheets['out'] = spreadsheets['out'].sheets
    except Exception as e:
        err("Failed to clone charts spreadsheet from template!")
        if verbose:
            traceback.print_exc()
        raise e
    # Template spreadsheet has only 1 sheet. Duplicate it to match sheetcount
    try:
        debug('\nDuplicating sheets...')
        for i in range(1, sheetcount):
            duplicate = sheets['out'][0].copy(newname=sheets['in1'][i].name)
            sheets['out'] += duplicate
            # Add input filename to data tables header
            # NOTE: redundant with the loop below, which covers every sheet;
            # kept for safety since rewriting the same cells is harmless.
            sheets['out'][i][HEADER_ROW_IN1, HEADER_COL_IN].set_value(filein1)
            sheets['out'][i][HEADER_ROW_IN2, HEADER_COL_IN].set_value(filein2)
            debug('Added {} sheet.'.format(sheets['in1'][i].name))
        for i in range(sheetcount):
            # Add input filename to data tables header
            sheets['out'][i][HEADER_ROW_IN1, HEADER_COL_IN].set_value(filein1)
            sheets['out'][i][HEADER_ROW_IN2, HEADER_COL_IN].set_value(filein2)
            sheets['out'][i][HEADER_ROW_DELTA, HEADER_ROW_DELTA_COL].set_value(
                '{} vs {}'.format(filein2, filein1))
            sheets['out'][i][HEADER_ROW_DELTA_PCT, HEADER_ROW_DELTA_COL].set_value(
                '{} vs {} (%)'.format(filein2, filein1))
    except Exception as e:
        err("Failed to duplicate sheets in output spreadsheet!")
        if verbose:
            traceback.print_exc()
        raise e
    # Fill tables with data and highlight potential regression
    try:
        debug('\nFilling tables with data...')
        cell_src = {}
        cell_dst = {}
        row_out = {}
        src_val = {}
        regcount = 0
        for s in range(0, sheetcount):
            curr_sheet = sheets['out'][s]
            debug('Sheet: {}'.format(curr_sheet.name))
            for r in range(speedcount):
                row_in = FIRST_SPEED_ROW + r
                row_out['in1'] = FIRST_ROW_OUT + r
                row_out['in2'] = row_out['in1'] + ROW_OFFSET_IN1_IN2
                speed = sheets['in1'][0][row_in, SPEED_COL].value
                debug('Speed (row in:{} row_out:{}): {}'.format(
                    row_in, row_out, speed))
                for c in range(THROUGHPUT_JITTER_COL + 1):
                    cell_src['in1'] = sheets['in1'][s][row_in, c]
                    cell_src['in2'] = sheets['in2'][s][row_in, c]
                    cell_dst['in1'] = sheets['out'][s][row_out['in1'], c]
                    cell_dst['in2'] = sheets['out'][s][row_out['in2'], c]
                    # First 4 columns are metadata, the rest are integer
                    # figures to compare.
                    if c < 4:
                        src_val['in1'] = cell_src['in1'].value
                        src_val['in2'] = cell_src['in2'].value
                    else:
                        src_val['in1'] = int(cell_src['in1'].value)
                        src_val['in2'] = int(cell_src['in2'].value)
                    regcount += compare_figures(filein1, filein2,
                        curr_sheet.name, speed, COLUMN_LIST[c],
                        src_val['in1'], src_val['in2'])
                    cell_dst['in1'].set_value(cell_src['in1'].value)
                    cell_dst['in2'].set_value(cell_src['in2'].value)
    except Exception as e:
        debug('c={} val1={} val2={}'.format(c, cell_src['in1'].value,
            cell_src['in2'].value))
        err("Failed to fill spreadsheet tables with data!")
        if verbose:
            traceback.print_exc()
        # Save spreadsheet
        spreadsheets['out'].save()
        raise e
    # Save spreadsheet
    spreadsheets['out'].save()
    return regcount