def ods_to_excel(filepath, target) -> None:
    '''
    Generates an excel workbook with ods data

    argument:
        filepath --> a valid filepath to the source .ods document
        target   --> a valid filepath to the directory for the generated
                     excel workbook

    returns: None
    '''
    filename = os.path.basename(filepath)
    filename = filename.split(".")[0]
    data = get_data(filepath)
    # Round-trip through JSON to normalise the OrderedDict returned by
    # get_data into plain dicts/lists.
    json_data = json.loads(json.dumps(data))
    workbook_path = os.path.join(target, filename + ".xlsx")
    if not os.path.exists(workbook_path):
        wb = openpyxl.Workbook()
    else:
        wb = openpyxl.load_workbook(workbook_path)
    first = True
    for sheetname in json_data:
        # Excel sheet titles are limited to 31 characters (the original
        # truncated to 21, losing more of the name than necessary).
        title = sheetname[:31]
        if first:
            # Reuse the default active sheet for the first ODS sheet so a
            # fresh workbook does not keep an empty "Sheet".
            ws = wb.active
            ws.title = title
            first = False
        else:
            # BUG FIX: previously every iteration wrote into wb.active, so a
            # multi-sheet ODS document collapsed into a single sheet.
            ws = wb[title] if title in wb.sheetnames else wb.create_sheet(title)
        data_update = get_rows_xy(json_data[sheetname])
        ws = add_rows_to_sheet(data_update, ws)
        logging.info("Adding data to " + sheetname)
    wb.save(workbook_path)
    logging.info("Complete")
def getDatasetsModelsAndObjects(filenameToLoad): lookupTable = pyexcel_ods.get_data(filenameToLoad) ## open up the excel file to get the data as a dict of 2-lists locations = ['BATB', 'TAHB', 'GTB', 'ATB'] ## the first key for the lookupTable is the site location datasets = {} for loc in locations: datasets[loc] = [row for row in lookupTable[loc]] ## under each key is a rectangular list with two columns to each row, ## the first one is elevation, the second one is age for d in datasets: print d, datasets[d], "\n\n\n" datasetObjects = {} datasetModels = {} for d in datasets: datasetObjects[d] = siteData(d, datasets[d]) ## build the dataset containers using the data retrieved for each site ## note that the siteData object automatically filters the data received ## to get rid of the first few non data lines and any empty spaces return datasets, datasetModels, datasetObjects
def getManualResults():
    """Collect the hash values from column 0 of the 'FM' sheet of
    results-manual.ods, skipping the header row and the trailing row."""
    sheet = get_data("results-manual.ods")['FM']
    # range(1, len(sheet) - 1) in the original == slice [1:-1] here.
    return [row[0] for row in sheet[1:-1] if len(row) > 0]
def read_subject_results(subject, root):
    """Summarise electrode-detection results for one subject.

    Reads '<subject>_electrodes_num.ods' under *root* (sheet 'Sheet1') and
    aggregates, per electrode group: electrodes inside (col 3), electrodes
    found by MMVT (col 4), and optional non-real electrodes (col 5).

    Returns (ok, found_percent, all_hits, all_extra, groups_num, elecs_num),
    where ok is False when no usable rows were found.
    """
    sub_results_fname = op.join(root, '{}_electrodes_num.ods'.format(subject))
    data = get_data(sub_results_fname)['Sheet1']
    all_extra, groups_num, elecs_num, all_hits = 0, 0, 0, 0
    all_hits_prob = []
    # A string in the first cell marks a header row; drop it.
    if isinstance(data[0][0], str):
        del data[0]
    for line in data:
        if len(line) == 0 or line[0] == '':
            continue
        try:
            elecs_inside = int(line[3])
            found_by_mmvt = int(line[4])
            if len(line) > 5:
                # Electrodes flagged as not real are discounted from hits.
                non_real_electrodes = int(line[5])
                found_by_mmvt -= non_real_electrodes
            elecs_num += elecs_inside
            groups_num += 1
            # A group cannot "hit" more electrodes than it contains.
            hits = found_by_mmvt if found_by_mmvt <= elecs_inside else elecs_inside
            all_hits += hits
            extra = max(found_by_mmvt - elecs_inside, 0)
            all_extra += extra
            hits_prob = (hits / elecs_inside) * 100
            all_hits_prob.append(hits_prob)
            # print('{}, {:.2f}% found, {} extra'.format(line[1], hits_prob, extra))
        except (ValueError, IndexError, ZeroDivisionError):
            # Rows with missing/non-numeric cells (or zero electrodes) are
            # skipped; the bare except previously hid unrelated errors too.
            # print('Error with {}'.format(line))
            continue
    if elecs_num == 0 or groups_num == 0:
        # print('{}: No electrodes/groups!'.format(subject))
        return False, 0, 0, 0, 0, 0
    else:
        found = sum(all_hits_prob) / groups_num
        # BUG FIX: the summary printed the loop variable `extra` (last row
        # only); report the aggregated total instead.
        print('{}: {:.2f}% found, {} extra'.format(subject, found, all_extra))
        return True, found, all_hits, all_extra, groups_num, elecs_num
def get_families_from_ucnar_ods(ods_file): "Convert an UCNAR export ODS file into a list of Family data structures" # Crack open the workbook file book = pyexcel_ods.get_data(ods_file) members_sheet = book['Members'] member_keys = keys_row_to_keys_dict(members_sheet.pop(0)) try: adults_sheet = book['Adult Volunteers'] except KeyError: sys.stderr.write("No adult volunteers sheet") adults = [] else: adult_keys = keys_row_to_keys_dict(adults_sheet.pop(0)) # Parse the adult voluteers sheet adults = get_adult_volunteers_as_people(adults_sheet, adult_keys) # Parse the members sheet families = get_members_as_families(members_sheet, member_keys) # Unify the results for adult in adults: for family in families: if family.has_parent(adult.first_name, adult.last_name): family.add_or_update_parent(adult) break else: families.append(Family(parents=[adult])) return families
def test_bug_fix_for_issue_2():
    """Unicode cell content must survive a save/load round trip."""
    content = {
        "Sheet 1": [[1, 2, 3], [4, 5, 6]],
        "Sheet 2": [[u"row 1", u"Héllô!", u"HolÁ!"]],
    }
    save_data("your_file.ods", content)
    reloaded = get_data("your_file.ods")
    assert reloaded["Sheet 2"] == [[u'row 1', u'H\xe9ll\xf4!', u'Hol\xc1!']]
def test_issue_13():
    """Empty rows must survive save/load when skip_empty_rows=False."""
    test_file = "test_issue_13.ods"
    expected = [[1, 2], [], [], [], [3, 4]]
    save_data(test_file, {test_file: expected})
    loaded = get_data(test_file, skip_empty_rows=False)
    eq_(expected, loaded[test_file])
    os.unlink(test_file)
def openOdsFile(self):
    """Prompt the user for an ODS path until a readable file is given.

    Returns the workbook content as plain dicts/lists (the JSON round trip
    converts the OrderedDict returned by get_data).
    """
    while True:
        fileName = input("Enter path and ODS filename (Exemple: /home/" + getpass.getuser() + "/Documents/sheet.ods): ")
        try:
            return json.loads(json.dumps(get_data("../.." + fileName)))
        except Exception:
            # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt and
            # SystemExit, making this prompt loop impossible to interrupt.
            print("File not found. Please, try again.")
def __init__(self):
    """Load the CAN signal database shipped with the car_controller package."""
    # this_dir,_ = os.path.split(__file__)
    # database_path = os.path.join(this_dir, "signal_database", "signal_database.ods")
    # Resolve the bundled ODS file through pkg_resources so it works when
    # the package is installed (not only when run from a source checkout).
    database_path = pkg_resources.resource_filename('car_controller', 'signal_database/signal_database.ods')
    # Raw workbook: ordered dict of sheet name -> rows.
    self.database = px.get_data(database_path)
    self.signal_defs = self.load_signals()
    self.frame_defs = self.load_frames()
def get_data_academics (odspath, final_dict):
    """Read Data1.ods..Data4.ods from *odspath* and add a weighted academic
    average per person to *final_dict* under keys 'academics1'..'academics4'.

    NOTE(review): assumes each sheet has 3 non-data lines at the top, and
    that each data row starts with the person's name followed by grades in
    sub_list order — confirm against the spreadsheets.
    """
    for k in range(1,5):
        sheets = get_data(odspath+"/Data"+str(k)+'.ods')
        # JSON round trip to get plain dicts/lists.
        sheets = json.loads((json.dumps(sheets)))
        data_dict = {}
        # Column j of a data row maps to sub_list[j] (column 0 is the name).
        sub_list = ["", "Algebra", "Physics", "PE", "Chemistry", "Geometry", "Biology", "Programming"]
        lst= []
        for sheet in sheets:
            # Skip the first 3 non-data rows; if there are several sheets,
            # only the last one's rows are kept.
            lst = sheets[sheet][3:]
        while [] in lst:
            lst.remove([])
        for i in range(0,len(lst)):
            person = None
            for j in range(len(lst[i])):
                if isinstance(lst[i][j],str):
                    # A string cell is the person's name; start their record.
                    data_dict[lst[i][j]] = {}
                    person = lst[i][j]
                else:
                    data_dict[person][sub_list[j]] = lst[i][j]
            else:
                # for/else: after each row is consumed, derive the combined
                # Math grade from Geometry and Algebra.
                data_dict[person]['Math'] = (data_dict[person]['Geometry']+data_dict[person]['Algebra'])/2
        for person in data_dict:
            if k ==1:
                final_dict[person] = {}
            # Weighted average: Physics and Math count double, 8 weights total.
            average = ((2*data_dict[person]["Physics"]) + data_dict[person]["Chemistry"] + 2*(data_dict[person]["Math"]) + data_dict[person]["Programming"] + data_dict[person]["Biology"] + data_dict[person]["PE"])/8
            final_dict[person]['academics'+str(k)] = average
def write_cell(self, book_name, sheet_name, cell, message):
    """Write *message* into spreadsheet cell *cell* (e.g. 'B7') of
    *sheet_name* in the ODS workbook *book_name*, growing the sheet with
    empty rows/columns as needed, then save the workbook back to disk.
    """
    cleansed_filename = self.finish_filename(book_name)
    book = pyexcel_ods.get_data(cleansed_filename)
    assert (book != None), "Spreadsheet book has not been set!!"
    assert (
        len(cell) >= 2
    ), "Invalid cell size. It must be at least two characters in length."
    # RECALL: Valid cell names could be really long like "ACE6561"
    match_obj = re.match("^([a-zA-Z]+)(\\d+)$", cell)
    assert (
        match_obj != None
    ), "Invalid cell name. It must be a sequence of letters followed by a number."
    row = int(
        match_obj.group(2)) - 1  # don't forget, indices start at zero!
    col = self.convert_alphabetic_to_column(match_obj.group(1)) - 1
    print("[DEBUG] Now trying to write %s at %s[%d][%d]" % (message, sheet_name, row, col))
    selected_sheet = book[sheet_name]
    while (row >= len(selected_sheet)):
        # fill the sheet with more ROWS in order to access the given index
        selected_sheet.append([])
    while (col >= len(selected_sheet[row])):
        # fill the sheet with more COLUMNS in order to be able to access the
        # given index.
        # NOTE(review): this appends col+1 cells per pass, usually
        # overshooting the needed width (harmless but wasteful) — confirm.
        for i in range(0, (col + 1)):
            selected_sheet[row].append('')
    book[sheet_name][row][col] = message
    pyexcel_ods.save_data(cleansed_filename, book)
    if (self.enable_sync):
        print("connection to Nextcloud is a WIP")
def readODS(filename):
    """Read an essay-grading ODS file and populate the module-level feature
    lists (word/sentence counts, spelling, POS counts, marks).

    Returns (number_of_rows, number_classes) where number_classes is the
    maximum mark seen (column 6).
    NOTE(review): essay text is assumed in column 2 and the mark in column
    6 of 'Sheet1'; header row at index 0 is skipped — confirm the layout.
    """
    data = get_data(filename)
    a = json.dumps(data)
    # Round trip through a JSON string to plain dicts/lists.
    a = ast.literal_eval(str(a))
    lists = a['Sheet1']
    for i in range(1, len(lists)):
        #print i
        num_words = len(lists[i][2].split())
        num_sent = len(lists[i][2].split('.'))
        global_number_words.append(num_words)
        global_number_sent.append(num_sent)
        clean_essay = cleanText(removeAt(lists[i][2]))
        num_spell = removeSpellingMistakes(clean_essay)
        global_spelling_list.append(num_spell)
        global_essay_list.append(clean_essay)
        global_marks_list.append(lists[i][6])
        # Part-of-speech feature counts for nouns, adjectives and verbs.
        global_NN.append(getPOSCount(lists[i][2], ['NN']))
        global_JJ.append(getPOSCount(lists[i][2], ['JJ']))
        global_VBPD.append(getPOSCount(lists[i][2], ['VB', 'VBP', 'VBD']))
    number_classes = max(global_marks_list)
    return len(lists), number_classes
def interview(file):
    """ setup and read info """
    # Maps candidate name -> averaged interview score * 0.3.
    interview = []
    interview_dict = {}
    try:
        data = get_data(file)
    except:
        # NOTE(review): bare except masks the real error (permissions,
        # parse failure, ...) behind a generic message — confirm intent.
        raise Exception("Can't read/find file")
    """ read info and append info"""
    # Total row count across all sheets; rows 0-2 are assumed non-data.
    ctr = sum(map(len, json.loads(json.dumps(data)).values()))
    g = 3
    for i in range(ctr - 3):
        interview.append(json.loads(json.dumps(data))['Sheet1'][g])
        g += 1
    """ append information to dict """
    # The first string cell is taken as the name; the name cell is removed
    # so the remaining cells are numeric scores.
    # NOTE(review): `del i[0]` mutates the row while it is being iterated,
    # which skips the following element — confirm this is intended.
    for i in interview:
        for j in i:
            if (type(j) == str):
                del i[0]
                interview_dict[j] = i
    """ AVG the score and multiply an overall factor """
    # NOTE(review): interview_list is built but never used or returned.
    interview_list = []
    for i in interview_dict:
        interview_dict[i] = sum(interview_dict[i]) / len(
            interview_dict[i]) * 0.3
        interview_list.append(interview_dict[i])
    """ return inter_dict as final output """
    return interview_dict
def ielts(file):
    """ Create a list and dict & read/find file"""
    # Maps candidate name -> averaged IELTS score * 0.3.
    ielts = []
    ielts_dict = {}
    try:
        data = get_data(file)
    except:
        # NOTE(review): bare except masks the real error — confirm intent.
        raise Exception("Can't read/find file")
    """ same approach as first function, read info and append to ielts"""
    # Total row count across all sheets; rows 0-2 are assumed non-data.
    ctr = sum(map(len, json.loads(json.dumps(data)).values()))
    g = 3
    for i in range(ctr - 3):
        ielts.append(json.loads(json.dumps(data))['Sheet1'][g])
        g += 1
    """ Append detail information (name, score) to idelts_dict """
    # NOTE(review): `del i[0]` mutates the row while iterating it, which
    # skips the next element — confirm this is intended.
    for i in ielts:
        for j in i:
            if (type(j) == str):
                del i[0]
                ielts_dict[j] = i
    """ AVG the score and multiple by overall factor """
    # NOTE(review): ielts_list is built but never used or returned.
    ielts_list = []
    for i in ielts_dict:
        ielts_dict[i] = sum(ielts_dict[i]) / len(ielts_dict[i]) * 0.3
        ielts_list.append(ielts_dict[i])
    """ return idelts_dict as final solution """
    return ielts_dict
def load(self, *ficheros):
    """Parse one or more ODS order files into producers, products and
    repartos, then call self.ajustar() to reconcile the totals.

    The sheets are row-oriented with sentinel values: a one-cell row opens a
    new producer, a 'PRODUCTO' row defines the basket columns, and
    'TOTAL UNIDAD' closes the current producer.
    """
    for f in ficheros:
        data = get_data(f)
        for hoja in data.items():
            # items() yields (name, rows); only the rows are needed.
            hoja = hoja[1]
            cestas = None
            indices = None
            productor = None
            for row in hoja:
                if len(row) == 0:
                    continue
                c1 = get_text(row[0])
                if c1 == "COSTE PEDIDO RED":
                    continue
                if c1 == "TOTAL UNIDAD":
                    # End of the current producer's section.
                    productor = None
                    continue
                if len(row) == 1:
                    # A lone cell starts a new producer section.
                    productor = Productor(self, c1)
                    self.productores.append(productor)
                    continue
                if c1 == "PRODUCTO":
                    # Header row: capture basket names and their column
                    # indices for the rows that follow.
                    cestas, indices = get_cestas(row)
                    continue
                # Skip rows outside a producer section, the excluded
                # producer, or rows seen before any header row.
                if productor == None or productor.nombre == "CULTIMAR PESCADO":
                    continue
                if not cestas:
                    continue
                self.add_producto(c1)
                for i in range(len(cestas)):
                    r = indices[i]
                    p = get_text(str(row[r]))
                    self.add_reparto(cestas[i], p)
    self.ajustar()
def fill_transactions(db, ods_file):
    """Insert every data row of *ods_file* into db's 'transactions' table,
    creating the table first if it does not exist."""
    cur = db.cursor()
    cur.execute("""CREATE TABLE IF NOT EXISTS transactions (
            year INTEGER,
            month INTEGER,
            day INTEGER,
            vendor TEXT,
            credit REAL,
            debit REAL,
            account TEXT,
            category TEXT
        )""")
    book = get_data(ods_file)
    for sheet in book.values():
        # The first row of each sheet is a header; blank rows are skipped.
        for row in sheet[1:]:
            if not row:
                continue
            when = datetime.strptime(row[0], '%Y-%m-%d')
            cur.execute(
                'INSERT INTO transactions VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                (when.year, when.month, when.day,
                 row[1], row[2], row[3], row[4], row[5]))
    db.commit()
def read_ods(self, filename, header=0):
    """Load an ODS workbook into a dict of DataFrames indexed by 'Date'.

    header: index of the row that holds the column names; rows above the
    data (up to and including the header) are dropped.
    """
    sheets = get_data(filename)
    frames = {}
    for name, rows in sheets.items():
        frame = pd.DataFrame(rows[header + 1:], columns=rows[header])
        frames[name] = frame.set_index('Date')
    return frames
def get_all_advices_given(path, advice_dict):
    """Read every numerically-named sheet of the workbook at *path* and
    append one Person (with 3 cleaned advice strings) per sheet to
    advice_dict, grouped by the person's 'G<number>' group label.

    Returns the (mutated) advice_dict.
    """
    print('Getting data')
    data = get_data(path)
    print('Data aquired')
    for name, sheet in data.items():
        # Only sheets whose name is an integer hold per-person data.
        try:
            int(name)
        except ValueError:
            # BUG FIX: a bare "except:" previously hid unrelated errors
            # (KeyboardInterrupt included) behind this skip.
            continue
        print(name)
        group = sheet[3][0]
        if 'G' not in str(group):
            group = 'G' + str(group)
        person = Person(name, group)
        # 3 because the last one isn't important.
        # NOTE(review): .replace(' ', ' ') is a no-op as written; possibly
        # meant to collapse double spaces — confirm before changing it.
        advices = [
            sheet[12 + x * 10][1].replace('/', '').replace(' ', ' ').lower()
            for x in range(3)
        ]
        person.advices = advices
        advice_dict[group].append(person)
    return advice_dict
def test_bug_fix_for_issue_2():
    """Unicode content written with save_data must read back intact."""
    workbook = {}
    workbook["Sheet 1"] = [[1, 2, 3], [4, 5, 6]]
    workbook["Sheet 2"] = [[u"row 1", u"Héllô!", u"HolÁ!"]]
    save_data("your_file.ods", workbook)
    round_tripped = get_data("your_file.ods")
    assert round_tripped["Sheet 2"] == [[u"row 1", u"H\xe9ll\xf4!", u"Hol\xc1!"]]
def importUsers(spreadsheet=None, sheet_name=None):
    """Import users from an ODS spreadsheet into the OBP API.

    Reads the first sheet of *spreadsheet* (header row skipped), posts each
    row via createUser(), and prints a success/failure summary. Rows are
    (username, email, password, first_name, last_name).
    """
    OBP_AUTH_TOKEN = get_config('OBP_AUTH_TOKEN')
    OBP_API_HOST = get_config('OBP_API_HOST')
    ''' Loading location data from ods spreadsheet '''
    sheetdata = get_data(spreadsheet).popitem()  #Pops first sheet

    users = []

    def get_value(index=None, obj=None):
        # Missing trailing cells in a row are treated as empty strings.
        try:
            return obj[index]
        except IndexError:
            return ''

    success_count = 0
    fail_count = 0
    failed_users = []
    for index, user in enumerate(
            sheetdata[1:][0][1:]):  #skips sheetname, and header
        try:
            username = get_value(0, user)
            email = get_value(1, user)
            password = get_value(2, user)
            first_name = get_value(3, user)
            last_name = get_value(4, user)
            #Post user to api
            response = createUser(username=username,
                                  email=email,
                                  password=password,
                                  first_name=first_name,
                                  last_name=last_name)
            print(response.text)
            # BUG FIX: status codes were compared with "is", which tests
            # object identity; int identity is only an implementation detail
            # (CPython caches small ints), so use "==" for value equality.
            if response.status_code == 200:
                print("WARNING: user aleady exists")
                print(response.text)
                success_count = success_count + 1
            elif response.status_code == 201:
                print(response.text)
                success_count = success_count + 1
            else:
                print(response.text)
                fail_count = fail_count + 1
                failed_users.append(user)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
    print("Success: {}".format(success_count))
    print("Failed: {}".format(fail_count))
    print("The users which failed to import, if any, were:")
    for user in failed_users:
        print(user)
    print("Success: {}".format(success_count))
    print("Failed: {}".format(fail_count))
def demo(request):
    """Reset the caller's demo session and re-seed it from the bundled
    demo.ods spreadsheet, then redirect to the viewer."""
    session_key = get_session_key(request)
    if cache.get(session_key):
        cache.delete(session_key)
        # NOTE(review): session_key is interpolated into a shell command; if
        # it can ever contain user-controlled characters this is a shell
        # injection risk — prefer glob.glob + os.remove. Confirm provenance.
        os.system(f'rm -rf tmp/{session_key}-*.ods')
    ods_file = settings.STATIC_ROOT + '/demo.ods'
    # Copy the demo workbook into this session's working file.
    save_data(path_file_save(session_key), get_data(ods_file))
    return redirect('website:show_ods')
def get_members_and_volunteers_from_ucnar_ods(ods_file):
    """Get members and adult volunteers lists from a UCNAR ODS export.

    Returns (member_keys, members_email_dict, adult_keys, adults_email_dict)
    where each *_email_dict maps the row's second column (the e-mail
    address) to the full row, and *_keys is that sheet's header row.
    """
    book = pyexcel_ods.get_data(ods_file)

    members = book['Members']
    member_keys = members.pop(0)
    members_email_dict = {row[1]: row for row in members if row}

    adults = book['Adult Volunteers']
    adult_keys = adults.pop(0)
    # BUG FIX: this comprehension iterated over `members` instead of
    # `adults`, so the adults dict silently duplicated the members dict.
    adults_email_dict = {row[1]: row for row in adults if row}

    return member_keys, members_email_dict, adult_keys, adults_email_dict
def get_batts(filename):
    """Return all cell values of 'Sheet1' of *filename* as one flat list."""
    # Get data from file
    data = get_data(filename)
    batt_array = data['Sheet1']
    # Unpack into a single list. The original used "batts = batts + line",
    # which rebuilds the whole list on every row (quadratic); extend
    # appends in place.
    batts = []
    for line in batt_array:
        batts.extend(line)
    return batts
def open_filename(self):
    """Show a file-open dialog and load the chosen ODS workbook.

    Enables the editing widgets, fills the sheet combo box, and renders
    the first sheet.
    """
    filename = QFileDialog.getOpenFileName(
        self, self.tr("Abrir archivo"), "/home/luciano",
        self.tr("Hojas de Cálculo (*.ods)"))
    # getOpenFileName returns (path, selected_filter); path is '' on cancel.
    if filename[0] != "":
        self._enable_widgets(True)
        self.file_data = get_data(filename[0])
        for key in self.file_data.keys():
            self.cmb_ods_sheets.addItem(key)
        self.read_sheet(self.file_data, list(self.file_data.keys())[0])
def academic(file):
    """Setup Basic Dict/List"""
    # Maps candidate name -> weighted academic average * 0.4.
    subjects = {}
    grades = []
    """Check whether file exist or is it readable"""
    try:
        data = get_data(file)
    except:
        # NOTE(review): bare except masks the real error — confirm intent.
        raise Exception("Can't read/find file")
    # NOTE(review): `list` shadows the builtin of the same name.
    # Row 2 of Sheet1 is assumed to hold the subject headers.
    list = json.loads(json.dumps(data))['Sheet1'][2]
    i = 0
    """Factor by 2 for subjects: Maths (Algebra & Geometry) and Physics"""
    # Record the column index of each weighted subject in module globals.
    for item in list:
        subjects[i] = item
        if (item == 'Algebra'):
            global algebra_id
            algebra_id = i
        if (item == 'Geometry'):
            global geometry_id
            geometry_id = i
        if (item == 'Physics'):
            global physics_id
            physics_id = i
        i += 1
    """Get total row number"""
    ctr = sum(map(len, json.loads(json.dumps(data)).values()))
    """Append candidate information to list: grade"""
    g = 3
    for i in range(ctr - 3):
        grades.append(json.loads(json.dumps(data))['Sheet1'][g])
        g += 1
    """ if empty row exists, delete them """
    # NOTE(review): a generator expression is always truthy, so this
    # condition always takes the first branch; the else is dead code.
    if (x for x in grades if x != []):
        list2 = [x for x in grades if x != []]
    else:
        list2 = grades
    """ Multiply by the factor """
    for i in list2:
        i[algebra_id] = i[algebra_id] * 2
        i[geometry_id] = i[geometry_id] * 2
        i[physics_id] = i[physics_id] * 2
    """ grade dict to keep track individual student grade """
    grade = {}
    # Loop through the grades. NOTE(review): `del i[0]` mutates the row
    # while it is being iterated, skipping the next element — confirm.
    for i in list2:
        for j in i:
            if (type(j) == str):
                del i[0]
                grade[j] = i
    """ append the grade """
    for i in grade:
        grade[i] = sum(grade[i]) / len(grade[i]) * 0.4
    """return grade dict as final result for this function"""
    return grade
def get_book(self, filename, force=True):
    """Load the ODS workbook *filename* into self.book and return it.

    When the file is missing: create a new book if *force* is True,
    otherwise return None.
    """
    resolved = self.finish_filename(filename)
    try:
        self.book = pyexcel_ods.get_data(resolved)
    except FileNotFoundError:
        if not force:
            return None
        self.new_book(resolved)
    return self.book
def get_liter_data():
    '''
    Read the data file with the literature values for each cluster as a
    dictionary.
    '''
    # Load the literature workbook and return the "S-LMC" sheet as a list
    # of rows.
    return pe.get_data('lit_OCs_data.ods')["S-LMC"]
def extract_meta(xlsx_file):
    """Return sheet metadata for a workbook: {'sheets': [{'key', 'title'}]},
    where 'key' is the sheet's position (as a string) and 'title' its name."""
    workbook = pyexcel_ods.get_data(xlsx_file)
    sheets = [
        {'key': str(position), 'title': sheet_name}
        for position, sheet_name in enumerate(workbook)
    ]
    return {'sheets': sheets}
def getCategoriesFromFile(file):
    """Return the distinct, non-blank values of column 1 of the
    'WR Biz Taxonomy Yelp Mapping' sheet."""
    rows = get_data(file)["WR Biz Taxonomy Yelp Mapping"]
    categories = set()
    for row in rows:
        # Pad short rows to 10 cells so column 1 can be read safely.
        padded = row + [''] * (10 - len(row))
        value = padded[1].strip()
        if value:
            # print(padded)
            categories.add(value)
    return list(categories)
def read_data(path):
    """Read the spreadsheet at *path* into a pandas DataFrame.

    Tries pandas' odf engine first; when pandas has trouble with the
    headers (the whole frame comes back null) the file is re-read with
    pyexcel and rebuilt via mount_df. Exits the process on any read error.
    """
    try:
        data = pd.read_excel(pathlib.Path(path), engine='odf')
        #Se o pandas tiver problemas ao ler os heathers seu retorno é um df Null
        if data.isnull().all().all().all():
            sheet = get_data(path)['Sheet1']
            data = mount_df(sheet)
        return data
    except Exception as excep:
        # BUG FIX: the original concatenated the Exception object directly
        # to a str, which itself raises TypeError and masked the real error.
        sys.stderr.write("'Não foi possível ler o arquivo: " + path
                         + '. O seguinte erro foi gerado: ' + str(excep))
        os._exit(1)
def Read_cell(
        x=1, y=1
):  #this procedure uses an xls file and pyexcel the other should be harmonized
    # Return the value at 1-based column x, row y of Sheet1 of the global
    # input_file (Python 2 module).
    print "reading cell", x, y, "from", input_file
    # NOTE(review): this global declaration is unnecessary (input_file is
    # only read, never assigned), and in Python 3 using a name before its
    # `global` statement is a syntax error — confirm before porting.
    global input_file
    sheet = get_data(
        input_file
    )  #another way to load a sheet this time in an ordered dictionary
    value = sheet["Sheet1"][y - 1][x - 1]
    return value
def extrair_dados(self, caminho):
    """Load the ODS file at *caminho* and return its non-empty entries as a
    list; on any error the traceback is appended to trace.log (Python 2).
    """
    try:
        data = get_data(caminho)
        nova = list()
        # NOTE(review): get_data returns a dict keyed by sheet NAME, but
        # this loop indexes it with integers (data[i]); unless the sheets
        # are named 0..n-1 this raises KeyError, which the bare except
        # below swallows — confirm the intended behaviour.
        for i in xrange(len(data)):
            if data[i]:
                nova.append(data[i])
        return nova
    except:
        trace = traceback.format_exc()
        file("trace.log","a").write(trace)
del row[0] for row in data[3:]: if row: del row[21] del row[19] del row[7] del row[6] del row[5] del row[4] del row[3] del row[2] del row[1] del row[0] def tostr(i, v): if 16<=i<=18: return str(float(v)) try: return str(int(v)) if float(v) == int(v) else str(float(v)) except: return str(v) with open('TBullets.pck', 'w') as of: of.write(''.join(r[0]+'\n' for r in data[0:2])) of.write(''.join((';'.join(tostr(i, v) for i, v in enumerate(r))+';\n') for r in data[2:] if r)) if __name__ == '__main__': data = get_data("tbullets.ods")['tbullets'] export(data)
def test_pr_22():
    """Tabs, spaces and newlines inside a cell must be preserved."""
    data = get_data(get_fixtures("white_space.ods"))
    eq_(data["Sheet1"][0][0], "paragraph with tab(\t), space, \nnew line")
def test_issue_24():
    """A comment attached to a cell must not leak into the cell value."""
    data = get_data(get_fixtures("comment-in-cell.ods"))
    eq_(data["Sheet1"], [["test"]])
def handle(self, *args, **options):
    # Django management-command entry point: load the ODS workbook at
    # FILE_NAME1 and delegate all processing to handle1().
    self.handle1(get_data(FILE_NAME1))
def test_issue_27():
    """skip_empty_rows must drop empty rows but keep empty leading cells."""
    data = get_data(get_fixtures("issue_27.ods"), skip_empty_rows=True)
    eq_(data["VGPMX"], [["", "Cost Basis", "0"]])
rows = cursor.fetchall() if not rows: # inserir na tabela tipo_item_pesquisa caso não haja linhas cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(1, 'Recursos Humanos')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(2, 'Planejamento Urbano')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(3, 'Recursos para Gestão')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(4, 'Terceirização e Informatização')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(5, 'Gestão ambiental')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(pesquisa_munic.id_tipo_item_pesquisa, descricao) values(6, 'Articulação Interinstituicional')") cursor.execute("insert into pesquisa_munic.tipo_item_pesquisa(id_ pesquisa_munic.tipo_item_pesquisa, descricao) values(7, 'Variáveis Externas')") cursor.execute("commit") # fim tipo_item_pesquisa #Ajustar o path(file_name_with_path) file_name_with_path = "C:\desenv\dados\Base_MUNIC_2015_ods\Base_MUNIC_2015.ods" #abrir o arquivo ods que possui a munic from pyexcel_ods import get_data data = get_data(file_name_with_path) cursor.execute("select * from pesquisa_munic.item_composicao_quadro_pessoal where pesquisa_munic.id_tipo_item_pesquisa=1") rows = cursor.fetchall() if not rows: # inserir na tabela pesquisa_munic.item_composicao_quadro_pessoal caso não haja linhas, mas antes inserir na munic.esfera_municipal sheet_recursos_humanos = data.get('Recursos_humanos') id_esfera_municipal = 1 for row in sheet_recursos_humanos: if row[0] == 'A1': continue #insert into pesquisa_munic.esfera_municipal #cursor.execute("SELECT nextval('pesquisa_munic.s_esfera_municipal')") codigo_municipio = row[2] 
geocodigo = row[0] url_nome_municipio = 'http://idehco4.tk/instituicoes/ibge/bcim/municipios/'+ geocodigo + '/nome' url_geometry = 'http://idehco4.tk/instituicoes/ibge/bcim/municipios/'+ geocodigo + '/geom'
def __init__(self, pth):
    # Path to the ODS workbook backing this object.
    self.pth = pth
    # Workbook content as returned by get_data: an ordered dict mapping
    # sheet name -> list of rows.
    self.data = get_data(pth)
def test_issue_14():
    # pyexcel issue 61: the S-LMC sheet must load with empty rows skipped.
    data = get_data(get_fixtures("issue_61.ods"), skip_empty_rows=True)
    eq_(data["S-LMC"], [[u"aaa"], [0]])
def test_bug_fix_for_issue_1():
    """A repeated cell must be expanded into its individual copies."""
    workbook = get_data(get_fixtures("repeated.ods"))
    eq_(workbook["Sheet1"], [["repeated", "repeated", "repeated", "repeated"]])
def test_issue_6():
    """A '12 days' duration cell must load as a timedelta-like value."""
    data = get_data(get_fixtures("12_day_as_time.ods"), skip_empty_rows=True)
    eq_(data["Sheet1"][0][0].days, 12)
#!/usr/bin/env python from pyexcel_ods import get_data data = get_data('sum.ods') import json k = json.dumps(data) print(k) print(int(k[13]+k[14]) + int(k[17]+k[18]))
def test_issue_19():
    """Cell [1][1] of the product.template sheet must load correctly."""
    data = get_data(get_fixtures("pyexcel_81_ods_19.ods"),
                    skip_empty_rows=True)
    eq_(data["product.template"][1][1], "PRODUCT NAME PMP")
# Python 2 script: sum two single-digit values stored in pyth.ods by
# slicing the JSON dump of the workbook at fixed character offsets.
from pyexcel_ods import get_data
data=get_data("/home/anant/Desktop/foss/pyth.ods")
import json
k=json.dumps(data)
# NOTE(review): offsets 13 and 16 depend on the exact JSON layout of this
# particular sheet; reading the rows directly would be robust — confirm.
# `sum` shadows the builtin of the same name.
sum=int(k[13])+int(k[16])
print sum
def test_bug_fix_for_issue_1():
    """A repeated cell must be expanded into its individual copies."""
    # NOTE(review): current pyexcel-ods get_data returns a dict of sheets,
    # so comparing the whole result to a flat list of rows looks stale —
    # confirm which library version this test targets.
    data = get_data(os.path.join("tests", "fixtures", "repeated.ods"))
    assert data == [['repeated', 'repeated', 'repeated', 'repeated']]
del row[15] del row[13] del row[9] del row[8] del row[7] del row[6] del row[5] del row[4] del row[3] del row[2] del row[1] del row[0] def tostr(i, v): try: return str(int(v)) if float(v) == int(v) else str(float(v)) except: return str(v) with open("TShips.pck", "w") as of: of.write("".join(r[0] + "\n" for r in data[0:2])) of.write("".join((";".join(tostr(i, v) for i, v in enumerate(r)) + ";\n") for r in data[2:] if r)) if __name__ == "__main__": xlsfile = "tships.xls" data = get_data("tships.ods")["tships"] export(data)
def main(): """ overall function, checks the spreadsheet and outputs the errors """ #workbook = open_workbook(sys.argv[1], on_demand=True) data = get_data(sys.argv[1]) workbookString = json.dumps(data) workbookDict = ast.literal_eval(workbookString) errorMessageList_setup = [] errorMessageList_questions = [] errorMessageList_choices = [] print "" if not check4correctSheets(workbookDict, errorMessageList_setup): for error in errorMessageList_setup: print error print "FATAL ERROR -- incorrect sheets in workbook" return False print "checking for correct worksheets.......................................OK!" # I wonder if it's ok to only have user input questions, and then no choices sheet or a blank one? # This definitely has to get checked and parsed before the survey questions can get checked if not choicesSheetHasCorrectSetup(workbookDict, errorMessageList_setup): for error in errorMessageList_setup: print error print "FATAL ERROR -- choices could not be parsed" return False choicesDict = parseChoices(workbookDict, errorMessageList_choices) print "checking choices sheet setup..........................................OK!" if not surveySheetHasCorrectSetup(workbookDict, errorMessageList_setup): for error in errorMessageList_setup: print error print "FATAL ERROR -- survey could not be parsed" return False questionsList = parseQuestions(workbookDict, errorMessageList_questions) ########SKETCHY######### questionsList = questionsList[:263] #print "there are this many questions: "+str(len(questionsList)) # print questionsList[-3].__dict__ print "checking survey sheet setup...........................................OK!" 
checkQuestions(questionsList, errorMessageList_questions, choicesDict) # This matters less so I put it below the other two so that they could still get checked if this fails if not settingsSheetHasCorrectSetup(workbookDict, errorMessageList_setup): for error in errorMessageList_setup: print error print "FATAL ERROR -- survey could not be parsed" return False print "checking settings sheet setup.........................................N/A" print "" if len(errorMessageList_choices) > 0: for error in errorMessageList_choices: print error print "" if len(errorMessageList_questions) > 0: for error in errorMessageList_questions: print error print "" print "Enjoy your survey!....................................................OK!" print "" return True
import sys
from pyexcel_ods import get_data
import pyexcel as pe
import pyexcel.ext.ods
import ast
import json
import pudb

# Python 2 inspection script: load the workbook named on the command line
# and print a sample of its 'choices' sheet.
data = get_data(sys.argv[1])
workbook = json.dumps(data)
# JSON string -> plain dicts/lists.
d = ast.literal_eval(workbook)
print d.keys()
print len(d['choices'])
print d['choices'][1][:5]
#book = pe.get_book(file_name=sys.argv[1])
#sheets = book.to_dict()
#for name in sheets.keys():
#    print name
#!/usr/bin/env python3 """Напечатать список проектов, по которым не определены дальнейшие действия в taskwarrior""" from os.path import expanduser from subprocess import check_output from pyexcel_ods import get_data DATA = get_data(expanduser("~/.db/wiki/excel/prj.ods")) def check_learning(): """Проверить все направления изучения""" data = [v for v in DATA['Learning'][1:] if len(v) >= 3] for [code, _type, desc, *_] in data: if _type != 'Книга': have = have_tasks(code) if not have: print("task add", ("project:"+code).ljust(50), ' # '+desc) class PrjRow(): """Проект личный, по работе""" def __init__(self, row): self.row = row def have_data(self): """Есть ли данные в строке""" return len(self.row) > 0 def finished(self): """Закончен ли проект""" if self.completed() != "":