def download_xls_file(request):
    """Build an XLS question template for a sub-category and stream it back.

    POST data: 'user'/'hash' (auth), 'que_type' ('mcq' or 'objective') and
    'sub_cat_info' formatted as '<pk>>><name>'.
    """
    if not verify_user_hash(request.data.get('user'), request.data.get('hash')):
        logger.error('under quiz.download_xls_file wrong hash')
        return Response({'errors': 'Corrupted User.'},
                        status=status.HTTP_404_NOT_FOUND)
    logger.info('under quiz.download_xls_file')
    que_type = request.data.get('que_type')
    sub_cat_info = request.data.get('sub_cat_info')
    if not sub_cat_info:
        logger.error('under quiz.download_xls_file error no subcategory selected')
        return Response({'errors': 'Select a sub-category first.'},
                        status=status.HTTP_400_BAD_REQUEST)
    # 'sub_cat_info' is '<pk>>><name>'; split once instead of twice.
    parts = sub_cat_info.split('>>')
    sub_category_id, sub_category_name = parts[0], parts[1]
    try:
        SubCategory.objects.get(pk=sub_category_id,
                                sub_category_name=sub_category_name)
    except SubCategory.DoesNotExist as e:
        logger.error('under quiz.download_xls_file ' + str(e.args))
        return Response({'errors': 'Sub-category does not exist.'},
                        status=status.HTTP_404_NOT_FOUND)
    data = OrderedDict()
    if que_type == 'mcq':
        data.update({"Sheet 1": [MCQ_FILE_COLS, ["", sub_category_name]]})
    elif que_type == 'objective':
        data.update({"Sheet 1": [OBJECTIVE_FILE_COLS, [sub_category_name]]})
    else:
        # BUG FIX: an unrecognised que_type previously produced an empty
        # workbook instead of an error response.
        logger.error('under quiz.download_xls_file unknown que_type')
        return Response({'errors': 'Invalid question type.'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Build the temp file name once; it was re-concatenated three times before.
    file_name = sub_category_name + "_" + que_type + "_file.xls"
    save_data(file_name, data)
    from django.http import FileResponse
    response = FileResponse(open(file_name, 'rb'))
    try:
        import os
        # NOTE(review): relies on unlink-while-open semantics (POSIX); the
        # FileResponse still holds an open handle to the removed file.
        os.remove(file_name)
    except OSError as ose:
        logger.error('under quiz.download_xls_file ' + str(ose.args))
    return response
def convert_to_xls(filename):
    """Convert a JSON file (mapping row-id -> row data) into an XLS file.

    The sheet contains one [row_id, row_data] pair per JSON key; insertion
    order is preserved via object_pairs_hook.  The output keeps the input's
    base name with a '.xls' extension.
    """
    with open(filename, 'r') as f:
        data = json.load(f, object_pairs_hook=collections.OrderedDict)
    sheet = [[row_num, row_data] for row_num, row_data in data.items()]
    # BUG FIX: ''.join(filename.split('.')[:-1]) collapsed every dot in a
    # multi-dot name ('a.b.json' -> 'ab') and yielded '' for extensionless
    # names; strip only the final extension instead.
    fn = filename.rsplit('.', 1)[0]
    xls = {fn: sheet}
    save_data(fn + '.xls', xls)
def shard_reviewer(self, interval, path, output_format):
    # Split the 'reviewer_info' sheet of the workbook at `path` into shards
    # of `interval` data rows; shard N is written to output_format.format(N).
    # Shards whose output file already exists are skipped, not overwritten.
    # Python 2 code (print statement, xrange).
    xls = get_data(path)
    # JSON round-trip normalises cell values via self.json_serial.
    data = json.dumps(xls, default=self.json_serial)
    lines = json.loads(data, object_hook=json_util.object_hook)['reviewer_info']
    head = lines[0]    # header row, copied into every shard
    start = 1
    count = 1
    # print head
    for i in xrange(start, len(lines), interval):
        output_file = output_format.format(count)
        result = OrderedDict()
        print i, min(i + interval, len(lines))
        if os.path.isfile(output_file):
            # Shard already on disk: only report it.
            print True
            # if not os.path.isfile(output_format.format(count + 1)):
            #     data=lines[i:min(i+interval,len(lines))]
            #     data.insert(0,head)
            #     result.update({"reviewer_info": data})
            #     output_file = output_format.format(str(count)+"_x")
            #     save_data(output_file, result)
        else:
            data = lines[i:min(i + interval, len(lines))]
            data.insert(0, head)
            result.update({"reviewer_info": data})
            save_data(output_file, result)
        count += 1
def makeexcelfile(path, data):
    """Write `data` (sheet name -> rows) out as an XLS workbook at `path`."""
    sheets = OrderedDict()
    for name, rows in data.items():
        sheets[name] = rows
    save_data(path, sheets)
def render(self, data, accepted_media_type=None, renderer_context=None):
    """
    Render `data` into XLS.

    Accepts a mapping (rendered as key/value rows) or an iterable of
    mappings (rendered as a header row derived from the first item plus
    one row per item); None renders a single "No Data" cell.
    """
    xls_file = StringIO()
    xls_data = OrderedDict()
    if data is None:
        xls_data.update({"Data": [["No Data"]]})
    else:
        try:
            data = dict(data)
        # TODO: change to more specific exceptions
        except Exception:  # pylint: disable=broad-except
            data = list(data)
        if isinstance(data, list):
            if not data:
                # BUG FIX: an empty list previously raised IndexError
                # on data[0]; treat it the same as data=None.
                xls_data.update({"Data": [["No Data"]]})
            else:
                headers = list(dict(data[0]).keys())
                rows = []
                for row in data:
                    row = dict(row)
                    rows.append([simpleType(row[c]) for c in headers])
                xls_data.update({"Data": [headers, *rows]})
        else:
            xls_data.update(
                {"Data": [[k, simpleType(v)] for k, v in data.items()]})
    save_data(xls_file, xls_data)
    return xls_file.getvalue()
def reporte_excel(request):
    """Export the filtered sales report as an inline XLS (reporte.xls)."""
    resultado = filtros_reporte(request)
    encabezado = [
        'Id', 'Forma de pago', 'Monto pagado', 'total', 'Fecha', 'Subtotal',
        'Iva', 'Cambio', 'Cliente', 'Caja', 'Empleado'
    ]
    # One row per sale, in the same column order as the header.
    filas = [
        [
            venta.id,
            venta.get_forma_de_pago_display(),
            venta.monto_pagado,
            venta.total,
            venta.fecha,
            venta.subtotal,
            venta.iva,
            venta.cambio,
            venta.cliente.nombre,
            venta.caja.nombre,
            venta.empleado.username,
        ]
        for venta in resultado
    ]
    datos = OrderedDict()
    datos.update({"Sheet 1": [encabezado] + filas})
    memoria = StringIO()
    save_data(memoria, datos)
    response = HttpResponse(memoria.getvalue(),
                            content_type="application/vnd.ms-excel")
    response['Content-Disposition'] = 'inline; filename=reporte.xls'
    return response
def encode(self, data: dict, **options) -> io.BinaryFileStream:
    """
    Encodes the data into a Microsoft Excel Spreadsheet file-like stream.

    Arguments:
        data: The data to encode
        **options: The encoding options

    Returns:
        A Microsoft Excel Spreadsheet file-like stream

    Raises:
        geodatabr.core.encoders.EncodeError: If data fails to encode
    """
    try:
        xls_file = io.BinaryFileStream()
        xls_data = types.OrderedMap()
        for entity, records in data.items():
            # First row is the header (column names of the first record),
            # followed by one row of values per record.
            xls_data[entity] = [list(records.first().keys())] \
                + [list(record.values()) for record in records]
        pyexcel_xls.save_data(xls_file, xls_data)
        xls_file.seek(0)
        return xls_file
    except Exception as error:
        # BUG FIX: chain the original exception so the root cause is not
        # silently discarded when callers inspect __cause__/traceback.
        raise encoders.EncodeError from error
def get_campaigns2015(dbname, conn):
    """Dump the state_of_channel_campaigns2015to2016 table (header row first)
    to exports/<dbname>_campaigns2015to2016.xls.

    Rolls back and logs on database errors; never raises to the caller.
    """
    cur = None
    try:
        cur = conn.cursor()
        # Column names come from information_schema so the sheet gets a header row.
        SQL = '''select "column_name" from information_schema.columns where table_name = 'state_of_channel_campaigns2015to2016';'''
        cur.execute(SQL)
        campaign_headers = cur.fetchall()
        SQL = 'select * from state_of_channel_campaigns2015to2016;'
        cur.execute(SQL)
        campaigns = cur.fetchall()
        campaigns.insert(0, campaign_headers)
        campaigns_prep = {}
        campaigns_prep.update({"Sheet 1": campaigns})
        save_data("exports/" + dbname + "_campaigns2015to2016.xls", campaigns_prep)
    except psycopg2.OperationalError as e:
        conn.rollback()
        logger.log("Error: " + str(e), 0)
    except Exception:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception, same rollback+log.
        conn.rollback()
        logger.log(str(sys.exc_info()), 0)
    finally:
        # BUG FIX: the cursor previously leaked on every error path.
        if cur is not None:
            cur.close()
def shard(self, path, output_template):
    # Shard the 'hotel_info' sheet of the workbook at `path`: one hotel row
    # per output file (interval=1).  Each shard carries the hotel_info header
    # plus that row, and a 'review_info' sheet containing only its header.
    # Output files are keyed by the row index `i` (not `count`).
    # Python 2 code (print statement, xrange).
    xls = get_data(path)
    # JSON round-trip normalises cell values via self.json_serial.
    data = json.dumps(xls, default=self.json_serial)
    lines = json.loads(data, object_hook=json_util.object_hook)['hotel_info']
    lines2 = json.loads(data, object_hook=json_util.object_hook)['review_info']
    # lines3 = json.loads(data, object_hook=json_util.object_hook)['reviewer_info']
    head = lines[0]      # hotel_info header row
    head2 = lines2[0]    # review_info header row
    # head3 = lines3[0]
    interval = 1
    start = 1
    count = 1
    # print head
    for i in xrange(start, len(lines), interval):
        result = OrderedDict()
        print i, min(i + interval, len(lines))
        data = lines[i:min(i + interval, len(lines))]
        data.insert(0, head)
        result.update({"hotel_info": data})
        result.update({"review_info": [head2]})
        # result.update({"reviewer_info": [head3]})
        save_data(output_template.format(i), result)
        count += 1
def save_to_xls():
    """Persist the scraped `grades` rows (module global) to 成绩单.xls."""
    # presumably a length of 1 means only the header row was scraped,
    # i.e. the crawl/login failed — verify against the caller.
    if len(grades) == 1:
        print("爬取失败,请重新登录")
        sys.exit(1)
    workbook = OrderedDict()
    workbook.update({'成绩单1': grades})
    save_data('成绩单.xls', workbook)
def crawl_hotel_reviewer_name(path, reviewer_path):
    # For every review row in the workbook at `path`, visit the review URL
    # (column 5) with a webdriver and fill in the reviewer id and name
    # (columns 1 and 2), then save the workbook back in place.
    # `reviewer_path` is currently unused: the reviewer-dedup logic that read
    # it is commented out.  Python 2 code (print statement).
    print path
    result = OrderedDict()
    xls = get_data(path)
    data = json.dumps(xls, default=json_serial)
    # print data
    lines = json.loads(data, object_hook=json_util.object_hook)['hotel_info']
    reviews = json.loads(data, object_hook=json_util.object_hook)['review_info']
    # reviewer_result = OrderedDict()
    # reviewer_xls = get_data(reviewer_path)
    # reviewer_data = json.dumps(reviewer_xls, default=json_serial)
    # reviewers=json.loads(reviewer_data, object_hook=json_util.object_hook)['reviewer_info']
    # reviewers_id_set=[reviewer[1] for reviewer in reviewers[1:] if len(reviewer) > 1]
    reviewers_id_set = []
    # print reviewers_id_set
    # exit(0)
    driver = get_webdriver()
    for index, review in enumerate(reviews[1:]):
        print index
        # review[5],review[8]
        try:
            url = review[5]    # review URL column
            reviewer_data = hotel_reviewer_name_crawler(
                driver, url, reviewers_id_set)
            reviewer_id = reviewer_data[0]
            reviewer_name = reviewer_data[1]
            reviewer_exist = reviewer_data[2]
            print "Get reviewer: ", reviewer_name
            # if not reviewer_id or not reviewer_name:
            #     print 'Passed'
            #     continue
            # index enumerates reviews[1:], so +1 maps back to the sheet row.
            reviews[index + 1][1] = reviewer_id
            reviews[index + 1][2] = reviewer_name
            if not reviewer_exist:
                print "Add new reviewer: ", reviewer_id, reviewer_name
                temp = []
                temp.append(reviewer_id)
                temp.append(reviewer_name)
                temp.append(url)
                # print len(reviewers)
                # reviewers.append(temp)
                # print len(reviewers)
        except Exception as e:
            # Best-effort: on any scrape failure, restart the webdriver
            # and continue with the next review.
            print e
            driver = get_new_webdriver(driver)
    closeDriver(driver)
    result.update({"hotel_info": lines})
    result.update({"review_info": reviews})
    save_data(path, result)
def export_v2v_task_excel():
    """Export the paged V2V migration task list as an XLS attachment.

    Python 2 code (`unicode`, the StringIO module).
    """
    params = {
        'page_size': request.values.get('page_size'),
        'page_no': request.values.get('page_no')
    }
    total_nums, data = v2v_t_s.v2v_task_list(**params)
    # Header row first, then one row per task.
    rows = [[
        u"IP地址", u"任务ID", u"开始时间", u"结束时间", u"任务状态", u"任务来源", u"任务详情",
        u"操作者"
    ]]
    for task in data:
        rows.append([
            task['vm_ip'],
            task['request_id'],
            task['start_time'],
            task['finish_time'],
            unicode(V2vTaskStatusTransform.MSG_DICT.get(str(task['status']), '')),
            unicode(V2vCreateSourceTransform.MSG_DICT.get(task['source'], '')),
            task['message'],
            task['username'],
        ])
    excel_data = OrderedDict()
    excel_data.update({u"tasks": rows})
    io = StringIO.StringIO()
    save_data(io, excel_data)
    response = make_response(io.getvalue())
    response.headers[
        "Content-Disposition"] = "attachment; filename=v2v迁移任务信息.xls"
    return response
def process_directory(self, directory: str):
    """Collect metadata from every .sqlite file directly inside `directory`
    and save it as one 'Sheet1' workbook under self.export_dir.

    Prints a [-] message and returns if the directory does not exist.
    """
    meta_all = []
    dir_path = os.path.abspath(directory)
    if not os.path.exists(dir_path):
        # Guard clause replaces the original's trailing else branch.
        print('[-]\tdirectory \"{}\" not found, please check again.'.format(dir_path))
        return
    for file in os.listdir(dir_path):
        full_path = os.path.join(dir_path, file)
        # Only plain files with a .sqlite extension are processed; the
        # original expressed this with `if ...: pass / else: continue`.
        if not os.path.isfile(full_path):
            continue
        if os.path.splitext(file)[1].lower() != '.sqlite':
            continue
        meta_one = self.process_db_file(db_filename=full_path)
        if meta_one is not None:
            meta_all.append(meta_one)
    # Save the collected metadata to an Excel workbook.
    save_root = os.path.abspath(self.export_dir)
    save_path = os.path.join(save_root, self.meta_export_filename)
    # exist_ok avoids the original's racy exists()-then-makedirs() pair.
    os.makedirs(save_root, exist_ok=True)
    save_data(save_path, {'Sheet1': meta_all})
    print('[+]\tmetadata saved')
def writeXlsFile(path, data):
    """Write `data` (sheet name -> rows) as an XLS workbook at `path`."""
    # OrderedDict(data) copies the mapping while preserving its order,
    # equivalent to creating an empty OrderedDict and update()-ing it.
    save_data(path, OrderedDict(data))
def download_project_xlsx(self):
    """Export all projects to ProjectData.xls as an HTTP attachment."""
    try:
        projects = Project.objects.all()
    except ObjectDoesNotExist:
        return HttpResponse(' Something went wrong!')
    projects = projects.values(
        'id', 'company', 'title', 'start_date', 'end_date',
        'estimated_design', 'actual_design', 'estimated_development',
        'actual_development', 'estimated_testing', 'actual_testing'
    ).order_by('id')
    header = ['SNo', 'ID', 'company', 'title', 'start_date', 'end_date',
              'estimated_design', 'actual_design', 'estimated_development',
              'actual_development', 'estimated_testing', 'actual_testing']
    rows = [header]
    # enumerate supplies the 1-based serial number the original tracked by hand.
    for sno, project in enumerate(projects, start=1):
        rows.append([
            sno, project['id'], project['company'], project['title'],
            project['start_date'], project['end_date'],
            project['estimated_design'], project['actual_design'],
            project['estimated_development'], project['actual_development'],
            project['estimated_testing'], project['actual_testing'],
        ])
    workbook = OrderedDict()
    workbook.update({"Projects": rows})
    stream = BytesIO()
    save_data(stream, workbook)
    stream.seek(0)
    response = HttpResponse(
        stream.read(), content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="ProjectData.xls"'
    return response
def get_hotel_tripadvisor_link(path):
    # For each hotel row in the workbook at `path`, search the web for its
    # TripAdvisor link and store it in column 7 (appending if the row is
    # short), then save the workbook back in place.
    # Python 2 code (print statement, xrange).
    result = OrderedDict()
    xls = get_data(path)
    data = json.dumps(xls, default=json_serial)
    lines = json.loads(data, object_hook=json_util.object_hook)['hotel_info']
    lines2 = json.loads(data, object_hook=json_util.object_hook)['review_info']
    # lines3=json.loads(data, object_hook=json_util.object_hook)['reviewer_info']
    # lines[0].append('Link')
    for i in xrange(1, len(lines)):
        # Pad short rows so column 8 is addressable.
        while len(lines[i]) < 9:
            lines[i].append('')
        q = [str(x) for x in lines[i]]
        # print q
        # try:
        # Search query: column 0 plus columns 2:5 (presumably hotel name /
        # location fields — verify against the sheet layout) + "tripadvisor".
        link = url_crawler(q[0], ' '.join(q[2:5]) + " tripadvisor").start_crawl()
        print link
        try:
            lines[i][7] = link
        except:
            lines[i].append(link)
        # time.sleep(1)
        # except:
        #     # link=''
        #     # print 'fail'
        #     break
    # # print lines[:10]
    result.update({"hotel_info": lines})
    result.update({"review_info": lines2})
    # result.update({"reviewer_info":lines3})
    save_data(path, result)
def update_hotel_reviewer_score(path):
    # For every reviewer row in the workbook at `path`, visit the reviewer's
    # profile link (column 2) with a webdriver and fill the five score
    # columns 11-15, then save all three sheets back in place.
    # Python 2 code (print statement).
    driver = get_webdriver()
    result = OrderedDict()
    xls = get_data(path)
    data = json.dumps(xls, default=json_serial)
    lines = json.loads(data, object_hook=json_util.object_hook)['hotel_info']
    reviews = json.loads(data, object_hook=json_util.object_hook)['review_info']
    reviewers = json.loads(data, object_hook=json_util.object_hook)['reviewer_info']
    for index, reviewer in enumerate(reviewers[1:]):
        print reviewer
        # Pad short rows so columns up to 17 are addressable.
        while len(reviewer) < 18:
            reviewer.append('')
        if True:  # reviewer[11]!='' and reviewer[12]!='':  (filter disabled)
            link = reviewer[2]    # reviewer profile URL column
            # print reviewer
            print link
            try:
                scores = get_reviewer_scores(driver, link)
                print scores
                # break
                reviewer[11] = scores[0]
                reviewer[12] = scores[1]
                reviewer[13] = scores[2]
                reviewer[14] = scores[3]
                reviewer[15] = scores[4]
            except:
                # Best-effort: a failed scrape leaves the row unchanged.
                pass
    result.update({"hotel_info": lines})
    result.update({"reviewer_info": reviewers})
    result.update({"review_info": reviews})
    save_data(path, result)
def test_issue_10_generator_as_content():
    """Regression test: save_data must accept a generator of generator rows."""
    def rows():
        # Each yielded row is itself a generator over its cells.
        for _ in range(2):
            yield (cell for cell in [1, 2])
    save_data("test.xls", {"sheet": rows()})
def test_pyexcel_xls_issue_2():
    """Regression test: writing 4100 rows of datetime cells must not fail."""
    rows = [[datetime.datetime.now()] for _ in range(4100)]
    sheets = OrderedDict()
    sheets.update({"test": rows})
    save_data("test.xls", sheets)
    os.unlink("test.xls")
def xlsExcelFileMaker(path, data):
    """Write `data` (sheet name -> rows) to an XLS workbook at `path`."""
    # BUG FIX: the original created an always-empty dict `d` and called
    # update(d) — a no-op — and shadowed the builtin `dict`; both removed.
    sheets = OrderedDict()
    for sheet_name, sheet_value in data.items():
        sheets[sheet_name] = sheet_value
    save_data(path, sheets)
def makeExcelFile(path, data):
    """Write `data` (sheet name -> rows) to an XLS workbook at `path`."""
    # Copy the sheets into an OrderedDict in input order before saving.
    save_data(path, OrderedDict((num, rows) for num, rows in data.items()))
def save_xls(path, name, title, datalist):
    """Write one sheet called `name` to `path`: a `title` row followed by
    every row in `datalist`."""
    rows = [title]
    rows.extend(datalist)
    workbook = OrderedDict()
    workbook.update({name: rows})
    save_data(path, workbook)
def makeExcelFile(path, data):
    """Write `data` (sheet name -> rows) to an XLS workbook at `path`."""
    # Walk the mapping's (key, value) pairs and merge each one, in order,
    # into an OrderedDict before saving.
    workbook = OrderedDict()
    for sheet_name, sheet_rows in data.items():
        workbook.update({sheet_name: sheet_rows})
    save_data(path, workbook)
def writeExcelFile(self, path, data):
    """Write `data` (sheet name -> rows) to an XLS workbook at `path`."""
    # OrderedDict keeps sheet order; also avoids the original's shadowing
    # of the builtin name `dict`.
    sheets = OrderedDict()
    for sheet_name, sheet_value in data.items():
        sheets[sheet_name] = sheet_value
    save_data(path, sheets)
def makeExcelFile(path, data):
    """Write `data` (sheet name -> rows) to an XLS workbook at `path`."""
    workbook = OrderedDict()
    for name, rows in data.items():
        # update() merges this single-entry mapping into the workbook,
        # preserving insertion order.
        workbook.update({name: rows})
    save_data(path, workbook)
def TxtSaveXls(path1, path2):
    """Read a JSON object from `path1` and write its key/value pairs as
    two-column rows on a 'city' sheet in the XLS file `path2`."""
    with open(path1, 'r', encoding='utf-8') as src:
        mapping = json.loads(src.read())
    rows = [[key, value] for key, value in mapping.items()]
    workbook = OrderedDict()
    workbook.update({"city": rows})
    save_data(path2, workbook)
def TxtSaveXsl(path1, path2):
    """Read a JSON object from `path1` (name -> list of values), prepend each
    key to its value list, and write the rows to a 'students' sheet at `path2`."""
    with open(path1, 'r', encoding='utf-8') as src:
        records = json.loads(src.read())
    rows = []
    for name, values in records.items():
        values.insert(0, name)
        rows.append(values)
    workbook = OrderedDict()
    workbook.update({'students': rows})
    save_data(path2, workbook)
def read_xls_file(file):
    """Split the workbook `file` into one .xls per sheet; each output file
    is named '<sheet name>.xls' and holds a single sheet called 'Sheet1'."""
    source = get_data(file)
    for sheet_name in source.keys():
        # Fresh single-sheet workbook per source sheet; the key is always
        # "Sheet1", so the output content matches the original's reuse of
        # one OrderedDict.
        single = OrderedDict()
        single.update({"Sheet1": source[sheet_name]})
        save_data("%s.xls" % sheet_name, single)
def convert_data_to_spreadsheet(sheet_tables):
    """Serialise `sheet_tables` (sheet name -> 2-D array) to XLS bytes.

    Python 2 code (the StringIO module, dict.iteritems).
    """
    import StringIO
    from pyexcel_xls import save_data
    workbook = OrderedDict()
    for sheet_name, table in sheet_tables.iteritems():
        workbook.update({sheet_name: table})
    buf = StringIO.StringIO()
    save_data(buf, workbook, encoding="UTF-8")
    return buf.getvalue()
def save_xls_file():
    """Write a tiny demo grade sheet (header plus one data row) to write_test.xls."""
    header = [u"ID", u"卷面", u"平时"]
    values = [4, 5, 6]
    workbook = OrderedDict()
    workbook.update({u"这是成绩表": [header, values]})
    save_data("write_test.xls", workbook)
def download_access_xls_file(request):
    """Generate and stream an empty quiz-access XLS template for a quiz.

    POST data: 'user'/'hash' (auth) and 'test_id' (Quiz primary key).
    """
    if not verify_user_hash(request.data.get('user'), request.data.get('hash')):
        logger.error('under quiz.download_access_xls_file wrong hash')
        return Response({'errors': 'Corrupted User.'},
                        status=status.HTTP_404_NOT_FOUND)
    logger.info('under quiz.download_access_xls_file')
    test_id = request.data.get('test_id')
    if not test_id:
        logger.error('under quiz.download_access_xls_file error invalid data')
        return Response({'errors': 'Invalid data here.'},
                        status=status.HTTP_400_BAD_REQUEST)
    quiz = Quiz.objects.get(pk=test_id)
    data = {"Sheet 1": [QUIZ_ACCESS_FILE_COLS]}
    # Build the temp file name once and reuse it everywhere.
    file_name = quiz.title + "_file.xls"
    save_data(file_name, data)
    from django.http import FileResponse
    response = FileResponse(open(file_name, 'rb'))
    try:
        import os
        # BUG FIX: the original removed quiz.title+"access_file.xls" — a name
        # that was never written — so every download leaked the real temp file
        # and logged a spurious OSError.  Remove the file actually created.
        os.remove(file_name)
    except OSError as ose:
        logger.error('under quiz.download_access_xls_file ' + str(ose.args))
    return response
def save_xls_file(insertData):
    """Write the given rows to write2.xls on a single sheet named 'sheet'."""
    save_data("write2.xls", OrderedDict([(u"sheet", insertData)]))
from pyexcel_xls import save_data
from ordereddict import OrderedDict

# Parse the pipe-delimited 'census01' dump into rows and save them to
# your_file.xls (Sheet 1 = parsed data, Sheet 2 = a fixed header row).
data = OrderedDict()
k = []
# `with` replaces the original open()/f.close() pair so the handle is
# closed even if parsing raises; the no-op `line[2] = line[2]` was removed.
with open('census01', 'r') as f:
    for (i, line) in enumerate(f.readlines()):
        line = line.split('|')
        # Drop the last two characters of column 3 — presumably a trailing
        # separator plus newline; confirm against the census01 format.
        line[3] = line[3][:len(line[3]) - 2]
        # Strip thousands separators from the population figure.
        line[1] = line[1].replace(',', '')
        k.append(line)
data.update({"Sheet 1": k})
data.update({"Sheet 2": [["row 1", "row 2", "row 3","row 4"]]})
save_data("your_file.xls", data)
if word.isalpha() and not (word == 'State' or word == 'UT'): if flag: state = state + ' ' + word else: state = word flag = True # print state # removing sl no line.remove(line[0]) # removing name of state for i in range(state.count(' ')): line.remove(line[0]) line.remove(line[1]) line.remove(line[3]) line.remove(line[3]) line.remove(line[0]) line.insert(0, state) line[1] = line[1].replace(',', '') line = line[:4] print line k.append(line) f.close() data.update({"Sheet 1": k}) save_data("population_census2011.xls", data)
# Fetch state populations for 2001 and 2011 from Oracle and write them
# side by side ([STATE, COUNT2001, COUNT2011]) into an XLS workbook.
data = OrderedDict()
conn = cx.connect('system/breakwindows@XE')
c = conn.cursor()
c.execute('SELECT STATE, COUNT FROM POPULATION WHERE YEAR = 2001')
# One [state, count2001] list per fetched row (tuples converted to lists).
ra = [list(row) for row in c]
c.execute('SELECT COUNT FROM POPULATION WHERE YEAR = 2011')
# Append the 2011 count to the matching 2001 row — presumably both queries
# return rows in the same state order; confirm against the table.
for key, row in enumerate(c):
    ra[key].append(row[0])
data.update({"Sheet 1": ra})
save_data("/home/negi/Documents/Topological_Crime_Analysis/Population/testing.xls", data)
# Fetch state literacy percentages for 2001 and 2011 from Oracle, sort the
# combined rows ([STATE, PCT2001, PCT2011]) and write them to an XLS workbook.
data = OrderedDict()
conn = cx.connect('system/breakwindows@XE')
c = conn.cursor()
c.execute('SELECT STATE, PERCENTAGE FROM LITERACY WHERE YEAR = 2001')
# One [state, pct2001] list per fetched row (tuples converted to lists).
ra = [list(row) for row in c]
c.execute('SELECT PERCENTAGE FROM LITERACY WHERE YEAR = 2011')
# Append the 2011 percentage to the matching 2001 row — presumably both
# queries return rows in the same state order; confirm against the table.
for key, row in enumerate(c):
    ra[key].append(row[0])
ra.sort()
data.update({"Sheet 1": ra})
save_data("/home/negi/Documents/Topological_Crime_Analysis/Literacy/testing.xls", data)