def as_excel_data(self, sheet_name):
    from pyExcelerator import Workbook
    from excel import ExcelColumn, ExcelSheet
    import tempfile, os
    logger = logging.getLogger("report.excel")
    if self.paging_controller:
        self.paging_controller.update_view_size(self.data_count)
        data, cols = self.paging_controller.data_view(self.data, self.cells)
    else:
        data, cols = self.data, self.cells.cols
    temp_file = tempfile.NamedTemporaryFile(suffix='.xls', dir=tempfile.tempdir)
    logger.debug("exported tmp excel name: %s" % temp_file.name)
    temp_file.close()
    work_book = Workbook()
    log_colums = [ExcelColumn(e, e) for e in cols if e != 'row_no']
    sheet = ExcelSheet(sheet_name, log_colums)
    sheet.init_sheet(work_book, data)
    sheet.export_header(data)
    sheet.export_data(data)
    work_book.save(temp_file.name)
    data = open(temp_file.name, "rb").read()
    logger.debug("done to export excel, data length: %s" % len(data))
    os.remove(temp_file.name)
    return data
def create_xls(file_xls, list_header, list_data):
    '''
    Create an .xls file.
    file_xls:    absolute path of the target .xls file; it must not exist
                 beforehand and is created when the function finishes
    list_header: values for the first row, like the <th> cells of a <table>,
                 format: [h1, h2, ...]
    list_data:   values for each data row, format: [[l1_1, l1_2, ...], ...]
    '''
    fun = 'function create_xls of python_function.py'
    #from pyExcelerator import *
    from pyExcelerator import Workbook
    w = Workbook()
    ws = w.add_sheet('sheet')
    # The first row is the header; note that (0, 0) is row 1, column 1.
    i = 0
    for header in list_header:
        ws.write(0, i, header)
        i += 1
    # Write the data rows.
    #data = [["aaaa",9,u"三年二班"], ["bbbb",9,u"三年二班"], ["cccc",9,u"三年二班"]]
    for i in range(len(list_data)):
        for j in range(len(list_data[i])):
            ws.write(i + 1, j, list_data[i][j])
        i += 1
    w.save(file_xls)
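# A minimal usage sketch for create_xls above; the output path and the row
# values are hypothetical, chosen only to illustrate the expected shapes.
create_xls('/tmp/demo.xls',
           [u'name', u'age'],
           [[u'Alice', 30], [u'Bob', 25]])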
def export_file_excel(data):
    try:
        # filepath = os.path.join(export_filepath, data['downloadid'])
        filename = os.path.join(export_filepath,
                                export_filename + '.' + data[0]['downloadid'])
        w = Workbook()
        ws = w.add_sheet('sheet')
        names = []
        values = []
        for d in data:
            if "name" in d.keys():
                names.append(d['name'])
                values.append(d['data'])
        # logger.info(values)
        for i in range(len(names)):
            ws.write(0, i, names[i])
        for i in range(len(values)):
            for j in range(len(values[i])):
                value = values[i][j]
                if not value:
                    value = u''
                ws.write(j + 1, i, value)
            # i += 1
        w.save(filename)
    except Exception, e:
        logger.warn(e, exc_info=True)
class ExcelWriter(object):
    XLS_HEADERS = [u"公司ID编码", u"公司名", u"公司简介", u"公司主要产品", u"公司网站", u"公司网站标题"]
    COLS = len(XLS_HEADERS)

    def __init__(self, logger, output_name="text.xls"):
        self.logger = logger
        self.workbook = Workbook()
        self.worksheet = self.workbook.add_sheet("CompanyInformation")
        self.output_name = output_name
        self.row = 1
        for col in range(ExcelWriter.COLS):
            self.worksheet.write(0, col, ExcelWriter.XLS_HEADERS[col])

    def insert(self, obj):
        items = obj.get_info_as_tuple()
        for col, item in enumerate(items):
            self.worksheet.write(self.row, col, item.decode("utf-8"))
        self.logger.info("成功在%s中写入%s", self.output_name, obj.corp_name)

    def next_row(self):
        self.row += 1
        return self.row

    def commit(self):
        self.workbook.save(self.output_name)
def openExcelSheet():
    """ Opens a reference to an Excel WorkBook and Worksheet objects """
    from pyExcelerator import Workbook
    workbook = Workbook()
    worksheet = workbook.add_sheet("Sheet 1")
    return workbook, worksheet
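# A minimal usage sketch for openExcelSheet above; the cell value and the
# output filename are hypothetical.
workbook, worksheet = openExcelSheet()
worksheet.write(0, 0, u'hello')   # row 0, column 0
workbook.save('example.xls')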
class ExcelWriter(object):
    #XLS_HEADERS = [u'公司ID编码', u'公司名', u'公司简介', u'公司主要产品', u'公司网站', u'公司网站标题']
    XLS_HEADERS = [u'公司名', u'公司网址', u'网站标题', u'网站描述', u'网站搜索关键词或产品']
    COLS = len(XLS_HEADERS)

    def __init__(self, output_name='findKeywordAndTitle.xls'):
        #self.logger = logger
        self.workbook = Workbook()
        self.worksheet = self.workbook.add_sheet('CompanyInformation')
        self.output_name = output_name
        self.row = 1
        for col in range(ExcelWriter.COLS):
            self.worksheet.write(0, col, ExcelWriter.XLS_HEADERS[col])

    def insert(self, obj):
        items = obj
        for col, item in enumerate(items):
            self.worksheet.write(self.row, col, item)
        #self.logger.info('成功在%s中写入%s', self.output_name, obj.corp_name)

    def next_row(self):
        self.row += 1
        return self.row

    def commit(self):
        self.workbook.save(self.output_name)
def saveDatas(filename, datas):
    w = Workbook()
    ws = w.add_sheet("Results")
    count = 0
    for t in datas:
        ws.write(count, 0, t)
        count = count + 1
    w.save(filename)
def post(self, request, *args, **kwargs):
    action = request.POST.get('action')
    ids = request.POST.get('ids')
    # Cancel tasks
    if action == 'cancel':
        cancel_task(ids)
        return get_task_by_ids(ids)
    # Reassign tasks
    elif action == 'redistribute':
        user_id = request.POST.get('user_id')
        if not user_id:
            return False
        redistribute_task(ids, user_id)
        return get_task_by_ids(ids)
    # Import
    elif action == 'import':
        def handle(key):
            return sheets[key] if sheets.has_key(key) else None

        user_file = request.FILES.values()[0]
        _, xls = tempfile.mkstemp()
        with open(xls, 'wb') as f:
            f.write(user_file.read())
        sheets = parse_xls(xls)[0][1]
        with transaction.atomic():
            tasks = []
            for i in range(1, max(sheets.keys())[0] + 1):
                task = TaskInfo()
                task.name = handle((i, 0))
                task.email = handle((i, 1))
                task.remark = handle((i, 2))
                task.is_new = True
                tasks.append(task)
            TaskInfo.objects.bulk_create(tasks)
        return undistributed_task()
    # Export
    elif action == 'export':
        tasks = TaskInfo.objects.filter(deleted=0)
        w = Workbook()
        ws = w.add_sheet('users')  # add_sheet() needs a sheet name; 'users' is assumed here
        for r, task in enumerate(tasks):
            r += 1
            ws.write(r, 0, task.name)
            ws.write(r, 1, task.email)
            ws.write(r, 2, task.phone)
            ws.write(r, 3, task.remark)
        _, xls = tempfile.mkstemp()
        w.save(xls)
        response = HttpResponse(open(xls).read(), content_type='application/xls')
        response['Content-Disposition'] = 'attachment; filename=users.xls'
        return response
def export():
    spec = session.get('spspec')
    form = ExportForm()
    if request.method == 'POST':
        dbkeys = request.values.getlist('dbkey')
        session['spchecked'] = dbkeys
        if not dbkeys:
            flash(t('no_field_was_selected'), 'error')
            return render_template('spareparts/export.html',
                                   fields=get_fields(),
                                   form=form,
                                   checked=session['spchecked'] or [None],
                                   )
        wb = Workbook()
        ws = wb.add_sheet('Part List')
        fill_header(ws, dbkeys)
        objs = Sparepart.find(spec=spec, sort='department, project, code')
        row = 1
        for sp in objs:
            for i, k in enumerate(dbkeys):
                if k == 'total_price':
                    ws.write(row, i, sp.unit_price * sp.store_good)
                elif k == 'is_local':
                    if sp.is_local:
                        txt = t('spareparts.local')
                    else:
                        txt = t('spareparts.oversea')
                    ws.write(row, i, txt)
                else:
                    ws.write(row, i, getattr(sp, k))
            row += 1
        file_name = 'Part List {}.xls'.format(str(g.user.id))
        file_dir = tempfile.gettempdir()
        file_path = os.path.join(file_dir, file_name)
        if os.path.isfile(file_path):
            os.remove(file_path)
        session.pop('spspec', None)
        wb.save(file_path)
        return send_from_directory(file_dir, file_name,
                                   as_attachment=True,
                                   attachment_filename=file_name,
                                   )
    return render_template('spareparts/export.html',
                           fields=get_fields(),
                           form=form,
                           checked=session.get('spchecked', []),
                           )
def write_xls_price(self): if self.options['whole']: filename = path.join(settings.MEDIA_ROOT, 'upload/wprice.xls') else: filename = path.join(settings.MEDIA_ROOT, 'upload/price.xls') workBookDocument = Workbook() docSheet = workBookDocument.add_sheet(u'Прайс соло-парфюм') docSheet.col(1).width = 10000 headerFont = Font() headerFont.bold = True headerFont.size = 400 headerStyle = XFStyle() headerStyle.font = headerFont docSheet.row(0).set_style(headerStyle) if self.options['whole']: docSheet.write_merge( 0, 0, 0, 2, u'Оптовый прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y')) else: docSheet.write_merge( 0, 0, 0, 2, u'Прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y')) docSheet.write(2, 0, u'Артикул') docSheet.write( 2, 1, u'Наименование', ) docSheet.write(2, 2, u'Цена') sectionFont = Font() sectionFont.bold = True sectionStyle = XFStyle() sectionStyle.font = sectionFont align = Alignment() align.horz = Alignment.HORZ_CENTER sectionStyle.alignment = align row = 3 for section in self.data.iterkeys(): docSheet.write_merge(row, row, 0, 2, section, sectionStyle) row += 1 for item in self.data[section]: docSheet.write(row, 0, item['identifier']) docSheet.write(row, 1, item['name']) docSheet.write(row, 2, item['price']) row += 1 workBookDocument.save(filename)
def export(): spec = session.get('espec') form = ExportForm() if request.method == 'POST': dbkeys = request.values.getlist('dbkey') session['echecked'] = dbkeys if not dbkeys: flash(t('no_field_was_selected'), 'error') return render_template('equipment/export.html', fields=get_fields(), form=form, checked=session['echecked'] or [None], ) wb = Workbook() ws = wb.add_sheet('Equipment List') fill_header(ws, dbkeys) objs = Equipment.find(spec=spec, sort='department, project, flex_id') row = 1 for e in objs: for i, k in enumerate(dbkeys): if k == 'is_good': ws.write(row, i, t('good' if e.is_good else 'bad')) elif k == 'is_instore': ws.write(row, i, t('yes' if e.is_instore else 'no')) else: ws.write(row, i, getattr(e, k)) row += 1 file_name = 'Equipment List {}.xls'.format(str(g.user.id)) file_dir = tempfile.gettempdir() file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path): os.remove(file_path) session.pop('espec', None) wb.save(file_path) return send_from_directory(file_dir, file_name, as_attachment=True, attachment_filename=file_name, ) return render_template('equipment/export.html', fields=get_fields(), form=form, checked=session.get('echecked', []), )
def export(): spec = session.get('tfspec') form = ExportForm() if request.method == 'POST': dbkeys = request.values.getlist('dbkey') session['tfchecked'] = dbkeys if not dbkeys: flash(t('no_field_was_selected'), 'error') return render_template('transfers/export.html', fields=get_fields(), form=form, checked=session['tfchecked'] or [None], ) wb = Workbook() ws = wb.add_sheet('Transfers List') fill_header(ws, dbkeys) objs = Transfer.find(spec=spec, sort='created_at') row = 1 for o in objs: for i, k in enumerate(dbkeys): if k in ('code', 'sn', 'flex_id', 'fixed_id'): ws.write(row, i, o.asset.get(k, '')) else: ws.write(row, i, getattr(o, k)) row += 1 file_name = 'Transfers List {}.xls'.format(str(g.user.id)) file_dir = tempfile.gettempdir() file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path): os.remove(file_path) session.pop('tfspec', None) wb.save(file_path) return send_from_directory(file_dir, file_name, as_attachment=True, attachment_filename=file_name, ) return render_template('transfers/export.html', fields=get_fields(), form=form, checked=session.get('tfchecked', []), )
def exportAllXlsData(self, request):
    """get XLS file for all folder objects"""
    sitemanger = zapi.getParent(self)
    locSitemanager = zapi.getParent(sitemanger)
    root_folder = zapi.getParent(locSitemanager)
    filename = datetime.now().strftime('ict_all_%Y%m%d%H%M%S.xls')
    f_handle, f_name = tempfile.mkstemp(filename)
    wbook = Workbook()
    for folder in root_folder.values():
        print "folder: ", folder
        folder.exportXlsData(request, folder.ikName, wbook)
    wbook.save(f_name)
    datafile = open(f_name, "rb")  # binary mode: .xls is a binary format
    dataMem = datafile.read()
    datafile.close()
    os.remove(f_name)
    return (filename, dataMem)
class ExcelWriter(object): XLS_HEADERS = [u'公司ID', u'公司名', u'公司网址', u'网站标题',u'网站描述', u'网站搜索关键词或产品'] COLS = len(XLS_HEADERS) def __init__(self, logger, output_name='text.xls'): self.logger = logger self.workbook = Workbook() self.worksheet = self.workbook.add_sheet('CompanyInformation') self.output_name = output_name self.row = 1 for col in range(ExcelWriter.COLS): self.worksheet.write(0, col, ExcelWriter.XLS_HEADERS[col]) def insert(self, obj): items = obj.get_info_as_tuple() for col, item in enumerate(items): self.worksheet.write(self.row, col, item.decode('utf-8')) # if col==5: # ss=item # ss=re.sub(r'\t','|',item,re.M) # ss=ss.replace("?","|") # ss=ss.replace("?","|") # ss=ss.replace("、","|") # ss=ss.replace(",","|") # ss=ss.replace("。","|") # ss=ss.replace(",","|") # ss=ss.replace(".","|") # ss=ss.replace("||","|") # if ss.endswith("|"): # ss=ss[:-1] # print ss # self.worksheet.write(self.row, col, ss.decode("utf-8")) # #item=str() # else self.logger.info('成功在%s中写入%s', self.output_name, obj.corp_name) def next_row(self): self.row += 1 return self.row def commit(self): self.workbook.save(self.output_name)
def export_special_excel(filename, data, names=[]):
    full_filepath = ''
    try:
        full_filepath = os.path.join(export_filepath, filename)
        w = Workbook()
        ws = w.add_sheet('sheet')
        if not names:
            names = [u'id', u'name', u'detail_times', u'outer_times', u'total_times', u'order']
        column_length = len(names)
        for i in range(column_length):
            ws.write(0, i, names[i])
        for i, value in enumerate(data):
            for j in range(column_length):
                index_name = names[j]
                ws.write(i + 1, j, value[index_name])
        w.save(full_filepath)
    except Exception, e:
        logger.warn(e, exc_info=True)
def main():
    if len(sys.argv) == 1:
        target_year = 1997
    else:
        target_year = int(sys.argv[1])

    print "Collecting statistics for the " + str(target_year) + '-' + str(target_year + 1) + " season...\n"
    fname = str(target_year) + '-' + str(target_year + 1) + '.xls'

    style = XFStyle()
    wb = Workbook()
    ws0 = wb.add_sheet('stats')
    write_categories(ws0)

    n_players = 0
    for key in range(ord('a'), ord('z') + 1):
        n_players = parse_comp(chr(key), target_year, n_players, ws0)

    print '# of players = ' + str(n_players)
    wb.save(fname)
def output_sheet(contents, titles, sheet_name="test", excel_name="test.xls"):
    if len(contents) <= 0 or len(titles) <= 0:
        return
    wb = Workbook()
    ws0 = wb.add_sheet(sheet_name)
    for i in range(len(titles)):
        ws0.write(0, i, titles[i])
    for i in range(len(contents)):
        content = contents[i]
        for j in range(len(titles)):
            if type(content) is not type(dict()):
                ws0.write(i + 1, j, getattr(content, titles[j]))
            else:
                ws0.write(i + 1, j, content[titles[j]])
                #ws0.write(i+1,j,content.titles[j])
    wb.save(excel_name)
    return wb
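# A minimal usage sketch for output_sheet above; the titles and the row dicts
# are hypothetical (plain objects with matching attributes would also work).
rows = [{'name': 'foo', 'count': 3}, {'name': 'bar', 'count': 5}]
output_sheet(rows, ['name', 'count'], sheet_name='demo', excel_name='demo.xls')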
def generateLabelSpreadsheet(branch, release_id): release = Release.objects.get(id=release_id) release_labels = [] for story in release.stories.all(): deployables = DeployableObject.objects.filter(pending_stories=story, el_type='labels') release_labels += [d.el_name for d in deployables] os.chdir(branch.repo.location) path = os.path.join(CODE_BASE, 'labels', 'CustomLabels.labels') f = open(path) doc = etree.XML(f.read()) f.close() labelmap = {} children = doc.findall(SF_NAMESPACE + 'labels') for child in children: labelName = child.find(SF_NAMESPACE + 'fullName').text if labelName in release_labels: if not labelmap.has_key(labelName): labelmap[labelName] = {} langmap = labelmap[labelName] langkey = child.find(SF_NAMESPACE + 'language').text desc = child.find(SF_NAMESPACE + 'value').text langmap[langkey] = desc print('label=%s, key=%s, desc=%s' % (labelName, langkey, desc)) wb = Workbook() ws0 = wb.add_sheet('0') ws0.write(0, 0, 'Key') ws0.write(0, 1, 'en_US') row = 1 for labelkey, langmap in labelmap.items(): for langkey, value in langmap.items(): ws0.write(row, 0, labelkey) if langkey == 'en_US': ws0.write(row, 1, value) row += 1 wb.save('/tmp/labels.xls') f = open('/tmp/labels.xls') xls = f.read() f.close() return xls
def export(): spec = session.get("iospec") form = ExportForm() if request.method == "POST": dbkeys = request.values.getlist("dbkey") session["iochecked"] = dbkeys if not dbkeys: flash(t("no_field_was_selected"), "error") return render_template( "iorecords/export.html", fields=get_fields(), form=form, checked=session["iochecked"] or [None] ) wb = Workbook() ws = wb.add_sheet("IOHistory List") fill_header(ws, dbkeys) objs = Iorecord.find(spec=spec, sort="created_at") row = 1 for o in objs: for i, k in enumerate(dbkeys): if k == "direction": ws.write(row, i, t(o.is_out and "out_store" or "in_store")) elif k == "kind_name": ws.write(row, i, t(o.kind_name)) else: ws.write(row, i, getattr(o, k)) row += 1 file_name = "IOHistory List {}.xls".format(str(g.user.id)) file_dir = tempfile.gettempdir() file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path): os.remove(file_path) session.pop("iospec", None) wb.save(file_path) return send_from_directory(file_dir, file_name, as_attachment=True, attachment_filename=file_name) return render_template( "iorecords/export.html", fields=get_fields(), form=form, checked=session.get("iochecked", []) )
def txt2els(sourcename, destname):
    w = Workbook()
    ws = w.add_sheet("Cpp glossary")
    row = 0
    ep = re.compile(r'\s\s+')  # split on runs of two or more whitespace characters
    EnRow, TwRow, CnRow = [], [], []
    with open(sourcename) as txtline:
        for r in txtline:
            result = re.split(ep, r)
            EnRow.append(result[0])
            if len(result) >= 3:  # check that the line split correctly
                TwRow.append(result[1])
                CnRow.append(result[2])
                ws.write(row, 1, TwRow[row].decode('GBK'))
                ws.write(row, 2, CnRow[row].decode('GBK'))
            else:
                TwRow.append("丢失")  # placeholder meaning "missing"
                CnRow.append("丢失")
            ws.write(row, 0, EnRow[row].decode('GBK'))
            row += 1
    w.save(destname)
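# A minimal usage sketch for txt2els above; the GBK-encoded, whitespace-
# delimited glossary file name and the output name are hypothetical.
txt2els('cpp_glossary.txt', 'cpp_glossary.xls')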
def fetch_data_from_db_write_into_xls():
    # Connect to Testlink DB and fetch all automated cases
    conn = MySQLdb.connect(host=TESTLINK_HOST_NAME,
                           user=TESTLINK_USER_NAME,
                           passwd=TESTLINK_PWD,
                           db=TESTLINK_DB_NAME)
    cu_select = conn.cursor(MySQLdb.cursors.DictCursor)
    testlink_xls_path = TESTLINK_XLS_PATH
    try:
        cu_select.execute(SELECT_QUERY)
    except MySQLdb.Error:
        errInsertSql = "Insert Sql ERROR!! sql is==>%s" % (SELECT_QUERY)
        sys.exit(errInsertSql)
    testlink_dict = cu_select.fetchall()

    # Create an excel from the fetched data
    wb = Workbook()
    ws0 = wb.add_sheet('0')
    row_number = 1
    for row in testlink_dict:
        i = 0
        for item in row:
            val = str(row[item])
            if i == 0:
                val = "ATH-" + val
            if row_number == 1:
                ws0.write(0, i, '')
                ws0.write(0, i, item)
                ws0.write(row_number, i, val)
                i = i + 1
            else:
                ws0.write(row_number, i, val)
                i = i + 1
        row_number = row_number + 1

    # Save the file
    wb.save('%s.xls' % testlink_xls_path)
def export_simple_excel(filename, data):
    full_filepath = ''
    try:
        full_filepath = os.path.join(export_filepath, filename)
        w = Workbook()
        ws = w.add_sheet('sheet')
        names = None
        values = []
        for d in data:
            if names is None:
                names = d.keys()
            values.append(d.values())
        for i in range(len(names)):
            ws.write(0, i, names[i])
        for i in range(len(values)):
            for j in range(len(values[i])):
                value = values[i][j]
                if value is None:
                    value = ''
                ws.write(i + 1, j, value)
        w.save(full_filepath)
    except Exception, e:
        logger.warn(e, exc_info=True)
def sandbox():
    # set attributes for workbook
    fnt = Font()
    fnt.name, fnt.colour_index, fnt.bold, fnt._weight = 'Arial', 0x08, False, 0x0190
    borders = Borders()
    borders.left, borders.right, borders.top, borders.bottom = 0, 0, 0, 0
    al = Alignment()
    al.horz, al.vert = Alignment.HORZ_CENTER, Alignment.VERT_CENTER
    style = XFStyle()
    style.font, style.borders, style.alignment = fnt, borders, al

    # create wb instance
    wb = Workbook()

    # add a sheet for each csv file being imported to excel
    s = util.GetFileListing('Note*.csv')
    csv_files = s.sys_cmd.output
    worksheet_titles = [':'.join(a_file.split(':')[1:-2]) for a_file in csv_files]
    ws_list = [wb.add_sheet(ws_title) for ws_title in worksheet_titles]

    F = util.FileEdit(csv_titles)
    for ws, f in zip(ws_list, F.file_utils):
        a_list = [a_row.rstrip().split(',') for a_row in f.contents]
        for a_row, a_line in enumerate(a_list):
            for a_col, a_cell in enumerate(a_line):
                ws.write(a_row, a_col, a_cell, style)
    wb.save('chk_out.xls')
class ExcelWriter(_CaseWriter):
    """ Writes case data to file in Excel format. """

    def write(self, file_or_filename):
        """ Writes case data to file in Excel format. """
        self.book = Workbook()
        self._write_data(None)
        self.book.save(file_or_filename)

    def write_case_data(self, file):
        """ Writes the header to file. """
        case_sheet = self.book.add_sheet("Case")
        case_sheet.write(0, 0, "Name")
        case_sheet.write(0, 1, self.case.name)
        case_sheet.write(1, 0, "base_mva")
        case_sheet.write(1, 1, self.case.base_mva)

    def write_bus_data(self, file):
        """ Writes bus data to an Excel spreadsheet. """
        bus_sheet = self.book.add_sheet("Buses")
        for i, bus in enumerate(self.case.buses):
            for j, attr in enumerate(BUS_ATTRS):
                bus_sheet.write(i, j, getattr(bus, attr))

    def write_branch_data(self, file):
        """ Writes branch data to an Excel spreadsheet. """
        branch_sheet = self.book.add_sheet("Branches")
        for i, branch in enumerate(self.case.branches):
            for j, attr in enumerate(BRANCH_ATTRS):
                branch_sheet.write(i, j, getattr(branch, attr))

    def write_generator_data(self, file):
        """ Write generator data to file. """
        generator_sheet = self.book.add_sheet("Generators")
        for j, generator in enumerate(self.case.generators):
            i = generator.bus._i
            for k, attr in enumerate(GENERATOR_ATTRS):
                generator_sheet.write(j, 0, i)
def generate_workbook(self, *args):
    self.pm.snap()
    start_time = self.pm.snap_time
    delimeter = ','
    if len(args) == 3:
        csv_source_list, worksheet_titles, file_name = args
    else:
        csv_source_list, worksheet_titles, file_name = self.csv_source, self.worksheet_titles, self.file_name
    self.wb = Workbook()
    self.ws_list = [self.wb.add_sheet(ws_title) for ws_title in worksheet_titles]
    self.pm_info.append(' + setup workbook; %f' % self.pm.snap())
    for csv_source, ws_title, ws in zip(csv_source_list, worksheet_titles, self.ws_list):
        for a_row, row_length, a_line in zip(
                xrange(len(csv_source)),
                iter(len(sub_line.split(delimeter)) for sub_line in csv_source),
                iter(sub_row.rstrip().split(delimeter) for sub_row in csv_source)):
            for a_col, a_cell in zip(xrange(row_length), a_line):
                ws.write(a_row, a_col, a_cell, self.style)
        self.__auto_scale_column_width__(csv_source, ws, max_word_width=self.max_word_width)
        self.pm_info.append(' ++ generated worksheet %s; %f' % (ws_title, self.pm.snap()))
    self.wb.save(file_name)
    self.pm_info.append(' + saved workbook file; %f' % self.pm.snap())
    self.pm_info.append(' + total time to generate workbook; %f' % (self.pm.snap_time - start_time))
        # tail of handle_one_day(): per-hour loop body (start of the function not shown)
        ws.write(row, col + 1, db.iorecords.find(spec).count(), style)
        col += 2
        row += 1
        st += one_hour


if __name__ == '__main__':
    depts = ('EE', 'TE', 'PE')
    conn = Connection('teasset')
    db = conn.flexasset
    # db = conn.engasset
    # dept_ids = get_dept_ids()
    wb = Workbook()
    ws = wb.add_sheet('In-Out Summary')
    write_header(ws)
    today = datetime.today()
    days = 7
    start_day = today - one_hour * 24 * days
    row = 2
    for i in range(days):
        handle_one_day(start_day + i * 24 * one_hour, ws, row)
        row += 24
    conn.disconnect()
    dir_path = tempfile.gettempdir()
    file_name = 'WK{} IOSummary.xls'.format(today.strftime('%W'))
    file_path = os.path.join(dir_path, file_name)
    wb.save(file_path)
    return newval  # tail of the preceding helper (definition not shown)


def writeExle(wbsave, ws, style, tvals, outfile):
    ws.write(0, 0, "\\", style)
    zflag = 1
    for vals in tvals:
        flag = 1
        for ll in vals:
            ws.write(0, zflag, unicode(ll[0]), style)
            ws.write(flag, zflag, ll[2], style)
            ws.write_merge(flag, flag + 1, 0, 0, ll[1], style)
            flag = flag + 1
            ws.write(flag, zflag, ll[3], style)
            flag = flag + 1
        zflag = zflag + 1
    wbsave.save(outfile)


if __name__ == "__main__":
    wbsave = Workbook()
    ws = wbsave.add_sheet('sheet1')
    style = returnStyle()
    outfile = "end.xls"
    infile = "in.txt"
    if len(sys.argv) > 2:
        outfile = sys.argv[2]
        infile = sys.argv[1]
    filelist = returnReadList(infile)
    tvals = returnLast(filelist)
    writeExle(wbsave, ws, style, tvals, outfile)
def exportXlsData(self, request, sheetName=u'ict', wbook=None): """get XLS file for all folder objects""" if wbook is None: localWbook = True filename = datetime.now().strftime('ict_%Y%m%d%H%M%S.xls') f_handle, f_name = tempfile.mkstemp(filename) wbook = Workbook() else: localWbook = False wb_hosts = wbook.add_sheet(getattr(self, 'ikName', sheetName)) style0 = XFStyle() font0 = Font() font0.height = 6 * 20 #style0.num_format_str = '@' style0.font = font0 style1 = XFStyle() font1 = Font() font1.height = 6 * 20 #style1.num_format_str = '@' style1.font = font1 heading_pattern = xl.Pattern() heading_pattern.pattern = xl.Pattern.SOLID_PATTERN heading_pattern.pattern_back_colour = 0x5 heading_pattern.pattern_fore_colour = 0x5 fields = fieldsForFactory(self.contentFactory, ['objectID']) attrList = [fname for fname, fval in fields.items()] itemList = self.items() pos_y = 0 pos_x = 0 for attr in attrList: style0.pattern = heading_pattern wb_hosts.write(pos_y, pos_x, attr, style0) pos_x += 1 # objectID wb_hosts.write(pos_y, pos_x, "objectID", style0) wb_hosts.col(pos_x).width *= 3 pos_x += 1 pos_y = 1 # allAttributes = {} for interface in implementedBy(self.contentFactory): for i_attrName in interface: i_attr = interface[i_attrName] if IField.providedBy(i_attr): allAttributes[i_attrName] = i_attr # for item_n, item_v in itemList: pos_x = 0 for attr in attrList: attrField = allAttributes[attr] attrDm = datamanager.AttributeField(item_v, attrField) v_style = XFStyle() v_font = Font() v_font.height = 6 * 20 v_style.font = v_font value = None if IChoice.providedBy(attrField): v_style.num_format_str = '@' # try: dateValue = attrDm.get() v_widget = getMultiAdapter(\ (attrField,request), interfaces.IFieldWidget) v_widget.context = item_v v_dataconverter = queryMultiAdapter(\ (attrDm.field, v_widget), interfaces.IDataConverter) # except AttributeError, errText: # print "Error2: ### ", errText # print "item_v: ", item_v # print "item_v.ikName: ", item_v.ikName # print "attr: ", attr # dateValue = None # #import pdb # #pdb.set_trace() if dateValue is not None: # try: valueVector = v_dataconverter.toWidgetValue(dateValue) if len(valueVector) > 0: value = valueVector[0] # except IndexError, errText: # print "Error3: ### ", errText # print "item_v: ", item_v # print "item_v.ikName: ", item_v.ikName # print "attr: ", attr else: v_style.num_format_str = '@' try: # import pdb # pdb.set_trace() dateValue = attrDm.get() v_widget = getMultiAdapter(\ (attrField,request), interfaces.IFieldWidget) v_widget.context = item_v v_dataconverter = queryMultiAdapter(\ (attrDm.field, v_widget), interfaces.IDataConverter) except AttributeError, errText: print "Error1: ### ", errText print "item_v: ", item_v print "item_v.ikName: ", item_v.ikName print "attr: ", attr dateValue = None if dateValue is not None: try: value = v_dataconverter.toWidgetValue(dateValue) except AttributeError, errText: print "Error4: ### ", errText print "item_v: ", item_v print "item_v.ikName: ", item_v.ikName print "attr: ", attr value = None if type(value) is list: value = u";".join(value) if value is not None: wb_hosts.write(pos_y, pos_x, value, v_style) pos_x += 1
def export_as_xls(modeladmin, request, queryset): """ Generic xls export admin action. """ if queryset.count() > settings.EXPORT_RECORDS_LIMIT: messages.error(request, "Can't export more then %s Records in one go. Narrow down your criteria using filters or search" % str(settings.EXPORT_RECORDS_LIMIT)) return HttpResponseRedirect(request.path_info) fields = [] #PUT THE LIST OF FIELD NAMES YOU DON'T WANT TO EXPORT exclude_fields = [] #foreign key related fields extras = [''] if not request.user.is_staff: raise PermissionDenied for f in modeladmin.list_display: if f not in exclude_fields: fields.append(f) fields.extend(extras) opts = modeladmin.model._meta wb = Workbook() ws0 = wb.add_sheet('0') col = 0 field_names = [] # write header row for field in fields: ws0.write(0, col, field) field_names.append(field) col = col + 1 row = 1 # Write data rows for obj in queryset: col = 0 for field in field_names: if field in extras: try: val = [eval('obj.'+field)] #eval sucks but easiest way to deal except : val = ['None'] else: try: val = lookup_field(field, obj, modeladmin) except : val = ['None'] if not val[-1] == None: if isinstance(val[-1], bool): ws0.write(row, col, strip_tags(str(val[-1]))) elif not isinstance(val[-1], str) and not isinstance(val[-1], unicode): if type(val[-1]) == int: ws0.write(row, col, val[-1].__str__()) else: ws0.write(row, col, strip_tags(val[-1].__unicode__())) elif val[-1]: ws0.write(row, col, strip_tags(val[-1])) else: ws0.write(row, col, strip_tags('')) col = col + 1 row = row + 1 wb.save('/tmp/output.xls') response = HttpResponse(open('/tmp/output.xls','r').read(), content_type='application/ms-excel') response['Content-Disposition'] = 'attachment; filename=%s.xls' % unicode(opts).replace('.', '_') return response
print("行:", rows, "列:", cols) #获取整行和整列的值(数组) listA = [] for i in range(rows - 1): dd = table1.row_values(i) listA.append(dd) #print(table2.col_values(i)) print(listA) #单元格 cell_A1 = table1.cell(0, 0).value cell_C4 = table1.cell(1, 2).value print(cell_A1, cell_C4) #使用行索引 #cell_A1 = table2.row(0)[0].value #cell_A2 = table2.col(1)[0].value #获取第一行第一列数据 cell_value = table1.cell_value(0, 0) print(cell_value) #写入Excel w = Workbook() #创建一个工作簿 ws = w.add_sheet('Hey, Hades') #创建一个工作表 ws.write(0, 0, 'bit') #在1行1列写入bit ws.write(0, 1, 'huang') #在1行2列写入huang ws.write(1, 0, 'xuan') #在2行1列写入xuan w.save('mini.xls') #保存
class OpenExcel: ''' #Usage: from pyeasy.excel import OpenExcel f = OpenExcel(path) #default sheet(1) you can use 'f = OpenExcel(path).sheet(2) ########### read data ########### f.read() #return all data f.read(1) or read("1") # return a list of line data (horizontal) f.read("A") # return a list of column data (vertical) f.read('A7') or read('24A') # return a string of special data! f.read("A","1") #you can also use read("A",1) read(1,"A") or read("1","A") ########### read sheet name ########### OpenExcel(path).sheet(2).readSheetName() # return a string of sheet2 name f.readAllsheetsName() # return a list of all sheets names ########### getposition ########### f.getPosition("tuweizhong")#default completeMatch=False,stripOn=False #find a string position in excel #for examples: OpenExcel("/home/tu/tu.xls").getPosition("3D")#default completeMatch=False,stripOn=False OpenExcel("/home/tu/tu.xls").getPosition("3D",completeMatch=True,stripOn=True) or ("3D",1,1) ''' def __init__(self,path,mode="r"): self.path = path self.mode = mode if mode=="r": if os.path.exists(path): try: self.data = parse_xls(path) self.sheets = self.data[0] except IndexError: self.sheets = "" print "Can't find sheet %d"%sheet except: self.sheets = "" print "Unknown Error!" else: print "Can't find %s" %path elif mode=="w": self.newexcel = Workbook() self.ws = self.newexcel.add_sheet('sheet1') elif mode=="a":#add to excel pass def __toNum(self,args): if len(args)==1: return (ord(args.upper())-ord("A")) if len(args)==2: return ((ord(args[0].upper())-ord("A") + 1)*26 + (ord(args[1].upper())-ord("A"))) #AA means 26 def __hasChar(self,args): if re.compile('[a-zA-Z]').search(str(args)): return True return False def __hasNum(self,args): if re.compile('[0-9]').search(str(args)): return True return False ''' #================================= # A B C D E # 1 (0,0) (0,1) (0,2) (0,3) (0,4) # 2 (1,0) (1,1) (1,2) (1,3) (1,4) # 3 (2,0) (2,1) (2,2) (2,3) (2,4) #================================= # (row,col) ''' def _convert(self,args):#convert '(1,1)' to 'B2' and 'B2' to '(1,1)' auto-recongnize if args.find(",") > -1: b,a=args.replace("(","").replace(")","").split(",") a=chr(int(a)+65)#chr(65) is "A" and ord("A") is 65 b=str(int(b)+1) return a+b else: a=str(int(args[1:2])-1) # D1-->(0,3) 1-->0 b=str(ord(args[0:1].upper())-65) # D1-->(0,3) D-->3 ord("D") is 68 return "("+a+","+b+")" def readSheetName(self): return self.sheets[0] def readAllSheetsName(self): sL = len(self.data) sheetNameList = [] for i in range(sL): sheetNameList.append(sC[i][0]) return sheetNameList def getPosition(self,string,completeMatch=False,stripOn=False):#edit to find all positions tmp=repr(self.sheets) posList = []#positions list pList = StringHandle(tmp).findAll(string)#if can't find return vacant list [] for p in pList: list1=StringHandle(tmp[:p]).findAll("(")# >>> a[:43] begin=list1[len(list1)-1] list2=StringHandle(tmp[:p]).findAll(":")# "[(u'sheetname1', {(0, 1): u'1B', (1, 2): u'" end=list2[len(list2)-1] posList.append(self._convert(tmp[begin:end])) if completeMatch:#remove incomplete match! for i in posList: readString = self.read(i) if stripOn: readString = readString.strip() if readString != string: posList.remove(i) return sorted(posList) def sheet(self,sheet_num): self.sheets = self.data[sheet_num-1] def read(self,*args): if self.sheets == "": return#stop here if read failed elif len(args) == 0: # read() return all data return self.data elif len(args) == 1:# read('A') or read('10') or read('A3') #1. 
judge read a line or a position args = str(args[0]) if self.__hasChar(args) and self.__hasNum(args):# contains char and num ,such as "A5" # read('A3') _char = ''.join(re.compile('[a-zA-Z]').findall(args)) _num = ''.join(re.compile('[0-9]').findall(args)) return self.read(_char,_num) # read('A') or read('10') #bugs here! use read('A',3) tmpList = [] if self.__hasNum(args): for i in self.sheets[1]: if (i[0] == int(args)-1): tmpList.append(self.sheets[1].get(i)) else: for i in self.sheets[1]: if (i[1] == self.__toNum(args)): tmpList.append(self.sheets[1].get(i)) return tmpList elif len(args) == 2:#read a given position, such as read("A",10) if self.__hasNum(args[1]): b = self.__toNum(args[0]) a = int(args[1])-1 else: a = int(args[0])-1 b = int(self.__toNum(args[1])) return self.sheets[1].get((a,b)) def write(self,position,content):#write("A2","ecoli") a,b = self._convert(position).replace("(","").replace(")","").split(",") self.ws.write(int(a),int(b),str(content)) def save(self): if self.mode == "w": self.newexcel.save(self.path) else:print "OpenExcel mode is not 'w',OpenExcel(path,'w')"
def exportXlsData(self): """get XLS file for all folder objects""" filename = datetime.now().strftime('ict_%Y%m%d%H%M%S.xls') f_handle, f_name = tempfile.mkstemp(filename) wbook = Workbook() wb_hosts = wbook.add_sheet('ddd') style0 = XFStyle() font0 = Font() font0.height = 6 * 20 style0.num_format_str = '@' style0.font = font0 style1 = XFStyle() font1 = Font() font1.height = 6 * 20 style1.num_format_str = '@' style1.font = font1 heading_pattern = xl.Pattern() heading_pattern.pattern = xl.Pattern.SOLID_PATTERN heading_pattern.pattern_back_colour = 0x5 heading_pattern.pattern_fore_colour = 0x5 fields = self.fields attrList = [fname for fname, fval in fields.items()] itemList = self.context.items() pos_y = 0 pos_x = 0 for attr in attrList: wb_data = Formula(u'"%s"' % attr) style0.pattern = heading_pattern wb_hosts.write(pos_y, pos_x, wb_data, style0) pos_x += 1 # IntID wb_data = Formula(u'"IntID"') wb_hosts.write(pos_y, pos_x, wb_data, style0) pos_x += 1 # objectID wb_data = Formula(u'"objectID"') wb_hosts.write(pos_y, pos_x, wb_data, style0) wb_hosts.col(pos_x).width *= 3 pos_x += 1 pos_y = 1 # allAttributes = {} for interface in implementedBy(self.factory): for i_attrName in interface: i_attr = interface[i_attrName] if IField.providedBy(i_attr): allAttributes[i_attrName] = i_attr # for item_n, item_v in itemList: pos_x = 0 for attr in attrList: # from zope.interface import implementedBy # ff=self.factory # tt=[i for i in implementedBy(ff)] # it=tt[-1] #attrField = self.attrInterface[attr] attrField = allAttributes[attr] # tmpFieldProperty = getattr(self.factory, attr) # if hasattr(tmpFieldProperty, '_FieldProperty__field'): # attrField = getattr(self.factory, attr)._FieldProperty__field attrDm = datamanager.AttributeField(item_v, attrField) v_style = XFStyle() v_font = Font() v_font.height = 6 * 20 v_style.font = v_font value = None if IChoice.providedBy(attrField): v_style.num_format_str = '@' dateValue = attrDm.get() v_widget = getMultiAdapter(\ (attrField,self.request), interfaces.IFieldWidget) v_widget.context = item_v # dm = zope.component.getMultiAdapter( # (self.content, field.field), interfaces.IDataManager) # zope.component.getMultiAdapter( # (self.context, # self.request, # self.form, # getattr(widget, 'field', None), # widget), # interfaces.IValidator).validate(fvalue) # dm = zope.component.getMultiAdapter( # (self.__context__, field), interfaces.IDataManager) v_dataconverter = queryMultiAdapter(\ (attrDm.field, v_widget), interfaces.IDataConverter) #print u"ddd55: %s: %s" % (attr, dateValue) if dateValue is not None: value = v_dataconverter.toWidgetValue(dateValue)[0] #print "value3-> %s: %s " % (attr, value) # elif ICollection.providedBy(attrField): # v_style.num_format_str = '@' # value = getattr(item_v, attr) # print "ddd66: %s: %s" % (attr, value) # elif IBool.providedBy(attrField): # v_style.num_format_str = '@' # value = getattr(item_v, attr) # print "value2-> %s: %s " % (attr, value) else: v_style.num_format_str = '@' dateValue = attrDm.get() v_widget = getMultiAdapter(\ (attrField,self.request), interfaces.IFieldWidget) v_widget.context = item_v v_dataconverter = queryMultiAdapter(\ (attrDm.field, v_widget), interfaces.IDataConverter) #d2 = queryMultiAdapter((attrDm.field, v_widget),interfaces.IDataConverter) if dateValue is not None: value = v_dataconverter.toWidgetValue(dateValue) if type(value) is list: value = u";".join(value) #print u"value1-> %s: %s " % (attr, value) if value is not None: #print u"wb_hosts.write(%s, %s, %s, %s)" % (pos_y, pos_x, value, v_style) 
wb_hosts.write(pos_y, pos_x, value, v_style) pos_x += 1 # IntID uidutil = queryUtility(IIntIds) wb_data = Formula(u'"%s"' % uidutil.getId(item_v)) wb_hosts.write(pos_y, pos_x, wb_data, style0) pos_x += 1 # objectID wb_data = Formula(u'"%s"' % item_v.objectID) wb_hosts.write(pos_y, pos_x, wb_data, style0) pos_x += 1 pos_y += 1 wbook.save(f_name) self.request.response.setHeader('Content-Type', 'application/vnd.ms-excel') self.request.response.setHeader(\ 'Content-Disposition', 'attachment; filename=\"%s\"' % filename) setNoCacheHeaders(self.request.response) datafile = open(f_name, "r") dataMem = datafile.read() datafile.close() os.remove(f_name) return dataMem
def write_excel(results): title = [ 'DATE', 'User ID', 'SCREEN NAME', 'Fans', 'Fan Growth', 'Fan Growth %', 'Tweets', 'Retweets', 'Comments', 'direct @', 'Likes', 'Impression', 'ER 30 days', 'ER 7 days', '#1 Post URL', '#1 Post ER', '#1 Post RT', '#1 Post CT', '#2 Post URL', '#2 Post ER', '#2 Post RT', '#2 Post CT', '#3 Post URL', '#3 Post ER', '#3 Post RT', '#3 Post CT', '#4 Post URL', '#4 Post ER', '#4 Post RT', '#4 Post CT', '#5 Post URL', '#5 Post ER', '#5 Post RT', '#5 Post CT', '#1 Influencer URL', '#1 Influencer Tweets Count', '#1 Influencer Comments Count', '#1 Influencer Direct @ Count', '#2 Influencer URL', '#2 Influencer Tweets Count', '#2 Influencer Comments', '#2 Influencer Direct @ Count', '#3 Influencer URL', '#3 Influencer Tweets Count', '#3 Influencer Comments', '#3 Influencer Direct @ Count', '#4 Influencer URL', '#4 Influencer Tweets Count', '#4 Influencer Comments', '#4 Influencer Direct @ Count', '#5 Influencer URL', '#5 Influencer Tweets Count', '#5 Influencer Comments', '#5 Influencer Direct @ Count', '#1 Hashtag', "#1 Hashtag's engagement rate", '#2 Hashtag', "#2 Hashtag's engagement rate", '#3 Hashtag', "#3 Hashtag's engagement rate", 'Question Posted', 'Question Responded', 'Question Response Time', 'Response Share' ] w = Workbook() ws = w.add_sheet('Weekly Raw Data') wt = w.add_sheet('Fans Info') font = Font() font.height = 12 * 0x14 font.name = str_to_unicode('微软雅黑') title_style = Style.XFStyle() title_style.font = font fontbold = Font() fontbold.height = 12 * 0x14 fontbold.bold = True fontbold.name = str_to_unicode('微软雅黑') boldstyle = Style.XFStyle() boldstyle.font = fontbold percent = Style.XFStyle() percent.font = font percent.num_format_str = '0.00%' for i in range(len(title)): ws.col(i).width = 3600 ws.write_cols(0, 0, title, title_style) abscissa = 0 for res in range(len(results)): screen_name, uid, day, period, fans, fan_growth, tweets, retweets, comments, direct_at, likes, impressions, \ er_30, er_7, response_share, top_posts, top_influencer, top_hashtag, questions, responds, mean_res, active, interactive, \ verified, subfans, province, gender, age, tag, fans_week, fans_hour, brand_week, brand_hour = results[res] fan_percent = fan_growth / float(fans - fan_growth) top_posts, top_influencer, top_hashtag, verified, subfans, province, gender, age, tag, fans_week, \ fans_hour, brand_week, brand_hour = [format_data(p) for p in (top_posts, top_influencer, top_hashtag, \ verified, subfans, province, gender, age, tag, fans_week, \ fans_hour, brand_week, brand_hour)] mean_res = round(mean_res / 60.0, 2) #print res t_posts = [] top_posts = sorted(top_posts, key=lambda x: x[0], reverse=True) for post in top_posts: t_posts.extend( [post[1]['url'], post[0], post[1]['nret'], post[1]['ncmt']]) t_posts.extend(['N/A'] * (20 - len(t_posts))) t_influencer = [] top_influencer = sorted(top_influencer, key=lambda x: x[0], reverse=True) for influ in top_influencer: weibourl = 'http://weibo.com/u/%s' % influ[1]['uid'] t_influencer.extend([ weibourl, influ[1].get('reposts', 0), influ[1].get('comments', 0), influ[1].get('direct_at', 0) ]) t_influencer.extend(['N/A'] * (20 - len(t_influencer))) t_hashtag = [] if top_hashtag: top_hashtag = sorted(top_hashtag, key=lambda x: x[0], reverse=True) [t_hashtag.extend([hash[1], hash[0]]) for hash in top_hashtag] t_hashtag.extend(['N/A'] * (6 - len(t_hashtag))) account_index = [day.strftime('%Y-%m-%d'), uid, screen_name, fans, fan_growth, fan_percent, tweets, retweets, \ comments, direct_at, likes if likes else 0, \ impressions, er_30, 
er_7] + t_posts + t_influencer + t_hashtag + [questions, responds, mean_res, response_share if response_share else 'N/A'] #将第一页数据写入execl ws.write_cols(res + 1, 0, account_index, title_style) tag.extend([('N/A', 0) for i in range(10 - len(tag))]) province.extend([('N/A', 0) for i in range(10 - len(province))]) subfans.extend([0 for i in range(12 - len(subfans))]) vertical_title = [screen_name, 'Active', 'Active Fans', 'Other Fans', '', 'Interaction', 'Interactive Fans', 'Other Fans', '', 'Verified Type', \ 'Verified', 'Daren', 'Un-verified', '', 'Fan Number', '0~9', '10~49', '50~99', '100~199', '200~299', '300~399', '400~499',\ '500~999', '1000~1999', '2000~4999', '5000~9999', '>=10000', '', 'Gender', 'Male', 'Female', '', 'Age', '<18', '18~24', \ '25~29', '30~34', '35~39', '40~49', '50~59', '>=60', '', 'Tag'] + [i[0] for i in tag] + ['', 'Province'] + \ [str_to_unicode(provincesdict.get(str(i[0]), '')) for i in province]+ ['', 'Hour', '0-1', '1-2', '2-3', '3-4', '4-5', '5-6',\ '6-7', '7-8', '8-9', '9-10', '10-11', '11-12', '12-13', '13-14', '14-15', '15-16', '16-17', '17-18', '18-19', '19-20', '20-21', '21-22', \ '22-23', '23-0', '', 'Days', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', ''] wt.col(res * 3).width = 4000 ordinate = 0 for ver in range(len(vertical_title)): if ver in (0, 1) or not vertical_title[ver - 1]: wt.write(ordinate, abscissa, vertical_title[ver], boldstyle) else: wt.write(ordinate, abscissa, vertical_title[ver], title_style) ordinate += 1 if verified: verified_count = float(sum(verified)) verified_type = [ verified[1] / verified_count, verified[2] / verified_count, verified[0] / verified_count ] if verified_count > 0 else [0, 0, 0] else: verified_type = [0] * 3 if len(gender) != 2: gender = [0, 0] else: gender.sort() gender = [gender[1][1], gender[0][1]] tagdata = [i[1] for i in tag] provincedata = [i[1] for i in province] if age: age = [i[1] for i in sorted(age.items())] age_type = [i / float(sum(age)) for i in age] else: age_type = [0] * 8 if not brand_hour: brand_hour = [0] * 24 if not brand_week: brand_week = [0] * 7 subfans_type, gender_type, tag_type, province_type, fanshour_type, fansweek_type, brandhour_type, brandweek_type = \ [div_data(j) for j in [subfans, gender, tagdata, provincedata, fans_hour, fans_week, brand_hour, brand_week]] gaps = ['', 'Percentage'] vertical_data = ['', 'Percentage', active, 1-active if active else 0, '', 'Percentage', interactive, 1-interactive if interactive else 0, \ '', 'Percentage'] + verified_type + gaps + subfans_type + gaps + gender_type + gaps + age_type + gaps + \ tag_type + gaps + province_type + ['', 'Fans Activity'] + fanshour_type + ['', 'Fans Activity'] + fansweek_type wt.col(res * 3 + 1).width = 4000 ordinate = 0 abscissa += 1 for ver in range(len(vertical_data)): if ver in (0, 1) or not vertical_title[ver - 1]: wt.write(ordinate, abscissa, vertical_data[ver], boldstyle) else: wt.write(ordinate, abscissa, vertical_data[ver], percent) ordinate += 1 wt.col(res * 3 + 2).width = 4000 ordinate = 0 rest_data = [''] * 66 + ['Brand Activity'] + brandhour_type + [ '', 'Brand Activity' ] + brandweek_type abscissa += 1 for ver in range(len(rest_data)): if ver in (0, 1) or not vertical_title[ver - 1]: wt.write(ordinate, abscissa, rest_data[ver], boldstyle) else: wt.write(ordinate, abscissa, rest_data[ver], percent) ordinate += 1 abscissa += 2 return w
def write(seq_records, options): if options.input_type == 'prot': return #Open up TXT file and XLS record outfolder = options.full_outputfolder_path txtfile = open(path.join(outfolder, "geneclusters.txt"), "w") wb = Workbook() font1 = Font() style1 = XFStyle() style1.font = font1 font1.bold = True ws0 = wb.add_sheet('0') ws0.write(0, 0, "Input accession number", style1) ws0.write(0, 1, "Input name", style1) ws0.write(0, 2, "Gene cluster type", style1) ws0.write(0, 3, "Gene cluster genes", style1) ws0.write(0, 4, "Gene cluster gene accessions", style1) if options.knownclusterblast: ws0.write(0, 5, "Compound with gene cluster of highest homology", style1) #For each gene cluster, write out info column = 1 for seq_record in seq_records: clusters = utils.get_cluster_features(seq_record) for cluster in clusters: clustertype = utils.get_cluster_type(cluster) clusternr = utils.get_cluster_number(cluster) clustergenes = [ utils.get_gene_id(cds) for cds in utils.get_cluster_cds_features(cluster, seq_record) ] accessions = [ utils.get_gene_acc(cds) for cds in utils.get_cluster_cds_features(cluster, seq_record) ] ws0.write(column, 0, seq_record.id) try: ws0.write(column, 1, seq_record.description) except: ws0.write( column, 1, "Name to long to be contained in Excel cell; see txt file in downloadable zip archive." ) ws0.write(column, 2, clustertype) try: ws0.write(column, 3, ";".join(clustergenes)) except: ws0.write( column, 3, "Too many genes to be contained in Excel cell; see txt file in downloadable zip archive." ) try: ws0.write(column, 4, ";".join(accessions)) except: ws0.write( column, 4, "Too many genes to be contained in Excel cell; see txt file in downloadable zip archive." ) if hasattr(seq_record, 'closestcompounddict') and \ seq_record.closestcompounddict.has_key(clusternr): ws0.write(column, 5, seq_record.closestcompounddict[clusternr]) column += 1 txtfile.write("\t".join([ seq_record.id, seq_record.description, clustertype, ";".join( clustergenes), ";".join(accessions) ]) + "\n") wb.save(path.join(outfolder, "%s.geneclusters.xls" % seq_record.id))
#!/usr/bin/env python
# tries to stress SST, SAT and MSAT
__rev_id__ = """$Id: big-16Mb.py,v 1.3 2005/03/27 12:47:06 rvk Exp $"""

from time import *
from pyExcelerator.Workbook import *
from pyExcelerator.Style import *

style = XFStyle()

wb = Workbook()
ws0 = wb.add_sheet('0')

colcount = 200 + 1
rowcount = 6000 + 1

t0 = time()
print "\nstart: %s" % ctime(t0)

print "Filling..."
for col in xrange(colcount):
    print "[%d]" % col,
    for row in xrange(rowcount):
        #ws0.write(row, col, "BIG(%d, %d)" % (row, col))
        ws0.write(row, col, "BIG")

t1 = time() - t0
print "\nsince starting elapsed %.2f s" % (t1)

print "Storing..."
wb.save('big-16Mb.xls')
    # tail of an hour-bucketing helper (earlier branches not shown)
    elif localtime.tm_hour >= 18 and localtime.tm_hour < 19:
        cate = 18
    elif localtime.tm_hour >= 19 and localtime.tm_hour < 20:
        cate = 19
    elif localtime.tm_hour >= 20 and localtime.tm_hour < 21:
        cate = 20
    elif localtime.tm_hour >= 21 and localtime.tm_hour < 22:
        cate = 21
    elif localtime.tm_hour >= 22 and localtime.tm_hour < 23:
        cate = 22
    elif localtime.tm_hour >= 23 and localtime.tm_hour < 24:
        cate = 23
    return cate


w = Workbook()
ws = w.add_sheet("iptime")

conn = pymongo.Connection('10.3.3.220', 27017)
iae_hitlog_a = conn.gehua.iae_hitlog_a
iae_hitlog_a.create_index("ip")
ip = conn.gehua.ip
res = ip.find()

ind = 0
rowNumber = 0

mysqlconn = MySQLdb.connect(host="172.16.168.57",
                            user="******",
                            passwd="ZAQ!XSW@CDE#",
                            db="demo_vsp_a")
#mysqlconn1=MySQLdb.connect(host="172.16.168.57",user="******",passwd="ZAQ!XSW@CDE#",db="ire")

for result in res:
    #Question