def as_excel_data(self, sheet_name):
    """Render the report's current data view as raw XLS bytes.

    Applies the paging controller's view when present, writes the rows
    through ExcelSheet into a temporary .xls file, reads the file back and
    removes it.

    :param sheet_name: worksheet name for the generated workbook.
    :return: the .xls file contents as a byte string.
    """
    from pyExcelerator import Workbook
    from excel import ExcelColumn, ExcelSheet
    import tempfile, os
    logger = logging.getLogger("report.excel")
    if self.paging_controller:
        self.paging_controller.update_view_size(self.data_count)
        data, cols = self.paging_controller.data_view(self.data, self.cells)
    else:
        data, cols = self.data, self.cells.cols
    # NamedTemporaryFile is only used to reserve a unique path; close it so
    # pyExcelerator can recreate the file on all platforms.
    temp_file = tempfile.NamedTemporaryFile(suffix='.xls', dir=tempfile.tempdir)
    logger.debug("exported tmp excel name: %s" % temp_file.name)
    temp_file.close()
    work_book = Workbook()
    # 'row_no' is an internal column and is not exported.
    log_colums = [ExcelColumn(e, e) for e in cols if e != 'row_no']
    sheet = ExcelSheet(sheet_name, log_colums)
    sheet.init_sheet(work_book, data)
    sheet.export_header(data)
    sheet.export_data(data)
    work_book.save(temp_file.name)
    try:
        # fix: close the file handle after reading (it was leaked before),
        # and remove the temp file even if the read fails.
        with open(temp_file.name, "rb") as f:
            data = f.read()
        logger.debug("done to export excel, data length: %s" % len(data))
    finally:
        os.remove(temp_file.name)
    return data
def export_file_excel(data): try: # filepath = os.path.join(export_filepath, data['downloadid']) filename = os.path.join(export_filepath, export_filename + '.' + data[0]['downloadid']) w = Workbook() ws = w.add_sheet('sheet') names = [] values = [] for d in data: if "name" in d.keys(): names.append(d['name']) values.append(d['data']) # logger.info(values) for i in range(len(names)): ws.write(0, i, names[i]) for i in range(len(values)): for j in range(len(values[i])): value = values[i][j] if not value: value = u'' ws.write(j + 1, i, value) # i += 1 w.save(filename) except Exception, e: logger.warn(e, exc_info=True)
def __init__(self, logger, output_name='text.xls'):
    """Create the workbook with a 'CompanyInformation' sheet, write the
    XLS_HEADERS into row 0, and position the data cursor at row 1."""
    self.logger = logger
    self.workbook = Workbook()
    self.worksheet = self.workbook.add_sheet('CompanyInformation')
    self.output_name = output_name
    self.row = 1
    col = 0
    while col < ExcelWriter.COLS:
        self.worksheet.write(0, col, ExcelWriter.XLS_HEADERS[col])
        col += 1
def post(self, request, *args, **kwargs):
    """Dispatch bulk task operations based on the 'action' POST field.

    Supported actions:
      cancel       - cancel the tasks listed in 'ids'.
      redistribute - reassign the tasks in 'ids' to 'user_id'.
      import       - create TaskInfo rows from an uploaded XLS file.
      export       - download all non-deleted tasks as users.xls.
    """
    action = request.POST.get('action')
    ids = request.POST.get('ids')
    if action == 'cancel':
        cancel_task(ids)
        return get_task_by_ids(ids)
    elif action == 'redistribute':
        user_id = request.POST.get('user_id')
        if not user_id:
            return False
        redistribute_task(ids, user_id)
        return get_task_by_ids(ids)
    elif action == 'import':
        def handle(key):
            # Missing cells are simply absent from the parsed sheet dict.
            return sheets[key] if sheets.has_key(key) else None
        user_file = request.FILES.values()[0]
        fd, xls = tempfile.mkstemp()
        os.close(fd)  # fix: the mkstemp descriptor was leaked
        try:
            with open(xls, 'wb') as f:
                f.write(user_file.read())
            sheets = parse_xls(xls)[0][1]
        finally:
            os.remove(xls)  # fix: clean up the uploaded temp file
        with transaction.atomic():
            tasks = []
            # Sheet keys are (row, col); row 0 is the header row.
            for i in range(1, max(sheets.keys())[0] + 1):
                task = TaskInfo()
                task.name = handle((i, 0))
                task.email = handle((i, 1))
                task.remark = handle((i, 2))
                task.is_new = True
                tasks.append(task)
            TaskInfo.objects.bulk_create(tasks)
        return undistributed_task()
    elif action == 'export':
        tasks = TaskInfo.objects.filter(deleted=0)
        w = Workbook()
        # fix: add_sheet() requires a sheet name; the original bare call
        # raised TypeError at runtime.
        ws = w.add_sheet('users')
        for r, task in enumerate(tasks):
            r += 1  # row 0 is left for a header
            ws.write(r, 0, task.name)
            ws.write(r, 1, task.email)
            ws.write(r, 2, task.phone)
            ws.write(r, 3, task.remark)
        fd, xls = tempfile.mkstemp()
        os.close(fd)  # fix: don't leak the descriptor
        try:
            w.save(xls)
            # fix: read the binary XLS with 'rb' and close the handle
            with open(xls, 'rb') as f:
                payload = f.read()
        finally:
            os.remove(xls)  # fix: remove the temp export file
        response = HttpResponse(payload, content_type='application/xls')
        response['Content-Disposition'] = 'attachment; filename=users.xls'
        return response
def write_xls_price(self):
    """Write the price list to MEDIA_ROOT/upload/{wprice|price}.xls.

    The 'whole' option selects the wholesale variant (different file name
    and title).  Layout: merged title in row 0, column headers in row 2,
    then per-section blocks of items starting at row 3.
    """
    if self.options['whole']:
        filename = path.join(settings.MEDIA_ROOT, 'upload/wprice.xls')
    else:
        filename = path.join(settings.MEDIA_ROOT, 'upload/price.xls')
    workBookDocument = Workbook()
    docSheet = workBookDocument.add_sheet(u'Прайс соло-парфюм')
    # Widen the name column (BIFF column-width units).
    docSheet.col(1).width = 10000
    headerFont = Font()
    headerFont.bold = True
    headerFont.size = 400
    headerStyle = XFStyle()
    headerStyle.font = headerFont
    docSheet.row(0).set_style(headerStyle)
    # Title row, merged across the three columns, stamped with today's date.
    if self.options['whole']:
        docSheet.write_merge(
            0, 0, 0, 2,
            u'Оптовый прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y'))
    else:
        docSheet.write_merge(
            0, 0, 0, 2,
            u'Прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y'))
    # Column headers (SKU / name / price).
    docSheet.write(2, 0, u'Артикул')
    docSheet.write(2, 1, u'Наименование', )
    docSheet.write(2, 2, u'Цена')
    # Bold, horizontally centered style for section headings.
    sectionFont = Font()
    sectionFont.bold = True
    sectionStyle = XFStyle()
    sectionStyle.font = sectionFont
    align = Alignment()
    align.horz = Alignment.HORZ_CENTER
    sectionStyle.alignment = align
    row = 3
    for section in self.data.iterkeys():
        docSheet.write_merge(row, row, 0, 2, section, sectionStyle)
        row += 1
        for item in self.data[section]:
            docSheet.write(row, 0, item['identifier'])
            docSheet.write(row, 1, item['name'])
            docSheet.write(row, 2, item['price'])
            row += 1
    workBookDocument.save(filename)
def exportAllXlsData(self, request):
    """Export every folder under the site's root folder into one workbook,
    one sheet per folder, and return (suggested_filename, xls_bytes)."""
    sitemanger = zapi.getParent(self)
    locSitemanager = zapi.getParent(sitemanger)
    root_folder = zapi.getParent(locSitemanager)
    filename = datetime.now().strftime('ict_all_%Y%m%d%H%M%S.xls')
    f_handle, f_name = tempfile.mkstemp(filename)
    os.close(f_handle)  # fix: the mkstemp descriptor was leaked
    wbook = Workbook()
    # Each folder renders its own sheet into the shared workbook.
    # (fix: stray debug print removed)
    for folder in root_folder.values():
        folder.exportXlsData(request, folder.ikName, wbook)
    wbook.save(f_name)
    try:
        # fix: read the XLS back in binary mode (was text mode "r") and
        # remove the temp file even when the read fails.
        datafile = open(f_name, "rb")
        try:
            dataMem = datafile.read()
        finally:
            datafile.close()
    finally:
        os.remove(f_name)
    return (filename, dataMem)
def output_sheet(contents, titles, sheet_name="test", excel_name="test.xls"):
    """Write `contents` (objects or dicts) to a one-sheet XLS file.

    Row 0 holds `titles`; each subsequent row holds one element of
    `contents`, read by key for dicts and by attribute otherwise.

    :return: the saved Workbook, or None when there is nothing to write.
    """
    # fix: idiomatic emptiness test instead of len(...) <= 0
    if not contents or not titles:
        return
    wb = Workbook()
    ws0 = wb.add_sheet(sheet_name)
    for col, header in enumerate(titles):
        ws0.write(0, col, header)
    for row, content in enumerate(contents):
        for col, header in enumerate(titles):
            # fix: isinstance instead of exact-type comparison, so dict
            # subclasses are also written by key.
            if isinstance(content, dict):
                ws0.write(row + 1, col, content[header])
            else:
                ws0.write(row + 1, col, getattr(content, header))
    wb.save(excel_name)
    return wb
def generateLabelSpreadsheet(branch, release_id):
    """Build an XLS mapping each custom label referenced by the release's
    pending stories to its en_US value, and return the file's raw bytes."""
    release = Release.objects.get(id=release_id)
    release_labels = []
    for story in release.stories.all():
        deployables = DeployableObject.objects.filter(pending_stories=story, el_type='labels')
        release_labels += [d.el_name for d in deployables]
    os.chdir(branch.repo.location)
    path = os.path.join(CODE_BASE, 'labels', 'CustomLabels.labels')
    # fix: close the labels file deterministically
    with open(path) as f:
        doc = etree.XML(f.read())
    labelmap = {}
    children = doc.findall(SF_NAMESPACE + 'labels')
    for child in children:
        labelName = child.find(SF_NAMESPACE + 'fullName').text
        if labelName in release_labels:
            # fix: setdefault replaces the has_key/create dance
            langmap = labelmap.setdefault(labelName, {})
            langkey = child.find(SF_NAMESPACE + 'language').text
            desc = child.find(SF_NAMESPACE + 'value').text
            langmap[langkey] = desc
            print('label=%s, key=%s, desc=%s' % (labelName, langkey, desc))
    wb = Workbook()
    ws0 = wb.add_sheet('0')
    ws0.write(0, 0, 'Key')
    ws0.write(0, 1, 'en_US')
    row = 1
    for labelkey, langmap in labelmap.items():
        for langkey, value in langmap.items():
            ws0.write(row, 0, labelkey)
            # Only the en_US value is emitted; other languages get a key row.
            if langkey == 'en_US':
                ws0.write(row, 1, value)
            row += 1
    wb.save('/tmp/labels.xls')
    # fix: read the binary XLS back with 'rb' (was text mode) and close it
    with open('/tmp/labels.xls', 'rb') as f:
        return f.read()
def main(): if len(sys.argv) == 1: target_year = 1997 else: target_year = int(sys.argv[1]) print "Collecting statistics for the " + str(target_year) + '-' + str( target_year + 1) + " season...\n" fname = str(target_year) + '-' + str(target_year + 1) + '.xls' style = XFStyle() wb = Workbook() ws0 = wb.add_sheet('stats') write_categories(ws0) n_players = 0 for key in range(ord('a'), ord('z') + 1): n_players = parse_comp(chr(key), target_year, n_players, ws0) print '# of players = ' + str(n_players) wb.save(fname)
def export_special_excel(filename, data, names=[]): full_filepath = '' try: full_filepath = os.path.join(export_filepath, filename) w = Workbook() ws = w.add_sheet('sheet') if not names: names = [ u'id', u'name', u'detail_times', u'outer_times', u'total_times', u'order' ] column_length = len(names) for i in range(column_length): ws.write(0, i, names[i]) for i, value in enumerate(data): for j in range(column_length): index_name = names[j] ws.write(i + 1, j, value[index_name]) w.save(full_filepath) except Exception, e: logger.warn(e, exc_info=True)
def fetch_data_from_db_write_into_xls():
    """Fetch all automated test cases from the Testlink DB and dump them to
    '<TESTLINK_XLS_PATH>.xls': a header row of column names, then one row
    per case with the first column value prefixed 'ATH-'."""
    # Connect to Testlink DB and fetch all automated cases
    conn = MySQLdb.connect(host=TESTLINK_HOST_NAME, user=TESTLINK_USER_NAME,
                           passwd=TESTLINK_PWD, db=TESTLINK_DB_NAME)
    cu_select = conn.cursor(MySQLdb.cursors.DictCursor)
    testlink_xls_path = TESTLINK_XLS_PATH
    try:
        cu_select.execute(SELECT_QUERY)
    except MySQLdb.Error:
        conn.close()  # fix: don't leak the connection on query failure
        errInsertSql = "Insert Sql ERROR!! sql is==>%s" % (SELECT_QUERY)
        sys.exit(errInsertSql)
    testlink_dict = cu_select.fetchall()
    # fix: close the cursor and connection once the rows are in memory
    cu_select.close()
    conn.close()
    # Create an excel from the fetched data
    wb = Workbook()
    ws0 = wb.add_sheet('0')
    row_number = 1
    for row in testlink_dict:
        for i, item in enumerate(row):
            val = str(row[item])
            if i == 0:
                val = "ATH-" + val
            if row_number == 1:
                # fix: write each header cell exactly once (the original
                # wrote '' and then the name into the same cell, which
                # raises on xlwt's overwrite check).
                ws0.write(0, i, item)
            ws0.write(row_number, i, val)
        row_number = row_number + 1
    # Save the file
    wb.save('%s.xls' % testlink_xls_path)
def export_simple_excel(filename, data): full_filepath = '' try: full_filepath = os.path.join(export_filepath, filename) w = Workbook() ws = w.add_sheet('sheet') names = None values = [] for d in data: if names is None: names = d.keys() values.append(d.values()) for i in range(len(names)): ws.write(0, i, names[i]) for i in range(len(values)): for j in range(len(values[i])): value = values[i][j] if value is None: value = '' ws.write(i + 1, j, value) w.save(full_filepath) except Exception, e: logger.warn(e, exc_info=True)
print("行:", rows, "列:", cols) #获取整行和整列的值(数组) listA = [] for i in range(rows - 1): dd = table1.row_values(i) listA.append(dd) #print(table2.col_values(i)) print(listA) #单元格 cell_A1 = table1.cell(0, 0).value cell_C4 = table1.cell(1, 2).value print(cell_A1, cell_C4) #使用行索引 #cell_A1 = table2.row(0)[0].value #cell_A2 = table2.col(1)[0].value #获取第一行第一列数据 cell_value = table1.cell_value(0, 0) print(cell_value) #写入Excel w = Workbook() #创建一个工作簿 ws = w.add_sheet('Hey, Hades') #创建一个工作表 ws.write(0, 0, 'bit') #在1行1列写入bit ws.write(0, 1, 'huang') #在1行2列写入huang ws.write(1, 0, 'xuan') #在2行1列写入xuan w.save('mini.xls') #保存
def write(self, file_or_filename):
    """ Writes case data to file in Excel format. """
    book = Workbook()
    self.book = book
    self._write_data(None)
    book.save(file_or_filename)
def write(seq_records, options):
    """Write per-cluster summaries to geneclusters.txt and to a per-record
    '<id>.geneclusters.xls' workbook.  Protein input is skipped entirely.
    """
    if options.input_type == 'prot':
        return
    # Open up TXT file and XLS record
    outfolder = options.full_outputfolder_path
    txtfile = open(path.join(outfolder, "geneclusters.txt"), "w")
    wb = Workbook()
    font1 = Font()
    style1 = XFStyle()
    style1.font = font1
    font1.bold = True
    ws0 = wb.add_sheet('0')
    ws0.write(0, 0, "Input accession number", style1)
    ws0.write(0, 1, "Input name", style1)
    ws0.write(0, 2, "Gene cluster type", style1)
    ws0.write(0, 3, "Gene cluster genes", style1)
    ws0.write(0, 4, "Gene cluster gene accessions", style1)
    if options.knownclusterblast:
        ws0.write(0, 5, "Compound with gene cluster of highest homology", style1)
    # For each gene cluster, write out info
    column = 1  # NOTE(review): despite the name, this is the ROW index
    for seq_record in seq_records:
        clusters = utils.get_cluster_features(seq_record)
        for cluster in clusters:
            clustertype = utils.get_cluster_type(cluster)
            clusternr = utils.get_cluster_number(cluster)
            clustergenes = [
                utils.get_gene_id(cds)
                for cds in utils.get_cluster_cds_features(cluster, seq_record)
            ]
            accessions = [
                utils.get_gene_acc(cds)
                for cds in utils.get_cluster_cds_features(cluster, seq_record)
            ]
            ws0.write(column, 0, seq_record.id)
            # Oversized cells raise inside pyExcelerator; fall back to a
            # pointer at the txt file in each case.
            try:
                ws0.write(column, 1, seq_record.description)
            except:
                ws0.write(
                    column, 1,
                    "Name to long to be contained in Excel cell; see txt file in downloadable zip archive."
                )
            ws0.write(column, 2, clustertype)
            try:
                ws0.write(column, 3, ";".join(clustergenes))
            except:
                ws0.write(
                    column, 3,
                    "Too many genes to be contained in Excel cell; see txt file in downloadable zip archive."
                )
            try:
                ws0.write(column, 4, ";".join(accessions))
            except:
                ws0.write(
                    column, 4,
                    "Too many genes to be contained in Excel cell; see txt file in downloadable zip archive."
                )
            if hasattr(seq_record, 'closestcompounddict') and \
               seq_record.closestcompounddict.has_key(clusternr):
                ws0.write(column, 5, seq_record.closestcompounddict[clusternr])
            column += 1
            txtfile.write("\t".join([
                seq_record.id, seq_record.description, clustertype,
                ";".join(clustergenes), ";".join(accessions)
            ]) + "\n")
        # One workbook per record, named after the record id.
        wb.save(path.join(outfolder, "%s.geneclusters.xls" % seq_record.id))
#!/usr/bin/env python # tries stress SST, SAT and MSAT __rev_id__ = """$Id: big-16Mb.py,v 1.3 2005/03/27 12:47:06 rvk Exp $""" from time import * from pyExcelerator.Workbook import * from pyExcelerator.Style import * style = XFStyle() wb = Workbook() ws0 = wb.add_sheet('0') colcount = 200 + 1 rowcount = 6000 + 1 t0 = time() print "\nstart: %s" % ctime(t0) print "Filling..." for col in xrange(colcount): print "[%d]" % col, for row in xrange(rowcount): #ws0.write(row, col, "BIG(%d, %d)" % (row, col)) ws0.write(row, col, "BIG") t1 = time() - t0 print "\nsince starting elapsed %.2f s" % (t1) print "Storing..." wb.save('big-16Mb.xls')
    # NOTE(review): stray tail of a function whose definition lies outside
    # this excerpt.
    return newval


def writeExle(wbsave, ws, style, tvals, outfile):
    """Write the grouped values in `tvals` into the sheet and save it.

    Each entry of `tvals` fills one column; every `ll` appears to be a
    4-tuple: ll[0] column header, ll[1] merged row label, ll[2]/ll[3] two
    stacked values -- presumably; confirm against returnLast().
    """
    ws.write(0, 0, "\\", style)
    zflag = 1  # current column
    for vals in tvals:
        flag = 1  # current row within this column
        for ll in vals:
            ws.write(0, zflag, unicode(ll[0]), style)
            ws.write(flag, zflag, ll[2], style)
            ws.write_merge(flag, flag + 1, 0, 0, ll[1], style)
            flag = flag + 1
            ws.write(flag, zflag, ll[3], style)
            flag = flag + 1
        zflag = zflag + 1
    wbsave.save(outfile)


if __name__ == "__main__":
    wbsave = Workbook()
    ws = wbsave.add_sheet('sheet1')
    style = returnStyle()
    outfile = "end.xls"
    infile = "in.txt"
    # Optional CLI override: argv[1] = input file, argv[2] = output file.
    if len(sys.argv) > 2:
        outfile = sys.argv[2]
        infile = sys.argv[1]
    filelist = returnReadList(infile)
    tvals = returnLast(filelist)
    writeExle(wbsave, ws, style, tvals, outfile)
def exportXlsData(self):
    """Render this container's items as an XLS download.

    Builds one sheet ('ddd') with a header row of field names plus IntID
    and objectID columns, one row per item (values converted through each
    field's z3c.form widget/data-converter), sets the download response
    headers, and returns the file's bytes.
    """
    filename = datetime.now().strftime('ict_%Y%m%d%H%M%S.xls')
    f_handle, f_name = tempfile.mkstemp(filename)
    wbook = Workbook()
    wb_hosts = wbook.add_sheet('ddd')
    # Two identical 6pt text-format styles (style0 also gets the heading
    # background below).
    style0 = XFStyle()
    font0 = Font()
    font0.height = 6 * 20  # BIFF font height is in twips (1/20 pt)
    style0.num_format_str = '@'
    style0.font = font0
    style1 = XFStyle()
    font1 = Font()
    font1.height = 6 * 20
    style1.num_format_str = '@'
    style1.font = font1
    # Solid background for the header row.
    heading_pattern = xl.Pattern()
    heading_pattern.pattern = xl.Pattern.SOLID_PATTERN
    heading_pattern.pattern_back_colour = 0x5
    heading_pattern.pattern_fore_colour = 0x5
    fields = self.fields
    attrList = [fname for fname, fval in fields.items()]
    itemList = self.context.items()
    pos_y = 0
    pos_x = 0
    # Header row: one cell per attribute, then IntID and objectID.
    for attr in attrList:
        wb_data = Formula(u'"%s"' % attr)
        style0.pattern = heading_pattern
        wb_hosts.write(pos_y, pos_x, wb_data, style0)
        pos_x += 1
    # IntID
    wb_data = Formula(u'"IntID"')
    wb_hosts.write(pos_y, pos_x, wb_data, style0)
    pos_x += 1
    # objectID
    wb_data = Formula(u'"objectID"')
    wb_hosts.write(pos_y, pos_x, wb_data, style0)
    wb_hosts.col(pos_x).width *= 3
    pos_x += 1
    pos_y = 1
    # Collect every schema field implemented by the factory.
    allAttributes = {}
    for interface in implementedBy(self.factory):
        for i_attrName in interface:
            i_attr = interface[i_attrName]
            if IField.providedBy(i_attr):
                allAttributes[i_attrName] = i_attr
    #
    for item_n, item_v in itemList:
        pos_x = 0
        for attr in attrList:
            attrField = allAttributes[attr]
            attrDm = datamanager.AttributeField(item_v, attrField)
            v_style = XFStyle()
            v_font = Font()
            v_font.height = 6 * 20
            v_style.font = v_font
            value = None
            if IChoice.providedBy(attrField):
                # Choice fields: converter returns a sequence; take the
                # first element.
                v_style.num_format_str = '@'
                dateValue = attrDm.get()
                v_widget = getMultiAdapter(
                    (attrField, self.request), interfaces.IFieldWidget)
                v_widget.context = item_v
                v_dataconverter = queryMultiAdapter(
                    (attrDm.field, v_widget), interfaces.IDataConverter)
                if dateValue is not None:
                    value = v_dataconverter.toWidgetValue(dateValue)[0]
            else:
                # All other fields: widget value as-is; join list values.
                v_style.num_format_str = '@'
                dateValue = attrDm.get()
                v_widget = getMultiAdapter(
                    (attrField, self.request), interfaces.IFieldWidget)
                v_widget.context = item_v
                v_dataconverter = queryMultiAdapter(
                    (attrDm.field, v_widget), interfaces.IDataConverter)
                if dateValue is not None:
                    value = v_dataconverter.toWidgetValue(dateValue)
                    if type(value) is list:
                        value = u";".join(value)
            if value is not None:
                wb_hosts.write(pos_y, pos_x, value, v_style)
            pos_x += 1
        # IntID
        uidutil = queryUtility(IIntIds)
        wb_data = Formula(u'"%s"' % uidutil.getId(item_v))
        wb_hosts.write(pos_y, pos_x, wb_data, style0)
        pos_x += 1
        # objectID
        wb_data = Formula(u'"%s"' % item_v.objectID)
        wb_hosts.write(pos_y, pos_x, wb_data, style0)
        pos_x += 1
        pos_y += 1
    wbook.save(f_name)
    self.request.response.setHeader('Content-Type', 'application/vnd.ms-excel')
    self.request.response.setHeader(
        'Content-Disposition', 'attachment; filename=\"%s\"' % filename)
    setNoCacheHeaders(self.request.response)
    datafile = open(f_name, "r")
    dataMem = datafile.read()
    datafile.close()
    os.remove(f_name)
    return dataMem
    # NOTE(review): tail of an hour-bucketing helper; its `def` lies outside
    # this excerpt.  Maps the local hour to an integer bucket 18..23.
    elif localtime.tm_hour >= 18 and localtime.tm_hour < 19:
        cate = 18
    elif localtime.tm_hour >= 19 and localtime.tm_hour < 20:
        cate = 19
    elif localtime.tm_hour >= 20 and localtime.tm_hour < 21:
        cate = 20
    elif localtime.tm_hour >= 21 and localtime.tm_hour < 22:
        cate = 21
    elif localtime.tm_hour >= 22 and localtime.tm_hour < 23:
        cate = 22
    elif localtime.tm_hour >= 23 and localtime.tm_hour < 24:
        cate = 23
    return cate


# Top-level setup: output workbook plus MongoDB and MySQL connections.
w = Workbook()
ws = w.add_sheet("iptime")
conn = pymongo.Connection('10.3.3.220', 27017)
iae_hitlog_a = conn.gehua.iae_hitlog_a
iae_hitlog_a.create_index("ip")
ip = conn.gehua.ip
res = ip.find()
ind = 0
rowNumber = 0
mysqlconn = MySQLdb.connect(host="172.16.168.57", user="******", passwd="ZAQ!XSW@CDE#", db="demo_vsp_a")
#mysqlconn1=MySQLdb.connect(host="172.16.168.57",user="******",passwd="ZAQ!XSW@CDE#",db="ire")
# NOTE(review): the loop body continues beyond this excerpt.
for result in res:
    #Qestion
def openExcelSheet():
    """ Opens a reference to an Excel WorkBook and Worksheet objects """
    from pyExcelerator import Workbook
    book = Workbook()
    sheet = book.add_sheet("Sheet 1")
    return book, sheet
def exportXlsData(self, request, sheetName=u'ict', wbook=None):
    """Write this folder's items as one sheet into `wbook`.

    When `wbook` is None a standalone workbook plus a temp file path are
    created (localWbook flags that case).  NOTE(review): the visible body
    ends after the per-attribute cell loop; row advancing, saving and the
    return value apparently live beyond this excerpt.
    """
    if wbook is None:
        localWbook = True
        filename = datetime.now().strftime('ict_%Y%m%d%H%M%S.xls')
        f_handle, f_name = tempfile.mkstemp(filename)
        wbook = Workbook()
    else:
        localWbook = False
    wb_hosts = wbook.add_sheet(getattr(self, 'ikName', sheetName))
    # 6pt styles for header/data cells.
    style0 = XFStyle()
    font0 = Font()
    font0.height = 6 * 20  # BIFF font height is in twips (1/20 pt)
    #style0.num_format_str = '@'
    style0.font = font0
    style1 = XFStyle()
    font1 = Font()
    font1.height = 6 * 20
    #style1.num_format_str = '@'
    style1.font = font1
    # Solid background for the header row.
    heading_pattern = xl.Pattern()
    heading_pattern.pattern = xl.Pattern.SOLID_PATTERN
    heading_pattern.pattern_back_colour = 0x5
    heading_pattern.pattern_fore_colour = 0x5
    fields = fieldsForFactory(self.contentFactory, ['objectID'])
    attrList = [fname for fname, fval in fields.items()]
    itemList = self.items()
    pos_y = 0
    pos_x = 0
    # Header row: field names plus a (widened) objectID column.
    for attr in attrList:
        style0.pattern = heading_pattern
        wb_hosts.write(pos_y, pos_x, attr, style0)
        pos_x += 1
    # objectID
    wb_hosts.write(pos_y, pos_x, "objectID", style0)
    wb_hosts.col(pos_x).width *= 3
    pos_x += 1
    pos_y = 1
    # Collect every schema field implemented by the content factory.
    allAttributes = {}
    for interface in implementedBy(self.contentFactory):
        for i_attrName in interface:
            i_attr = interface[i_attrName]
            if IField.providedBy(i_attr):
                allAttributes[i_attrName] = i_attr
    #
    for item_n, item_v in itemList:
        pos_x = 0
        for attr in attrList:
            attrField = allAttributes[attr]
            attrDm = datamanager.AttributeField(item_v, attrField)
            v_style = XFStyle()
            v_font = Font()
            v_font.height = 6 * 20
            v_style.font = v_font
            value = None
            if IChoice.providedBy(attrField):
                # Choice fields: converter returns a sequence; take the
                # first element when present.
                v_style.num_format_str = '@'
                dateValue = attrDm.get()
                v_widget = getMultiAdapter(
                    (attrField, request), interfaces.IFieldWidget)
                v_widget.context = item_v
                v_dataconverter = queryMultiAdapter(
                    (attrDm.field, v_widget), interfaces.IDataConverter)
                if dateValue is not None:
                    valueVector = v_dataconverter.toWidgetValue(dateValue)
                    if len(valueVector) > 0:
                        value = valueVector[0]
            else:
                # Other fields: widget value as-is; join list values.
                v_style.num_format_str = '@'
                try:
                    dateValue = attrDm.get()
                    v_widget = getMultiAdapter(
                        (attrField, request), interfaces.IFieldWidget)
                    v_widget.context = item_v
                    v_dataconverter = queryMultiAdapter(
                        (attrDm.field, v_widget), interfaces.IDataConverter)
                except AttributeError, errText:
                    print "Error1: ### ", errText
                    print "item_v: ", item_v
                    print "item_v.ikName: ", item_v.ikName
                    print "attr: ", attr
                    dateValue = None
                if dateValue is not None:
                    try:
                        value = v_dataconverter.toWidgetValue(dateValue)
                    except AttributeError, errText:
                        print "Error4: ### ", errText
                        print "item_v: ", item_v
                        print "item_v.ikName: ", item_v.ikName
                        print "attr: ", attr
                        value = None
                if type(value) is list:
                    value = u";".join(value)
            if value is not None:
                wb_hosts.write(pos_y, pos_x, value, v_style)
            pos_x += 1
def export_as_xls(modeladmin, request, queryset):
    """
    Generic xls export admin action.

    Exports the admin's list_display columns for the selected queryset as a
    downloadable .xls attachment; refuses requests above
    settings.EXPORT_RECORDS_LIMIT and non-staff users.
    """
    import os
    import tempfile
    if queryset.count() > settings.EXPORT_RECORDS_LIMIT:
        messages.error(
            request,
            "Can't export more then %s Records in one go. Narrow down your criteria using filters or search"
            % str(settings.EXPORT_RECORDS_LIMIT))
        return HttpResponseRedirect(request.path_info)
    fields = []
    # PUT THE LIST OF FIELD NAMES YOU DON'T WANT TO EXPORT
    exclude_fields = []
    # foreign key related fields
    extras = ['']
    if not request.user.is_staff:
        raise PermissionDenied
    for f in modeladmin.list_display:
        if f not in exclude_fields:
            fields.append(f)
    fields.extend(extras)
    opts = modeladmin.model._meta
    wb = Workbook()
    ws0 = wb.add_sheet('0')
    field_names = []
    # write header row
    for col, field in enumerate(fields):
        ws0.write(0, col, field)
        field_names.append(field)
    row = 1
    # Write data rows
    for obj in queryset:
        for col, field in enumerate(field_names):
            if field in extras:
                try:
                    # NOTE(review): eval of an admin-configured attribute
                    # path.  Field names come from list_display, not user
                    # input, but getattr would be safer.
                    val = [eval('obj.' + field)]
                except Exception:
                    val = ['None']
            else:
                try:
                    val = lookup_field(field, obj, modeladmin)
                except Exception:
                    val = ['None']
            # fix: identity comparison with None (was "not val[-1] == None")
            if val[-1] is not None:
                if isinstance(val[-1], bool):
                    ws0.write(row, col, strip_tags(str(val[-1])))
                elif not isinstance(val[-1], str) and not isinstance(
                        val[-1], unicode):
                    ws0.write(row, col, strip_tags(val[-1].__unicode__()))
                elif val[-1]:
                    ws0.write(row, col, strip_tags(val[-1]))
            else:
                ws0.write(row, col, strip_tags(''))
        row = row + 1
    # fix: write to a unique temp file (the fixed /tmp/output.xls path was
    # racy between concurrent requests) and read it back in binary mode.
    fd, tmp_path = tempfile.mkstemp(suffix='.xls')
    os.close(fd)
    try:
        wb.save(tmp_path)
        with open(tmp_path, 'rb') as f:
            payload = f.read()
    finally:
        os.remove(tmp_path)
    response = HttpResponse(payload, content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename=%s.xls' % unicode(
        opts).replace('.', '_')
    return response
def write_excel(results):
    """Build a two-sheet weekly Weibo report workbook from `results`.

    Sheet 'Weekly Raw Data': one row per account with the `title` columns.
    Sheet 'Fans Info': three columns per account (labels, percentage data,
    brand-activity data), laid out vertically via `abscissa`/`ordinate`.

    :param results: sequence of per-account stat tuples (33 fields each)
        -- presumably produced by the surrounding collection code; confirm.
    :return: the assembled Workbook (caller saves it).
    """
    title = [
        'DATE', 'User ID', 'SCREEN NAME', 'Fans', 'Fan Growth',
        'Fan Growth %', 'Tweets', 'Retweets', 'Comments', 'direct @',
        'Likes', 'Impression', 'ER 30 days', 'ER 7 days', '#1 Post URL',
        '#1 Post ER', '#1 Post RT', '#1 Post CT', '#2 Post URL',
        '#2 Post ER', '#2 Post RT', '#2 Post CT', '#3 Post URL',
        '#3 Post ER', '#3 Post RT', '#3 Post CT', '#4 Post URL',
        '#4 Post ER', '#4 Post RT', '#4 Post CT', '#5 Post URL',
        '#5 Post ER', '#5 Post RT', '#5 Post CT', '#1 Influencer URL',
        '#1 Influencer Tweets Count', '#1 Influencer Comments Count',
        '#1 Influencer Direct @ Count', '#2 Influencer URL',
        '#2 Influencer Tweets Count', '#2 Influencer Comments',
        '#2 Influencer Direct @ Count', '#3 Influencer URL',
        '#3 Influencer Tweets Count', '#3 Influencer Comments',
        '#3 Influencer Direct @ Count', '#4 Influencer URL',
        '#4 Influencer Tweets Count', '#4 Influencer Comments',
        '#4 Influencer Direct @ Count', '#5 Influencer URL',
        '#5 Influencer Tweets Count', '#5 Influencer Comments',
        '#5 Influencer Direct @ Count', '#1 Hashtag',
        "#1 Hashtag's engagement rate", '#2 Hashtag',
        "#2 Hashtag's engagement rate", '#3 Hashtag',
        "#3 Hashtag's engagement rate", 'Question Posted',
        'Question Responded', 'Question Response Time', 'Response Share'
    ]
    w = Workbook()
    ws = w.add_sheet('Weekly Raw Data')
    wt = w.add_sheet('Fans Info')
    # 12pt Microsoft YaHei styles: plain, bold, and percent-formatted.
    font = Font()
    font.height = 12 * 0x14  # 0x14 == 20 twips per point
    font.name = str_to_unicode('微软雅黑')
    title_style = Style.XFStyle()
    title_style.font = font
    fontbold = Font()
    fontbold.height = 12 * 0x14
    fontbold.bold = True
    fontbold.name = str_to_unicode('微软雅黑')
    boldstyle = Style.XFStyle()
    boldstyle.font = fontbold
    percent = Style.XFStyle()
    percent.font = font
    percent.num_format_str = '0.00%'
    for i in range(len(title)):
        ws.col(i).width = 3600
    # Header row of the raw-data sheet.
    ws.write_cols(0, 0, title, title_style)
    abscissa = 0  # current column on the 'Fans Info' sheet
    for res in range(len(results)):
        screen_name, uid, day, period, fans, fan_growth, tweets, retweets, comments, direct_at, likes, impressions, \
        er_30, er_7, response_share, top_posts, top_influencer, top_hashtag, questions, responds, mean_res, active, interactive, \
        verified, subfans, province, gender, age, tag, fans_week, fans_hour, brand_week, brand_hour = results[res]
        fan_percent = fan_growth / float(fans - fan_growth)
        top_posts, top_influencer, top_hashtag, verified, subfans, province, gender, age, tag, fans_week, \
        fans_hour, brand_week, brand_hour = [format_data(p) for p in (top_posts, top_influencer, top_hashtag, \
        verified, subfans, province, gender, age, tag, fans_week, \
        fans_hour, brand_week, brand_hour)]
        mean_res = round(mean_res / 60.0, 2)  # seconds -> minutes
        #print res
        # Top posts: url/ER/retweets/comments per post, padded to 5 posts.
        t_posts = []
        top_posts = sorted(top_posts, key=lambda x: x[0], reverse=True)
        for post in top_posts:
            t_posts.extend(
                [post[1]['url'], post[0], post[1]['nret'], post[1]['ncmt']])
        t_posts.extend(['N/A'] * (20 - len(t_posts)))
        # Top influencers: profile url + counts, padded to 5 influencers.
        t_influencer = []
        top_influencer = sorted(top_influencer, key=lambda x: x[0], reverse=True)
        for influ in top_influencer:
            weibourl = 'http://weibo.com/u/%s' % influ[1]['uid']
            t_influencer.extend([
                weibourl, influ[1].get('reposts', 0),
                influ[1].get('comments', 0), influ[1].get('direct_at', 0)
            ])
        t_influencer.extend(['N/A'] * (20 - len(t_influencer)))
        # Top hashtags: name + engagement rate, padded to 3 hashtags.
        t_hashtag = []
        if top_hashtag:
            top_hashtag = sorted(top_hashtag, key=lambda x: x[0], reverse=True)
            [t_hashtag.extend([hash[1], hash[0]]) for hash in top_hashtag]
        t_hashtag.extend(['N/A'] * (6 - len(t_hashtag)))
        account_index = [day.strftime('%Y-%m-%d'), uid, screen_name, fans, fan_growth, fan_percent, tweets, retweets, \
                         comments, direct_at, likes if likes else 0, \
                         impressions, er_30, er_7] + t_posts + t_influencer + t_hashtag + [questions, responds, mean_res, response_share if response_share else 'N/A']
        # Write this account's row on the raw-data sheet.
        ws.write_cols(res + 1, 0, account_index, title_style)
        # Pad the distributions to fixed lengths for the vertical layout.
        tag.extend([('N/A', 0) for i in range(10 - len(tag))])
        province.extend([('N/A', 0) for i in range(10 - len(province))])
        subfans.extend([0 for i in range(12 - len(subfans))])
        vertical_title = [screen_name, 'Active', 'Active Fans', 'Other Fans', '',
                          'Interaction', 'Interactive Fans', 'Other Fans', '', 'Verified Type', \
                          'Verified', 'Daren', 'Un-verified', '', 'Fan Number', '0~9', '10~49', '50~99', '100~199', '200~299', '300~399', '400~499',\
                          '500~999', '1000~1999', '2000~4999', '5000~9999', '>=10000', '', 'Gender', 'Male', 'Female', '', 'Age', '<18', '18~24', \
                          '25~29', '30~34', '35~39', '40~49', '50~59', '>=60', '', 'Tag'] + [i[0] for i in tag] + ['', 'Province'] + \
                         [str_to_unicode(provincesdict.get(str(i[0]), '')) for i in province] + ['', 'Hour', '0-1', '1-2', '2-3', '3-4', '4-5', '5-6',\
                          '6-7', '7-8', '8-9', '9-10', '10-11', '11-12', '12-13', '13-14', '14-15', '15-16', '16-17', '17-18', '18-19', '19-20', '20-21', '21-22', \
                          '22-23', '23-0', '', 'Days', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', '']
        wt.col(res * 3).width = 4000
        ordinate = 0
        # Label column: bold the first two cells and every section heading
        # (the cell right after a blank separator).
        for ver in range(len(vertical_title)):
            if ver in (0, 1) or not vertical_title[ver - 1]:
                wt.write(ordinate, abscissa, vertical_title[ver], boldstyle)
            else:
                wt.write(ordinate, abscissa, vertical_title[ver], title_style)
            ordinate += 1
        # Verified split: order in the sheet is Verified/Daren/Un-verified.
        if verified:
            verified_count = float(sum(verified))
            verified_type = [
                verified[1] / verified_count, verified[2] / verified_count,
                verified[0] / verified_count
            ] if verified_count > 0 else [0, 0, 0]
        else:
            verified_type = [0] * 3
        if len(gender) != 2:
            gender = [0, 0]
        else:
            gender.sort()
            gender = [gender[1][1], gender[0][1]]
        tagdata = [i[1] for i in tag]
        provincedata = [i[1] for i in province]
        if age:
            age = [i[1] for i in sorted(age.items())]
            age_type = [i / float(sum(age)) for i in age]
        else:
            age_type = [0] * 8
        if not brand_hour:
            brand_hour = [0] * 24
        if not brand_week:
            brand_week = [0] * 7
        subfans_type, gender_type, tag_type, province_type, fanshour_type, fansweek_type, brandhour_type, brandweek_type = \
            [div_data(j) for j in [subfans, gender, tagdata, provincedata, fans_hour, fans_week, brand_hour, brand_week]]
        gaps = ['', 'Percentage']
        # Percentage column, aligned with vertical_title's sections.
        vertical_data = ['', 'Percentage', active, 1 - active if active else 0, '',
                         'Percentage', interactive, 1 - interactive if interactive else 0, \
                         '', 'Percentage'] + verified_type + gaps + subfans_type + gaps + gender_type + gaps + age_type + gaps + \
                        tag_type + gaps + province_type + ['', 'Fans Activity'] + fanshour_type + ['', 'Fans Activity'] + fansweek_type
        wt.col(res * 3 + 1).width = 4000
        ordinate = 0
        abscissa += 1
        for ver in range(len(vertical_data)):
            if ver in (0, 1) or not vertical_title[ver - 1]:
                wt.write(ordinate, abscissa, vertical_data[ver], boldstyle)
            else:
                wt.write(ordinate, abscissa, vertical_data[ver], percent)
            ordinate += 1
        # Third column: brand activity by hour and by weekday, offset to
        # line up with the Hour/Days sections of the label column.
        wt.col(res * 3 + 2).width = 4000
        ordinate = 0
        rest_data = [''] * 66 + ['Brand Activity'] + brandhour_type + [
            '', 'Brand Activity'
        ] + brandweek_type
        abscissa += 1
        for ver in range(len(rest_data)):
            if ver in (0, 1) or not vertical_title[ver - 1]:
                wt.write(ordinate, abscissa, rest_data[ver], boldstyle)
            else:
                wt.write(ordinate, abscissa, rest_data[ver], percent)
            ordinate += 1
        abscissa += 2  # leave the stride at 3 columns per account
    return w