def testImportWithForbiddenFileAccess(self):
    """ Imports with forbidden file access in the XML must fail with SyntaxError """

    org = current.s3db.resource("org_organisation")
    source = BytesIO(self.forbidden.encode("utf-8"))
    with self.assertRaises(SyntaxError):
        org.import_xml(source)
def document_onvalidation(form, document=True):
    """
        Form validation for both, documents and images

        @param form: the FORM being validated (doc_document/doc_image)
        @param document: True when validating a document,
                         False when validating an image
    """

    form_vars = form.vars
    doc = form_vars.file

    if doc is None:
        # If this is a prepop, then file not in form
        # Interactive forms with empty doc has this as "" not None
        return

    if not document:
        # Image form: the crop widget submits the image base64-encoded
        # in a separate form variable rather than as a file upload
        encoded_file = form_vars.get("imagecrop-data", None)
        if encoded_file:
            # S3ImageCropWidget
            import base64
            import uuid
            # Data URL format: "<metadata>,<base64 payload>";
            # metadata starts with the original file name
            metadata, encoded_file = encoded_file.split(",")
            #filename, datatype, enctype = metadata.split(";")
            filename = metadata.split(";", 1)[0]
            # Build a pseudo-upload (filename + file handle); prefix a
            # UUID hex to make the stored file name unique
            f = Storage()
            f.filename = uuid.uuid4().hex + filename
            f.file = BytesIO(base64.b64decode(encoded_file))
            doc = form_vars.file = f
            if not form_vars.name:
                form_vars.name = filename

    if not hasattr(doc, "file"):
        # Record update without new file upload => keep existing
        record_id = current.request.post_vars.id
        if record_id:
            db = current.db
            if document:
                tablename = "doc_document"
            else:
                tablename = "doc_image"
            table = db[tablename]
            record = db(table.id == record_id).select(table.file,
                                                      limitby=(0, 1),
                                                      ).first()
            if record:
                # Stored file name counts as existing file
                doc = record.file

    if not hasattr(doc, "file") and not doc and not form_vars.url:
        # Neither a file nor a URL given => reject the form
        if document:
            msg = current.T("Either file upload or document URL required.")
        else:
            msg = current.T("Either file upload or image URL required.")
        if "file" in form_vars:
            form.errors.file = msg
        if "url" in form_vars:
            form.errors.url = msg

    if hasattr(doc, "file"):
        name = form_vars.name
        if not name:
            # Use filename as document/image title
            form_vars.name = doc.filename
def testPermissibleRelativePathAccess(self):
    """ Permissible relative-path file access must parse without error """

    parser = current.xml
    source = BytesIO(self.relpath.encode("utf-8"))
    result = parser.parse(source)
    self.assertNotEqual(result, None)
    self.assertEqual(parser.error, None)
def testForbiddenFileAccess(self):
    """ Forbidden file access in the XML must produce a parser error """

    parser = current.xml
    source = BytesIO(self.forbidden.encode("utf-8"))
    result = parser.parse(source)
    self.assertEqual(result, None)
    self.assertNotEqual(parser.error, None)
def encode_pt(pt, title):
    """
        Encode a S3PivotTable as XLS sheet

        @param pt: the S3PivotTable
        @param title: the title for the report

        @returns: the XLS file as stream
    """

    workbook = S3PivotTableXLS(pt).encode(title)

    stream = BytesIO()
    workbook.save(stream)
    stream.seek(0)

    return stream
def __init__(self, fileobj=None, compressed=True):
    """
        Create or open an archive

        @param fileobj: the file object containing the archive,
                        None to create a new archive
        @param compressed: enable (or suppress) compression of new
                           archives (ZIP_DEFLATED vs ZIP_STORED)
    """

    import zipfile

    if compressed:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    if fileobj is not None:
        # Open an existing archive for reading
        if not hasattr(fileobj, "seek"):
            # Possibly a addinfourl instance from urlopen,
            # => must copy to BytesIO buffer for random access
            fileobj = BytesIO(fileobj.read())
        try:
            archive = zipfile.ZipFile(fileobj, "r")
        except RuntimeError:
            # Unreadable archive => log and continue without one
            current.log.warn("invalid ZIP archive: %s" % sys.exc_info()[1])
            archive = None
    else:
        # Create a new, empty archive in a memory buffer
        fileobj = BytesIO()
        try:
            archive = zipfile.ZipFile(fileobj, "w", compression, True)
        except RuntimeError:
            # Zlib not available? => try falling back to STORED
            compression = zipfile.ZIP_STORED
            archive = zipfile.ZipFile(fileobj, "w", compression, True)
            current.log.warn("zlib not available - cannot compress archive")

    # Retain both the underlying buffer and the ZipFile handle
    self.fileobj = fileobj
    self.archive = archive
def series_export_word(widget_list, lang_dict, title, logo):
    """
        Export a Series in RTF Format

        @param widget_list: dict of survey question widgets
                            (values must provide .question.posn and
                            .writeQuestionToRTF())
        @param lang_dict: translation dict passed through to the widgets
        @param title: the report title
        @param logo: path to a logo image, or None/"" for no logo

        @return: a BytesIO holding the rendered RTF document

        @ToDo: rewrite as S3Method handler
    """

    import gluon.contrib.pyrtf as pyrtf
    from s3compat import BytesIO

    output = BytesIO()
    doc = pyrtf.Document(default_language=pyrtf.Languages.EnglishUK)
    section = pyrtf.Section()
    ss = doc.StyleSheet

    # Paragraph style for grey-shaded cells
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalGrey")
    ps.SetShadingPropertySet(pyrtf.ShadingPropertySet(pattern=1,
                                                      background=pyrtf.Colour("grey light", 224, 224, 224)))
    ss.ParagraphStyles.append(ps)

    # Paragraph style for centred cells
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalCentre")
    ps.SetParagraphPropertySet(pyrtf.ParagraphPropertySet(alignment=3))
    ss.ParagraphStyles.append(ps)

    doc.Sections.append(section)

    # Heading: optional logo followed by the title
    heading = pyrtf.Paragraph(ss.ParagraphStyles.Heading1)
    if logo:
        image = pyrtf.Image(logo)
        heading.append(image)
    heading.append(title)
    section.append(heading)

    # Two-column layout (widths in twips): question / answer
    col = [2800, 6500]
    table = pyrtf.Table(*col)
    AddRow = table.AddRow

    # Render the questions in survey order
    sorted_widget_list = sorted(widget_list.values(),
                                key=lambda widget: widget.question.posn)
    for widget in sorted_widget_list:
        line = widget.writeQuestionToRTF(ss, lang_dict)
        try:
            AddRow(*line)
        except:
            # Best-effort: skip questions that fail to render,
            # but surface the error when running in debug mode
            if settings.base.debug:
                raise
            pass

    section.append(table)

    renderer = pyrtf.Renderer()
    renderer.Write(doc, output)
    return output
def item_export_pdf():
    """
        Export a list of Items in Adobe PDF format
        Uses Geraldo Grouping Report

        @return: the PDF content for download (sets response headers
                 for an attachment)
    """

    # ReportLab is required by Geraldo for PDF generation
    try:
        from reportlab.lib.units import cm
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.enums import TA_CENTER, TA_RIGHT
    except ImportError:
        session.error = "Python needs the ReportLab module installed for PDF export"
        redirect(URL(c="item"))
    try:
        from geraldo import Report, ReportBand, ReportGroup, Label, ObjectValue, SystemField, landscape, BAND_WIDTH
        from geraldo.generators import PDFGenerator
    except ImportError:
        session.error = "Python needs the Geraldo module installed for PDF export"
        redirect(URL(c="item"))

    # All items, ordered by category so the grouping report can band them
    table = db.budget_item
    objects_list = db(table.id > 0).select(orderby=table.category_type)
    if not objects_list:
        session.warning = T("No data in this table - cannot create PDF!")
        redirect(URL(f="item"))

    from s3compat import BytesIO
    output = BytesIO()

    class MyReport(Report):
        """ Geraldo report definition for the item list """

        def __init__(self, queryset=None, T=None):
            " Initialise parent class & make any necessary modifications "
            Report.__init__(self, queryset)
            self.T = T

        def _T(self, rawstring):
            return self.T(rawstring)

        # can't use T() here!
        #title = _T("Items")
        title = "Items"
        page_size = landscape(A4)

        class band_page_header(ReportBand):
            # Report title plus column labels, repeated on every page
            height = 1.3*cm
            elements = [
                SystemField(expression="%(report_title)s", top=0.1*cm,
                            left=0, width=BAND_WIDTH,
                            style={"fontName": "Helvetica-Bold",
                                   "fontSize": 14,
                                   "alignment": TA_CENTER}
                            ),
                Label(text="Code", top=0.8*cm, left=0.2*cm),
                Label(text="Description", top=0.8*cm, left=3*cm),
                Label(text="Unit Cost", top=0.8*cm, left=13*cm),
                Label(text="per Month", top=0.8*cm, left=15*cm),
                Label(text="per Minute", top=0.8*cm, left=17*cm),
                Label(text="per Megabyte", top=0.8*cm, left=19*cm),
                Label(text="Comments", top=0.8*cm, left=21*cm),
                ]
            borders = {"bottom": True}

        class band_page_footer(ReportBand):
            # Date and page numbers at the bottom of every page
            height = 0.5*cm
            elements = [
                Label(text="%s" % request.utcnow.date(), top=0.1*cm, left=0),
                SystemField(expression="Page # %(page_number)d of %(page_count)d",
                            top=0.1*cm, width=BAND_WIDTH,
                            style={"alignment": TA_RIGHT}),
                ]
            borders = {"top": True}

        class band_detail(ReportBand):
            # One row per item
            height = 0.5*cm
            auto_expand_height = True
            elements = (
                ObjectValue(attribute_name="code", left=0.2*cm, width=2.8*cm),
                ObjectValue(attribute_name="description", left=3*cm, width=10*cm),
                ObjectValue(attribute_name="unit_cost", left=13*cm, width=2*cm),
                ObjectValue(attribute_name="monthly_cost", left=15*cm, width=2*cm),
                ObjectValue(attribute_name="minute_cost", left=17*cm, width=2*cm),
                ObjectValue(attribute_name="megabyte_cost", left=19*cm, width=2*cm),
                ObjectValue(attribute_name="comments", left=21*cm, width=6*cm),
                )

        # Band the items by category, with a bold category heading
        groups = [
            ReportGroup(attribute_name="category_type",
                        band_header=ReportBand(height=0.7*cm,
                                               elements=[
                                                   ObjectValue(attribute_name="category_type",
                                                               left=0, top=0.1*cm,
                                                               get_value=lambda instance: instance.category_type and budget_category_type_opts[instance.category_type],
                                                               style={"fontName": "Helvetica-Bold",
                                                                      "fontSize": 12})
                                                   ],
                                               borders={"bottom": True},
                                               ),
                        ),
            ]

    #report = MyReport(queryset=objects_list)
    report = MyReport(queryset=objects_list, T=T)
    report.generate_by(PDFGenerator, filename=output)
    output.seek(0)

    # Set response headers for a PDF attachment download
    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
    filename = "%s_items.pdf" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
def kit_export_pdf():
    """
        Export a list of Kits in Adobe PDF format
        Uses Geraldo SubReport

        @return: the PDF content for download (sets response headers
                 for an attachment)
    """

    # ReportLab is required by Geraldo for PDF generation
    try:
        from reportlab.lib.units import cm
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.enums import TA_CENTER, TA_RIGHT
    except ImportError:
        session.error = "Python needs the ReportLab module installed for PDF export"
        redirect(URL(c="kit"))
    try:
        from geraldo import Report, ReportBand, SubReport, Label, ObjectValue, SystemField, landscape, BAND_WIDTH
        from geraldo.generators import PDFGenerator
    except ImportError:
        session.error = "Python needs the Geraldo module installed for PDF export"
        redirect(URL(c="kit"))

    table = db.budget_kit
    objects_list = db(table.id > 0).select()
    if not objects_list:
        session.warning = T("No data in this table - cannot create PDF!")
        redirect(URL(r=request))

    from s3compat import BytesIO
    output = BytesIO()

    #class MySubReport(SubReport):
    #    def __init__(self, db=None, **kwargs):
    #        " Initialise parent class & make any necessary modifications "
    #        self.db = db
    #        SubReport.__init__(self, **kwargs)

    class MyReport(Report):
        """ Geraldo report definition for the kit list """

        def __init__(self, queryset=None, db=None):
            " Initialise parent class & make any necessary modifications "
            Report.__init__(self, queryset)
            self.db = db

        # can't use T() here!
        title = "Kits"
        page_size = landscape(A4)

        class band_page_header(ReportBand):
            # Report title plus column labels, repeated on every page
            height = 1.3*cm
            elements = [
                SystemField(expression="%(report_title)s", top=0.1*cm,
                            left=0, width=BAND_WIDTH,
                            style={"fontName": "Helvetica-Bold",
                                   "fontSize": 14,
                                   "alignment": TA_CENTER}
                            ),
                Label(text="Code", top=0.8*cm, left=0.2*cm),
                Label(text="Description", top=0.8*cm, left=2*cm),
                Label(text="Cost", top=0.8*cm, left=10*cm),
                Label(text="Monthly", top=0.8*cm, left=12*cm),
                Label(text="per Minute", top=0.8*cm, left=14*cm),
                Label(text="per Megabyte", top=0.8*cm, left=16*cm),
                Label(text="Comments", top=0.8*cm, left=18*cm),
                ]
            borders = {"bottom": True}

        class band_page_footer(ReportBand):
            # Date and page numbers at the bottom of every page
            height = 0.5*cm
            elements = [
                Label(text="%s" % request.utcnow.date(), top=0.1*cm, left=0),
                SystemField(expression="Page # %(page_number)d of %(page_count)d",
                            top=0.1*cm, width=BAND_WIDTH,
                            style={"alignment": TA_RIGHT}),
                ]
            borders = {"top": True}

        class band_detail(ReportBand):
            # One row per kit
            height = 0.5*cm
            auto_expand_height = True
            elements = (
                ObjectValue(attribute_name="code", left=0.2*cm, width=1.8*cm),
                ObjectValue(attribute_name="description", left=2*cm, width=8*cm),
                ObjectValue(attribute_name="total_unit_cost", left=10*cm, width=2*cm),
                ObjectValue(attribute_name="total_monthly_cost", left=12*cm, width=2*cm),
                ObjectValue(attribute_name="total_minute_cost", left=14*cm, width=2*cm),
                ObjectValue(attribute_name="total_megabyte_cost", left=16*cm, width=2*cm),
                ObjectValue(attribute_name="comments", left=18*cm, width=6*cm),
                )

        # Nested list of the items contained in each kit
        subreports = [
            SubReport(
                      #queryset_string = "db((db.budget_kit_item.kit_id == %(object)s.id) & (db.budget_item.id == db.budget_kit_item.item_id)).select(db.budget_item.code, db.budget_item.description, db.budget_item.unit_cost)",
                      #queryset_string = "db(db.budget_kit_item.kit_id == %(object)s.id).select()",
                      band_header = ReportBand(height=0.5*cm,
                                               elements=[
                                                   Label(text="Item ID", top=0, left=0.2*cm,
                                                         style={"fontName": "Helvetica-Bold"}),
                                                   Label(text="Quantity", top=0, left=2*cm,
                                                         style={"fontName": "Helvetica-Bold"}),
                                                   #Label(text="Unit Cost", top=0, left=4*cm, style={"fontName": "Helvetica-Bold"}),
                                                   ],
                                               borders={"top": True, "left": True, "right": True},
                                               ),
                      detail_band = ReportBand(height=0.5*cm,
                                               elements=[
                                                   ObjectValue(attribute_name="item_id", top=0, left=0.2*cm),
                                                   ObjectValue(attribute_name="quantity", top=0, left=2*cm),
                                                   #ObjectValue(attribute_name="unit_cost", top=0, left=4*cm),
                                                   ]
                                               ),
                      ),
            ]

    #report = MyReport(queryset=objects_list)
    report = MyReport(queryset=objects_list, db=db)
    report.generate_by(PDFGenerator, filename=output)
    output.seek(0)

    # Set response headers for a PDF attachment download
    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
    filename = "%s_kits.pdf" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
def series_export_spreadsheet(matrix, matrix_answers, logo):
    """
        Now take the matrix data type and generate a spreadsheet from it

        @param matrix: the assessment form layout (cells keyed "row,col")
        @param matrix_answers: the metadata matrix (question codes,
                               response counts, cell addresses)
        @param logo: bitmap to insert at the top of the sheet, or None

        @return: a BytesIO holding the rendered XLS workbook
    """

    try:
        import xlwt
    except ImportError:
        response.error = T("xlwt not installed, so cannot export as a Spreadsheet")
        output = s3_rest_controller(module, "survey_series",
                                    rheader=s3db.survey_series_rheader)
        return output

    import math
    from s3compat import BytesIO

    # -------------------------------------------------------------------------
    def wrap_text(sheet, cell, style):
        """ Write a cell with text wrapping, expanding row/column sizes """
        row = cell.row
        col = cell.col
        try:
            text = s3_unicode(cell.text)
        except:
            text = cell.text
        width = 16
        # Wrap text and calculate the row width and height
        characters_in_cell = float(width - 2)
        twips_per_row = 255 #default row height for 10 point font
        if cell.merged():
            try:
                sheet.write_merge(cell.row,
                                  cell.row + cell.mergeV,
                                  cell.col,
                                  cell.col + cell.mergeH,
                                  text,
                                  style,
                                  )
            except Exception as msg:
                # Log the failed cell (matrix is the enclosing function's arg)
                log = current.log
                log.error(msg)
                log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                          (cell.row, cell.mergeV, cell.col, cell.mergeH))
                posn = "%s,%s" % (cell.row, cell.col)
                if matrix.matrix[posn]:
                    log.debug(matrix.matrix[posn])
            rows = math.ceil((len(text) / characters_in_cell) / (1 + cell.mergeH))
        else:
            sheet.write(cell.row,
                        cell.col,
                        text,
                        style,
                        )
            rows = math.ceil(len(text) / characters_in_cell)
        # Grow (never shrink) the row height / column width to fit
        new_row_height = int(rows * twips_per_row)
        new_col_width = width * COL_WIDTH_MULTIPLIER
        if sheet.row(row).height < new_row_height:
            sheet.row(row).height = new_row_height
        if sheet.col(col).width < new_col_width:
            sheet.col(col).width = new_col_width

    # -------------------------------------------------------------------------
    def merge_styles(list_template, style_list):
        """
            Take a list of styles and return a single style object with
            all the differences from a newly created object added to the
            resultant style.
        """
        if len(style_list) == 0:
            final_style = xlwt.XFStyle()
        elif len(style_list) == 1:
            final_style = list_template[style_list[0]]
        else:
            # Fold all listed styles into one, keeping only their
            # deviations from a default XFStyle
            zero_style = xlwt.XFStyle()
            final_style = xlwt.XFStyle()
            for i in range(0, len(style_list)):
                final_style = merge_object_diff(final_style,
                                                list_template[style_list[i]],
                                                zero_style)
        return final_style

    # -------------------------------------------------------------------------
    def merge_object_diff(base_obj, new_obj, zero_obj):
        """
            Function to copy all the elements in new_obj that are different
            from the zero_obj and place them in the base_obj
        """
        element_list = new_obj.__dict__
        for (element, value) in element_list.items():
            try:
                # Recurse into nested style objects
                base_obj.__dict__[element] = merge_object_diff(base_obj.__dict__[element],
                                                               value,
                                                               zero_obj.__dict__[element])
            except:
                # Leaf value: copy only when it deviates from the default
                if zero_obj.__dict__[element] != value:
                    base_obj.__dict__[element] = value
        return base_obj

    COL_WIDTH_MULTIPLIER = 240

    book = xlwt.Workbook(encoding="utf-8")
    output = BytesIO()

    # Cell protection (locked/unlocked)
    protection = xlwt.Protection()
    protection.cell_locked = 1
    no_protection = xlwt.Protection()
    no_protection.cell_locked = 0

    # Border presets: dotted all-round, plus thin/medium single edges
    borders = xlwt.Borders()
    borders.left = xlwt.Borders.DOTTED
    borders.right = xlwt.Borders.DOTTED
    borders.top = xlwt.Borders.DOTTED
    borders.bottom = xlwt.Borders.DOTTED
    border_t1 = xlwt.Borders()
    border_t1.top = xlwt.Borders.THIN
    border_t2 = xlwt.Borders()
    border_t2.top = xlwt.Borders.MEDIUM
    border_l1 = xlwt.Borders()
    border_l1.left = xlwt.Borders.THIN
    border_l2 = xlwt.Borders()
    border_l2.left = xlwt.Borders.MEDIUM
    border_r1 = xlwt.Borders()
    border_r1.right = xlwt.Borders.THIN
    border_r2 = xlwt.Borders()
    border_r2.right = xlwt.Borders.MEDIUM
    border_b1 = xlwt.Borders()
    border_b1.bottom = xlwt.Borders.THIN
    border_b2 = xlwt.Borders()
    border_b2.bottom = xlwt.Borders.MEDIUM

    # Alignment presets (plain and wrapping)
    align_base = xlwt.Alignment()
    align_base.horz = xlwt.Alignment.HORZ_LEFT
    align_base.vert = xlwt.Alignment.VERT_TOP
    align_wrap = xlwt.Alignment()
    align_wrap.horz = xlwt.Alignment.HORZ_LEFT
    align_wrap.vert = xlwt.Alignment.VERT_TOP
    align_wrap.wrap = xlwt.Alignment.WRAP_AT_RIGHT

    # Background fills
    shaded_fill = xlwt.Pattern()
    shaded_fill.pattern = xlwt.Pattern.SOLID_PATTERN
    shaded_fill.pattern_fore_colour = 0x16 # 25% Grey
    shaded_fill.pattern_back_colour = 0x08 # Black
    heading_fill = xlwt.Pattern()
    heading_fill.pattern = xlwt.Pattern.SOLID_PATTERN
    heading_fill.pattern_fore_colour = 0x1F # ice_blue
    heading_fill.pattern_back_colour = 0x08 # Black

    # Named styles used by the matrix cells
    style_title = xlwt.XFStyle()
    style_title.font.height = 0x0140 # 320 twips, 16 points
    style_title.font.bold = True
    style_title.alignment = align_base
    style_header = xlwt.XFStyle()
    style_header.font.height = 0x00F0 # 240 twips, 12 points
    style_header.font.bold = True
    style_header.alignment = align_base
    style_sub_header = xlwt.XFStyle()
    style_sub_header.font.bold = True
    style_sub_header.alignment = align_wrap
    style_section_heading = xlwt.XFStyle()
    style_section_heading.font.bold = True
    style_section_heading.alignment = align_wrap
    style_section_heading.pattern = heading_fill
    style_hint = xlwt.XFStyle()
    style_hint.protection = protection
    style_hint.font.height = 160 # 160 twips, 8 points
    style_hint.font.italic = True
    style_hint.alignment = align_wrap
    style_text = xlwt.XFStyle()
    style_text.protection = protection
    style_text.alignment = align_wrap
    style_instructions = xlwt.XFStyle()
    style_instructions.font.height = 0x00B4 # 180 twips, 9 points
    style_instructions.font.italic = True
    style_instructions.protection = protection
    style_instructions.alignment = align_wrap
    style_box = xlwt.XFStyle()
    style_box.borders = borders
    style_box.protection = no_protection
    style_input = xlwt.XFStyle()
    style_input.borders = borders
    style_input.protection = no_protection
    style_input.pattern = shaded_fill
    box_l1 = xlwt.XFStyle()
    box_l1.borders = border_l1
    box_l2 = xlwt.XFStyle()
    box_l2.borders = border_l2
    box_t1 = xlwt.XFStyle()
    box_t1.borders = border_t1
    box_t2 = xlwt.XFStyle()
    box_t2.borders = border_t2
    box_r1 = xlwt.XFStyle()
    box_r1.borders = border_r1
    box_r2 = xlwt.XFStyle()
    box_r2.borders = border_r2
    box_b1 = xlwt.XFStyle()
    box_b1.borders = border_b1
    box_b2 = xlwt.XFStyle()
    box_b2.borders = border_b2

    # Lookup table mapping the cell styleList names to XFStyles
    style_list = {}
    style_list["styleTitle"] = style_title
    style_list["styleHeader"] = style_header
    style_list["styleSubHeader"] = style_sub_header
    style_list["styleSectionHeading"] = style_section_heading
    style_list["styleHint"] = style_hint
    style_list["styleText"] = style_text
    style_list["styleInstructions"] = style_instructions
    style_list["styleInput"] = style_input
    style_list["boxL1"] = box_l1
    style_list["boxL2"] = box_l2
    style_list["boxT1"] = box_t1
    style_list["boxT2"] = box_t2
    style_list["boxR1"] = box_r1
    style_list["boxR2"] = box_r2
    style_list["boxB1"] = box_b1
    style_list["boxB2"] = box_b2

    sheet1 = book.add_sheet(T("Assessment"))
    sheet2 = book.add_sheet(T("Metadata"))

    # Write the assessment form cells
    max_col = 0
    for cell in matrix.matrix.values():
        if cell.col + cell.mergeH > 255:
            # XLS sheets are limited to 256 columns
            current.log.warning("Cell (%s,%s) - (%s,%s) ignored" % \
                (cell.col, cell.row, cell.col + cell.mergeH, cell.row + cell.mergeV))
            continue
        if cell.col + cell.mergeH > max_col:
            max_col = cell.col + cell.mergeH
        if cell.joined():
            # Part of a merged range => written by its anchor cell
            continue
        style = merge_styles(style_list, cell.styleList)
        if (style.alignment.wrap == style.alignment.WRAP_AT_RIGHT):
            # get all the styles from the joined cells
            # and merge these styles in.
            joined_styles = matrix.joinedElementStyles(cell)
            joined_style = merge_styles(style_list, joined_styles)
            try:
                wrap_text(sheet1, cell, joined_style)
            except:
                pass
        else:
            if cell.merged():
                # get all the styles from the joined cells
                # and merge these styles in.
                joined_styles = matrix.joinedElementStyles(cell)
                joined_style = merge_styles(style_list, joined_styles)
                try:
                    sheet1.write_merge(cell.row,
                                       cell.row + cell.mergeV,
                                       cell.col,
                                       cell.col + cell.mergeH,
                                       s3_unicode(cell.text),
                                       joined_style,
                                       )
                except Exception as msg:
                    log = current.log
                    log.error(msg)
                    log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                              (cell.row, cell.mergeV, cell.col, cell.mergeH))
                    posn = "%s,%s" % (cell.row, cell.col)
                    if matrix.matrix[posn]:
                        log.debug(matrix.matrix[posn])
            else:
                sheet1.write(cell.row,
                             cell.col,
                             s3_unicode(cell.text),
                             style,
                             )

    # Uniform narrow columns for the form layout grid
    CELL_WIDTH = 480 # approximately 2 characters
    if max_col > 255:
        max_col = 255
    for col in range(max_col + 1):
        sheet1.col(col).width = CELL_WIDTH

    # Write the metadata sheet (used by the importer)
    sheet2.write(0, 0, "Question Code")
    sheet2.write(0, 1, "Response Count")
    sheet2.write(0, 2, "Values")
    sheet2.write(0, 3, "Cell Address")
    for cell in matrix_answers.matrix.values():
        style = merge_styles(style_list, cell.styleList)
        sheet2.write(cell.row,
                     cell.col,
                     s3_unicode(cell.text),
                     style,
                     )

    if logo != None:
        sheet1.insert_bitmap(logo, 0, 0)

    # Protect both sheets; hide the metadata columns from the user
    sheet1.protect = True
    sheet2.protect = True
    for i in range(26):
        sheet2.col(i).width = 0
    sheet2.write(0,
                 26,
                 s3_unicode(T("Please do not remove this sheet")),
                 style_header,
                 )
    sheet2.col(26).width = 12000

    book.save(output)
    return output
def import_xls(uploadFile):
    """
        Import Assessment Spreadsheet

        @param uploadFile: the uploaded workbook contents (bytes),
                           as produced by the series export

        @return: a BytesIO holding the assessment as utf-8 encoded CSV
                 (one header line with the question codes, one line with
                 the answers), or None on error
    """

    from s3compat import BytesIO

    if series_id is None:
        response.error = T("Series details missing")
        return

    openFile = BytesIO()

    try:
        import xlrd
        from xlwt.Utils import cell_to_rowcol2
    except ImportError:
        current.log.error("ERROR: xlrd & xlwt modules are needed for importing spreadsheets")
        return None

    workbook = xlrd.open_workbook(file_contents=uploadFile)
    try:
        sheetR = workbook.sheet_by_name("Assessment")
        sheetM = workbook.sheet_by_name("Metadata")
    except:
        # Not the workbook layout produced by the series export
        session.error = T("You need to use the spreadsheet which you can download from this page")
        redirect(URL(c="survey", f="new_assessment", args=[],
                     vars={"viewing": "survey_series.%s" % series_id}))

    header = ""
    body = ""
    # One metadata row per question: code, response count, option list,
    # and the cell address(es) of the answer(s) in the Assessment sheet
    for row in range(1, sheetM.nrows):
        header += ',"%s"' % sheetM.cell_value(row, 0)
        code = sheetM.cell_value(row, 0)
        qstn = s3.survey_getQuestionFromCode(code, series_id)
        question_type = qstn["type"]
        count = sheetM.cell_value(row, 1)
        if count != "":
            count = int(count)
            optionList = sheetM.cell_value(row, 2).split("|#|")
        else:
            count = 1
            optionList = None
        # The answer accumulator depends on the question type
        if question_type == "Location" and optionList is not None:
            answerList = {}
        elif question_type == "MultiOption":
            answerList = []
        else:
            answerList = ""
        for col in range(count):
            cell = sheetM.cell_value(row, 3 + col)
            (rowR, colR) = cell_to_rowcol2(cell)
            try:
                cellValue = sheetR.cell_value(rowR, colR)
            except IndexError:
                cellValue = ""
            # BUG: The option list needs to work in different ways
            #      depending on the question type. The question type should
            #      be added to the spreadsheet to save extra db calls:
            #      * Location save all the data as a hierarchy
            #      * MultiOption save all selections
            #      * Option save the last selection
            if cellValue != "":
                if optionList is not None:
                    if question_type == "Location":
                        answerList[optionList[col]] = cellValue
                    elif question_type == "MultiOption":
                        answerList.append(optionList[col])
                    else:
                        answerList = optionList[col]
                else:
                    if question_type == "Date":
                        # Convert Excel date serial numbers to ISO format
                        try:
                            (dtYear, dtMonth, dtDay, dtHour, dtMinute, dtSecond) = \
                                xlrd.xldate_as_tuple(cellValue, workbook.datemode)
                            dtValue = datetime.date(dtYear, dtMonth, dtDay)
                            cellValue = dtValue.isoformat()
                        except:
                            pass
                    elif question_type == "Time":
                        # Convert Excel time fractions (of a day) to HH:MM
                        try:
                            time = cellValue
                            hour = int(time * 24)
                            minute = int((time * 24 - hour) * 60)
                            cellValue = "%s:%s" % (hour, minute)
                        except:
                            pass
                    answerList += "%s" % cellValue
        body += ',"%s"' % answerList

    # Fix: BytesIO only accepts bytes - the previous code wrote str,
    # which raises TypeError under Python 3 => encode as utf-8
    openFile.write(header.encode("utf-8"))
    openFile.write(b"\n")
    openFile.write(body.encode("utf-8"))
    openFile.seek(0)
    return openFile
def xls(self, r, **attr):
    """
        Export the performance indicators as XLS data sheet

        @param r: the S3Request instance
        @param attr: controller attributes

        @return: the XLS file as response stream

        @raise HTTP: 503 if xlwt is not installed
    """

    try:
        import xlwt
    except ImportError:
        raise HTTP(503, body="XLWT not installed")

    T = current.T
    resource = self.resource
    table = resource.table

    # Get the statistics
    indicators = self.indicators

    # Create workbook and sheet
    book = xlwt.Workbook(encoding="utf-8")
    title = s3_str(T("Performance Indicators"))
    sheet = book.add_sheet(title)

    # Title and Report Dates (from filter)
    dates = []
    get_vars = r.get_vars
    field = table.date
    for fvar in ("~.start_date__ge", "~.end_date__le"):
        dtstr = get_vars.get(fvar)
        if dtstr:
            try:
                dt = s3_decode_iso_datetime(dtstr).date()
            except (ValueError, AttributeError):
                # NOTE(review): on a parse failure nothing is appended
                # for this boundary, so the subtitle can end up with a
                # single date - presumably acceptable, verify if not
                dt = None
            else:
                dates.append(field.represent(dt))
        else:
            # No filter value => open-ended boundary
            dates.append("...")
    dates = " -- ".join(dates) if dates else None

    # Write the performance indicators
    indicators.export(resource, sheet, title, subtitle=dates)

    # Output
    output = BytesIO()
    book.save(output)
    output.seek(0)

    # Response headers
    from gluon.contenttype import contenttype
    disposition = "attachment; filename=\"%s\"" % "indicators.xls"
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    from gluon.streamer import DEFAULT_CHUNK_SIZE
    return response.stream(output,
                           chunk_size=DEFAULT_CHUNK_SIZE,
                           request=r,
                           )
def encode(self, resource, **attr):
    """
        API Method to encode a resource as cards

        @param resource: the S3Resource, or
                         - the data items as list [{fieldname: representation, ...}, ...], or
                         - a callable that produces such a list of items
        @param attr: additional encoding parameters (see below)

        @keyword layout: the layout (a S3PDFCardLayout subclass, overrides
                         the resource's pdf_card_layout setting
        @keyword orderby: orderby-expression for data extraction, overrides
                          the resource's orderby setting
        @keyword labels: the labels for the fields,
                         - a dict {colname: label}, or
                         - a callable that produces it,
                         - defaults to the labels of the extracted fields
        @keyword pagesize: the PDF page size,
                           - a string "A4" or "Letter", or
                           - a tuple (width, height), in points
                           - defaults to the layout's card size
        @keyword margins: the page margins,
                          - a tuple (N, E, S, W), in points, or
                          - a single number, in points
                          - will be computed if omitted
        @keyword spacing: the spacing between cards,
                          - a tuple (H, V), in points, or
                          - a single number, in points
                          - defaults to 18 points in both directions
        @keyword title: the document title,
                        - defaults to title_list crud string of the resource

        @return: a handle to the output (BytesIO, rewound to start)

        @raise HTTP: 503 if ReportLab is not installed
    """

    if not REPORTLAB:
        # FIXME is this the correct handling of a dependency failure?
        raise HTTP(503, "Python ReportLab library not installed")

    # Do we operate on a S3Resource?
    is_resource = isinstance(resource, S3Resource)

    # The card layout
    layout = attr.get("layout")
    if layout is None and is_resource:
        layout = resource.get_config("pdf_card_layout")
    if layout is None:
        layout = S3PDFCardLayout

    # Card (and hence page) orientation
    orientation = layout.orientation
    if orientation == "Landscape":
        orientation = landscape
    else:
        orientation = portrait

    # Card and page size
    cardsize = orientation(layout.cardsize)
    pagesize = attr.get("pagesize")
    if pagesize == "A4":
        pagesize = A4
    elif pagesize == "Letter":
        pagesize = LETTER
    elif not isinstance(pagesize, (tuple, list)):
        # Fall back to one card per page
        pagesize = cardsize
    pagesize = orientation(pagesize)

    # Extract the data
    if is_resource:
        # Extract the data items from the resource
        fields = layout.fields(resource)
        data = self.extract(resource, fields, orderby=attr.get("orderby"))
        items = data.rows
    elif callable(resource):
        # External getter => call with resource, returns the data items
        data = None
        items = resource()
    else:
        # The data items have been passed-in in place of the resource
        data = None
        items = resource

    # Get the labels
    labels = attr.get("labels")
    if callable(labels):
        labels = labels(resource)
    elif not isinstance(labels, dict):
        if data and hasattr(data, "rfields"):
            # Collect the labels from rfields
            rfields = data.rfields
            labels = {rfield.colname: rfield.label for rfield in rfields}
        else:
            labels = {}

    # Document title
    title = attr.get("title")
    if not title and is_resource:
        crud_strings = current.response.s3.crud_strings[resource.tablename]
        if crud_strings:
            title = crud_strings["title_list"]

    # Instantiate the doc template
    doc = S3PDFCardTemplate(pagesize,
                            cardsize,
                            margins=attr.get("margins"),
                            spacing=attr.get("spacing"),
                            title=title,
                            )

    # Produce the flowables
    flowables = self.get_flowables(layout,
                                   resource,
                                   items,
                                   labels=labels,
                                   cards_per_page=doc.cards_per_page,
                                   )

    # Build the doc
    output_stream = BytesIO()
    doc.build(flowables,
              output_stream,
              #canvasmaker=canvas.Canvas, # is default
              )

    output_stream.seek(0)
    return output_stream
def testPOSTFilterAjax(self):
    """
        Test POST filter interpretation with JSON request body

        Verifies that an Ajax POST with "$search=ajax" is converted
        into a GET with the body's filter queries merged into get_vars,
        and that invalid/missing bodies or absent $search leave the
        request unchanged.
    """

    assertEqual = self.assertEqual
    assertNotIn = self.assertNotIn

    request = current.request

    # Test with valid filter expression JSON
    # (includes non-str values and non-ASCII characters deliberately)
    jsonstr = '''{"service_organisation.service_id__belongs":"1","~.example__lt":1,"~.other__like":[1,2],"~.name__like":"*Liquiçá*"}'''
    request._body = BytesIO(jsonstr.encode("utf-8"))
    r = S3Request(prefix="org",
                  name="organisation",
                  http="POST",
                  get_vars={"$search": "ajax", "test": "retained"},
                  )
    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars
    # $search removed from GET vars:
    assertNotIn("$search", get_vars)
    # Verify that parsed $filter vars can safely be re-encoded as GET URL
    try:
        r.url()
    except (UnicodeDecodeError, UnicodeEncodeError):
        self.fail("r.url raises Unicode exception with non-ASCII characters in $filter")
    # Filter queries from JSON body added to GET vars (always str, or list of str):
    assertEqual(get_vars.get("service_organisation.service_id__belongs"), "1")
    assertEqual(get_vars.get("~.example__lt"), "1")
    assertEqual(get_vars.get("~.other__like"), ["1", "2"])
    assertEqual(get_vars.get("~.name__like"), "*Liquiçá*")
    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test without $search
    request._body = BytesIO(b'{"service_organisation.service_id__belongs":"1"}')
    r = S3Request(prefix="org",
                  name="organisation",
                  http="POST",
                  get_vars={"test": "retained"},
                  )
    # Method should still be POST:
    assertEqual(r.http, "POST")
    get_vars = r.get_vars
    # $search never was in GET vars - confirm this to exclude test regression
    assertNotIn("$search", get_vars)
    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)
    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test with valid JSON but invalid filter expression
    request._body = BytesIO(b'[1,2,3]')
    r = S3Request(prefix="org",
                  name="organisation",
                  http="POST",
                  get_vars={"$search": "ajax", "test": "retained"},
                  )
    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars
    # $search removed from GET vars:
    assertNotIn("$search", get_vars)
    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)
    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test with empty body
    request._body = BytesIO(b'')
    r = S3Request(prefix="org",
                  name="organisation",
                  http="POST",
                  get_vars={"$search": "ajax", "test": "retained"},
                  )
    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars
    # $search removed from GET vars:
    assertNotIn("$search", get_vars)
    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)
    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")
def encode(self, resource, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param resource: the source of the data that is to be encoded
                         as a spreadsheet, can be either of:
                            1) an S3Resource
                            2) an array of value dicts (dict of
                               column labels as first item, list of
                               field types as second item)
                            3) a dict like:
                               {columns: [key, ...],
                                headers: {key: label},
                                types: {key: type},
                                rows: [{key:value}],
                                }

        @param attr: keyword arguments (see below)

        @keyword as_stream: return the buffer (BytesIO) rather than
                            its contents (str), useful when the output
                            is supposed to be stored locally
        @keyword title: the main title of the report
        @keyword list_fields: fields to include in list views
        @keyword report_groupby: used to create a grouping of the result:
                                 either a Field object of the resource
                                 or a string which matches a value in
                                 the heading
        @keyword use_colour: True to add colour to the cells, default False
        @keyword evenodd: render different background colours
                          for even/odd rows ("stripes")
    """

    # Do not redirect from here!
    # ...but raise proper status code, which can be caught by caller
    try:
        import xlwt
    except ImportError:
        error = self.ERROR.XLWT_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        error = self.ERROR.XLRD_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)

    import datetime

    MAX_CELL_SIZE = self.MAX_CELL_SIZE
    COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    if title is None:
        title = current.T("Report")
    list_fields = attr.get("list_fields")
    group = attr.get("dt_group")
    use_colour = attr.get("use_colour", False)
    evenodd = attr.get("evenodd", True)

    # Extract the data from the resource
    # Three accepted input shapes (see docstring): dict, list/tuple,
    # or an S3Resource from which the data must first be extracted
    if isinstance(resource, dict):
        headers = resource.get("headers", {})
        lfields = resource.get("columns", list_fields)
        column_types = resource.get("types")
        types = [column_types[col] for col in lfields]
        rows = resource.get("rows")
    elif isinstance(resource, (list, tuple)):
        headers = resource[0]
        types = resource[1]
        rows = resource[2:]
        lfields = list_fields
    else:
        if not list_fields:
            list_fields = resource.list_fields()
        (title, types, lfields, headers, rows) = self.extract(resource,
                                                              list_fields,
                                                              )

    # Verify columns in items: log (but do not abort) if the rows have
    # fewer columns than the selected field list
    request = current.request
    if len(rows) > 0 and len(lfields) > len(rows[0]):
        msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist requesting url %s Headers = %d, Data Items = %d Headers %s List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
        current.log.error(msg)

    # Grouping
    report_groupby = lfields[group] if group else None
    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = settings.get_L10n_date_format()
    date_format_str = str(date_format)

    # Translate strftime-style formats into Excel number-format strings
    dt_format_translate = self.dt_format_translate
    date_format = dt_format_translate(date_format)
    time_format = dt_format_translate(settings.get_L10n_time_format())
    datetime_format = dt_format_translate(settings.get_L10n_datetime_format())

    title_row = settings.get_xls_title_row()

    # Get styles
    styles = self._styles(use_colour = use_colour,
                          evenodd = evenodd,
                          datetime_format = datetime_format,
                          )

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add sheets
    sheets = []
    # XLS exports are limited to 65536 rows per sheet, we bypass
    # this by creating multiple sheets
    row_limit = 65536
    # NOTE(review): true division yields a float here; the `<=` comparison
    # below still produces the same sheet count as integer division would
    sheetnum = len(rows) / row_limit
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = s3_str(title.replace("/", " "))
    if len(sheet_name) > 28:
        # Sheet name cannot be over 31 chars
        # (take sheet number suffix into account)
        sheet_name = sheet_name[:28]
    count = 1
    while len(sheets) <= sheetnum:
        sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
        count += 1

    if callable(title_row):
        # Calling with sheet None to get the number of title rows
        title_row_length = title_row(None)
    else:
        title_row_length = 2

    # Add header row to all sheets, determine columns widths
    header_style = styles["header"]
    for sheet in sheets:
        # Move this down if a title row will be added
        if title_row:
            header_row = sheet.row(title_row_length)
        else:
            header_row = sheet.row(0)
        column_widths = []
        has_id = False
        col_index = 0
        for selector in lfields:
            if selector == report_groupby:
                # Group-by column is rendered as sub-headings, not a column
                continue
            label = headers[selector]
            if label == "Id":
                # Indicate to adjust col_index when writing out
                has_id = True
                column_widths.append(0)
                col_index += 1
                continue
            if label == "Sort":
                continue
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            header_row.write(write_col_index, str(label), header_style)
            # Initial column width from the label length, clamped to xlwt limits
            width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
            width = min(width, 65535) # USHRT_MAX
            column_widths.append(width)
            sheet.col(write_col_index).width = width
            col_index += 1

    title = s3_str(title)

    # Title row (optional, deployment setting)
    if title_row:
        T = current.T
        large_header_style = styles["large_header"]
        notes_style = styles["notes"]
        for sheet in sheets:
            if callable(title_row):
                # Custom title rows
                title_row(sheet)
            else:
                # First row => Title (standard = "title_list" CRUD string)
                current_row = sheet.row(0)
                if col_index > 0:
                    sheet.write_merge(0, 0, 0, col_index,
                                      title,
                                      large_header_style,
                                      )
                current_row.height = 500
                # Second row => Export date/time
                current_row = sheet.row(1)
                current_row.write(0, "%s:" % T("Date Exported"), notes_style)
                current_row.write(1, request.now, notes_style)
                # Fix the size of the last column to display the date
                # NOTE(review): `width` is the last value from the header loop
                # above; if no column was written it would be unbound here
                if 16 * COL_WIDTH_MULTIPLIER > width:
                    sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    total_cols = col_index
    # Move the rows down if a title row is included
    if title_row:
        row_index = title_row_length
    else:
        row_index = 0

    # Helper function to get the current row: maps a running row count
    # onto (sheet, row), rolling over to the next sheet at row_limit
    def get_current_row(row_count, row_limit):
        sheet_count = int(row_count / row_limit)
        row_number = row_count - (sheet_count * row_limit)
        if sheet_count > 0:
            # Skip the header row on continuation sheets
            row_number += 1
        return sheets[sheet_count], sheets[sheet_count].row(row_number)

    # Write the table contents
    subheading = None
    odd_style = styles["odd"]
    even_style = styles["even"]
    subheader_style = styles["subheader"]
    for row in rows:
        # Current row
        row_index += 1
        current_sheet, current_row = get_current_row(row_index, row_limit)
        style = even_style if row_index % 2 == 0 else odd_style

        # Group headers
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(row[report_groupby]))
            if subheading != represent:
                # Start of new group - write group header
                subheading = represent
                current_sheet.write_merge(row_index, row_index, 0, total_cols,
                                          subheading,
                                          subheader_style,
                                          )
                # Move on to next row
                row_index += 1
                current_sheet, current_row = get_current_row(row_index, row_limit)
                style = even_style if row_index % 2 == 0 else odd_style

        col_index = 0
        remaining_fields = lfields

        # Custom row style?
        row_style = None
        if "_style" in row:
            stylename = row["_style"]
            if stylename in styles:
                row_style = styles[stylename]

        # Group header/footer row?
        if "_group" in row:
            group_info = row["_group"]
            label = group_info.get("label")
            totals = group_info.get("totals")
            if label:
                label = s3_strip_markup(s3_unicode(label))
                style = row_style or subheader_style
                span = group_info.get("span")
                if span == 0:
                    # Label spans the entire row
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              total_cols - 1,
                                              label,
                                              style,
                                              )
                    if totals:
                        # Write totals into the next row
                        row_index += 1
                        current_sheet, current_row = \
                            get_current_row(row_index, row_limit)
                else:
                    # Label spans the first `span` columns, totals
                    # (if any) are written into the remaining columns
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              span - 1,
                                              label,
                                              style,
                                              )
                    col_index = span
                    remaining_fields = lfields[span:]
            if not totals:
                continue

        for field in remaining_fields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                col_index += 1
                continue
            if field not in row:
                represent = ""
            else:
                represent = s3_strip_markup(s3_unicode(row[field]))
            coltype = types[col_index]
            if coltype == "sort":
                continue
            if len(represent) > MAX_CELL_SIZE:
                # Truncate over-long cell contents (xlwt limit)
                represent = represent[:MAX_CELL_SIZE]
            value = represent
            # Convert typed values; on any parse failure fall back to the
            # string representation (hence the deliberate broad excepts)
            # NOTE(review): assigning num_format_str mutates the shared
            # style object for all subsequent cells using that style
            if coltype == "date":
                try:
                    cell_datetime = datetime.datetime.strptime(value, date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    cell_datetime = datetime.datetime.strptime(value, date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    cell_datetime = datetime.datetime.strptime(value, date_format_str)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            current_row.write(write_col_index, value, style)
            # Widen the column if this cell's content is the longest so far
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > column_widths[col_index]:
                column_widths[col_index] = width
                current_sheet.col(write_col_index).width = width
            col_index += 1

    # Additional sheet settings: freeze the header row
    for sheet in sheets:
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1

    # Write output
    output = BytesIO()
    book.save(output)
    output.seek(0)

    if attr.get("as_stream", False):
        return output

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, title)
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    return output.read()
def kit_export_xls():
    """
        Export a list of Kits in Excel XLS format

        Sheet 1 is a list of Kits
        Then there is a separate sheet per kit, listing its component items

        @return: the XLS file contents (bytes); also sets the response
                 headers for an attachment download

        Improvements over previous version:
        - the bold header style is created once instead of per header cell
        - the invariant budget_item table lookup is hoisted out of the
          per-content loop
        - cell counters are derived with enumerate()
    """

    try:
        import xlwt
    except ImportError:
        session.error = "xlwt module not available within the running Python - this needs installing for XLS output!"
        redirect(URL(c="kit"))

    from s3compat import BytesIO
    output = BytesIO()

    book = xlwt.Workbook()

    # Shared bold style for all header rows (create once, reuse everywhere)
    bold_style = xlwt.easyxf("font: bold True;")

    # List of Kits
    sheet1 = book.add_sheet("Kits")

    # Header row for Kits sheet
    row0 = sheet1.row(0)
    table = db.budget_kit
    kits = db(table.id > 0).select()
    fields = [table[f] for f in table.fields if table[f].readable]
    for cell, field in enumerate(fields):
        row0.write(cell, field.label, bold_style)

    # For Header row on Items sheets
    item_table = db.budget_item
    fields_items = [item_table[f] for f in item_table.fields if item_table[f].readable]

    row = 1
    for kit in kits:
        # The Kit details on Sheet 1
        rowx = sheet1.row(row)
        row += 1
        for cell1, field in enumerate(fields):
            tab, col = str(field).split(".")
            rowx.write(cell1, kit[col])

        # Sheet per Kit detailing constituent Items
        # Replace characters which are illegal in sheetnames
        sheetname = kit.code.replace("/", "_")
        sheet = book.add_sheet(sheetname)

        # Header row for Items sheet
        row0 = sheet.row(0)
        for cell, field_item in enumerate(fields_items):
            row0.write(cell, field_item.label, bold_style)

        # List Items in each Kit
        ktable = db.budget_kit_item
        contents = db(ktable.kit_id == kit.id).select()
        rowy = 1
        for content in contents:
            item = db(item_table.id == content.item_id).select().first()
            rowx = sheet.row(rowy)
            rowy += 1
            for cell, field_item in enumerate(fields_items):
                tab, col = str(field_item).split(".")
                # Do lookups for option fields
                if col == "cost_type":
                    value = str(budget_cost_type_opts[item[col]])
                elif col == "category_type":
                    value = str(budget_category_type_opts[item[col]])
                else:
                    value = item[col]
                rowx.write(cell, value)

    book.save(output)
    output.seek(0)

    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".xls")
    filename = "%s_kits.xls" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename

    return output.read()
def xls(self, r, **attr):
    """
        Export the performance indicators as XLS data sheet

        @param r: the S3Request instance
        @param attr: controller attributes

        @return: the streamed XLS file (via response.stream)
    """

    T = current.T
    s3db = current.s3db

    try:
        import xlwt
    except ImportError:
        # Fail with a proper status code the caller can catch
        raise HTTP(503, body="XLWT not installed")

    title = s3_str(T("Performance Indicators"))
    write = self.write

    # Create workbook and sheet
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(title)

    # Get the statistics
    resource = self.resource
    table = resource.table
    indicators = self.indicators(resource)

    # Title and Report Dates (from filter)
    write(sheet, 0, 0, title, style="header")
    dates = []
    get_vars = r.get_vars
    field = table.date
    for fvar in ("~.date__ge", "~.date__le"):
        dtstr = get_vars.get(fvar)
        if dtstr:
            try:
                dt = s3_decode_iso_datetime(dtstr).date()
            except (ValueError, AttributeError):
                # NOTE(review): on a parse failure nothing is appended
                # for this boundary (dt is set but unused), so the
                # joined date range may show only one date
                dt = None
            else:
                dates.append(field.represent(dt))
        else:
            # Open-ended boundary
            dates.append("...")
    if dates:
        write(sheet, 1, 0, " -- ".join(dates))

    # Basic performance indicators
    # (fixed row layout; rowindex values are part of the report design)
    rowindex = 3

    # Total number of consultations
    write(sheet, rowindex, 0, T("Total Number of Consultations"))
    write(sheet, rowindex, 1, indicators.get("total_responses", ""))
    rowindex += 1

    # Total number of clients
    write(sheet, rowindex, 0, T("Total Number of Clients"))
    write(sheet, rowindex, 1, indicators.get("total_clients", ""))
    rowindex += 1

    # Average duration, converted from hours to whole minutes
    write(sheet, rowindex, 0, T("Average Duration of Consultations (minutes)"))
    avg_hours_per_response = indicators.get("avg_hours_per_response")
    if avg_hours_per_response:
        avg_minutes_per_response = int(round(avg_hours_per_response * 60))
    else:
        avg_minutes_per_response = ""
    write(sheet, rowindex, 1, avg_minutes_per_response)
    rowindex += 1

    write(sheet, rowindex, 0, T("Average Number of Consultations per Client"))
    write(sheet, rowindex, 1, indicators.get("avg_responses_per_client", ""))

    # Distribution
    rowindex = 8
    write(sheet, rowindex, 0, T("Distribution of Clients"))
    write(sheet, rowindex, 1, T("Single"))
    write(sheet, rowindex, 2, indicators.get("singles", ""))
    rowindex += 1
    write(sheet, rowindex, 1, T("Family"))
    write(sheet, rowindex, 2, indicators.get("families", ""))
    rowindex += 1
    # NOTE(review): no value is written for "Group Counseling" - there is
    # no corresponding indicator in the dict; confirm whether intentional
    write(sheet, rowindex, 1, T("Group Counseling"))
    rowindex += 1
    write(sheet, rowindex, 1, T("Individual Counseling"))
    write(sheet, rowindex, 2, indicators.get("total_responses", ""))

    # Top-5's
    rowindex = 13
    write(sheet, rowindex, 0, T("Top 5 Countries of Origin"))
    top_5_nationalities = indicators.get("top_5_nationalities")
    if top_5_nationalities:
        dtable = s3db.pr_person_details
        field = dtable.nationality
        for rank, nationality in enumerate(top_5_nationalities):
            write(sheet, rowindex, 1, "%s - %s" % (rank + 1, field.represent(nationality)))
            rowindex += 1
    rowindex += 1
    write(sheet, rowindex, 0, T("Top 5 Counseling Reasons"))
    top_5_needs = indicators.get("top_5_needs")
    if top_5_needs:
        ttable = s3db.dvr_response_theme
        field = ttable.need_id
        for rank, need in enumerate(top_5_needs):
            write(sheet, rowindex, 1, "%s - %s" % (rank + 1, field.represent(need)))
            rowindex += 1

    # Output
    output = BytesIO()
    book.save(output)
    output.seek(0)

    # Response headers
    from gluon.contenttype import contenttype
    disposition = "attachment; filename=\"%s\"" % "indicators.xls"
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    from gluon.streamer import DEFAULT_CHUNK_SIZE
    return response.stream(output,
                           chunk_size=DEFAULT_CHUNK_SIZE,
                           request=r,
                           )