def setUp(self): xmlstr = """<?xml version="1.0"?><s3xml/>""" stylesheet = """<?xml version="1.0"?> <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" xmlns:s3="http://eden.sahanafoundation.org/wiki/S3"> <xsl:output method="xml"/> <s3:fields tables="gis_location" select="ALL"/> <s3:fields tables="org_office" exclude="site_id"/> <s3:fields tables="pr_person" select="ALL" exclude="last_name"/> <s3:fields tables="pr_*" select="pe_id" exclude="pe_label"/> <s3:fields tables="pr_c*" select="ALL"/> <s3:fields tables="ANY" select="location_id,site_id"/> <xsl:template match="/"> <test>Test</test> </xsl:template> </xsl:stylesheet>""" self.tree = etree.ElementTree(etree.fromstring(xmlstr)) self.stylesheet = S3XMLFormat(StringIO(stylesheet))
def testValidateComponentTableFailure(self):
    """ Test error in component validation """

    assertTrue = self.assertTrue
    assertFalse = self.assertFalse

    request = self.request
    crud = self.resource.crud

    # Submit values expected to fail validation for both fields
    jsonstr = """{"name":"", "acronym":"test"}"""
    request.body = StringIO(jsonstr)
    request.get_vars["component"] = "office"

    output = crud.validate(request)
    assertTrue(isinstance(output, basestring))

    data = json.loads(output)
    assertTrue(isinstance(data, dict))
    self.assertEqual(len(data), 2)

    # "name" must report an error, and hence no "text"
    assertTrue("name" in data)
    name = data["name"]
    assertTrue(isinstance(name, dict))
    assertTrue("value" in name)
    assertFalse("text" in name)
    assertTrue("_error" in name)

    # "acronym" must report an error, and hence no "text"
    assertTrue("acronym" in data)
    acronym = data["acronym"]
    assertTrue(isinstance(acronym, dict))
    assertTrue("value" in acronym)
    assertFalse("text" in acronym)
    assertTrue("_error" in acronym)
def testValidateMainTableError(self):
    """ Test error in main table validation """

    assertTrue = self.assertTrue
    assertFalse = self.assertFalse

    request = self.request
    crud = self.resource.crud

    # Empty name should fail, acronym should pass
    jsonstr = """{"name":"", "acronym":"TO"}"""
    request.body = StringIO(jsonstr)

    output = crud.validate(request)
    assertTrue(isinstance(output, basestring))

    data = json.loads(output)
    assertTrue(isinstance(data, dict))
    self.assertEqual(len(data), 2)

    # "name" must report an error, and hence no "text"
    assertTrue("name" in data)
    name = data["name"]
    assertTrue(isinstance(name, dict))
    assertTrue("value" in name)
    assertFalse("text" in name)
    assertTrue("_error" in name)

    # "acronym" must validate, providing a "text" and no error
    acronym = data["acronym"]
    assertTrue(isinstance(acronym, dict))
    assertTrue("value" in acronym)
    assertTrue("text" in acronym)
    assertTrue(isinstance(acronym["text"], basestring))
    assertFalse("_error" in acronym)
def testTypeConversionFeature(self):
    """ Check that values get converted into the field type during validation """

    assertTrue = self.assertTrue

    s3db = current.s3db

    # Create a fake request for the validate method
    resource = s3db.resource("project_organisation")
    request = Storage(prefix = "project",
                      name = "organisation",
                      resource = resource,
                      table = resource.table,
                      tablename = resource.tablename,
                      method = "validate",
                      get_vars = Storage(),
                      representation = "json",
                      http = "GET",
                      )
    crud = resource.crud

    # Both values are submitted as strings
    jsonstr = """{"organisation_id":"1", "role":"1"}"""
    request.body = StringIO(jsonstr)

    output = crud.validate(request)
    assertTrue(isinstance(output, basestring))

    data = json.loads(output)
    assertTrue(isinstance(data, dict))
    self.assertEqual(len(data), 2)

    # The "role" value must have been converted into an int
    assertTrue("role" in data)
    role = data["role"]
    assertTrue(isinstance(role, dict))
    assertTrue("value" in role)
    assertTrue(isinstance(role["value"], int))
def setUp(self):
    """
        Create test records: a person with a contact and an uploaded
        image, for use by the test methods of this case
    """

    current.auth.override = True

    s3db = current.s3db
    ptable = s3db.pr_person
    ctable = s3db.pr_contact
    itable = s3db.pr_image

    # Create a person record
    person = {"pe_label": "KBT012",
              "first_name": "Example",
              "last_name": "Person",
              #"date_of_birth": ...,
              "comments": "This is a comment",
              }
    person_id = ptable.insert(**person)
    person["id"] = person_id
    s3db.update_super(ptable, person)
    self.person_id = person_id

    # Add a contact record
    contact = {"pe_id": person["pe_id"],
               "contact_method": "SMS",
               "value": "+60738172623",
               "comments": "This is a comment",
               }
    self.contact_id = ctable.insert(**contact)

    # Add an image record
    stream = StringIO()
    stream.write("TEST")
    # Rewind the stream before storing: store() reads from the current
    # position, so without seek(0) the stored file would be empty
    stream.seek(0)
    filename = itable.image.store(stream, "test.txt")
    filepath = os.path.join(current.request.folder, "uploads", filename)
    image = {"pe_id": person["pe_id"],
             "image": filename,
             "description": "Example description",
             }
    self.image_id = itable.insert(**image)
    self.assertTrue(os.path.exists(filepath))
    self.image_path = filepath
def submission():
    """
        Allows for submission of Xforms by ODK Collect
        http://code.google.com/p/opendatakit/

        @todo: re-implement in S3XForms, deprecate
    """

    # @ToDo: Something better than this crude check
    if not auth.s3_logged_in():
        auth.permission.fail()

    from s3compat import StringIO
    import cgi
    from lxml import etree

    source = request.post_vars.get("xml_submission_file", None)
    if isinstance(source, cgi.FieldStorage):
        # Use the uploaded file if present, otherwise the inline value
        xmlinput = source.file if source.filename else source.value
        if isinstance(xmlinput, basestring):
            xmlinput = StringIO(xmlinput)
    elif request.env.request_method == "HEAD":
        raise HTTP(204)
    else:
        raise HTTP(400, "Invalid Request: Expected an XForm")

    # The root tag of the XForm names the target table
    tree = etree.parse(xmlinput)
    tablename = tree.getroot().tag
    resource = s3db.resource(tablename)

    stylesheet = os.path.join(request.folder,
                              "static", "formats", "odk", "import.xsl")
    try:
        result = resource.import_xml(source=tree, stylesheet=stylesheet)
    except (IOError, SyntaxError):
        raise HTTP(500, "Internal server error")

    # Parse response
    status = json.loads(result)["statuscode"]
    if status == "200":
        # ODK Collect only accepts 201
        r = HTTP(201, "Saved")
        r.headers["Location"] = request.env.http_host
        raise r
    else:
        raise HTTP(status, result)
def series_export_word(widget_list, lang_dict, title, logo):
    """
        Export a Series in RTF Format

        @param widget_list: dict of question widgets, each providing a
                            writeQuestionToRTF() method and a question
                            with a posn attribute — TODO confirm contract
        @param lang_dict: the translation dict passed to the widgets
        @param title: the document title
        @param logo: path to a logo image, or None

        @return: the RTF document as a StringIO

        @ToDo: rewrite as S3Method handler
    """

    import gluon.contrib.pyrtf as pyrtf
    from s3compat import StringIO

    output = StringIO()
    doc = pyrtf.Document(default_language=pyrtf.Languages.EnglishUK)
    section = pyrtf.Section()
    ss = doc.StyleSheet

    # Grey-shaded variant of the Normal paragraph style
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalGrey")
    ps.SetShadingPropertySet(pyrtf.ShadingPropertySet(pattern=1,
                                                      background=pyrtf.Colour("grey light", 224, 224, 224)))
    ss.ParagraphStyles.append(ps)

    # Centre-aligned variant of the Normal paragraph style
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalCentre")
    ps.SetParagraphPropertySet(pyrtf.ParagraphPropertySet(alignment=3))
    ss.ParagraphStyles.append(ps)

    doc.Sections.append(section)

    # Document heading with optional logo
    heading = pyrtf.Paragraph(ss.ParagraphStyles.Heading1)
    if logo:
        image = pyrtf.Image(logo)
        heading.append(image)
    heading.append(title)
    section.append(heading)

    col = [2800, 6500]
    table = pyrtf.Table(*col)
    AddRow = table.AddRow

    # Render the questions in position order
    sorted_widget_list = sorted(widget_list.values(),
                                key=lambda widget: widget.question.posn)
    for widget in sorted_widget_list:
        line = widget.writeQuestionToRTF(ss, lang_dict)
        try:
            AddRow(*line)
        except Exception:
            # Fix: narrowed from a bare except, which would also have
            # swallowed SystemExit/KeyboardInterrupt.
            # Skip questions that cannot be rendered as a table row,
            # but re-raise in debug mode to expose the problem
            if settings.base.debug:
                raise

    section.append(table)

    renderer = pyrtf.Renderer()
    renderer.Write(doc, output)
    return output
def testValueParsing(self):
    """ Test handling of S3JSON @value attribute """

    assertEqual = self.assertEqual

    json_str = """{
"$_test_resource": [
    {
        "valuelist": {
            "@value": ["value1", "value2"]
        },
        "jsonlist": {
            "@value": "[\\"value1\\", \\"value2\\"]"
        },
        "valuestring": {
            "@value": "value1"
        },
        "valueinteger": {
            "@value": 2
        }
    }
]}"""
    tree = current.xml.json2tree(StringIO(json_str))
    root = tree.getroot()

    find_data = lambda field: root.findall('resource/data[@field="%s"]' % field)[0]

    # A value list gives a JSON string with a list
    value = find_data("valuelist").get("value")
    assertEqual(value, '["value1", "value2"]')
    assertEqual(json.loads(value), ["value1", "value2"])

    # A JSON list gives the same JSON string
    value = find_data("jsonlist").get("value")
    assertEqual(value, '["value1", "value2"]')
    assertEqual(json.loads(value), ["value1", "value2"])

    # A string gives the same string
    value = find_data("valuestring").get("value")
    assertEqual(value, "value1")

    # A numeric value gives its string representation
    value = find_data("valueinteger").get("value")
    assertEqual(value, "2")
def listen(self):
    """ Start recording S3Log messages """

    # Already listening?
    if self.handler is not None:
        return

    # Reuse the existing buffer, or create one
    buf = self.strbuf
    if buf is None:
        from s3compat import StringIO
        buf = StringIO()

    # Attach a stream handler writing into the buffer
    stream_handler = logging.StreamHandler(buf)
    logging.getLogger(__name__).addHandler(stream_handler)

    self.handler = stream_handler
    self.strbuf = buf
def testValidateComponentTable(self):
    """ Test successful component validation """

    assertTrue = self.assertTrue
    assertFalse = self.assertFalse

    request = self.request
    crud = self.resource.crud

    jsonstr = """{"name":"TestOffice"}"""
    request.body = StringIO(jsonstr)
    request.get_vars["component"] = "office"

    output = crud.validate(request)
    assertTrue(isinstance(output, basestring))

    data = json.loads(output)
    assertTrue(isinstance(data, dict))
    self.assertEqual(len(data), 1)

    # "name" must validate, providing a "text" and no error
    assertTrue("name" in data)
    name = data["name"]
    assertTrue(isinstance(name, dict))
    assertTrue("value" in name)
    assertTrue("text" in name)
    assertTrue(isinstance(name["text"], basestring))
    assertFalse("_error" in name)
def draw(self, output="xml"):
    """
        Output the chart as a PNG embedded in an IMG tag
            - used by the Delphi module
    """

    fig = self.fig
    if not fig:
        return "Matplotlib not installed"

    # For interactive shell tests
    #plt.show()
    # For web response
    #savefig(response.body)

    # Render the figure to an in-memory PNG
    chart = Storage()
    chart.body = StringIO()
    chart.headers = Storage()
    chart.headers["Content-Type"] = "image/png"
    canvas = self.FigureCanvas(fig)
    canvas.print_figure(chart.body)
    #return response.body.getvalue()
    image = chart.body.getvalue()

    # IE 8 and before has a 32K limit on URIs this can be quickly
    # gobbled up if the image is too large. So the image will
    # stored on the server and a URI used in the src
    cachePath = self.storeCachedFile(self.filename, image)
    if output == "xml":
        if cachePath != None:
            image = IMG(_src=cachePath)
        else:
            # Fall back to an inline data URI
            import base64
            base64Img = base64.b64encode(image)
            image = IMG(_src="data:image/png;base64,%s" % base64Img)
    else:
        current.response.headers["Content-Type"] = "image/png"
    return image
def importxml(db, xmlinput):
    """
        Converts the XML to a CSV compatible with the import_from_csv_file
        of web2py

        @param db: the database to import into
        @param xmlinput: the XML source, as a string

        @raise Exception: if the XML cannot be parsed

        @ToDo: rewrite this to go via S3Resource for proper Auth checking, Audit.
        @todo: deprecate
    """
    from s3compat import StringIO
    import xml.dom.minidom

    try:
        doc = xml.dom.minidom.parseString(xmlinput)
    except Exception:
        # Fix: narrowed from a bare except, which would also have converted
        # SystemExit/KeyboardInterrupt into a parse error
        raise Exception("XML parse error")

    # Root element name is the target table; build the CSV from its children
    parent = doc.childNodes[0].tagName
    csvout = csvheader(parent, doc.childNodes[0].childNodes)
    for subnode in doc.childNodes:
        csvout = csvout + csvdata(subnode.childNodes)

    # Feed the CSV to web2py's importer
    fh = StringIO()
    fh.write(csvout)
    fh.seek(0, 0)
    db[parent].import_from_csv_file(fh)
def __call__(self):
    """ Render the module home page from CMS content """

    module = "default"
    resource = "index"

    # vars for the edit/create links
    link_vars = {"module": module,
                 "resource": resource,
                 }

    db = current.db
    table = current.s3db.cms_post
    ltable = db.cms_post_module

    # Look up the CMS post linked to this module/resource
    query = (ltable.module == module) & \
            (ltable.resource == resource) & \
            (ltable.post_id == table.id) & \
            (table.deleted != True)
    row = db(query).select(table.id,
                           table.title,
                           table.body,
                           limitby=(0, 1),
                           ).first()

    title = None
    if row:
        title = row.title
        if row.body:
            from s3compat import StringIO
            try:
                # Render the body as a view template
                body = current.response.render(StringIO(row.body), {})
            except:
                # Fall back to the raw body
                body = row.body
        # NOTE(review): if row.body is empty, "body" is undefined here
        # and this raises NameError — matches the original structure
        item = DIV(XML(body), _class="cms-item")
        if current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
            item.append(BR())
            item.append(A(current.T("Edit"),
                          _href = URL(c="cms", f="post",
                                      args = [row.id, "update"],
                                      vars = link_vars,
                                      ),
                          _class = "action-btn",
                          ))
    elif current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
        # No post yet => offer to create one
        item = A(current.T("Edit"),
                 _href = URL(c="cms", f="post",
                             args = "create",
                             vars = link_vars,
                             ),
                 _class = "action-btn",
                 )
    else:
        item = None

    if not title:
        title = current.deployment_settings.get_system_name()
    current.response.title = title

    self._view(THEME, "index.html")

    return {"content": item,
            }
def parse_rss_2_cap(message):
    """
        Parse RSS Feeds into the CAP Module

        @param message: the message record (provides message_id)
    """

    db = current.db
    s3db = current.s3db

    # Look up the RSS record for this message
    table = s3db.msg_rss
    message_id = message.message_id
    record = db(table.message_id == message_id).select(table.id,
                                                       table.channel_id,
                                                       table.title,
                                                       table.from_address,
                                                       table.body,
                                                       table.date,
                                                       table.location_id,
                                                       table.author,
                                                       limitby = (0, 1),
                                                       ).first()
    if not record:
        return

    pstable = s3db.msg_parsing_status
    # not adding (pstable.channel_id == record.channel_id) to query
    # because two channels (http://host.domain/eden/cap/public.rss and
    # (http://host.domain/eden/cap/alert.rss) may contain common url
    # eg. http://host.domain/eden/cap/public/xx.cap
    pquery = (pstable.message_id == message_id)
    prows = db(pquery).select(pstable.id,
                              pstable.is_parsed,
                              )
    for prow in prows:
        if prow.is_parsed:
            # Already parsed via another channel => done
            return

    alert_table = s3db.cap_alert
    info_table = s3db.cap_info

    # Is this an Update or a Create?
    # @ToDo: Use guid?
    # Use Body
    body = record.body or record.title
    query = (info_table.description == body)
    exists = db(query).select(info_table.id,
                              limitby = (0, 1),
                              ).first()

    author = record.author
    if author:
        # Resolve (or create) the author as a person record
        ptable = s3db.pr_person
        # https://code.google.com/p/python-nameparser/
        from nameparser import HumanName
        name = HumanName(author)
        first_name = name.first
        middle_name = name.middle
        last_name = name.last
        query = (ptable.first_name == first_name) & \
                (ptable.middle_name == middle_name) & \
                (ptable.last_name == last_name)
        pexists = db(query).select(ptable.id,
                                   limitby = (0, 1),
                                   ).first()
        if pexists:
            person_id = pexists.id
        else:
            person_id = ptable.insert(first_name = first_name,
                                      middle_name = middle_name,
                                      last_name = last_name,
                                      )
            s3db.update_super(ptable, {"id": person_id})
    else:
        person_id = None

    if exists:
        # Update the existing info record
        # @ToDo: Use XSLT
        info_id = exists.id
        db(info_table.id == info_id).update(headline = record.title,
                                            description = body,
                                            created_on = record.date,
                                            #location_id = record.location_id,
                                            #person_id = person_id,
                                            )
    else:
        # Embedded link
        url = record.from_address
        import_xml = s3db.resource("cap_alert").import_xml
        stylesheet = os.path.join(current.request.folder,
                                  "static", "formats", "cap", "import.xsl")
        try:
            file = fetch(url)
        except HTTPError as e:
            # Fetch failed => try again with credentials from the channel
            import base64
            rss_table = s3db.msg_rss_channel
            query = (rss_table.channel_id == record.channel_id)
            channel = db(query).select(rss_table.date,
                                       rss_table.etag,
                                       rss_table.url,
                                       rss_table.username,
                                       rss_table.password,
                                       limitby = (0, 1),
                                       ).first()
            username = channel.username
            password = channel.password
            if e.code == 401 and username and password:
                request = urllib2.Request(url)
                base64string = base64.encodestring("%s:%s" % (username, password))
                request.add_header("Authorization", "Basic %s" % base64string)
            else:
                request = None
            try:
                file = urlopen(request).read() if request else fetch(url)
            except HTTPError as e:
                # Check if there are links to look into
                ltable = s3db.msg_rss_link
                query_ = (ltable.rss_id == record.id) & (ltable.deleted != True)
                rows_ = db(query_).select(ltable.type,
                                          ltable.url,
                                          )
                url_format = "{uri.scheme}://{uri.netloc}/".format
                url_domain = url_format(uri=urlparse.urlparse(url))
                for row_ in rows_:
                    url = row_.url
                    if url and row_.type == "application/cap+xml" and \
                       url_domain == url_format(uri=urlparse.urlparse(url)):
                        # Same domain, so okay to use same username/pwd combination
                        if e.code == 401 and username and password:
                            request = urllib2.Request(url)
                            request.add_header("Authorization", "Basic %s" % base64string)
                        else:
                            request = None
                        try:
                            file = urlopen(request).read() if request else fetch(url)
                        except HTTPError as e:
                            current.log.error("Getting content from link failed: %s" % e)
                        else:
                            # Import via XSLT
                            import_xml(StringIO(file),
                                       stylesheet = stylesheet,
                                       ignore_errors = True,
                                       )
            else:
                # Import via XSLT
                import_xml(StringIO(file),
                           stylesheet = stylesheet,
                           ignore_errors = True,
                           )
        else:
            # Public Alerts
            # eg. http://host.domain/eden/cap/public/xx.cap
            # Import via XSLT
            import_xml(StringIO(file),
                       stylesheet = stylesheet,
                       ignore_errors = True,
                       )

    # No Reply
    return
def series_export_spreadsheet(matrix, matrix_answers, logo):
    """
        Now take the matrix data type and generate a spreadsheet from it
    """

    try:
        import xlwt
    except ImportError:
        response.error = T("xlwt not installed, so cannot export as a Spreadsheet")
        output = s3_rest_controller(module, "survey_series",
                                    rheader=s3db.survey_series_rheader)
        return output

    import math
    from s3compat import StringIO

    # -------------------------------------------------------------------------
    def wrap_text(sheet, cell, style):
        """ Write a cell with text-wrapping, growing row height/col width """
        row = cell.row
        col = cell.col
        try:
            text = s3_unicode(cell.text)
        except:
            text = cell.text
        width = 16
        # Wrap text and calculate the row width and height
        characters_in_cell = float(width - 2)
        twips_per_row = 255 # default row height for 10 point font
        if cell.merged():
            try:
                sheet.write_merge(cell.row,
                                  cell.row + cell.mergeV,
                                  cell.col,
                                  cell.col + cell.mergeH,
                                  text,
                                  style,
                                  )
            except Exception as msg:
                log = current.log
                log.error(msg)
                log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                          (cell.row, cell.mergeV, cell.col, cell.mergeH))
                posn = "%s,%s" % (cell.row, cell.col)
                if matrix.matrix[posn]:
                    log.debug(matrix.matrix[posn])
            rows = math.ceil((len(text) / characters_in_cell) / (1 + cell.mergeH))
        else:
            sheet.write(cell.row,
                        cell.col,
                        text,
                        style,
                        )
            rows = math.ceil(len(text) / characters_in_cell)
        new_row_height = int(rows * twips_per_row)
        new_col_width = width * COL_WIDTH_MULTIPLIER
        if sheet.row(row).height < new_row_height:
            sheet.row(row).height = new_row_height
        if sheet.col(col).width < new_col_width:
            sheet.col(col).width = new_col_width

    # -------------------------------------------------------------------------
    def merge_styles(list_template, style_list):
        """
            Take a list of styles and return a single style object with
            all the differences from a newly created object added to the
            resultant style.
        """
        if len(style_list) == 0:
            final_style = xlwt.XFStyle()
        elif len(style_list) == 1:
            final_style = list_template[style_list[0]]
        else:
            zero_style = xlwt.XFStyle()
            final_style = xlwt.XFStyle()
            for i in range(0, len(style_list)):
                final_style = merge_object_diff(final_style,
                                                list_template[style_list[i]],
                                                zero_style)
        return final_style

    # -------------------------------------------------------------------------
    def merge_object_diff(base_obj, new_obj, zero_obj):
        """
            Copy all the elements in new_obj that differ from zero_obj
            into base_obj
        """
        element_list = new_obj.__dict__
        for (element, value) in element_list.items():
            try:
                base_obj.__dict__[element] = merge_object_diff(base_obj.__dict__[element],
                                                               value,
                                                               zero_obj.__dict__[element])
            except:
                if zero_obj.__dict__[element] != value:
                    base_obj.__dict__[element] = value
        return base_obj

    COL_WIDTH_MULTIPLIER = 240

    book = xlwt.Workbook(encoding="utf-8")
    output = StringIO()

    protection = xlwt.Protection()
    protection.cell_locked = 1
    no_protection = xlwt.Protection()
    no_protection.cell_locked = 0

    # Dotted borders on all sides, for input boxes
    borders = xlwt.Borders()
    borders.left = xlwt.Borders.DOTTED
    borders.right = xlwt.Borders.DOTTED
    borders.top = xlwt.Borders.DOTTED
    borders.bottom = xlwt.Borders.DOTTED

    # -------------------------------------------------------------------------
    def single_edge_border(edge, weight):
        """ A Borders object with one edge set to the given weight """
        b = xlwt.Borders()
        setattr(b, edge, weight)
        return b

    THIN = xlwt.Borders.THIN
    MEDIUM = xlwt.Borders.MEDIUM
    border_t1 = single_edge_border("top", THIN)
    border_t2 = single_edge_border("top", MEDIUM)
    border_l1 = single_edge_border("left", THIN)
    border_l2 = single_edge_border("left", MEDIUM)
    border_r1 = single_edge_border("right", THIN)
    border_r2 = single_edge_border("right", MEDIUM)
    border_b1 = single_edge_border("bottom", THIN)
    border_b2 = single_edge_border("bottom", MEDIUM)

    align_base = xlwt.Alignment()
    align_base.horz = xlwt.Alignment.HORZ_LEFT
    align_base.vert = xlwt.Alignment.VERT_TOP

    align_wrap = xlwt.Alignment()
    align_wrap.horz = xlwt.Alignment.HORZ_LEFT
    align_wrap.vert = xlwt.Alignment.VERT_TOP
    align_wrap.wrap = xlwt.Alignment.WRAP_AT_RIGHT

    shaded_fill = xlwt.Pattern()
    shaded_fill.pattern = xlwt.Pattern.SOLID_PATTERN
    shaded_fill.pattern_fore_colour = 0x16 # 25% Grey
    shaded_fill.pattern_back_colour = 0x08 # Black

    heading_fill = xlwt.Pattern()
    heading_fill.pattern = xlwt.Pattern.SOLID_PATTERN
    heading_fill.pattern_fore_colour = 0x1F # ice_blue
    heading_fill.pattern_back_colour = 0x08 # Black

    style_title = xlwt.XFStyle()
    style_title.font.height = 0x0140 # 320 twips, 16 points
    style_title.font.bold = True
    style_title.alignment = align_base

    style_header = xlwt.XFStyle()
    style_header.font.height = 0x00F0 # 240 twips, 12 points
    style_header.font.bold = True
    style_header.alignment = align_base

    style_sub_header = xlwt.XFStyle()
    style_sub_header.font.bold = True
    style_sub_header.alignment = align_wrap

    style_section_heading = xlwt.XFStyle()
    style_section_heading.font.bold = True
    style_section_heading.alignment = align_wrap
    style_section_heading.pattern = heading_fill

    style_hint = xlwt.XFStyle()
    style_hint.protection = protection
    style_hint.font.height = 160 # 160 twips, 8 points
    style_hint.font.italic = True
    style_hint.alignment = align_wrap

    style_text = xlwt.XFStyle()
    style_text.protection = protection
    style_text.alignment = align_wrap

    style_instructions = xlwt.XFStyle()
    style_instructions.font.height = 0x00B4 # 180 twips, 9 points
    style_instructions.font.italic = True
    style_instructions.protection = protection
    style_instructions.alignment = align_wrap

    style_box = xlwt.XFStyle()
    style_box.borders = borders
    style_box.protection = no_protection

    style_input = xlwt.XFStyle()
    style_input.borders = borders
    style_input.protection = no_protection
    style_input.pattern = shaded_fill

    # -------------------------------------------------------------------------
    def box_style(border):
        """ An XFStyle carrying only the given border """
        s = xlwt.XFStyle()
        s.borders = border
        return s

    style_list = {"styleTitle": style_title,
                  "styleHeader": style_header,
                  "styleSubHeader": style_sub_header,
                  "styleSectionHeading": style_section_heading,
                  "styleHint": style_hint,
                  "styleText": style_text,
                  "styleInstructions": style_instructions,
                  "styleInput": style_input,
                  "boxL1": box_style(border_l1),
                  "boxL2": box_style(border_l2),
                  "boxT1": box_style(border_t1),
                  "boxT2": box_style(border_t2),
                  "boxR1": box_style(border_r1),
                  "boxR2": box_style(border_r2),
                  "boxB1": box_style(border_b1),
                  "boxB2": box_style(border_b2),
                  }

    sheet1 = book.add_sheet(T("Assessment"))
    sheet2 = book.add_sheet(T("Metadata"))

    # Write the assessment cells
    max_col = 0
    for cell in matrix.matrix.values():
        if cell.col + cell.mergeH > 255:
            # xls maximum column count exceeded
            current.log.warning("Cell (%s,%s) - (%s,%s) ignored" % \
                (cell.col, cell.row, cell.col + cell.mergeH, cell.row + cell.mergeV))
            continue
        if cell.col + cell.mergeH > max_col:
            max_col = cell.col + cell.mergeH
        if cell.joined():
            continue
        style = merge_styles(style_list, cell.styleList)
        if (style.alignment.wrap == style.alignment.WRAP_AT_RIGHT):
            # get all the styles from the joined cells
            # and merge these styles in.
            joined_styles = matrix.joinedElementStyles(cell)
            joined_style = merge_styles(style_list, joined_styles)
            try:
                wrap_text(sheet1, cell, joined_style)
            except:
                pass
        else:
            if cell.merged():
                # get all the styles from the joined cells
                # and merge these styles in.
                joined_styles = matrix.joinedElementStyles(cell)
                joined_style = merge_styles(style_list, joined_styles)
                try:
                    sheet1.write_merge(cell.row,
                                       cell.row + cell.mergeV,
                                       cell.col,
                                       cell.col + cell.mergeH,
                                       s3_unicode(cell.text),
                                       joined_style,
                                       )
                except Exception as msg:
                    log = current.log
                    log.error(msg)
                    log.debug("row: %s + vert: %s, col: %s + horiz %s" % \
                              (cell.row, cell.mergeV, cell.col, cell.mergeH))
                    posn = "%s,%s" % (cell.row, cell.col)
                    if matrix.matrix[posn]:
                        log.debug(matrix.matrix[posn])
            else:
                sheet1.write(cell.row,
                             cell.col,
                             s3_unicode(cell.text),
                             style,
                             )

    CELL_WIDTH = 480 # approximately 2 characters
    if max_col > 255:
        max_col = 255
    for col in xrange(max_col + 1):
        sheet1.col(col).width = CELL_WIDTH

    # Write the metadata sheet
    sheet2.write(0, 0, "Question Code")
    sheet2.write(0, 1, "Response Count")
    sheet2.write(0, 2, "Values")
    sheet2.write(0, 3, "Cell Address")
    for cell in matrix_answers.matrix.values():
        style = merge_styles(style_list, cell.styleList)
        sheet2.write(cell.row,
                     cell.col,
                     s3_unicode(cell.text),
                     style,
                     )

    if logo != None:
        sheet1.insert_bitmap(logo, 0, 0)

    sheet1.protect = True
    sheet2.protect = True
    # Hide the metadata columns
    for i in range(26):
        sheet2.col(i).width = 0
    sheet2.write(0, 26,
                 s3_unicode(T("Please do not remove this sheet")),
                 style_header,
                 )
    sheet2.col(26).width = 12000

    book.save(output)
    return output
def kit_export_pdf():
    """
        Export a list of Kits in Adobe PDF format
        Uses Geraldo SubReport
        @ToDo: Use S3PDF Method
    """

    try:
        from reportlab.lib.units import cm
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.enums import TA_CENTER, TA_RIGHT
    except ImportError:
        session.error = "Python needs the ReportLab module installed for PDF export"
        redirect(URL(c="kit"))
    try:
        from geraldo import Report, ReportBand, SubReport, Label, \
                            ObjectValue, SystemField, landscape, BAND_WIDTH
        from geraldo.generators import PDFGenerator
    except ImportError:
        session.error = "Python needs the Geraldo module installed for PDF export"
        redirect(URL(c="kit"))

    # All kit records
    table = db.budget_kit
    objects_list = db(table.id > 0).select()
    if not objects_list:
        session.warning = T("No data in this table - cannot create PDF!")
        redirect(URL(r=request))

    from s3compat import StringIO
    output = StringIO()

    #class MySubReport(SubReport):
    #    def __init__(self, db=None, **kwargs):
    #        " Initialise parent class & make any necessary modifications "
    #        self.db = db
    #        SubReport.__init__(self, **kwargs)

    class MyReport(Report):
        """ Landscape A4 report over the kit records """
        def __init__(self, queryset=None, db=None):
            " Initialise parent class & make any necessary modifications "
            Report.__init__(self, queryset)
            self.db = db
        # can't use T() here!
        title = "Kits"
        page_size = landscape(A4)

        class band_page_header(ReportBand):
            """ Column headings, repeated on every page """
            height = 1.3 * cm
            elements = [
                SystemField(expression = "%(report_title)s",
                            top = 0.1 * cm,
                            left = 0,
                            width = BAND_WIDTH,
                            style = {"fontName": "Helvetica-Bold",
                                     "fontSize": 14,
                                     "alignment": TA_CENTER,
                                     },
                            ),
                Label(text="Code", top=0.8 * cm, left=0.2 * cm),
                Label(text="Description", top=0.8 * cm, left=2 * cm),
                Label(text="Cost", top=0.8 * cm, left=10 * cm),
                Label(text="Monthly", top=0.8 * cm, left=12 * cm),
                Label(text="per Minute", top=0.8 * cm, left=14 * cm),
                Label(text="per Megabyte", top=0.8 * cm, left=16 * cm),
                Label(text="Comments", top=0.8 * cm, left=18 * cm),
                ]
            borders = {"bottom": True}

        class band_page_footer(ReportBand):
            """ Date and page count, repeated on every page """
            height = 0.5 * cm
            elements = [
                Label(text="%s" % request.utcnow.date(), top=0.1 * cm, left=0),
                SystemField(expression = "Page # %(page_number)d of %(page_count)d",
                            top = 0.1 * cm,
                            width = BAND_WIDTH,
                            style = {"alignment": TA_RIGHT},
                            ),
                ]
            borders = {"top": True}

        class band_detail(ReportBand):
            """ One row per kit record """
            height = 0.5 * cm
            auto_expand_height = True
            elements = (
                ObjectValue(attribute_name="code", left=0.2 * cm, width=1.8 * cm),
                ObjectValue(attribute_name="description", left=2 * cm, width=8 * cm),
                ObjectValue(attribute_name="total_unit_cost", left=10 * cm, width=2 * cm),
                ObjectValue(attribute_name="total_monthly_cost", left=12 * cm, width=2 * cm),
                ObjectValue(attribute_name="total_minute_cost", left=14 * cm, width=2 * cm),
                ObjectValue(attribute_name="total_megabyte_cost", left=16 * cm, width=2 * cm),
                ObjectValue(attribute_name="comments", left=18 * cm, width=6 * cm),
                )

        subreports = [
            SubReport(
                #queryset_string = "db((db.budget_kit_item.kit_id == %(object)s.id) & (db.budget_item.id == db.budget_kit_item.item_id)).select(db.budget_item.code, db.budget_item.description, db.budget_item.unit_cost)",
                #queryset_string = "db(db.budget_kit_item.kit_id == %(object)s.id).select()",
                band_header = ReportBand(
                    height = 0.5 * cm,
                    elements = [
                        Label(text="Item ID", top=0, left=0.2 * cm,
                              style={"fontName": "Helvetica-Bold"}),
                        Label(text="Quantity", top=0, left=2 * cm,
                              style={"fontName": "Helvetica-Bold"}),
                        #Label(text="Unit Cost", top=0, left=4*cm, style={"fontName": "Helvetica-Bold"}),
                        ],
                    borders = {"top": True, "left": True, "right": True},
                    ),
                detail_band = ReportBand(
                    height = 0.5 * cm,
                    elements = [
                        ObjectValue(attribute_name="item_id", top=0, left=0.2 * cm),
                        ObjectValue(attribute_name="quantity", top=0, left=2 * cm),
                        #ObjectValue(attribute_name="unit_cost", top=0, left=4*cm),
                        ],
                    ),
                ),
            ]

    #report = MyReport(queryset=objects_list)
    report = MyReport(queryset=objects_list, db=db)
    report.generate_by(PDFGenerator, filename=output)
    output.seek(0)

    # Send the PDF as an attachment
    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
    filename = "%s_kits.pdf" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename

    return output.read()
def import_xls(uploadFile):
    """ Import Assessment Spreadsheet """

    from s3compat import StringIO

    if series_id is None:
        response.error = T("Series details missing")
        return

    openFile = StringIO()

    try:
        import xlrd
        from xlwt.Utils import cell_to_rowcol2
    except ImportError:
        current.log.error("ERROR: xlrd & xlwt modules are needed for importing spreadsheets")
        return None

    workbook = xlrd.open_workbook(file_contents=uploadFile)
    try:
        sheetR = workbook.sheet_by_name("Assessment")
        sheetM = workbook.sheet_by_name("Metadata")
    except:
        # Not the spreadsheet generated for this series
        session.error = T("You need to use the spreadsheet which you can download from this page")
        redirect(URL(c="survey", f="new_assessment", args=[],
                     vars={"viewing": "survey_series.%s" % series_id}))

    header = ""
    body = ""
    for row in xrange(1, sheetM.nrows):
        header += ',"%s"' % sheetM.cell_value(row, 0)
        code = sheetM.cell_value(row, 0)
        qstn = s3.survey_getQuestionFromCode(code, series_id)
        question_type = qstn["type"]
        count = sheetM.cell_value(row, 1)
        if count != "":
            count = int(count)
            optionList = sheetM.cell_value(row, 2).split("|#|")
        else:
            count = 1
            optionList = None
        # Choose the answer accumulator per question type
        if question_type == "Location" and optionList != None:
            answerList = {}
        elif question_type == "MultiOption":
            answerList = []
        else:
            answerList = ""
        for col in range(count):
            cell = sheetM.cell_value(row, 3 + col)
            (rowR, colR) = cell_to_rowcol2(cell)
            try:
                cellValue = sheetR.cell_value(rowR, colR)
            except IndexError:
                cellValue = ""
            # BUG: The option list needs to work in different ways
            # depending on the question type. The question type should
            # be added to the spreadsheet to save extra db calls:
            # * Location save all the data as a hierarchy
            # * MultiOption save all selections
            # * Option save the last selection
            if cellValue != "":
                if optionList != None:
                    if question_type == "Location":
                        answerList[optionList[col]] = cellValue
                    elif question_type == "MultiOption":
                        answerList.append(optionList[col])
                    else:
                        answerList = optionList[col]
                else:
                    if question_type == "Date":
                        try:
                            (dtYear, dtMonth, dtDay, dtHour, dtMinute, dtSecond) = \
                                xlrd.xldate_as_tuple(cellValue, workbook.datemode)
                            dtValue = datetime.date(dtYear, dtMonth, dtDay)
                            cellValue = dtValue.isoformat()
                        except:
                            pass
                    elif question_type == "Time":
                        try:
                            # Excel stores times as a fraction of a day
                            excel_time = cellValue
                            hour = int(excel_time * 24)
                            minute = int((excel_time * 24 - hour) * 60)
                            cellValue = "%s:%s" % (hour, minute)
                        except:
                            pass
                    answerList += "%s" % cellValue
        body += ',"%s"' % answerList

    openFile.write(header)
    openFile.write("\n")
    openFile.write(body)
    openFile.seek(0)
    return openFile
def upload_bulk():
    """
        Receive the Uploaded data from bulk_upload()
        https://github.com/valums/file-uploader/blob/master/server/readme.txt

        @return: JSON status dict for the file-uploader widget

        @ToDo: Read EXIF headers to geolocate the Photos
    """

    tablename = "doc_image"
    table = s3db[tablename]

    import cgi
    source = request.post_vars.get("qqfile", None)
    if isinstance(source, cgi.FieldStorage) and source.filename:
        # For IE6-8, Opera, older versions of other browsers you get the
        # file as you normally do with regular form-base uploads.
        name = source.filename
        image = source.file
    else:
        # For browsers which upload file with progress bar, you will need
        # to get the raw post data and write it to the file.
        if "name" in request.vars:
            name = request.vars.name
        else:
            # Fix: the original constructed this HTTP error without
            # raising it, so the request continued with "name" unbound
            raise HTTP(400, "Invalid Request: Need a Name!")

        image = request.body.read()
        # Convert to StringIO for onvalidation/import
        from s3compat import StringIO
        image = StringIO(image)
        source = Storage()
        source.filename = name
        source.file = image

    form = SQLFORM(table)
    vars = Storage()
    vars.name = name
    vars.image = source
    vars._formname = "%s_create" % tablename

    # onvalidation callback
    onvalidation = s3db.get_config(tablename, "create_onvalidation",
                                   s3db.get_config(tablename, "onvalidation"))

    if form.accepts(vars, onvalidation=onvalidation):
        msg = Storage(success=True)
        # onaccept callback
        onaccept = s3db.get_config(tablename, "create_onaccept",
                                   s3db.get_config(tablename, "onaccept"))
        from gluon.tools import callback
        callback(onaccept, form, tablename=tablename)
    else:
        error_msg = ""
        for error in form.errors:
            error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
        msg = Storage(error=error_msg)

    # This is what the file-uploader widget expects
    response.headers["Content-Type"] = "text/html"
    return json.dumps(msg)
def encode(self, resource, **attr):
    """
    API Method to encode a resource as cards

    @param resource: the S3Resource, or
                     - the data items as list [{fieldname: representation, ...}, ...], or
                     - a callable that produces such a list of items
    @param attr: additional encoding parameters (see below)

    @keyword layout: the layout (a S3PDFCardLayout subclass, overrides
                     the resource's pdf_card_layout setting
    @keyword orderby: orderby-expression for data extraction, overrides
                      the resource's orderby setting
    @keyword labels: the labels for the fields,
                     - a dict {colname: label}, or
                     - a callable that produces it,
                     - defaults to the labels of the extracted fields
    @keyword pagesize: the PDF page size,
                       - a string "A4" or "Letter", or
                       - a tuple (width, height), in points
                       - defaults to the layout's card size
    @keyword margins: the page margins,
                      - a tuple (N, E, S, W), in points, or
                      - a single number, in points
                      - will be computed if omitted
    @keyword spacing: the spacing between cards,
                      - a tuple (H, V), in points, or
                      - a single number, in points
                      - defaults to 18 points in both directions
    @keyword title: the document title,
                    - defaults to title_list crud string of the resource

    @return: a handle to the output (a BytesIO/StringIO, rewound to 0)

    @raise HTTP: 503 if ReportLab is not installed
    """

    if not REPORTLAB:
        # FIXME is this the correct handling of a dependency failure?
        raise HTTP(503, "Python ReportLab library not installed")

    # Do we operate on a S3Resource?
    is_resource = isinstance(resource, S3Resource)

    # The card layout (keyword overrides resource config, falls back to base class)
    layout = attr.get("layout")
    if layout is None and is_resource:
        layout = resource.get_config("pdf_card_layout")
    if layout is None:
        layout = S3PDFCardLayout

    # Card (and hence page) orientation
    orientation = layout.orientation
    if orientation == "Landscape":
        orientation = landscape
    else:
        orientation = portrait

    # Card and page size
    cardsize = orientation(layout.cardsize)
    pagesize = attr.get("pagesize")
    if pagesize == "A4":
        pagesize = A4
    elif pagesize == "Letter":
        pagesize = LETTER
    elif not isinstance(pagesize, (tuple, list)):
        # No/invalid page size given => page is exactly one card
        pagesize = cardsize
    pagesize = orientation(pagesize)

    # Extract the data
    if is_resource:
        # Extract the data items from the resource
        fields = layout.fields(resource)
        data = self.extract(resource, fields, orderby=attr.get("orderby"))
        items = data.rows
    elif callable(resource):
        # External getter => call with resource, returns the data items
        data = None
        items = resource()
    else:
        # The data items have been passed-in in place of the resource
        data = None
        items = resource

    # Get the labels
    labels = attr.get("labels")
    if callable(labels):
        labels = labels(resource)
    elif not isinstance(labels, dict):
        if data and hasattr(data, "rfields"):
            # Collect the labels from rfields
            rfields = data.rfields
            labels = {rfield.colname: rfield.label for rfield in rfields}
        else:
            labels = {}

    # Document title (defaults to the resource's title_list CRUD string)
    title = attr.get("title")
    if not title and is_resource:
        crud_strings = current.response.s3.crud_strings[resource.tablename]
        if crud_strings:
            title = crud_strings["title_list"]

    # Instantiate the doc template
    doc = S3PDFCardTemplate(pagesize,
                            cardsize,
                            margins = attr.get("margins"),
                            spacing = attr.get("spacing"),
                            title = title,
                            )

    # Produce the flowables
    flowables = self.get_flowables(layout,
                                   resource,
                                   items,
                                   labels = labels,
                                   cards_per_page = doc.cards_per_page,
                                   )

    # Build the doc
    output_stream = StringIO()
    doc.build(flowables,
              output_stream,
              #canvasmaker=canvas.Canvas, # is default
              )

    output_stream.seek(0)
    return output_stream
def testPOSTFilterAjax(self):
    """
    Test POST filter interpretation with JSON request body

    Verifies that a POST with $search=ajax is converted to a GET with
    the filter queries from the JSON body merged into get_vars, while
    other GET vars are retained; and that invalid/empty bodies or a
    missing $search leave get_vars untouched.
    """

    assertEqual = self.assertEqual
    assertNotIn = self.assertNotIn

    request = current.request

    # Test with valid filter expression JSON
    # (non-ASCII value included to exercise URL re-encoding)
    jsonstr = '''{"service_organisation.service_id__belongs":"1","~.example__lt":1,"~.other__like":[1,2],"~.name__like":"*Liquiçá*"}'''
    request._body = StringIO(jsonstr)
    r = S3Request(prefix = "org",
                  name = "organisation",
                  http = "POST",
                  get_vars = {"$search": "ajax", "test": "retained"},
                  )

    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars

    # $search removed from GET vars:
    assertNotIn("$search", get_vars)

    # Verify that parsed $filter vars can safely be re-encoded as GET URL
    try:
        r.url()
    except (UnicodeDecodeError, UnicodeEncodeError):
        self.fail("r.url raises Unicode exception with non-ASCII characters in $filter")

    # Filter queries from JSON body added to GET vars (always str, or list of str):
    assertEqual(get_vars.get("service_organisation.service_id__belongs"), "1")
    assertEqual(get_vars.get("~.example__lt"), "1")
    assertEqual(get_vars.get("~.other__like"), ["1", "2"])
    assertEqual(get_vars.get("~.name__like"), "*Liquiçá*")

    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test without $search
    request._body = StringIO('{"service_organisation.service_id__belongs":"1"}')
    r = S3Request(prefix = "org",
                  name = "organisation",
                  http = "POST",
                  get_vars = {"test": "retained"},
                  )

    # Method should still be POST:
    assertEqual(r.http, "POST")
    get_vars = r.get_vars

    # $search never was in GET vars - confirm this to exclude test regression
    assertNotIn("$search", get_vars)

    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)

    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test with valid JSON but invalid filter expression
    request._body = StringIO('[1,2,3]')
    r = S3Request(prefix = "org",
                  name = "organisation",
                  http = "POST",
                  get_vars = {"$search": "ajax", "test": "retained"},
                  )

    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars

    # $search removed from GET vars:
    assertNotIn("$search", get_vars)

    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)

    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")

    # Test with empty body
    request._body = StringIO('')
    r = S3Request(prefix = "org",
                  name = "organisation",
                  http = "POST",
                  get_vars = {"$search": "ajax", "test": "retained"},
                  )

    # Method changed to GET:
    assertEqual(r.http, "GET")
    get_vars = r.get_vars

    # $search removed from GET vars:
    assertNotIn("$search", get_vars)

    # Filter queries from JSON body not added to GET vars:
    assertNotIn("service_organisation.service_id__belongs", get_vars)

    # Must retain other GET vars:
    assertEqual(get_vars.get("test"), "retained")
def item_export_pdf():
    """
    Export a list of Items in Adobe PDF format
    Uses Geraldo Grouping Report (items grouped by category_type)

    @ToDo: Use S3PDF Method
    """
    try:
        from reportlab.lib.units import cm
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.enums import TA_CENTER, TA_RIGHT
    except ImportError:
        session.error = "Python needs the ReportLab module installed for PDF export"
        redirect(URL(c="item"))
    try:
        from geraldo import Report, ReportBand, ReportGroup, Label, ObjectValue, SystemField, landscape, BAND_WIDTH
        from geraldo.generators import PDFGenerator
    except ImportError:
        session.error = "Python needs the Geraldo module installed for PDF export"
        redirect(URL(c="item"))

    table = db.budget_item
    objects_list = db(table.id > 0).select(orderby=table.category_type)
    if not objects_list:
        session.warning = T("No data in this table - cannot create PDF!")
        redirect(URL(f="item"))

    from s3compat import StringIO
    output = StringIO()

    class MyReport(Report):
        def __init__(self, queryset=None, T=None):
            " Initialise parent class & make any necessary modifications "
            Report.__init__(self, queryset)
            self.T = T
        def _T(self, rawstring):
            return self.T(rawstring)
        # can't use T() here!
        #title = _T("Items")
        title = "Items"
        page_size = landscape(A4)

        class band_page_header(ReportBand):
            # Report title plus the column headings
            height = 1.3 * cm
            elements = [
                SystemField(expression="%(report_title)s", top=0.1 * cm,
                            left=0, width=BAND_WIDTH,
                            style={"fontName": "Helvetica-Bold",
                                   "fontSize": 14,
                                   "alignment": TA_CENTER}),
                Label(text="Code", top=0.8 * cm, left=0.2 * cm),
                Label(text="Description", top=0.8 * cm, left=3 * cm),
                Label(text="Unit Cost", top=0.8 * cm, left=13 * cm),
                Label(text="per Month", top=0.8 * cm, left=15 * cm),
                Label(text="per Minute", top=0.8 * cm, left=17 * cm),
                Label(text="per Megabyte", top=0.8 * cm, left=19 * cm),
                Label(text="Comments", top=0.8 * cm, left=21 * cm),
            ]
            borders = {"bottom": True}

        class band_page_footer(ReportBand):
            # Date of generation + page x of y
            height = 0.5 * cm
            elements = [
                Label(text="%s" % request.utcnow.date(), top=0.1 * cm, left=0),
                SystemField(expression="Page # %(page_number)d of %(page_count)d",
                            top=0.1 * cm, width=BAND_WIDTH,
                            style={"alignment": TA_RIGHT}),
            ]
            borders = {"top": True}

        class band_detail(ReportBand):
            # One row per budget_item record; columns align with the header labels
            height = 0.5 * cm
            auto_expand_height = True
            elements = (
                ObjectValue(attribute_name="code", left=0.2 * cm, width=2.8 * cm),
                ObjectValue(attribute_name="description", left=3 * cm, width=10 * cm),
                ObjectValue(attribute_name="unit_cost", left=13 * cm, width=2 * cm),
                ObjectValue(attribute_name="monthly_cost", left=15 * cm, width=2 * cm),
                ObjectValue(attribute_name="minute_cost", left=17 * cm, width=2 * cm),
                ObjectValue(attribute_name="megabyte_cost", left=19 * cm, width=2 * cm),
                ObjectValue(attribute_name="comments", left=21 * cm, width=6 * cm),
            )

        groups = [
            # Group by category_type (queryset is ordered by it above);
            # header shows the human-readable category name
            ReportGroup(attribute_name="category_type",
                        band_header=ReportBand(
                            height=0.7 * cm,
                            elements=[
                                ObjectValue(attribute_name="category_type",
                                            left=0, top=0.1 * cm,
                                            get_value=lambda instance:
                                                instance.category_type and
                                                budget_category_type_opts[instance.category_type],
                                            style={"fontName": "Helvetica-Bold",
                                                   "fontSize": 12})
                            ],
                            borders={"bottom": True},
                        ),
                        ),
        ]

    #report = MyReport(queryset=objects_list)
    report = MyReport(queryset=objects_list, T=T)
    report.generate_by(PDFGenerator, filename=output)
    output.seek(0)

    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
    filename = "%s_items.pdf" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename

    return output.read()
def xls(self, r, **attr):
    """
    Export the performance indicators as XLS data sheet

    @param r: the S3Request instance
    @param attr: controller attributes

    @return: XLS byte stream via response.stream

    @raise HTTP: 503 if xlwt is not installed
    """

    T = current.T
    s3db = current.s3db

    try:
        import xlwt
    except ImportError:
        raise HTTP(503, body="XLWT not installed")

    title = s3_str(T("Performance Indicators"))
    write = self.write

    # Create workbook and sheet
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(title)

    # Get the statistics
    resource = self.resource
    table = resource.table
    indicators = self.indicators(resource)

    # Title and Report Dates (from filter)
    write(sheet, 0, 0, title, style="header")
    dates = []
    get_vars = r.get_vars
    field = table.date
    for fvar in ("~.date__ge", "~.date__le"):
        dtstr = get_vars.get(fvar)
        if dtstr:
            try:
                dt = s3_decode_iso_datetime(dtstr).date()
            except (ValueError, AttributeError):
                # FIX: previously nothing was appended here, which
                # could misalign the from/to pair in the dates line
                dates.append("...")
            else:
                dates.append(field.represent(dt))
        else:
            dates.append("...")
    if dates:
        write(sheet, 1, 0, " -- ".join(dates))

    # Basic performance indicators
    rowindex = 3

    # Total number of consultations
    write(sheet, rowindex, 0, T("Total Number of Consultations"))
    write(sheet, rowindex, 1, indicators.get("total_responses", ""))
    rowindex += 1

    write(sheet, rowindex, 0, T("Total Number of Clients"))
    write(sheet, rowindex, 1, indicators.get("total_clients", ""))
    rowindex += 1

    write(sheet, rowindex, 0, T("Average Duration of Consultations (minutes)"))
    avg_hours_per_response = indicators.get("avg_hours_per_response")
    if avg_hours_per_response:
        # Stored in hours, reported in whole minutes
        avg_minutes_per_response = int(round(avg_hours_per_response * 60))
    else:
        avg_minutes_per_response = ""
    write(sheet, rowindex, 1, avg_minutes_per_response)
    rowindex += 1

    write(sheet, rowindex, 0, T("Average Number of Consultations per Client"))
    write(sheet, rowindex, 1, indicators.get("avg_responses_per_client", ""))

    # Distribution
    rowindex = 8
    write(sheet, rowindex, 0, T("Distribution of Clients"))
    write(sheet, rowindex, 1, T("Single"))
    write(sheet, rowindex, 2, indicators.get("singles", ""))
    rowindex += 1

    write(sheet, rowindex, 1, T("Family"))
    write(sheet, rowindex, 2, indicators.get("families", ""))
    rowindex += 1

    write(sheet, rowindex, 1, T("Group Counseling"))
    rowindex += 1

    write(sheet, rowindex, 1, T("Individual Counseling"))
    write(sheet, rowindex, 2, indicators.get("total_responses", ""))

    # Top-5's
    rowindex = 13
    write(sheet, rowindex, 0, T("Top 5 Countries of Origin"))
    top_5_nationalities = indicators.get("top_5_nationalities")
    if top_5_nationalities:
        dtable = s3db.pr_person_details
        field = dtable.nationality
        for rank, nationality in enumerate(top_5_nationalities):
            write(sheet, rowindex, 1,
                  "%s - %s" % (rank + 1, field.represent(nationality)))
            rowindex += 1
    rowindex += 1
    write(sheet, rowindex, 0, T("Top 5 Counseling Reasons"))
    top_5_needs = indicators.get("top_5_needs")
    if top_5_needs:
        ttable = s3db.dvr_response_theme
        field = ttable.need_id
        for rank, need in enumerate(top_5_needs):
            write(sheet, rowindex, 1,
                  "%s - %s" % (rank + 1, field.represent(need)))
            rowindex += 1

    # Output
    output = StringIO()
    book.save(output)
    output.seek(0)

    # Response headers
    from gluon.contenttype import contenttype
    disposition = "attachment; filename=\"%s\"" % "indicators.xls"
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    from gluon.streamer import DEFAULT_CHUNK_SIZE
    return response.stream(output,
                           chunk_size=DEFAULT_CHUNK_SIZE,
                           request=r,
                           )
def kit_export_xls():
    """
    Export a list of Kits in Excel XLS format
    Sheet 1 is a list of Kits
    Then there is a separate sheet per kit, listing it's component items
    """
    try:
        import xlwt
    except ImportError:
        session.error = "xlwt module not available within the running Python - this needs installing for XLS output!"
        redirect(URL(c="kit"))

    from s3compat import StringIO
    output = StringIO()

    book = xlwt.Workbook()
    # FIX: create the bold header style once instead of re-parsing
    # "font: bold True;" for every single header cell
    bold = xlwt.easyxf("font: bold True;")

    # List of Kits
    sheet1 = book.add_sheet("Kits")
    # Header row for Kits sheet
    row0 = sheet1.row(0)
    cell = 0
    table = db.budget_kit
    kits = db(table.id > 0).select()
    fields = [table[f] for f in table.fields if table[f].readable]
    for field in fields:
        row0.write(cell, field.label, bold)
        cell += 1

    # For Header row on Items sheets
    table = db.budget_item
    fields_items = [table[f] for f in table.fields if table[f].readable]

    row = 1
    for kit in kits:

        # The Kit details on Sheet 1
        rowx = sheet1.row(row)
        row += 1
        cell1 = 0
        for field in fields:
            tab, col = str(field).split(".")
            rowx.write(cell1, kit[col])
            cell1 += 1

        # Sheet per Kit detailing constituent Items
        # Replace characters which are illegal in sheetnames
        # NOTE(review): only "/" is handled; xlwt rejects several other
        # characters and names > 31 chars - confirm kit codes are safe
        sheetname = kit.code.replace("/", "_")
        sheet = book.add_sheet(sheetname)

        # Header row for Items sheet
        row0 = sheet.row(0)
        cell = 0
        for field_item in fields_items:
            row0.write(cell, field_item.label, bold)
            cell += 1

        # List Items in each Kit
        table = db.budget_kit_item
        contents = db(table.kit_id == kit.id).select()
        rowy = 1
        for content in contents:
            table = db.budget_item
            item = db(table.id == content.item_id).select().first()
            rowx = sheet.row(rowy)
            rowy += 1
            cell = 0
            for field_item in fields_items:
                tab, col = str(field_item).split(".")
                # Do lookups for option fields
                if col == "cost_type":
                    value = str(budget_cost_type_opts[item[col]])
                elif col == "category_type":
                    value = str(budget_category_type_opts[item[col]])
                else:
                    value = item[col]
                rowx.write(cell, value)
                cell += 1

    book.save(output)
    output.seek(0)

    import gluon.contenttype
    response.headers["Content-Type"] = gluon.contenttype.contenttype(".xls")
    filename = "%s_kits.xls" % (request.env.server_name)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename

    return output.read()
def push(self, task):
    """
    Extract new updates from the local database and send them to the
    peer repository (active push)

    @param task: the synchronization task (sync_task Row)

    @return: tuple (error, mtime), with error=None if successful,
             else error=message, and mtime=modification timestamp
             of the youngest record sent
    """

    repository = self.repository
    resource_name = task.resource_name
    log = repository.log
    remote = False
    output = None

    current.log.debug("S3SyncRepository.push(%s, %s)" % (repository.url,
                                                         resource_name))

    # Define the resource
    resource = current.s3db.resource(resource_name,
                                     # FTP remote deletion is not supported yet
                                     #include_deleted=True,
                                     )

    # Apply sync filters for this task
    filters = current.sync.get_filters(task.id)
    table = resource.table
    tablename = resource.tablename

    if filters:
        queries = S3URLQuery.parse(resource, filters[tablename])
        # FIX: was a list comprehension executed only for its side
        # effects; explicit loops make the intent clear
        for alias in queries:
            for query in queries[alias]:
                resource.add_filter(query)

    # Filter to records after last push
    msince = task.last_push

    if msince:
        strategy = task.strategy
        created = "create" in strategy
        updated = "update" in strategy
        if created and updated:
            mtime_filter = table.modified_on > msince
        elif created:
            mtime_filter = table.created_on > msince
        elif updated:
            # Updates to records that existed before the last push
            mtime_filter = (table.created_on <= msince) & \
                           (table.modified_on > msince)
        else:
            mtime_filter = None

        if mtime_filter:
            resource.add_filter(mtime_filter)

    mtime = resource.muntil

    # Get the ID of the resource after filter and msince
    resource_ids = resource.get_id()

    # No Changes since last push?
    if resource_ids is None:
        message = "No Changes since last push"
        result = log.WARNING
    else:
        # Filename
        settings = current.deployment_settings

        # Placeholders for filename
        placeholders = {"systemname": settings.get_system_name(),
                        "systemname_short": settings.get_system_name_short(),
                        "resource": resource_name,
                        "public_url": settings.get_base_public_url(),
                        }

        from string import Template
        filename = resource.get_config("upload_filename")
        if not filename:
            filename = settings.get_sync_upload_filename()
        filename = Template(filename).safe_substitute(s="%(systemname_short)s",
                                                      r="%(resource)s")
        filename = filename % placeholders

        # Get Representation
        representation = task.representation
        filename = ("%s.%s") % (filename, representation)

        # FTP Transfer
        remote = True
        import ftplib
        ftp_connection = self.ftp_connection

        if task.multiple_file:
            # One file per record
            # FIX: isinstance instead of type(...) is not list
            if not isinstance(resource_ids, list):
                resource_ids = [resource_ids]

            for resource_id in resource_ids:
                resource.clear_query()
                resource.add_filter(FS("id") == resource_id)

                data = self._get_data(resource, representation)
                try:
                    ftp_connection.storbinary("STOR %s" % filename,
                                              StringIO(data))
                except ftplib.error_perm:
                    message = sys.exc_info()[1]
                    result = log.ERROR
                    output = message
                else:
                    message = "FTP Transfer Successful"
                    result = log.SUCCESS
                current.log.debug(message)
        else:
            # All records in a single file
            data = self._get_data(resource, representation)
            try:
                ftp_connection.storbinary("STOR %s" % filename,
                                          StringIO(data))
            except ftplib.error_perm:
                message = sys.exc_info()[1]
                result = log.ERROR
                output = message
            else:
                message = "FTP Transfer Successful"
                result = log.SUCCESS
            current.log.debug(message)

        # Quit the connection here
        ftp_connection.quit()

    # Log the operation
    log.write(repository_id = repository.id,
              resource_name = resource_name,
              transmission = log.OUT,
              mode = log.PUSH,
              action = "send",
              remote = remote,
              result = result,
              message = message,
              )

    # Returns after operation is complete
    if output is not None:
        mtime = None
    return output, mtime
def send(cls, r, resource):
    """
    Method to retrieve updates for a subscription, render the
    notification message and send it - responds to POST?format=msg
    requests to the respective resource.

    @param r: the S3Request
    @param resource: the S3Resource

    @return: a JSON message (current.xml.json_message) describing
             success/failure per notification method
    """

    _debug = current.log.debug
    _debug("S3Notifications.send()")

    json_message = current.xml.json_message

    # Read subscription data (JSON document in the request body)
    source = r.body
    source.seek(0)
    data = source.read()
    subscription = json.loads(data)

    #_debug("Notify PE #%s by %s on %s of %s since %s" % \
    #           (subscription["pe_id"],
    #            str(subscription["method"]),
    #            str(subscription["notify_on"]),
    #            subscription["resource"],
    #            subscription["last_check_time"],
    #            ))

    # Check notification settings
    notify_on = subscription["notify_on"]
    methods = subscription["method"]
    if not notify_on or not methods:
        return json_message(message="No notifications configured "
                                    "for this subscription")

    # Authorization (pe_id must not be None)
    pe_id = subscription["pe_id"]
    if not pe_id:
        r.unauthorised()

    # Fields to extract (created_on is needed for new/updated detection)
    fields = resource.list_fields(key="notify_fields")
    if "created_on" not in fields:
        fields.append("created_on")

    # Extract the data
    data = resource.select(fields,
                           represent=True,
                           raw_data=True)
    rows = data["rows"]

    # How many records do we have?
    numrows = len(rows)
    if not numrows:
        return json_message(message="No records found")

    #_debug("%s rows:" % numrows)

    # Prepare meta-data
    get_config = resource.get_config
    settings = current.deployment_settings

    page_url = subscription["page_url"]

    crud_strings = current.response.s3.crud_strings.get(resource.tablename)
    if crud_strings:
        resource_name = crud_strings.title_list
    else:
        resource_name = string.capwords(resource.name, "_")

    last_check_time = s3_decode_iso_datetime(subscription["last_check_time"])

    email_format = subscription["email_format"]
    if not email_format:
        email_format = settings.get_msg_notify_email_format()

    filter_query = subscription.get("filter_query")

    meta_data = {"systemname": settings.get_system_name(),
                 "systemname_short": settings.get_system_name_short(),
                 "resource": resource_name,
                 "page_url": page_url,
                 "notify_on": notify_on,
                 "last_check_time": last_check_time,
                 "filter_query": filter_query,
                 "total_rows": numrows,
                 }

    # Render contents for the message template(s)
    renderer = get_config("notify_renderer")
    if not renderer:
        renderer = settings.get_msg_notify_renderer()
    if not renderer:
        renderer = cls._render

    contents = {}
    if email_format == "html" and "EMAIL" in methods:
        contents["html"] = renderer(resource, data, meta_data, "html")
        contents["default"] = contents["html"]
    if email_format != "html" or "EMAIL" not in methods or len(methods) > 1:
        # Text fallback for non-EMAIL methods and/or plain-text email
        contents["text"] = renderer(resource, data, meta_data, "text")
        contents["default"] = contents["text"]

    # Subject line
    subject = get_config("notify_subject")
    if not subject:
        subject = settings.get_msg_notify_subject()
    if callable(subject):
        subject = subject(resource, data, meta_data)

    from string import Template
    subject = Template(subject).safe_substitute(S="%(systemname)s",
                                                s="%(systemname_short)s",
                                                r="%(resource)s")
    subject = subject % meta_data

    # Attachment
    attachment = subscription.get("attachment", False)
    document_ids = None
    if attachment:
        attachment_fnc = settings.get_msg_notify_attachment()
        if attachment_fnc:
            document_ids = attachment_fnc(resource, data, meta_data)

    # **data for send_by_pe_id function in s3msg
    send_data = {}
    send_data_fnc = settings.get_msg_notify_send_data()
    if callable(send_data_fnc):
        send_data = send_data_fnc(resource, data, meta_data)

    # Helper function to find message templates from a priority list
    join = lambda *f: os.path.join(current.request.folder, *f)
    def get_msg_template(path, filenames):
        for fn in filenames:
            filepath = join(path, fn)
            if os.path.exists(filepath):
                try:
                    return open(filepath, "rb")
                except:
                    # Unreadable template => try the next candidate
                    pass
        return None

    # Render and send the message(s)
    templates = settings.get_template()
    if templates != "default" and not isinstance(templates, (tuple, list)):
        templates = (templates,)
    prefix = resource.get_config("notify_template", "notify")

    send = current.msg.send_by_pe_id

    success = False
    errors = []

    for method in methods:

        error = None

        # Get the message template (most specific filename first)
        msg_template = None
        filenames = ["%s_%s.html" % (prefix, method.lower())]
        if method == "EMAIL" and email_format:
            filenames.insert(0, "%s_email_%s.html" % (prefix, email_format))
        if templates != "default":
            # Deployment templates searched in reverse order
            for template in templates[::-1]:
                path = join("modules", "templates", template, "views", "msg")
                msg_template = get_msg_template(path, filenames)
                if msg_template is not None:
                    break
        if msg_template is None:
            # Fall back to default template
            path = join("views", "msg")
            msg_template = get_msg_template(path, filenames)
        if msg_template is None:
            # Last resort: minimal hard-coded message
            msg_template = StringIO(s3_str(current.T("New updates are available.")))

        # Select contents format
        if method == "EMAIL" and email_format == "html":
            output = contents["html"]
        else:
            output = contents["text"]

        # Render the message
        try:
            message = current.response.render(msg_template, output)
        except:
            exc_info = sys.exc_info()[:2]
            error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
            errors.append(error)
            continue
        finally:
            if hasattr(msg_template, "close"):
                msg_template.close()

        if not message:
            continue

        # Send the message
        #_debug("Sending message per %s" % method)
        #_debug(message)
        try:
            sent = send(pe_id,
                        # RFC 2822 recommends subject lines of max. 78 chars
                        subject=s3_truncate(subject, 78),
                        message=message,
                        contact_method=method,
                        system_generated=True,
                        document_ids=document_ids,
                        **send_data)
        except:
            exc_info = sys.exc_info()[:2]
            error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
            sent = False

        if sent:
            # Successful if at least one notification went out
            success = True
        else:
            if not error:
                error = current.session.error
                if isinstance(error, list):
                    error = "/".join(error)
            if error:
                errors.append(error)

    # Done
    if errors:
        message = ", ".join(errors)
    else:
        message = "Success"
    return json_message(success=success,
                        statuscode=200 if success else 403,
                        message=message)