def _datalist(self, r, widget, **attr):
    """
        Generate a data list

        @param r: the S3Request instance
        @param widget: the widget definition as dict
        @param attr: controller attributes for the request
    """

    T = current.T

    context = widget.get("context", None)
    tablename = widget.get("tablename", None)
    resource, context = self._resolve_context(r, tablename, context)

    # Config Options:
    # 1st choice: Widget
    # 2nd choice: get_config
    # 3rd choice: Default
    config = resource.get_config
    list_fields = widget.get("list_fields",
                             config("list_fields", None))
    list_layout = widget.get("list_layout",
                             config("list_layout", None))
    orderby = widget.get("orderby",
                         config("list_orderby",
                                config("orderby",
                                       ~resource.table.created_on)))

    filter = widget.get("filter", None)
    if filter:
        resource.add_filter(filter)

    # Use the widget-index to create a unique ID
    list_id = "profile-list-%s-%s" % (tablename, widget["index"])

    # Page size
    pagesize = widget.get("pagesize", 4)

    representation = r.representation
    if representation == "dl":
        # Ajax-update
        get_vars = r.get_vars
        record_id = get_vars.get("record", None)
        if record_id is not None:
            # Ajax-update of a single record
            resource.add_filter(FS("id") == record_id)
            start, limit = 0, 1
        else:
            # Ajax-update of full page
            start = get_vars.get("start", None)
            limit = get_vars.get("limit", None)
            if limit is not None:
                try:
                    start = int(start)
                    limit = int(limit)
                except ValueError:
                    start, limit = 0, pagesize
            else:
                start = None
    else:
        # Page-load
        start, limit = 0, pagesize

    # Ajax-delete items?
    if representation == "dl" and r.http in ("DELETE", "POST"):
        if "delete" in r.get_vars:
            return self._dl_ajax_delete(r, resource)
        else:
            r.error(405, current.ERROR.BAD_METHOD)

    # dataList
    datalist, numrows, ids = resource.datalist(fields=list_fields,
                                               start=start,
                                               limit=limit,
                                               list_id=list_id,
                                               orderby=orderby,
                                               layout=list_layout)

    # Render the list
    ajaxurl = r.url(vars={"update": widget["index"]},
                    representation="dl")
    data = datalist.html(ajaxurl=ajaxurl,
                         pagesize=pagesize,
                         empty=P(I(_class="icon-folder-open-alt"),
                                 BR(),
                                 S3CRUD.crud_string(tablename,
                                                    "msg_no_match"),
                                 _class="empty_card-holder"),
                         )

    if representation == "dl":
        # This is an Ajax-request, so we don't need the wrapper
        current.response.view = "plain.html"
        return data

    # Interactive only below here
    label = widget.get("label", "")
    if label:
        label = T(label)
    icon = widget.get("icon", "")
    if icon:
        icon = TAG[""](I(_class=icon), " ")

    if pagesize and numrows > pagesize:
        # Button to display the rest of the records in a Modal
        more = numrows - pagesize
        get_vars_new = {}
        if context:
            filters = context.serialize_url(resource)
            for f in filters:
                get_vars_new[f] = filters[f]
        if filter:
            filters = filter.serialize_url(resource)
            for f in filters:
                get_vars_new[f] = filters[f]
        c, f = tablename.split("_", 1)
        f = widget.get("function", f)
        url = URL(c=c, f=f, args=["datalist.popup"],
                  vars=get_vars_new)
        more = DIV(A(BUTTON("%s (%s)" % (T("see more"), more),
                            _class="btn btn-mini",
                            _type="button",
                            ),
                     _class="s3_modal",
                     _href=url,
                     _title=label,
                     ),
                   _class="more_profile")
    else:
        more = ""

    # Link for create-popup
    create_popup = self._create_popup(r,
                                      widget,
                                      list_id,
                                      resource,
                                      context,
                                      numrows)

    _class = self._lookup_class(r, widget)

    # Render the widget
    output = DIV(create_popup,
                 H4(icon, label,
                    _class="profile-sub-header"),
                 DIV(data,
                     more,
                     _class="card-holder"),
                 _class=_class)

    return output
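# Illustrative widget definition as consumed by _datalist() above (a
# sketch; the tablename and field names are hypothetical). "index" and
# "tablename" are used to build the unique list_id; the remaining keys
# override the resource's get_config() settings, which in turn override
# the defaults:
#
#   widget = {"index": 1,
#             "tablename": "project_activity",
#             "label": "Activities",
#             "icon": "icon-tasks",
#             "list_fields": ["name", "date"],
#             "pagesize": 4,    # rows shown before "see more"
#             }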
def add_event_data(self,
                   event_frame,
                   resource,
                   event_start,
                   event_end,
                   facts):
    """
        Extract event data from resource and add them to the
        event frame

        @param event_frame: the event frame
        @param resource: the resource
        @param event_start: the event start field (S3ResourceField)
        @param event_end: the event end field (S3ResourceField)
        @param facts: list of fact fields (S3ResourceField)

        @return: the extracted data (dict from S3Resource.select)
    """

    # Fields to extract
    fields = set(fact.selector for fact in facts)
    fields.add(event_start.selector)
    if event_end:
        fields.add(event_end.selector)
    fields.add(resource._id.name)

    # Filter by event frame start:
    # End date of events must be after the event frame start date
    if event_end:
        end_selector = FS(event_end.selector)
        start = event_frame.start
        query = (end_selector == None) | (end_selector >= start)
    else:
        # No point if events have no end date
        query = None

    # Filter by event frame end:
    # Start date of events must be before event frame end date
    start_selector = FS(event_start.selector)
    end = event_frame.end
    q = (start_selector == None) | (start_selector <= end)
    query = query & q if query is not None else q

    # Add as temporary filter
    resource.add_filter(query)

    # Extract the records
    data = resource.select(fields)

    # Remove the filter we just added
    resource.rfilter.filters.pop()
    resource.rfilter.query = None

    # Do we need to convert dates into datetimes?
    convert_start = True if event_start.ftype == "date" else False
    convert_end = True if event_end and event_end.ftype == "date" else False
    fromordinal = datetime.datetime.fromordinal
    convert_date = lambda d: fromordinal(d.toordinal())

    # Column names for extractions
    pkey = str(resource._id)
    start_colname = event_start.colname
    end_colname = event_end.colname if event_end else None

    # Use table name as event type
    tablename = resource.tablename

    # Create the events
    events = []
    add_event = events.append
    for row in data["rows"]:
        values = dict((fact.colname, row[fact.colname])
                      for fact in facts)
        start = row[start_colname]
        if convert_start and start:
            start = convert_date(start)
        end = row[end_colname] if end_colname else None
        if convert_end and end:
            end = convert_date(end)
        event = S3TimePlotEvent(row[pkey],
                                start=start,
                                end=end,
                                values=values,
                                event_type=tablename)
        add_event(event)

    # Extend the event frame with these events
    if events:
        event_frame.extend(events)

    return data
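# Worked example of the overlap filter above (dates hypothetical): an event
# is kept when it does not end before the frame starts and does not start
# after the frame ends, with None treated as open-ended.
#
#   frame: 2014-01-01 .. 2014-12-31
#   event A: start=2013-05-01, end=None        => kept (open end)
#   event B: start=2013-01-01, end=2013-06-30  => dropped (ends before frame)
#   event C: start=2014-11-01, end=2015-02-01  => kept (overlaps frame end)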
def duplicates(self, r, **attr):
    """
        Renders a list of all currently duplicate-bookmarked
        records in this resource, with the option to select two
        records and initiate the merge process from here

        @param r: the S3Request
        @param attr: the controller attributes for the request
    """

    s3 = current.response.s3
    session_s3 = current.session.s3

    resource = self.resource
    tablename = self.tablename

    if r.http == "POST":
        return self.merge(r, **attr)

    # Bookmarks
    record_ids = []
    DEDUPLICATE = self.DEDUPLICATE
    if DEDUPLICATE in session_s3:
        bookmarks = session_s3[DEDUPLICATE]
        if tablename in bookmarks:
            record_ids = bookmarks[tablename]
    query = FS(resource._id.name).belongs(record_ids)
    resource.add_filter(query)

    # Representation
    representation = r.representation

    # List fields
    list_fields = resource.list_fields()

    # Start/Limit
    get_vars = r.get_vars
    if representation == "aadata":
        start = get_vars.get("displayStart", None)
        limit = get_vars.get("pageLength", None)
        draw = int(get_vars.draw or 0)
    else: # catch all
        start = 0
        limit = s3.ROWSPERPAGE
    if limit is not None:
        try:
            start = int(start)
            limit = int(limit)
        except ValueError:
            start = None
            limit = None # use default
    else:
        start = None # use default

    if s3.dataTable_pageLength:
        display_length = s3.dataTable_pageLength
    else:
        display_length = 25
    if limit is None:
        limit = 2 * display_length

    # Datatable Filter
    totalrows = None
    if representation == "aadata":
        searchq, orderby, left = resource.datatable_filter(list_fields,
                                                           get_vars)
        if searchq is not None:
            totalrows = resource.count()
            resource.add_filter(searchq)
    else:
        dt_sorting = {"iSortingCols": "1",
                      "sSortDir_0": "asc"}
        if len(list_fields) > 1:
            dt_sorting["bSortable_0"] = "false"
            dt_sorting["iSortCol_0"] = "1"
        else:
            dt_sorting["bSortable_0"] = "true"
            dt_sorting["iSortCol_0"] = "0"
        orderby, left = resource.datatable_filter(list_fields,
                                                  dt_sorting)[1:]

    # Get the records
    data = resource.select(list_fields,
                           start=start,
                           limit=limit,
                           orderby=orderby,
                           left=left,
                           count=True,
                           represent=True)

    displayrows = data["numrows"]
    if totalrows is None:
        totalrows = displayrows

    # Generate a datatable
    dt = S3DataTable(data["rfields"], data["rows"])
    datatable_id = "s3merge_1"

    if representation == "aadata":
        output = dt.json(totalrows,
                         displayrows,
                         datatable_id,
                         draw,
                         dt_bulk_actions=[(current.T("Merge"),
                                           "merge", "pair-action")])

    elif representation == "html":
        # Initial HTML response
        T = current.T
        output = {"title": T("De-duplicate Records")}

        url = r.url(representation="aadata")
        #url = "/%s/%s/%s/deduplicate.aadata" % (r.application,
        #                                        r.controller,
        #                                        r.function)
        items = dt.html(totalrows,
                        displayrows,
                        datatable_id,
                        dt_ajax_url=url,
                        dt_bulk_actions=[(T("Merge"),
                                          "merge", "pair-action")],
                        dt_pageLength=display_length,
                        )
        output["items"] = items
        s3.actions = [{"label": str(T("View")),
                       "url": r.url(target="[id]", method="read"),
                       "_class": "action-btn",
                       },
                      ]

        if len(record_ids) < 2:
            output["add_btn"] = DIV(
                SPAN(T("You need to have at least 2 records in this list in order to merge them."),
                     # @ToDo: Move to CSS
                     _style="float:left;padding-right:10px;"),
                A(T("Find more"),
                  _href=r.url(method="", id=0, component_id=0, vars={})),
                )
        else:
            output["add_btn"] = DIV(
                SPAN(T("Select 2 records from this list, then click 'Merge'.")),
                )

        s3.dataTableID = [datatable_id]
        current.response.view = self._view(r, "list.html")

    else:
        r.error(501, current.ERROR.BAD_FORMAT)

    return output
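# Shape of the session bookmarks read above (a sketch with hypothetical
# values): session.s3[self.DEDUPLICATE] maps tablenames to lists of record
# IDs that users have bookmarked as duplicates, e.g.:
#
#   current.session.s3[self.DEDUPLICATE] = {"pr_person": [4, 17, 23]}
#
# With fewer than 2 bookmarked records, the view shows the "Find more"
# hint instead of offering the merge action.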
def cascade(cls, table, record_ids, rules):
    """
        Apply cascade of rules to anonymize records

        @param table: the Table
        @param record_ids: a set of record IDs
        @param rules: the rules for this Table

        @raises Exception: if the cascade failed due to DB constraints
                           or invalid rules; callers should roll back
                           the transaction if an exception is raised
    """

    s3db = current.s3db

    pkey = table._id.name

    cascade = rules.get("cascade")
    if cascade:

        fieldnames = set(rule.get("match", pkey) for _, rule in cascade)
        if pkey not in fieldnames:
            fieldnames.add(pkey)
        fields = [table[fn] for fn in fieldnames]

        db = current.db
        rows = db(table._id.belongs(record_ids)).select(*fields)

        for tablename, rule in cascade:

            lookup = rule.get("lookup")
            if lookup:
                # Explicit look-up function, call with master table+rows,
                # as well as the name of the related table; should return
                # a set/tuple/list of record ids in the related table
                ids = lookup(table, rows, tablename)
            else:
                key = rule.get("key")
                if not key:
                    continue

                field = rule.get("match", pkey)
                match = set(row[field] for row in rows)

                # Resolve key and construct query
                resource = s3db.resource(tablename, components=[])
                rq = FS(key).belongs(match)
                query = rq.query(resource)

                # Construct necessary joins
                joins = S3Joins(tablename)
                joins.extend(rq._joins(resource)[0])
                joins = joins.as_list()

                # Extract the target table IDs
                target_rows = db(query).select(resource._id,
                                               join=joins,
                                               )
                ids = set(row[resource._id.name] for row in target_rows)

            # Recurse into related table
            if ids:
                cls.cascade(s3db.table(tablename), ids, rule)

    # Apply field rules
    field_rules = rules.get("fields")
    if field_rules:
        cls.apply_field_rules(table, record_ids, field_rules)

    # Apply deletion rules
    if rules.get("delete"):
        resource = s3db.resource(table, id=list(record_ids))
        resource.delete(cascade=True)
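# Illustrative rules structure for cascade() (a sketch; table and field
# names are hypothetical, and the field-rule values are placeholders for
# whatever apply_field_rules() expects). Each "cascade" entry is a
# (tablename, rule) tuple: the rule provides either a "lookup" callable or
# a "key" (with optional "match") to find the related records, and can
# itself nest "cascade", "fields" and "delete":
#
#   rules = {"cascade": [("pr_contact", {"key": "pe_id",
#                                        "match": "pe_id",
#                                        "fields": {"value": ""},
#                                        }),
#                        ],
#            "fields": {"first_name": "-"},
#            "delete": False,
#            }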
def geojson(self, r, **attr):
    """
        Render the pivot table data as a dict ready to be exported
        as GeoJSON for display on a Map.

        @param r: the S3Request instance
        @param attr: controller attributes for the request
    """

    resource = self.resource
    response = current.response
    s3 = response.s3

    # Set response headers
    response.headers["Content-Type"] = \
        s3.content_type.get("geojson", "application/json")

    # Filter
    s3_filter = s3.filter
    if s3_filter is not None:
        resource.add_filter(s3_filter)

    if not resource.count():
        # No Data
        return json.dumps({})

    # Extract the relevant GET vars
    get_vars = r.get_vars
    layer_id = get_vars.get("layer", None)
    level = get_vars.get("level", "L0")

    # Fall back to report options defaults
    get_config = resource.get_config
    report_options = get_config("report_options", {})
    defaults = report_options.get("defaults", {})

    # The rows dimension
    context = get_config("context")
    if context and "location" in context:
        # @ToDo: We can add sanity-checking using
        #        resource.parse_bbox_query() as a guide if-desired
        rows = "(location)$%s" % level
    else:
        # Fallback to location_id
        rows = "location_id$%s" % level
        # Fallback we can add if-required
        #rows = "site_id$location_id$%s" % level

    # Filter out null values
    resource.add_filter(FS(rows) != None)

    # Set XSLT stylesheet
    stylesheet = os.path.join(r.folder, r.XSLT_PATH, "geojson", "export.xsl")

    # Do we have any data at this level of aggregation?
    fallback_to_points = True # @ToDo: deployment_setting?
    output = None
    if fallback_to_points:
        if resource.count() == 0:
            # Show Points
            resource.clear_query()
            # Apply URL filters (especially BBOX)
            resource.build_query(filter=s3_filter, vars=get_vars)

            # Extract the Location Data
            xmlformat = S3XMLFormat(stylesheet)
            include, exclude = xmlformat.get_fields(resource.tablename)
            resource.load(fields=include,
                          skip=exclude,
                          start=0,
                          limit=None,
                          orderby=None,
                          virtual=False,
                          cacheable=True)
            gis = current.gis
            attr_fields = []
            style = gis.get_style(layer_id=layer_id,
                                  aggregate=False)
            popup_format = style.popup_format
            if popup_format:
                if "T(" in popup_format:
                    # i18n
                    T = current.T
                    items = regex_translate.findall(popup_format)
                    for item in items:
                        titem = str(T(item[1:-1]))
                        popup_format = popup_format.replace("T(%s)" % item,
                                                            titem)
                    style.popup_format = popup_format
                # Extract the attr_fields
                parts = popup_format.split("{")
                # Skip the first part
                parts = parts[1:]
                for part in parts:
                    attribute = part.split("}")[0]
                    attr_fields.append(attribute)
                attr_fields = ",".join(attr_fields)

            location_data = gis.get_location_data(resource,
                                                  attr_fields=attr_fields)

            # Export as GeoJSON
            current.xml.show_ids = True
            output = resource.export_xml(fields=include,
                                         mcomponents=None,
                                         references=[],
                                         stylesheet=stylesheet,
                                         as_json=True,
                                         location_data=location_data,
                                         map_data=dict(style=style),
                                         )
            # Transformation error?
            if not output:
                r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
    else:
        while resource.count() == 0:
            # Try a lower level of aggregation
            level = int(level[1:])
            if level == 0:
                # Nothing we can display
                return json.dumps({})
            resource.clear_query()
            # Apply URL filters (especially BBOX)
            resource.build_query(filter=s3_filter, vars=get_vars)
            level = "L%s" % (level - 1)
            if context and "location" in context:
                # @ToDo: We can add sanity-checking using
                #        resource.parse_bbox_query() as a guide if-desired
                rows = "(location)$%s" % level
            else:
                # Fallback to location_id
                rows = "location_id$%s" % level
                # Fallback we can add if-required
                #rows = "site_id$location_id$%s" % level
            resource.add_filter(FS(rows) != None)

    if not output:
        # Build the Pivot Table
        cols = None
        layer = get_vars.get("fact",
                             defaults.get("fact",
                                          "count(id)"))
        m = layer_pattern.match(layer)
        selector, method = m.group(2), m.group(1)
        prefix = resource.prefix_selector
        selector = prefix(selector)
        layer = (selector, method)
        pivottable = resource.pivottable(rows, cols, [layer])

        # Extract the Location Data
        #attr_fields = []
        style = current.gis.get_style(layer_id=layer_id,
                                      aggregate=True)
        popup_format = style.popup_format
        if popup_format:
            if "T(" in popup_format:
                # i18n
                T = current.T
                items = regex_translate.findall(popup_format)
                for item in items:
                    titem = str(T(item[1:-1]))
                    popup_format = popup_format.replace("T(%s)" % item,
                                                        titem)
                style.popup_format = popup_format
            # Extract the attr_fields
            # No need as defaulted inside S3PivotTable.geojson()
            #parts = popup_format.split("{")
            ## Skip the first part
            #parts = parts[1:]
            #for part in parts:
            #    attribute = part.split("}")[0]
            #    attr_fields.append(attribute)
            #attr_fields = ",".join(attr_fields)

        ids, location_data = pivottable.geojson(layer=layer, level=level)

        # Export as GeoJSON
        current.xml.show_ids = True
        gresource = current.s3db.resource("gis_location", id=ids)
        output = gresource.export_xml(fields=[],
                                      mcomponents=None,
                                      references=[],
                                      stylesheet=stylesheet,
                                      as_json=True,
                                      location_data=location_data,
                                      # Tell the client that we are
                                      # displaying aggregated data and
                                      # the level it is aggregated at
                                      map_data=dict(level=int(level[1:]),
                                                    style=style),
                                      )
        # Transformation error?
        if not output:
            r.error(400, "XSLT Transformation Error: %s " % current.xml.error)

    return output
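# Illustrative requests served by geojson() above (paths hypothetical):
#
#   .../report.geojson?level=L2&layer=5
#       => aggregate at admin level L2; with fallback_to_points disabled,
#          the while-loop would retry L1, then L0, before returning an
#          empty GeoJSON dict
#   .../report.geojson?fact=sum(beneficiaries)
#       => layer_pattern splits the fact into method "sum" and selector
#          "beneficiaries" for the pivot table layer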
def create_event_frame(self,
                       resource,
                       event_start,
                       event_end,
                       start=None,
                       end=None,
                       slots=None):
    """
        Create an event frame for this report

        @param resource: the target resource
        @param event_start: the event start field (S3ResourceField)
        @param event_end: the event end field (S3ResourceField)
        @param start: the start date/time (string)
        @param end: the end date/time (string)
        @param slots: the slot length (string)

        @return: the event frame
    """

    now = tp_tzsafe(datetime.datetime.utcnow())

    dtparse = self.dtparse

    start_dt = end_dt = None

    STANDARD_SLOT = "1 day"

    # Parse start and end time
    if start:
        start_dt = dtparse(start, start=now)
    if end:
        relative_to = start_dt if start_dt else now
        end_dt = dtparse(end, start=relative_to)

    # Fall back to now if interval end is not specified
    if not end_dt:
        end_dt = now

    if not start_dt and event_start and event_start.field:
        # No interval start => fall back to first event start
        query = FS(event_start.selector) != None
        resource.add_filter(query)
        rows = resource.select([event_start.selector],
                               limit=1,
                               orderby=event_start.field,
                               as_rows=True)
        # Remove the filter we just added
        resource.rfilter.filters.pop()
        resource.rfilter.query = None
        if rows:
            first_event = rows.first()[event_start.colname]
            if isinstance(first_event, datetime.date):
                first_event = tp_tzsafe(datetime.datetime.fromordinal(
                                            first_event.toordinal()))
            start_dt = first_event

    if not start_dt and event_end and event_end.field:
        # No interval start => fall back to first event end minus
        # one standard slot length:
        query = FS(event_end.selector) != None
        resource.add_filter(query)
        rows = resource.select([event_end.selector],
                               limit=1,
                               orderby=event_end.field,
                               as_rows=True)
        # Remove the filter we just added
        resource.rfilter.filters.pop()
        resource.rfilter.query = None
        if rows:
            last_event = rows.first()[event_end.colname]
            if isinstance(last_event, datetime.date):
                last_event = tp_tzsafe(datetime.datetime.fromordinal(
                                           last_event.toordinal()))
            start_dt = dtparse("-%s" % STANDARD_SLOT, start=last_event)

    if not start_dt:
        # No interval start => fall back to interval end minus
        # one slot length:
        if not slots:
            slots = STANDARD_SLOT
        try:
            start_dt = dtparse("-%s" % slots, start=end_dt)
        except (SyntaxError, ValueError):
            slots = STANDARD_SLOT
            start_dt = dtparse("-%s" % slots, start=end_dt)

    if not slots:
        # No slot length => determine optimum slot length automatically
        # @todo: determine from density of events rather than
        #        total interval length?
        seconds = abs(end_dt - start_dt).total_seconds()
        day = 86400
        if seconds < day:
            slots = "hours"
        elif seconds < 3 * day:
            slots = "6 hours"
        elif seconds < 28 * day:
            slots = "days"
        elif seconds < 90 * day:
            slots = "weeks"
        elif seconds < 730 * day:
            slots = "months"
        elif seconds < 2190 * day:
            slots = "3 months"
        else:
            slots = "years"

    return S3TimePlotEventFrame(start_dt, end_dt, slots)
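# Worked example of the automatic slot selection above (dates hypothetical):
# a frame from 2014-01-01 to 2014-03-01 spans 59 days, which is at least
# 28*day but less than 90*day in seconds, so slots="weeks" is chosen; a
# 12-hour frame falls below one day and yields slots="hours".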
def add_event_data(self,
                   event_frame,
                   resource,
                   event_start,
                   event_end,
                   facts,
                   cumulative=False,
                   baseline=None):
    """
        Extract event data from resource and add them to the
        event frame

        @param event_frame: the event frame
        @param resource: the resource
        @param event_start: the event start field (S3ResourceField)
        @param event_end: the event end field (S3ResourceField)
        @param facts: list of fact fields (S3ResourceField)
        @param cumulative: whether the aggregation method is cumulative
        @param baseline: field selector to extract the baseline value
                         (e.g. for burn-down visualization)

        @return: the extracted data (dict from S3Resource.select)
    """

    # Fields to extract
    fields = set(fact.selector for fact in facts)
    if event_start:
        fields.add(event_start.selector)
    else:
        return None
    if event_end:
        fields.add(event_end.selector)
    fields.add(resource._id.name)

    # Filter by event frame start:
    # End date of events must be after the event frame start date
    if not cumulative and event_end:
        end_selector = FS(event_end.selector)
        start = event_frame.start
        query = (end_selector == None) | (end_selector >= start)
    else:
        # No point if events have no end date, and wrong if
        # method is cumulative
        query = None

    # Filter by event frame end:
    # Start date of events must be before event frame end date
    start_selector = FS(event_start.selector)
    end = event_frame.end
    q = (start_selector == None) | (start_selector <= end)
    query = query & q if query is not None else q

    # Add as temporary filter
    resource.add_filter(query)

    # Compute baseline
    value = None
    if baseline:
        try:
            rfield = resource.resolve_selector(baseline)
        except (AttributeError, SyntaxError):
            current.log.error(sys.exc_info()[1])
        else:
            if rfield.field and rfield.ftype in ("integer", "double"):
                # Don't need s3db here - if there's an rfield.field,
                # then there's also a table!
                baseline_table = current.db[rfield.tname]
                pkey = str(baseline_table._id)
                colname = rfield.colname
                rows = resource.select([baseline],
                                       groupby=[pkey, colname],
                                       as_rows=True,
                                       )
                value = 0
                for row in rows:
                    v = row[colname]
                    if v is not None:
                        value += v
            else:
                current.log.error("Invalid field type for baseline")
    event_frame.baseline = value

    # Extract the records
    data = resource.select(fields)

    # Remove the filter we just added
    resource.rfilter.filters.pop()
    resource.rfilter.query = None

    # Do we need to convert dates into datetimes?
    convert_start = True if event_start.ftype == "date" else False
    convert_end = True if event_end and event_end.ftype == "date" else False
    fromordinal = datetime.datetime.fromordinal
    convert_date = lambda d: fromordinal(d.toordinal())

    # Column names for extractions
    pkey = str(resource._id)
    start_colname = event_start.colname
    end_colname = event_end.colname if event_end else None

    # Use table name as event type
    tablename = resource.tablename

    # Create the events
    events = []
    add_event = events.append
    for row in data.rows:
        values = dict((fact.colname, row[fact.colname])
                      for fact in facts)
        start = row[start_colname]
        if convert_start and start:
            start = convert_date(start)
        end = row[end_colname] if end_colname else None
        if convert_end and end:
            end = convert_date(end)
        event = S3TimePlotEvent(row[pkey],
                                start=start,
                                end=end,
                                values=values,
                                event_type=tablename)
        add_event(event)

    # Extend the event frame with these events
    if events:
        event_frame.extend(events)

    return data
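# Sketch of a burn-down style call of the method above (selectors are
# hypothetical): with cumulative=True the frame-start filter is skipped so
# that earlier events still count towards the totals, and baseline sums a
# numeric field across the matching records as a reference value:
#
#   data = self.add_event_data(event_frame,
#                              resource,
#                              event_start,   # e.g. resolved "date" field
#                              event_end,     # e.g. resolved "end_date" field
#                              facts,
#                              cumulative=True,
#                              baseline="budget")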