def upload_bulk():
    """
        Receive the uploaded data from bulk_upload()
        https://github.com/valums/file-uploader/blob/master/server/readme.txt

        @ToDo: Read EXIF headers to geolocate the Photos
    """

    tablename = "doc_image"
    table = s3db[tablename]

    import cgi
    source = request.post_vars.get("qqfile", None)
    if isinstance(source, cgi.FieldStorage) and source.filename:
        # For IE6-8, Opera, older versions of other browsers you get the
        # file as you normally do with regular form-based uploads
        name = source.filename
        image = source.file
    else:
        # For browsers which upload file with progress bar, you will need
        # to get the raw post data and write it to the file
        if "name" in request.vars:
            name = request.vars.name
        else:
            # Fixed: the original built the HTTP exception without raising
            # it, so execution continued and hit a NameError on "name"
            raise HTTP(400, "Invalid Request: Need a Name!")
        image = request.body.read()
        # Convert to a file-like buffer for onvalidation/import
        # Fixed: the raw body is binary image data (bytes), which
        # io.StringIO rejects - a BytesIO buffer is required
        from io import BytesIO
        image = BytesIO(image)
        source = Storage()
        source.filename = name
        source.file = image

    form = SQLFORM(table)
    vars = Storage()
    vars.name = name
    vars.image = source
    vars._formname = "%s_create" % tablename

    # onvalidation callback (create-specific one takes precedence)
    onvalidation = s3db.get_config(tablename, "create_onvalidation",
                   s3db.get_config(tablename, "onvalidation"))

    if form.accepts(vars, onvalidation=onvalidation):
        msg = Storage(success = True)
        # onaccept callback (create-specific one takes precedence)
        onaccept = s3db.get_config(tablename, "create_onaccept",
                   s3db.get_config(tablename, "onaccept"))
        from gluon.tools import callback
        callback(onaccept, form) # , tablename=tablename (if we ever define callbacks as a dict with tablename)
    else:
        # Concatenate all form errors into a single message
        error_msg = ""
        for error in form.errors:
            error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
        msg = Storage(error = error_msg)

    response.headers["Content-Type"] = "text/html" # This is what the file-uploader widget expects
    return json.dumps(msg)
def upload_bulk():
    """
        Receive the uploaded data from bulk_upload()
        https://github.com/valums/file-uploader/blob/master/server/readme.txt

        @ToDo: Read EXIF headers to geolocate the Photos
    """

    tablename = "doc_image"
    table = s3db[tablename]

    import cgi
    source = request.post_vars.get("qqfile", None)
    if isinstance(source, cgi.FieldStorage) and source.filename:
        # For IE6-8, Opera, older versions of other browsers you get the
        # file as you normally do with regular form-based uploads
        name = source.filename
        image = source.file
    else:
        # For browsers which upload file with progress bar, you will need
        # to get the raw post data and write it to the file
        if "name" in request.vars:
            name = request.vars.name
        else:
            # Fixed: the original built the HTTP exception without raising
            # it, so execution continued and hit a NameError on "name"
            raise HTTP(400, "Invalid Request: Need a Name!")
        image = request.body.read()
        # Convert to StringIO for onvalidation/import
        import cStringIO
        image = cStringIO.StringIO(image)
        source = Storage()
        source.filename = name
        source.file = image

    form = SQLFORM(table)
    vars = Storage()
    vars.name = name
    vars.image = source
    vars._formname = "%s_create" % tablename

    # onvalidation callback (create-specific one takes precedence)
    onvalidation = s3db.get_config(tablename, "create_onvalidation",
                   s3db.get_config(tablename, "onvalidation"))

    if form.accepts(vars, onvalidation=onvalidation):
        msg = Storage(success = True)
        # onaccept callback (create-specific one takes precedence)
        onaccept = s3db.get_config(tablename, "create_onaccept",
                   s3db.get_config(tablename, "onaccept"))
        from gluon.tools import callback
        callback(onaccept, form, tablename=tablename)
    else:
        # Concatenate all form errors into a single message
        error_msg = ""
        for error in form.errors:
            error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
        msg = Storage(error = error_msg)

    response.headers["Content-Type"] = "text/html" # This is what the file-uploader widget expects
    return json.dumps(msg)
def dvr_case_onaccept(form):
    """
        Link the location_id to the Home Address
        (creates the Home Address if none exists yet, else updates it)

        @ToDo: Check Security of records (use S3Resource?)
    """

    form_vars = form.vars
    if "location_id" not in form_vars or not form_vars.location_id:
        return

    db = current.db
    s3db = current.s3db
    ptable = db.pr_person
    atable = s3db.pr_address

    # Look up the person's PE-ID together with any existing Home Address
    left = atable.on((atable.pe_id == ptable.pe_id) & \
                     (atable.type == 1))
    row = db(ptable.id == form_vars.person_id).select(ptable.pe_id,
                                                      atable.id,
                                                      atable.location_id,
                                                      left=left).first()
    if not row:
        return

    get_config = current.s3db.get_config
    pe_id = row["pr_person"].pe_id
    address_id = row["pr_address"].id

    if not address_id:
        # Create Home Address from location_id
        address_vars = Storage(pe_id=pe_id,
                               location_id=form_vars.location_id,
                               type=1, # Home Address
                               )
        address_id = atable.insert(**address_vars)
        address_vars.update(id=address_id)
        onaccept = get_config("pr_address", "create_onaccept") or \
                   get_config("pr_address", "onaccept")
    else:
        # Update Home Address from location_id
        db((atable.type == 1) & \
           (atable.id == address_id)).update(location_id=form_vars.location_id)
        address_vars = Storage(id=address_id,
                               pe_id=pe_id,
                               location_id=form_vars.location_id,
                               type=1, # Home Address
                               )
        onaccept = get_config("pr_address", "update_onaccept") or \
                   get_config("pr_address", "onaccept")

    # Run the pr_address onaccept with a pseudo-form
    callback(onaccept, Storage(vars=address_vars), tablename="pr_address")
    # Normally happens onvalidation:
    s3_lx_update(atable, address_id)
    return
def dvr_case_onaccept(form):
    """
        Link the location_id to the Home Address
        (creates the Home Address if none exists yet, else updates it)

        @param form: the FORM (reads vars.location_id and vars.person_id)

        @ToDo: Check Security of records (use S3Resource?)
    """
    vars = form.vars
    if "location_id" in vars and vars.location_id:
        person_id = vars.person_id
        db = current.db
        s3db = current.s3db
        ptable = s3db.pr_person
        atable = s3db.pr_address
        # Look up the person's PE-ID together with any existing
        # Home Address (address type 1)
        query = (ptable.id == person_id)
        left = atable.on((atable.pe_id == ptable.pe_id) & \
                         (atable.type == 1))
        person = db(query).select(ptable.pe_id,
                                  atable.id,
                                  atable.location_id,
                                  left=left).first()
        if person:
            # NOTE(review): current.manager.model is a legacy accessor;
            # other code in this file uses current.s3db.get_config directly
            _config = current.manager.model.get_config
            pe_id = person["pr_person"].pe_id
            if not person["pr_address"].id:
                # Create Home Address from location_id
                _vars = Storage(pe_id=pe_id,
                                location_id=vars.location_id,
                                type=1 # Home Address
                                )
                id = atable.insert(**_vars)
                _vars.update(id=id)
                # Pseudo-form to run the pr_address onaccept with
                _form = Storage(vars=_vars)
                onaccept = _config("pr_address", "create_onaccept") or \
                           _config("pr_address", "onaccept")
                callback(onaccept, _form, tablename="pr_address")
                # Normally happens onvalidation:
                current.response.s3.lx_update(atable, id)
            else:
                # Update Home Address from location_id
                id = person["pr_address"].id
                query = (atable.type == 1) & \
                        (atable.id == id)
                db(query).update(location_id = vars.location_id)
                onaccept = _config("pr_address", "update_onaccept") or \
                           _config("pr_address", "onaccept")
                _vars = Storage(id=id,
                                pe_id=pe_id,
                                location_id=vars.location_id,
                                type=1 # Home Address
                                )
                # Pseudo-form to run the pr_address onaccept with
                _form = Storage(vars=_vars)
                callback(onaccept, _form, tablename="pr_address")
                # Normally happens onvalidation:
                current.response.s3.lx_update(atable, id)
    return
def onvalidation(cls, table, record, method="create"):
    """
        Helper to run the onvalidation routine for a record

        @param table: the Table (or a tablename)
        @param record: the FORM or the Row to validate
        @param method: the method
        @return: the validation errors
    """
    # Accept either a Table or a plain tablename
    tablename = getattr(table, "_tablename", table)

    # Method-specific callback takes precedence over the generic one
    hook = cls.get_config(tablename,
                          "%s_onvalidation" % method,
                          cls.get_config(tablename, "onvalidation"))

    # Wrap a plain Row/dict in a pseudo-FORM so the callback can
    # access record.vars and record.errors uniformly
    if "vars" not in record:
        record = Storage(vars=Storage(record), errors=Storage())

    if hook:
        callback(hook, record, tablename=tablename)
    return record.errors
def postprocess_create_node(self, link, node):
    """
        Create a link table entry for a new node

        @param link: the link information (as returned from
                     preprocess_create_node)
        @param node: the new node
    """

    # Without the node's primary key there is nothing to link
    try:
        node_id = node[self.pkey.name]
    except (AttributeError, KeyError):
        return

    s3db = current.s3db
    tablename = link["linktable"]
    linktable = s3db.table(tablename)
    if not linktable:
        return

    lkey, rkey = link["lkey"], link["rkey"]
    data = {rkey: link["parent_id"],
            lkey: node_id,
            }

    # Nothing to do if the link already exists
    query = ((linktable[lkey] == data[lkey]) & \
             (linktable[rkey] == data[rkey]))
    existing = current.db(query).select(linktable._id,
                                        limitby=(0, 1)).first()
    if existing:
        return

    # Resolve the onaccept-callback (create-specific one takes
    # precedence; "is None" check deliberately kept rather than "or")
    onaccept = s3db.get_config(tablename, "create_onaccept")
    if onaccept is None:
        onaccept = s3db.get_config(tablename, "onaccept")

    # Create the link and update its super-entity links
    link_id = linktable.insert(**data)
    data[linktable._id.name] = link_id
    s3db.update_super(linktable, data)
    if link_id and onaccept:
        callback(onaccept, Storage(vars=Storage(data)))
    return
def onvalidation(cls, table, record, method="create"):
    """
        Helper to run the onvalidation routine for a record

        @param table: the Table (or a tablename)
        @param record: the FORM or the Row to validate
        @param method: the method
        @return: the validation errors
    """
    # Accept either a Table or a plain tablename
    if hasattr(table, "_tablename"):
        tablename = table._tablename
    else:
        tablename = table
    # Method-specific callback takes precedence over the generic one
    onvalidation = cls.get_config(tablename,
                                  "%s_onvalidation" % method,
                                  cls.get_config(tablename, "onvalidation"))
    # Wrap a plain Row/dict in a pseudo-FORM so the callback can access
    # record.vars and record.errors uniformly
    # NOTE(review): this variant passes the record itself as vars (no
    # copy), so the callback can mutate the caller's record - confirm
    # that is intended
    if "vars" not in record:
        record = Storage(vars=record, errors=Storage())
    if onvalidation:
        callback(onvalidation, record, tablename=tablename)
    return record.errors
def postprocess_create_node(self, link, node):
    """
        Create a link table entry for a new node

        @param link: the link information (as returned from
                     preprocess_create_node)
        @param node: the new node
    """
    # Without the node's primary key there is nothing to link
    try:
        node_id = node[self.pkey.name]
    except (AttributeError, KeyError):
        return
    s3db = current.s3db
    tablename = link["linktable"]
    linktable = s3db.table(tablename)
    if not linktable:
        return
    lkey = link["lkey"]
    rkey = link["rkey"]
    data = {rkey: link["parent_id"],
            lkey: node_id,
            }
    # Create the link if it does not already exist
    query = ((linktable[lkey] == data[lkey]) & \
             (linktable[rkey] == data[rkey]))
    row = current.db(query).select(linktable._id,
                                   limitby=(0, 1)).first()
    if not row:
        # Resolve the onaccept-callback (create-specific one takes
        # precedence; "is None" check so that non-None falsy configs count)
        onaccept = s3db.get_config(tablename, "create_onaccept")
        if onaccept is None:
            onaccept = s3db.get_config(tablename, "onaccept")
        # Create the link and update its super-entity links
        link_id = linktable.insert(**data)
        data[linktable._id.name] = link_id
        s3db.update_super(linktable, data)
        if link_id and onaccept:
            callback(onaccept, Storage(vars=Storage(data)))
    return
def process(self, form, vars,
            onvalidation=None,
            onaccept=None,
            link=None,
            http="POST"):
    """
        Process the form

        @param form: the FORM
        @param vars: the request vars to validate the form against
        @param onvalidation: the onvalidation callback(s)
        @param onaccept: the onaccept callback(s)
        @param link: component link information (with optional postprocess)
        @param http: the HTTP method
        @return: tuple (success, error)
    """
    manager = current.manager
    audit = manager.audit
    table = self.table
    record_id = self.record_id
    response = current.response

    # Get the proper onvalidation routine
    if isinstance(onvalidation, dict):
        onvalidation = onvalidation.get(self.tablename, [])

    # Append link.postprocess to onvalidation
    if link and link.postprocess:
        postprocess = link.postprocess
        if isinstance(onvalidation, list):
            onvalidation.append(postprocess)
        elif onvalidation is not None:
            onvalidation = [onvalidation, postprocess]
        else:
            onvalidation = [postprocess]

    success = True
    error = None

    formname = "%s/%s" % (self.tablename,
                          self.record_id)
    if form.accepts(vars,
                    current.session,
                    formname=formname,
                    onvalidation=onvalidation,
                    keepvalues=False,
                    hideerror=False):

        # Audit
        # NOTE(review): "format" is not defined in this method, so this
        # resolves to the builtin format() (or a global injected by the
        # web2py environment) - presumably self.representation or a
        # format parameter was intended, confirm
        prefix = self.prefix
        name = self.name
        if self.record_id is None:
            audit("create", prefix, name, form=form,
                  representation=format)
        else:
            audit("update", prefix, name, form=form,
                  record=record_id, representation=format)
        vars = form.vars

        # Update super entity links
        current.s3db.update_super(table, vars)

        # Update component link
        if link and link.postprocess is None:
            resource = link.resource
            master = link.master
            resource.update_link(master, vars)

        if vars.id:
            if record_id is None:
                # Set record ownership (create only)
                auth = current.auth
                auth.s3_set_record_owner(table, vars.id)
                auth.s3_make_session_owner(table, vars.id)
            # Store session vars
            self.resource.lastid = str(vars.id)
            manager.store_session(prefix, name, vars.id)

        # Execute onaccept
        callback(onaccept, form, tablename=self.tablename)

    else:
        success = False

        if form.errors:
            # IS_LIST_OF validation errors need special handling
            errors = []
            table = self.table
            for fieldname in form.errors:
                if fieldname in table and \
                   isinstance(table[fieldname].requires, IS_LIST_OF):
                    errors.append("%s: %s" % (fieldname,
                                              form.errors[fieldname]))
            if errors:
                error = "\n".join(errors)

        elif http == "POST":
            # Invalid form
            error = current.T("Invalid form (re-opened in another window?)")

    return success, error
def __call__(self,
             cascade=False,
             replaced_by=None,
             skip_undeletable=False,
             ):
    """
        Main deletion process, deletes/archives all records
        in the resource

        @param cascade: this is called as a cascade-action from another
                        process (e.g. another delete)
        @param skip_undeletable: delete whatever is possible, skip
                                 undeletable rows
        @param replaced_by: dict of {replaced_id: replacement_id}, used
                            by record merger to log which record has
                            replaced which

        @return: the number of records deleted
    """

    # Must not re-use instance
    if self._done:
        raise RuntimeError("deletion already processed")
    self._done = True

    tablename = self.tablename

    # Check the entire cascade, rather than breaking out after the
    # first error - debug-only, this can be many errors
    # (NB ?debug=1 alone won't help if logging is off in 000_config.py)
    check_all = current.response.s3.debug

    # Look up all rows that are to be deleted
    rows = self.extract()
    if not rows:
        # No rows to delete
        # => not always an error (caller must decide this)
        # log anyway to assist caller debugging
        if not cascade:
            current.log.debug("Delete %s: no rows found" % tablename)
        return 0
    else:
        first = rows[0]
        if hasattr(first, tablename) and isinstance(first[tablename], Row):
            # Rows are the result of a join (due to extra_fields)
            joined = True
        else:
            joined = False

    table = self.table
    pkey = table._id.name
    add_error = self.add_error

    # Check permissions and prepare records
    has_permission = current.auth.s3_has_permission
    prepare = self.prepare
    records = []
    for row in rows:
        record = getattr(row, tablename) if joined else row
        record_id = record[pkey]
        # Check permissions
        if not has_permission("delete", table, record_id=record_id):
            self.permission_error = True
            add_error(record_id, "not permitted")
            continue
        # Run table-specific ondelete_cascade
        if prepare:
            try:
                callback(prepare, record) # , tablename=tablename (if we ever define callbacks as a dict with tablename)
            except Exception:
                # Exception indicates record is undeletable
                add_error(record_id, sys.exc_info()[1])
                continue
        records.append(record)

    # Identify deletable records
    deletable = self.check_deletable(records, check_all=check_all)

    # If on cascade or not skipping undeletable rows: exit immediately
    if self.errors and (cascade or not skip_undeletable):
        self.set_resource_error()
        if not cascade:
            self.log_errors()
        return 0

    # Delete the records
    db = current.db
    audit = current.audit
    resource = self.resource
    prefix, name = resource.prefix, resource.name
    ondelete = self.ondelete
    delete_super = current.s3db.delete_super

    num_deleted = 0
    for row in deletable:
        record_id = row[pkey]
        success = True
        if self.archive:
            # Run automatic deletion cascade
            success = self.cascade(row, check_all=check_all)
        if success:
            # Unlink all super-records
            success = delete_super(table, row)
            if not success:
                add_error(record_id, "super-entity deletion failed")
        if success:
            # Auto-delete linked record if appropriate
            self.auto_delete_linked(row)
            # Archive/delete the row itself
            if self.archive:
                success = self.archive_record(row, replaced_by=replaced_by)
            else:
                success = self.delete_record(row)
        if success:
            # Postprocess delete

            # Clear session
            if s3_get_last_record_id(tablename) == record_id:
                s3_remove_last_record_id(tablename)

            # Audit
            audit("delete", prefix, name,
                  record=record_id,
                  representation=self.representation,
                  )

            # On-delete hook
            if ondelete:
                callback(ondelete, row)

            # Subsequent cascade errors would roll back successful
            # deletions too => we want to prevent that when skipping
            # undeletable rows, so commit here if this is the master
            # process
            if not cascade and skip_undeletable:
                db.commit()
            num_deleted += 1

        elif not cascade:
            # Master process failure
            db.rollback()
            self.log_errors()
            if skip_undeletable:
                # Try next row
                continue
            else:
                # Exit immediately
                break
        else:
            # Cascade failure, no point to try any other row
            # - will be rolled back by master process
            break

    self.set_resource_error()
    return num_deleted
def merge_form(self, merge_id, source,
               onvalidation=None,
               onaccept=None,
               message="Records merged",
               format=None):
    """
        DRY helper function for SQLFORM in Merge

        @param merge_id: the record ID of the merge target
        @param source: the source record values (field name => value)
        @param onvalidation: the onvalidation callback for the form
        @param onaccept: the onaccept callback for the form
        @param message: the flash message upon successful merge
        @param format: the representation format (for audit)
        @return: the FORM
    """

    # Environment
    db = self.db
    session = self.manager.session
    request = self.manager.request
    response = self.manager.response

    # Get the CRUD settings
    audit = self.manager.audit
    settings = self.settings

    # Table and model
    prefix = self.prefix
    name = self.name
    tablename = self.tablename
    table = self.table
    model = self.manager.model

    # Add asterisk to labels of required fields
    labels = Storage()
    mark_required = self._config("mark_required")
    response.s3.has_required = False
    for field in table:
        if not field.writable:
            continue
        required = field.required or \
                   field.notnull or \
                   mark_required and field.name in mark_required
        validators = field.requires
        if not validators and not required:
            continue
        if not required:
            # A field also counts as required if empty input
            # fails its validation
            if not isinstance(validators, (list, tuple)):
                validators = [validators]
            for v in validators:
                if hasattr(v, "options"):
                    if hasattr(v, "zero") and v.zero is None:
                        continue
                val, error = v("")
                if error:
                    required = True
                    break
        if required:
            response.s3.has_required = True
            labels[field.name] = DIV("%s:" % field.label,
                                     SPAN(" *", _class="req"))

    # Represent the source values for the merge display
    # Fixed: the target record is loop-invariant, select it once
    # rather than once per field; also removed a leftover debug print
    row = db(table.id == merge_id).select(limitby=(0, 1)).first()
    for f in source:
        if row is None or f not in row:
            continue
        if table[f].represent is not None:
            value = table[f].represent(source[f])
        else:
            value = str(source[f])
        # NOTE(review): "comment" is built but never used - appears to
        # be intended for the form's col3 merge display, confirm before
        # removing entirely
        comment = DIV(INPUT(_type="hidden", _value=row[f]),
                      value)

    # Get formstyle from settings
    formstyle = self.settings.formstyle

    # Get the form
    form = SQLFORM(table,
                   record = merge_id,
                   col3 = dict(), # using this for the copy button+merge data
                   deletable = False,
                   showid = False,
                   upload = self.download_url,
                   labels = labels,
                   formstyle = formstyle,
                   submit_button = self.settings.submit_button)

    # Process the form
    logged = False

    # Set form name
    formname = "%s/%s" % (self.tablename, form.record_id)

    # Get the proper onvalidation routine
    if isinstance(onvalidation, dict):
        onvalidation = onvalidation.get(self.tablename, [])

    if form.accepts(request.post_vars, session,
                    formname=formname,
                    onvalidation=onvalidation,
                    keepvalues=False,
                    hideerror=False):

        # Message
        response.flash = message

        # Audit
        if merge_id is None:
            audit("create", prefix, name, form=form,
                  representation=format)
        else:
            audit("update", prefix, name, form=form,
                  record=merge_id, representation=format)
        logged = True

        # Update super entity links
        model.update_super(table, form.vars)

        # Store session vars
        if form.vars.id:
            # Fixed: was "record_id is None", but record_id is not
            # defined anywhere in this method (NameError) - merge_id
            # holds the record ID here, matching the audit check above
            if merge_id is None:
                self.manager.auth.s3_make_session_owner(table, form.vars.id)
            self.resource.lastid = str(form.vars.id)
            self.manager.store_session(prefix, name, form.vars.id)

        # Execute onaccept
        callback(onaccept, form, tablename=tablename)

    if not logged and not form.errors:
        audit("read", prefix, name,
              record=merge_id, representation=format)

    return form
def __call__(self,
             cascade=False,
             replaced_by=None,
             skip_undeletable=False):
    """
        Main deletion process, deletes/archives all records
        in the resource

        @param cascade: this is called as a cascade-action from another
                        process (e.g. another delete)
        @param skip_undeletable: delete whatever is possible, skip
                                 undeletable rows
        @param replaced_by: dict of {replaced_id: replacement_id}, used
                            by record merger to log which record has
                            replaced which

        @return: the number of records deleted
    """

    # Must not re-use instance
    if self._done:
        raise RuntimeError("deletion already processed")
    self._done = True

    tablename = self.tablename

    # Check the entire cascade, rather than breaking out after the
    # first error - debug-only, this can be many errors
    # (NB ?debug=1 alone won't help if logging is off in 000_config.py)
    check_all = current.response.s3.debug

    # Look up all rows that are to be deleted
    rows = self.extract()
    if not rows:
        # No rows to delete
        # => not an error, but log anyway to assist caller debugging
        if not cascade:
            current.log.debug("Delete %s: no rows found" % tablename)
        return 0
    else:
        first = rows[0]
        if hasattr(first, tablename) and isinstance(first[tablename], Row):
            # Rows are the result of a join (due to extra_fields)
            joined = True
        else:
            joined = False

    table = self.table
    pkey = table._id.name
    add_error = self.add_error

    # Check permissions and prepare records
    has_permission = current.auth.s3_has_permission
    prepare = self.prepare
    records = []
    for row in rows:
        record = getattr(row, tablename) if joined else row
        record_id = record[pkey]
        # Check permissions
        if not has_permission("delete", table, record_id=record_id):
            self.permission_error = True
            add_error(record_id, "not permitted")
            continue
        # Run table-specific ondelete-cascade
        if prepare:
            try:
                callback(prepare, record, tablename=tablename)
            except Exception:
                # Exception indicates record is undeletable
                add_error(record_id, sys.exc_info()[1])
                continue
        records.append(record)

    # Identify deletable records
    deletable = self.check_deletable(records, check_all=check_all)

    # If on cascade or not skipping undeletable rows: exit immediately
    if self.errors and (cascade or not skip_undeletable):
        self.set_resource_error()
        if not cascade:
            self.log_errors()
        return 0

    # Delete the records
    db = current.db
    audit = current.audit
    resource = self.resource
    prefix, name = resource.prefix, resource.name
    ondelete = self.ondelete
    delete_super = current.s3db.delete_super

    num_deleted = 0
    for row in deletable:
        record_id = row[pkey]
        success = True
        if self.archive:
            # Run automatic deletion cascade
            success = self.cascade(row, check_all=check_all)
        if success:
            # Unlink all super-records
            success = delete_super(table, row)
            if not success:
                add_error(record_id, "super-entity deletion failed")
        if success:
            # Auto-delete linked record if appropriate
            self.auto_delete_linked(row)
            # Archive/delete the row itself
            if self.archive:
                success = self.archive_record(row, replaced_by=replaced_by)
            else:
                success = self.delete_record(row)
        if success:
            # Postprocess delete

            # Clear session
            if s3_get_last_record_id(tablename) == record_id:
                s3_remove_last_record_id(tablename)

            # Audit
            audit("delete", prefix, name,
                  record = record_id,
                  representation = self.representation,
                  )

            # On-delete hook
            if ondelete:
                callback(ondelete, row)

            # Subsequent cascade errors would roll back successful
            # deletions too => we want to prevent that when skipping
            # undeletable rows, so commit here if this is the master
            # process
            if not cascade and skip_undeletable:
                db.commit()
            num_deleted += 1

        elif not cascade:
            # Master process failure
            db.rollback()
            self.log_errors()
            if skip_undeletable:
                # Try next row
                continue
            else:
                # Exit immediately
                break
        else:
            # Cascade failure, no point to try any other row
            # - will be rolled back by master process
            break

    self.set_resource_error()
    return num_deleted
def process(self, form, vars,
            onvalidation = None,
            onaccept = None,
            link = None,
            http = "POST"):
    """
        Process the form

        @param form: the FORM
        @param vars: the request vars to validate the form against
        @param onvalidation: the onvalidation callback(s)
        @param onaccept: the onaccept callback(s)
        @param link: component link information (with optional postprocess)
        @param http: the HTTP method
        @return: tuple (success, error)
    """
    manager = current.manager
    audit = manager.audit
    table = self.table
    record_id = self.record_id
    response = current.response

    # Get the proper onvalidation routine
    if isinstance(onvalidation, dict):
        onvalidation = onvalidation.get(self.tablename, [])

    # Append link.postprocess to onvalidation
    if link and link.postprocess:
        postprocess = link.postprocess
        if isinstance(onvalidation, list):
            onvalidation.append(postprocess)
        elif onvalidation is not None:
            onvalidation = [onvalidation, postprocess]
        else:
            onvalidation = [postprocess]

    success = True
    error = None

    formname = "%s/%s" % (self.tablename,
                          self.record_id)
    if form.accepts(vars,
                    current.session,
                    formname=formname,
                    onvalidation=onvalidation,
                    keepvalues=False,
                    hideerror=False):

        # Audit
        # NOTE(review): "format" is not defined in this method, so this
        # resolves to the builtin format() (or a global injected by the
        # web2py environment) - presumably self.representation or a
        # format parameter was intended, confirm
        prefix = self.prefix
        name = self.name
        if self.record_id is None:
            audit("create", prefix, name, form=form,
                  representation=format)
        else:
            audit("update", prefix, name, form=form,
                  record=record_id, representation=format)
        vars = form.vars

        # Update super entity links
        current.s3db.update_super(table, vars)

        # Update component link
        if link and link.postprocess is None:
            resource = link.resource
            master = link.master
            resource.update_link(master, vars)

        if vars.id:
            if record_id is None:
                # Set record ownership (create only)
                auth = current.auth
                auth.s3_set_record_owner(table, vars.id)
                auth.s3_make_session_owner(table, vars.id)
            # Store session vars
            self.resource.lastid = str(vars.id)
            manager.store_session(prefix, name, vars.id)

        # Execute onaccept
        callback(onaccept, form, tablename=self.tablename)

    else:
        success = False

        if form.errors:
            # IS_LIST_OF validation errors need special handling
            errors = []
            table = self.table
            for fieldname in form.errors:
                if fieldname in table and \
                   isinstance(table[fieldname].requires, IS_LIST_OF):
                    errors.append("%s: %s" % (fieldname,
                                              form.errors[fieldname]))
            if errors:
                error = "\n".join(errors)

        elif http == "POST":
            # Invalid form
            error = current.T("Invalid form (re-opened in another window?)")

    return success, error