def _vw_opt_flds_select_json(self, opt_tbl, vw_opt_fields):
    """
    Build SQL SELECT fragments for an option table, JSON-aware variant.

    For a 'list'-type option table the whole row is serialized with
    PostgreSQL's row_to_json() and exposed as a single json Field named
    after the view prefix.  Otherwise each field is aliased individually,
    mirroring _vw_opt_flds_select.

    Side effect: appends the produced Field objects to vw_opt_fields and
    caches computed aliases on each field's _entopt.vw_name.
    Returns the list of SQL select-expression strings.
    """
    ret = []
    args = Storage( rname = opt_tbl._rname or opt_tbl._tablename, tbl= opt_tbl._entopt.vw_pref or opt_tbl._tablename )
    if opt_tbl._entopt.type=='list':
        # whole-row JSON aggregation; alias is the table's view prefix
        name_str_json = 'row_to_json(%(rname)s.*) AS %(vw_name)s'
        args.vw_name = args.tbl
        ret.append(name_str_json % args)
        # synthetic json field standing in for the whole option row
        fld = Field(args.vw_name, 'json')
        fld._entopt=Storage(vw_name = args.vw_name, is_optrow = True)
        vw_opt_fields.append(fld)
    else:
        name_str = '%(rname)s.%(fld)s AS %(vw_name)s'
        for fld in opt_tbl:
            # the FK back to the owning table is never exposed in the view
            if fld is opt_tbl._entopt.own_FK: continue
            args.fld = fld.name
            if not hasattr(fld, '_entopt'): fld._entopt = Storage()
            if fld._entopt.vw_name:
                args.vw_name = fld._entopt.vw_name
            else:
                # compute '<prefix>_<field>' once and cache it on the field
                args.vw_name = fld._entopt.vw_name = '%(tbl)s_%(fld)s' % args
            vw_opt_fields.append(fld)
            ret.append(name_str % args)
    return ret
def _vw_opt_fld_name(fld):
    """Return the view-column alias for *fld*: its explicit
    _entopt.vw_name when set, otherwise '<table-prefix>_<fieldname>'
    where the prefix is the table's _entopt.vw_pref or its name."""
    owning_tbl = fld._table
    if hasattr(owning_tbl, '_entopt') and owning_tbl._entopt.vw_pref:
        prefix = owning_tbl._entopt.vw_pref
    else:
        prefix = owning_tbl._tablename
    explicit = fld._entopt.vw_name if hasattr(fld, '_entopt') else None
    if explicit:
        return '%s' % explicit
    return '%s_%s' % (prefix, fld.name)
def _get_settings(self):
    """
    Retrieve all rows of the settings table and cache them on the
    instance as a two-level Storage: self.settings[key][name] -> row.
    """
    settings = Storage()
    rows = self.db(self.db.settings.id > 0).select(cache=self.cache)
    for row in rows:
        # dict.has_key() was removed in Python 3; 'in' works in both 2 and 3
        if row.key not in settings:
            settings[row.key] = Storage()
        settings[row.key][row.name] = row
    self.settings = settings
def task_migrate_vm(auth, vmid, live=False, destination=None):
    """Placeholder migration task: decodes the credential blob and opens
    a Baadal connection, but neither the live nor the cold migration
    path is implemented yet."""
    credentials = Storage(loads(b64decode(auth)))
    conn = Baadal.Connection(_authurl, _tenant, credentials.u, credentials.p)
    # TODO: implement live migration (live=True) and cold migration
    # (live=False) of `vmid` to `destination`.
def update(self, id, **kwargs):
    """
    Update one aggregate record (own table + its option tables) by id.

    Recurses into the parent entity first (if any), writes a history
    snapshot, then updates the own-table columns and finally, for each
    option table, updates / deletes / inserts option rows as needed.

    Returns a Storage with per-table results: ret.own (update count),
    ret.parent (recursive result) and, per option table, Storage(
    upd_ids, del_ids, ins_ids).

    Raises RuntimeError when an existing option row cannot be read back
    or belongs to a different owner (alien FK).
    """
    ret = Storage()
    if self.parent:
        ret.parent = self.parent.update(id,**kwargs)
    # snapshot current state (also takes a FOR UPDATE lock on the row)
    self._write_history(id)
    db = self.db
    splited_args = self._split_args(kwargs)
    # update own
    if splited_args['own_args']:
        ret.own = db(self.own.id==id).update(**splited_args['own_args'])
    # update opt
    opt_args = splited_args['opt_args']
    if opt_args:
        for opt_tbl in opt_args:
            rows_to_update = opt_args[opt_tbl]
            if not rows_to_update: continue
            # normalize a single row dict into a one-element tuple
            rows_to_update = isinstance(rows_to_update, (list, tuple)) and rows_to_update \
                or (rows_to_update,)
            own_FK_name = opt_tbl._entopt.own_FK.name
            ret_opt = ret[opt_tbl._tablename] = Storage(upd_ids=[], del_ids=[], ins_ids=[])
            for row in rows_to_update:
                row[own_FK_name] = id
                # 'one'-type options share the owner's id when no explicit id given
                rec_id = row.get('id', None) or (opt_tbl._entopt.type=='one' and id)
                rec = opt_tbl(rec_id)
                if rec: # delete or update
                    rec = rec.as_dict()
                    if not rec:
                        raise RuntimeError('option update err: '+ opt_tbl._tablename+'[%s]'%rec_id+' - not found')
                    if rec[own_FK_name]!=id:
                        raise RuntimeError('option update err: '+ opt_tbl._tablename+'[%s]'%rec_id+' - has alien FK')
                    rec.update(row)
                    # a row whose payload became all-null is removed instead of kept empty
                    if self.is_all_null(rec, ('id', own_FK_name)):
                        db(opt_tbl._id==rec_id).delete()
                        ret_opt.del_ids.append(rec_id)
                    else:
                        db(opt_tbl._id==rec_id).update(**rec)
                        ret_opt.upd_ids.append(rec_id)
                elif not self.is_all_null(row, ('id', own_FK_name)): # insert
                    ret_opt.ins_ids.append(opt_tbl.insert(**row))
    return ret
def new_vm():
    """
    Controller: handle a new-VM request form submission.

    Validates the form, inserts a vm_requests row (auto-approved for
    faculty / project admins, otherwise pending approval), notifies the
    requester by mail and, when approval is needed, also the approver.
    The DB transaction is committed only if all mails were sent;
    otherwise it is rolled back.  Returns a JSON status payload.
    """
    request_time = time.time()
    fields = validate_vm_request_form(request.vars)
    approver_mail_required = False
    mail2 = True  # stays True when no approver mail is needed
    if len(fields):
        # validation failures are reported per-field with HTTP 400
        raise HTTP(400, body=jsonify(status='fail', fields=fields))
    try:
        if (ldap.user_is_faculty(session.username)) or \
           (user_is_project_admin):
            # faculty / project admins own their request and it is pre-approved
            owner_id = session.username
            vm_state = 1
        else:
            # students: the chosen faculty becomes owner and must approve
            owner_id = request.vars.faculty
            approver_mail_required = True
            vm_state = 0
        public_ip_required = 1 if request.vars.public_ip == 'yes' else 0
        db.vm_requests.insert(vm_name=request.vars.vm_name,
                              flavor=request.vars.config,
                              sec_domain=request.vars.sec_domain,
                              image=request.vars.template,
                              owner=owner_id,
                              requester=session.username,
                              purpose=request.vars.purpose,
                              public_ip_required=public_ip_required,
                              extra_storage=request.vars.storage,
                              collaborators=request.vars.collaborators,
                              request_time=request_time,
                              state=vm_state)
        # mail context for the confirmation to the requester
        context = Storage()
        user_info = ldap.fetch_user_info(session.username)
        context.username = user_info['user_name']
        user_email = user_info['user_email']
        context.vm_name = request.vars.vm_name
        context.mail_support = mail_support
        mail1 = mailer.send(mailer.MailTypes.VMRequest, user_email, context)
        if approver_mail_required:
            # reuse the same context object for the approver reminder
            user_info = ldap.fetch_user_info(request.vars.faculty)
            context.approver = user_info['user_name']
            user_email = user_info['user_email']
            context.request_type = 'New VM'
            context.request_time = seconds_to_localtime(request_time)
            mail2 = mailer.send(mailer.MailTypes.ApprovalReminder, user_email, context)
        if mail1 and mail2:
            db.commit()
            return jsonify()
        else:
            # undo the insert if any notification failed
            db.rollback()
            raise Exception('Email sending failed')
    except Exception as e:
        # NOTE: e.message is Python-2-only
        logger.exception(e.message or str(e.__class__))
        return jsonify(status='fail', message=e.message or str(e.__class__))
def _vw_opt_flds_select(self, opt_tbl, vw_opt_fields):
    """Build '<rname>.<field> AS <alias>' SELECT fragments for every
    field of *opt_tbl* except its FK back to the owning table.

    Aliases default to '<prefix>_<fieldname>' and are cached on each
    field's _entopt.vw_name.  The processed Field objects are appended
    to vw_opt_fields (mutated in place); the fragments are returned.
    """
    source = opt_tbl._rname or opt_tbl._tablename
    prefix = opt_tbl._entopt.vw_pref or opt_tbl._tablename
    fragments = []
    for fld in opt_tbl:
        if fld is opt_tbl._entopt.own_FK:
            continue  # never expose the owner back-reference
        if not hasattr(fld, '_entopt'):
            fld._entopt = Storage()
        alias = fld._entopt.vw_name
        if not alias:
            alias = fld._entopt.vw_name = '%s_%s' % (prefix, fld.name)
        vw_opt_fields.append(fld)
        fragments.append('%s.%s AS %s' % (source, fld.name, alias))
    return fragments
def task_delete_snapshot(auth, vmid, snapshot_id):
    """
    Delete the snapshot image identified by snapshot_id.

    auth is a base64-encoded serialized credential mapping with keys
    'u' (user) and 'p' (password).  Failures are logged, not re-raised,
    so the scheduler keeps running.
    """
    auth = Storage(loads(b64decode(auth)))
    try:
        conn = Baadal.Connection(_authurl, _tenant, auth.u, auth.p)
        image = conn.find_image(id=snapshot_id)
        # return value of delete() was previously bound to an unused local
        image.delete()
        logger.info('Snapshot deleted: VMID %s, snapshot_id %s' % \
                    (vmid, snapshot_id))
    except Exception as e:
        logger.exception(e)
def comp_rec():
    """
    Controller: return company records for a w2ui-style grid request.

    Reads optional sort parameters from the request body; 'recid' maps
    to the primary key, 'desc' direction negates the orderby.  Returns a
    Storage with status, total and records.
    """
    js_vars = jq_param_w2p(request.body)
    if js_vars and js_vars.sort:
        sort_fld = js_vars.sort[0].field
        orderby = sort_fld =='recid' and db.company._id or db.company[sort_fld]
        orderby = js_vars.sort[0].direction=='asc' and orderby or ~orderby
    else:
        orderby = company.own.id
    ret = Storage( status= 'success', total=0 )
    rows = db(db.company).select(orderby=orderby)
    ret.total = len(rows)
    ret.records= rows #.as_json()
    # removed leftover debug output ("print 'comp_rec ', js_vars"):
    # it polluted stdout and used Python-2-only print-statement syntax
    return ret
def task_restore_snapshot(auth, vmid, snapshot_id):
    """Roll the Baadal VM *vmid* back to snapshot *snapshot_id*.

    auth is a base64-encoded serialized credential mapping ('u'/'p').
    Errors are logged rather than propagated.
    """
    credentials = Storage(loads(b64decode(auth)))
    try:
        conn = Baadal.Connection(_authurl, _tenant, credentials.u, credentials.p)
        target_vm = conn.find_baadal_vm(id=vmid)
        target_vm.restore_snapshot(snapshot_id)
        logger.info('Snapshot restored: VMID %s, snapshot_id %s' % (vmid, snapshot_id))
    except Exception as e:
        logger.exception(e)
def new_vm():
    """
    Controller: handle a new-VM request form submission.

    Validates the form, inserts a vm_requests row (auto-approved for
    faculty / project admins, otherwise pending approval), notifies the
    requester by mail and, when approval is needed, also the approver.
    The DB transaction is committed only if all mails were sent;
    otherwise it is rolled back.  Returns a JSON status payload.
    """
    request_time = time.time()
    fields = validate_vm_request_form(request.vars)
    approver_mail_required = False
    mail2 = True  # stays True when no approver mail is needed
    if len(fields):
        # validation failures are reported per-field with HTTP 400
        raise HTTP(400,body=jsonify(status='fail', fields=fields))
    try:
        if (ldap.user_is_faculty(session.username)) or \
           (user_is_project_admin):
            # faculty / project admins own their request and it is pre-approved
            owner_id = session.username
            vm_state = 1
        else:
            # students: the chosen faculty becomes owner and must approve
            owner_id = request.vars.faculty
            approver_mail_required = True
            vm_state = 0
        public_ip_required = 1 if request.vars.public_ip == 'yes' else 0
        db.vm_requests.insert(vm_name=request.vars.vm_name,
                              flavor=request.vars.config,
                              sec_domain=request.vars.sec_domain,
                              image=request.vars.template,
                              owner=owner_id,
                              requester=session.username,
                              purpose=request.vars.purpose,
                              public_ip_required=public_ip_required,
                              extra_storage=request.vars.storage,
                              collaborators=request.vars.collaborators,
                              request_time=request_time,
                              state=vm_state )
        # mail context for the confirmation to the requester
        context = Storage()
        user_info = ldap.fetch_user_info(session.username)
        context.username = user_info['user_name']
        user_email = user_info['user_email']
        context.vm_name = request.vars.vm_name
        context.mail_support = mail_support
        mail1 = mailer.send(mailer.MailTypes.VMRequest, user_email, context)
        if approver_mail_required:
            # reuse the same context object for the approver reminder
            user_info = ldap.fetch_user_info(request.vars.faculty)
            context.approver = user_info['user_name']
            user_email = user_info['user_email']
            context.request_type = 'New VM'
            context.request_time = seconds_to_localtime(request_time)
            mail2 = mailer.send(mailer.MailTypes.ApprovalReminder, user_email, context)
        if mail1 and mail2:
            db.commit()
            return jsonify()
        else:
            # undo the insert if any notification failed
            db.rollback()
            raise Exception('Email sending failed')
    except Exception as e:
        # NOTE: e.message is Python-2-only
        logger.exception(e.message or str(e.__class__))
        return jsonify(status='fail', message=e.message or str(e.__class__))
def _write_history(self, q_or_id):
    """
    Snapshot the aggregate rows selected by *q_or_id* into the history
    table before they are modified.

    q_or_id is either a record id (int/long) or a DAL query.  The
    matching own-table rows are first locked with SELECT ... FOR UPDATE.
    If tracking is off this is a no-op (after locking); if tracking is
    on but no history table is configured, RuntimeError is raised.

    The copy is done server-side with a raw
    INSERT INTO history (...) (SELECT ...), mapping the source id column
    to self.history_FK_name and appending an audit_dtm timestamp column.
    """
    db = self.db
    src_tbl = self.view_agg
    # Python-2 'long'; falsy/zero ids fall through to the query branch
    id = isinstance(q_or_id, (int, long)) and q_or_id or None
    if id:
        q = src_tbl._id==id
        q_for_upd = self.own._id == id
    else:
        q = q_or_id
        q_for_upd = self.own._id.belongs(db(q)._select(self._get_id_field(q)))
    # lock affected own-table rows for the duration of the transaction
    db(q_for_upd).select(for_update=True)
    if not self.tracking: return
    if not self.history:
        raise RuntimeError('Trying backup record but history-table isn\'t defined')
    sql_ins = 'INSERT INTO %(history)s (%(h_flds_names)s) ( %(select)s )'
    flds_lst = []
    sql_args = Storage( history = self.history._rname, h_flds_names = [], select = '' )
    for f in self.view_agg:
        flds_lst.append(f)
        if f is not src_tbl._id:
            sql_args.h_flds_names.append(f.name)
        else:
            # the source id is stored in the history FK column
            sql_args.h_flds_names.append(self.history_FK_name)
    import datetime as dt
    sql_args.h_flds_names.append(self.history.audit_dtm.name)
    # NOTE(review): uses local server time; probably should use request.now
    flds_lst.append(" TIMESTAMP '%s' AS audit_dtm " % (dt.datetime.now())) #request.now !!!!!!!!!!!!!!!!!!!!!!!!!!
    sql_args.h_flds_names = ','.join(sql_args.h_flds_names)
    # _select() returns the SQL text ending in ';' -> strip the semicolon
    sql_args.select = db(q)._select(*flds_lst)[:-1]
    db.executesql(sql_ins % sql_args)
def task_clone_vm(auth, reqid):
    """Clone the VM referenced by clone_requests[reqid].

    On success the request is marked APPROVED; on any failure the error
    is logged and the request is reset to POSTED for a retry.
    """
    credentials = Storage(loads(b64decode(auth)))
    clone_req = db(db.clone_requests.id == reqid).select()[0]
    try:
        conn = Baadal.Connection(_authurl, _tenant, credentials.u, credentials.p)
        source_vm = conn.find_baadal_vm(id=clone_req.vm_id)
        new_clone = source_vm.clone()
        logger.info('VM Cloned: VMID %s, clone_id %s' % (clone_req.vm_id, new_clone))
        clone_req.update_record(status=REQUEST_STATUS_APPROVED)
    except Exception as e:
        logger.exception(e)
        clone_req.update_record(status=REQUEST_STATUS_POSTED)
def insert(self, **kwargs):
    """
    Insert one aggregate record: the own-table row plus any option rows.

    When a parent entity exists, the parent is inserted first (with the
    discriminator field set) and its id is reused as this entity's id.
    Option values in kwargs are split out by _split_args and inserted
    into their option tables with the owner FK set; rows whose payload
    is entirely null are skipped.

    Returns a Storage with ret.own (new id), ret.parent (recursive
    result, if any) and a list of inserted ids per option table.
    """
    ret = Storage()
    if self.parent:
        # discriminator column tells the parent which subtype this row is
        kwargs[self.parent.D_field.name] = self.D_value
        ret.parent = self.parent.insert(**kwargs)
        # child shares the parent's id (table-per-type inheritance)
        kwargs['id'] = ret.parent.own
    splited_args = self._split_args(kwargs)
    id = self.own.insert(**splited_args['own_args'])
    opt_args = splited_args['opt_args']
    for opt_tbl in opt_args:
        rows_to_insert = opt_args[opt_tbl]
        # normalize a single row dict into a one-element tuple
        rows_to_insert = isinstance(rows_to_insert, (list, tuple)) and rows_to_insert \
            or (rows_to_insert,)
        own_FK_name = opt_tbl._entopt.own_FK.name
        ret[opt_tbl._tablename] = []
        for row in rows_to_insert:
            # skip rows carrying no payload beyond id/FK
            if self.is_all_null(row,['id', own_FK_name]): continue
            row.pop('id', None)
            row[own_FK_name] = id
            ret[opt_tbl._tablename].append( opt_tbl.insert(**row) )
    ret.own = id
    return ret
def task_resize_vm(auth, reqid):
    """
    Resize the VM referenced by resize_requests[reqid] to its requested
    flavor.  On success the request is marked APPROVED; on failure the
    error is logged and the request is reset to POSTED.  The DB is
    committed and the connection closed in all cases.
    """
    auth = Storage(loads(b64decode(auth)))
    logger.info('Resizing VM')
    # pre-bind so the except/finally blocks never hit a NameError when an
    # early statement (select / Connection) raises
    req = None
    conn = None
    try:
        req = db(db.resize_requests.id == reqid).select()[0]
        conn = Baadal.Connection(_authurl, _tenant, auth.u, auth.p)
        vm = conn.find_baadal_vm(id=req.vm_id)
        new_flavor = req.new_flavor
        vm.resize(new_flavor)
        req.update_record(status=REQUEST_STATUS_APPROVED)
        # NOTE(review): vm.server.flavor['id'] is read after the resize, so
        # "old_flavor" in this log line may already show the new flavor
        logger.info('VM Resized: VMID %s, old_flavor %s, new_flavor %s' % \
                    (req.vm_id, vm.server.flavor['id'], req.new_flavor))
    except Exception as e:
        logger.exception(e)
        if req is not None:
            req.update_record(status=REQUEST_STATUS_POSTED)
    finally:
        db.commit()
        # original unconditionally called conn.close(), raising NameError
        # when the connection was never established
        if conn is not None:
            conn.close()
def validate_fdata(fdata, app_folder, must_exist=False):
    """
    Validate a client-supplied file descriptor against app_folder.

    fdata is a mapping with at least 'path' and optionally 'md5_hash'.
    Returns Storage(md5_hash, error, os_path); ret.error is '' on
    success, otherwise a human-readable reason.

    Checks: the path (with leading slashes stripped so it cannot escape
    app_folder) must exist when must_exist is True, must not be a
    directory, and for an existing file the supplied md5_hash must match
    the file's current content.
    """
    fdata = Storage(fdata)
    ret = Storage(md5_hash=None, error='', os_path=None)
    pth = fdata.path.strip()
    # drop surrounding whitespace and any leading '/' or '\' so the join
    # below stays inside app_folder
    sanitize_pth_re = re.compile(r'\s*(\\|/)*([^\s]*)\s*$')
    pth = sanitize_pth_re.match(pth).groups()[1]
    ret.os_path = os_path = os.path.join(app_folder, pth)
    if must_exist and not os.path.exists(os_path):
        ret.error = 'it seems that path does not exist: %s' % os_path
    elif os.path.isdir(os_path):
        ret.error = 'path to a file was expected: %s [%s]' % (os_path, fdata.path)
    elif os.path.isfile(os_path):
        if not fdata.md5_hash:
            ret.error = 'md5_hash is required'
        elif md5_hash(safe_read(os_path)) != fdata.md5_hash:
            ret.error = 'file was changed on disk'
    elif os.path.exists(os_path):
        # exists but is neither a regular file nor a directory
        # (original message was garbled: "path exists but it`s to never")
        ret.error = 'path exists but is neither a file nor a directory: %s' % os_path
    return ret
def update_many(self, q, del_insert = False, **kwargs):
    """
    Bulk-update all aggregate records matched by query *q*.

    PostgreSQL-specific: the matched own-table ids are materialized into
    a TEMP table (tmp_ids) and reused for every subsequent own/option
    update.  Parent entities are updated recursively first, and a
    history snapshot of the matched rows is written.

    'list'-type option tables can only be bulk-updated with
    del_insert=True (delete all option rows, re-insert the given ones
    per matched owner); 'one'-type options are updated in place, with
    missing rows inserted and all-null rows deleted afterwards.

    Returns a Storage with ret.own, ret.parent and per-option-table
    Storages (updated / deleted / inserted_ids).
    """
    ret = Storage()
    if self.parent:
        ret.parent = self.parent.update_many(q, del_insert, **kwargs)
    # snapshot + FOR UPDATE lock of everything q matches
    self._write_history(q)
    db = self.db
    #del_insert = kwargs.pop('del_insert', None)
    pg_com = db.executesql
    pg_com('CREATE TEMP TABLE IF NOT EXISTS tmp_ids (id bigint primary key);')
    pg_com(' TRUNCATE tmp_ids;')
    # select ids into temp
    sel_str = db(q)._select(self.own._id)
    # _select() text ends with ';' -> strip it before embedding
    save_str = ' INSERT INTO tmp_ids (%s);'%sel_str[:-1]
    pg_com(save_str)
    splited_args = self._split_args(kwargs)
    # update own
    sel_ids = 'select tmp_ids.id from tmp_ids;'
    ret.own = db(self.own.id.belongs(sel_ids)).update(**splited_args['own_args'])
    # update opt
    opt_args = splited_args['opt_args']
    for opt_tbl in opt_args:
        own_FK_name = opt_tbl._entopt.own_FK.name
        row_to_update = opt_args[opt_tbl]
        if opt_tbl._entopt.type == 'list': #if isinstance(row_to_update, (list, tuple)):
            if not del_insert:
                raise RuntimeError('Can\'t update list option \'%s\' by query, set del_insert=True'%opt_tbl._tablename)
            else:
                # delete every option row of the matched owners, then
                # re-insert the supplied rows once per owner id
                ids = db().select(db.tmp_ids.id)
                ret_del_ins = ret[opt_tbl._tablename] = Storage()
                ret_del_ins.deleted = db(opt_tbl._entopt.own_FK.belongs(sel_ids)).delete()
                ret_del_ins.inserted_ids = []
                row_to_ins=[r for r in row_to_update if not self.is_all_null(r, [own_FK_name, 'id'])]
                for rid in ids:
                    for r in row_to_ins:
                        r.pop('id', None)
                        r[own_FK_name] = rid.id
                        ret_del_ins.inserted_ids.append(opt_tbl.insert(**r))
        else:
            # 'one'-type option: update existing rows in place
            ret_upd_ins = ret[opt_tbl._tablename] = Storage()
            ret_upd_ins.updated = db(opt_tbl[own_FK_name].belongs(sel_ids)).update(**row_to_update)
            ret_upd_ins.inserted_ids=[]
            ret_upd_ins.deleted=[]
            # NOTE(review): this compares the result Storage (not the update
            # count) with the id count — looks suspicious, verify intent
            if ret[opt_tbl._tablename] != db(db.tmp_ids).count(): #preform insert
                # owners that still have no option row get one inserted
                ids= db(opt_tbl[own_FK_name]==None).select(db.tmp_ids.id, left = opt_tbl.on(opt_tbl[own_FK_name]==db.tmp_ids.id))
                for r in ids:
                    row_to_update[own_FK_name] = r.id
                    ret_upd_ins.inserted_ids.append(opt_tbl.insert(**row_to_update))
                #delete nulls rows
                fld_lst= [f for f in opt_tbl if f is not opt_tbl._entopt.own_FK]
                q_to_del = (fld_lst[0]==None) | ((fld_lst[0]==''))
                q_to_del=reduce( lambda q, f: q & ((f==None) | (f=='')) , fld_lst[1:] , q_to_del )
                ret_upd_ins.deleted = db(q_to_del).delete()
    return ret
def import_publ(record, param):
    """
    Import one publication record (a row of the legacy KNIHY table) into
    the catalogue: builds author/keyword/publisher data, computes the
    fastinfo hash, upserts the answer + owned-book rows and their
    impressions (physical copies).
    """
    # KNIHY: ID_PUBL, RADA_PC, RADA_KNIHY, SIGNATURA, TEMATIKA, EAN, AUTORI, NAZEV, PODNAZEV, PUVOD, KNPOZNAMKA,
    # JAZYK, VYDANI, IMPRESUM, ANOTACE, ISBN, KS_CELK, KS, KS_JE, POZNAMKA, STUDOVNA, ID_NAKL
    def impression_iter(impressions):
        # lazily map legacy copy rows to impression dicts
        for record in impressions:
            impression = {}
            impression['iid'] = record['pc']
            impression['sgn'] = record['signatura']
            impression['barcode'] = record['barcode']
            impression['place_id'] = place_to_place_id(param['places'], record['umisteni'].strip())
            yield impression
    id_publ = record['id_publ']
    nazev = fix_895(record['nazev'].strip())
    podnazev = fix_895(record['podnazev'].strip())
    pubplace = publisher = ''
    nakl_id = record['id_nakl']
    nakladatel = param['nakl'].get(nakl_id)
    if nakladatel:
        pubplace = fix_895(nakladatel['misto'].strip())
        publisher = fix_895(nakladatel['nazev1'].strip() or nakladatel['nazev_zkr'].strip())  # prefer Nazev1 ?
    pubyear = parse_year_from_text(record['impresum'], as_string=True)
    klsl = []
    for klic in param['k_klsl'].get(id_publ, ()):
        klsl.append(fix_895(param['klsl'][klic['id_klsl']]['klsl'].strip()))
    for klic in param['k_dt'].get(id_publ, ()):
        klsl.append(fix_895(klic['dt'].strip()))  # not storing k_dt.pom_znak and dt.dt_txt yet
    surnamed = []
    full = []
    for osoba in param['k_autori'].get(id_publ, ()):
        osoba_tuple = (fix_895(param['autori'][osoba['id_autora']]['autor']),)
        surnamed1, full1 = normalize_authors(osoba_tuple, string_surnamed=True, string_full=True)
        if osoba['vztah'] == 'A':  # author
            surnamed.append(surnamed1)
            full.append(full1)
        else:  # other persons (editors, translators, ...)
            klsl.append(full1)  # not storing k_autori.vztah yet
    auth_surnamed = REPEATJOINER.join(surnamed)
    auth_full = REPEATJOINER.join(full)
    origin = fix_895(record['puvod'].strip())
    knpoznamka = fix_895(record['knpoznamka'].strip())
    impresum = fix_895(record['impresum'].strip())
    anotace = fix_895(record['anotace'].strip())  # TODO: decide how to join these and where to store them
    answer_rec = Storage()  # get an empty object
    answer_rec.pubyears = parse_pubyear(impresum)
    answer_rec.country = 'cze'  # ???
    # JAZYK is ignored for now (Manetin: language of the original, or ORI for books in the original language)
    isbn = ''
    if record['isbn']:
        # BUGFIX: the original pattern r'\b[0-9X\-]\b' matched single
        # characters only, so the length filter below could never pass
        # and isbn stayed empty; '+' matches whole ISBN-like runs
        isbn_candidates = re.findall(r'\b[0-9X\-]+\b', record['isbn'])
        isbn_candidates = [candidate for candidate in isbn_candidates if len(candidate.replace('-', '')) in (8, 10, 13)]
        if isbn_candidates:
            isbn = isbn_candidates[0]
    ean = record['ean'].strip()
    if not ean:
        ean = isxn_to_ean(isbn)
    # always, because in case of other system import fastinfo can change together with same ean & md5publ
    fastinfo, md5publ = publ_fastinfo_and_hash(nazev, auth_surnamed, auth_full, pubplace, publisher, pubyear,
                                               subtitles=((DEFAULT_SUBTITLE_JOINER, podnazev),) if podnazev else None,
                                               origin=origin, keys=klsl)
    impressions = param['vytisky'].get(id_publ, ())
    added, answer_id = update_or_insert_answer(ean, md5publ, fastinfo, md5redirects=param['redirects'], marcrec=answer_rec)
    owned_book_id = update_or_insert_owned_book(answer_id, fastinfo, len(impressions))
    # impression_gen is a generator over impression/impressions
    impression_gen = impression_iter(impressions)
    update_or_insert_impressions(answer_id, owned_book_id, impression_gen)
    counter_and_commit_if_100(param, added)
#!/bin/python ########################################################### from gluon.tools import Storage plugin_wordpress2py = Storage() plugin_wordpress2py.meta = { 'title':'Wordpress To Web2py', 'author':'Thadeus Burgess <*****@*****.**>', 'keywords':'database, migration, wordpress, blog', 'description':'Converts wordpress exported xml+rss into a python dictionary and then imports into web2py DAL', 'copyright': 'GPL v3', } ####################################### #### USAGE ######## # Retrieve a python dict that represents the wordpress database ## data = word2py(open('/path/to/wordpress.2009-11-30.xml', 'r')) # Insert data into web2py DAL using a schema ## ids_inserted = schema_migrate(db, schema, '/path/to/wordpress.2009-11-30.xml') # Use the data dictionary to create a custom migration function, # Dictionary layout is documented in the word2py function. ####################################### #### SCHEMA KEY PATTERNS ######## #{
from gluon.tools import Storage
linkback = local_import('plugin_linkback')
#-----------------------------------------------------------------------
#
# Default error codes from the pingback specification.
#
PINGBACK_SOURCE_DOES_NOT_EXIST = 0x0010
PINGBACK_SOURCE_DOES_NOT_LINK = 0x0011
PINGBACK_TARGET_DOES_NOT_EXIST = 0x0020
PINGBACK_TARGET_CANNOT_BE_USED = 0x0021
PINGBACK_ALREADY_REGISTERED = 0x0030
PINGBACK_ACCESS_DENIED = 0x0031
PINGBACK_UPSTREAM_ERROR = 0x0032
PINGBACK_OK = 'OK'
#-----------------------------------------------------------------------
# Plugin registry object: descriptive metadata for the linkback plugin.
plugin_linkback = Storage()
plugin_linkback.meta = {
    'title': 'Track the Pings',
    'author': 'Thadeus Burgess <*****@*****.**>',
    'keywords': 'trackback, pingback',
    'description': 'Provides a framework for posting and receiving trackbacks and pingbacks',
    'copyright': 'GPL v2',
}
#-----------------------------------------------------------------------
#FIXME: use request.server
#
# This is the URL that points to the pingback and trackback public
# functions. This exposes the reception of trackback form POST and
# pingback xmlrpc handler.
# along with this program. If not, see <http://www.gnu.org/licenses/> # ########################################################### __author__ = "Thadeus Burgess <*****@*****.**>" __copyright__ = "Copyright 2009-2010 Thadeus Burgess. GNU GPL v3." __title__ = "Wordpress 2 Python" __description__ = """ Turns a wordpress export xml file into a python dictionary taking coffee for donations :) """ __version__ = "0.0.2" ########################################################### from gluon.tools import Storage plugin_wordpress2py = Storage() plugin_wordpress2py.meta = { 'title': 'Wordpress To Web2py', 'author': 'Thadeus Burgess <*****@*****.**>', 'keywords': 'database, migration, wordpress, blog', 'description': 'Converts wordpress exported xml+rss into a python dictionary and then imports into web2py DAL', 'copyright': 'GPL v2', } ####################################### #### USAGE ######## # Retrieve a python dict that represents the wordpress database ## data = word2py(open('/path/to/wordpress.2009-11-30.xml', 'r'))
def task_create_vm(reqid, auth):
    """
    Scheduler task: build the VM described by vm_requests[reqid].

    Creates the instance, polls until it is Running or Error, then on
    success marks the request APPROVED, mails the requester and attaches
    the optional floating IP / extra disk.  On any failure the request
    is reset to POSTED.  The DB is committed in all cases.
    """
    logger.debug('inside scheduler')
    try:
        auth = Storage(loads(b64decode(auth)))
        req = db(db.vm_requests.id == reqid).select()[0]
        conn = Baadal.Connection(_authurl, _tenant, auth.u, auth.p)
        name = req.vm_name
        img = req.image
        owner = req.owner
        collaborators = req.collaborators
        requester = req.requester
        flavor = req.flavor
        nics = [{'net-id': req.sec_domain}]
        kp = default_keypair
        pub_ip = req.public_ip_required
        vdisk = req.extra_storage
        vm = conn.create_baadal_vm(name, img, flavor, nics, key_name=kp,
                                   requester=requester, owner=owner,
                                   collaborators=collaborators)
        status = vm.get_status()
        # NOTE(review): no timeout — a VM stuck in BUILD polls forever
        while status not in ('Running', 'Error'):
            logger.info('VM %s in creation, current status %s' % \
                        (name, status))
            sleep(5)
            status = vm.get_status()
        if status == 'Running':
            logger.info('VM created')
            try:
                req.update_record(state=REQUEST_STATUS_APPROVED)
                context = Storage()
                user_info = ldap.fetch_user_info(req.requester)
                context.username = user_info['user_name']
                context.user_email = user_info['user_email']
                context.vm_name = name
                context.mail_support = mail_support
                context.gateway_server = gateway_server
                context.request_time = seconds_to_localtime(req.request_time)
                logger.info('sending mail')
                mailer.send(mailer.MailTypes.VMCreated, context.user_email, context)
                logger.info('mail sent')
                if pub_ip == 1 or vdisk:
                    if pub_ip:
                        vm.attach_floating_ip()
                    if vdisk:
                        disk = conn.create_volume(vdisk)
                        while disk.status != 'available':
                            # BUGFIX: original busy-waited with no delay,
                            # hammering the API; back off between polls
                            sleep(2)
                            disk = conn.get_disk_by_id(disk.id)
                        num_disks = vm.metadata().get('disks', 0)
                        # next free device: /dev/vda, /dev/vdb, ...
                        disk_path = '/dev/vd' + chr(97 + num_disks)
                        vm.attach_disk(disk, disk_path)
                        vm.update(disks=num_disks + 1)
            except Exception as e:
                # post-creation extras failing must not mark the VM failed
                logger.exception(e)
        else:  # VM state Error
            req.update_record(state=REQUEST_STATUS_POSTED)
            raise Exception('VM build failed')
    except Exception as e:
        req.update_record(state=REQUEST_STATUS_POSTED)
        logger.exception(e)
    finally:
        db.commit()
# REQUIRES AND IMPORTS ########################################################### if not 'db' in globals() or not 'auth' in globals(): raise HTTP(500, 'plugin_comments requires "db" and "auth"') from gluon.tools import Storage #response.files.append(URL(r=request, c='static', f='plugin_comments/comments.css')) # META INFO ########################################################### plugin_comments = Storage() plugin_comments.meta = { 'title': 'Commentizor', 'author': 'Thadeus Burgess <*****@*****.**>', 'keywords': 'comments, commenting, blog', 'description': 'Provides a comment framework', 'copyright': 'GPL v2' } # SETTINGS ########################################################### plugin_comments.settings = Storage() # Enable recaptcha to post comments plugin_comments.settings.recaptcha = True # Should users that are logged into auth get a captcha? plugin_comments.settings.no_recaptcha_for_users = True