def tag_articles(request):
    _ = request.translate
    tag = request.matchdict['tag']
    page_size = int(get_config('elements_on_page'))
    start_page = 0
    if 'start' in request.GET:
        try:
            start_page = int(request.GET['start'])
        except ValueError:
            start_page = 0

    user = request.user
    dbsession = DBSession()
    q = dbsession.query(Article).join(Tag).options(joinedload('tags'))\
        .options(joinedload('user')).order_by(Article.published.desc())
    if not user.has_role('editor'):
        q = q.filter(Article.is_draft == False)

    c = {}
    # fetch one extra record to detect whether an older page exists
    c['articles'] = q.filter(Tag.tag == tag)[(start_page * page_size):((start_page + 1) * page_size + 1)]

    c['prev_page'] = None
    if len(c['articles']) > page_size:
        c['prev_page'] = route_url('blog_tag_articles', request, tag=tag, _query=[('start', start_page + 1)])
        c['articles'].pop()

    c['next_page'] = None
    if start_page > 0:
        c['next_page'] = route_url('blog_tag_articles', request, tag=tag, _query=[('start', start_page - 1)])

    # format *after* translation, otherwise the gettext lookup key never matches
    c['page_title'] = _(u'Articles labeled with tag “{0}”').format(tag)
    return c
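# A minimal sketch of the "page_size + 1" pagination pattern used above (the
# helper name fetch_page is hypothetical, not part of this module): requesting
# one extra row is enough to know whether another page exists without issuing
# a separate COUNT query.
def fetch_page(query, page, page_size):
    rows = query[page * page_size:(page + 1) * page_size + 1]
    has_more = len(rows) > page_size
    if has_more:
        rows.pop()  # drop the sentinel row; it belongs to the next page
    return rows, has_more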
def edit_article_comment_ajax(request):
    """
    Update comment and return updated and rendered data
    """
    comment_id = int(request.matchdict['comment_id'])
    dbsession = DBSession()
    comment = dbsession.query(Comment).options(joinedload('user'))\
        .options(joinedload('user.roles')).get(comment_id)

    # passed POST parameters are: 'body', 'name', 'email', 'website', 'date', 'ip', 'xffip'
    params = {
        'body': 'body',
        'name': 'display_name',
        'email': 'email',
        'website': 'website',
        'ip': 'ip_address',
        'xffip': 'xff_ip_address'
    }
    for k, v in params.items():
        value = request.POST[k]
        if value == '':
            value = None
        setattr(comment, v, value)

    comment.set_body(request.POST['body'])
    comment.is_subscribed = 'is_subscribed' in request.POST
    comment.published = h.str_to_timestamp(request.POST['date'])
    dbsession.flush()

    #comment_user = None
    #if comment.user is not None:
    #    comment_user = dbsession.query(User).options(joinedload('roles')).get(comment.user)

    dbsession.expunge(comment)
    if comment.user is not None:
        dbsession.expunge(comment.user)
        for p in comment.user.roles:
            dbsession.expunge(p)

    data = {}
    # without "unicode" or "str" it generates broken HTML
    # because render() returns webhelpers.html.builder.literal
    renderer_dict = {'comment': comment}
    if comment.user is not None:
        comment._real_email = comment.user.email
    else:
        comment._real_email = comment.email
    if comment._real_email == '':
        comment._real_email = None

    data['rendered'] = render('/blog/single_comment.mako', renderer_dict, request)
    return data
def delete_accounts_ajax(request):
    uids_raw = request.POST['uids']
    uids = [int(s.strip()) for s in uids_raw.split(',')]
    c = {
        'deleted': uids,
        'failed': False
    }
    dbsession = DBSession()
    dbsession.query(User).filter(User.id.in_(uids)).delete(synchronize_session=False)
    return c
def upload_file_check_ajax(request):
    c = {'exists': False}
    # check filename
    if 'filename' in request.POST:
        filename = request.POST['filename']
        dbsession = DBSession()
        file = dbsession.query(File).filter(File.name == filename).first()
        if file is not None:
            c['exists'] = True
    return c
def visitors_emails_delete_ajax(request):
    c = {
        'deleted': [],
        'failed': False
    }
    uids_raw = request.POST['uids']
    uids = [int(s.strip()) for s in uids_raw.split(',')]
    dbsession = DBSession()
    dbsession.query(VerifiedEmail).filter(VerifiedEmail.id.in_(uids)).delete(synchronize_session=False)
    c['deleted'] = uids
    return c
def visitor_email_edit_ajax(request):
    c = {}
    id = int(request.POST['id'])
    is_verified = request.POST['is_verified'] == 'true'
    dbsession = DBSession()
    vf = dbsession.query(VerifiedEmail).get(id)
    if vf is None:
        return HTTPNotFound()
    vf.is_verified = is_verified
    return c
def edit_file_props_check_ajax(request):
    # default to 'exists': False, consistent with upload_file_check_ajax()
    c = {'exists': False}
    file_id = int(request.matchdict['file_id'])
    # check filename
    if 'filename' in request.POST:
        filename = request.POST['filename']
        dbsession = DBSession()
        file = dbsession.query(File).filter(File.name == filename)\
            .filter(File.id != file_id).first()
        if file is not None:
            c['exists'] = True
    return c
def delete_article_comment_ajax(request):
    comment_id = int(request.matchdict['comment_id'])
    dbsession = DBSession()
    comment = dbsession.query(Comment).get(comment_id)
    if comment is None:
        return HTTPNotFound()
    # fetch the parent article *before* deleting the comment
    article = dbsession.query(Article).get(comment.article_id)
    dbsession.delete(comment)
    _update_comments_counters(dbsession, article)
    data = {}
    return data
def edit_fetch_comment_ajax(request):
    """
    Fetch comment details
    """
    comment_id = int(request.matchdict['comment_id'])
    dbsession = DBSession()
    comment = dbsession.query(Comment).get(comment_id)
    if comment is None:
        return HTTPNotFound()
    attrs = ('display_name', 'email', 'website', 'body', 'ip_address',
             'xff_ip_address', 'is_subscribed')
    data = {}
    for a in attrs:
        data[a] = getattr(comment, a)
    data['date'] = h.timestamp_to_str(comment.published)
    return data
def get_available_themes():
    dbsession = DBSession()
    themes = [
        ('default', _('Default theme (internal)')),
        ('green', _('Green theme (internal)')),
        ('blog.regolit.com', _('blog.regolit.com style (internal)'))]

    # load suitable css files from the storage
    storage_dirs = get_storage_dirs()
    storage_path = storage_dirs['orig']
    style_files = dbsession.query(File).filter(File.dltype == 'auto',
                                               File.content_type == 'text/css').all()
    # lazy "(.+?)" so the trailing "\s*" actually strips whitespace before "*/"
    theme_data_re = re.compile(r'/\* pyrone-theme-data:([0-9a-z-]+):\s*(.+?)\s*\*/')
    for f in style_files:
        # open css file and read metadata
        filename = os.path.join(storage_path, f.name)
        description = f.name
        theme_data = {}
        try:
            with open(filename) as fp:
                # analyze the first line
                line = fp.readline(100)
                if not line.startswith('/* pyrone-theme-css */'):
                    continue
                # now read the remaining file and search for metadata
                for line in fp:
                    mo = theme_data_re.match(line)
                    if mo is None:
                        continue
                    theme_data[mo.group(1)] = mo.group(2)
        except Exception as e:
            log.error(e)
            continue

        # get description from the data
        request = threadlocal.get_current_request()
        key = 'title-{0}'.format(lang.lang(request))
        if key in theme_data:
            description = theme_data[key]
        themes.append((f.name, description))

    return themes
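# An illustrative CSS file that get_available_themes() would pick up, inferred
# from the parsing code above (the metadata keys beyond "title-<lang>" are an
# assumption): the first line must be exactly "/* pyrone-theme-css */", and the
# file must be stored with dltype "auto" and content type "text/css".
#
#   /* pyrone-theme-css */
#   /* pyrone-theme-data:title-en: My custom theme */
#   /* pyrone-theme-data:title-ru: Моя тема */
#   body { background: #fff; }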
def latest(request):
    """
    Display list of articles sorted by publishing date descending,
    show rendered previews, not complete articles
    """
    _ = request.translate
    c = {'articles': []}
    page_size = int(get_config('elements_on_page'))
    start_page = 0
    if 'start' in request.GET:
        try:
            start_page = int(request.GET['start'])
        except ValueError:
            start_page = 0

    dbsession = DBSession()
    user = request.user
    q = dbsession.query(Article).options(joinedload('tags'))\
        .options(joinedload('user')).order_by(Article.published.desc())
    if not user.has_role('editor'):
        q = q.filter(Article.is_draft == False)

    # fetch one extra record to detect whether an older page exists
    c['articles'] = q[(start_page * page_size):((start_page + 1) * page_size + 1)]

    c['prev_page'] = None
    if len(c['articles']) > page_size:
        c['prev_page'] = route_url('blog_latest', request, _query=[('start', start_page + 1)])
        c['articles'].pop()

    c['next_page'] = None
    if start_page > 0:
        c['next_page'] = route_url('blog_latest', request, _query=[('start', start_page - 1)])

    c['page_title'] = _('Latest articles')
    return c
def upload_file(request):
    """
    Process file upload request
    """
    c = {'errors': {}}
    c['file'] = File()

    # process file upload: extract file params etc
    req = ('filedata', 'filename', 'dltype')
    for x in req:
        if x not in request.POST:
            return HTTPBadRequest()

    hfile = request.POST['filedata']
    # guess content type
    content_type = guess_type(hfile.filename)[0] or 'application/octet-stream'

    dbsession = DBSession()
    now = datetime.utcnow()
    file = dbsession.query(File).filter(File.name == request.POST['filename']).first()
    if file is None:
        file = File()
        file.name = request.POST['filename']

    file.size = len(hfile.value)
    file.dltype = 'download' if request.POST['dltype'] == 'download' else 'auto'
    file.content_type = content_type
    file.updated = h.dt_to_timestamp(now)

    # save file to the storage
    storage_dirs = get_storage_dirs()
    orig_filename = os.path.join(storage_dirs['orig'], file.name)
    with open(orig_filename, 'wb') as fp:
        shutil.copyfileobj(hfile.file, fp)
    hfile.file.close()

    dbsession.add(file)
    dbsession.flush()
    dbsession.expunge(file)

    try:
        transaction.commit()
    except IntegrityError:
        # display error etc
        transaction.abort()

    return HTTPFound(location=route_url('admin_list_files', request))
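# Quick illustration of the content-type fallback above, assuming guess_type
# is the stdlib mimetypes.guess_type: it returns a (type, encoding) pair and
# the type is None for unknown extensions, hence the "or" fallback.
from mimetypes import guess_type as _guess_type_demo

assert _guess_type_demo('notes.css')[0] == 'text/css'
assert _guess_type_demo('file.xyz123')[0] is None  # -> 'application/octet-stream' fallback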
def approve_article_comment_ajax(request):
    comment_id = int(request.matchdict['comment_id'])
    dbsession = DBSession()
    comment = dbsession.query(Comment).get(comment_id)
    if comment is None:
        return HTTPNotFound()

    # also find the corresponding article
    article = dbsession.query(Article).get(comment.article_id)
    if article is None:
        return HTTPNotFound()

    comment.is_approved = True
    _update_comments_counters(dbsession, article)
    data = {}
    return data
def my_profile_save_ajax(request):
    c = {}
    user_id = request.user.id
    is_changed = False
    dbsession = DBSession()
    user = dbsession.query(User).options(joinedload('roles')).get(user_id)
    if user is None:
        return JSONResponse(httpcode.BadRequest, c)

    if 'email' in request.POST:
        user.email = request.POST['email']
        is_changed = True

    if user.kind == 'local':
        if 'display_name' in request.POST:
            user.display_name = request.POST['display_name']
            is_changed = True
        if 'login' in request.POST:
            user.login = request.POST['login']
            is_changed = True
        if 'new_password' in request.POST and request.POST['new_password'] != '':
            # construct new password: salt followed by sha1(salt + sha1(password))
            sample = '0123456789abcdef'
            salt = ''.join([random.choice(sample) for x in range(8)])
            user.password = salt + sha1(salt + sha1(request.POST['new_password']))
            is_changed = True

    if is_changed:
        dbsession.flush()
        user.detach()
        user.get_roles()
        # also update the Beaker session object
        remember(request, None, user=user)
    else:
        return JSONResponse(httpcode.BadRequest, c)

    return c
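# A self-contained sketch of the password scheme used above, assuming sha1()
# in this module is a project helper that returns a hex digest (hashlib's
# sha1 takes bytes and needs an explicit .hexdigest()); names below are
# illustrative, not part of this module.
import hashlib

def _sha1_hex(s):
    return hashlib.sha1(s.encode('utf-8')).hexdigest()

def _hash_password(password, salt):
    # stored value is the 8-char salt followed by sha1(salt + sha1(password))
    return salt + _sha1_hex(salt + _sha1_hex(password))

def _check_password(password, stored):
    salt = stored[:8]
    return _hash_password(password, salt) == stored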
def login_twitter_finish(request):
    """
    Finish twitter authentication
    """
    consumer_key = str(get_config('tw_consumer_key'))
    consumer_secret = str(get_config('tw_consumer_secret'))
    token = request.session.get('twitter_request_token')
    twitter = Twitter(auth=OAuth(token[0], token[1], consumer_key, consumer_secret),
                      format='', api_version=None)
    verifier = request.GET.get('oauth_verifier')
    try:
        oauth_resp = twitter.oauth.access_token(oauth_verifier=verifier)
    except TwitterHTTPError as e:
        log.error('Invalid "access_token" request: {0}'.format(str(e)))
        return HTTPNotFound()

    oauth_resp_data = dict(urllib.parse.parse_qsl(oauth_resp))
    # typical response:
    # {'user_id': '128607225', 'oauth_token_secret': 'NaGQrWyNRtHHHbvm3tNI0tcr2KTBUEY0J3ng8d7KFXg',
    #  'screen_name': 'otmenych', 'oauth_token': '128607225-NWzT8YL1Wt6qNzMLzmaCEWOxqFtrEI1pjlA8c5FK'}
    tw_username = oauth_resp_data['screen_name']

    user = find_twitter_user(tw_username)
    if user is None:
        dbsession = DBSession()
        # create user
        user = User()
        user.kind = 'twitter'
        user.login = tw_username
        dbsession.add(user)
        # re-request again to correctly read roles
        user = find_twitter_user(tw_username)
        if user is None:
            log.error('Unable to create twitter user')
            return HTTPServerError()

    # save user to the session
    user.detach()
    remember(request, None, user=user)
    return HTTPFound(location=request.GET['pyrone_url'])
def latest_rss(request):
    """
    Create a feed with the latest published articles and return it as an RSS feed
    """
    _ = request.translate
    dbsession = DBSession()
    q = dbsession.query(Article).options(joinedload('tags'))\
        .options(joinedload('user'))\
        .filter(Article.is_draft == False).order_by(Article.updated.desc())
    articles = q[0:10]

    rss_title = get_config('site_title') + ' - ' + _('Latest articles feed')
    site_base_url = get_config('site_base_url')

    items = []
    for a in articles:
        link = h.article_url(request, a)
        items.append(RSSItem(
            title=a.title,
            link=link,
            description=a.rendered_preview,
            pubDate=h.timestamp_to_dt(a.published),
            guid=str(a.id)))

    feed = RSS2(
        title=rss_title,
        link=site_base_url,
        description='',
        items=items)

    response = Response(body=feed.to_xml(encoding='utf-8'),
                        content_type='application/rss+xml')
    return response
def get_public_tags_cloud(force_reload=False):
    """
    Return tags cloud: list of tuple-pairs ("tag", "tag_weight"), where
    tag_weight is a number divisible by 5, 0 <= tag_weight <= 100.
    Only for published articles.
    """
    value = cache.get_value('tags_cloud')
    if value is None or force_reload:
        dbsession = DBSession()
        q = dbsession.query(func.count(Tag.id), Tag.tag).join(Article)\
            .filter(Article.is_draft == False).group_by(Tag.tag)
        items = list()
        counts = list()
        total = 0
        for rec in q.all():
            if rec[0] <= 0:
                continue
            total += rec[0]
            items.append((rec[1], int(rec[0])))
            counts.append(int(rec[0]))

        if len(counts) != 0:
            min_count = min(counts)
            max_count = max(counts)
            if min_count == max_count:
                # i.e. all tags counts are the same, so they have the same weight
                weights = [(x[0], 50) for x in items]
            else:
                lmm = lg(max_count) - lg(min_count)
                weights = [(x[0], (lg(x[1]) - lg(min_count)) / lmm) for x in items]
                # round down to a multiple of 5; integer division ("//") is
                # required here, "/" would return a float and defeat the rounding
                weights = [(x[0], 5 * (int(100 * x[1]) // 5)) for x in weights]
            value = weights
        else:
            value = []
        cache.set_value('tags_cloud', value)
    return value
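# Worked example of the log-scaled weight above. lg is assumed to be a
# logarithm helper; the base does not matter because the formula is a ratio
# of log differences. With counts {python: 1, sql: 10, web: 100}:
# lmm = lg(100) - lg(1), the raw weights are 0.0, 0.5 and 1.0, and rounding
# down to multiples of 5 gives 0, 50 and 100.
from math import log10 as _lg_demo

_counts = {'python': 1, 'sql': 10, 'web': 100}
_lo, _hi = min(_counts.values()), max(_counts.values())
_lmm = _lg_demo(_hi) - _lg_demo(_lo)
_weights = {tag: 5 * (int(100 * (_lg_demo(n) - _lg_demo(_lo)) / _lmm) // 5)
            for tag, n in _counts.items()}
assert _weights == {'python': 0, 'sql': 50, 'web': 100}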
def view_article(request):
    shortcut_date = request.matchdict['shortcut_date']
    shortcut = request.matchdict['shortcut']
    dbsession = DBSession()
    q = dbsession.query(Article).filter(Article.shortcut_date == shortcut_date)\
        .filter(Article.shortcut == shortcut)
    user = request.user
    if not user.has_role('editor'):
        q = q.filter(Article.is_draft == False)
    article = q.first()
    if article is None:
        return HTTPNotFound()

    if 'commentid' in request.GET:
        # redirect to the comment URL; this trick is required because some
        # browsers don't reload the page after changing only the page anchor
        # (e.g. http://example.com/index#abc)
        comment_url = h.article_url(request, article) + '#comment-' + request.GET['commentid']
        return HTTPFound(location=comment_url)

    return _view_article(request, article=article)
def download_file(request):
    filename = request.matchdict['filename']
    dbsession = DBSession()
    file = dbsession.query(File).filter(File.name == filename).first()
    if file is None:
        return HTTPNotFound()

    headers = []
    dltype = file.dltype
    if 'dltype' in request.GET and request.GET['dltype'] in allowed_dltypes:
        dltype = request.GET['dltype']

    if dltype == 'download':
        headers.append(('Content-Disposition',
                        str('attachment; filename={0}'.format(file.name))))

    storage_dirs = get_storage_dirs()
    full_path = os.path.join(storage_dirs['orig'], filename)
    try:
        content_length = os.path.getsize(full_path)
        headers.append(('Content-Length', str(content_length)))
    except IOError:
        return HTTPNotFound()

    response = Response(content_type=str(file.content_type))
    try:
        response.app_iter = open(full_path, 'rb')
    except IOError:
        return HTTPNotFound()
    response.headerlist += headers
    return response
def verify_email(request):
    _ = request.translate
    c = {}
    fail = False
    try:
        email = normalize_email(request.GET['email'])
        verification_code = request.GET['token']
        dbsession = DBSession()
        vf = dbsession.query(VerifiedEmail).filter(VerifiedEmail.email == email).first()
        if vf is None or vf.verification_code != verification_code or vf.is_verified:
            fail = True
        else:
            vf.is_verified = True
    except KeyError:
        fail = True

    if fail:
        c['result'] = _('Verification failed: email not found.')
        return JSONResponse(httpcode.BadRequest, c)
    else:
        c['result'] = _('Email `{0}` has been confirmed.').format(email)
        return c
def edit_article(request):
    article_id = int(request.matchdict['article_id'])
    c = {}
    c['errors'] = {}
    c['new_article'] = False
    dbsession = DBSession()
    if request.method == 'GET':
        article = dbsession.query(Article).get(article_id)
        if article is None:
            return HTTPNotFound()
        c['article'] = article
        c['tags'] = [tag.tag for tag in article.tags]
        c['article_published_str'] = h.timestamp_to_str(article.published)
    elif request.method == 'POST':
        res = _update_article(article_id, request)
        if type(res) != dict:
            return res
        c.update(res)

    c['submit_url'] = route_url('blog_edit_article', request, article_id=article_id)
    c['save_url_ajax'] = route_url('blog_edit_article_ajax', request, article_id=article_id)
    return c
def edit_file_props(request):
    c = {'errors': {}}
    file_id = int(request.matchdict['file_id'])
    dbsession = DBSession()
    if request.method == 'POST':
        content_type = request.POST['content_type']
        dltype = request.POST['dltype']
        filename = request.POST['filename']
        file = dbsession.query(File).get(file_id)
        if file is None:
            return HTTPNotFound()
        if dltype in allowed_dltypes:
            file.dltype = dltype
        if content_type != '':
            file.content_type = content_type
        # rename file on disk
        if file.name != filename:
            storage_dirs = get_storage_dirs()
            old_name = os.path.join(storage_dirs['orig'], str(file.name))
            new_name = os.path.join(storage_dirs['orig'], filename)
            os.rename(old_name, new_name)
            file.name = filename
        return HTTPFound(location=route_url('admin_list_files', request))
    elif request.method == 'GET':
        c['file'] = dbsession.query(File).get(file_id)
        if c['file'] is None:
            return HTTPNotFound()
    return c
def view_moderation_queue(request):
    c = {'comments': []}
    dbsession = DBSession()
    comments = dbsession.query(Comment).filter(Comment.is_approved == False).all()
    for x in comments:
        # set real email
        if x.user is not None:
            x._real_email = x.user.email
        else:
            x._real_email = x.email
        if x._real_email == '':
            x._real_email = None

        # truncate comment text
        trunc_pos = 200
        x._truncated_body = None
        if len(x.rendered_body) > trunc_pos:
            x._truncated_body = x.rendered_body[0:trunc_pos]
        c['comments'].append(x)
    return c
def delete_article(request):
    article_id = int(request.matchdict['article_id'])
    dbsession = DBSession()
    article = dbsession.query(Article).get(article_id)
    if article is None:
        return HTTPNotFound()

    # delete the article and all its comments, invalidate the tags cloud too
    dbsession.query(Comment).filter(Comment.article_id == article_id).delete()
    dbsession.delete(article)
    h.get_public_tags_cloud(force_reload=True)
    data = {}
    return data
def get_not_approved_comments_count():
    dbsession = DBSession()
    cnt = dbsession.query(func.count(Comment.id)).filter(Comment.is_approved == False).scalar()
    return cnt
def list_accounts(request):
    c = {}
    dbsession = DBSession()
    c['users'] = dbsession.query(User).all()
    return c
def backup_now(request):
    """
    Perform complete blog backup: articles, comments, files and settings.
    """
    backups_dir = get_backups_dir()
    now = datetime.now()
    stamp = now.strftime('%Y%m%d-%H%M%S')
    backup_file_name = 'backup-{0}.zip'.format(stamp)
    backup_tmp_dir = os.path.join(backups_dir, 'tmp-{0}'.format(stamp))
    os.mkdir(backup_tmp_dir)

    nsmap = {None: 'http://regolit.com/ns/pyrone/backup/1.0'}

    def e(parent, name, text=None):
        node = etree.SubElement(parent, name, nsmap=nsmap)
        if text is not None:
            node.text = text
        return node

    root = etree.Element('backup', nsmap=nsmap)
    root.set('version', '1.1')
    #info_el = e(root, 'info')
    articles_el = e(root, 'articles')
    vf_el = e(root, 'verified-emails')
    files_el = e(root, 'files')
    settings_el = e(root, 'settings')
    users_el = e(root, 'users')

    dbsession = DBSession()

    # dump tables, create xml-file with data, dump files, pack all in the zip-file
    for article in dbsession.query(Article).all():
        article_el = e(articles_el, 'article')
        article_el.set('id', str(article.id))
        article_el.set('user-id', str(article.user_id))
        e(article_el, 'title', article.title)
        e(article_el, 'shortcut-date', article.shortcut_date)
        e(article_el, 'shortcut', article.shortcut)
        e(article_el, 'body', article.body)
        e(article_el, 'published', str(article.published))
        e(article_el, 'updated', str(article.updated))
        e(article_el, 'is-commentable', str(article.is_commentable))
        e(article_el, 'is-draft', str(article.is_draft))
        tags_el = e(article_el, 'tags')
        for t in article.tags:
            e(tags_el, 'tag', t.tag)
        comments_el = e(article_el, 'comments')
        for comment in article.comments:
            comment_el = e(comments_el, 'comment')
            comment_el.set('id', str(comment.id))
            if comment.user_id is not None:
                comment_el.set('user-id', str(comment.user_id))
            if comment.parent_id is not None:
                comment_el.set('parent-id', str(comment.parent_id))
            e(comment_el, 'body', comment.body)
            e(comment_el, 'email', comment.email)
            e(comment_el, 'display-name', comment.display_name)
            e(comment_el, 'published', str(comment.published))
            e(comment_el, 'ip-address', comment.ip_address)
            if comment.xff_ip_address is not None:
                e(comment_el, 'xff-ip-address', comment.xff_ip_address)
            e(comment_el, 'is-approved', str(comment.is_approved))

    # dump verified emails
    for vf in dbsession.query(VerifiedEmail).all():
        s = e(vf_el, 'email', vf.email)
        s.set('verified', 'true' if vf.is_verified else 'false')
        s.set('last-verification-date', str(int(vf.last_verify_date)))
        s.set('verification-code', vf.verification_code)

    # dump settings
    for setting in dbsession.query(Config).all():
        s = e(settings_el, 'config', setting.value)
        s.set('id', setting.id)

    # dump users and roles
    for user in dbsession.query(User).all():
        user_el = e(users_el, 'user')
        user_el.set('id', str(user.id))
        e(user_el, 'login', user.login)
        e(user_el, 'password', user.password)
        e(user_el, 'display-name', user.display_name)
        e(user_el, 'email', user.email)
        e(user_el, 'kind', user.kind)
        perms_el = e(user_el, 'roles')
        for p in user.roles:
            e(perms_el, 'role', p.name)

    # dump files
    ind = 1
    storage_dirs = get_storage_dirs()
    for f in dbsession.query(File).all():
        # check whether the file actually exists
        full_path = os.path.join(storage_dirs['orig'], f.name)
        if not os.path.exists(full_path) or not os.path.isfile(full_path):
            continue
        target_file = 'file{0:05}'.format(ind)
        shutil.copy(full_path, os.path.join(backup_tmp_dir, target_file))
        file_el = e(files_el, 'file')
        file_el.set('src', target_file)
        e(file_el, 'name', f.name)
        e(file_el, 'dltype', f.dltype)
        e(file_el, 'updated', str(f.updated))
        e(file_el, 'content-type', f.content_type)
        ind += 1

    # write xml; ElementTree.write() opens the file itself, no separate handle needed
    data = {}
    index_xml = os.path.join(backup_tmp_dir, 'index.xml')
    etree.ElementTree(root).write(index_xml, pretty_print=True,
                                  encoding='UTF-8', xml_declaration=True)

    # compress directory
    z = zipfile.ZipFile(os.path.join(backups_dir, backup_file_name), 'w')
    for fn in os.listdir(backup_tmp_dir):
        fn_full = os.path.join(backup_tmp_dir, fn)
        if not os.path.isfile(fn_full):
            continue
        z.write(fn_full, fn)
    z.close()
    data['backup_file'] = backup_file_name

    # cleanup
    shutil.rmtree(backup_tmp_dir, ignore_errors=True)

    data['success'] = True
    return data
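# Roughly the shape of the index.xml produced above (a trimmed, illustrative
# sample; element names follow the e() calls in backup_now):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <backup xmlns="http://regolit.com/ns/pyrone/backup/1.0" version="1.1">
#     <articles>
#       <article id="1" user-id="1">
#         <title>Hello</title>
#         <body>...</body>
#         <tags><tag>python</tag></tags>
#         <comments><comment id="3" parent-id="2">...</comment></comments>
#       </article>
#     </articles>
#     <verified-emails><email verified="true" ...>user@example.com</email></verified-emails>
#     <settings><config id="site_title">...</config></settings>
#     <users><user id="1">...<roles><role>admin</role></roles></user></users>
#     <files><file src="file00001"><name>style.css</name>...</file></files>
#   </backup>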
def restore_backup(request):
    _ = request.translate
    backup_id = request.matchdict['backup_id']
    backups_dir = get_backups_dir()
    filename = b64decode(backup_id).decode('utf-8')
    all_backups = [x for x in os.listdir(backups_dir)
                   if os.path.isfile(os.path.join(backups_dir, x))]
    if filename not in all_backups:
        return {'error': _('Backup file not found')}

    full_filename = os.path.join(backups_dir, filename)
    try:
        z = zipfile.ZipFile(full_filename)
    except zipfile.BadZipfile:
        return {'error': _('Backup file is broken!')}

    # now check zip file contents, first extract file "index.xml"
    try:
        xml_f = z.open('index.xml')
    except KeyError:
        return {'error': _('Backup file is broken, no catalog file inside!')}

    try:
        xmldoc = etree.parse(xml_f)
    except etree.XMLSyntaxError:
        return {'error': _('Backup file is broken, XML catalog is broken!')}

    root = xmldoc.getroot()
    NS = 'http://regolit.com/ns/pyrone/backup/1.0'

    def t(name):
        """
        Convert tag name "name" to a fully qualified name like
        "{http://regolit.com/ns/pyrone/backup/1.0}name"
        """
        return '{{{0}}}{1}'.format(NS, name)

    def unt(name):
        """
        Remove namespace
        """
        return name.replace('{{{0}}}'.format(NS), '')

    # now check whether the backup version is supported
    if root.tag != t('backup'):
        return {'error': _('Unknown XML format of catalog file.')}
    backup_version = root.get('version')
    if backup_version not in ('1.0', '1.1'):
        return {'error': _('Unsupported backup version: “{0}”!').format(root.get('version'))}

    dbsession = DBSession()
    dialect_name = dbsession.bind.name

    # now start to extract all data and fill the DB;
    # first delete everything from the database
    dbsession.query(Comment).delete()
    dbsession.query(Tag).delete()
    dbsession.query(Article).delete()
    dbsession.query(VerifiedEmail).delete()
    dbsession.query(Role).delete()
    dbsession.query(File).delete()  # also remove files from the storage dir
    dbsession.query(Config).delete()
    dbsession.query(User).delete()

    namespaces = {'b': NS}

    def recursively_restore_comments(tree, root_id):
        """
        Restore comments parents-first: add and flush every comment of the
        current level so its ID exists before its children are inserted.
        """
        if root_id not in tree:
            return
        for comment in tree[root_id]:
            dbsession.add(comment)
        dbsession.flush()
        for comment in tree[root_id]:
            recursively_restore_comments(tree, comment.id)

    # first restore config
    nodes = xmldoc.xpath('//b:backup/b:settings', namespaces=namespaces)
    if len(nodes) == 0:
        return JSONResponse(httpcode.NotFound,
                            {'error': _('Backup file is broken: settings block not found')})
    node = nodes[0]
    nodes = node.xpath('//b:config', namespaces=namespaces)
    for node in nodes:
        c = dbsession.query(Config).get(node.get('id'))
        if c is None:
            c = Config(node.get('id'), node.text)
            dbsession.add(c)
        else:
            c.value = node.text

    # now restore users
    nodes = xmldoc.xpath('//b:backup/b:users', namespaces=namespaces)
    if len(nodes) == 0:
        return JSONResponse(httpcode.NotFound,
                            {'error': _('Backup file is broken: users block not found')})
    node = nodes[0]
    nodes = node.xpath('./b:user', namespaces=namespaces)
    for node in nodes:
        u = User()
        u.id = int(node.get('id'))
        subnodes = node.xpath('./*', namespaces=namespaces)
        m = {}
        for sn in subnodes:
            m[unt(sn.tag)] = sn.text
        props = {'login': 'login', 'password': 'password',
                 'display-name': 'display_name', 'email': 'email', 'kind': 'kind'}
        for k, v in props.items():
            if k in m:
                setattr(u, v, m[k])
        dbsession.add(u)

        if backup_version == '1.0':
            # restore permissions now
            permissions_roles_map = {
                'write_article': 'writer',
                'edit_article': 'editor',
                'admin': 'admin',
                'files': 'filemanager'
            }
            subnodes = node.xpath('./b:permissions/b:permission', namespaces=namespaces)
            for sn in subnodes:
                permission_name = sn.text
                if permission_name not in permissions_roles_map:
                    continue
                role_name = permissions_roles_map[permission_name]
                r = Role(None, u.id, role_name)
                dbsession.add(r)
        elif backup_version == '1.1':
            # restore roles directly
            subnodes = node.xpath('./b:roles/b:role', namespaces=namespaces)
            for sn in subnodes:
                r = Role(None, u.id, sn.text)
                dbsession.add(r)

    # restore verified emails
    nodes = xmldoc.xpath('//b:backup/b:verified-emails', namespaces=namespaces)
    if len(nodes) != 0:
        # block is optional
        node = nodes[0]
        nodes = node.xpath('./b:email', namespaces=namespaces)
        for node in nodes:
            vf = VerifiedEmail(node.text)
            vf.last_verify_date = int(node.get('last-verification-date'))
            vf.is_verified = node.get('verified') == 'true'
            vf.verification_code = node.get('verification-code')
            dbsession.add(vf)

    # now restore articles
    nodes = xmldoc.xpath('//b:backup/b:articles', namespaces=namespaces)
    if len(nodes) == 0:
        return JSONResponse(httpcode.NotFound,
                            {'error': _('Backup file is broken: articles block not found')})
    node = nodes[0]
    nodes = node.xpath('./b:article', namespaces=namespaces)
    for node in nodes:
        article = Article()
        article.id = int(node.get('id'))
        article.user_id = int(node.get('user-id'))
        subnodes = node.xpath('./*', namespaces=namespaces)
        m = {}
        for sn in subnodes:
            m[unt(sn.tag)] = sn.text
        props = {'title': 'title', 'body': 'body', 'shortcut': 'shortcut',
                 'shortcut-date': 'shortcut_date'}
        for k, v in props.items():
            if k in m:
                setattr(article, v, m[k])
        article.set_body(m['body'])
        props = {'published': 'published', 'updated': 'updated'}
        for k, v in props.items():
            if k in m:
                setattr(article, v, int(m[k]))
        props = {'is-commentable': 'is_commentable', 'is-draft': 'is_draft'}
        for k, v in props.items():
            if k in m:
                setattr(article, v, m[k].lower() == 'true')
        article.comments_total = 0
        article.comments_approved = 0

        # now restore tags
        subnodes = node.xpath('./b:tags/b:tag', namespaces=namespaces)
        tags_set = set()
        for sn in subnodes:
            tags_set.add(sn.text.strip())
        for tag_str in tags_set:
            log.debug('tag: ' + tag_str)
            tag = Tag(tag_str, article)
            dbsession.add(tag)

        # now process comments: we need to preserve the comments hierarchy
        local_parents = {}  # key is a parent-id, value is a list of child comments
        subnodes = node.xpath('./b:comments/b:comment', namespaces=namespaces)
        for sn in subnodes:
            comment = Comment()
            comment.article_id = article.id
            comment.id = int(sn.get('id'))
            try:
                comment.parent_id = int(sn.get('parent-id'))
            except (KeyError, TypeError):
                pass
            try:
                comment.user_id = int(sn.get('user-id'))
            except (KeyError, TypeError):
                pass
            subsubnodes = sn.xpath('./*', namespaces=namespaces)
            m = {}
            for ssn in subsubnodes:
                m[unt(ssn.tag)] = ssn.text
            props = {'display-name': 'display_name', 'email': 'email', 'website': 'website',
                     'ip-address': 'ip_address', 'xff-ip-address': 'xff_ip_address'}
            for k, v in props.items():
                if k in m:
                    setattr(comment, v, m[k])
            comment.set_body(m['body'])
            comment.published = int(m['published'])
            props = {'is-approved': 'is_approved', 'is-subscribed': 'is_subscribed'}
            for k, v in props.items():
                if k in m:
                    setattr(comment, v, m[k].lower() == 'true')
            article.comments_total += 1
            if comment.is_approved:
                article.comments_approved += 1

            parent_id = comment.parent_id
            if parent_id not in local_parents:
                local_parents[parent_id] = []
            local_parents[parent_id].append(comment)

        dbsession.add(article)
        dbsession.flush()
        recursively_restore_comments(local_parents, None)

    # now process files
    nodes = xmldoc.xpath('//b:backup/b:files', namespaces=namespaces)
    if len(nodes) == 0:
        return JSONResponse(httpcode.NotFound,
                            {'error': _('Backup file is broken: files block not found')})
    node = nodes[0]
    nodes = node.xpath('./b:file', namespaces=namespaces)
    storage_dirs = get_storage_dirs()
    for node in nodes:
        file = File()
        src = node.get('src')
        # read "name", "dltype", "updated", "content_type"
        subnodes = node.xpath('./*', namespaces=namespaces)
        m = {}
        for sn in subnodes:
            m[unt(sn.tag)] = sn.text
        props = {'name': 'name', 'dltype': 'dltype', 'content-type': 'content_type'}
        for k, v in props.items():
            if k in m:
                setattr(file, v, m[k])
        # check "file.name": reject directory references and path separators
        if file.name in ('.', '..'):
            continue
        if file.name.find('/') != -1 or file.name.find('\\') != -1:
            continue
        if file.dltype not in allowed_dltypes:
            file.dltype = 'auto'
        # extract file from the archive, put it to the storage dir, fill attribute "size"
        file_f = z.open(src)
        file_full_path = os.path.join(storage_dirs['orig'], file.name)
        with open(file_full_path, 'wb') as file_out_f:
            shutil.copyfileobj(file_f, file_out_f)
        file_f.close()
        file.size = os.path.getsize(file_full_path)
        dbsession.add(file)

    # catch IntegrityError here!
    try:
        transaction.commit()
        # reset sequences
        if dialect_name == 'postgresql':
            dbsession.bind.execute(text("SELECT setval('pbarticle_id_seq', (SELECT MAX(id) FROM pbarticle));"))
            dbsession.bind.execute(text("SELECT setval('pbarticlecomment_id_seq', (SELECT MAX(id) FROM pbarticlecomment));"))
    except Exception:
        return JSONResponse(httpcode.BadRequest,
                            {'error': _('Unable to restore backup: database error, maybe your backup file is corrupted')})

    # we should also destroy the current session (logout)
    forget(request)
    # clear config cache
    cache.clear_cache()
    return {'success': True}
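# A self-contained sketch of the comment-tree restore strategy used above
# (names are illustrative): group records by parent id, then insert parents
# before children so every parent row exists, with its final id, before any
# child references it.
class _DemoNode:
    def __init__(self, id, parent_id):
        self.id = id
        self.parent_id = parent_id

def _restore_order(nodes):
    by_parent = {}
    for n in nodes:
        by_parent.setdefault(n.parent_id, []).append(n)
    order = []
    def walk(parent_id):
        for n in by_parent.get(parent_id, []):
            order.append(n.id)  # a real restore would add + flush here
            walk(n.id)
    walk(None)  # top-level comments have parent_id None
    return order

# children always come after their parents:
assert _restore_order([_DemoNode(2, 1), _DemoNode(1, None), _DemoNode(3, 2)]) == [1, 2, 3]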
def list_visitors_emails(request):
    c = {}
    dbsession = DBSession()
    c['emails'] = dbsession.query(VerifiedEmail).all()
    return c
def list_files(request):
    c = {}
    dbsession = DBSession()
    c['files'] = dbsession.query(File).all()
    return c