def import_page(self, filename, title, create_only=[], replace=False):
    if filename:
        if not os.path.isfile(filename):
            raise AdminCommandError(
                _("'%(name)s' is not a file",
                  name=path_to_unicode(filename)))
        data = read_file(filename)
    else:
        data = sys.stdin.read()
    data = to_unicode(data, 'utf-8')
    name = unicode_unquote(title.encode('utf-8'))

    page = model.WikiPage(self.env, name)
    if page.exists:
        if name in create_only:
            self.log.info("%s already exists", name)
            return False
        if data == page.text:
            self.log.info("%s is already up to date", name)
            return False

    page.text = data
    try:
        page.save('trac', None, replace=replace)
    except TracError as e:
        raise AdminCommandError(e)
    self.log.info("%s imported from %s", name, path_to_unicode(filename))
    return True
def _get_system_wiki_list(self):
    """Helper function that enumerates all 'system' wikis. The list
    combines the default wiki pages with the pages bundled with the
    Bloodhound dashboard and search plugins."""
    from bhdashboard import wiki

    paths = [resource_filename('trac.wiki', 'default-pages')] + \
            [resource_filename('bhdashboard', 'default-pages')] + \
            [resource_filename('bhsearch', 'default-pages')]
    pages = []
    original_pages = []
    for path in paths:
        for page in os.listdir(path):
            filename = os.path.join(path, page)
            page = unicode_unquote(page.encode('utf-8'))
            if os.path.isfile(filename):
                original_pages.append(page)
    for original_name in original_pages:
        if original_name.startswith('Trac'):
            new_name = wiki.new_name(original_name)
            if not new_name:
                continue
            if new_name in original_pages:
                continue
            name = new_name
            # original trac wikis should also be included in the list
            pages.append(original_name)
        else:
            name = original_name
        pages.append(name)
    return pages
def do_transaction(db):
    for path in paths:
        if os.path.isdir(path):
            self.load_pages(path, replace=replace)
        else:
            page = os.path.basename(path)
            page = unicode_unquote(page.encode('utf-8'))
            if self.import_page(path, page, replace=replace):
                printout(_(" %(page)s imported from %(filename)s",
                           filename=path, page=page))
def do_load(db):
    for page in os.listdir(dir):
        if page in ignore:
            continue
        filename = os.path.join(dir, page)
        page = unicode_unquote(page.encode('utf-8'))
        if os.path.isfile(filename):
            if self.import_page(filename, page, create_only, replace):
                printout(_(" %(page)s imported from %(filename)s",
                           filename=filename, page=page))
def load_pages(self, dir, ignore=[], create_only=[], replace=False):
    with self.env.db_transaction:
        for page in os.listdir(dir):
            if page in ignore:
                continue
            filename = os.path.join(dir, page)
            page = unicode_unquote(page.encode('utf-8'))
            if os.path.isfile(filename):
                if self.import_page(filename, page, create_only, replace):
                    printout(_(" %(page)s imported from %(filename)s",
                               filename=filename, page=page))
def _load_or_replace(self, paths, replace):
    with self.env.db_transaction:
        for path in paths:
            if os.path.isdir(path):
                self.load_pages(path, replace=replace)
            else:
                page = os.path.basename(path)
                page = unicode_unquote(page.encode('utf-8'))
                if self.import_page(path, page, replace=replace):
                    printout(_(" %(page)s imported from %(filename)s",
                               filename=path_to_unicode(path), page=page))
def load_pages(self, dir, ignore=[], create_only=[], replace=False):
    loaded = []
    with self.env.db_transaction:
        for page in sorted(os.listdir(dir)):
            if page in ignore:
                continue
            filename = os.path.join(dir, page)
            if os.path.isfile(filename):
                page = unicode_unquote(page.encode('utf-8'))
                if self.import_page(filename, page, create_only, replace):
                    loaded.append(page)
    return loaded
def insert(self, filename, fileobj, size, t=None, db=None):
    # FIXME: `t` should probably be switched to `datetime` too
    if not db:
        db = self.env.get_db_cnx()
        handle_ta = True
    else:
        handle_ta = False

    self.size = size and int(size) or 0
    timestamp = int(t or time.time())
    self.date = datetime.fromtimestamp(timestamp, utc)

    # Make sure the path to the attachment is inside the environment
    # attachments directory
    attachments_dir = os.path.join(os.path.normpath(self.env.path),
                                   'attachments')
    commonprefix = os.path.commonprefix([attachments_dir, self.path])
    assert commonprefix == attachments_dir

    if not os.access(self.path, os.F_OK):
        os.makedirs(self.path)
    filename = unicode_quote(filename)
    path, targetfile = create_unique_file(os.path.join(self.path, filename))
    try:
        # Note: `path` is an unicode string because `self.path` was one.
        # As it contains only quoted chars and numbers, we can use `ascii`
        basename = os.path.basename(path).encode('ascii')
        filename = unicode_unquote(basename)

        cursor = db.cursor()
        cursor.execute("INSERT INTO attachment "
                       "VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
                       (self.parent_realm, self.parent_id, filename,
                        self.size, timestamp, self.description,
                        self.author, self.ipnr))
        shutil.copyfileobj(fileobj, targetfile)
        self.resource.id = self.filename = filename

        self.env.log.info('New attachment: %s by %s', self.title,
                          self.author)

        if handle_ta:
            db.commit()

        targetfile.close()
        for listener in AttachmentModule(self.env).change_listeners:
            listener.attachment_added(self)
    finally:
        if not targetfile.closed:
            targetfile.close()
def load_pages(self, dir, ignore=[], create_only=[], replace=False):
    with self.env.db_transaction:
        for page in os.listdir(dir):
            if page in ignore:
                continue
            filename = os.path.join(dir, page)
            page = unicode_unquote(page.encode('utf-8'))
            if os.path.isfile(filename):
                if self.import_page(filename, page, create_only, replace):
                    printout(_(" %(page)s imported from %(filename)s",
                               filename=path_to_unicode(filename),
                               page=page))
def add_default_wiki_pages(self, group, env):
    dir = pkg_resources.resource_filename('trac.wiki', 'default-pages')
    #WikiAdmin(env).load_pages(dir)
    with env.db_transaction:
        for name in os.listdir(dir):
            filename = os.path.join(dir, name)
            name = unicode_unquote(name.encode('utf-8'))
            if os.path.isfile(filename):
                self.note(" Adding page %s" % name)
                with open(filename) as file:
                    text = file.read().decode('utf-8')
                self.add_wiki_page(env, name, text)
def __has_db_entry(self, dir, filename, tkt_id):
    try:
        fn = unicode_unquote(filename)
    except:
        # print "exception using filename: %s" % filename
        fn = filename
    for row in self.env.db_query("""
            SELECT id FROM attachment
            WHERE filename='%s' and id='%s'
            ORDER BY type, id, filename""" % (fn, tkt_id)):
        return True
    return False
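# Reviewer sketch, not part of the plugin above: the interpolated SQL in
# __has_db_entry is fragile for filenames that contain quotes. Trac's
# Environment.db_query also accepts a parameterized form (assumed available
# here, Trac >= 1.0), which would sidestep the quoting problem:
#
#     for row in self.env.db_query(
#             "SELECT id FROM attachment WHERE filename=%s AND id=%s",
#             (fn, tkt_id)):
#         return True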
def __init__(self, env, req):
    max_size = AttachmentModule(env).max_size
    size = self.__get_content_length(req)
    self.__verify_size(size, max_size)
    tempfile = TemporaryFile()
    try:
        self.__read_content(req, tempfile, size, max_size)
    except:
        tempfile.close()
        raise
    self.file = tempfile
    filename = req.get_header('X-TracDragDrop-Filename')
    self.filename = unicode_unquote(filename or '').encode('utf-8')
def remove_user_file(self, file_href):
    """Removes the uploaded file referenced by its url.
    @param file_href: str
    @return: bool
    """
    try:
        Attachment(self.env, 'wiki', self.attachments_wikiPage,
                   filename=unicode_unquote(file_href.split('/')[-1])
                   ).delete()
        return True
    except Exception, e:
        out = StringIO()
        traceback.print_exc(file=out)
        self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e),
                                       out.getvalue()))
        raise TracError(_("Unable to remove file. [%s].") % (file_href))
        return False
def insert(self, filename, fileobj, size, t=None, db=None):
    if not db:
        db = self.env.get_db_cnx()
        handle_ta = True
    else:
        handle_ta = False

    self.size = size and int(size) or 0
    self.time = int(t or time.time())

    # Make sure the path to the attachment is inside the environment
    # attachments directory
    attachments_dir = os.path.join(os.path.normpath(self.env.path),
                                   'attachments')
    commonprefix = os.path.commonprefix([attachments_dir, self.path])
    assert commonprefix == attachments_dir

    if not os.access(self.path, os.F_OK):
        os.makedirs(self.path)
    filename = unicode_quote(filename)
    path, targetfile = create_unique_file(os.path.join(self.path,
                                                       filename))
    try:
        # Note: `path` is an unicode string because `self.path` was one.
        # As it contains only quoted chars and numbers, we can use `ascii`
        basename = os.path.basename(path).encode('ascii')
        filename = unicode_unquote(basename)

        cursor = db.cursor()
        cursor.execute("INSERT INTO attachment "
                       "VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
                       (self.parent_type, self.parent_id, filename,
                        self.size, self.time, self.description,
                        self.author, self.ipnr))
        shutil.copyfileobj(fileobj, targetfile)
        self.filename = filename

        self.env.log.info('New attachment: %s by %s', self.title,
                          self.author)

        if handle_ta:
            db.commit()

        for listener in AttachmentModule(self.env).change_listeners:
            listener.attachment_added(self)
    finally:
        targetfile.close()
def get_resource_url(self, resource, href, **kwargs):
    """Return a URL to the attachment itself.

    A `format` keyword argument equal to `'raw'` will be converted
    to the raw-attachment prefix.
    """
    format = kwargs.get('format')
    prefix = 'attachment'
    if format == 'raw':
        kwargs.pop('format')
        prefix = 'raw-attachment'
    parent_href = unicode_unquote(get_resource_url(
        self.env, resource.parent(version=None), Href('')))
    if not resource.id:
        # link to list of attachments, which must end with a trailing '/'
        # (see process_request)
        return href(prefix, parent_href) + '/'
    else:
        return href(prefix, parent_href, resource.id, **kwargs)
def _update_config(self):
    for option in [option for option in Option.registry.values()
                   if option.section == 'google.ads']:
        if option.name == 'hide_for_authenticated':
            option.value = self.config.getbool('google.ads', option.name,
                                               True)
        elif option.name == 'ads_html':
            # Still get the Option to get __doc__ from it
            db = self.env.get_db_cnx()
            cursor = db.cursor()
            cursor.execute('SELECT value FROM system WHERE name=%s',
                           ('google.ads_html',))
            code = cursor.fetchone()
            if code:
                code = unicode_unquote(code[0])
            option.value = code or ''
        else:
            option.value = self.config.get('google.ads', option.name,
                                           option.default)
        self.options[option.name] = option
def _get_file_from_req(self, req):
    file = req.args['file']

    # Test if file is uploaded.
    if not hasattr(file, 'filename') or not file.filename:
        raise TracError('No file uploaded.')

    # Get file size.
    if hasattr(file.file, 'fileno'):
        size = os.fstat(file.file.fileno())[6]
    else:
        file.file.seek(0, 2)
        size = file.file.tell()
        file.file.seek(0)
    if size == 0:
        raise TracError('Can\'t upload empty file.')

    # Strip path from filename.
    filename = os.path.basename(file.filename)

    return file.file, unicode_unquote(filename), size
def get_resource_url(self, resource, href, **kwargs):
    """Return a URL to the attachment itself.

    A `format` keyword argument equal to `'raw'` will be converted
    to the raw-attachment prefix.
    """
    if not resource.parent:
        return None
    format = kwargs.get('format')
    prefix = 'attachment'
    if format in ('raw', 'zip'):
        kwargs.pop('format')
        prefix = format + '-attachment'
    parent_href = unicode_unquote(get_resource_url(
        self.env, resource.parent(version=None), Href('')))
    if not resource.id:
        # link to list of attachments, which must end with a trailing '/'
        # (see process_request)
        return href(prefix, parent_href, '', **kwargs)
    else:
        return href(prefix, parent_href, resource.id, **kwargs)
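# Illustration only (the URL shapes below are assumptions based on Trac's
# attachment URL scheme, not output captured from this module): for an
# attachment 'a b.txt' on ticket #123,
#
#     get_resource_url(env, resource, req.href)
#         -> '/attachment/ticket/123/a%20b.txt'
#     get_resource_url(env, resource, req.href, format='raw')
#         -> '/raw-attachment/ticket/123/a%20b.txt'
#
# The parent href is unquoted above before being passed back through
# href() so the path segments are not escaped twice.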
def __init__(self, req):
    size = req.get_header('Content-Length')
    if size is None:
        size = -1
    else:
        size = int(size)
    tempfile = TemporaryFile()
    input = req.environ['wsgi.input']
    while True:
        buf = input.read(min(4096, size))
        if not buf:
            break
        tempfile.write(buf)
        size -= len(buf)
    tempfile.flush()
    tempfile.seek(0)
    self.file = tempfile
    filename = req.get_header('X-TracDragDrop-Filename')
    self.filename = unicode_unquote(filename or '').encode('utf-8')
def __remove_db_entry(self, filename, tkt_id):
    try:
        fn = "%%%s" % unicode_unquote(filename)
        # print "[__remove_db_entry] fn: %s" % fn
    except:
        # print "exception using filename: %s" % filename
        fn = filename
    cnt = 0
    for row in self.env.db_query("""
            SELECT count(id) FROM attachment
            WHERE filename like '%%%s' and id='%s'""" % (fn, tkt_id)):
        cnt += row[0]
    if cnt == 1:
        sql = ("DELETE FROM attachment WHERE type='%s' AND id='%s' "
               "AND filename like '%s'" % ('ticket', tkt_id, fn))
        try:
            with self.env.db_transaction as db:
                db(sql)
        except Exception, e:
            print "SQL %s caused Exception: %s" % (sql, e)
def redirect(self, req, another_view, *args, **kwargs):
    """
    Redirects the HTTP Request to the given View class or to the
    redirect parameter url present in the request.

    The redirect request parameter should be relative to the
    application path; the unquote and url completion will be done
    automatically.
    """
    assert another_view.url, \
        "The view needs to have an url specified!"
    href = another_view.get_url(req, *args, **kwargs)
    redirect_url = req.args.get('redirect')
    if redirect_url:
        url_with_qs = unicode_unquote(redirect_url)
        if '?' in url_with_qs:
            path, parameters = \
                self._split_path_and_parameters(url_with_qs)
            href = req.href(path, **parameters)
        else:
            # Do we need to do something special if the URL
            # contains a path (not only one component)?
            path = url_with_qs
            href = req.href(path)
    req.redirect(href)
def test_unicode_unquote(self):
    u = u'the Ü thing'
    up = u'%20Ü %20'
    self.assertEqual(u, unicode_unquote(unicode_quote(u)))
    self.assertEqual(up, unicode_unquote(unicode_quote(up)))
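# A minimal usage sketch, separate from the test above (assumptions: these
# helpers are Trac's trac.util.text.unicode_quote / unicode_unquote, which
# UTF-8-encode and percent-escape on the way out and reverse both steps on
# the way back):
from trac.util.text import unicode_quote, unicode_unquote

quoted = unicode_quote(u'the Ü thing')    # e.g. 'the%20%C3%9C%20thing'
assert unicode_unquote(quoted) == u'the Ü thing'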
def __format_value(self, bibkey, value, style):
    required = style['required']
    optional = style['optional']
    if bibkey in value:
        if bibkey in required:
            pre = required[bibkey]['pre']
            post = required[bibkey]['post']
            entry = required[bibkey]
        else:
            pre = optional[bibkey]['pre']
            post = optional[bibkey]['post']
            entry = optional[bibkey]
        span = tag.span(class_=bibkey)
        self.__stack.append(pre)
        if 'presub' in entry:
            for sub in entry['presub']:
                self.__stack.append(self.__format_value(sub, value, style))
        if bibkey in BIBTEX_PERSON:
            a = authors(value[bibkey])
            for person in a:
                if 'first' in person:
                    formatted = ""
                    for first in person['first'].split(' '):
                        first = remove_braces(replace_tags(first))
                        if len(first) > 0:
                            formatted = formatted + first[0] + "."
                    partspan = tag.span(class_='first')
                    partspan.append(formatted)
                    span.append(partspan)
                    span.append(" ")
                for part in ['von', 'last']:
                    if part in person:
                        partspan = tag.span(class_=part)
                        partspan.append(
                            remove_braces(replace_tags(person[part])))
                        span.append(partspan)
                        if part != 'last':
                            span.append(" ")
                if person != a[-1] and len(a) < 3:
                    span.append(" and ")
                else:
                    if len(a) >= 3:
                        etal = tag.span(class_='etal')
                        etal.append(" et al.")
                        span.append(etal)
                    if bibkey == 'editor':
                        if len(a) > 1 and person == a[-1]:
                            span.append(", Eds.")
                        else:
                            span.append(", Ed.")
                    break
        elif bibkey == 'url':
            url = value['url']
            span.append(tag.a(href=url)(unicode_unquote(url)))
        elif bibkey == 'doi':
            url = 'http://dx.doi.org/' + value['doi'].strip()
            span.append(tag.a(href=url)(value['doi']))
        else:
            if bibkey == 'pages':
                value[bibkey] = re.sub('---', '--', value[bibkey])
                value[bibkey] = re.sub(r'([^-])-([^-])', r'\1--\2',
                                       value[bibkey])
            span.append(
                Markup(capitalizetitle(replace_tags(value[bibkey]))))
        self.__stack.append(span)
        if 'postsub' in entry:
            for sub in entry['postsub']:
                self.__format_value(sub, value, style)
        self.__stack.append(post)
def filter_stream(self, req, method, filename, stream, data):
    self.log.debug('Google Ads Stream Filter: %s', req.session)
    if req.path_info.startswith('/admin'):
        # Don't even show the ads link on admin pages
        return stream

    state = req.session.get('adspanel.state', 'shown')
    if state == 'hidden':
        state = 'show'
    elif state == 'shown':
        state = 'hide'

    db = self.env.get_db_cnx()
    cursor = db.cursor()
    cursor.execute('SELECT value FROM system WHERE name=%s',
                   ('google.ads_html',))
    code = cursor.fetchone()
    if code:
        code = unicode_unquote(code[0])
    else:
        return stream

    add_ctxtnav(req, tag.a('%s Ads' % state.capitalize(),
                           href=req.href.adspanel(state),
                           class_="toggle_ads"))

    if self.dont_show_ads(req):
        self.log.debug('Not displaying ads, returning stream')
        return stream

    jscode = """\
jQuery(document).ready(function() {
    jQuery('a.toggle_ads').show();
    jQuery('a.toggle_ads').attr('href', 'javascript:;');
    jQuery('a.toggle_ads').bind('click', function() {
        var state = jQuery('#%(show_hide_id)s').is(':hidden') ?
            'show' : 'hide';
        var name = jQuery('#%(show_hide_id)s').is(':hidden') ?
            'Hide Ads' : 'Show Ads';
        jQuery(this).html(name);
        jQuery('#%(show_hide_id)s').animate({opacity: state}, 200);
        jQuery.get('%(href)s/'+state);
    });
});"""

    ads_div_id = self.config.get('google.ads', 'ads_div_id', 'main')
    if ads_div_id == 'main':
        streambuffer = StreamBuffer()
        return stream | Transformer(
            '//div[@id="%s"]/* | //div[@id="%s"]/text()' %
            (ads_div_id, ads_div_id)) \
            .cut(streambuffer, accumulate=True).buffer().end() \
            .select('//div[@id="%s"]' % ads_div_id).prepend(
                tag.table(tag.tr(
                    tag.td(streambuffer, width="100%",
                           style="vertical-align: top;") +
                    tag.td(Markup(code), id="ads_panel",
                           style="vertical-align: top;")
                ), width='100%') +
                tag.script(jscode % dict(href=req.href.adspanel(),
                                         show_hide_id='ads_panel'),
                           type="text/javascript")
            )
    else:
        return stream | Transformer(
            '//div[@id="%s"]/* | //div[@id="%s"]/text()' % (
                ads_div_id, ads_div_id)).replace(tag(
                    Markup(code),
                    tag.script(jscode % dict(href=req.href.adspanel(),
                                             show_hide_id=ads_div_id),
                               type="text/javascript")))