def _postprocess_args(self, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Non-website routes keep the standard behaviour.  For website routes:
    converter records whose uid is still the routing placeholder are
    rebound to the request user, the canonical path is rebuilt from the
    rule, and GET/HEAD requests whose current path differs from the
    canonical one are redirected to it (language-prefixed for non-default
    languages, original query string preserved).  Returns None when no
    redirect is needed.
    """
    if not getattr(request, 'website_enabled', False):
        return super(ir_http, self)._postprocess_args(arguments, rule)
    for key, val in arguments.items():
        # Replace uid placeholder by the current request.uid
        if isinstance(val, orm.BaseModel) and isinstance(
                val._uid, RequestUID):
            arguments[key] = val.sudo(request.uid)
    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception:
        # The rule cannot rebuild a URL from these arguments
        # (e.g. stale slug): answer with a 404.
        return self._handle_exception(werkzeug.exceptions.NotFound())
    if request.httprequest.method in ('GET', 'HEAD'):
        # Compare unquoted forms so percent-encoding differences
        # alone do not trigger a redirect.
        generated_path = werkzeug.url_unquote_plus(path)
        current_path = werkzeug.url_unquote_plus(request.httprequest.path)
        if generated_path != current_path:
            if request.lang != request.website.default_lang_code:
                path = '/' + request.lang + path
            if request.httprequest.query_string:
                path += '?' + request.httprequest.query_string
            # NOTE(review): temporary (302) redirect here; sibling
            # variants of this hook use a permanent 301 — confirm intent.
            return werkzeug.utils.redirect(path)
def _postprocess_args(cls, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Always runs the parent post-processing first, rebinds
    placeholder-uid records to the request user, then — for multilang
    website GET/HEAD requests only — permanently redirects (301) to the
    canonical URL rebuilt from the rule when it differs from the
    requested path.  Build failures are handled as a 404.
    """
    super(Http, cls)._postprocess_args(arguments, rule)
    for key, val in pycompat.items(arguments):
        # Replace uid placeholder by the current request.uid
        if isinstance(val, models.BaseModel) and isinstance(
                val._uid, RequestUID):
            arguments[key] = val.sudo(request.uid)
    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception as e:
        # Unbuildable URL (e.g. stale slug): delegate as a 404.
        return cls._handle_exception(e, code=404)
    if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
        # Compare unquoted paths so encoding differences alone don't redirect.
        generated_path = werkzeug.url_unquote_plus(path)
        current_path = werkzeug.url_unquote_plus(request.httprequest.path)
        if generated_path != current_path:
            # Keep the language prefix for non-default languages.
            if request.lang != request.website.default_lang_code:
                path = '/' + request.lang + path
            if request.httprequest.query_string:
                path += '?' + request.httprequest.query_string
            return werkzeug.utils.redirect(path, code=301)
def signin_3rd(self, **kw):
    """OAuth callback endpoint for third-party sign-in.

    Decodes the JSON ``state`` round-tripped through the provider
    (``d`` = dbname, ``p`` = provider id, ``c`` = context, ``a``/``m`` =
    action/menu to open, ``r`` = url-quoted redirect target), validates
    the credentials against ``res.users.auth_oauth_third`` and either
    logs the user in or routes them to the account-binding page.
    Errors fall through to ``/web/login`` with an ``oauth_error`` code.
    """
    state = json.loads(kw['state'])
    dbname = state['d']
    provider = state['p']
    context = state.get('c', {})
    registry = registry_get(dbname)
    with registry.cursor() as cr:
        try:
            env = api.Environment(cr, SUPERUSER_ID, context)
            credentials = env['res.users'].sudo().auth_oauth_third(
                provider, kw)
            # Persist any user/token changes before leaving the cursor.
            cr.commit()
            action = state.get('a')
            menu = state.get('m')
            redirect = werkzeug.url_unquote_plus(
                state['r']) if state.get('r') else False
            url = '/web'
            if redirect:
                url = redirect
            elif action:
                url = '/web#action=%s' % action
            elif menu:
                url = '/web#menu_id=%s' % menu
            if credentials[0] == -1:
                # Sentinel: the OAuth identity is not yet bound to a user.
                # Stash the provider payload and send the visitor to the
                # binding page with the original redirect encoded in the URL.
                from .controllers import gen_id
                credentials[1]['oauth_provider_id'] = provider
                qr_id = gen_id(credentials[1])
                # NOTE(review): if state has no 'r', `redirect` is False and
                # `.encode` raises AttributeError, which the handler below
                # misreports as "auth_signup not installed" — confirm.
                redirect = base64.urlsafe_b64encode(
                    redirect.encode('utf-8')).decode('utf-8')
                url = '/corp/bind?qr_id=%s&redirect=%s' % (qr_id, redirect)
            else:
                return login_and_redirect(*credentials, redirect_url=url)
        except AttributeError:
            import traceback
            traceback.print_exc()
            # auth_signup is not installed
            _logger.error(
                "auth_signup not installed on database %s: oauth sign up cancelled."
                % (dbname, ))
            url = "/web/login?oauth_error=1"
        except AccessDenied:
            import traceback
            traceback.print_exc()
            # oauth credentials not valid, user could be on a temporary session
            _logger.info(
                'OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies'
            )
            url = "/web/login?oauth_error=3"
            # Return directly without cookies: the browser may still
            # hold a valid session for another user.
            redirect = werkzeug.utils.redirect(url, 303)
            redirect.autocorrect_location_header = False
            return redirect
        except Exception as e:
            # signup error
            _logger.exception("OAuth2: %s" % str(e))
            url = "/web/login?oauth_error=2"
    return set_cookie_and_redirect(url)
def test_quoting():
    """URL quoting"""
    # Non-ASCII characters are percent-encoded as UTF-8 byte sequences.
    assert url_quote(u'\xf6\xe4\xfc') == '%C3%B6%C3%A4%C3%BC'
    # quote/unquote must round-trip reserved characters unchanged.
    roundtripped = url_unquote(url_quote(u'#%="\xf6'))
    assert roundtripped == u'#%="\xf6'
    # The "+plus" variants encode a space as "+".
    plus_quoted = url_quote_plus('foo bar')
    assert plus_quoted == 'foo+bar'
    assert url_unquote_plus(plus_quoted) == 'foo bar'
    # None values are dropped when encoding a mapping.
    encoded = url_encode({'a': None, 'b': 'foo bar'})
    assert encoded == 'b=foo+bar'
    # url_fix escapes spaces, parentheses and non-ASCII in a full URL.
    fixed = url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
    assert fixed == 'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
def _deal_state_r(self, state):
    """Fold an optional ``_fm`` fragment parameter back into state['r'].

    When the request carries a base64-encoded ``_fm`` fragment, append it
    (as ``#fragment``) to the url-quoted redirect target stored in the
    OAuth ``state`` dict.  Returns the (possibly updated) state.
    """
    _logger.info('>>> get_state %s' % request.httprequest.url)
    encoded_fragment = request.params.get('_fm', None)
    if not encoded_fragment:
        return state
    fragment = base64.urlsafe_b64decode(
        encoded_fragment.encode('utf-8')).decode('utf-8')
    base_redirect = werkzeug.url_unquote_plus(state.get('r', ''))
    state['r'] = werkzeug.url_quote_plus('%s#%s' % (base_redirect, fragment))
    return state
def _postprocess_args(cls, arguments, rule):
    """Redirect multilang frontend GET/HEAD requests to their canonical URL.

    Runs the parent post-processing, rebuilds the canonical path from the
    rule (failures are delegated to the exception handler), then issues a
    permanent 301 redirect when the requested path differs from the
    canonical one, keeping the language prefix and query string.
    """
    super(IrHttp, cls)._postprocess_args(arguments, rule)

    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception as e:
        return cls._handle_exception(e)

    should_canonicalize = (
        getattr(request, 'is_frontend_multilang', False)
        and request.httprequest.method in ('GET', 'HEAD'))
    if not should_canonicalize:
        return
    built = werkzeug.url_unquote_plus(path)
    asked = werkzeug.url_unquote_plus(request.httprequest.path)
    if built == asked:
        return
    # Non-default languages keep their URL prefix.
    if request.lang != cls._get_default_lang().code:
        path = '/' + request.lang + path
    query = request.httprequest.query_string
    if query:
        path += '?' + query
    return werkzeug.utils.redirect(path, code=301)
def _postprocess_args(self, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Non-website routes fall back to the standard behaviour.  For website
    routes, browse records still carrying the routing placeholder uid are
    rebound in place to the request user, and any request whose path
    differs from the canonical path rebuilt from the rule is redirected
    (language-prefixed for non-default languages).
    """
    if not getattr(request, 'website_enabled', False):
        return super(ir_http, self)._postprocess_args(arguments, rule)
    for arg, val in arguments.items():
        # Replace uid placeholder by the current request.uid
        if isinstance(val, orm.browse_record) and isinstance(val._uid, RequestUID):
            # Mutates the record's uid directly (old-API browse records).
            val._uid = request.uid
    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception:
        # The rule cannot rebuild a URL from these arguments: 404.
        return self._handle_exception(werkzeug.exceptions.NotFound())
    generated_path = werkzeug.url_unquote_plus(path)
    current_path = werkzeug.url_unquote_plus(request.httprequest.path)
    if generated_path != current_path:
        if request.lang != request.website.default_lang_code:
            path = '/' + request.lang + path
        # NOTE(review): unlike sibling variants, this one redirects all
        # methods (not just GET/HEAD) and drops the query string — confirm.
        return werkzeug.utils.redirect(path)
def _postprocess_args(self, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Falls back to the standard behaviour for non-website routes; on
    website routes it rebinds placeholder-uid browse records to the
    request user and redirects to the canonical URL rebuilt from the
    rule whenever it differs from the requested path.
    """
    if not getattr(request, 'website_enabled', False):
        return super(ir_http, self)._postprocess_args(arguments, rule)

    # Replace uid placeholder by the current request.uid
    for record in arguments.values():
        if isinstance(record, orm.browse_record) and isinstance(
                record._uid, RequestUID):
            record._uid = request.uid

    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception:
        # Unbuildable URL from these arguments: answer with a 404.
        return self._handle_exception(werkzeug.exceptions.NotFound())

    canonical = werkzeug.url_unquote_plus(path)
    requested = werkzeug.url_unquote_plus(request.httprequest.path)
    if canonical == requested:
        return
    if request.lang != request.website.default_lang_code:
        path = '/' + request.lang + path
    return werkzeug.utils.redirect(path)
def unserialize(cls, string, secret_key):
    """Load the secure cookie from a serialized string.

    :param string: the cookie value to unserialize.
    :param secret_key: the secret key used to serialize the cookie.
    :return: a new :class:`SecureCookie`.
    """
    # Python 2 API: normalize unicode cookie values to UTF-8 bytes.
    if isinstance(string, unicode):
        string = string.encode('utf-8', 'ignore')
    try:
        # Serialized layout is "<base64 MAC>?<key=value&key=value...>".
        base64_hash, data = string.split('?', 1)
    except (ValueError, IndexError):
        items = ()
    else:
        items = {}
        mac = hmac(secret_key, None, cls.hash_method)
        for item in data.split('&'):
            # Feed each raw pair into the MAC exactly as serialization did.
            mac.update('|' + item)
            if '=' not in item:
                # Malformed pair: reject the whole cookie.
                items = None
                break
            key, value = item.split('=', 1)
            # Keys are URL-quoted; values stay quoted until the MAC is verified.
            key = url_unquote_plus(key)
            try:
                key = str(key)
            except UnicodeError:
                pass
            items[key] = value
        try:
            client_hash = base64_hash.decode('base64')
        except Exception:
            items = client_hash = None
        # NOTE(review): '==' is not a constant-time comparison; consider
        # hmac.compare_digest to avoid timing side channels — confirm.
        if items is not None and client_hash == mac.digest():
            try:
                for key, value in items.iteritems():
                    items[key] = cls.unquote(value)
            except UnquoteError:
                items = ()
            else:
                # Honour the embedded expiry timestamp, if present.
                if '_expires' in items:
                    if time() > items['_expires']:
                        items = ()
                    else:
                        del items['_expires']
        else:
            items = ()
    return cls(items, secret_key, False)
def unserialize(cls, string, secret_key):
    """Load the secure cookie from a serialized string.

    :param string: the cookie value to unserialize.
    :param secret_key: the secret key used to serialize the cookie.
    :return: a new :class:`SecureCookie`.
    """
    # Python 2 API: normalize unicode cookie values to UTF-8 bytes.
    if isinstance(string, unicode):
        string = string.encode('utf-8', 'ignore')
    try:
        # Serialized layout is "<base64 MAC>?<key=value&key=value...>".
        base64_hash, data = string.split('?', 1)
    except (ValueError, IndexError):
        items = ()
    else:
        items = {}
        mac = hmac(secret_key, None, cls.hash_method)
        for item in data.split('&'):
            # Feed each raw pair into the MAC exactly as serialization did.
            mac.update('|' + item)
            if not '=' in item:
                # Malformed pair: reject the whole cookie.
                items = None
                break
            key, value = item.split('=', 1)
            # try to make the key a string
            key = url_unquote_plus(key)
            try:
                key = str(key)
            except UnicodeError:
                pass
            items[key] = value

        # no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
        try:
            client_hash = base64_hash.decode('base64')
        except Exception:
            items = client_hash = None
        # NOTE(review): '==' is not a constant-time comparison; consider
        # hmac.compare_digest to avoid timing side channels — confirm.
        if items is not None and client_hash == mac.digest():
            try:
                for key, value in items.iteritems():
                    items[key] = cls.unquote(value)
            except UnquoteError:
                items = ()
            else:
                # Honour the embedded expiry timestamp, if present.
                if '_expires' in items:
                    if time() > items['_expires']:
                        items = ()
                    else:
                        del items['_expires']
        else:
            items = ()
    return cls(items, secret_key, False)
def _postprocess_args(cls, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Runs the parent post-processing, rebinds placeholder-uid records to
    the request user, and — for multilang website GET/HEAD requests —
    issues a permanent 301 redirect to the canonical URL rebuilt from
    the rule when it differs from the requested path.
    """
    super(Http, cls)._postprocess_args(arguments, rule)

    # Replace uid placeholder by the current request.uid
    for name, value in pycompat.items(arguments):
        is_placeholder = (isinstance(value, models.BaseModel)
                          and isinstance(value._uid, RequestUID))
        if is_placeholder:
            arguments[name] = value.sudo(request.uid)

    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception as e:
        # Unbuildable URL from these arguments: delegate as a 404.
        return cls._handle_exception(e, code=404)

    multilang = getattr(request, 'website_multilang', False)
    if not (multilang and request.httprequest.method in ('GET', 'HEAD')):
        return
    canonical = werkzeug.url_unquote_plus(path)
    requested = werkzeug.url_unquote_plus(request.httprequest.path)
    if canonical == requested:
        return
    if request.lang != request.website.default_lang_code:
        path = '/' + request.lang + path
    query = request.httprequest.query_string
    if query:
        path += '?' + query
    return werkzeug.utils.redirect(path, code=301)
def _postprocess_args(self, arguments, rule):
    """Canonicalize website URLs after routing-argument conversion.

    Non-website routes keep the standard behaviour.  Website routes have
    placeholder-uid records rebound to the request user; GET/HEAD
    requests whose path differs from the canonical one rebuilt from the
    rule are redirected, keeping the language prefix and query string.
    """
    if not getattr(request, 'website_enabled', False):
        return super(ir_http, self)._postprocess_args(arguments, rule)

    # Replace uid placeholder by the current request.uid
    for name in list(arguments):
        value = arguments[name]
        if isinstance(value, orm.BaseModel) and isinstance(
                value._uid, RequestUID):
            arguments[name] = value.sudo(request.uid)

    try:
        _, path = rule.build(arguments)
        assert path is not None
    except Exception:
        # Unbuildable URL from these arguments: answer with a 404.
        return self._handle_exception(werkzeug.exceptions.NotFound())

    if request.httprequest.method not in ('GET', 'HEAD'):
        return
    canonical = werkzeug.url_unquote_plus(path)
    requested = werkzeug.url_unquote_plus(request.httprequest.path)
    if canonical == requested:
        return
    if request.lang != request.website.default_lang_code:
        path = '/' + request.lang + path
    query = request.httprequest.query_string
    if query:
        path += '?' + query
    return werkzeug.utils.redirect(path)
def unserialize(cls, string, secret_key):
    """Load a secure cookie from its serialized string form.

    Splits the ``<base64 MAC>?<key=value&...>`` payload, re-computes the
    HMAC over the raw pairs, and only unquotes the values once the MAC
    matches; on any parse/MAC/expiry failure the cookie is empty.
    Returns a new instance of ``cls``.
    """
    # Python 2 API: normalize unicode cookie values to UTF-8 bytes.
    if isinstance(string, unicode):
        string = string.encode('utf-8', 'ignore')
    try:
        base64_hash, data = string.split('?', 1)
    except (ValueError, IndexError):
        items = ()
    else:
        items = {}
        mac = hmac(secret_key, None, cls.hash_method)
        for item in data.split('&'):
            # Feed each raw pair into the MAC exactly as serialization did.
            mac.update('|' + item)
            if '=' not in item:
                # Malformed pair: reject the whole cookie.
                items = None
                break
            key, value = item.split('=', 1)
            # Keys are URL-quoted; values stay quoted until the MAC is verified.
            key = url_unquote_plus(key)
            try:
                key = str(key)
            except UnicodeError:
                pass
            items[key] = value
        try:
            client_hash = base64_hash.decode('base64')
        except Exception:
            items = client_hash = None
        # NOTE(review): '==' is not a constant-time comparison; consider
        # hmac.compare_digest to avoid timing side channels — confirm.
        if items is not None and client_hash == mac.digest():
            try:
                for key, value in items.iteritems():
                    items[key] = cls.unquote(value)
            except UnquoteError:
                items = ()
            else:
                # Honour the embedded expiry timestamp, if present.
                if '_expires' in items:
                    if time() > items['_expires']:
                        items = ()
                    else:
                        del items['_expires']
        else:
            items = ()
    return cls(items, secret_key, False)
def test_url_unquote_plus_unicode():
    """Make sure that URL unquote plus accepts unicode."""
    # Regression check: this raised on unicode input in werkzeug 0.6.
    result = url_unquote_plus(u'\x6d')
    assert result == u'\x6d'
class ir_http(orm.AbstractModel):
    """Website routing layer: language detection, GeoIP and rerouting.

    Extends ``ir.http`` so website pages get language-prefixed URLs,
    bot-aware language redirects and a per-session GeoIP record.
    """
    _inherit = 'ir.http'

    # Maximum number of internal reroutes before giving up (loop guard).
    rerouting_limit = 10
    # Lazily-probed GeoIP resolver: None = not probed yet, False = unavailable.
    geo_ip_resolver = None

    def _get_converters(self):
        """Add the website URL converters to the standard ones."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _auth_method_public(self):
        """Authenticate as the shared public user when no session user exists."""
        # TODO: select user_id from matching website
        if not request.session.uid:
            request.uid = self.pool['ir.model.data'].xmlid_to_res_id(
                request.cr, openerp.SUPERUSER_ID, 'base.public_user')
        else:
            request.uid = request.session.uid

    # Substrings identifying well-known crawlers in the User-Agent header.
    bots = "bot|crawl|slurp|spider|curl|wget|facebookexternalhit".split("|")

    def is_a_bot(self):
        """Return True when the request User-Agent looks like a crawler."""
        # We don't use regexp and ustr voluntarily
        # timeit has been done to check the optimum method
        ua = request.httprequest.environ.get('HTTP_USER_AGENT', '').lower()
        try:
            return any(bot in ua for bot in self.bots)
        except UnicodeDecodeError:
            return any(bot in ua.encode('ascii', 'ignore') for bot in self.bots)

    def get_nearest_lang(self, lang):
        """Return the closest installed website language code for ``lang``.

        Exact matches win; otherwise the first installed language sharing
        the short code is returned.  Returns False when nothing matches.
        """
        # Try to find a similar lang. Eg: fr_BE and fr_FR
        short = lang.partition('_')[0]
        short_match = False
        for code, name in request.website.get_languages():
            if code == lang:
                return lang
            if not short_match and code.startswith(short):
                short_match = code
        return short_match

    def _dispatch(self):
        """Resolve the handler, set website/lang state, then dispatch.

        Adds per-session GeoIP data, picks the display language from the
        URL prefix / cookie / browser preference, and redirects or
        reroutes first-pass requests so URLs carry the right language
        prefix (except for bots and POST requests).
        """
        # first_pass is True only for the original request, not reroutes.
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        request.website_multilang = (
            request.website_enabled and func and
            func.routing.get('multilang', func.routing['type'] == 'http'))

        if 'geoip' not in request.session:
            record = {}
            if self.geo_ip_resolver is None:
                try:
                    import GeoIP
                    # updated database can be downloaded on MaxMind website
                    # http://dev.maxmind.com/geoip/legacy/install/city/
                    geofile = config.get('geoip_database')
                    if os.path.exists(geofile):
                        self.geo_ip_resolver = GeoIP.open(
                            geofile, GeoIP.GEOIP_STANDARD)
                    else:
                        self.geo_ip_resolver = False
                        logger.warning(
                            'GeoIP database file %r does not exists', geofile)
                except ImportError:
                    self.geo_ip_resolver = False
            if self.geo_ip_resolver and request.httprequest.remote_addr:
                # Prefer the proxied client address when present.
                record = self.geo_ip_resolver.record_by_addr(
                    request.httprequest.headers.environ.get(
                        'HTTP_X_FORWARDED_FOR',
                        request.httprequest.remote_addr)) or {}
            request.session['geoip'] = record

        cook_lang = request.httprequest.cookies.get('website_lang')
        if request.website_enabled:
            try:
                if func:
                    self._authenticate(func.routing['auth'])
                else:
                    self._auth_method_public()
            except Exception as e:
                return self._handle_exception(e)

            request.redirect = lambda url, code=302: werkzeug.utils.redirect(
                url_for(url), code)
            request.website = request.registry['website'].get_current_website(
                request.cr, request.uid, context=request.context)
            langs = [lg[0] for lg in request.website.get_languages()]
            path = request.httprequest.path.split('/')
            if first_pass:
                # A language prefix can only appear on unresolved routes.
                nearest_lang = not func and self.get_nearest_lang(path[1])
                url_lang = nearest_lang and path[1]
                # cookie > browser preference > website default
                preferred_lang = ((cook_lang if cook_lang in langs else False)
                                  or self.get_nearest_lang(request.lang)
                                  or request.website.default_lang_code)
                is_a_bot = self.is_a_bot()
                request.lang = request.context[
                    'lang'] = nearest_lang or preferred_lang
                # if lang in url but not the displayed or default language --> change or remove
                # or no lang in url, and lang to display not the default language --> add lang
                # and not a POST request
                # and not a bot or bot but default lang in url
                # NOTE(review): because of and/or precedence the POST guard
                # binds only to the second alternative — confirm intended.
                if ((url_lang and (url_lang != request.lang or url_lang == request.website.default_lang_code))
                        or (not url_lang and request.website_multilang and request.lang != request.website.default_lang_code)
                        and request.httprequest.method != 'POST') \
                        and (not is_a_bot or (url_lang and url_lang == request.website.default_lang_code)):
                    if url_lang:
                        path.pop(1)
                    if request.lang != request.website.default_lang_code:
                        path.insert(1, request.lang)
                    path = '/'.join(path) or '/'
                    redirect = request.redirect(
                        path + '?' + request.httprequest.query_string)
                    redirect.set_cookie('website_lang', request.lang)
                    return redirect
                elif url_lang:
                    # Strip the language prefix and re-dispatch internally.
                    path.pop(1)
                    return self.reroute('/'.join(path) or '/')
            # bind modified context
            request.website = request.website.with_context(request.context)
        resp = super(ir_http, self)._dispatch()
        if request.website_enabled and cook_lang != request.lang and hasattr(
                resp, 'set_cookie'):
            resp.set_cookie('website_lang', request.lang)
        return resp

    def reroute(self, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and more than ``rerouting_limit``
        hops, then patches the WSGI environ and dispatches again.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Rebind placeholder-uid records and 301-redirect to canonical URLs.

        Applies to multilang website GET/HEAD requests only; URL build
        failures are delegated to the exception handler as a 404.
        """
        super(ir_http, self)._postprocess_args(arguments, rule)
        for key, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.BaseModel) and isinstance(
                    val._uid, RequestUID):
                arguments[key] = val.sudo(request.uid)
        try:
            _, path = rule.build(arguments)
            assert path is not None
        except Exception, e:
            return self._handle_exception(e, code=404)
        if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path, code=301)
class ir_http(orm.AbstractModel):
    """Website routing layer (early variant): language prefix and rerouting.

    Extends ``ir.http`` so website routes authenticate as the public
    user when needed, pick up a language prefix from the URL, and
    reroute unresolved paths through the website handlers.
    """
    _inherit = 'ir.http'

    # Maximum number of internal reroutes before giving up (loop guard).
    rerouting_limit = 10

    def _get_converters(self):
        """Add the website URL converters to the standard ones."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _dispatch(self):
        """Resolve the handler, set website/lang state, then dispatch.

        Unresolved paths are checked for a leading language code and
        rerouted internally without that prefix.
        """
        # first_pass is True only for the original request, not reroutes.
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        if request.website_enabled:
            if func:
                self._authenticate(func.routing['auth'])
            else:
                self._auth_method_public()
            request.website = request.registry['website'].get_current_website(
                request.cr, request.uid, context=request.context)
            if first_pass:
                request.lang = request.website.default_lang_code
            request.context['lang'] = request.lang
            request.website.preprocess_request(request)
            if not func:
                # No handler matched: strip a leading language code (if
                # any) and reroute through the website handlers.
                path = request.httprequest.path.split('/')
                langs = [lg[0] for lg in request.website.get_languages()]
                if path[1] in langs:
                    request.lang = request.context['lang'] = path.pop(1)
                path = '/'.join(path) or '/'
                return self.reroute(path)
        return super(ir_http, self)._dispatch()

    def reroute(self, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and more than ``rerouting_limit``
        hops, then patches the WSGI environ and dispatches again.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Rebind placeholder uids in place and redirect to canonical URLs.

        Non-website routes keep the standard behaviour; URL build
        failures are delegated to the exception handler as a 404.
        """
        if not getattr(request, 'website_enabled', False):
            return super(ir_http, self)._postprocess_args(arguments, rule)
        for arg, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.browse_record) and isinstance(
                    val._uid, RequestUID):
                # Mutates the record's uid directly (old-API browse records).
                val._uid = request.uid
        try:
            _, path = rule.build(arguments)
            assert path is not None
        except Exception, e:
            return self._handle_exception(e, code=404)
        if request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                # NOTE(review): query string is dropped here, unlike
                # later variants — confirm intended.
                return werkzeug.utils.redirect(path)
class Http(models.AbstractModel):
    """Website routing layer (new-API variant): language, GeoIP, rerouting."""
    _inherit = 'ir.http'

    # Maximum number of internal reroutes before giving up (loop guard).
    rerouting_limit = 10
    # Lazily-probed GeoIP resolver: None = not probed yet, False = unavailable.
    _geoip_resolver = None

    @classmethod
    def _get_converters(cls):
        """ Get the converters list for custom url pattern werkzeug need to
            match Rule. This override adds the website ones.
        """
        return dict(
            super(Http, cls)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    @classmethod
    def _auth_method_public(cls):
        """ If no user logged, set the public user of current website, or default
            public user as request uid.
            After this method `request.env` can be called, since the `request.uid` is
            set. The `env` lazy property of `request` will be correct.
        """
        if not request.session.uid:
            env = api.Environment(request.cr, SUPERUSER_ID, request.context)
            website = env['website'].get_current_website()
            if website:
                request.uid = website.user_id.id
            else:
                request.uid = env.ref('base.public_user').id
        else:
            request.uid = request.session.uid

    # Substrings identifying well-known crawlers in the User-Agent header.
    bots = "bot|crawl|slurp|spider|curl|wget|facebookexternalhit".split("|")

    @classmethod
    def is_a_bot(cls):
        """Return True when the request User-Agent looks like a crawler."""
        # We don't use regexp and ustr voluntarily
        # timeit has been done to check the optimum method
        user_agent = request.httprequest.environ.get('HTTP_USER_AGENT',
                                                     '').lower()
        try:
            return any(bot in user_agent for bot in cls.bots)
        except UnicodeDecodeError:
            return any(
                bot in user_agent.encode('ascii', 'ignore')
                for bot in cls.bots)

    @classmethod
    def get_nearest_lang(cls, lang):
        """Return the closest installed website language code for ``lang``,
        or False when nothing matches."""
        # Try to find a similar lang. Eg: fr_BE and fr_FR
        short = lang.partition('_')[0]
        short_match = False
        for code, dummy in request.website.get_languages():
            if code == lang:
                return lang
            if not short_match and code.startswith(short):
                short_match = code
        return short_match

    @classmethod
    def _geoip_setup_resolver(cls):
        """Probe and cache the GeoIP resolver the first time it is needed."""
        if cls._geoip_resolver is None:
            try:
                import GeoIP
                # updated database can be downloaded on MaxMind website
                # http://dev.maxmind.com/geoip/legacy/install/city/
                geofile = config.get('geoip_database')
                if os.path.exists(geofile):
                    cls._geoip_resolver = GeoIP.open(geofile,
                                                     GeoIP.GEOIP_STANDARD)
                else:
                    cls._geoip_resolver = False
                    logger.warning(
                        'GeoIP database file %r does not exists, apt-get install geoip-database-contrib or download it from http://dev.maxmind.com/geoip/legacy/install/city/',
                        geofile)
            except ImportError:
                cls._geoip_resolver = False

    @classmethod
    def _geoip_resolve(cls):
        """Store the GeoIP record for the client address in the session."""
        if 'geoip' not in request.session:
            record = {}
            if cls._geoip_resolver and request.httprequest.remote_addr:
                record = cls._geoip_resolver.record_by_addr(
                    request.httprequest.remote_addr) or {}
            request.session['geoip'] = record

    @classmethod
    def get_page_key(cls):
        """Return the cache key identifying the rendered page for this
        user/lang/path combination."""
        return (cls._name, "cache", request.uid, request.lang,
                request.httprequest.full_path)

    @classmethod
    def _dispatch(cls):
        """ Before executing the endpoint method, add website params on request, such as
                - current website (record)
                - multilang support (set on cookies)
                - geoip dict data are added in the session
            Then follow the parent dispatching.
            Reminder :  Do not use `request.env` before authentication phase, otherwise the env
                        set on request will be created with uid=None (and it is a lazy property)
        """
        # first_pass is True only for the original request, not reroutes.
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            # Collapse double slashes early with a permanent redirect.
            if request.httprequest.method == 'GET' and '//' in request.httprequest.path:
                new_url = request.httprequest.path.replace(
                    '//', '/') + '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(new_url, 301)
            func, arguments = cls._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        request.website_multilang = (
            request.website_enabled and func and
            func.routing.get('multilang', func.routing['type'] == 'http'))

        cls._geoip_setup_resolver()
        cls._geoip_resolve()

        # For website routes (only), add website params on `request`
        cook_lang = request.httprequest.cookies.get('website_lang')
        if request.website_enabled:
            try:
                if func:
                    cls._authenticate(func.routing['auth'])
                elif request.uid is None:
                    cls._auth_method_public()
            except Exception as e:
                return cls._handle_exception(e)

            request.redirect = lambda url, code=302: werkzeug.utils.redirect(
                url_for(url), code)
            request.website = request.env['website'].get_current_website(
            )  # can use `request.env` since auth methods are called
            context = dict(request.context)
            context['website_id'] = request.website.id
            langs = [lg[0] for lg in request.website.get_languages()]
            path = request.httprequest.path.split('/')
            if first_pass:
                # A language prefix can only appear on unresolved routes.
                nearest_lang = not func and cls.get_nearest_lang(path[1])
                url_lang = nearest_lang and path[1]
                # cookie > browser preference > website default
                preferred_lang = ((cook_lang if cook_lang in langs else False)
                                  or cls.get_nearest_lang(request.lang)
                                  or request.website.default_lang_code)
                is_a_bot = cls.is_a_bot()
                request.lang = context['lang'] = nearest_lang or preferred_lang
                # if lang in url but not the displayed or default language --> change or remove
                # or no lang in url, and lang to display not the default language --> add lang
                # and not a POST request
                # and not a bot or bot but default lang in url
                # NOTE(review): because of and/or precedence the POST guard
                # binds only to the second alternative — confirm intended.
                if ((url_lang and (url_lang != request.lang or url_lang == request.website.default_lang_code))
                        or (not url_lang and request.website_multilang and request.lang != request.website.default_lang_code)
                        and request.httprequest.method != 'POST') \
                        and (not is_a_bot or (url_lang and url_lang == request.website.default_lang_code)):
                    if url_lang:
                        path.pop(1)
                    if request.lang != request.website.default_lang_code:
                        path.insert(1, request.lang)
                    path = '/'.join(path) or '/'
                    redirect = request.redirect(
                        path + '?' + request.httprequest.query_string)
                    redirect.set_cookie('website_lang', request.lang)
                    request.context = context
                    return redirect
                elif url_lang:
                    # Strip the language prefix and re-dispatch internally,
                    # dropping any uid so authentication is redone.
                    request.uid = None
                    path.pop(1)
                    request.context = context
                    return cls.reroute('/'.join(path) or '/')
            if path[1] == request.website.default_lang_code:
                context['edit_translations'] = False
            if not context.get('tz'):
                context['tz'] = request.session.get('geoip',
                                                    {}).get('time_zone')
            # bind modified context
            request.context = context
            request.website = request.website.with_context(context)
            # removed cache for auth public
            request.cache_save = False
        resp = super(Http, cls)._dispatch()
        if request.website_enabled and cook_lang != request.lang and hasattr(
                resp, 'set_cookie'):
            resp.set_cookie('website_lang', request.lang)
        return resp

    @classmethod
    def reroute(cls, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and more than ``rerouting_limit``
        hops, then patches the WSGI environ and dispatches again.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > cls.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return cls._dispatch()

    @classmethod
    def _postprocess_args(cls, arguments, rule):
        """Rebind placeholder-uid records and 301-redirect to canonical URLs.

        Applies to multilang website GET/HEAD requests only; URL build
        failures are delegated to the exception handler as a 404.
        """
        super(Http, cls)._postprocess_args(arguments, rule)
        for key, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, models.BaseModel) and isinstance(
                    val._uid, RequestUID):
                arguments[key] = val.sudo(request.uid)
        try:
            _, path = rule.build(arguments)
            assert path is not None
        except Exception, e:
            return cls._handle_exception(e, code=404)
        if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path, code=301)
class ir_http(orm.AbstractModel):
    """Website routing layer (multi-website variant): language and GeoIP."""
    _inherit = 'ir.http'

    # Maximum number of internal reroutes before giving up (loop guard).
    rerouting_limit = 10
    # Lazily-probed GeoIP resolver: None = not probed yet, False = unavailable.
    geo_ip_resolver = None

    def _get_converters(self):
        """Add the website URL converters to the standard ones."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _auth_method_public(self):
        """Authenticate as the current website's public user (matched by
        the Host header), falling back to the generic public user."""
        if not request.session.uid:
            domain_name = request.httprequest.environ.get('HTTP_HOST',
                                                          '').split(':')[0]
            website_id = self.pool['website']._get_current_website_id(
                request.cr,
                openerp.SUPERUSER_ID,
                domain_name,
                context=request.context)
            if website_id:
                request.uid = self.pool['website'].browse(
                    request.cr, openerp.SUPERUSER_ID, website_id,
                    request.context).user_id.id
            else:
                request.uid = self.pool['ir.model.data'].xmlid_to_res_id(
                    request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
        else:
            request.uid = request.session.uid

    def _dispatch(self):
        """Resolve the handler, set website/lang/GeoIP state, then dispatch.

        On the first pass of a multilang request the display language is
        derived from the URL prefix, the browser preference or the
        website default, redirecting/rerouting as needed.
        """
        # first_pass is True only for the original request, not reroutes.
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        # Multilang defaults to True here (older behaviour).
        request.website_multilang = request.website_enabled and func and func.routing.get(
            'multilang', True)

        if 'geoip' not in request.session:
            record = {}
            if self.geo_ip_resolver is None:
                try:
                    import GeoIP
                    # updated database can be downloaded on MaxMind website
                    # http://dev.maxmind.com/geoip/legacy/install/city/
                    geofile = config.get('geoip_database')
                    if os.path.exists(geofile):
                        self.geo_ip_resolver = GeoIP.open(
                            geofile, GeoIP.GEOIP_STANDARD)
                    else:
                        self.geo_ip_resolver = False
                        logger.warning(
                            'GeoIP database file %r does not exists', geofile)
                except ImportError:
                    self.geo_ip_resolver = False
            if self.geo_ip_resolver and request.httprequest.remote_addr:
                record = self.geo_ip_resolver.record_by_addr(
                    request.httprequest.remote_addr) or {}
            request.session['geoip'] = record

        if request.website_enabled:
            try:
                if func:
                    self._authenticate(func.routing['auth'])
                else:
                    self._auth_method_public()
            except Exception as e:
                return self._handle_exception(e)

            request.redirect = lambda url, code=302: werkzeug.utils.redirect(
                url_for(url), code)
            request.website = request.registry['website'].get_current_website(
                request.cr, request.uid, context=request.context)
            request.context['website_id'] = request.website.id
            langs = [lg[0] for lg in request.website.get_languages()]
            path = request.httprequest.path.split('/')
            if first_pass:
                if request.website_multilang:
                    # If the url doesn't contains the lang and that it's the first connection, we to retrieve the user preference if it exists.
                    if not path[
                            1] in langs and not request.httprequest.cookies.get(
                                'session_id'):
                        if request.lang not in langs:
                            # Try to find a similar lang. Eg: fr_BE and fr_FR
                            short = request.lang.split('_')[0]
                            langs_withshort = [
                                lg[0]
                                for lg in request.website.get_languages()
                                if lg[0].startswith(short)
                            ]
                            if len(langs_withshort):
                                request.lang = langs_withshort[0]
                            else:
                                request.lang = request.website.default_lang_code
                        # We redirect with the right language in url
                        if request.lang != request.website.default_lang_code:
                            path.insert(1, request.lang)
                            path = '/'.join(path) or '/'
                            return request.redirect(
                                path + '?' + request.httprequest.query_string)
                else:
                    request.lang = request.website.default_lang_code

            request.context['lang'] = request.lang
            if not request.context.get('tz'):
                request.context['tz'] = request.session['geoip'].get(
                    'time_zone')
            if not func:
                # No handler matched: strip a leading language code (if any)
                # and reroute through the website handlers.
                if path[1] in langs:
                    request.lang = request.context['lang'] = path.pop(1)
                    path = '/'.join(path) or '/'
                    if request.lang == request.website.default_lang_code:
                        # If language is in the url and it is the default language, redirect
                        # to url without language so google doesn't see duplicate content
                        return request.redirect(
                            path + '?' + request.httprequest.query_string,
                            code=301)
                return self.reroute(path)
            # bind modified context
            request.website = request.website.with_context(request.context)
        return super(ir_http, self)._dispatch()

    def reroute(self, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and more than ``rerouting_limit``
        hops, then patches the WSGI environ and dispatches again.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Rebind placeholder-uid records and 301-redirect to canonical URLs.

        Applies to multilang website GET/HEAD requests only; URL build
        failures are delegated to the exception handler as a 404.
        """
        super(ir_http, self)._postprocess_args(arguments, rule)
        for key, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.BaseModel) and isinstance(
                    val._uid, RequestUID):
                arguments[key] = val.sudo(request.uid)
        try:
            _, path = rule.build(arguments)
            assert path is not None
        except Exception, e:
            return self._handle_exception(e, code=404)
        if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path, code=301)
class ir_http(orm.AbstractModel):
    """Website-aware HTTP dispatcher.

    Extends ``ir.http`` with website routing features: model/page URL
    converters, a public-user fallback for unauthenticated requests,
    language detection with URL prefixing and a ``website_lang`` cookie,
    GeoIP resolution, and a simple in-memory cache for public pages.
    """
    _inherit = 'ir.http'

    # Maximum number of internal reroutes for a single request (loop guard).
    rerouting_limit = 10
    # Shared GeoIP resolver; None = not yet initialised, False = unavailable.
    _geoip_resolver = None

    def _get_converters(self):
        """Register the website-specific URL converters on top of the base ones."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _auth_method_public(self):
        """Bind the request to a public user when there is no session uid.

        Prefers the user configured on the current website (``website.user_id``),
        falling back to the generic ``base.public_user`` record.
        """
        if not request.session.uid:
            website = self.pool['website'].get_current_website(
                request.cr, openerp.SUPERUSER_ID, context=request.context)
            if website and website.user_id:
                request.uid = website.user_id.id
            else:
                request.uid = self.pool['ir.model.data'].xmlid_to_res_id(
                    request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
        else:
            request.uid = request.session.uid

    # Substrings identifying well-known crawlers inside a User-Agent header.
    bots = "bot|crawl|slurp|spider|curl|wget|facebookexternalhit".split("|")

    def is_a_bot(self):
        """Return True when the request's User-Agent looks like a crawler."""
        # We don't use regexp and ustr voluntarily
        # timeit has been done to check the optimum method
        ua = request.httprequest.environ.get('HTTP_USER_AGENT', '').lower()
        try:
            return any(bot in ua for bot in self.bots)
        except UnicodeDecodeError:
            # Non-ASCII UA string: compare on the ASCII-stripped form instead.
            return any(bot in ua.encode('ascii', 'ignore') for bot in self.bots)

    def get_nearest_lang(self, lang):
        """Return the installed website language closest to ``lang``.

        An exact code match wins; otherwise the first language sharing the
        same short code is returned (e.g. fr_BE matches fr_FR). Returns
        False when nothing matches.
        """
        # Try to find a similar lang. Eg: fr_BE and fr_FR
        short = lang.partition('_')[0]
        short_match = False
        for code, name in request.website.get_languages():
            if code == lang:
                return lang
            if not short_match and code.startswith(short):
                short_match = code
        return short_match

    def _geoip_setup_resolver(self):
        """Lazily open the GeoIP database configured as ``geoip_database``."""
        if self._geoip_resolver is None:
            geofile = config.get('geoip_database')
            try:
                self._geoip_resolver = GeoIPResolver.open(geofile) or False
            except Exception as e:
                logger.warning('Cannot load GeoIP: %s', ustr(e))

    def _geoip_resolve(self):
        """Cache the GeoIP record for the client address in the session."""
        if 'geoip' not in request.session:
            record = {}
            if self._geoip_resolver and request.httprequest.remote_addr:
                record = self._geoip_resolver.resolve(
                    request.httprequest.remote_addr) or {}
            request.session['geoip'] = record

    def get_page_key(self):
        """Return the cache key identifying the current page response."""
        return (self._name, "cache", request.uid, request.lang,
                request.httprequest.full_path)

    def _dispatch(self):
        """Dispatch the request with website language handling and page cache.

        Handles, in order: double-slash URL normalization, handler lookup,
        GeoIP resolution, authentication, language detection/redirection on
        the first pass, timezone defaulting from GeoIP, and a response cache
        for pages served to the public user.
        """
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            # Normalize accidental '//' in GET URLs with a permanent redirect.
            if request.httprequest.method == 'GET' and '//' in request.httprequest.path:
                new_url = request.httprequest.path.replace(
                    '//', '/') + '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(new_url, 301)
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        request.website_multilang = (
            request.website_enabled and func
            and func.routing.get('multilang', func.routing['type'] == 'http'))
        self._geoip_setup_resolver()
        self._geoip_resolve()
        cook_lang = request.httprequest.cookies.get('website_lang')
        if request.website_enabled:
            try:
                if func:
                    self._authenticate(func.routing['auth'])
                elif request.uid is None:
                    self._auth_method_public()
            except Exception as e:
                return self._handle_exception(e)
            request.redirect = lambda url, code=302: werkzeug.utils.redirect(
                url_for(url), code)
            request.website = request.registry['website'].get_current_website(
                request.cr, request.uid, context=request.context)
            request.context['website_id'] = request.website.id
            langs = [lg[0] for lg in request.website.get_languages()]
            path = request.httprequest.path.split('/')
            if first_pass:
                is_a_bot = self.is_a_bot()
                nearest_lang = not func and self.get_nearest_lang(path[1])
                url_lang = nearest_lang and path[1]
                # Displayed language: cookie first, then Accept-Language
                # (skipped for bots), then the website default.
                preferred_lang = ((cook_lang if cook_lang in langs else False)
                                  or (not is_a_bot
                                      and self.get_nearest_lang(request.lang))
                                  or request.website.default_lang_code)
                request.lang = request.context[
                    'lang'] = nearest_lang or preferred_lang
                # if lang in url but not the displayed or default language --> change or remove
                # or no lang in url, and lang to dispay not the default language --> add lang
                # and not a POST request
                # and not a bot or bot but default lang in url
                # NOTE(review): `and` binds tighter than `or`, so the POST
                # guard only applies to the second alternative — looks
                # suspicious but matches upstream; confirm before changing.
                if ((url_lang and (url_lang != request.lang
                                   or url_lang == request.website.default_lang_code))
                        or (not url_lang and request.website_multilang
                            and request.lang != request.website.default_lang_code)
                        and request.httprequest.method != 'POST') \
                        and (not is_a_bot
                             or (url_lang and url_lang == request.website.default_lang_code)):
                    if url_lang:
                        path.pop(1)
                    if request.lang != request.website.default_lang_code:
                        path.insert(1, request.lang)
                    path = '/'.join(path) or '/'
                    redirect = request.redirect(
                        path + '?' + request.httprequest.query_string)
                    redirect.set_cookie('website_lang', request.lang)
                    return redirect
                elif url_lang:
                    # Language prefix already correct: strip it and reroute
                    # internally so handlers see the unprefixed path.
                    request.uid = None
                    path.pop(1)
                    return self.reroute('/'.join(path) or '/')
            if request.lang == request.website.default_lang_code:
                request.context['edit_translations'] = False
            if not request.context.get('tz'):
                # Default the timezone from the GeoIP record, discarding
                # values pytz does not recognize.
                request.context['tz'] = request.session.get(
                    'geoip', {}).get('time_zone')
                try:
                    pytz.timezone(request.context['tz'] or '')
                except pytz.UnknownTimeZoneError:
                    request.context.pop('tz')
            # bind modified context
            request.website = request.website.with_context(request.context)
        # cache for auth public
        cache_time = getattr(func, 'routing', {}).get('cache')
        cache_enable = (cache_time and request.httprequest.method == "GET"
                        and request.website.user_id.id == request.uid)
        cache_response = None
        if cache_enable:
            key = self.get_page_key()
            try:
                r = self.pool.cache[key]
                if r['time'] + cache_time > time.time():
                    cache_response = openerp.http.Response(
                        r['content'], mimetype=r['mimetype'])
                else:
                    # Entry expired: drop it so it can be regenerated.
                    del self.pool.cache[key]
            except KeyError:
                pass
        if cache_response:
            request.cache_save = False
            resp = cache_response
        else:
            request.cache_save = key if cache_enable else False
            resp = super(ir_http, self)._dispatch()
        # Persist the displayed language in the cookie when it changed.
        if request.website_enabled and cook_lang != request.lang and hasattr(
                resp, 'set_cookie'):
            resp.set_cookie('website_lang', request.lang)
        return resp

    def reroute(self, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and against exceeding
        ``rerouting_limit`` hops.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Resolve uid placeholders in arguments and canonicalize the URL.

        Rebuilds the URL from the rule and issues a permanent redirect when
        the canonical (language-prefixed) path differs from the requested
        one. Returns a 404 handler result when the rule cannot be rebuilt.
        """
        super(ir_http, self)._postprocess_args(arguments, rule)
        for key, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.BaseModel) and isinstance(
                    val._uid, RequestUID):
                arguments[key] = val.sudo(request.uid)
        try:
            _, path = rule.build(arguments)
            assert path is not None
        # FIX: was legacy `except Exception, e:` (Python-2-only syntax),
        # inconsistent with the `as e` form used elsewhere in this class.
        except Exception as e:
            return self._handle_exception(e, code=404)
        if getattr(request, 'website_multilang',
                   False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path, code=301)
class ir_http(orm.AbstractModel):
    """Website HTTP dispatcher (legacy variant).

    Older revision of the website ``ir.http`` extension: registers the
    model/page converters, falls back to the generic public user, resolves
    GeoIP through the legacy ``GeoIP`` C module, and strips a leading
    language code from unmatched URLs before rerouting.
    """
    _inherit = 'ir.http'

    # Maximum number of internal reroutes for a single request (loop guard).
    rerouting_limit = 10
    # Legacy GeoIP handle; None = not yet initialised, False = unavailable.
    geo_ip_resolver = None

    def _get_converters(self):
        """Register the website-specific URL converters on top of the base ones."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _auth_method_public(self):
        """Bind the request to ``base.public_user`` when there is no session uid."""
        # TODO: select user_id from matching website
        if not request.session.uid:
            request.uid = self.pool['ir.model.data'].xmlid_to_res_id(
                request.cr, openerp.SUPERUSER_ID, 'base.public_user')
        else:
            request.uid = request.session.uid

    def _dispatch(self):
        """Dispatch the request with GeoIP resolution and language rerouting."""
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        request.website_multilang = request.website_enabled and func and func.routing.get(
            'multilang', True)
        if 'geoip' not in request.session:
            record = {}
            if self.geo_ip_resolver is None:
                try:
                    import GeoIP
                    # updated database can be downloaded on MaxMind website
                    # http://dev.maxmind.com/geoip/legacy/install/city/
                    geofile = config.get('geoip_database',
                                         '/usr/share/GeoIP/GeoLiteCity.dat')
                    if os.path.exists(geofile):
                        self.geo_ip_resolver = GeoIP.open(
                            geofile, GeoIP.GEOIP_STANDARD)
                    else:
                        self.geo_ip_resolver = False
                        logger.warning(
                            'GeoIP database file %r does not exists', geofile)
                except ImportError:
                    self.geo_ip_resolver = False
            if self.geo_ip_resolver and request.httprequest.remote_addr:
                record = self.geo_ip_resolver.record_by_addr(
                    request.httprequest.remote_addr) or {}
            request.session['geoip'] = record
        if request.website_enabled:
            if func:
                self._authenticate(func.routing['auth'])
            else:
                self._auth_method_public()
            request.redirect = lambda url: werkzeug.utils.redirect(url_for(url))
            request.website = request.registry['website'].get_current_website(
                request.cr, request.uid, context=request.context)
            if first_pass:
                request.lang = request.website.default_lang_code
            request.context['lang'] = request.lang
            if not func:
                # No handler matched: the URL may carry a language prefix.
                path = request.httprequest.path.split('/')
                langs = [lg[0] for lg in request.website.get_languages()]
                if path[1] in langs:
                    request.lang = request.context['lang'] = path.pop(1)
                    path = '/'.join(path) or '/'
                    if request.lang == request.website.default_lang_code:
                        # If language is in the url and it is the default language, redirect
                        # to url without language so google doesn't see duplicate content
                        return request.redirect(
                            path + '?' + request.httprequest.query_string)
                    return self.reroute(path)
        return super(ir_http, self)._dispatch()

    def reroute(self, path):
        """Re-dispatch the request internally under ``path``.

        Guards against rerouting loops and against exceeding
        ``rerouting_limit`` hops.
        """
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Resolve uid placeholders in arguments and canonicalize the URL.

        Rebuilds the URL from the rule and redirects when the canonical
        (language-prefixed) path differs from the requested one. Returns a
        404 handler result when the rule cannot be rebuilt.
        """
        super(ir_http, self)._postprocess_args(arguments, rule)
        for arg, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.browse_record) and isinstance(
                    val._uid, RequestUID):
                val._uid = request.uid
        try:
            _, path = rule.build(arguments)
            assert path is not None
        # FIX: was legacy `except Exception, e:` (Python-2-only syntax),
        # inconsistent with the `as e` form used elsewhere in this file.
        except Exception as e:
            return self._handle_exception(e, code=404)
        if getattr(request, 'website_multilang',
                   False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path)
def import_livejournal(self, username, password, import_what=IMPORT_JOURNAL, community='', security_custom=SECURITY_PROTECTED, categories=[], getcomments=True): """Import from LiveJournal using specified parameters.""" yield _(u'<p>Beginning LiveJournal import. Attempting to login...</p>') if import_what != IMPORT_JOURNAL: usejournal = community else: usejournal = None lj = LiveJournalConnect(username, password, usejournal) result = lj.login(getmoods=0) authors = { username: Author(username=username, email='', real_name=unicode(result['fullname'], 'utf-8')) } yield _(u'<p>Your name: <strong>%s</strong></p>') % \ authors[username].real_name moodlist = dict([(int(m['id']), unicode(str(m['name']), 'utf-8')) for m in result['moods']]) result = lj.getusertags() tags = dict([ (tag, Tag(gen_slug(tag), tag)) for tag in [unicode(t['name'], 'utf-8') for t in result['tags']] ]) yield _(u'<p><strong>Tags:</strong> %s</p>') % _(u', ').join( tags.keys()) ##result = lj.getdaycounts() ##daycounts = [(date(*strptime(item['date'], '%Y-%m-%d')[0:3]), ## item['count']) for item in result['daycounts']] ##totalposts = sum([x[1] for x in daycounts]) ##yield _(u'<p>Found <strong>%d</strong> posts on <strong>%d days'\ ## u'</strong> between %s and %s.</p>') % ( ## totalposts, ## len(daycounts), ## daycounts[0][0].strftime('%Y-%m-%d'), ## daycounts[-1][0].strftime('%Y-%m-%d')) posts = {} # Process implemented as per # http://www.livejournal.com/doc/server/ljp.csp.entry_downloading.html yield _(u'<ul>') yield _(u'<li>Getting metadata...</li>') result = lj.syncitems() sync_items = [] sync_total = int(result['total']) yield _(u'<li>%d items...</li>') % sync_total sync_items.extend(result['syncitems']) while len(sync_items) < sync_total: lastsync = max([ parse_lj_date(item['time']) for item in sync_items ]).strftime('%Y-%m-%d %H:%M:%S') yield _(u'<li>Got %d items up to %s...</li>') % (len(sync_items), lastsync) result = lj.syncitems(lastsync=lastsync) sync_items.extend(result['syncitems']) 
yield _(u'<li>Got all %d items.</li>') % len(sync_items) yield _(u'</ul>') #: Discard non-journal items. sync_items = [i for i in sync_items if i['item'].startswith('L-')] yield _(u'<p>Downloading <strong>%d</strong> entries...</p>') % len( sync_items) # Track what items we need to get sync_data = {} for item in sync_items: sync_data[int(item['item'][2:])] = { 'downloaded': False, 'time': parse_lj_date(item['time']) } # Start downloading bodies sync_left = [ sync_data[x] for x in sync_data if sync_data[x]['downloaded'] is False ] if sync_left: lastsync = (min([x['time'] for x in sync_left]) - timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S') while len(sync_left) > 0: yield _(u'<p>Getting a batch...</p>') try: result = lj.getevents(selecttype='syncitems', lastsync=lastsync) except xmlrpclib.Fault, fault: if fault.faultCode == 406: # LJ doesn't like us. Go back one second and try again. yield _(u'<p>LiveJournal says we are retrying the same '\ u'date and time too often. Trying again with the '\ u'time set behind by one second.</p>') lastsync = ( parse_lj_date(lastsync) - timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S') continue else: yield _(u'<p>Process failed. LiveJournal says: '\ u'(%d) %s</p>') % (fault.faultCode, fault.faultString) break yield _(u'<ol start="%d">') % (len(posts) + 1) for item in result['events']: if sync_data[item['itemid']]['downloaded'] is True: # Dupe, thanks to our lastsync time manipulation. Skip. continue sync_data[item['itemid']]['downloaded'] = True sync_data[item['itemid']]['item'] = item subject = item.get('subject', '') if isinstance(subject, xmlrpclib.Binary): subject = subject.data subject = unicode(str(subject), 'utf-8') #: LiveJournal subjects may contain HTML tags. Strip them and #: convert HTML entities to Unicode equivalents. 
subject = unescape( tag_re.sub('', ljuser_re.sub('\\2', subject))) poster = item.get('poster', username) if poster != username and import_what != IMPORT_COMMUNITY_ALL: # Discard, since we don't want this. yield _( u'<li><strong>Discarded:</strong> %s <em>(by %s)</em></li>' ) % (subject, poster) continue if poster not in authors: authors[poster] = Author(poster, '', '') # Map LiveJournal security codes to Zine status flags security = item.get('security', 'public') if security == 'usemask' and item['allowmask'] == 1: security = 'friends' if security == 'usemask': status = { SECURITY_DISCARD: None, SECURITY_PUBLIC: STATUS_PUBLISHED, SECURITY_PROTECTED: STATUS_PROTECTED, SECURITY_PRIVATE: STATUS_PRIVATE }[security_custom] if status is None: yield _(u'<li><strong>Discarded (masked):</strong> '\ u'%s</li>') % subject continue else: status = { 'public': STATUS_PUBLISHED, 'friends': STATUS_PROTECTED, 'private': STATUS_PRIVATE, }[security] #: Read time as local timezone and then convert to UTC. Zine #: doesn't seem to like non-UTC timestamps in imports. 
pub_date = get_timezone().localize( parse_lj_date(item['eventtime'])).astimezone(UTC) itemtags = [ t.strip() for t in unicode( item['props'].get('taglist', ''), 'utf-8').split(',') ] while '' in itemtags: itemtags.remove('') itemtags = [tags[t] for t in itemtags] extras = {} if 'current_music' in item['props']: if isinstance(item['props']['current_music'], xmlrpclib.Binary): extras['current_music'] = unicode( item['props']['current_music'].data, 'utf-8') else: extras['current_music'] = unicode( str(item['props']['current_music']), 'utf-8') if 'current_mood' in item['props']: if isinstance(item['props']['current_mood'], xmlrpclib.Binary): extras['current_mood'] = unicode( item['props']['current_mood'].data, 'utf-8') else: extras['current_mood'] = unicode( str(item['props']['current_mood']), 'utf-8') elif 'current_moodid' in item['props']: extras['current_mood'] = moodlist[int( item['props']['current_moodid'])] if 'current_coords' in item['props']: if isinstance(item['props']['current_coords'], xmlrpclib.Binary): extras['current_coords'] = unicode( item['props']['current_coords'].data, 'utf-8') else: extras['current_coords'] = unicode( str(item['props']['current_coords']), 'utf-8') if 'current_location' in item['props']: if isinstance(item['props']['current_location'], xmlrpclib.Binary): extras['current_location'] = unicode( item['props']['current_location'].data, 'utf-8') else: extras['current_location'] = unicode( str(item['props']['current_location']), 'utf-8') if 'picture_keyword' in item['props']: if isinstance(item['props']['picture_keyword'], xmlrpclib.Binary): extras['picture_keyword'] = unicode( item['props']['picture_keyword'].data, 'utf-8') else: extras['picture_keyword'] = unicode( str(item['props']['picture_keyword']), 'utf-8') extras['lj_post_id'] = item['itemid'] extras['original_url'] = item['url'] posts[item['itemid']] = Post( #: Generate slug. If there's no subject, use '-'+itemid. #: Why the prefix? 
Because if the user wants %year%/%month%/ #: for the post url format and we end up creating a slug #: like 2003/12/1059, it will conflict with the archive #: access path format of %Y/%m/%d and the post will become #: inaccessible, since archive paths take higher priority #: to slugs in zine's urls.py. slug=gen_timestamped_slug( gen_slug(subject) or ('-' + str(item['itemid'])), 'entry', pub_date), title=subject, link=item['url'], pub_date=pub_date, author=authors[poster], intro='', body=isinstance(item['event'], xmlrpclib.Binary) and unicode(item['event'].data, 'utf-8') or url_unquote_plus(str(item['event'])), tags=itemtags, categories=[Category(x) for x in categories], comments=[], # Will be updated later. comments_enabled=not item['props'].get( 'opt_nocomments', False), pings_enabled=False, # LiveJournal did not support pings uid='livejournal;%s;%d' % (usejournal or username, item['itemid']), parser=item['props'].get('opt_preformatted', False) and 'html' or 'livejournal', status=status, extra=extras) yield _(u'<li>%s <em>(by %s on %s)</em></li>') % ( subject, poster, pub_date.strftime('%Y-%m-%d %H:%M')) # Done processing batch. yield _(u'</ol>') sync_left = [ sync_data[x] for x in sync_data if sync_data[x]['downloaded'] is False ] if sync_left: lastsync = (min([x['time'] for x in sync_left]) - timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')