def _parse_pairs():
    """Yield decoded ``(key, value)`` cookie pairs.

    Reads ``header``, ``charset`` and ``errors`` from the enclosing
    scope; pairs whose decoded key is empty are skipped entirely.
    """
    for raw_key, raw_val in _cookie_parse_impl(header):
        decoded_key = to_unicode(raw_key, charset, errors,
                                 allow_none_charset=True)
        if not decoded_key:
            continue
        decoded_val = to_unicode(raw_val, charset, errors,
                                 allow_none_charset=True)
        yield try_coerce_native(decoded_key), decoded_val
def __init__(self, exc_type, exc_value, tb):
    """Snapshot one traceback frame for later rendering.

    :param exc_type: exception class (part of the traceback-walking
        call convention; not used directly here).
    :param exc_value: exception instance (likewise unused here).
    :param tb: the traceback object whose frame is captured.
    """
    self.lineno = tb.tb_lineno
    self.function_name = tb.tb_frame.f_code.co_name
    self.locals = tb.tb_frame.f_locals
    self.globals = tb.tb_frame.f_globals

    fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
    # Map a compiled .pyc/.pyo filename back to its .py source.
    if fn[-4:] in ('.pyo', '.pyc'):
        fn = fn[:-1]
    # if it's a file on the file system resolve the real filename.
    if os.path.isfile(fn):
        fn = os.path.realpath(fn)
    self.filename = to_unicode(fn, get_filesystem_encoding())
    self.module = self.globals.get('__name__')
    self.loader = self.globals.get('__loader__')
    self.code = tb.tb_frame.f_code

    # support for paste's traceback extensions
    self.hide = self.locals.get('__traceback_hide__', False)
    info = self.locals.get('__traceback_info__')
    if info is not None:
        try:
            info = text_type(info)
        except UnicodeError:
            # Python 2 fallback: coerce a byte string, replacing
            # undecodable bytes instead of failing.
            info = str(info).decode('utf-8', 'replace')
    self.info = info
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding parameter can
       be provided.

    :param environ: the WSGI environment that is checked.
    """
    # NOTE(review): ``str.split`` always yields at least one element,
    # so this guard is always true; an empty PATH_INFO therefore
    # returns '' rather than ``None`` — kept for compatibility.
    head = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if head:
        return to_unicode(wsgi_get_bytes(head[0]), charset, errors,
                          allow_none_charset=True)
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL
    because it contains unsafe characters like ' ' and so on.  This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given
        as unicode string.
    """
    # First step is to switch to unicode processing and to convert
    # backslashes (which are invalid in URLs anyways) to slashes.  This
    # is consistent with what Chrome does.
    s = to_unicode(s, charset, 'replace').replace('\\', '/')

    # For the specific case that we look like a malformed windows URL
    # we want to fix this up manually:
    if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
        s = 'file:///' + s[7:]

    parts = url_parse(s)
    quoted_path = url_quote(parts.path, charset, safe='/%+$!*\'(),')
    quoted_qs = url_quote_plus(parts.query, charset, safe=':&%=+$!*\'(),')
    quoted_anchor = url_quote_plus(parts.fragment, charset,
                                   safe=':&%=+$!*\'(),')
    return to_native(url_unparse((parts.scheme, parts.encode_netloc(),
                                  quoted_path, quoted_qs, quoted_anchor)))
def iri_to_uri(iri, charset='utf-8', errors='strict'):
    r"""Convert a unicode based IRI to an acceptable ASCII URI.

    Werkzeug always uses utf-8 URLs internally because this is what
    browsers and HTTP do as well.

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th'

    .. versionadded:: 0.6

    :param iri: The IRI to convert; a tuple is first reassembled with
        :func:`url_unparse`.
    :param charset: The charset for the URI.
    :param errors: The encoding error handling.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)
    parsed = url_parse(to_unicode(iri, charset, errors))

    # The netloc is IDNA-encoded; the other components are
    # percent-quoted with component-specific safe characters.
    host = parsed.encode_netloc().decode('ascii')
    quoted_path = url_quote(parsed.path, charset, errors, '/:~+%')
    quoted_query = url_quote(parsed.query, charset, errors,
                             '%&[]:;$*()+,!?*/=')
    quoted_fragment = url_quote(parsed.fragment, charset, errors,
                                '=%&[]:;$()+,!?*/')
    return to_native(url_unparse((parsed.scheme, host, quoted_path,
                                  quoted_query, quoted_fragment)))
def __call__(self, *path, **query):
    """Join *path* segments onto the base URL and append *query* as a
    query string.

    A trailing ``dict`` positional argument may be passed instead of
    keyword arguments (but not both).  A trailing underscore on a
    keyword name is stripped, so ``class_`` produces ``class``.
    """
    if path and isinstance(path[-1], dict):
        if query:
            raise TypeError("keyword arguments and query-dicts "
                            "can't be combined")
        query, path = path[-1], path[:-1]
    elif query:
        query = {(name.endswith("_") and name[:-1] or name): val
                 for name, val in query.items()}

    quoted = [to_unicode(url_quote(seg, self.charset), "ascii")
              for seg in path if seg is not None]
    joined = "/".join(quoted).lstrip("/")

    result = self.base
    if joined:
        if not result.endswith("/"):
            result += "/"
        result = url_join(result, "./" + joined)
    if query:
        encoded = url_encode(query, self.charset, sort=self.sort,
                             key=self.key)
        result += "?" + to_unicode(encoded, "ascii")
    return to_native(result)
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""Convert a URI in a given charset to an IRI.

    >>> uri_to_iri(b'http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri(b'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th')
    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: The URI to convert; a tuple is first reassembled with
        :func:`url_unparse`.
    :param charset: The charset of the URI.
    :param errors: The error handling on decode.
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    parsed = url_parse(to_unicode(uri, charset))

    # Unquote each component with its own set of reserved characters
    # that must stay percent-encoded.
    plain_path = url_unquote(parsed.path, charset, errors, '/;?')
    plain_query = url_unquote(parsed.query, charset, errors, ';/?:@&=+,$')
    plain_fragment = url_unquote(parsed.fragment, charset, errors,
                                 ';/?:@&=+,$')
    return url_unparse((parsed.scheme, parsed.decode_netloc(),
                        plain_path, plain_query, plain_fragment))
def serve_file(self, environ, start_response, endpoint, file_name=None):
    """Serve a static file through :attr:`fileserver`, rewriting
    ``PATH_INFO`` for special framework pages.

    ``admin.html`` is aliased to ``builder.html`` (served from the jam
    directory); a missing root file name defaults to ``index.html``.
    When compressed assets are enabled, ``.js``/``.css`` requests are
    first retried as ``*.min.*`` and fall back to the original path.
    """
    if endpoint == 'root_file':
        if not file_name:
            file_name = 'index.html'
            environ['PATH_INFO'] = environ['PATH_INFO'] + '/index.html'
        elif file_name == 'admin.html':
            file_name = 'builder.html'
        if file_name == 'index.html':
            self.check_modified(file_name, environ)
            self.check_project_modified()
        elif file_name == 'builder.html':
            # builder.html lives in the jam package directory.
            self.check_modified(os.path.join(
                to_unicode(self.jam_dir, 'utf-8'), file_name), environ)
            environ['PATH_INFO'] = os.path.join('jam', file_name)
    if file_name:
        base, ext = os.path.splitext(file_name)
        init_path_info = None
        if common.SETTINGS['COMPRESSED_JS'] and ext and ext in ['.js', '.css']:
            # Remember the original path so we can retry it if the
            # minified variant is missing.
            init_path_info = environ['PATH_INFO']
            min_file_name = base + '.min' + ext
            environ['PATH_INFO'] = environ['PATH_INFO'].replace(
                file_name, min_file_name)
        try:
            try:
                return self.fileserver(environ, start_response)
            except Exception as e:
                if init_path_info:
                    # Minified file failed — retry the original path.
                    environ['PATH_INFO'] = init_path_info
                    return self.fileserver(environ, start_response)
                else:
                    raise
        except Exception as e:
            # Best-effort: any failure yields an empty response.
            return Response('')(environ, start_response)
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=None, httponly=False,
                sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    if not isinstance(key, (bytes, text_type)):
        raise TypeError('invalid key %r' % key)
    if not isinstance(value, (bytes, text_type)):
        raise TypeError('invalid value %r' % value)

    key, value = to_native(key, _cookie_charset), \
        to_native(value, _cookie_charset)
    value = quote_header_value(value)
    morsel = _ExtendedMorsel(key, value)
    # NOTE: microseconds of a timedelta are intentionally dropped.
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
        morsel['expires'] = expires
    elif max_age is not None and sync_expires:
        # Derive an absolute expiry from the relative max-age.
        morsel['expires'] = cookie_date(time() + max_age)
    if domain and ':' in domain:
        # The port part of the domain should NOT be used. Strip it
        domain = domain.split(':', 1)[0]
    if domain:
        # NOTE(review): ``assert`` is stripped under ``python -O`` —
        # this validation then silently disappears; consider raising.
        assert '.' in domain, (
            "Setting \"domain\" for a cookie on a server running localy (ex: "
            "localhost) is not supportted by complying browsers. You should "
            "have something like: \"127.0.0.1 localhost dev.localhost\" on "
            "your hosts file and then point your server to run on "
            "\"dev.localhost\" and also set \"domain\" for \"dev.localhost\""
        )
    # ``False`` flags are skipped so boolean attributes such as
    # ``secure``/``httponly`` only appear when enabled.
    for k, v in (('path', path), ('domain', domain), ('secure', secure),
                 ('max-age', max_age), ('httponly', httponly)):
        if v is not None and v is not False:
            morsel[k] = str(v)
    return to_unicode(morsel.output(header='').lstrip(), _cookie_charset)
def full_path(self):
    """
    Werzueg's full_path implementation always appends '?', even when
    the query string is empty.  This variant only appends the '?'
    separator when there actually is a query string.
    """
    qs = self.query_string
    if not qs:
        return self.path
    return "".join((self.path, "?", to_unicode(qs, self.url_charset)))
def unquote(cls, value):
    """Reverse of ``quote``: base64-decode when configured, then run
    the class's deserializer (if any) and return the result."""
    data = value
    if cls.quote_base64:
        ### Added line
        data = to_unicode(base64.b64decode(data), 'utf-8')
    if cls.serialization_method is not None:
        data = cls.serialization_method.loads(data)
    return data
def process_sql_result(rows):
    """Return *rows* as lists, reading Firebird BLOB columns into
    unicode strings; all other values pass through unchanged."""
    def _convert(cell):
        # BLOB columns arrive as reader objects; pull their bytes
        # and decode them as utf-8.
        if isinstance(cell, fdb.fbcore.BlobReader):
            return to_unicode(cell.read(), 'utf-8')
        return cell

    return [[_convert(cell) for cell in row] for row in rows]
def process_sql_result(rows):
    """Return *rows* as lists with ``bytes`` values decoded to unicode
    (utf-8); all other values pass through unchanged."""
    def _convert(cell):
        if isinstance(cell, bytes):
            return to_unicode(cell, 'utf-8')
        return cell

    return [[_convert(cell) for cell in row] for row in rows]
def process_sql_result(rows):
    """Return *rows* as lists, materialising Oracle LOB columns as
    unicode strings; all other values pass through unchanged."""
    def _convert(cell):
        # LOBs are lazy handles — read their content and decode it.
        if isinstance(cell, cx_Oracle.LOB):
            return to_unicode(cell.read(), 'utf-8')
        return cell

    return [[_convert(cell) for cell in row] for row in rows]
def __call__(self, *path, **query):
    """Build a URL below :attr:`base` from *path* segments, optionally
    followed by a query string.

    A ``dict`` as the last positional argument replaces keyword
    arguments (mixing both raises ``TypeError``); trailing underscores
    on keyword names are stripped.
    """
    if path and isinstance(path[-1], dict):
        if query:
            raise TypeError('keyword arguments and query-dicts '
                            'can\'t be combined')
        query, path = path[-1], path[:-1]
    elif query:
        query = {(name.endswith('_') and name[:-1] or name): val
                 for name, val in query.items()}

    segments = [to_unicode(url_quote(item, self.charset), 'ascii')
                for item in path if item is not None]
    tail = '/'.join(segments).lstrip('/')

    url = self.base
    if tail:
        if not url.endswith('/'):
            url += '/'
        url = url_join(url, './' + tail)
    if query:
        url += '?' + to_unicode(
            url_encode(query, self.charset, sort=self.sort, key=self.key),
            'ascii')
    return to_native(url)
def process_sql_result(rows):
    """Return *rows* as lists with binary buffer values converted to
    native strings (Python 2 ``buffer`` → ``str``, Python 3
    ``memoryview`` → unicode); other values pass through unchanged."""
    def _convert(cell):
        # Exact type checks on purpose: mirror the driver's raw
        # buffer objects only, not arbitrary subclasses.
        if PY2:
            if type(cell) == buffer:
                return str(cell)
        else:
            if type(cell) == memoryview:
                return to_unicode(to_bytes(cell, 'utf-8'), 'utf-8')
        return cell

    return [[_convert(cell) for cell in row] for row in rows]
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.

    If the `charset` is set to `None` a bytestring is returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding parameter can
       be provided.

    :param environ: the WSGI environment that is modified.
    """
    remaining = environ.get('PATH_INFO')
    if not remaining:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # Shift any leading slashes (including duplicates) onto SCRIPT_NAME
    # so they are not lost.
    stripped = remaining.lstrip('/')
    if stripped != remaining:
        script_name += '/' * (len(remaining) - len(stripped))

    segment, sep, rest = stripped.partition('/')
    environ['PATH_INFO'] = '/' + rest if sep else ''
    environ['SCRIPT_NAME'] = script_name + segment
    return to_unicode(wsgi_get_bytes(segment), charset, errors,
                      allow_none_charset=True)
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first item to decide text vs. bytes mode; '' is used
    # as the exhausted sentinel for both modes (falsy either way).
    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # Capturing group keeps the separator in the split output so it
        # can be matched against below.
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    # ``buffer`` carries the partial chunk that may continue in the
    # next read; a separator boundary flushes it.
    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    # Trailing data after the last separator is emitted as final chunk.
    if buffer:
        yield _join(buffer)
def parse_cookie(header, errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.

    Per default encoding errors are ignored.  If you want a different
    behavior you can set `errors` to ``'replace'`` or ``'strict'``.  In
    strict mode a :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead
       of a regular dict.  The `cls` parameter was added.

    :param header: the header to be used to parse the cookie.
        Alternatively this can be a WSGI environment.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not
        specified or `None` the default :class:`TypeConversionDict` is
        used.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    header = to_native(header, _cookie_charset)

    if cls is None:
        cls = TypeConversionDict

    jar = _ExtendedCookie()
    jar.load(header)

    # The extended morsel/cookie classes turn CookieErrors into `None`
    # values; such broken items are skipped here.
    decoded = {}
    for name, morsel in iteritems(jar):
        if morsel.value is None:
            continue
        decoded[to_unicode(name, _cookie_charset)] = to_unicode(
            unquote_header_value(morsel.value), _cookie_charset)
    return cls(decoded)
def parse_authorization_header(value):
    """Parse a ``Basic`` or ``Digest`` HTTP authorization header sent
    by the browser.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object,
        or `None` when the header is missing or malformed.
    """
    if not value:
        return None
    raw = wsgi_to_bytes(value)
    pieces = raw.split(None, 1)
    if len(pieces) != 2:
        return None
    scheme, payload = pieces
    scheme = scheme.lower()

    if scheme == b'basic':
        try:
            username, password = base64.b64decode(payload).split(b':', 1)
        except Exception:
            return None
        return Authorization(
            'basic', {
                'username': to_unicode(username, _basic_auth_charset),
                'password': to_unicode(password, _basic_auth_charset)
            }
        )
    if scheme == b'digest':
        auth_map = parse_dict_header(payload)
        # These fields are mandatory for a digest challenge response.
        for required in ('username', 'realm', 'nonce', 'uri', 'response'):
            if required not in auth_map:
                return None
        # When qop is present, nc and cnonce become mandatory too.
        if 'qop' in auth_map:
            if not auth_map.get('nc') or not auth_map.get('cnonce'):
                return None
        return Authorization('digest', auth_map)
    return None
def on_upload(self, request):
    """Handle a POST file upload and save it under the work directory.

    Requires a valid session; replies with ``NOT_LOGGED`` otherwise.
    When no target ``path`` is given, a default static folder is chosen
    and the file name gets a timestamp suffix before sanitising.
    """
    if request.method == 'POST':
        task_id = int(request.form.get('task_id'))
        path = request.form.get('path')
        # task_id 0 addresses the builder/admin task.
        if task_id == 0:
            task = self.admin
        else:
            task = self.get_task()
        result = {'status': common.RESPONSE, 'data': None,
                  'version': task.version}
        r = {'result': result, 'error': None}
        if not self.check_session(request, task):
            r['result']['status'] = common.NOT_LOGGED
            r['result']['data'] = common.NOT_LOGGED
        else:
            f = request.files.get('file')
            file_name = request.form.get('file_name')
            if f and file_name:
                base, ext = os.path.splitext(file_name)
                if not path:
                    if task_id == 0:
                        path = os.path.join('static', 'builder')
                    else:
                        path = os.path.join('static', 'files')
                    # No explicit target: make the name unique with a
                    # timestamp before sanitising it.
                    file_name = ('%s%s%s') % (base, datetime.datetime.now(
                    ).strftime('%Y-%m-%d_%H:%M:%S.%f'), ext)
                    file_name = secure_filename(file_name)
                file_name = file_name.replace('?', '')
                if not r['error']:
                    dir_path = os.path.join(
                        to_unicode(self.work_dir, 'utf-8'), path)
                    if not os.path.exists(dir_path):
                        os.makedirs(dir_path)
                    f.save(os.path.join(dir_path, file_name))
                    task = self.get_task()
                    r['result'] = {
                        'status': common.RESPONSE,
                        'data': {'file_name': file_name, 'path': path},
                        'version': task.version
                    }
            else:
                r['error'] = 'File upload invalid parameters'
        return self.create_post_response(request, r)
def get_path_info(environ, charset="utf-8", errors="replace"):
    """Return the decoded `PATH_INFO` from the WSGI environment,
    handling the WSGI decoding dance on Python 3.

    If `charset` is `None` a bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    return to_unicode(wsgi_get_bytes(environ.get("PATH_INFO", "")),
                      charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Return the decoded `SCRIPT_NAME` from the WSGI environment,
    handling the WSGI decoding dance on Python 3.

    If `charset` is `None` a bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no decoding
                    should be performed.
    :param errors: the decoding error handling.
    """
    return to_unicode(wsgi_get_bytes(environ.get('SCRIPT_NAME', '')),
                      charset, errors, allow_none_charset=True)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by
    the web browser into an
    :class:`~werkzeug.datastructures.Authorization` object, or `None`
    when the header is missing or invalid.

    :param value: the authorization header to parse.
    """
    if not value:
        return
    header = wsgi_to_bytes(value)
    try:
        kind, data = header.split(None, 1)
    except ValueError:
        return
    kind = kind.lower()

    if kind == b'basic':
        try:
            user, pwd = base64.b64decode(data).split(b':', 1)
        except Exception:
            return
        creds = {
            'username': to_unicode(user, _basic_auth_charset),
            'password': to_unicode(pwd, _basic_auth_charset),
        }
        return Authorization('basic', creds)
    if kind == b'digest':
        fields = parse_dict_header(data)
        # The digest scheme requires this minimal field set; qop in
        # turn makes nc and cnonce mandatory.
        if any(name not in fields
               for name in ('username', 'realm', 'nonce', 'uri', 'response')):
            return
        if 'qop' in fields and not (fields.get('nc') and fields.get('cnonce')):
            return
        return Authorization('digest', fields)
def _get_condition(self, field, filter_type, value, db_module):
    """Build one SQL WHERE condition string for *field* under
    *filter_type*, delegating value coercion to
    :meth:`_convert_field_value` and LIKE handling to the DB module.

    Returns the condition as a string; BOOLEAN ``= 0`` / ``<> 0``
    conditions are widened to also cover NULL.
    """
    esc_char = '/'
    cond_field_name = '%s."%s"' % (self.table_alias(), field.db_field_name)
    if type(value) == str:
        value = to_unicode(value, 'utf-8')
    filter_sign = self._get_filter_sign(filter_type, value, db_module)
    cond_string = '%s %s %s'
    if filter_type in (common.FILTER_IN, common.FILTER_NOT_IN):
        # ``None`` entries are dropped from the IN list.
        values = [self._convert_field_value(field, v, filter_type, db_module)
                  for v in value if v is not None]
        value = '(%s)' % ', '.join(values)
    elif filter_type == common.FILTER_RANGE:
        value = self._convert_field_value(field, value[0], filter_type, db_module) + \
            ' AND ' + self._convert_field_value(field, value[1], filter_type, db_module)
    elif filter_type == common.FILTER_ISNULL:
        value = ''
    else:
        value = self._convert_field_value(field, value, filter_type, db_module)
        if filter_type in [common.FILTER_CONTAINS, common.FILTER_STARTWITH,
                           common.FILTER_ENDWITH]:
            # Escape user text for LIKE, remembering whether an escape
            # clause is needed.
            value, esc_found = self._escape_search(value, esc_char)
            if field.lookup_item:
                # Search against the lookup display column, not the key.
                if field.lookup_item1:
                    cond_field_name = '%s."%s"' % (
                        self.lookup_table_alias1(field), field.lookup_db_field1)
                else:
                    if field.data_type == common.KEYS:
                        cond_field_name = '%s."%s"' % (
                            self.table_alias(), field.db_field_name)
                    else:
                        cond_field_name = '%s."%s"' % (
                            self.lookup_table_alias(field), field.lookup_db_field)
            if filter_type == common.FILTER_CONTAINS:
                value = '%' + value + '%'
            elif filter_type == common.FILTER_STARTWITH:
                value = value + '%'
            elif filter_type == common.FILTER_ENDWITH:
                value = '%' + value
            cond_field_name, value = db_module.convert_like(
                cond_field_name, value, field.data_type)
            if esc_found:
                value = "'" + value + "' ESCAPE '" + esc_char + "'"
            else:
                value = "'" + value + "'"
    sql = cond_string % (cond_field_name, filter_sign, value)
    # BOOLEAN false must also match NULL rows (and the inverse for <>).
    if field.data_type == common.BOOLEAN and value == '0':
        if filter_sign == '=':
            sql = '(' + sql + ' OR %s IS NULL)' % cond_field_name
        elif filter_sign == '<>':
            sql = '(' + sql + ' AND %s IS NOT NULL)' % cond_field_name
        else:
            raise Exception('sql.py where_clause method: boolen field condition may give ambiguious results.')
    return sql
def _convert_field_value(self, field, value, filter_type, db_module):
    """Coerce *value* into the SQL literal text expected for *field*.

    Text-search filters (contains/startswith/endswith) get a lenient
    numeric-to-text conversion; all other filters convert by the
    field's data type (dates via the DB module's cast helpers).
    """
    data_type = field.data_type
    if filter_type and filter_type in [
            common.FILTER_CONTAINS, common.FILTER_STARTWITH,
            common.FILTER_ENDWITH
    ]:
        # Searching: normalise numbers to the text form users type.
        if data_type == common.FLOAT:
            value = common.str_to_float(value)
        elif data_type == common.CURRENCY:
            value = common.str_to_currency(value)
        if type(value) == float:
            # Whole floats render as "N." so LIKE 'N.%' matches.
            if int(value) == value:
                value = str(int(value)) + '.'
            else:
                value = str(value)
        return value
    else:
        if data_type == common.DATE:
            if type(value) in string_types:
                result = value
            else:
                result = value.strftime('%Y-%m-%d')
            return db_module.cast_date(result)
        elif data_type == common.DATETIME:
            if type(value) in string_types:
                result = value
            else:
                result = value.strftime('%Y-%m-%d %H:%M')
            result = db_module.cast_datetime(result)
            return result
        elif data_type == common.INTEGER:
            if type(value) == int or type(
                    value) in string_types and value.isdigit():
                return str(value)
            else:
                # Non-numeric input is emitted as a quoted literal.
                return "'" + value + "'"
        elif data_type == common.BOOLEAN:
            if value:
                return '1'
            else:
                return '0'
        elif data_type == common.TEXT:
            #~ return "'" + str(value) + "'"
            return "'" + to_unicode(value) + "'"
        elif data_type in (common.FLOAT, common.CURRENCY):
            return str(float(value))
        else:
            return value
def match(self, path_info=None, method=None, return_rule=False,
          query_args=None, request=None):
    """Match *path_info* and *method* against the map's rules.

    Returns ``(rule, arguments)`` when *return_rule* is true, else
    ``(endpoint, arguments)``.  Raises :exc:`RequestRedirect` for
    trailing-slash/alias/default redirects, :exc:`MethodNotAllowed`
    when only the method failed to match, and :exc:`NotFound`
    otherwise.
    """
    self.map.update()
    if path_info is None:
        path_info = self.path_info
    else:
        path_info = to_unicode(path_info, self.map.charset)
    if query_args is None:
        query_args = self.query_args
    method = (method or self.default_method).upper()

    # Rules match against "<host-or-subdomain>|/<path>".
    path = u'%s|%s' % (
        self.map.host_matching and self.server_name or self.subdomain,
        path_info and '/%s' % path_info.lstrip('/')
    )

    # Methods that would have matched, for a 405 response.
    have_match_for = set()
    for rule in self.map._rules:
        try:
            rv = rule.match(path, request)
        except RequestSlash:
            # Redirect to the canonical trailing-slash form.
            raise RequestRedirect(self.make_redirect_url(
                url_quote(path_info, self.map.charset, safe='/:|+') + '/',
                query_args))
        except RequestAliasRedirect as e:
            raise RequestRedirect(self.make_alias_redirect_url(
                path, rule.endpoint, e.matched_values, method, query_args))
        if rv is None:
            continue
        if rule.methods is not None and method not in rule.methods:
            have_match_for.update(rule.methods)
            continue
        if self.map.redirect_defaults:
            redirect_url = self.get_default_redirect(rule, method, rv,
                                                     query_args)
            if redirect_url is not None:
                raise RequestRedirect(redirect_url)
        if return_rule:
            return rule, rv
        else:
            return rule.endpoint, rv
    if have_match_for:
        raise MethodNotAllowed(valid_methods=list(have_match_for))
    raise NotFound()
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL
    because it contains unsafe characters like ' ' and so on.  This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given
        as unicode string.
    """
    decoded = to_unicode(s, charset, 'replace')
    scheme, netloc, raw_path, raw_qs, anchor = url_parse(decoded)
    # Percent-quote path and query; the fragment is left as-is.
    safe_path = url_quote(raw_path, charset, safe='/%+$!*\'(),')
    safe_qs = url_quote_plus(raw_qs, charset, safe=':&%=+$!*\'(),')
    return to_native(
        url_unparse((scheme, netloc, safe_path, safe_qs, anchor)))
def on_ext(self, request):
    """Handle an external POST API request by dispatching to the
    task's ``on_ext_request`` callback.

    The request body is decoded and parsed as JSON parameters; the
    reply carries a status, the callback's data and the current
    modification counter.  Exceptions are converted into an error
    payload instead of propagating.
    """
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        method = get_path_info(request.environ)
        data = request.get_data()
        if type(data) != str:
            data = to_unicode(data, 'utf-8')
        # Malformed JSON bodies degrade to ``params = None``.
        try:
            params = json.loads(data)
        except:
            params = None
        if self.task:
            try:
                data = None
                if self.under_maintenance:
                    status = consts.UNDER_MAINTAINANCE
                elif self.task.on_ext_request:
                    status = consts.RESPONSE
                    # Track in-flight requests while the callback runs.
                    self._busy += 1
                    try:
                        data = self.task.on_ext_request(
                            self.task, method, params)
                    finally:
                        self._busy -= 1
                else:
                    status = None
                r['result'] = {
                    'status': status,
                    'data': data,
                    'modification': consts.MODIFICATION
                }
            except AbortException as e:
                traceback.print_exc()
                r['result'] = {'data': [None, error_message(e)]}
                r['error'] = error_message(e)
            except Exception as e:
                traceback.print_exc()
                r['result'] = {'data': [None, error_message(e)]}
                r['error'] = error_message(e)
        else:
            # No task loaded yet — report the server state only.
            r['result'] = {
                'status': self.state,
                'data': None,
                'modification': None
            }
        return self.create_post_response(request, r)
def _convert_field_value(self, field, value, filter_type, db_module):
    """Coerce *value* into the SQL literal text expected for *field*.

    Text-search filters (contains/startswith/endswith) get a lenient
    numeric-to-text conversion; other filters convert by the field's
    data type, using the DB module's date/datetime cast helpers.
    """
    data_type = field.data_type
    if filter_type and filter_type in [common.FILTER_CONTAINS,
                                       common.FILTER_STARTWITH,
                                       common.FILTER_ENDWITH]:
        # Searching: normalise numbers to the text form users type.
        if data_type == common.FLOAT:
            value = common.str_to_float(value)
        elif data_type == common.CURRENCY:
            value = common.str_to_currency(value)
        if type(value) == float:
            # Whole floats render as "N." so LIKE 'N.%' matches.
            if int(value) == value:
                value = str(int(value)) + '.'
            else:
                value = str(value)
        return value
    else:
        if data_type == common.DATE:
            if type(value) in string_types:
                result = value
            else:
                result = value.strftime('%Y-%m-%d')
            return db_module.cast_date(result)
        elif data_type == common.DATETIME:
            if type(value) in string_types:
                result = value
            else:
                result = value.strftime('%Y-%m-%d %H:%M')
            result = db_module.cast_datetime(result)
            return result
        elif data_type == common.INTEGER:
            if type(value) in integer_types or \
                    type(value) in string_types and value.isdigit():
                return str(value)
            else:
                # Non-numeric input is emitted as a quoted literal.
                return "'" + value + "'"
        elif data_type == common.BOOLEAN:
            if value:
                return '1'
            else:
                return '0'
        elif data_type == common.TEXT:
            #~ return "'" + str(value) + "'"
            return "'" + to_unicode(value) + "'"
        elif data_type in (common.FLOAT, common.CURRENCY):
            return str(float(value))
        else:
            return value
def zip_dir(dir, zip_file, exclude_dirs=(), exclude_ext=(), recursive=True):
    """Add the files under *dir* (relative to the current working
    directory) to an open :class:`zipfile.ZipFile`.

    :param dir: directory to archive, relative to ``os.getcwd()``.
    :param zip_file: an open ``ZipFile`` to write into.
    :param exclude_dirs: directory basenames to skip (recursive mode).
    :param exclude_ext: file extensions (with dot) to skip.
    :param recursive: when true, walk subdirectories as well.
    """
    # NOTE: defaults changed from mutable lists ([]) to tuples; the
    # arguments are only read, so behavior is unchanged, but this
    # avoids the shared-mutable-default pitfall.
    folder = os.path.join(to_unicode(os.getcwd(), 'utf-8'), dir)
    if not os.path.exists(folder):
        return
    if recursive:
        for dirpath, dirnames, filenames in os.walk(folder):
            head, tail = os.path.split(dirpath)
            if not tail in exclude_dirs:
                for file_name in filenames:
                    name, ext = os.path.splitext(file_name)
                    if not ext in exclude_ext:
                        file_path = os.path.join(dirpath, file_name)
                        # join() discards `dir` because file_path is
                        # absolute; relpath then makes it cwd-relative.
                        arcname = os.path.relpath(os.path.join(dir, file_path))
                        zip_file.write(file_path, arcname)
    else:
        for file_name in os.listdir(folder):
            name, ext = os.path.splitext(file_name)
            if not ext in exclude_ext:
                file_path = os.path.join(folder, file_name)
                arcname = os.path.relpath(os.path.join(dir, file_path))
                zip_file.write(file_path, arcname)
def init_locale():
    """Collect locale-derived formatting settings into a dict.

    Each name in ``LOCALE_FIELDS`` (prefixed form, e.g. ``f_xxx``) is
    filled from :func:`locale.localeconv`, falling back to
    ``jam.common.DEFAULT_LOCALE`` per setting; the date format falls
    back to ISO ``%Y-%m-%d``.  All failures are best-effort silent.
    """
    import locale
    settings = {}
    try:
        locale.setlocale(locale.LC_ALL, '')
        conv = locale.localeconv()
        for name in LOCALE_FIELDS:
            key = name[2:]  # strip the "f_" style prefix
            try:
                settings[name] = to_unicode(conv[key], 'utf-8')
            except:
                settings[name] = jam.common.DEFAULT_LOCALE[key.upper()]
    except:
        pass
    try:
        settings['f_d_fmt'] = locale.nl_langinfo(locale.D_FMT)
    except:
        settings['f_d_fmt'] = '%Y-%m-%d'
    settings['f_d_t_fmt'] = '%s %s' % (settings['f_d_fmt'], '%H:%M')
    return settings
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    :param environ: the WSGI environment that is checked.
    """
    # str.split always yields at least one element, so the guard below
    # is effectively always true; an empty PATH_INFO yields ''.
    parts = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if parts:
        return to_unicode(wsgi_get_bytes(parts[0]), charset, errors,
                          allow_none_charset=True)
def on_upload(self, request):
    """Handle a POST file upload and save it under the work directory.

    Requires a valid session; replies with ``NOT_LOGGED`` otherwise.
    When no target ``path`` is given, a default static folder is chosen
    and the file name gets a timestamp suffix before sanitising.
    """
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        task_id = int(request.form.get('task_id'))
        path = request.form.get('path')
        # task_id 0 addresses the builder/admin task.
        if task_id == 0:
            task = self.admin
        else:
            task = self.task
        if task:
            result = {'status': common.RESPONSE, 'data': None,
                      'version': task.version}
            r['result'] = result
            if not self.check_session(request, task):
                r['result']['status'] = common.NOT_LOGGED
                r['result']['data'] = common.NOT_LOGGED
            else:
                f = request.files.get('file')
                file_name = request.form.get('file_name')
                if f and file_name:
                    base, ext = os.path.splitext(file_name)
                    if not path:
                        if task_id == 0:
                            path = os.path.join('static', 'builder')
                        else:
                            path = os.path.join('static', 'files')
                        # No explicit target: make the name unique with
                        # a timestamp before sanitising it.
                        file_name = ('%s%s%s') % (base,
                            datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f'), ext)
                        file_name = secure_filename(file_name)
                    file_name = file_name.replace('?', '')
                    if not r['error']:
                        dir_path = os.path.join(
                            to_unicode(self.work_dir, 'utf-8'), path)
                        if not os.path.exists(dir_path):
                            os.makedirs(dir_path)
                        f.save(os.path.join(dir_path, file_name))
                        task = self.get_task()
                        r['result'] = {'status': common.RESPONSE,
                                       'data': {'file_name': file_name,
                                                'path': path},
                                       'version': task.version}
                else:
                    r['error'] = 'File upload invalid parameters';
        else:
            # No task loaded yet — report the server state only.
            r['result'] = {'status': self.state, 'data': None,
                           'version': None}
        return self.create_post_response(request, r)
def serve_file(self, environ, start_response, endpoint, file_name=None):
    """Serve a static file through :attr:`fileserver`, rewriting
    ``PATH_INFO`` for special framework pages.

    Root requests get an ``.html`` suffix; a missing name defaults to
    ``index.html``; ``admin.html`` is aliased to ``builder.html``
    (served from the jam package dir when absent locally).  When
    compressed assets are enabled, ``.js``/``.css`` requests are first
    retried as ``*.min.*`` and fall back to the original path.
    """
    if endpoint == 'root_file':
        if file_name:
            file_name += '.html'
        if not file_name:
            file_name = 'index.html'
            environ['PATH_INFO'] = '/index.html'
        elif file_name == 'admin.html':
            file_name = 'builder.html'
        if file_name == 'index.html':
            self.check_modified(file_name, environ)
            self.check_project_modified()
        elif file_name == 'builder.html':
            if os.path.exists(file_name):
                # Project-local override of the builder page.
                self.check_modified(file_name, environ)
            else:
                # Fall back to the copy shipped in the jam package.
                self.check_modified(
                    os.path.join(to_unicode(self.jam_dir, 'utf-8'),
                                 file_name), environ)
                environ['PATH_INFO'] = '/jam/builder.html'
    if file_name:
        base, ext = os.path.splitext(file_name)
        init_path_info = None
        if consts.COMPRESSED_JS and ext and ext in ['.js', '.css']:
            # Remember the original path so we can retry it if the
            # minified variant is missing.
            init_path_info = environ['PATH_INFO']
            min_file_name = base + '.min' + ext
            environ['PATH_INFO'] = environ['PATH_INFO'].replace(
                file_name, min_file_name)
        try:
            try:
                return self.fileserver(environ, start_response)
            except Exception as e:
                if init_path_info:
                    # Minified file failed — retry the original path.
                    environ['PATH_INFO'] = init_path_info
                    return self.fileserver(environ, start_response)
                else:
                    raise
        except Exception as e:
            # Best-effort: any failure yields an empty response.
            return Response('')(environ, start_response)
def on_ext(self, request):
    """Handle an external POST API request.

    The request path selects the method name and the JSON body carries
    the parameters; the call is dispatched to ``task.on_ext_request``.
    """
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        method = get_path_info(request.environ)
        data = request.get_data()
        # get_data() may return bytes; decode before json.loads.
        if type(data) != str:
            data = to_unicode(data, 'utf-8')
        params = json.loads(data)
        task = self.get_task()
        try:
            data = None
            if self.under_maintenance:
                status = common.UNDER_MAINTAINANCE
            elif task.on_ext_request:
                status = common.RESPONSE
                # Track in-flight requests so shutdown/maintenance can
                # wait for them.
                self._busy += 1
                try:
                    data = task.on_ext_request(task, method, params)
                finally:
                    self._busy -= 1
            else:
                # No handler registered for external requests.
                status = None
            r['result'] = {
                'status': status,
                'data': data,
                'version': task.version
            }
        except AbortException as e:
            traceback.print_exc()
            r['result'] = {'data': [None, error_message(e)]}
            r['error'] = error_message(e)
        except Exception as e:
            traceback.print_exc()
            #~ if common.SETTINGS['DEBUGGING']:
            #~ raise
            r['result'] = {'data': [None, error_message(e)]}
            r['error'] = error_message(e)
        return self.create_post_response(request, r)
def on_ext(self, request):
    """Handle an external POST API request (``self.task`` variant).

    Like the sibling ``on_ext`` but guards on ``self.task`` being
    loaded and tolerates an unparsable JSON body (``params = None``).
    """
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        method = get_path_info(request.environ)
        data = request.get_data()
        # get_data() may return bytes; decode before json.loads.
        if type(data) != str:
            data = to_unicode(data, 'utf-8')
        try:
            params = json.loads(data)
        except:
            # Best-effort: an empty/invalid body simply yields no params.
            params = None
        if self.task:
            try:
                data = None
                if self.under_maintenance:
                    status = common.UNDER_MAINTAINANCE
                elif self.task.on_ext_request:
                    status = common.RESPONSE
                    # Track in-flight requests for maintenance/shutdown.
                    self._busy += 1
                    try:
                        data = self.task.on_ext_request(self.task, method, params)
                    finally:
                        self._busy -= 1
                else:
                    # No external-request handler registered.
                    status = None
                r['result'] = {'status': status, 'data': data,
                    'version': self.task.version}
            except AbortException as e:
                traceback.print_exc()
                r['result'] = {'data': [None, error_message(e)]}
                r['error'] = error_message(e)
            except Exception as e:
                traceback.print_exc()
                r['result'] = {'data': [None, error_message(e)]}
                r['error'] = error_message(e)
        else:
            # Project not loaded yet - report server state.
            r['result'] = {'status': self.state, 'data': None, 'version': None}
        return self.create_post_response(request, r)
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
    r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
    always uses utf-8 URLs internally because this is what browsers and HTTP
    do as well. In some places where it accepts an URL it also accepts a
    unicode IRI and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th'

    There is a general problem with IRI and URI conversion with some
    protocols that appear in the wild that are in violation of the URI
    specification. In places where Werkzeug goes through a forced IRI to URI
    conversion it will set the `safe_conversion` flag which will not perform
    a conversion if the end result is already ASCII. This can mean that the
    return value is not an entirely correct URI but it will not destroy such
    invalid URLs in the process.

    As an example consider the following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    The internal representation after parsing of those URLs is the same and
    there is no way to reconstruct the original one. If safe conversion is
    enabled however this function becomes a noop for both of those strings as
    they both can be considered URIs.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9.6
       The `safe_conversion` parameter was added.

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param safe_conversion: indicates if a safe conversion should take place.
                            For more information see the explanation above.
    """
    # A parsed URL tuple is accepted as well; rebuild the string first.
    if isinstance(iri, tuple):
        iri = url_unparse(iri)
    if safe_conversion:
        try:
            native_iri = to_native(iri)
            ascii_iri = to_native(iri).encode('ascii')
            # If the value is pure ASCII with no whitespace it is already a
            # usable URI - return it untouched (noop case described above).
            if ascii_iri.split() == [ascii_iri]:
                return native_iri
        except UnicodeError:
            pass
    iri = url_parse(to_unicode(iri, charset, errors))
    # Each component gets its own safe-character list per RFC 3986.
    netloc = iri.encode_netloc()
    path = url_quote(iri.path, charset, errors, '/:~+%')
    query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
    fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')
    return to_native(url_unparse((iri.scheme, netloc,
                                  path, query, fragment)))
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
                    cap_at_buffer=False):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first item to decide between text and bytes mode.
    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        # The capturing group makes _split return the separators too, so
        # they can be detected below without re-scanning.
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                # Chunk complete - emit everything buffered so far.
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)

                # Optionally split over-long chunks at buffer_size; the
                # remainder is carried over into the next buffer.
                if cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)

        buffer = new_buf
    if buffer:
        # Trailing data after the last separator.
        yield _join(buffer)
def on_api(self, request):
    """Handle a POST API call for the builder/admin server.

    The JSON body is ``[method, task_id, item_id, params, modification,
    date]``.  Dispatches connect/login/logout and generic item methods,
    and reports project state when no task is available.

    Fix vs. original: ``task`` and ``task_id`` are initialized before the
    ``try`` block.  Previously, if ``json.loads`` (or anything before the
    assignments) raised, the ``except`` path referenced unbound names:
    ``request.save_session(response, self, task)`` and the DEBUGGING
    re-raise check both crashed with NameError instead of returning the
    error response.
    """
    error = ''
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        # Initialize so the except/save_session paths below never see
        # unbound names when parsing fails early.
        task = None
        task_id = None
        try:
            data = request.get_data()
            # get_data() may return bytes; decode before json.loads.
            if type(data) != str:
                data = to_unicode(data, 'utf-8')
            method, task_id, item_id, params, modification, date = json.loads(data)
            # task_id 0 addresses the admin task, anything else the project.
            if task_id == 0:
                task = self.admin
            else:
                task = self.task
                if not task:
                    task = self.get_task()
                if not task:
                    # Project not available - report the detailed state
                    # (loading / missing / broken) and return immediately.
                    lang = self.admin.lang
                    result = {'status': None,
                        'data': {'error': lang['error'], 'info': lang['info']},
                        'modification': None}
                    result['status'] = self.state
                    if self.state == common.PROJECT_LOADING:
                        result['data']['project_loading'] = lang['project_loading']
                    elif self.state == common.PROJECT_NO_PROJECT:
                        result['data']['no_project'] = lang['no_project']
                    elif self.state == common.PROJECT_ERROR:
                        result['data']['project_error'] = lang['project_error']
                    r['result'] = result
                    return self.create_post_response(request, r)
            if not task:
                result = {'status': common.PROJECT_NO_PROJECT,
                    'data': None, 'modification': None}
            else:
                self.check_build()
                result = {'status': common.RESPONSE, 'data': None,
                    'modification': self.MODIFICATION}
                if task_id and modification and modification != self.MODIFICATION:
                    # Client is out of date with the project metadata.
                    result['status'] = common.PROJECT_MODIFIED
                elif self.under_maintenance:
                    result['status'] = common.PROJECT_MAINTAINANCE
                elif method == 'connect':
                    # NOTE(review): connect is invoked twice here, mirroring
                    # the original code - confirm the first call's side
                    # effect (session creation?) is intentional.
                    self.connect(request, task)
                    result['data'] = self.connect(request, task)
                elif method == 'login':
                    result['data'] = self.login(request, task, params[0])
                elif method == 'logout':
                    self.logout(request, task);
                    result['status'] = common.PROJECT_NOT_LOGGED
                    result['data'] = common.PROJECT_NOT_LOGGED
                else:
                    if not self.check_session(request, task):
                        result['status'] = common.PROJECT_NOT_LOGGED
                        result['data'] = common.PROJECT_NOT_LOGGED
                    else:
                        item = task
                        if task and item_id:
                            item = task.item_by_ID(item_id)
                        # Track in-flight requests for maintenance/shutdown.
                        self._busy += 1
                        try:
                            data = self.get_response(item, method, params)
                        finally:
                            self._busy -= 1
                        result['data'] = data
            r['result'] = result
        except AbortException as e:
            traceback.print_exc()
            error = error_message(e)
            r['result'] = {'data': [None, error]}
            r['error'] = error
        except Exception as e:
            traceback.print_exc()
            error = error_message(e)
            # In debug mode re-raise errors from non-admin requests so they
            # surface in the console/debugger.
            if self.DEBUGGING and task_id != 0:
                raise
            r['result'] = {'data': [None, error]}
            r['error'] = error
        response = self.create_post_response(request, r)
        request.save_session(response, self, task)
        return response
def get_remote_addr():
    """Return the client's remote address as a unicode string."""
    raw_addr = _get_remote_addr()
    return to_unicode(raw_addr)
def file_read(filename):
    """Read *filename* and return its contents decoded as UTF-8,
    silently dropping undecodable bytes."""
    with open(filename, 'rb') as stream:
        raw = stream.read()
    return to_unicode(raw, 'utf-8', errors='ignore')
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
                    cap_at_buffer=False):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first item to decide between text and bytes mode.
    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item, ), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        # The capturing group makes _split return separators as items,
        # so they can be recognized in the loop below.
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                # Chunk complete - emit everything buffered so far.
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)

                # Optionally cap chunk length at buffer_size; remainder
                # carries over into the next buffer.
                if cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)

        buffer = new_buf
    if buffer:
        # Trailing data after the last separator.
        yield _join(buffer)
def parse_authorization_header(header):
    """
    Parses the HTTP Auth Header to a JWT Token
    Args:
        header: Authorization header of the HTTP Request
    Examples:
        request.headers['Authorization'] or something same
    Returns:
        Valid JWT token
    """
    if not header:
        return None
    value = wsgi_to_bytes(header)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        # Fallback for old versions
        # (no scheme prefix: treat the whole value as a bearer token).
        auth_type = b"bearer"
        auth_info = value

    if auth_type == b"basic":
        try:
            # Basic credentials are base64("user:password").
            username, password = base64.b64decode(auth_info).split(b":", 1)

            with current_app.app_context():
                username = to_unicode(username, "utf-8")
                password = to_unicode(password, "utf-8")

                # Build the managers needed to authenticate against the
                # application database.
                user_manager: UserManager = UserManager(current_app.database_manager)
                group_manager: GroupManager = GroupManager(current_app.database_manager,
                                                           right_manager=RightManager(rights))
                security_manager: SecurityManager = SecurityManager(current_app.database_manager)

                auth_settings = SystemSettingsReader(current_app.database_manager).get_all_values_from_section(
                    'auth', default=AuthModule.__DEFAULT_SETTINGS__)
                auth_module = AuthModule(auth_settings,
                                         user_manager=user_manager,
                                         group_manager=group_manager,
                                         security_manager=security_manager)
                try:
                    user_instance = auth_module.login(username, password)
                except Exception as e:
                    # Invalid credentials - deliberately best-effort.
                    return None
                if user_instance:
                    # Exchange the valid credentials for a fresh JWT.
                    tg = TokenGenerator(current_app.database_manager)
                    return tg.generate_token(payload={'user': {
                        'public_id': user_instance.get_public_id()
                    }})
                else:
                    return None
        except Exception:
            # NOTE(review): broad catch also hides infrastructure errors
            # (DB down, bad settings); confirm returning None is desired.
            return None

    if auth_type == b"bearer":
        try:
            with current_app.app_context():
                # Validate the presented token and echo it back unchanged.
                tv = TokenValidator(current_app.database_manager)
                decoded_token = tv.decode_token(auth_info)
                tv.validate_token(decoded_token)
                return auth_info
        except Exception:
            return None
    return None
def import_lang(task, file_path):
    """Import a language definition from a JSON file.

    The file must contain ``language`` (row for JAM_LANGS/SYS_LANGS) and
    ``translation`` (keyword -> value mapping for JAM_LANG_VALUES).
    Existing rows are updated, missing ones inserted.

    Fixes vs. original:
    - All WHERE clauses now use ``?`` parameter binding (the cursors
      already used qmark style for the SET/VALUES parts).  The original
      interpolated values with ``%`` formatting, which breaks for string
      values (no quoting) and is SQL-injectable.
    - The computed ``error`` string is now returned; previously it was
      assigned and silently discarded.
    - Removed the unused ``recs`` local.

    :param task: task providing database connections.
    :param file_path: path to the JSON language file.
    :return: '' on success, otherwise an error message.
    """
    error = ''
    try:
        with open(file_path, 'r') as f:
            content = to_unicode(f.read(), 'utf-8')
        content = json.loads(content)
        language = content['language']
        translation = content['translation']
        con = lang_con(task)
        sys_con = task.create_connection()
        try:
            cursor = con.cursor()
            # Parameterized lookup - language/country values may be strings.
            cursor.execute(
                'SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=? AND F_COUNTRY=?',
                (language['f_language'], language['f_country']))
            res = cursor.fetchall()
            if len(res):
                # Language exists - update it in both databases.
                lang_id = res[0][0]
                fields = []
                field_values = []
                for key, value in iteritems(language):
                    fields.append('%s=?' % key)
                    field_values.append(value)
                fields = ','.join(fields)
                cursor.execute(
                    'UPDATE JAM_LANGS SET %s WHERE ID=?' % fields,
                    field_values + [lang_id])
                sys_cursor = sys_con.cursor()
                sys_cursor.execute(
                    'UPDATE SYS_LANGS SET %s WHERE ID=?' % fields,
                    field_values + [lang_id])
                sys_con.commit()
            else:
                # New language - insert into both databases.
                fields = []
                values = []
                field_values = []
                for key, value in iteritems(language):
                    fields.append(key)
                    field_values.append(value)
                    values.append('?')
                cursor.execute(
                    'INSERT INTO JAM_LANGS (%s) VALUES (%s)' %
                    (','.join(fields), ','.join(values)), field_values)
                cursor.execute(
                    'SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=? AND F_COUNTRY=?',
                    (language['f_language'], language['f_country']))
                res = cursor.fetchall()
                lang_id = res[0][0]
                # SYS_LANGS additionally carries a DELETED flag.
                fields.append('DELETED')
                values.append('?')
                field_values.append(0)
                sys_cursor = sys_con.cursor()
                sys_cursor.execute(
                    'INSERT INTO SYS_LANGS (%s) VALUES (%s)' %
                    (','.join(fields), ','.join(values)), field_values)
                sys_con.commit()
            if lang_id:
                # Map keyword -> key id once, then upsert each translation.
                cursor.execute('SELECT ID, F_KEYWORD FROM JAM_LANG_KEYS')
                res = cursor.fetchall()
                keys = {}
                for rec in res:
                    keys[rec[1]] = rec[0]
                for keyword, value in iteritems(translation):
                    key_id = keys.get(keyword)
                    if key_id:
                        cursor.execute(
                            'SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=? AND F_KEY=?',
                            (lang_id, key_id))
                        res = cursor.fetchall()
                        if len(res):
                            cursor.execute(
                                'UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=?',
                                (value, res[0][0]))
                        else:
                            cursor.execute(
                                'INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)',
                                (lang_id, key_id, value))
                con.commit()
        finally:
            con.close()
            sys_con.close()
    except Exception as e:
        print(e)
        error = 'Can not import language'
    return error
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=None, httponly=False, sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The
    parameters are the same as in the cookie Morsel object in the Python
    standard library but it accepts unicode data, too.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    if not isinstance(key, (bytes, text_type)):
        raise TypeError('invalid key %r' % key)
    if not isinstance(value, (bytes, text_type)):
        raise TypeError('invalid value %r' % value)
    key, value = to_native(key, _cookie_charset), \
        to_native(value, _cookie_charset)
    value = quote_header_value(value)
    morsel = _ExtendedMorsel(key, value)
    # Accept timedelta for max_age and normalize to whole seconds.
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
        morsel['expires'] = expires
    elif max_age is not None and sync_expires:
        # Derive expires from max_age for clients that ignore Max-Age.
        morsel['expires'] = cookie_date(time() + max_age)
    if domain and ':' in domain:
        # The port part of the domain should NOT be used. Strip it
        domain = domain.split(':', 1)[0]
    if domain:
        # NOTE(review): assert statements are removed under ``python -O``,
        # so this validation silently disappears in optimized mode.
        assert '.' in domain, (
            "Setting \"domain\" for a cookie on a server running localy (ex: "
            "localhost) is not supportted by complying browsers. You should "
            "have something like: \"127.0.0.1 localhost dev.localhost\" on "
            "your hosts file and then point your server to run on "
            "\"dev.localhost\" and also set \"domain\" for \"dev.localhost\"")
    # Only attributes that are set (not None/False) are emitted.
    for k, v in (('path', path), ('domain', domain), ('secure', secure),
                 ('max-age', max_age), ('httponly', httponly)):
        if v is not None and v is not False:
            morsel[k] = str(v)
    return to_unicode(morsel.output(header='').lstrip(), _cookie_charset)
def full_path(self):
    """Requested path as unicode, including the query string."""
    query = to_unicode(self.query_string, self.url_charset)
    return self.path + u'?' + query
def match(self, path_info=None, method=None, return_rule=False,
          query_args=None):
    """Match ``path_info``/``method`` against the map's rules.

    Returns ``(rule, arguments)`` when *return_rule* is true, otherwise
    ``(endpoint, arguments)``.  Raises :exc:`RequestRedirect` for
    trailing-slash and alias redirects, :exc:`MethodNotAllowed` when the
    path matched with a different method, and :exc:`NotFound` otherwise.
    Rules can additionally be filtered by an API version taken from
    ``self.request``.

    Fix vs. original: the success path returned ``rule.endpint`` - a typo
    that raised AttributeError on every non-``return_rule`` match; it now
    returns ``rule.endpoint``.
    """
    self.map.update()
    if path_info is None:
        path_info = self.path_info
    else:
        path_info = to_unicode(path_info, self.map.charset)
    if query_args is None:
        query_args = self.query_args
    method = (method or self.default_method).upper()

    # Rules are keyed by "<host-or-subdomain>|/<path>".
    path = u'%s|%s' % (
        self.map.host_matching and self.server_name or self.subdomain,
        path_info and '/%s' % path_info.lstrip('/'))

    have_match_for = set()
    for rule in self.map._rules:
        try:
            rv = rule.match(path, method)
        except RequestPath:
            # Path matched except for a trailing slash - redirect to the
            # canonical slashed URL.
            raise RequestRedirect(
                self.make_redirect_url(
                    url_quote(path_info, self.map.charset, safe='/:|+') + '/',
                    query_args))
        except RequestAliasRedirect as e:
            raise RequestRedirect(
                self.make_alias_redirect_url(path, rule.endpoint,
                                             e.matched_values, method,
                                             query_args))
        if rv is None:
            continue
        if rule.methods is not None and method not in rule.methods:
            # Remember for a possible 405 with the allowed methods.
            have_match_for.update(rule.methods)
            continue

        # Determine the requested API version and filter rules by it.
        version = get_version(self.request)
        if self.request and version:
            if not isinstance(rule.version, list) or not rule.version:
                rule.version = list()
            version_list = self.version_dict.get(rule.rule)
            # A rule without explicit versions is skipped when the
            # requested version is claimed by some other rule for the
            # same path; a versioned rule must list the version itself.
            if len(rule.version) == 0 \
                    and version_list is not None \
                    and version in version_list:
                continue
            elif len(rule.version) != 0 and version not in rule.version:
                continue
            self.request.rule_version = rule.version

        if self.map.redirect_defaults:
            redirect_url = self.get_default_redirect(rule, method, rv,
                                                     query_args)
            if redirect_url is not None:
                raise RequestRedirect(redirect_url)

        if rule.redirect_to is not None:
            if isinstance(rule.redirect_to, string_types):
                def _handle_match(match):
                    value = rv[match.group(1)]
                    return rule._converters[match.group(1)].to_url(value)
                redirect_url = _simple_rule_re.sub(_handle_match,
                                                   rule.redirect_to)
            else:
                redirect_url = rule.redirect_to(self, **rv)
            raise RequestRedirect(
                str(
                    url_join(
                        '%s://%s%s%s' % (self.url_scheme or 'http',
                                         self.subdomain and
                                         self.subdomain + '.' or '',
                                         self.server_name,
                                         self.script_name),
                        redirect_url)))

        if return_rule:
            return rule, rv
        else:
            # Fixed: was ``rule.endpint`` (AttributeError).
            return rule.endpoint, rv

    if have_match_for:
        raise MethodNotAllowed(valid_methods=list(have_match_for))
    raise NotFound()
def on_api(self, request):
    """Handle a POST API call for the runtime server.

    The JSON body is ``[method, task_id, item_id, params, date]``.
    Dispatches connect/login/logout and generic item methods, invoking
    the task's before/after request hooks around item calls.

    Fixes vs. original:
    - ``task.version`` was evaluated while building ``result`` before the
      ``if not task`` guard, so a missing task raised AttributeError and
      the NO_PROJECT branch was unreachable; the version is now computed
      safely.
    - ``task`` and ``task_id`` are initialized before the ``try`` so the
      except path (``save_session``, DEBUGGING re-raise) cannot hit
      unbound names when JSON parsing fails.
    """
    error = ''
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        # Initialize so the except/save_session paths below never see
        # unbound names when parsing fails early.
        task = None
        task_id = None
        try:
            data = request.get_data()
            # get_data() may return bytes; decode before json.loads.
            if type(data) != str:
                data = to_unicode(data, 'utf-8')
            method, task_id, item_id, params, date = json.loads(data)
            # task_id 0 addresses the admin task, anything else the project.
            if task_id == 0:
                task = self.admin
            else:
                task = self.get_task()
            # Guard version lookup: task may be None (NO_PROJECT below).
            result = {'status': common.RESPONSE, 'data': None,
                'version': task.version if task else None}
            if not task:
                result['status'] = common.NO_PROJECT
            elif self.under_maintenance:
                result['status'] = common.UNDER_MAINTAINANCE
            elif method == 'connect':
                # NOTE(review): connect is invoked twice here, mirroring the
                # original code - confirm the first call's side effect
                # (session creation?) is intentional.
                self.connect(request, task)
                result['data'] = self.connect(request, task)
            elif method == 'login':
                result['data'] = self.login(request, task, params[0], params[1])
            elif method == 'logout':
                self.logout(request, task);
                result['status'] = common.NOT_LOGGED
                result['data'] = common.NOT_LOGGED
            else:
                if not self.check_session(request, task):
                    result['status'] = common.NOT_LOGGED
                    result['data'] = common.NOT_LOGGED
                else:
                    item = task
                    if task and item_id:
                        item = task.item_by_ID(item_id)
                    # Track in-flight requests for maintenance/shutdown.
                    self._busy += 1
                    try:
                        data = None
                        started = datetime.datetime.now()
                        # Optional application hooks around the call; the
                        # before-hook may short-circuit with its own data.
                        if task.on_before_request:
                            data = task.on_before_request(item, method, params)
                        if not data:
                            data = self.get_response(item, method, params)
                        if task.on_after_request:
                            task.on_after_request(item, method, params,
                                datetime.datetime.now() - started)
                    finally:
                        self._busy -= 1
                    result['data'] = data
            r['result'] = result
        except AbortException as e:
            traceback.print_exc()
            error = error_message(e)
            r['result'] = {'data': [None, error]}
            r['error'] = error
        except Exception as e:
            traceback.print_exc()
            error = error_message(e)
            # In debug mode re-raise errors from non-admin requests.
            if common.SETTINGS['DEBUGGING'] and task_id != 0:
                raise
            r['result'] = {'data': [None, error]}
            r['error'] = error
        response = self.create_post_response(request, r)
        request.save_session(response, self, task)
        return response
def exception(self):
    """String representation of the exception."""
    lines = traceback.format_exception_only(self.exc_type, self.exc_value)
    joined = "".join(lines)
    return to_unicode(joined.strip(), "utf-8", "replace")
def on_upload(self, request):
    """Handle a multi-file POST upload in a custom binary wire format.

    The body is: ``<info_len>;<task_ID>;<task_name>`` followed by a
    header of ``;``-separated integers - the file count, the target path
    length, then alternating (name length, content length) pairs - and
    finally the concatenated path/name/content bytes.  Each file is
    written below the current working directory.
    """

    def find_param(data):
        # Return the bytes before the first ';' and the offset just past it.
        pos = data.find(to_bytes(';', 'utf-8'))
        return data[:pos], pos + 1

    def read_user_info(data):
        # Leading block: "<info_len>;<task_ID>;<task_name>".
        info_len, pos = find_param(data)
        info_len = int(info_len)
        user_info = data[pos:pos + info_len]
        task_ID, p = find_param(user_info)
        task_name = user_info[p:]
        pos = pos + info_len + 1
        return task_name, int(task_ID), pos

    if request.method == 'POST':
        try:
            data = request.get_data()
            header = []
            header_str = to_bytes('', 'utf-8')
            length = 0
            string = to_bytes('', 'utf-8')
            task_name, task_id, pos = read_user_info(data)
            if task_id == 0:
                task = self.admin
            else:
                task = self.get_task()
            if self.admin.safe_mode:
                # In safe mode only authenticated sessions may upload.
                if not request.get_session(task).get('info'):
                    return Response()
            # Scan the ';'-separated integer header byte by byte.  The
            # first value is the file count; the header is complete after
            # 2 * (count + 1) values.
            for i in range(len(data)):
                s = data[pos + i:pos + i + 1]
                header_str += s
                if s == to_bytes(';', 'utf-8'):
                    if len(header) == 0:
                        length = int(string)
                    header.append(int(string))
                    if len(header) == 2 * (length + 1):
                        break;
                    string = to_bytes('', 'utf-8')
                else:
                    string += s
            start = len(header_str) + pos
            # NOTE(review): the destination path comes straight from the
            # request body and is joined to cwd - normpath does not stop
            # '..' traversal; confirm callers are trusted.
            path = os.path.join(to_unicode(os.getcwd(), 'utf-8'), \
                os.path.normpath(to_unicode(data[start: start + header[1]], 'utf-8')))
            if not os.path.exists(path):
                os.makedirs(path)
            start = start + header[1]
            # Write each file: header[2*i+2] is the name length,
            # header[2*i+3] the content length.
            for i in range(length):
                index = 2 * i + 2
                file_name = to_unicode(data[start: start + header[index]], 'utf-8')
                start = start + header[index]
                index += 1
                content = data[start: start + header[index]]
                file_name = os.path.join(path, file_name)
                with open(file_name, 'wb') as f:
                    f.write(content)
                os.chmod(file_name, 0o666)
                start = start + header[index]
        except:
            # Best-effort: upload failures are logged, not reported.
            traceback.print_exc()
        return Response()
def _parse_pairs():
    """Yield decoded ``(key, value)`` cookie pairs from ``header``.

    Fix: pairs whose key decodes to an empty string are now skipped,
    matching the sibling ``_parse_pairs`` implementation in this file -
    previously empty-key pairs leaked through to the caller.
    """
    for key, val in _cookie_parse_impl(header):
        key = to_unicode(key, charset, errors, allow_none_charset=True)
        if not key:
            continue
        val = to_unicode(val, charset, errors, allow_none_charset=True)
        yield try_coerce_native(key), val
def on_upload(self, request):
    """Handle a POST file upload (consts/modification variant).

    Delegates to the task's ``on_upload`` hook when defined; otherwise
    validates the file (image check or extension whitelist), stores it
    under ``<work_dir>/<path>`` with a timestamped sanitized name, and
    returns the response built by ``create_post_response``.
    """
    if request.method == 'POST':
        r = {'result': None, 'error': None}
        task_id = int(request.form.get('task_id'))
        item_id = int(request.form.get('item_id'))
        field_id = int(request.form.get('field_id'))
        path = request.form.get('path')
        # task_id 0 addresses the builder/admin task, anything else the
        # application task.
        if task_id == 0:
            task = self.admin
        else:
            task = self.task
        if task:
            request.task = task
            result = {
                'status': consts.RESPONSE,
                'data': None,
                'modification': consts.MODIFICATION
            }
            r['result'] = result
            if not self.check_session(request, task):
                # Not authenticated: report NOT_LOGGED in both fields.
                r['result']['status'] = consts.NOT_LOGGED
                r['result']['data'] = consts.NOT_LOGGED
            else:
                f = request.files.get('file')
                file_name = request.form.get('file_name')
                if f and file_name:
                    base, ext = os.path.splitext(file_name)
                    upload_result = None
                    # Application hook may take over storage entirely and
                    # return the (path, file_name) it used.
                    if task.on_upload:
                        upload_result = task.on_upload(
                            task, path, file_name, f)
                    if upload_result:
                        path, file_name = upload_result
                        r['result']['data'] = {
                            'file_name': file_name,
                            'path': path
                        }
                    else:
                        # NOTE(review): nesting reconstructed - the
                        # extension whitelist is assumed to apply to
                        # uploads without a target field; confirm against
                        # upstream.
                        if item_id != -1 and field_id != -1:
                            item = task.item_by_ID(item_id)
                            field = item.field_by_ID(field_id)
                            if field.data_type == consts.IMAGE:
                                # Image fields must contain a real image
                                # whose type matches the extension.
                                if ext != validate_image(f):
                                    r['error'] = 'Invalid image file'
                        else:
                            if not ext in consts.upload_file_ext:
                                r['error'] = 'Invalid file extension'
                        # Timestamp the name to avoid collisions, then
                        # sanitize it.
                        file_name = ('%s%s%s') % (
                            base,
                            datetime.datetime.now().strftime(
                                '%Y-%m-%d_%H:%M:%S.%f'),
                            ext)
                        file_name = secure_filename(file_name)
                        file_name = file_name.replace('?', '')
                        # Destination depends on the addressed task.
                        if task_id == 0:
                            path = os.path.join('static', 'builder')
                        else:
                            path = os.path.join('static', 'files')
                        if not r['error']:
                            dir_path = os.path.join(
                                to_unicode(self.work_dir, 'utf-8'), path)
                            if not os.path.exists(dir_path):
                                os.makedirs(dir_path)
                            f.save(os.path.join(dir_path, file_name))
                            r['result']['data'] = {
                                'file_name': file_name,
                                'path': path
                            }
                else:
                    r['error'] = 'File upload invalid parameters'
        else:
            # No task available - report the current server state instead.
            r['result'] = {
                'status': self.state,
                'data': None,
                'modification': None
            }
        return self.create_post_response(request, r)