def test_basic(self):
    # Verify that @native_itermethods exposes keys/values/items through the
    # iterkeys/itervalues/iteritems compat helpers, and that extra
    # arguments (``multi`` here) are forwarded to the underlying methods.
    @datastructures.native_itermethods(['keys', 'values', 'items'])
    class StupidDict(object):
        def keys(self, multi=1):
            return iter(['a', 'b', 'c'] * multi)

        def values(self, multi=1):
            return iter([1, 2, 3] * multi)

        def items(self, multi=1):
            return iter(zip(iterkeys(self, multi=multi),
                            itervalues(self, multi=multi)))

    d = StupidDict()
    expected_keys = ['a', 'b', 'c']
    expected_values = [1, 2, 3]
    expected_items = list(zip(expected_keys, expected_values))

    # plain iteration
    self.assert_equal(list(iterkeys(d)), expected_keys)
    self.assert_equal(list(itervalues(d)), expected_values)
    self.assert_equal(list(iteritems(d)), expected_items)

    # extra positional argument is passed through as ``multi``
    self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
    self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
    self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
def test_basic(self):
    # Same check as the unittest-style variant: @native_itermethods must
    # expose keys/values/items through the compat helpers and forward
    # extra arguments (``multi``) to the decorated methods.
    @datastructures.native_itermethods(["keys", "values", "items"])
    class StupidDict(object):
        def keys(self, multi=1):
            return iter(["a", "b", "c"] * multi)

        def values(self, multi=1):
            return iter([1, 2, 3] * multi)

        def items(self, multi=1):
            return iter(
                zip(iterkeys(self, multi=multi), itervalues(self, multi=multi))
            )

    d = StupidDict()
    expected_keys = ["a", "b", "c"]
    expected_values = [1, 2, 3]
    expected_items = list(zip(expected_keys, expected_values))

    # plain iteration
    assert list(iterkeys(d)) == expected_keys
    assert list(itervalues(d)) == expected_values
    assert list(iteritems(d)) == expected_items

    # extra positional argument is passed through as ``multi``
    assert list(iterkeys(d, 2)) == expected_keys * 2
    assert list(itervalues(d, 2)) == expected_values * 2
    assert list(iteritems(d, 2)) == expected_items * 2
def change_table_sql(self, db_type, old_fields, new_fields):
    """Build the list of SQL statements migrating a table definition from
    ``old_fields`` to ``new_fields``.

    Fields are paired by ``id`` (new fields without an id are paired by
    name).  For SQLite any rename/default change forces a full table
    recreation because SQLite cannot alter columns in place.
    """
    def recreate(comp):
        # True when any field was renamed, had its default changed, or was
        # dropped — changes SQLite cannot express with ALTER TABLE.
        for key, (old_field, new_field) in iteritems(comp):
            if old_field and new_field:
                if old_field['field_name'] != new_field['field_name']:
                    return True
                elif old_field['default_value'] != new_field['default_value']:
                    return True
            elif old_field and not new_field:
                return True

    db_module = db_modules.get_db_module(db_type)
    table_name = self.f_table_name.value
    result = []
    comp = {}
    for field in old_fields:
        comp[field['id']] = [field, None]
    for field in new_fields:
        if comp.get(field['id']):
            comp[field['id']][1] = field
        elif field['id']:
            comp[field['id']] = [None, field]
        else:
            # brand new field with no id yet — key by name
            comp[field['field_name']] = [None, field]

    if db_type == db_modules.SQLITE and recreate(comp):
        result += self.recreate_table_sql(db_type, old_fields, new_fields)
    else:
        # dropped columns
        for key, (old_field, new_field) in iteritems(comp):
            if old_field and not new_field and db_type != db_modules.SQLITE:
                result.append(db_module.del_field_sql(table_name, old_field))
        # changed columns
        for key, (old_field, new_field) in iteritems(comp):
            if old_field and new_field and db_type != db_modules.SQLITE:
                if (old_field['field_name'] != new_field['field_name']) or \
                    (db_module.FIELD_TYPES[old_field['data_type']] != db_module.FIELD_TYPES[new_field['data_type']]) or \
                    (old_field['default_value'] != new_field['default_value']) or \
                    (old_field['size'] != new_field['size']):
                    sql = db_module.change_field_sql(table_name, old_field, new_field)
                    if type(sql) in (list, tuple):
                        result += sql
                    else:
                        # BUG FIX: was ``result.append()`` with no argument,
                        # which raised TypeError and dropped the statement.
                        result.append(sql)
        # added columns
        for key, (old_field, new_field) in iteritems(comp):
            if not old_field and new_field:
                result.append(db_module.add_field_sql(table_name, new_field))
    for s in result:
        print(s)
    return result
def lists(self):
    """Yield ``(key, values)`` pairs, where *values* is the sanitized list
    of all values associated with *key*."""
    for key, raw_values in iteritems(dict, self):
        sanitized = [self.sanitize_input(item) for item in raw_values]
        yield key, sanitized
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if not isinstance(iterable, dict):
        # plain sequence: quote each value independently
        return ', '.join(
            quote_header_value(value, allow_token=allow_token)
            for value in iterable
        )
    segments = []
    for key, value in iteritems(iterable):
        if value is None:
            # a bare key with no value
            segments.append(key)
        else:
            segments.append('%s=%s' % (
                key, quote_header_value(value, allow_token=allow_token)
            ))
    return ', '.join(segments)
def group_clause(self, query, fields, db_module=None):
    """Return the ``GROUP BY`` clause for *query*, or ``''`` when the query
    has no ``__group_by`` entry.

    Expanded lookup fields group by both the lookup expression and the raw
    column unless an aggregate function is applied to them.
    """
    if db_module is None:
        db_module = self.task.db_module
    group_fields = query.get('__group_by')
    funcs = query.get('__funcs')
    # BUG FIX: ``functions`` was only created when ``funcs`` was truthy,
    # so the functions.get(...) lookup below raised NameError for grouped
    # queries without aggregate functions.  Always start from an empty map.
    functions = {}
    if funcs:
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    result = ''
    if group_fields:
        for field_name in group_fields:
            field = self._field_by_name(field_name)
            if query['__expanded'] and field.lookup_item and field.data_type != common.KEYS:
                func = functions.get(field.field_name.upper())
                if func:
                    # aggregated lookup: group by the raw column only
                    result += '%s."%s", ' % (self.table_alias(), field.db_field_name)
                else:
                    result += '%s, %s."%s", ' % (self.lookup_field_sql(field, db_module),
                                                 self.table_alias(), field.db_field_name)
            else:
                result += '%s."%s", ' % (self.table_alias(), field.db_field_name)
        if result:
            # strip the trailing ", "
            result = result[:-2]
            result = ' GROUP BY ' + result
        return result
    else:
        return ''
def restart_with_reloader():
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.
    """
    while 1:
        _log('info', ' * Restarting with reloader')
        # Fix for entry-point scripts on newer Python versions: only
        # prepend the interpreter when argv[0] is a plain .py/.pyw script;
        # otherwise re-run argv[0] directly (it is already executable).
        if sys.argv[0].endswith('.pyw') or sys.argv[0].endswith('.py'):
            args = [sys.executable] + sys.argv
        else:
            args = sys.argv
        new_environ = os.environ.copy()
        new_environ['WERKZEUG_RUN_MAIN'] = 'true'

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(new_environ):
                if isinstance(value, text_type):
                    new_environ[key] = value.encode('iso-8859-1')

        exit_code = subprocess.call(args, env=new_environ)
        # exit code 3 means "restart requested"; anything else terminates
        if exit_code != 3:
            return exit_code
def proxy(*children, **arguments):
    """Render the element named by the enclosing ``tag`` with *arguments*
    as attributes and *children* as element content (closure over
    ``tag``/``self`` from the surrounding scope).
    """
    buffer = "<" + tag
    for key, value in iteritems(arguments):
        if value is None:
            # None means "omit this attribute"
            continue
        if key[-1] == "_":
            # trailing underscore lets callers pass reserved words (class_)
            key = key[:-1]
        if key in self._boolean_attributes:
            if not value:
                continue
            if self._dialect == "xhtml":
                # XHTML requires a value, e.g. checked="checked"
                value = '="' + key + '"'
            else:
                value = ""
        else:
            value = '="' + escape(value) + '"'
        buffer += " " + key + value
    if not children and tag in self._empty_elements:
        # void element: self-close in XHTML, bare close in HTML
        if self._dialect == "xhtml":
            buffer += " />"
        else:
            buffer += ">"
        return buffer
    buffer += ">"

    children_as_string = "".join([text_type(x) for x in children if x is not None])

    if children_as_string:
        if tag in self._plaintext_elements:
            children_as_string = escape(children_as_string)
        elif tag in self._c_like_cdata and self._dialect == "xhtml":
            # wrap script/style content so XML parsers don't choke
            children_as_string = "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
    buffer += children_as_string + "</" + tag + ">"
    return buffer
def find_actions(namespace, action_prefix):
    """Find all the actions in the namespace."""
    prefix_len = len(action_prefix)
    return dict(
        (name[prefix_len:], analyse_action(obj))
        for name, obj in iteritems(namespace)
        if name.startswith(action_prefix)
    )
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    _deprecated()
    actions = sorted(iteritems(actions))
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print(' %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print(' %s:' % name)
        for line in doc.splitlines():
            print(' %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                # boolean flags have no value column
                print(' %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print(' %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()
def restart_with_reloader():
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.
    """
    while 1:
        _log('info', ' * Restarting with reloader')
        requires_shell = False
        if sys.executable:
            args = [sys.executable] + sys.argv
        else:
            # no usable sys.executable (e.g. embedded interpreter); fall
            # back to whatever detect_executable() can figure out
            args, requires_shell = detect_executable()
        new_environ = os.environ.copy()
        new_environ['WERKZEUG_RUN_MAIN'] = 'true'

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(new_environ):
                if isinstance(value, text_type):
                    new_environ[key] = value.encode('iso-8859-1')

        exit_code = subprocess.call(args, env=new_environ, shell=requires_shell)
        # exit code 3 means "restart requested"; anything else terminates
        if exit_code != 3:
            return exit_code
def restart_with_reloader(self):
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.
    """
    while 1:
        _log('info', ' * Restarting with %s' % self.name)
        args = _get_args_for_reloading()

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            new_environ = {}
            for key, value in iteritems(os.environ):
                if isinstance(key, text_type):
                    key = key.encode('iso-8859-1')
                if isinstance(value, text_type):
                    value = value.encode('iso-8859-1')
                new_environ[key] = value
        else:
            new_environ = os.environ.copy()

        new_environ['WERKZEUG_RUN_MAIN'] = 'true'
        # close_fds=False keeps inherited descriptors (e.g. the server
        # socket) open in the restarted child
        exit_code = subprocess.call(args, env=new_environ, close_fds=False)
        # exit code 3 means "restart requested"; anything else terminates
        if exit_code != 3:
            return exit_code
def _find_exceptions():
    """Register every module-level object carrying a non-None ``code``
    attribute in ``default_exceptions`` and export it via ``__all__``."""
    for _name, candidate in iteritems(globals()):
        try:
            code = getattr(candidate, 'code', None)
            if code is None:
                continue
            default_exceptions[code] = candidate
            __all__.append(candidate.__name__)
        except TypeError:  # pragma: no cover
            continue
def __call__(self, environ, start_response):
    """WSGI entry point: dispatch to the first proxy target whose prefix
    matches the request path, otherwise fall through to the wrapped app."""
    path = environ['PATH_INFO']
    for prefix, opts in iteritems(self.targets):
        if path.startswith(prefix):
            return self.proxy_to(opts, path, prefix)(environ, start_response)
    return self.app(environ, start_response)
def prepare_environ_pickle(environ):
    """Return a copy of *environ* containing only the items that survive
    pickling; unpicklable entries are silently dropped."""
    def _picklable(pair):
        try:
            pickle.dumps(pair)
        except Exception:
            return False
        return True

    return dict(
        (key, value)
        for key, value in iteritems(environ)
        if _picklable((key, value))
    )
def recreate(comp):
    """Return ``True`` when any field pair in *comp* was renamed, had its
    default value changed, or was dropped (``None`` otherwise)."""
    for _key, (old_field, new_field) in iteritems(comp):
        if old_field and not new_field:
            # dropped column
            return True
        if old_field and new_field:
            if old_field['field_name'] != new_field['field_name']:
                return True
            if old_field['default_value'] != new_field['default_value']:
                return True
def __call__(self, environ, start_response):
    """Serve a static file for the request path if one of the configured
    exports matches, otherwise delegate to the wrapped application."""
    cleaned_path = get_path_info(environ)
    if PY2:
        cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
    # sanitize the path for non unix systems
    cleaned_path = cleaned_path.strip('/')
    for sep in os.sep, os.altsep:
        if sep and sep != '/':
            cleaned_path = cleaned_path.replace(sep, '/')
    # rebuild the path, dropping empty and '..' segments (traversal guard)
    path = '/' + '/'.join(x for x in cleaned_path.split('/')
                          if x and x != '..')
    file_loader = None
    for search_path, loader in iteritems(self.exports):
        # exact match first, then prefix match
        if search_path == path:
            real_filename, file_loader = loader(None)
            if file_loader is not None:
                break
        if not search_path.endswith('/'):
            search_path += '/'
        if path.startswith(search_path):
            real_filename, file_loader = loader(path[len(search_path):])
            if file_loader is not None:
                break
    if file_loader is None or not self.is_allowed(real_filename):
        return self.app(environ, start_response)
    guessed_type = mimetypes.guess_type(real_filename)
    mime_type = guessed_type[0] or self.fallback_mimetype
    f, mtime, file_size = file_loader()
    headers = [('Date', http_date())]
    if self.cache:
        timeout = self.cache_timeout
        etag = self.generate_etag(mtime, file_size, real_filename)
        headers += [
            ('Etag', '"%s"' % etag),
            ('Cache-Control', 'max-age=%d, public' % timeout)
        ]
        # conditional request: answer 304 without opening the body
        if not is_resource_modified(environ, etag, last_modified=mtime):
            f.close()
            start_response('304 Not Modified', headers)
            return []
        headers.append(('Expires', http_date(time() + timeout)))
    else:
        headers.append(('Cache-Control', 'public'))
    headers.extend((
        ('Content-Type', mime_type),
        ('Content-Length', str(file_size)),
        ('Last-Modified', http_date(mtime))
    ))
    start_response('200 OK', headers)
    return wrap_file(environ, f)
def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
    """Wrap *app* and proxy requests whose path matches one of *targets*.

    Target prefixes are normalized to ``/<prefix>/`` form and each option
    dict is filled with defaults in place.
    """
    self.app = app
    self.chunk_size = chunk_size
    self.timeout = timeout
    normalized = {}
    for prefix, opts in iteritems(targets):
        # fill in the option defaults (mutates the caller's dict, as before)
        opts.setdefault('remove_prefix', False)
        opts.setdefault('host', '<auto>')
        opts.setdefault('headers', {})
        opts.setdefault('ssl_context', None)
        normalized['/%s/' % prefix.strip('/')] = opts
    self.targets = normalized
def unserialize(cls, string, secret_key):
    """Load the secure cookie from a serialized string.

    :param string: the cookie value to unserialize.
    :param secret_key: the secret key used to serialize the cookie.
    :return: a new :class:`SecureCookie`.
    """
    if isinstance(string, text_type):
        string = string.encode('utf-8', 'replace')
    if isinstance(secret_key, text_type):
        secret_key = secret_key.encode('utf-8', 'replace')
    try:
        # cookie layout: <base64 mac>?<key=value&key=value...>
        base64_hash, data = string.split(b'?', 1)
    except (ValueError, IndexError):
        items = ()
    else:
        items = {}
        mac = hmac(secret_key, None, cls.hash_method)
        for item in data.split(b'&'):
            mac.update(b'|' + item)
            if not b'=' in item:
                # malformed pair invalidates the whole cookie
                items = None
                break
            key, value = item.split(b'=', 1)
            # try to make the key a string
            key = url_unquote_plus(key.decode('ascii'))
            try:
                key = to_native(key)
            except UnicodeError:
                pass
            items[key] = value

        # no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
        try:
            client_hash = base64.b64decode(base64_hash)
        except TypeError:
            items = client_hash = None
        if items is not None and safe_str_cmp(client_hash, mac.digest()):
            try:
                for key, value in iteritems(items):
                    items[key] = cls.unquote(value)
            except UnquoteError:
                items = ()
            else:
                # honor the embedded expiry timestamp, then strip it
                if '_expires' in items:
                    if time() > items['_expires']:
                        items = ()
                    else:
                        del items['_expires']
        else:
            # signature mismatch: discard everything
            items = ()
    return cls(items, secret_key, False)
def _find_exceptions():
    """Collect every HTTPException subclass with a status code from the
    module globals into ``default_exceptions`` and ``__all__``.  An already
    registered superclass wins over a more derived class."""
    for _name, obj in iteritems(globals()):
        try:
            if not issubclass(obj, HTTPException):
                continue
        except TypeError:
            # not a class at all
            continue
        if obj.code is None:
            continue
        __all__.append(obj.__name__)
        previous = default_exceptions.get(obj.code, None)
        if previous is not None and issubclass(obj, previous):
            # keep the more general class already registered for this code
            continue
        default_exceptions[obj.code] = obj
def _items(mappingorseq):
    """Wrapper for efficient iteration over mappings represented by dicts
    or sequences::

        >>> for k, v in _items((i, i*i) for i in xrange(5)):
        ...     assert k*k == v

        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
        ...     assert k*k == v
    """
    # duck-type on .items(): mappings go through iteritems, sequences of
    # pairs are returned unchanged
    return iteritems(mappingorseq) if hasattr(mappingorseq, 'items') else mappingorseq
def get_table_info(connection, table_name, db_name):
    """Read column and index metadata for a MySQL table.

    :return: ``{'fields': [...], 'indexes': [...]}`` where each field dict
        carries name/type/size/default/pk and each index dict carries
        name, uniqueness and its field list.

    NOTE(review): table/db names are interpolated directly into the SQL;
    they must come from trusted metadata, not user input.
    """
    cursor = connection.cursor()
    sql = 'SHOW COLUMNS FROM "%s" FROM %s' % (table_name, db_name)
    cursor.execute(sql)
    result = cursor.fetchall()
    fields = []
    for (field_name, type_size, null, key, default_value, autoinc) in result:
        pk = False
        try:
            if autoinc and key == 'PRI':
                pk = True
            data_type = type_size.split('(')[0].upper()
            size = type_size.split('(')[1].split(')')[0]
            if data_type not in ['VARCHAR', 'CHAR']:
                # size is only meaningful for character types
                size = 0
        except Exception:
            # type has no "(size)" part, e.g. TEXT or DATETIME
            data_type = type_size
            size = 0
        fields.append({
            'field_name': field_name,
            'data_type': data_type,
            'size': size,
            'default_value': default_value,
            'pk': pk
        })

    sql = 'SHOW INDEXES FROM %s FROM %s' % (table_name, db_name)
    cursor.execute(sql)
    result = cursor.fetchall()
    indexes = {}
    for r in result:
        index_name = r[2]
        # column 1 is Non_unique: 0 means the index is unique
        unique = r[1] == 0
        if index_name != 'PRIMARY':
            index = indexes.get(index_name)
            if not index:
                index = {
                    'index_name': index_name,
                    'unique': unique,
                    'fields': []
                }
                indexes[index_name] = index
            index['fields'].append([r[4], False])
    # BUG FIX: removed a stray no-op ``indexes.values()`` statement and
    # build the result list directly from the collected index dicts.
    ind = list(indexes.values())
    return {'fields': fields, 'indexes': ind}
def order_clause(self, query, db_module=None):
    """Return the ``ORDER BY`` clause for *query*, or ``''`` when no
    ordering applies (e.g. aggregate queries without grouping)."""
    limit = query.get('__limit')
    # a LIMIT without an explicit order falls back to the primary key so
    # paging is deterministic
    if limit and not query.get('__order') and self._primary_key:
        query['__order'] = [[self._primary_key, False]]
    if query.get('__funcs') and not query.get('__group_by'):
        return ''
    funcs = query.get('__funcs')
    functions = {}
    if funcs:
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    if db_module is None:
        db_module = self.task.db_module
    order_list = query.get('__order', [])
    orders = []
    for order in order_list:
        field = self._field_by_name(order[0])
        if field:
            func = functions.get(field.field_name.upper())
            # unexpanded queries cannot order by a second-level lookup
            if not query['__expanded'] and field.lookup_item1:
                orders = []
                break
            if query['__expanded'] and field.lookup_item:
                if field.data_type == common.KEYS:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
                else:
                    if func:
                        ord_str = self.field_alias(field, db_module)
                    else:
                        ord_str = self.lookup_field_sql(field, db_module)
            else:
                if func:
                    if db_module.DATABASE == 'MSSQL' and limit:
                        # MSSQL with TOP cannot order by the alias
                        ord_str = '%s(%s."%s")' % (func, self.table_alias(), field.db_field_name)
                    else:
                        ord_str = '"%s"' % field.db_field_name
                else:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
            # order[1] truthy means descending
            if order[1]:
                if hasattr(db_module, 'DESC'):
                    ord_str += ' ' + db_module.DESC
                else:
                    ord_str += ' DESC'
            orders.append(ord_str)
    if orders:
        result = ' ORDER BY %s' % ', '.join(orders)
    else:
        result = ''
    return result
def __call__(self, environ, start_response):
    """Serve a static file for the request path if one of the configured
    exports matches, otherwise delegate to the wrapped application."""
    cleaned_path = get_path_info(environ)
    if PY2:
        cleaned_path = cleaned_path.encode(get_filesystem_encoding())
    # sanitize the path for non unix systems
    cleaned_path = cleaned_path.strip('/')
    for sep in os.sep, os.altsep:
        if sep and sep != '/':
            cleaned_path = cleaned_path.replace(sep, '/')
    # rebuild the path, dropping empty and '..' segments (traversal guard)
    path = '/' + '/'.join(
        x for x in cleaned_path.split('/') if x and x != '..')
    file_loader = None
    for search_path, loader in iteritems(self.exports):
        # exact match first, then prefix match
        if search_path == path:
            real_filename, file_loader = loader(None)
            if file_loader is not None:
                break
        if not search_path.endswith('/'):
            search_path += '/'
        if path.startswith(search_path):
            real_filename, file_loader = loader(path[len(search_path):])
            if file_loader is not None:
                break
    if file_loader is None or not self.is_allowed(real_filename):
        return self.app(environ, start_response)
    guessed_type = mimetypes.guess_type(real_filename)
    mime_type = guessed_type[0] or self.fallback_mimetype
    f, mtime, file_size = file_loader()
    headers = [('Date', http_date())]
    if self.cache:
        timeout = self.cache_timeout
        etag = self.generate_etag(mtime, file_size, real_filename)
        headers += [('Etag', '"%s"' % etag),
                    ('Cache-Control', 'max-age=%d, public' % timeout)]
        # conditional request: answer 304 without opening the body
        if not is_resource_modified(environ, etag, last_modified=mtime):
            f.close()
            start_response('304 Not Modified', headers)
            return []
        headers.append(('Expires', http_date(time() + timeout)))
    else:
        headers.append(('Cache-Control', 'public'))
    headers.extend(
        (('Content-Type', mime_type),
         ('Content-Length', str(file_size)),
         ('Last-Modified', http_date(mtime))))
    start_response('200 OK', headers)
    return wrap_file(environ, f)
def order_clause(self, query, db_module=None):
    """Return the ``ORDER BY`` clause for *query*, or ``''`` when no
    ordering applies (e.g. aggregate queries without grouping)."""
    limit = query.get('__limit')
    # a LIMIT without an explicit order falls back to the primary key so
    # paging is deterministic
    if limit and not query.get('__order') and self._primary_key:
        query['__order'] = [[self._primary_key, False]]
    if query.get('__funcs') and not query.get('__group_by'):
        return ''
    funcs = query.get('__funcs')
    functions = {}
    if funcs:
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    if db_module is None:
        db_module = self.task.db_module
    order_list = query.get('__order', [])
    orders = []
    for order in order_list:
        field = self._field_by_name(order[0])
        if field:
            # unexpanded queries cannot order by a second-level lookup
            if not query['__expanded'] and field.lookup_item1:
                orders = []
                break
            if query['__expanded'] and field.lookup_item:
                if field.data_type == common.KEYS:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
                else:
                    ord_str = self.lookup_field_sql(field, db_module)
            else:
                func = functions.get(field.field_name.upper())
                if func:
                    if db_module.DATABASE == 'MSSQL' and limit:
                        # MSSQL with TOP cannot order by the alias
                        ord_str = '%s(%s."%s")' % (
                            func, self.table_alias(), field.db_field_name)
                    else:
                        ord_str = '"%s"' % field.db_field_name
                else:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
            # order[1] truthy means descending
            if order[1]:
                if hasattr(db_module, 'DESC'):
                    ord_str += ' ' + db_module.DESC
                else:
                    ord_str += ' DESC'
            orders.append(ord_str)
    if orders:
        result = ' ORDER BY %s' % ', '.join(orders)
    else:
        result = ''
    return result
def change_table_sql(self, db_type, old_fields, new_fields):
    """Build the list of SQL statements migrating a table definition from
    ``old_fields`` to ``new_fields``.

    Fields are paired by ``id`` (new fields without an id are paired by
    name); a pair with a missing side means a dropped or added column.
    Drop/change statements are skipped for SQLite, which cannot alter
    columns in place.
    """
    db_module = db_modules.get_db_module(db_type)
    table_name = self.f_table_name.value
    result = []
    comp = {}
    for field in old_fields:
        comp[field['id']] = [field, None]
    for field in new_fields:
        if comp.get(field['id']):
            comp[field['id']][1] = field
        elif field['id']:
            comp[field['id']] = [None, field]
        else:
            # brand new field with no id yet — key by name
            comp[field['field_name']] = [None, field]
    # dropped columns
    for key, (old_field, new_field) in iteritems(comp):
        if old_field and not new_field and db_type != db_modules.SQLITE:
            result.append(db_module.del_field_sql(table_name, old_field))
    # changed columns
    for key, (old_field, new_field) in iteritems(comp):
        if old_field and new_field and db_type != db_modules.SQLITE:
            if (old_field['field_name'] != new_field['field_name']) or \
                (db_module.FIELD_TYPES[old_field['data_type']] != db_module.FIELD_TYPES[new_field['data_type']]) or \
                (old_field['default_value'] != new_field['default_value']) or \
                (old_field['size'] != new_field['size']):
                sql = db_module.change_field_sql(table_name, old_field, new_field)
                if type(sql) in (list, tuple):
                    result += sql
                else:
                    # BUG FIX: was ``result.append()`` with no argument,
                    # which raised TypeError and dropped the statement.
                    result.append(sql)
    # added columns
    for key, (old_field, new_field) in iteritems(comp):
        if not old_field and new_field:
            result.append(db_module.add_field_sql(table_name, new_field))
    for s in result:
        print(s)
    return result
def items(self, multi=False):
    """Return an iterator of ``(key, value)`` pairs.

    :param multi: If set to `True` the iterator returned will have a pair
                  for each value of each key.  Otherwise it will only
                  contain pairs for the first value of each key.
    """
    for key, raw in iteritems(dict, self):
        cleaned = [self.sanitize_input(entry) for entry in raw]
        if not multi:
            yield key, cleaned[0]
            continue
        for value in cleaned:
            yield key, value
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    parts = [] if header is None else [header]
    for key, value in iteritems(options):
        if value is None:
            # a bare option with no value
            parts.append(key)
        else:
            parts.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(parts)
def generate_map(map, name="url_map"):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you can use a name like 'obj.url_for')

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users. If your rules contain sensitive
    information, don't use JavaScript generation!
    """
    from warnings import warn
    warn(DeprecationWarning("This module is deprecated"))
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        # keep the trace as plain serializable dicts
        trace = [{
            "is_dynamic": is_dynamic,
            "data": data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in iteritems(rule._converters):
            js_func = js_to_url_function(converter)
            try:
                # reuse an identical converter function if already emitted
                index = converters.index(js_func)
            except ValueError:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u"endpoint": rule.endpoint,
            u"arguments": list(rule.arguments),
            u"converters": rule_converters,
            u"trace": trace,
            u"defaults": rule.defaults,
        })
    return render_template(
        name_parts=name and name.split(".") or [],
        rules=dumps(rules),
        converters=converters,
    )
def _iter_data(data):
    """Iterates over a dict or multidict yielding all keys and values.
    This is used to iterate over the data passed to the
    :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, values in iterlists(data):
            for value in values:
                yield key, value
        return
    for key, values in iteritems(data):
        if not isinstance(values, list):
            # single value
            yield key, values
            continue
        for value in values:
            yield key, value
def get_filename_and_loader(self, path):
    """Resolve *path* against the single registered loader and return
    ``(real_filename, file_loader)``; both are ``None`` when nothing
    matches.
    """
    file_loader = None
    # BUG FIX: real_filename was referenced after the loop even when no
    # branch assigned it (e.g. a path without a leading slash), raising
    # UnboundLocalError.  Initialize it so "no match" returns (None, None).
    real_filename = None
    exports = {"": self.loader}
    for search_path, loader in iteritems(exports):
        # exact match first, then prefix match
        if search_path == path:
            real_filename, file_loader = loader(None)
            if file_loader is not None:
                break
        if not search_path.endswith("/"):
            search_path += "/"
        if path.startswith(search_path):
            real_filename, file_loader = loader(path[len(search_path):])
            if file_loader is not None:
                break
    return real_filename, file_loader
def fields_clause(self, query, fields, db_module=None):
    """Build the SELECT column list for *query*; supports ``count(*)``
    summaries, aggregate functions and expanded lookup columns."""
    summary = query.get('__summary')
    if db_module is None:
        db_module = self.task.db_module
    funcs = query.get('__funcs')
    if funcs:
        functions = {}
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    sql = []
    for i, field in enumerate(fields):
        if i == 0 and summary:
            # summary queries replace the first column with count(*)
            sql.append(db_module.identifier_case('count(*)'))
        elif field.master_field:
            pass
        elif field.calculated:
            # calculated fields have no DB column — select NULL placeholder
            sql.append('NULL %s "%s"' % (db_module.FIELD_AS, field.db_field_name))
        else:
            field_sql = '%s."%s"' % (self.table_alias(), field.db_field_name)
            func = None
            if funcs:
                func = functions.get(field.field_name.upper())
            if func:
                field_sql = '%s(%s) %s "%s"' % (func.upper(), field_sql,
                                                db_module.FIELD_AS, field.db_field_name)
            sql.append(field_sql)
    if query['__expanded']:
        # add the lookup (display) columns for expanded queries
        for i, field in enumerate(fields):
            if i == 0 and summary:
                continue
            field_sql = self.lookup_field_sql(field, db_module)
            field_alias = self.field_alias(field, db_module)
            if field_sql:
                if funcs:
                    func = functions.get(field.field_name.upper())
                    if func:
                        field_sql = '%s(%s) %s "%s"' % (func.upper(
                        ), field_sql, db_module.FIELD_AS, field_alias)
                else:
                    field_sql = '%s %s %s' % (
                        field_sql, db_module.FIELD_AS, field_alias)
                sql.append(field_sql)
    sql = ', '.join(sql)
    return sql
def __call__(self, environ, start_response):
    """Serve a static file for the request path if one of the configured
    exports matches, otherwise delegate to the wrapped application."""
    cleaned_path = get_path_info(environ)
    if PY2:
        cleaned_path = cleaned_path.encode(get_filesystem_encoding())
    # sanitize the path for non unix systems
    cleaned_path = cleaned_path.strip("/")
    for sep in os.sep, os.altsep:
        if sep and sep != "/":
            cleaned_path = cleaned_path.replace(sep, "/")
    # rebuild the path, dropping empty and '..' segments (traversal guard)
    path = "/" + "/".join(x for x in cleaned_path.split("/") if x and x != "..")
    file_loader = None
    for search_path, loader in iteritems(self.exports):
        # exact match first, then prefix match
        if search_path == path:
            real_filename, file_loader = loader(None)
            if file_loader is not None:
                break
        if not search_path.endswith("/"):
            search_path += "/"
        if path.startswith(search_path):
            real_filename, file_loader = loader(path[len(search_path):])
            if file_loader is not None:
                break
    if file_loader is None or not self.is_allowed(real_filename):
        return self.app(environ, start_response)
    guessed_type = mimetypes.guess_type(real_filename)
    mime_type = guessed_type[0] or self.fallback_mimetype
    f, mtime, file_size = file_loader()
    headers = [("Date", http_date())]
    if self.cache:
        timeout = self.cache_timeout
        etag = self.generate_etag(mtime, file_size, real_filename)
        headers += [("Etag", '"%s"' % etag),
                    ("Cache-Control", "max-age=%d, public" % timeout)]
        # conditional request: answer 304 without opening the body
        if not is_resource_modified(environ, etag, last_modified=mtime):
            f.close()
            start_response("304 Not Modified", headers)
            return []
        headers.append(("Expires", http_date(time() + timeout)))
    else:
        headers.append(("Cache-Control", "public"))
    headers.extend(
        (("Content-Type", mime_type),
         ("Content-Length", str(file_size)),
         ("Last-Modified", http_date(mtime)))
    )
    start_response("200 OK", headers)
    return wrap_file(environ, f)
def generate_map(map, name='url_map'):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you can use a name like 'obj.url_for')

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users. If your rules contain sensitive
    information, don't use JavaScript generation!
    """
    from warnings import warn
    warn(DeprecationWarning('This module is deprecated'))
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        # keep the trace as plain serializable dicts
        trace = [{
            'is_dynamic': is_dynamic,
            'data': data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in iteritems(rule._converters):
            js_func = js_to_url_function(converter)
            try:
                # reuse an identical converter function if already emitted
                index = converters.index(js_func)
            except ValueError:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u'endpoint': rule.endpoint,
            u'arguments': list(rule.arguments),
            u'converters': rule_converters,
            u'trace': trace,
            u'defaults': rule.defaults
        })
    return render_template(name_parts=name and name.split('.') or [],
                           rules=dumps(rules),
                           converters=converters)
def to_dict(self, flat=True):
    """Return the contents as regular dict.  If `flat` is `True` the
    returned dict will only have the first item present, if `flat` is
    `False` all values will be returned as lists.

    :param flat: If set to `False` the dict returned will have lists
                 with all the values in it.  Otherwise it will only
                 contain the first value for each key.
    :return: a :class:`dict`
    """
    if not flat:
        return dict(self.lists())
    return dict(
        (key, self.sanitize_input(value)) for key, value in iteritems(self)
    )
def dict_repr(self, d, recursive, limit=5):
    """HTML repr for a dict; entries from index ``limit - 1`` onwards are
    wrapped in an "extended" span so the UI can collapse them."""
    if recursive:
        return _add_subclass_info(u'{...}', d, dict)
    pieces = ['{']
    truncated = False
    for position, (key, value) in enumerate(iteritems(d)):
        if position:
            pieces.append(', ')
        if position == limit - 1:
            pieces.append('<span class="extended">')
            truncated = True
        pieces.append(
            '<span class="pair"><span class="key">%s</span>: '
            '<span class="value">%s</span></span>'
            % (self.repr(key), self.repr(value))
        )
    if truncated:
        pieces.append('</span>')
    pieces.append('}')
    return _add_subclass_info(u''.join(pieces), d, dict)
def get_dict(self, *keys):
    """Fetch several keys from memcached at once and return a dict mapping
    each requested key to its value (``None`` when missing or invalid)."""
    key_mapping = {}
    have_encoded_keys = False
    for key in keys:
        encoded_key = self._normalize_key(key)
        if not isinstance(key, str):
            have_encoded_keys = True
        # skip keys memcached would reject (too long / bad characters)
        if _test_memcached_key(key):
            key_mapping[encoded_key] = key
    # d aliases rv unless the keys need translating below
    d = rv = self._client.get_multi(key_mapping.keys())
    if have_encoded_keys or self.key_prefix:
        # translate normalized/prefixed keys back to the caller's keys
        rv = {}
        for key, value in iteritems(d):
            rv[key_mapping[key]] = value
    if len(rv) < len(keys):
        # fill in None for keys memcached did not return
        for key in keys:
            if key not in rv:
                rv[key] = None
    return rv
def fields_clause(self, query, fields, db_module=None):
    """Build the SELECT column list for *query*; applies aggregate
    functions and appends ``*_LOOKUP`` display columns for expanded
    queries."""
    if db_module is None:
        db_module = self.task.db_module
    funcs = query.get('__funcs')
    if funcs:
        functions = {}
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    sql = []
    for field in fields:
        if field.master_field:
            pass
        elif field.calculated:
            # calculated fields have no DB column — select NULL placeholder
            sql.append('NULL %s "%s"' % (db_module.FIELD_AS, field.db_field_name))
        else:
            field_sql = '%s."%s"' % (self.table_alias(), field.db_field_name)
            if funcs:
                func = functions.get(field.field_name.upper())
                if func:
                    field_sql = '%s(%s) AS "%s"' % (
                        func.upper(), field_sql, field.db_field_name)
            sql.append(field_sql)
    if query['__expanded']:
        # add the lookup (display) columns for expanded queries
        for field in fields:
            if field.lookup_item:
                if field.lookup_field2:
                    field_sql = '%s."%s" %s %s_LOOKUP' % \
                        (self.lookup_table_alias2(field), field.lookup_db_field2,
                         db_module.FIELD_AS, field.db_field_name)
                elif field.lookup_field1:
                    field_sql = '%s."%s" %s %s_LOOKUP' % \
                        (self.lookup_table_alias1(field), field.lookup_db_field1,
                         db_module.FIELD_AS, field.db_field_name)
                else:
                    if field.data_type == common.KEYS:
                        field_sql = 'NULL'
                    else:
                        field_sql = '%s."%s" %s %s_LOOKUP' % \
                            (self.lookup_table_alias(field), field.lookup_db_field,
                             db_module.FIELD_AS, field.db_field_name)
                sql.append(field_sql)
    sql = ', '.join(sql)
    return sql
def dump_object(self, obj):
    """Render a dump of *obj*: its items when it is a string-keyed dict,
    otherwise its attributes via ``dir()``."""
    obj_repr = None
    entries = None
    if isinstance(obj, dict):
        title = 'Contents of'
        entries = []
        for key, value in iteritems(obj):
            if not isinstance(key, string_types):
                # non-string key: fall back to the attribute dump below
                entries = None
                break
            entries.append((key, self.repr(value)))
    if entries is None:
        title = 'Details for'
        obj_repr = self.repr(obj)
        entries = []
        for attr_name in dir(obj):
            try:
                entries.append((attr_name, self.repr(getattr(obj, attr_name))))
            except Exception:
                # attribute access may raise on exotic objects; skip it
                pass
    title += ' ' + object.__repr__(obj)[1:-1]
    return self.render_object_dump(entries, title, obj_repr)
def restart_with_reloader(self):
    """Spawn a new Python interpreter with the same arguments as this
    one, but running the reloader thread.
    """
    while True:
        _log("info", " * Restarting with %s" % self.name)
        args = _get_args_for_reloading()
        child_environ = os.environ.copy()
        child_environ["WERKZEUG_RUN_MAIN"] = "true"

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == "nt" and PY2:
            for key, value in iteritems(child_environ):
                if isinstance(value, text_type):
                    child_environ[key] = value.encode("iso-8859-1")

        exit_code = subprocess.call(args, env=child_environ, close_fds=False)
        # Exit code 3 means "reload requested"; anything else terminates.
        if exit_code != 3:
            return exit_code
def restart_with_reloader():
    """Spawn a new Python interpreter with the same arguments as this
    one, but running the reloader thread.
    """
    while True:
        _log('info', ' * Restarting with reloader')
        args = [sys.executable] + sys.argv
        child_environ = os.environ.copy()
        child_environ['WERKZEUG_RUN_MAIN'] = 'true'

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(child_environ):
                if isinstance(value, text_type):
                    child_environ[key] = value.encode('iso-8859-1')

        exit_code = subprocess.call(args, env=child_environ)
        # Exit code 3 means "reload requested"; anything else terminates.
        if exit_code != 3:
            return exit_code
def __init__(self, app, exports, disallow=None, cache=True,
             cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
    """Set up the middleware.

    ``exports`` maps URL prefixes to either a filesystem path (file or
    directory) or a ``(package, subdirectory)`` tuple; each entry is
    turned into a loader callable.
    """
    self.app = app
    self.cache = cache
    self.cache_timeout = cache_timeout
    self.fallback_mimetype = fallback_mimetype
    self.exports = {}
    for prefix, spec in iteritems(exports):
        if isinstance(spec, tuple):
            # (package, subdirectory) pair
            loader = self.get_package_loader(*spec)
        elif isinstance(spec, string_types):
            # A path: a single file or a whole directory.
            if os.path.isfile(spec):
                loader = self.get_file_loader(spec)
            else:
                loader = self.get_directory_loader(spec)
        else:
            raise TypeError('unknown def %r' % spec)
        self.exports[prefix] = loader
    if disallow is not None:
        from fnmatch import fnmatch
        self.is_allowed = lambda x: not fnmatch(x, disallow)
def test_create_environ():
    """``create_environ`` builds a complete WSGI environ from path + base URL."""
    env = create_environ("/foo?bar=baz", "http://example.org/")
    checks = [
        ("wsgi.multiprocess", False),
        ("wsgi.version", (1, 0)),
        ("wsgi.run_once", False),
        ("wsgi.errors", sys.stderr),
        ("wsgi.multithread", False),
        ("wsgi.url_scheme", "http"),
        ("SCRIPT_NAME", ""),
        ("SERVER_NAME", "example.org"),
        ("REQUEST_METHOD", "GET"),
        ("HTTP_HOST", "example.org"),
        ("PATH_INFO", "/foo"),
        ("SERVER_PORT", "80"),
        ("SERVER_PROTOCOL", "HTTP/1.1"),
        ("QUERY_STRING", "bar=baz"),
    ]
    for key, value in checks:
        assert env[key] == value
    # The input stream exists and is empty.
    strict_eq(env["wsgi.input"].read(0), b"")
    strict_eq(create_environ("/foo", "http://example.com/")["SCRIPT_NAME"], "")
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    prog = basename(sys.argv[0])
    print('usage: %s <action> [<options>]' % prog)
    print(' %s --help' % prog)
    print()
    print('actions:')
    for name, (func, doc, arguments) in sorted(iteritems(actions)):
        print(' %s:' % name)
        for line in doc.splitlines():
            print(' %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            # e.g. "-p, --port"; no shortcut -> just "--port"
            flag = (shortcut and '-%s, ' % shortcut or '') + '--' + arg
            if isinstance(default, bool):
                # Boolean flags take no value, so no type/default columns.
                print(' %s' % flag)
            else:
                print(' %-30s%-10s%s' % (flag, argtype, default))
        print()
def test_create_environ():
    """``create_environ`` builds a complete WSGI environ from path + base URL."""
    env = create_environ('/foo?bar=baz', 'http://example.org/')
    checks = [
        ('wsgi.multiprocess', False),
        ('wsgi.version', (1, 0)),
        ('wsgi.run_once', False),
        ('wsgi.errors', sys.stderr),
        ('wsgi.multithread', False),
        ('wsgi.url_scheme', 'http'),
        ('SCRIPT_NAME', ''),
        ('SERVER_NAME', 'example.org'),
        ('REQUEST_METHOD', 'GET'),
        ('HTTP_HOST', 'example.org'),
        ('PATH_INFO', '/foo'),
        ('SERVER_PORT', '80'),
        ('SERVER_PROTOCOL', 'HTTP/1.1'),
        ('QUERY_STRING', 'bar=baz'),
    ]
    for key, value in checks:
        assert env[key] == value
    # The input stream exists and is empty.
    strict_eq(env['wsgi.input'].read(0), b'')
    strict_eq(create_environ('/foo', 'http://example.com/')['SCRIPT_NAME'], '')
def get_dict(self, *keys):
    """Fetch several keys from memcached at once.

    Returns a dict mapping every requested key to its value, with
    ``None`` for keys that were missing or rejected by the key check.
    """
    mapping = {}
    saw_unicode = False
    for key in keys:
        # Memcached keys must be byte strings on Python 2.
        if isinstance(key, unicode):
            normalized = key.encode('utf-8')
            saw_unicode = True
        else:
            normalized = key
        if self.key_prefix:
            normalized = self.key_prefix + normalized
        if _test_memcached_key(key):
            mapping[normalized] = key
    fetched = self._client.get_multi(mapping.keys())
    if saw_unicode or self.key_prefix:
        # Translate the encoded/prefixed keys back to the keys the
        # caller actually asked for.
        result = {}
        for normalized, value in iteritems(fetched):
            result[mapping[normalized]] = value
    else:
        result = fetched
    # Fill in ``None`` for anything memcached did not return.
    if len(result) < len(keys):
        for key in keys:
            if key not in result:
                result[key] = None
    return result
def order_clause(self, query, db_module=None):
    """Build the ORDER BY clause for a query-options dict.

    Returns ``' ORDER BY ...'`` or an empty string when no ordering
    applies (e.g. aggregate query without GROUP BY).
    """
    # Aggregated queries without grouping produce a single row; an
    # ORDER BY would be meaningless (and invalid on some backends).
    if query.get('__funcs') and not query.get('__group_by'):
        return ''
    funcs = query.get('__funcs')
    functions = {}
    if funcs:
        # Normalize function-field names to upper case for lookup.
        for key, value in iteritems(funcs):
            functions[key.upper()] = value
    if db_module is None:
        db_module = self.task.db_module
    order_list = query.get('__order', [])
    orders = []
    for order in order_list:
        # order is (field_id, descending_flag)
        field = self._field_by_ID(order[0])
        if field:
            # NOTE(review): ``lookup_item1`` here vs ``lookup_item``
            # below — presumably deliberate (second-level lookup);
            # confirm against the field class.
            if not query['__expanded'] and field.lookup_item1:
                # Cannot order by lookup value without the joined
                # lookup columns; drop ordering entirely.
                orders = []
                break
            if query['__expanded'] and field.lookup_item:
                if field.data_type == common.KEYS:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
                else:
                    # Order by the joined "<field>_LOOKUP" column.
                    ord_str = '%s_LOOKUP' % field.db_field_name
            else:
                func = functions.get(field.field_name.upper())
                if func:
                    # Aggregated column: refer to it by its alias.
                    ord_str = '%s' % field.db_field_name
                else:
                    ord_str = '%s."%s"' % (self.table_alias(), field.db_field_name)
            if order[1]:
                ord_str += ' DESC'
            orders.append(ord_str)
    if orders:
        result = ' ORDER BY %s' % ', '.join(orders)
    else:
        result = ''
    return result
def parse_cookie(header, errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.

    Per default encoding errors are ignored.  If you want a different
    behavior you can set `errors` to ``'replace'`` or ``'strict'``.
    In strict mode a :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead
       of a regular dict.  The `cls` parameter was added.

    :param header: the header to be used to parse the cookie.
                   Alternatively this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`TypeConversionDict` is
                used.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    header = to_native(header, _cookie_charset)
    if cls is None:
        cls = TypeConversionDict
    cookie = _ExtendedCookie()
    cookie.load(header)

    # Decode to unicode, skipping broken items.  The extended morsel and
    # extended cookie catch CookieErrors and turn them into morsels with
    # a `None` value, which are filtered out here.
    result = {}
    for name, morsel in iteritems(cookie):
        if morsel.value is None:
            continue
        decoded = to_unicode(unquote_header_value(morsel.value),
                             _cookie_charset)
        result[to_unicode(name, _cookie_charset)] = decoded
    return cls(result)
def proxy(*children, **arguments):
    # Closure rendering a single element for the enclosing builder:
    # ``tag`` and ``self`` (dialect, element sets) come from the
    # enclosing scope.  Returns the element as a markup string.
    buffer = '<' + tag
    for key, value in iteritems(arguments):
        # ``None`` means "omit this attribute".
        if value is None:
            continue
        # Trailing underscore lets callers spell reserved words,
        # e.g. ``class_`` -> ``class``.
        if key[-1] == '_':
            key = key[:-1]
        if key in self._boolean_attributes:
            if not value:
                continue
            # Boolean attributes: xhtml requires ``key="key"``,
            # html just the bare attribute name.
            if self._dialect == 'xhtml':
                value = '="' + key + '"'
            else:
                value = ''
        else:
            value = '="' + escape(value) + '"'
        buffer += ' ' + key + value
    # Void elements with no children are closed immediately.
    if not children and tag in self._empty_elements:
        if self._dialect == 'xhtml':
            buffer += ' />'
        else:
            buffer += '>'
        return buffer
    buffer += '>'
    # ``None`` children are dropped; everything else is stringified.
    children_as_string = ''.join([text_type(x) for x in children
                                  if x is not None])
    if children_as_string:
        if tag in self._plaintext_elements:
            # e.g. <title>: content must be escaped, not raw markup.
            children_as_string = escape(children_as_string)
        elif tag in self._c_like_cdata and self._dialect == 'xhtml':
            # script/style in xhtml: wrap in a CDATA comment block.
            children_as_string = '/*<![CDATA[*/' + \
                children_as_string + '/*]]>*/'
    buffer += children_as_string + '</' + tag + '>'
    return buffer
def proxy(*children, **arguments):
    """Render one element for the enclosing builder.

    ``tag`` and ``self`` come from the enclosing scope; keyword
    arguments become attributes (``None`` omits one, a trailing
    underscore escapes reserved words) and positional arguments
    become the children.
    """
    markup = "<" + tag
    for name, value in iteritems(arguments):
        if value is None:
            continue
        if name[-1] == "_":
            name = name[:-1]
        if name in self._boolean_attributes:
            if not value:
                continue
            # Boolean attributes: xhtml needs name="name", html the
            # bare name.
            if self._dialect == "xhtml":
                rendered = '="' + name + '"'
            else:
                rendered = ""
        else:
            rendered = '="' + escape(value) + '"'
        markup += " " + name + rendered
    # Void elements with no children close immediately.
    if not children and tag in self._empty_elements:
        if self._dialect == "xhtml":
            return markup + " />"
        return markup + ">"
    markup += ">"
    body = "".join([text_type(child) for child in children
                    if child is not None])
    if body:
        if tag in self._plaintext_elements:
            body = escape(body)
        elif tag in self._c_like_cdata and self._dialect == "xhtml":
            # script/style in xhtml: wrap in a CDATA comment block.
            body = "/*<![CDATA[*/" + body + "/*]]>*/"
    return markup + body + "</" + tag + ">"
def print_usage(actions):
    """Print the usage information.  (Help screen)

    :param actions: mapping of action name to ``(func, doc, arguments)``
        where ``arguments`` is a list of ``(arg, shortcut, default,
        argtype)`` tuples.
    """
    prog = basename(sys.argv[0])
    print("usage: %s <action> [<options>]" % prog)
    print(" %s --help" % prog)
    print()
    print("actions:")
    for name, (func, doc, arguments) in sorted(actions.items()):
        print(" %s:" % name)
        for line in doc.splitlines():
            print(" %s" % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            # Fix: replaced the fragile ``x and a or b`` pseudo-ternary
            # with a real conditional expression.
            flag = ("-%s, " % shortcut if shortcut else "") + "--" + arg
            if isinstance(default, bool):
                # Boolean flags take no value, so no type/default columns.
                print(" %s" % flag)
            else:
                print(" %-30s%-10s%s" % (flag, argtype, default))
        print()
def test_ordered_interface(self):
    """Exercise the order-preserving MultiDict interface of
    ``self.storage_class``: add/getlist, iteration order, pop variants,
    equality, and error types.
    """
    cls = self.storage_class

    d = cls()
    assert not d
    d.add("foo", "bar")
    assert len(d) == 1
    d.add("foo", "baz")
    assert len(d) == 1
    assert list(iteritems(d)) == [("foo", "bar")]
    assert list(d) == ["foo"]
    assert list(iteritems(d, multi=True)) == [("foo", "bar"), ("foo", "baz")]
    del d["foo"]
    assert not d
    assert len(d) == 0
    assert list(d) == []

    d.update([("foo", 1), ("foo", 2), ("bar", 42)])
    d.add("foo", 3)
    assert d.getlist("foo") == [1, 2, 3]
    assert d.getlist("bar") == [42]
    assert list(iteritems(d)) == [("foo", 1), ("bar", 42)]

    expected = ["foo", "bar"]
    assert list(d.keys()) == expected
    assert list(d) == expected
    assert list(iterkeys(d)) == expected

    assert list(iteritems(d, multi=True)) == [
        ("foo", 1),
        ("foo", 2),
        ("bar", 42),
        ("foo", 3),
    ]
    assert len(d) == 2
    assert d.pop("foo") == 1
    assert d.pop("blafasel", None) is None
    assert d.pop("blafasel", 42) == 42
    assert len(d) == 1
    assert d.poplist("bar") == [42]
    assert not d

    # Fix: this expression was missing its ``assert`` and so was a no-op.
    assert d.get("missingkey") is None

    d.add("foo", 42)
    d.add("foo", 23)
    d.add("bar", 2)
    d.add("foo", 42)

    assert d == datastructures.MultiDict(d)
    id = self.storage_class(d)
    assert d == id
    d.add("foo", 2)
    assert d != id

    d.update({"blah": [1, 2, 3]})
    assert d["blah"] == 1
    assert d.getlist("blah") == [1, 2, 3]

    # setlist works
    d = self.storage_class()
    d["foo"] = 42
    d.setlist("foo", [1, 2])
    assert d.getlist("foo") == [1, 2]

    with pytest.raises(BadRequestKeyError):
        d.pop("missing")
    with pytest.raises(BadRequestKeyError):
        d["missing"]

    # popping
    d = self.storage_class()
    d.add("foo", 23)
    d.add("foo", 42)
    d.add("foo", 1)
    assert d.popitem() == ("foo", 23)
    with pytest.raises(BadRequestKeyError):
        d.popitem()
    assert not d

    d.add("foo", 23)
    d.add("foo", 42)
    d.add("foo", 1)
    assert d.popitemlist() == ("foo", [23, 42, 1])

    with pytest.raises(BadRequestKeyError):
        d.popitemlist()

    # Unhashable
    d = self.storage_class()
    d.add("foo", 23)
    pytest.raises(TypeError, hash, d)
def test_basic_interface(self):
    """Exercise the core MultiDict interface of ``self.storage_class``:
    get/set/delete, list access, copies, updates, pop variants, type
    conversion, repr and error types.
    """
    md = self.storage_class()
    assert isinstance(md, dict)

    mapping = [
        ("a", 1),
        ("b", 2),
        ("a", 2),
        ("d", 3),
        ("a", 1),
        ("a", 3),
        ("d", 4),
        ("c", 3),
    ]
    md = self.storage_class(mapping)

    # simple getitem gives the first value
    assert md["a"] == 1
    assert md["c"] == 3
    with pytest.raises(KeyError):
        md["e"]
    assert md.get("a") == 1

    # list getitem
    assert md.getlist("a") == [1, 2, 1, 3]
    assert md.getlist("d") == [3, 4]
    # do not raise if key not found
    assert md.getlist("x") == []

    # simple setitem overwrites all values
    md["a"] = 42
    assert md.getlist("a") == [42]

    # list setitem
    md.setlist("a", [1, 2, 3])
    assert md["a"] == 1
    assert md.getlist("a") == [1, 2, 3]

    # verify that it does not change original lists
    l1 = [1, 2, 3]
    md.setlist("a", l1)
    del l1[:]
    assert md["a"] == 1

    # setdefault, setlistdefault
    assert md.setdefault("u", 23) == 23
    assert md.getlist("u") == [23]
    del md["u"]

    md.setlist("u", [-1, -2])

    # delitem
    del md["u"]
    with pytest.raises(KeyError):
        md["u"]
    del md["d"]
    assert md.getlist("d") == []

    # keys, values, items, lists
    assert list(sorted(md.keys())) == ["a", "b", "c"]
    assert list(sorted(iterkeys(md))) == ["a", "b", "c"]
    assert list(sorted(itervalues(md))) == [1, 2, 3]
    assert list(sorted(itervalues(md))) == [1, 2, 3]
    assert list(sorted(md.items())) == [("a", 1), ("b", 2), ("c", 3)]
    assert list(sorted(md.items(multi=True))) == [
        ("a", 1),
        ("a", 2),
        ("a", 3),
        ("b", 2),
        ("c", 3),
    ]
    assert list(sorted(iteritems(md))) == [("a", 1), ("b", 2), ("c", 3)]
    assert list(sorted(iteritems(md, multi=True))) == [
        ("a", 1),
        ("a", 2),
        ("a", 3),
        ("b", 2),
        ("c", 3),
    ]
    assert list(sorted(md.lists())) == [("a", [1, 2, 3]), ("b", [2]), ("c", [3])]
    assert list(sorted(iterlists(md))) == [("a", [1, 2, 3]), ("b", [2]), ("c", [3])]

    # copy method
    c = md.copy()
    assert c["a"] == 1
    assert c.getlist("a") == [1, 2, 3]

    # copy method 2
    c = copy(md)
    assert c["a"] == 1
    assert c.getlist("a") == [1, 2, 3]

    # deepcopy method
    c = md.deepcopy()
    assert c["a"] == 1
    assert c.getlist("a") == [1, 2, 3]

    # deepcopy method 2
    c = deepcopy(md)
    assert c["a"] == 1
    assert c.getlist("a") == [1, 2, 3]

    # update with a multidict
    od = self.storage_class([("a", 4), ("a", 5), ("y", 0)])
    md.update(od)
    assert md.getlist("a") == [1, 2, 3, 4, 5]
    assert md.getlist("y") == [0]

    # update with a regular dict
    md = c
    od = {"a": 4, "y": 0}
    md.update(od)
    assert md.getlist("a") == [1, 2, 3, 4]
    assert md.getlist("y") == [0]

    # pop, poplist, popitem, popitemlist
    assert md.pop("y") == 0
    assert "y" not in md
    assert md.poplist("a") == [1, 2, 3, 4]
    assert "a" not in md
    assert md.poplist("missing") == []

    # remaining: b=2, c=3
    popped = md.popitem()
    assert popped in [("b", 2), ("c", 3)]
    popped = md.popitemlist()
    assert popped in [("b", [2]), ("c", [3])]

    # type conversion
    md = self.storage_class({"a": "4", "b": ["2", "3"]})
    assert md.get("a", type=int) == 4
    assert md.getlist("b", type=int) == [2, 3]

    # repr
    md = self.storage_class([("a", 1), ("a", 2), ("b", 3)])
    assert "('a', 1)" in repr(md)
    assert "('a', 2)" in repr(md)
    assert "('b', 3)" in repr(md)

    # add and getlist
    md.add("c", "42")
    md.add("c", "23")
    assert md.getlist("c") == ["42", "23"]
    md.add("c", "blah")
    assert md.getlist("c", type=int) == [42, 23]

    # setdefault
    md = self.storage_class()
    md.setdefault("x", []).append(42)
    md.setdefault("x", []).append(23)
    assert md["x"] == [42, 23]

    # to dict
    md = self.storage_class()
    md["foo"] = 42
    md.add("bar", 1)
    md.add("bar", 2)
    assert md.to_dict() == {"foo": 42, "bar": 1}
    assert md.to_dict(flat=False) == {"foo": [42], "bar": [1, 2]}

    # popitem from empty dict
    with pytest.raises(KeyError):
        self.storage_class().popitem()

    with pytest.raises(KeyError):
        self.storage_class().popitemlist()

    # key errors are of a special type
    with pytest.raises(BadRequestKeyError):
        self.storage_class()[42]

    # setlist works
    md = self.storage_class()
    md["foo"] = 42
    md.setlist("foo", [1, 2])
    assert md.getlist("foo") == [1, 2]
'BaseResponse', 'BaseRequest', 'Request', 'Response', 'AcceptMixin', 'ETagRequestMixin', 'ETagResponseMixin', 'ResponseStreamMixin', 'CommonResponseDescriptorsMixin', 'UserAgentMixin', 'AuthorizationMixin', 'WWWAuthenticateMixin', 'CommonRequestDescriptorsMixin' ], 'werkzeug.security': ['generate_password_hash', 'check_password_hash'], # the undocumented easteregg ;-) 'werkzeug._internal': ['_easteregg'] } # modules that should be imported when accessed as attributes of werkzeug attribute_modules = frozenset(['exceptions', 'routing', 'script']) object_origins = {} for module, items in iteritems(all_by_module): for item in items: object_origins[item] = module class module(ModuleType): """Automatically import objects from the modules.""" def __getattr__(self, name): if name in object_origins: module = __import__(object_origins[name], None, None, [name]) for extra_name in all_by_module[module.__name__]: setattr(self, extra_name, getattr(module, extra_name)) return getattr(module, name) elif name in attribute_modules: __import__('werkzeug.' + name) return ModuleType.__getattribute__(self, name)