def test_basic_local():
    """Basic local object support"""
    l = Local()
    l.foo = 0
    values = []

    def value_setter(idx):
        time.sleep(0.01 * idx)
        l.foo = idx
        time.sleep(0.02)
        values.append(l.foo)

    threads = [Thread(target=value_setter, args=(x,)) for x in [1, 2, 3]]
    for thread in threads:
        thread.start()
    time.sleep(0.2)
    assert sorted(values) == [1, 2, 3]

    def delfoo():
        del l.foo

    delfoo()
    assert_raises(AttributeError, lambda: l.foo)
    assert_raises(AttributeError, delfoo)

    release_local(l)
def test_local_release():
    """Locals work without manager"""
    loc = Local()
    loc.foo = 42
    release_local(loc)
    assert not hasattr(loc, "foo")

    ls = LocalStack()
    ls.push(42)
    release_local(ls)
    assert ls.top is None
def test_coroutine_local(self):
    ctx = Local()
    patch_local(ctx)

    @coroutine
    def other_context():
        ctx.test = 45
        return ctx.test

    ctx.test = 40
    fut = asyncio.ensure_future(other_context())
    yield from fut
    self.assertEqual(ctx.test, 40)
    self.assertNotEqual(ctx.test, fut.result())
    self.assertEqual(fut.result(), 45)
def test_custom_idents():
    """Local manager supports custom ident functions"""
    ident = 0
    local = Local()
    stack = LocalStack()
    mgr = LocalManager([local, stack], ident_func=lambda: ident)

    local.foo = 42
    stack.push({"foo": 42})
    ident = 1
    local.foo = 23
    stack.push({"foo": 23})
    ident = 0
    assert local.foo == 42
    assert stack.top["foo"] == 42
    stack.pop()
    assert stack.top is None
    ident = 1
    assert local.foo == 23
    assert stack.top["foo"] == 23
    stack.pop()
    assert stack.top is None
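# The test above relies on LocalManager applying `ident_func` to every registered
# Local/LocalStack. A minimal sketch of the same mechanism outside a test, assuming
# a pre-2.0 Werkzeug release where `ident_func` still takes effect (it was deprecated
# in 2.0 and removed in 2.1) and that the `greenlet` package is installed.
from greenlet import getcurrent
from werkzeug.local import Local, LocalManager

local = Local()
# With getcurrent as the ident function, every greenlet (not just every OS thread)
# gets its own view of `local`.
manager = LocalManager([local], ident_func=getcurrent)

local.user = 'alice'   # stored under the current greenlet's ident
manager.cleanup()      # release the data stored for the current greenlet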
# encoding: utf-8
from threading import Thread
from werkzeug.local import Local

local = Local()
local.request = '123'


class MyThread(Thread):
    def run(self):
        local.request = 'abc'
        print('child thread:', local.request)


mythread = MyThread()
mythread.start()
mythread.join()

print('main thread:', local.request)
# I/O-bound programs: querying databases, requesting network resources, reading/writing files.
# The Flask web framework: one request, one thread.
# How many threads does Flask open to handle requests?
# (nginx, apache, tomcat, iis sit in front of it.)
# Thread isolation: werkzeug.local.Local, implemented with a dict.
# LocalStack: a thread-isolated stack built on top of Local's dict-based isolation.
# The data is keyed by thread id: {thread_id1: value1, thread_id2: value2, ...}
# Encapsulation.
from werkzeug.local import Local
import threading
import time

my_obj = Local()
my_obj.b = 1


def worker():
    my_obj.b = 2
    print('in new thread b is:' + str(my_obj.b))


# new thread
new_t = threading.Thread(target=worker, name='qiyue_thread')
new_t.start()
time.sleep(1)

# main thread
print('in main thread b is:' + str(my_obj.b))
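# The comments above describe Local's internal layout as a dict keyed by thread id.
# A minimal sketch of inspecting that layout, assuming an older Werkzeug release
# (pre-2.0) where Local keeps its data in a `__storage__` dict; 2.0+ switched to
# contextvars, so this attribute is not available there.
import threading
from werkzeug.local import Local

obj = Local()
obj.b = 1  # stored under the main thread's ident


def worker():
    obj.b = 2  # stored under the worker thread's ident


t = threading.Thread(target=worker)
t.start()
t.join()

# In pre-2.0 Werkzeug the backing dict looks roughly like
# {main_ident: {'b': 1}, worker_ident: {'b': 2}} -- one sub-dict per thread.
print(obj.__storage__)
print(obj.b)  # still 1 in the main thread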
One variable name instantiated as three objects.

6-8 Thread isolation
    To implement thread isolation, Python can use a dictionary to keep one entry per thread,
    e.g. dict{'request1': '', 'request2': '', ...}.

6-9 The thread-isolated object: Local
    A dictionary only stores the data; we also need convenient ways to operate on it.
        from werkzeug.local import Local  # import the thread-isolated object
        obj = Local()
    With this, the main thread and the child thread no longer affect each other:
        new_t = threading.Thread(target=worker, name='child_thread')
        new_t.start()
    What is isolated is the value of the variable; the storage looks like
    storage{'ident': {'name': 'value'}}.

6-10 The thread-isolated stack: LocalStack

6-11 Basic usage of LocalStack
    from werkzeug.local import LocalStack  # import and instantiate
    s = LocalStack()
    then push values onto the stack (see the sketch below).
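A minimal sketch of the LocalStack usage the notes above lead up to; the thread name and values are illustrative only.

import threading
from werkzeug.local import LocalStack

s = LocalStack()
s.push('main-value')    # push onto the main thread's stack
print(s.top)            # 'main-value'


def worker():
    # Each thread starts with its own, initially empty, stack.
    print(s.top)        # None
    s.push('worker-value')
    print(s.top)        # 'worker-value'
    s.pop()


t = threading.Thread(target=worker, name='child_thread')
t.start()
t.join()

print(s.top)            # still 'main-value' in the main thread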
class Environment(Mapping): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It provides access to the registry by implementing a mapping from model names to new api models. It also holds a cache for records, and a data structure to manage recomputations. """ _local = Local() @classproperty def envs(cls): return cls._local.environments @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = Registry(cr.dbname) self.cache = envs.cache self._cache_key = (cr, uid) self._protected = StackMap() # {field: ids, ...} self.dirty = defaultdict(set) # {record: set(field_name), ...} self.all = envs envs.add(self) return self # # Mapping methods # def __contains__(self, model_name): """ Test whether the given model exists. """ return model_name in self.registry def __getitem__(self, model_name): """ Return an empty recordset from the given model. """ return self.registry[model_name]._browse(self, (), ()) def __iter__(self): """ Return an iterator on model names. """ return iter(self.registry) def __len__(self): """ Return the size of the model registry. """ return len(self.registry) def __eq__(self, other): return self is other def __ne__(self, other): return self is not other def __hash__(self): return object.__hash__(self) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on ``self`` with modified parameters. 
:param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given ``xml_id`` """ return self['ir.model.data'].xmlid_to_object( xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def company(self): """ return the company in which the user is logged in (as an instance) """ try: company_id = int(self.context.get('allowed_company_ids')[0]) if company_id in self.user.company_ids.ids: return self['res.company'].browse(company_id) return self.user.company_id except Exception: return self.user.company_id @property def companies(self): """ return a recordset of the enabled companies by the user """ try: # In case the user tries to bidouille the url (eg: cids=1,foo,bar) allowed_company_ids = self.context.get('allowed_company_ids') # Prevent the user to enable companies for which he doesn't have any access users_company_ids = self.user.company_ids.ids allowed_company_ids = [ company_id for company_id in allowed_company_ids if company_id in users_company_ids ] except Exception: # By setting the default companies to all user companies instead of the main one # we save a lot of potential trouble in all "out of context" calls, such as # /mail/redirect or /web/image, etc. And it is not unsafe because the user does # have access to these other companies. The risk of exposing foreign records # (wrt to the context) is low because all normal RPCs will have a proper # allowed_company_ids. # Examples: # - when printing a report for several records from several companies # - when accessing to a record from the notification email template # - when loading an binary image on a template allowed_company_ids = self.user.company_ids.ids return self['res.company'].browse(allowed_company_ids) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ if self.all.in_draft: yield else: try: self.all.in_draft = True yield finally: self.all.in_draft = False self.dirty.clear() @property def in_draft(self): """ Return whether we are in draft mode. """ return self.all.in_draft def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.cache.invalidate() self.all.todo.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. """ try: yield except Exception: self.clear() raise def protected(self, field): """ Return the recordset for which ``field`` should not be invalidated or recomputed. """ return self[field.model_name].browse(self._protected.get(field, ())) @contextmanager def protecting(self, what, records=None): """ Prevent the invalidation or recomputation of fields on records. The parameters are either: - ``what`` a collection of fields and ``records`` a recordset, or - ``what`` a collection of pairs ``(fields, records)``. 
""" protected = self._protected try: protected.pushmap() what = what if records is None else [(what, records)] for fields, records in what: for field in fields: ids = protected.get(field, frozenset()) protected[field] = ids.union(records._ids) yield finally: protected.popmap() def field_todo(self, field): """ Return a recordset with all records to recompute for ``field``. """ ids = { rid for recs in self.all.todo.get(field, ()) for rid in recs.ids } return self[field.model_name].browse(ids) def check_todo(self, field, record): """ Check whether ``field`` must be recomputed on ``record``, and if so, return the corresponding recordset to recompute. """ for recs in self.all.todo.get(field, []): if recs & record: return recs def add_todo(self, field, records): """ Mark ``field`` to be recomputed on ``records``. """ recs_list = self.all.todo.setdefault(field, []) for i, recs in enumerate(recs_list): if recs.env == records.env: # only add records if not already in the recordset, much much # cheaper in case recs is big and records is a singleton # already present if not records <= recs: recs_list[i] |= records break else: recs_list.append(records) def remove_todo(self, field, records): """ Mark ``field`` as recomputed on ``records``. """ recs_list = [recs - records for recs in self.all.todo.pop(field, [])] recs_list = [r for r in recs_list if r] if recs_list: self.all.todo[field] = recs_list def has_todo(self): """ Return whether some fields must be recomputed. """ return bool(self.all.todo) def get_todo(self): """ Return a pair ``(field, records)`` to recompute. The field is such that none of its dependencies must be recomputed. """ field = min(self.all.todo, key=self.registry.field_sequence) return field, self.all.todo[field][0] @property def recompute(self): return self.all.recompute @contextmanager def norecompute(self): tmp = self.all.recompute self.all.recompute = False try: yield finally: self.all.recompute = tmp def cache_key(self, field): """ Return the key to store the value of ``field`` in cache, the full cache key being ``(key, field, record.id)``. """ return self if field.context_dependent else self._cache_key
class Environment(Mapping): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It provides access to the registry by implementing a mapping from model names to new api models. It also holds a cache for records, and a data structure to manage recomputations. """ _local = Local() @classproperty def envs(cls): return cls._local.environments @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = Registry(cr.dbname) self.cache = envs.cache self._protected = StackMap() # {field: ids, ...} self.dirty = defaultdict(set) # {record: set(field_name), ...} self.all = envs envs.add(self) return self # # Mapping methods # def __contains__(self, model_name): """ Test whether the given model exists. """ return model_name in self.registry def __getitem__(self, model_name): """ Return an empty recordset from the given model. """ return self.registry[model_name]._browse((), self) def __iter__(self): """ Return an iterator on model names. """ return iter(self.registry) def __len__(self): """ Return the size of the model registry. """ return len(self.registry) def __eq__(self, other): return self is other def __ne__(self, other): return self is not other def __hash__(self): return object.__hash__(self) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on ``self`` with modified parameters. :param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given ``xml_id`` """ return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def _do_in_mode(self, mode): if self.all.mode: yield else: try: self.all.mode = mode yield finally: self.all.mode = False self.dirty.clear() def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ return self._do_in_mode(True) @property def in_draft(self): """ Return whether we are in draft mode. """ return bool(self.all.mode) def do_in_onchange(self): """ Context-switch to 'onchange' draft mode, which is a specialized draft mode used during execution of onchange methods. 
""" return self._do_in_mode('onchange') @property def in_onchange(self): """ Return whether we are in 'onchange' draft mode. """ return self.all.mode == 'onchange' def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.cache.invalidate() self.all.todo.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. """ try: yield except Exception: self.clear() raise def protected(self, field): """ Return the recordset for which ``field`` should not be invalidated or recomputed. """ return self[field.model_name].browse(self._protected.get(field, ())) @contextmanager def protecting(self, what, records=None): """ Prevent the invalidation or recomputation of fields on records. The parameters are either: - ``what`` a collection of fields and ``records`` a recordset, or - ``what`` a collection of pairs ``(fields, records)``. """ protected = self._protected try: protected.pushmap() what = what if records is None else [(what, records)] for fields, records in what: for field in fields: ids = protected.get(field, frozenset()) protected[field] = ids.union(records._ids) yield finally: protected.popmap() def field_todo(self, field): """ Return a recordset with all records to recompute for ``field``. """ ids = {rid for recs in self.all.todo.get(field, ()) for rid in recs.ids} return self[field.model_name].browse(ids) def check_todo(self, field, record): """ Check whether ``field`` must be recomputed on ``record``, and if so, return the corresponding recordset to recompute. """ for recs in self.all.todo.get(field, []): if recs & record: return recs def add_todo(self, field, records): """ Mark ``field`` to be recomputed on ``records``. """ recs_list = self.all.todo.setdefault(field, []) for i, recs in enumerate(recs_list): if recs.env == records.env: recs_list[i] |= records break else: recs_list.append(records) def remove_todo(self, field, records): """ Mark ``field`` as recomputed on ``records``. """ recs_list = [recs - records for recs in self.all.todo.pop(field, [])] recs_list = [r for r in recs_list if r] if recs_list: self.all.todo[field] = recs_list def has_todo(self): """ Return whether some fields must be recomputed. """ return bool(self.all.todo) def get_todo(self): """ Return a pair ``(field, records)`` to recompute. The field is such that none of its dependencies must be recomputed. """ field = min(self.all.todo, key=self.registry.field_sequence) return field, self.all.todo[field][0] @property def recompute(self): return self.all.recompute @contextmanager def norecompute(self): tmp = self.all.recompute self.all.recompute = False try: yield finally: self.all.recompute = tmp
class Environment(object): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It also provides access to the registry, a cache for records, and a data structure to manage recomputations. """ _local = Local() @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = WeakSet() yield finally: release_local(cls._local) def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls._local.environments for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = RegistryManager.get(cr.dbname) self.cache = defaultdict(dict) # {field: {id: value, ...}, ...} self.prefetch = defaultdict(set) # {model_name: set(id), ...} self.computed = defaultdict(set) # {field: set(id), ...} self.dirty = set() # set(record) self.todo = {} # {field: records, ...} self.mode = env.mode if env else Mode() self.all = envs envs.add(self) return self def __getitem__(self, model_name): """ return a given model """ return self.registry[model_name]._browse(self, ()) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on `self` with modified parameters. :param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given `xml_id` """ return self['ir.model.data'].xmlid_to_object( xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def _do_in_mode(self, mode): if self.mode.value: yield else: try: self.mode.value = mode yield finally: self.mode.value = False self.dirty.clear() def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ return self._do_in_mode(True) @property def in_draft(self): """ Return whether we are in draft mode. """ return bool(self.mode.value) def do_in_onchange(self): """ Context-switch to 'onchange' draft mode, which is a specialized draft mode used during execution of onchange methods. """ return self._do_in_mode('onchange') @property def in_onchange(self): """ Return whether we are in 'onchange' draft mode. """ return self.mode.value == 'onchange' def invalidate(self, spec): """ Invalidate some fields for some records in the cache of all environments. :param spec: what to invalidate, a list of `(field, ids)` pair, where `field` is a field object, and `ids` is a list of record ids or ``None`` (to invalidate all records). 
""" if not spec: return for env in list(iter(self.all)): c = env.cache for field, ids in spec: if ids is None: if field in c: del c[field] else: field_cache = c[field] for id in ids: field_cache.pop(id, None) def invalidate_all(self): """ Clear the cache of all environments. """ for env in list(iter(self.all)): env.cache.clear() env.prefetch.clear() env.computed.clear() env.dirty.clear() def check_cache(self): """ Check the cache consistency. """ # make a full copy of the cache, and invalidate it cache_dump = dict((field, dict(field_cache)) for field, field_cache in self.cache.iteritems()) self.invalidate_all() # re-fetch the records, and compare with their former cache invalids = [] for field, field_dump in cache_dump.iteritems(): ids = filter(None, field_dump) records = self[field.model_name].browse(ids) for record in records: try: cached = field_dump[record.id] fetched = record[field.name] if fetched != cached: info = {'cached': cached, 'fetched': fetched} invalids.append((field, record, info)) except (AccessError, MissingError): pass if invalids: raise Warning('Invalid cache for fields\n' + pformat(invalids))
# Local stores its data per thread: {thread_id1: value1, thread_id2: value2, ...}
# L is the thread-isolated object; threads t1 and t2 each see their own L.a.
import threading
import time
from werkzeug.local import Local


class A:
    b = 1


# my_obj = A()      # plain object: not thread isolated
my_obj = Local()    # Local() gives thread isolation
my_obj.b = 1


def worker():
    # new thread
    # With a plain A() instance this assignment would leak into the main thread
    # (no isolation); with Local() it stays confined to this thread.
    my_obj.b = 2
    print('in new thread b is:' + str(my_obj.b))


new_t = threading.Thread(target=worker, name='qiu_thread')
new_t.start()
time.sleep(1)

# main thread
print('in main thread b is:' + str(my_obj.b))
from werkzeug.local import Local
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import redirect, get_object_or_404
from django.forms import ModelForm
from django.http.response import HttpResponseForbidden
from django.core.exceptions import ValidationError
from rest_framework import serializers

from common.utils import get_logger
from .utils import (current_org, set_current_org, set_to_root_org, get_current_org_id)
from .models import Organization

logger = get_logger(__file__)
tl = Local()

__all__ = [
    'OrgManager', 'OrgViewGenericMixin', 'OrgModelMixin', 'OrgModelForm',
    'RootOrgViewMixin', 'OrgMembershipSerializerMixin',
    'OrgMembershipModelViewSetMixin', 'OrgResourceSerializerMixin',
]


class OrgManager(models.Manager):

    def get_queryset(self):
from pharmacy import app
from pharmacy.database.users import Users
from pharmacy.utils.time import get_time
from flask import request, session
from werkzeug.local import Local

# Setup a thread proxy object
user_manager = Local()
user = user_manager('user')


@app.before_request
def resolve_user():
    # If the request endpoint is static, don't resolve the user
    if request.endpoint == "static":
        return

    user_manager.user = None
    user = session.get("user")
    if user is None:
        return

    uid = user.get("id")
    if uid is None:
        return

    u = Users.query.filter_by(id=int(uid)).first()
def create_app():
    local = Local()
    local_manager = LocalManager([local])
    app = local_manager.make_middleware(Olaf())
    app = set_statics(app)
    return app
    default_want_json,
    get_config,
    hash_data,
    localize_callback,
    send_mail,
    string_types,
    url_for_security,
    verify_and_update_password,
    verify_hash,
)
from .views import create_blueprint, default_render_json
from .cache import VerifyHashCache

# Convenient references
_security = LocalProxy(lambda: current_app.extensions["security"])

local_cache = Local()

# List of authentication mechanisms supported.
AUTHN_MECHANISMS = ("basic", "session", "token")

#: Default Flask-Security configuration
_default_config = {
    "BLUEPRINT_NAME": "security",
    "CLI_ROLES_NAME": "roles",
    "CLI_USERS_NAME": "users",
    "URL_PREFIX": None,
    "SUBDOMAIN":
""" Interact with the SQLite database """ # @hack rseekely uggggh context crap # pylint: disable=invalid-name,global-statement import os import sqlite3 from werkzeug.local import Local from quagen import config # Allows us to share our database connection in a WSGI environment # https://werkzeug.palletsprojects.com/en/0.16.x/local/ context = Local() def set_context(new_context): """ Set thread context for sharing db connection """ global context context = new_context def get_connection(): """ Retrieve the connection to the configured database to the configured database. Makes a connection if none already exists. Returns:
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function

from six import iteritems, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json

# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template

__version__ = '8.10.6'
__title__ = "Frappe Framework"

local = Local()


class _dict(dict):
    """dict like object that exposes keys as attributes"""
    def __getattr__(self, key):
        ret = self.get(key)
        if not ret and key.startswith("__"):
            raise AttributeError()
        return ret
    def __setattr__(self, key, value):
        self[key] = value
    def __getstate__(self):
        return self
    def __setstate__(self, d):
        self.update(d)
    def update(self, d):
import threading
import time
from werkzeug.local import Local, LocalStack

my_local = Local()
my_local.a = 'a'

my_stack = LocalStack()
my_stack.push(1)
print(my_stack.top)


def worker():
    print('I am a thread worker')
    print(my_stack.top)
    t = threading.current_thread()
    # time.sleep(10)
    print(t.getName())
    my_local.a = 'b'
    print(my_local.a)
    my_stack.push(2)
    print(my_stack.top)


new_t1 = threading.Thread(target=worker, name='new thread1')
new_t1.start()
# new_t2 = threading.Thread(target=worker, name='new thread2')
# new_t2.start()
time.sleep(1)

t = threading.current_thread()
def init_app(self, app, force_https=True, force_https_permanent=False,
             force_file_save=False, frame_options=SAMEORIGIN,
             frame_options_allow_from=None, strict_transport_security=True,
             strict_transport_security_preload=False,
             strict_transport_security_max_age=ONE_YEAR_IN_SECS,
             strict_transport_security_include_subdomains=True,
             content_security_policy=DEFAULT_CSP_POLICY,
             content_security_policy_report_uri=None,
             content_security_policy_report_only=False,
             referrer_policy=DEFAULT_REFERRER_POLICY,
             session_cookie_secure=True, session_cookie_http_only=True):
    """
    Initialization.

    Args:
        app: A Flask application.
        force_https: Redirects non-https requests to https, disabled in
            debug mode.
        force_https_permanent: Uses 301 instead of 302 redirects.
        frame_options: Sets the X-Frame-Options header, defaults to
            SAMEORIGIN.
        frame_options_allow_from: Used when frame_options is set to
            ALLOW_FROM and is a string of domains to allow frame embedding.
        strict_transport_security: Sets HSTS headers.
        strict_transport_security_preload: Enables HSTS preload. See
            https://hstspreload.org.
        strict_transport_security_max_age: How long HSTS headers are
            honored by the browser.
        strict_transport_security_include_subdomains: Whether to include
            all subdomains when setting HSTS.
        content_security_policy: A string or dictionary describing the
            content security policy for the response.
        content_security_policy_report_uri: A string indicating the report
            URI used for CSP violation reports.
        content_security_policy_report_only: Whether to set the CSP header
            as "report-only", which disables enforcement by the browser and
            requires a "report-uri" parameter with a backend to receive the
            POST data.
        referrer_policy: A string describing the referrer policy for the
            response.
        session_cookie_secure: Forces the session cookie to only be sent
            over https. Disabled in debug mode.
        session_cookie_http_only: Prevents JavaScript from reading the
            session cookie.
        force_file_save: Prevents the user from opening a file download
            directly on >= IE 8.

    See README.rst for a detailed description of each option.
    """
    self.force_https = force_https
    self.force_https_permanent = force_https_permanent

    self.frame_options = frame_options
    self.frame_options_allow_from = frame_options_allow_from

    self.strict_transport_security = strict_transport_security
    self.strict_transport_security_preload = \
        strict_transport_security_preload
    self.strict_transport_security_max_age = \
        strict_transport_security_max_age
    self.strict_transport_security_include_subdomains = \
        strict_transport_security_include_subdomains

    self.content_security_policy = content_security_policy.copy()
    self.content_security_policy_report_uri = \
        content_security_policy_report_uri
    self.content_security_policy_report_only = \
        content_security_policy_report_only
    if self.content_security_policy_report_only and \
            self.content_security_policy_report_uri is None:
        raise ValueError(
            'Setting content_security_policy_report_only to True also '
            'requires a URI to be specified in '
            'content_security_policy_report_uri')

    self.referrer_policy = referrer_policy

    self.session_cookie_secure = session_cookie_secure

    if session_cookie_http_only:
        app.config['SESSION_COOKIE_HTTPONLY'] = True

    self.force_file_save = force_file_save

    self.app = app
    self.local_options = Local()

    app.before_request(self._update_local_options)
    app.before_request(self._force_https)
    app.after_request(self._set_response_headers)
# -*- coding: utf-8 -*-
# Copyright 2017 Therp BV <http://therp.nl>
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)

import inspect

from werkzeug.local import Local

from odoo.sql_db import Cursor
from odoo.modules import module
from odoo.modules.graph import Graph

original = module.load_information_from_description_file
local = Local()
local.rdepends_to_process = {}


def load_information_from_description_file(module, mod_path=None):
    result = original(module, mod_path=mod_path)
    # add the keys you want to react on here
    if result.get('depends_if_installed'):
        cr = _get_cr()
        if cr:
            _handle_depends_if_installed(cr, result)
    if result.get('rdepends_if_installed'):
        cr = _get_cr()
        if cr:
            _handle_rdepends_if_installed(cr, result, module)
    # Apply depends specified in other modules as rdepends
    extra_depends = local.rdepends_to_process.get(module)
    return length


def int_length(i):
    return len(str(i))


def _get_trans():
    gettext.install(APP_NAME, LOCALE_DIR)
    zh = gettext.translation(APP_NAME, LOCALE_DIR, ["zh_CN"])
    en = gettext.translation(APP_NAME, LOCALE_DIR, ["en"])
    return zh, en


trans_zh, trans_en = _get_trans()
_thread_locals = Local()


def set_current_lang(lang):
    setattr(_thread_locals, 'LANGUAGE_CODE', lang)


def get_current_lang(attr):
    return getattr(_thread_locals, attr, None)


def _gettext(lang):
    import builtins
    if lang == 'en':
        trans_en.install()
    else:
from werkzeug.local import Local

g = Local()  # thread local
""" Local线程隔离 """ import threading import time from werkzeug.local import Local class A: b = 1 my_obj = Local() my_obj.b = 1 def worker(): # 新线程 my_obj.b = 2 print('in new thread b is ' + str(my_obj.b)) new_t = threading.Thread(target=worker, name='ruansong_thread') new_t.start() time.sleep(1) print('in main thread b is ' + str(my_obj.b))
class Environment(object): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It provides access to the registry by implementing a mapping from model names to new api models. It also holds a cache for records, and a data structure to manage recomputations. """ _local = Local() @classproperty def envs(cls): return cls._local.environments @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = RegistryManager.get(cr.dbname) self.cache = defaultdict(dict) # {field: {id: value, ...}, ...} self.prefetch = defaultdict(set) # {model_name: set(id), ...} self.computed = defaultdict(set) # {field: set(id), ...} self.dirty = defaultdict(set) # {record: set(field_name), ...} self.all = envs envs.add(self) return self def __contains__(self, model_name): """ Test whether the given model exists. """ return model_name in self.registry def __getitem__(self, model_name): """ Return an empty recordset from the given model. """ return self.registry[model_name]._browse(self, ()) def __iter__(self): """ Return an iterator on model names. """ return iter(self.registry) def __len__(self): """ Return the size of the model registry. """ return len(self.registry) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on ``self`` with modified parameters. :param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given ``xml_id`` """ return self['ir.model.data'].xmlid_to_object( xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def _do_in_mode(self, mode): if self.all.mode: yield else: try: self.all.mode = mode yield finally: self.all.mode = False self.dirty.clear() def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ return self._do_in_mode(True) @property def in_draft(self): """ Return whether we are in draft mode. """ return bool(self.all.mode) def do_in_onchange(self): """ Context-switch to 'onchange' draft mode, which is a specialized draft mode used during execution of onchange methods. 
""" return self._do_in_mode('onchange') @property def in_onchange(self): """ Return whether we are in 'onchange' draft mode. """ return self.all.mode == 'onchange' def invalidate(self, spec): """ Invalidate some fields for some records in the cache of all environments. :param spec: what to invalidate, a list of `(field, ids)` pair, where ``field`` is a field object, and ``ids`` is a list of record ids or ``None`` (to invalidate all records). """ if not spec: return for env in list(self.all): c = env.cache for field, ids in spec: if ids is None: if field in c: del c[field] else: field_cache = c[field] for id in ids: field_cache.pop(id, None) def invalidate_all(self): """ Clear the cache of all environments. """ for env in list(self.all): env.cache.clear() env.prefetch.clear() env.computed.clear() env.dirty.clear() def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.invalidate_all() self.all.todo.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. """ try: yield except Exception: self.clear() raise def field_todo(self, field): """ Return a recordset with all records to recompute for ``field``. """ ids = { rid for recs in self.all.todo.get(field, ()) for rid in recs.ids } return self[field.model_name].browse(ids) def check_todo(self, field, record): """ Check whether ``field`` must be recomputed on ``record``, and if so, return the corresponding recordset to recompute. """ for recs in self.all.todo.get(field, []): if recs & record: return recs def add_todo(self, field, records): """ Mark ``field`` to be recomputed on ``records``. """ recs_list = self.all.todo.setdefault(field, []) for i, recs in enumerate(recs_list): if recs.env == records.env: recs_list[i] |= records break else: recs_list.append(records) def remove_todo(self, field, records): """ Mark ``field`` as recomputed on ``records``. """ recs_list = [recs - records for recs in self.all.todo.pop(field, [])] recs_list = filter(None, recs_list) if recs_list: self.all.todo[field] = recs_list def has_todo(self): """ Return whether some fields must be recomputed. """ return bool(self.all.todo) def get_todo(self): """ Return a pair ``(field, records)`` to recompute. The field is such that none of its dependencies must be recomputed. """ field = min(self.all.todo, key=self.registry.field_sequence) return field, self.all.todo[field][0] def check_cache(self): """ Check the cache consistency. 
""" # make a full copy of the cache, and invalidate it cache_dump = dict((field, dict(field_cache)) for field, field_cache in self.cache.iteritems()) self.invalidate_all() # re-fetch the records, and compare with their former cache invalids = [] for field, field_dump in cache_dump.iteritems(): ids = filter(None, field_dump) records = self[field.model_name].browse(ids) for record in records: try: cached = field_dump[record.id] fetched = record[field.name] if fetched != cached: info = {'cached': cached, 'fetched': fetched} invalids.append((field, record, info)) except (AccessError, MissingError): pass if invalids: raise UserError('Invalid cache for fields\n' + pformat(invalids)) @property def recompute(self): return self.all.recompute @contextmanager def norecompute(self): tmp = self.all.recompute self.all.recompute = False try: yield finally: self.all.recompute = tmp @property def recompute_old(self): return self.all.recompute_old def clear_recompute_old(self): del self.all.recompute_old[:]
def configure(self):
    self._locals = Local()
    self._local_manager = LocalManager([self._locals])
    self.reset()
# -*- coding: utf-8 -*-
#
from jumpserver.const import DYNAMIC
from werkzeug.local import Local, LocalProxy

thread_local = Local()


def _find(attr):
    return getattr(thread_local, attr, None)


class _Settings:
    pass


def get_dynamic_cfg_from_thread_local():
    KEY = 'dynamic_config'
    try:
        cfg = getattr(thread_local, KEY)
    except AttributeError:
        cfg = _Settings()
        setattr(thread_local, KEY, cfg)
    return cfg


class DynamicDefaultLocalProxy(LocalProxy):
    def __getattr__(self, item):
        try:
import functools
import logging
import time
from contextlib import contextmanager
from typing import Optional, Union, Tuple

import requests
from bs4 import BeautifulSoup
from requests import Response
from requests.exceptions import RequestException
from werkzeug.local import Local, release_local
from werkzeug.urls import url_parse, url_fix

from .__version__ import __version__

LOCAL_CONTEXT = Local()
logger = logging.getLogger(__name__)

bs4_parser = "html.parser"
default_timeout = 3.05


def get_session():
    """
    Returns the Requests Session for the current local context.
    Creates a Session with default values if none exists.

    :return: Requests Session
    """
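# The body of get_session() is truncated above. A minimal sketch of how such a helper
# could cache one requests.Session per local context on LOCAL_CONTEXT; the attribute
# name `session` and the User-Agent value are illustrative assumptions.
def get_session():
    """
    Returns the Requests Session for the current local context.
    Creates a Session with default values if none exists.

    :return: Requests Session
    """
    session = getattr(LOCAL_CONTEXT, 'session', None)
    if session is None:
        session = requests.Session()
        # Illustrative defaults; the real project may configure the session differently.
        session.headers.update({'User-Agent': 'sample-client/' + __version__})
        LOCAL_CONTEXT.session = session
    return session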
from werkzeug.local import LocalProxy, Local
from werkzeug.wrappers import Request as BaseRequest
from werkzeug.exceptions import HTTPException
from json import loads


def _get_req_object():
    try:
        ctx = _req_ctx_ls.ctx
        return getattr(ctx, 'request')
    # A missing attribute on a Local raises AttributeError (not KeyError),
    # so that is what must be caught here.
    except AttributeError:
        raise RuntimeError('Working outside of the request context!')


_req_ctx_ls = Local()  # request-context local storage; only the simple case is handled: one ctx per request
request = LocalProxy(_get_req_object)


class Request(BaseRequest):
    def __init__(self, environ):
        self.rule = None                # the matched werkzeug.routing.Rule object
        self.view_args = None           # the arguments that will be passed to the view function
        self.blueprint = None           # name of the blueprint handling this request; None means it is on the app
        self.routing_exception = None   # stores any routing error for later
        super().__init__(environ)

    def __load__(self, res):
        """
        Bind blueprint, rule and view arguments onto the request, so that
        rule.endpoint and rule.methods (a set) can be used; note the request
        already has a `method` attribute of its own.
        """
Why does Flask need a thread-isolated stack?
Because Flask always refers to a whole class of similar objects through the same variable name
(e.g. `request` refers to a Request object), so how are they told apart correctly?
Flask wraps them into context objects and relies on the stack's behaviour to keep the storage of
context objects consistent (an AppContext object is paired with a RequestContext object).
Thread isolation guarantees that each thread operates on its own stack without interfering with
the others. (Request objects are instantiated many times, so they need isolation; the core app
object really exists only once, so isolating it makes no practical difference.)
The point is that the current thread can correctly refer to the object it created itself, rather
than to an object created by some other thread. Everything uses the same name, but each thread
reads from the top of its own stack, which keeps different requests isolated from each other.

Why use a stack to store the context objects?
In the multi-threaded model one request is handled by one thread: the context is pushed when the
request arrives and popped when it is done, so several contexts never need to be kept at once.
But in some situations, such as offline scripts or unit tests, more than one context may have to
be pushed, hence a container is needed to hold them (see the sketch after this snippet).
'''
import time
from threading import Thread, current_thread

from werkzeug.local import Local, LocalStack

obj = Local()
obj.a = 1
print(f'In {current_thread().name} {obj.__ident_func__()} obj.a is', obj.a)


def manipulate():
    obj.a = 2
    print(f'In {current_thread().name} {obj.__ident_func__()} obj.a is', obj.a)


newThread = Thread(name='newThread', target=manipulate)
newThread.start()
time.sleep(1)
print(f'In {current_thread().name} {obj.__ident_func__()} obj.a is', obj.a)
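A minimal sketch illustrating the point above about pushing more than one context in the same thread, as an offline script or a nested test client might; the stack name and values are illustrative only.

from werkzeug.local import LocalStack

_request_ctx_stack = LocalStack()   # illustrative name, echoing Flask's internals

# An offline script or a nested test client may push several contexts in one thread:
_request_ctx_stack.push({'path': '/outer'})
_request_ctx_stack.push({'path': '/inner'})

print(_request_ctx_stack.top['path'])   # '/inner'  -- the most recent context wins
_request_ctx_stack.pop()
print(_request_ctx_stack.top['path'])   # '/outer'  -- back to the enclosing context
_request_ctx_stack.pop()
print(_request_ctx_stack.top)           # None      -- the stack is empty again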
import threading
from flask import Blueprint, request, jsonify, json
from .. import app, db, data
from ..models.indent import Indent, IndentNum, IndentStatus
from werkzeug.local import Local
from ..define import *
import time
import random
import datetime

bp_order = Blueprint('order', __name__)
order_id_pool = Local()
step_size = 10000


def gen_order_id():
    """
    :return: order number

    Order number format (11 digits in total):
        1 digit business type + 1 digit process ID + 1 digit thread ID
        + 2 digit degree + 4 digit sequence + 2 random digits
    degree + sequence ranges from 0 to 999999, one million values in total.

    Because of this format, the following restrictions apply:
        1. The application must be started with uwsgi.
        2. No more than 10 worker processes.
        3. No more than 10 threads per worker process.
        4. At most 1,000,000 order numbers; the count can be raised by adding digits to degree.

    Order numbers are stored in the indent table as a MySQL BIGINT; be careful not to overflow it.
    Unsigned BIGINT range: 0 ~ 18446744073709551615
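# The real body of gen_order_id() is cut off above. A hypothetical sketch of how an
# 11-digit ID matching the documented format could be composed; the business-type
# constant, the way the process/thread digits are derived, and the per-thread counter
# kept on `order_id_pool` are all assumptions for illustration, not the project's code.
import os
import random
import threading


def gen_order_id_sketch(business_type=1):
    """Compose: 1 type digit + 1 process digit + 1 thread digit + 2-digit degree
    + 4-digit sequence + 2 random digits (illustrative only)."""
    process_digit = os.getpid() % 10             # assumes at most 10 uwsgi workers
    thread_digit = threading.get_ident() % 10    # assumes at most 10 threads per worker
    # A per-thread counter on the Local; the real project may instead reserve ranges
    # from the database in blocks of `step_size`.
    seq = (getattr(order_id_pool, 'seq', 0) + 1) % 1000000
    order_id_pool.seq = seq
    degree, sequence = divmod(seq, 10000)        # 2-digit degree, 4-digit sequence
    suffix = random.randint(0, 99)               # 2 random digits
    return int('%d%d%d%02d%04d%02d' % (
        business_type, process_digit, thread_digit, degree, sequence, suffix))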
def configure(self) -> None:
    self._locals = Local()
    self._local_manager = LocalManager([self._locals])
    self.prepare()
class Environment(Mapping): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary; - :attr:`su`, whether in superuser mode. It provides access to the registry by implementing a mapping from model names to new api models. It also holds a cache for records, and a data structure to manage recomputations. """ _local = Local() @classproperty def envs(cls): return getattr(cls._local, 'environments', ()) @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context, su=False): if uid == SUPERUSER_ID: su = True assert context is not None args = (cr, uid, context, su) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) args = (cr, uid, frozendict(context), su) self.cr, self.uid, self.context, self.su = self.args = args self.registry = Registry(cr.dbname) self.cache = envs.cache self._protected = envs.protected # proxy to shared data structure self.all = envs envs.add(self) return self # # Mapping methods # def __contains__(self, model_name): """ Test whether the given model exists. """ return model_name in self.registry def __getitem__(self, model_name): """ Return an empty recordset from the given model. """ return self.registry[model_name]._browse(self, (), ()) def __iter__(self): """ Return an iterator on model names. """ return iter(self.registry) def __len__(self): """ Return the size of the model registry. """ return len(self.registry) def __eq__(self, other): return self is other def __ne__(self, other): return self is not other def __hash__(self): return object.__hash__(self) def __call__(self, cr=None, user=None, context=None, su=None): """ Return an environment based on ``self`` with modified parameters. :param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context :param su: optional boolean to change the superuser mode :type context: dict :type user: int or :class:`~odoo.addons.base.models.res_users` :type su: bool """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context su = (user is None and self.su) if su is None else su return Environment(cr, uid, context, su) def ref(self, xml_id, raise_if_not_found=True): """Return the record corresponding to the given ``xml_id``.""" return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found) def is_superuser(self): """ Return whether the environment is in superuser mode. """ return self.su def is_admin(self): """ Return whether the current user has group "Access Rights", or is in superuser mode. """ return self.su or self.user._is_admin() def is_system(self): """ Return whether the current user has group "Settings", or is in superuser mode. """ return self.su or self.user._is_system() @lazy_property def user(self): """Return the current user (as an instance). 
:rtype: :class:`~odoo.addons.base.models.res_users`""" return self(su=True)['res.users'].browse(self.uid) @lazy_property def company(self): """Return the current company (as an instance). If not specified in the context (`allowed_company_ids`), fallback on current user main company. :raise AccessError: invalid or unauthorized `allowed_company_ids` context key content. :return: current company (default=`self.user.company_id`) :rtype: res.company .. warning:: No sanity checks applied in sudo mode ! When in sudo mode, a user can access any company, even if not in his allowed companies. This allows to trigger inter-company modifications, even if the current user doesn't have access to the targeted company. """ company_ids = self.context.get('allowed_company_ids', []) if company_ids: if not self.su: user_company_ids = self.user.company_ids.ids if any(cid not in user_company_ids for cid in company_ids): raise AccessError(_("Access to unauthorized or invalid companies.")) return self['res.company'].browse(company_ids[0]) return self.user.company_id @lazy_property def companies(self): """Return a recordset of the enabled companies by the user. If not specified in the context(`allowed_company_ids`), fallback on current user companies. :raise AccessError: invalid or unauthorized `allowed_company_ids` context key content. :return: current companies (default=`self.user.company_ids`) :rtype: res.company .. warning:: No sanity checks applied in sudo mode ! When in sudo mode, a user can access any company, even if not in his allowed companies. This allows to trigger inter-company modifications, even if the current user doesn't have access to the targeted company. """ company_ids = self.context.get('allowed_company_ids', []) if company_ids: if not self.su: user_company_ids = self.user.company_ids.ids if any(cid not in user_company_ids for cid in company_ids): raise AccessError(_("Access to unauthorized or invalid companies.")) return self['res.company'].browse(company_ids) # By setting the default companies to all user companies instead of the main one # we save a lot of potential trouble in all "out of context" calls, such as # /mail/redirect or /web/image, etc. And it is not unsafe because the user does # have access to these other companies. The risk of exposing foreign records # (wrt to the context) is low because all normal RPCs will have a proper # allowed_company_ids. # Examples: # - when printing a report for several records from several companies # - when accessing to a record from the notification email template # - when loading an binary image on a template return self.user.company_ids @property def lang(self): """Return the current language code. :rtype: str """ return self.context.get('lang') def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.cache.invalidate() self.all.tocompute.clear() self.all.towrite.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. 
""" tocompute = { field: set(ids) for field, ids in self.all.tocompute.items() } towrite = { model: { record_id: dict(values) for record_id, values in id_values.items() } for model, id_values in self.all.towrite.items() } try: yield except Exception: self.clear() self.all.tocompute.update(tocompute) for model, id_values in towrite.items(): for record_id, values in id_values.items(): self.all.towrite[model][record_id].update(values) raise def is_protected(self, field, record): """ Return whether `record` is protected against invalidation or recomputation for `field`. """ return record.id in self._protected.get(field, ()) def protected(self, field): """ Return the recordset for which ``field`` should not be invalidated or recomputed. """ return self[field.model_name].browse(self._protected.get(field, ())) @contextmanager def protecting(self, what, records=None): """ Prevent the invalidation or recomputation of fields on records. The parameters are either: - ``what`` a collection of fields and ``records`` a recordset, or - ``what`` a collection of pairs ``(fields, records)``. """ protected = self._protected try: protected.pushmap() what = what if records is None else [(what, records)] for fields, records in what: for field in fields: ids = protected.get(field, frozenset()) protected[field] = ids.union(records._ids) yield finally: protected.popmap() def fields_to_compute(self): """ Return a view on the field to compute. """ return self.all.tocompute.keys() def records_to_compute(self, field): """ Return the records to compute for ``field``. """ ids = self.all.tocompute.get(field, ()) return self[field.model_name].browse(ids) def is_to_compute(self, field, record): """ Return whether ``field`` must be computed on ``record``. """ return record.id in self.all.tocompute.get(field, ()) def not_to_compute(self, field, records): """ Return the subset of ``records`` for which ``field`` must not be computed. """ ids = self.all.tocompute.get(field, ()) return records.browse(id_ for id_ in records._ids if id_ not in ids) def add_to_compute(self, field, records): """ Mark ``field`` to be computed on ``records``. """ if not records: return records self.all.tocompute[field].update(records._ids) def remove_to_compute(self, field, records): """ Mark ``field`` as computed on ``records``. """ if not records: return ids = self.all.tocompute.get(field, None) if ids is None: return ids.difference_update(records._ids) if not ids: del self.all.tocompute[field] @contextmanager def norecompute(self): """ Delay recomputations (deprecated: this is not the default behavior). """ yield
class SSHEnvironment(object): """ It provides an environment for thread-safe management of SSH sessions Attributes: RETRY_EXCEPTIONS: (list) Exceptions that will cause an SSH reconnection attempt. RETRY_MAX: (int) Maximum amount of consecutive SSH connection reconnection attempts before failure. client: (paramiko.Client|None) Raw Paramiko SSH connection. None if not connected. host: (str) Node Hostname/IP that is used for connection. port: (int) Port number used for connection. username: (str) Username used for authentication. identify_file: (str) Path to the SSH identity file TODO: * Create a transport registry * Add locking at transport level * Allow host key policy configuration """ RETRY_EXCEPTIONS = [ paramiko.ssh_exception.SSHException, paramiko.ssh_exception.ChannelException, socket.error, ] RETRY_MAX = 5 _local = Local() @classproperty def envs(cls): """ It returns the current, or creates new, Environments """ try: return cls._local.environments except AttributeError: cls.reset() return cls._local.environments @classmethod def reset(cls): """ It creates a new set of Environments and scrubs the old """ _logger.debug('Resetting SSHEnvironment') try: for env in cls._local.environments: env._cleanup() except AttributeError: pass cls._local.environments = SSHEnvironments() @contextmanager def get_channel(self, retry=True): """ It provides context manager yielding a new Paramiko channel """ try: transport = self.client.get_transport() with transport.open_session() as channel: yield channel except tuple(self.RETRY_EXCEPTIONS + [AttributeError]): if retry: self._connect() with self.get_channel(False) as channel: yield channel else: raise def _cleanup(self): """ It provides a handler to close existing resources """ if self.client: try: self.client.close() except: # pragma: no cover _logger.info('Client close failed for %s', self.client) self.client = None def _connect(self): """ It creates an SSH connection to remote node """ self._cleanup() self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: self.client.connect( self.host, port=self.port, username=self.username, key_filename=os.path.expanduser(self.identity_file), ) self._retry_left = self.RETRY_MAX except tuple(self.RETRY_EXCEPTIONS): if self._retry_left: self._retry_left -= 1 self._connect() else: self.client = None raise def __wrap_method(self, method): """ It injects a lock and a reconnect onto the method """ locked = self.__inject_lock(method) return self.__inject_reconnect(locked) def __inject_lock(self, method): """ It injects a thread lock onto the method """ def __call_method(*args, **kwargs): self._lock.acquire(True) try: return method(*args, **kwargs) finally: self._lock.release() return __call_method def __inject_reconnect(self, method): """ It injects a reconnector onto the method """ def __call_method(*args, **kwargs): try: return method(*args, **kwargs) except tuple(self.RETRY_EXCEPTIONS): self._connect() return method(*args, **kwargs) return __call_method def __new__(cls, host, port=22, username=None, identity_file=None, connect=True, *args, **kwargs ): """ It returns a cached SSHEnvironment, or creates a new one. The bulk of this method is only called once when the SSHEnvironment is instantiated for the first time. Subsequent instantiations will return a cached SSHEnvironment & skip the initiatizations. Params: host: (str) Host or IP of remote node. post: (int) Remote SSH port. username: (str|None) Username for connection. None for shell default. 
identity_file: (str|None) Path to the SSH identity file connect: (bool) If a connection should be initiated automatically """ eval_args = (host, port, username, identity_file) for env in cls.envs: if env._eval_args == eval_args: return env self = object.__new__(cls, *args, **kwargs) cls.envs.add(self) self.host = host self.port = port self.username = username self.identity_file = identity_file self.client = None self._retry_left = self.RETRY_MAX self._args = args self._kwargs = kwargs self._eval_args = eval_args self._lock = Lock() if connect: self._connect() return self def __init__(self, *args, **kwargs): """ It initializes a new SSHEnvironment. Note that ``__init__`` will be run the first time an Environment is used in a session, regardless of whether it was cached. This method currently does nothing, but could be useful for environment prep Params: host: (str) Host or IP of remote node. post: (int) Remote SSH port. username: (str|None) Username for connection. None for shell default. identity_file: (str|None) Path to the SSH identity file connect: (bool) If a connection should be initiated automatically """ pass def __getattr__(self, key): """ Provide passthrough to paramiko Client while locking the conn """ try: return super(SSHEnvironment, self).__getattr__(key) except AttributeError: pass method = getattr(self.client, key) if not callable(method): return method return self.__wrap_method(method) def __str__(self): """ Allow object rebuilds """ return '%(class)s(*%(args)r, **%(kwargs)r)' % { 'class': self.__class__.__name__, 'args': self._eval_args + self._args, 'kwargs': self._kwargs, } def __repr__(self): return self.__str__()
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals

from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json

# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template

__version__ = '7.2.5'
__title__ = "Frappe Framework"

local = Local()


class _dict(dict):
    """dict like object that exposes keys as attributes"""
    def __getattr__(self, key):
        ret = self.get(key)
        if not ret and key.startswith("__"):
            raise AttributeError()
        return ret
    def __setattr__(self, key, value):
        self[key] = value
    def __getstate__(self):
        return self
# request = 'abc'
# print('child thread', request)   # child thread abc
#
# mythread = MyThread()
# mythread.start()
# mythread.join()
#
# print('main thread', request)    # main thread abc

# ========== Local object: attributes are isolated per thread ==========
from threading import Thread
from werkzeug.local import Local

# Any attribute bound to a Local object is isolated between threads.
locals = Local()
locals.request = '123'


class MyThread(Thread):
    def run(self):
        locals.request = 'abc'
        print('child thread', locals.request)   # child thread abc


mythread = MyThread()
mythread.start()
mythread.join()

print('main thread', locals.request)            # main thread 123