def __init__(self, text_list, title=''):
    """Build a Toplevel window with one LabeledProgress bar per entry in
    *text_list*, and start a 50 ms TkTimer that mirrors the shared
    progress/text state onto the widgets.
    """
    self.__win = win = tk.Toplevel()
    win.protocol("WM_DELETE_WINDOW", self._on_close)
    win.title(title)
    # Guards progress_list / text_list state — presumably updated from
    # worker threads while the timer reads it (TODO confirm callers).
    self.__lock = thread.allocate_lock()
    number = len(text_list)
    self.__progressbars = progressbars = []
    self.__text_list = text_list
    # Set to True elsewhere when the labels change; the timer callback
    # clears it after pushing the new texts to the widgets.
    self.__text_changed = False
    self.__progress_list = progress_list = [0]*number
    for n in range(number):
        progressbar = LabeledProgress(win)
        progressbar.pack(expand='yes', fill='x')
        progressbar.label_text = text_list[n]
        progressbars.append(progressbar)
    self.__timer = timer = TkTimer(widget=win, interval=50)

    @timer.add_observer
    def on_timer():
        # Runs periodically on the Tk side: copy the shared state onto
        # the widgets while holding the lock.
        with self.__lock:
            for n in range(number):
                progressbars[n].progress = progress_list[n]
            if self.__text_changed:
                self.__text_changed = False
                for n in range(number):
                    progressbars[n].label_text = self.__text_list[n]

    timer.active = True
def _get(self, timeout):
    """Pop the next queued item, blocking for up to *timeout* seconds.

    Uses a per-waiter lock registered in ``self.waiters``: a producer
    releases that lock to wake us.  Raises IdleTimeout when nothing
    arrives before the deadline.
    """
    e = None
    endtime = None
    while True:
        with self.mutex:
            if self._qsize():
                return self.queue.popleft()
            # Waiting lock
            if not e:
                e = _thread.allocate_lock()
                e.acquire()
            self.waiters.insert(0, e)
        # Wait for condition or timeout
        t = perf_counter()
        if not endtime:
            endtime = t + timeout
        # Exponential-backoff poll on the waiter lock: sleep intervals
        # double from 0.5 ms up to a 50 ms cap, never past the deadline.
        delay = 0.0005
        while True:
            ready = e.acquire(False)
            if ready:
                break
            remaining = endtime - t
            if remaining <= 0.0:
                # Deadline passed: withdraw our waiter entry (a notifier
                # may already have removed it, hence the ValueError pass).
                try:
                    self.waiters.remove(e)
                except ValueError:
                    pass
                raise IdleTimeout()
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
            t = perf_counter()
def __init__(self, tzid, comps=None):
    """Initialize the VTIMEZONE wrapper.

    :param tzid: the TZID string identifying this timezone.
    :param comps: optional list of timezone components; a fresh empty
        list is used when omitted.
    """
    super(_tzicalvtz, self).__init__()
    self._tzid = tzid
    # Bug fix: the original used a mutable default argument (comps=[]),
    # so every instance created without *comps* shared one list.
    self._comps = [] if comps is None else comps
    self._cachedate = []
    self._cachecomp = []
    # Protects the two caches above during concurrent lookups.
    self._cache_lock = _thread.allocate_lock()
def _parallel_split(obj, eng, calls):
    """Run each callable in *calls* on its own engine clone, in its own
    thread, all sharing a single lock stored under the 'lock' variable.
    """
    lock = thread.allocate_lock()
    eng.setVar('lock', lock)
    # Fix: removed the unused local ``i = 0`` present in the original.
    for func in calls:
        new_eng = duplicate_engine_instance(eng)
        # The first workflow step re-publishes the shared lock on the clone.
        new_eng.setWorkflow([lambda o, e: e.setVar('lock', lock), func])
        thread.start_new_thread(new_eng.process, ([obj], ))
def _parallel_split(obj, eng, calls):
    """Fan *calls* out to per-clone engines on separate threads, sharing
    one lock through each engine's store.
    """
    lock = thread.allocate_lock()
    eng.store['lock'] = lock

    # Every clone's first step injects the shared lock into its store.
    share_lock = lambda o, e: e.store.update({'lock': lock})

    for step in calls:
        clone = eng.duplicate()
        clone.setWorkflow([share_lock, step])
        thread.start_new_thread(clone.process, ([obj], ))
def _postinit(self): # alias all queue methods for faster lookup self._popleft = self._queue.popleft self._pop = self._queue.pop if hasattr(self._queue, 'remove'): self._remove = self._queue.remove self._wlock = allocate_lock() self._append = self._queue.append
def _get_pool(self, *args, **kwargs):
    """Get the connection pool, creating it on first use.

    The pool is stored as a class attribute named after ``self.alias`` so
    instances sharing an alias reuse one cx_Oracle session pool
    (min=CX_POOL_SESSION_MIN, max=CX_POOL_SESSION_MAX, growing by
    CX_POOL_SESSION_INCREMENT, homogeneous authentication).

    Raises ``self.Database_Error`` when the pool cannot be created
    (login failure or database not found).

    WARNING: ``threaded`` makes Oracle wrap connection access in a mutex
    (OCI_THREADED); in single-threaded apps that costs ~10-15%, which is
    why CX_POOL_THREADED defaults to False.
    """
    pool_name = '_pool_%s' % getattr(self, 'alias', 'common')
    if not hasattr(self.__class__, pool_name):
        # NOTE(review): this lock is a fresh local object on every call,
        # so it cannot actually serialize pool creation across threads as
        # the original comment intended; a module- or class-level lock is
        # needed for that.  Kept local to preserve the original structure.
        lock = thread.allocate_lock()
        lock.acquire()
        try:
            try:
                pool = cx_Oracle.SessionPool(
                    user=self.user,
                    password=self.password,
                    dsn=self.tns,
                    min=CX_POOL_SESSION_MIN,
                    max=CX_POOL_SESSION_MAX,
                    increment=CX_POOL_SESSION_INCREMENT,
                    connectiontype=cx_Oracle.Connection,
                    threaded=CX_POOL_THREADED,
                    getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,
                    homogeneous=True)
            except Exception:
                pool = None
            if pool:
                pool.timeout = CX_POOL_CONNECT_TIMEOUT
                setattr(self.__class__, pool_name, pool)
            else:
                msg = """ ### Database login failed or database not found ### """
                # Bug fix: the original leaked the lock on this raise path
                # (release was only reached on success) and used the
                # Python-2-only ``raise E, v`` statement form.
                raise self.Database_Error('%s' % msg)
        finally:
            lock.release()
    return getattr(self.__class__, pool_name)
def _get_pool(self, *args, **kwargs):
    """Return the cx_Oracle session pool for this alias, lazily created.

    Stored as a class attribute (``_pool_<alias>``) so it is built once
    and shared.  Pool sizing comes from the CX_POOL_* module constants;
    authentication is homogeneous.

    Raises ``self.Database_Error`` if pool creation fails (bad login or
    missing database).

    WARNING: ``threaded=True`` maps to OCI_THREADED, which mutex-wraps
    connection access and costs ~10-15% in single-threaded use; hence
    the CX_POOL_THREADED default of False.
    """
    pool_name = '_pool_%s' % getattr(self, 'alias', 'common')
    if not hasattr(self.__class__, pool_name):
        # NOTE(review): a per-call local lock cannot serialize concurrent
        # creation between threads — a shared (module/class level) lock is
        # required for the stated purpose.  Preserved as-is, but flagged.
        lock = thread.allocate_lock()
        lock.acquire()
        try:
            try:
                pool = cx_Oracle.SessionPool(
                    user=self.user,
                    password=self.password,
                    dsn=self.tns,
                    min=CX_POOL_SESSION_MIN,
                    max=CX_POOL_SESSION_MAX,
                    increment=CX_POOL_SESSION_INCREMENT,
                    connectiontype=cx_Oracle.Connection,
                    threaded=CX_POOL_THREADED,
                    getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,
                    homogeneous=True)
            except Exception:
                pool = None
            if pool:
                pool.timeout = CX_POOL_CONNECT_TIMEOUT
                setattr(self.__class__, pool_name, pool)
            else:
                msg = """ ### Database login failed or database not found ### """
                # Bug fix: release the lock even on failure (try/finally)
                # and replace the Python-2-only raise-statement syntax.
                raise self.Database_Error('%s' % msg)
        finally:
            lock.release()
    return getattr(self.__class__, pool_name)
def _parallel_split(obj, eng, calls):
    """Launch one duplicated engine per task in *calls*, each on a new
    thread; all of them share one lock via the engine store.
    """
    lock = thread.allocate_lock()
    eng.store['lock'] = lock
    for task in calls:
        engine_copy = eng.duplicate()
        workflow = [lambda o, e: e.store.update({'lock': lock}), task]
        engine_copy.setWorkflow(workflow)
        thread.start_new_thread(engine_copy.process, ([obj], ))
class Reports(NestedDict):
    """This holds a structure of nested dicts.

    The outer dict is a mapping of catalog id to reports. The inner
    dict holds a query key to Report mapping.
    """

    # Guards mutation of the shared class-level ``value`` mapping.
    lock = allocate_lock()
    # catalog id -> {query key -> Report}
    value = {}
def __init__(self, cache=False): if cache: self._cache = [] self._cache_lock = _thread.allocate_lock() self._cache_gen = self._iter() self._cache_complete = False else: self._cache = None self._cache_complete = False self._len = None
class PriorityMap(NestedDict):
    """This holds a structure of nested dicts.

    The outer dict is a mapping of catalog id to plans. The inner
    dict holds a query key to Benchmark mapping.
    """

    # Guards replacement of the shared class-level ``value`` mapping.
    lock = allocate_lock()
    # catalog id -> {query key -> {index name -> Benchmark}}
    value = {}

    @classmethod
    def get_value(cls):
        # Return a shallow copy so callers cannot mutate the shared map.
        return cls.value.copy()

    @classmethod
    def load_default(cls):
        # Load the priority map named by the ZCATALOGQUERYPLAN env var,
        # if set; import failures are logged, not raised.
        location = environ.get('ZCATALOGQUERYPLAN')
        if location:
            try:
                pmap = resolve(location)
                cls.load_pmap(location, pmap)
            except ImportError:
                logger.warning('could not load priority map from %s',
                               location)

    @classmethod
    def load_from_path(cls, path):
        # Execute the file at *path* and take its ``queryplan`` mapping.
        # NOTE(review): this exec's arbitrary file contents — only use
        # with trusted paths.
        path = os.path.abspath(path)
        _globals = {}
        _locals = {}
        with open(path, 'rb') as fd:
            exec(fd.read(), _globals, _locals)
        pmap = _locals['queryplan'].copy()
        cls.load_pmap(path, pmap)

    @classmethod
    def load_pmap(cls, location, pmap):
        logger.info('loaded priority %d map(s) from %s',
                    len(pmap), location)
        # Convert the simple benchmark tuples to namedtuples
        new_plan = {}
        for cid, plan in pmap.items():
            new_plan[cid] = {}
            for querykey, details in plan.items():
                new_plan[cid][querykey] = {}
                # A set/frozenset entry is stored verbatim; otherwise the
                # per-index tuples become Benchmark namedtuples.
                if isinstance(details, (frozenset, set)):
                    new_plan[cid][querykey] = details
                else:
                    for indexname, benchmark in details.items():
                        new_plan[cid][querykey][indexname] = \
                            Benchmark(*benchmark)
        with cls.lock:
            cls.value = new_plan
def __init__(self, db_cls, create_db=False, use_unicode=False, charset=None, timeout=None): """ Set transaction managed class for use in pool. """ self._db_cls = db_cls # pool of one db object/thread self._db_pool = {} self._db_lock = allocate_lock() # auto-create db if not present on server self._create_db = create_db # unicode settings self.use_unicode = use_unicode self.charset = charset # timeout setting self.timeout = int(timeout) if timeout else None
def __init__(self, threaded=True):
    """Initialize aggregate download-progress state; use a real lock when
    *threaded*, otherwise a no-op stand-in.
    """
    self.meters = []
    self.in_progress_meters = []
    self._lock = thread.allocate_lock() if threaded else _FakeLock()
    self.update_period = 0.3  # seconds

    # Totals and counters, populated as transfers run.
    self.numfiles = None
    self.finished_files = 0
    self.failed_files = 0
    self.open_files = 0
    self.total_size = None
    self.failed_size = 0
    self.start_time = None
    self.finished_file_size = 0
    self.last_update_time = None
    self.re = RateEstimator()
def __init__(self, grabber, mirrors, **kwargs):
    """Initialize the MirrorGroup object.

    REQUIRED ARGUMENTS

      grabber  - URLGrabber instance
      mirrors  - a list of mirrors

    OPTIONAL ARGUMENTS

      failure_callback - callback to be used when a mirror fails
      default_action   - dict of failure actions

    See the module-level and class level documentation for more details.
    """
    # OVERRIDE IDEAS:
    #   shuffle the list to randomize order
    self.grabber = grabber
    self.mirrors = self._parse_mirrors(mirrors)
    self._next = 0
    self._lock = thread.allocate_lock()
    self.default_action = None
    self._process_kwargs(kwargs)

    # Use the same algorithm as the parallel downloader to initially
    # sort the mirror list (sort by speed, but prefer live private
    # mirrors).
    def rank(entry):
        speed, fail = _TH.estimate(entry['mirror'])
        is_private = not fail and entry.get('kwargs', {}).get('private', False)
        return is_private, speed

    # Update the initial order.  Sorting is stable, so the relative
    # order of unknown (not-yet-used) hosts is retained.
    self.mirrors.sort(key=rank, reverse=True)
def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__lock__', allocate_lock())
channel.continueInDialplan() on_dtmf_handle.close() elif digit == '*': channel.play(media='sound:asterisk-friend') else: channel.play(media='sound:digits/%s' % digit) on_dtmf_handle = channel.on_event('ChannelDtmfReceived', on_dtmf) channel.answer() channel.play(media='sound:hello-world') client.on_channel_event('StasisStart', on_start) # Run the WebSocket sync = thread.allocate_lock() def run(): """Thread for running the Websocket. """ sync.acquire() client.run(apps="hello") sync.release() thr = thread.start_new_thread(run, ()) print("Press enter to exit") sys.stdin.readline() client.close() sync.acquire()
ticket = getattr(request, '_request_monitoring_ticket_', None) if ticket is None: _ticket_lock.acquire() id = _ticket_no = _ticket_no + 1 _ticket_lock.release() ticket = request._request_monitoring_ticket_ = _Ticket(id) return ticket @implementer(ITicket) class _Ticket(object): def __init__(self, id): self.id = id self.time = time() _ticket_lock = allocate_lock() _ticket_no = 0 @adapter(IRequest) @implementer(IInfo) def info(request): """provide readable information for *request*.""" qs = request.get('QUERY_STRING') aia = IAdditionalInfo(request, None) ai = aia and str(aia) return (request.get('PATH_INFO', '') + (qs and '?' + qs or '') + (ai and (' [%s] ' % ai) or ''))
def __init__(self):
    """Reset the pending command/result slots and create their lock."""
    self.__command = None
    self.__return_value = None
    self.__threadLock = thread.allocate_lock()
return _global_registry ############################################################################## # private function '_clear_global_registry' ############################################################################## def _clear_global_registry(): """ Clears out the current global registry. This exists purely to allow testing of the global registry and the apptools.sweet_pickle framework. THIS METHOD SHOULD NEVER BE CALLED DURING NORMAL OPERATIONS! """ global _global_registry _global_registry = None ############################################################################## # private, but global, variables ############################################################################## # The global singleton updater _global_registry = None # The lock used to make access to the global singleton thread safe _global_registry_lock = _thread.allocate_lock() #### EOF #####################################################################
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## from six.moves._thread import allocate_lock import transaction from . import TM from .TM import Surrogate thunk_lock = allocate_lock() class THUNKED_TM(TM.TM): """A big heavy hammer for handling non-thread safe DAs """ def _register(self): if not self._registered: thunk_lock.acquire() try: transaction.get().register(Surrogate(self)) self._begin() except Exception: thunk_lock.release() raise else:
def __init__(cls, *args, **kwargs): cls.__instances = weakref.WeakValueDictionary() cls.__strong_cache = OrderedDict() cls.__strong_cache_size = 8 cls.__cache_lock = _thread.allocate_lock()
# -*- coding: utf-8 -*- from plone.app.dexterity.interfaces import IDXFileFactory from plone.dexterity.utils import createContentInContainer from plone.namedfile.file import NamedBlobFile from plone.namedfile.file import NamedBlobImage from Products.CMFCore.interfaces._content import IFolderish from Products.CMFCore.utils import getToolByName from Products.CMFPlone import utils as ploneutils from six.moves._thread import allocate_lock from zope.component import adapter from zope.container.interfaces import INameChooser from zope.interface import implementer upload_lock = allocate_lock() @adapter(IFolderish) @implementer(IDXFileFactory) class DXFileFactory(object): def __init__(self, context): self.context = context def __call__(self, name, content_type, data): ctr = getToolByName(self.context, 'content_type_registry') type_ = ctr.findTypeName(name.lower(), content_type, data) or 'File' name = ploneutils.safe_unicode(name) chooser = INameChooser(self.context)
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## from six.moves._thread import allocate_lock import transaction from . import TM from .TM import Surrogate thunk_lock = allocate_lock() class THUNKED_TM(TM.TM): """A big heavy hammer for handling non-thread safe DAs """ def _register(self): if not self._registered: thunk_lock.acquire() try: transaction.get().register(Surrogate(self)) self._begin() except Exception: thunk_lock.release() raise
import dateutil.parser
import dateutil.tz
import logging
import re
import time
import wsgiref.handlers

PAGE_CACHE_KEY = 'plone.app.caching.operations.ramcache'
PAGE_CACHE_ANNOTATION_KEY = 'plone.app.caching.operations.ramcache.key'
ETAG_ANNOTATION_KEY = 'plone.app.caching.operations.etag'
LASTMODIFIED_ANNOTATION_KEY = 'plone.app.caching.operations.lastmodified'

_marker = object()

logger = logging.getLogger('plone.app.caching')

# Serializes use of the shared ETag-parsing machinery below.
parseETagLock = allocate_lock()

# etagQuote = re.compile(r'(\s*\"([^\"]*)\"\s*,{0,1})')
# etagNoQuote = re.compile(r'(\s*([^,]*)\s*,{0,1})')
# Bug fix: made the regex literals raw strings.  '\s', '\/' and '\"'
# are not valid string escapes, which raises SyntaxWarning (and will
# become an error) on modern Python; the compiled patterns are
# unchanged.
etagQuote = re.compile(r'(\s*(W\/)?\"([^\"]*)\"\s*,?)')
etagNoQuote = re.compile(r'(\s*(W\/)?([^,]*)\s*,?)')

#
# Operation helpers, used in the implementations of interceptResponse() and
# modifyResponse().
#
# These all take three parameters, published, request and response, as well
# as any additional keyword parameters required.
#
class FakeExecutableObject: """Fake ExecutableObject used to set proxy roles in trusted code. """ def __init__(self, proxy_roles): self._proxy_roles = tuple(proxy_roles) def getOwner(self): return None getWrappedOwner = getOwner # Parse a string of etags from an If-None-Match header # Code follows ZPublisher.HTTPRequest.parse_cookie parse_etags_lock = allocate_lock() def parse_etags( text, result=None, # quoted etags (assumed separated by whitespace + a comma) etagre_quote=re.compile(r'(\s*\"([^\"]*)\"\s*,{0,1})'), # non-quoted etags (assumed separated by whitespace + a comma) etagre_noquote=re.compile(r'(\s*([^,]*)\s*,{0,1})'), acquire=parse_etags_lock.acquire, release=parse_etags_lock.release): if result is None: result = [] if not len(text):
from Acquisition import Acquired from Acquisition import Explicit from App.special_dtml import DTMLFile from OFS.SimpleItem import Item from Persistence import Overridable from ZODB.broken import Broken as ZODB_Broken from ZODB.broken import persistentBroken try: from html import escape except ImportError: # PY2 from cgi import escape broken_klasses = {} broken_klasses_lock = allocate_lock() LOG = getLogger('OFS.Uninstalled') class BrokenClass(ZODB_Broken, Explicit, Item, Overridable): _p_changed = 0 meta_type = 'Broken Because Product is Gone' product_name = 'unknown' id = 'broken' manage_page_header = Acquired manage_page_footer = Acquired def __getattr__(self, name): if name[:3] == '_p_':
from AccessControl.Permissions import view_management_screens from AccessControl.SecurityInfo import ClassSecurityInfo from AccessControl.SecurityInfo import ModuleSecurityInfo from App.special_dtml import HTMLFile from Persistence import Persistent from Shared.DC.ZRDB.Connection import Connection as ConnectionBase from .db import DB from .db import DBPool from .permissions import add_zmysql_database_connections from .utils import TableBrowser from .utils import table_icons # Connection Pool for connections to MySQL. # Maps one mysql client connection to one DA object instance. database_connection_pool_lock = allocate_lock() database_connection_pool = {} # Combining connection pool with the DB pool gets you # one DA/connection per connection with 1 DBPool with 1 DB/thread # pool_id -> DA -> DBPool -> thread id -> DB # dc_pool[pool_id] == DBPool_instance # DBPool_instance[thread id] == DB instance class Connection(ConnectionBase): """ Zope database adapter for MySQL/MariaDB """ meta_type = 'Z MySQL Database Connection' security = ClassSecurityInfo() zmi_icon = 'fas fa-database'
"""Fake ExecutableObject used to set proxy roles in trusted code. """ def __init__(self, proxy_roles): self._proxy_roles = tuple(proxy_roles) def getOwner(self): return None getWrappedOwner = getOwner # Parse a string of etags from an If-None-Match header # Code follows ZPublisher.HTTPRequest.parse_cookie parse_etags_lock = allocate_lock() def parse_etags(text, result=None, # quoted etags (assumed separated by whitespace + a comma) etagre_quote=re.compile(r'(\s*\"([^\"]*)\"\s*,{0,1})'), # non-quoted etags (assumed separated by whitespace + a comma) etagre_noquote=re.compile(r'(\s*([^,]*)\s*,{0,1})'), acquire=parse_etags_lock.acquire, release=parse_etags_lock.release): if result is None: result = [] if not len(text): return result
from ZPublisher.HTTPRequest import WSGIRequest
from ZPublisher.HTTPResponse import WSGIResponse
from ZPublisher.Iterators import IUnboundStreamIterator
from ZPublisher.mapply import mapply
from ZPublisher import pubevents
from ZPublisher.utils import recordMetaData

# ``file`` is a builtin only on Python 2; include it there so plain
# file objects are also recognized as stream types.
if sys.version_info >= (3, ):
    _FILE_TYPES = (IOBase, )
else:
    _FILE_TYPES = (IOBase, file)  # NOQA

_DEFAULT_DEBUG_MODE = False
_DEFAULT_REALM = None
# Lock presumably guarding population of the _MODULES registry below —
# TODO confirm against the functions that use it.
_MODULE_LOCK = allocate_lock()
_MODULES = {}


def call_object(obj, args, request):
    # Invoke the published object with its positional args; *request*
    # is accepted for the mapply hook signature but unused here.
    return obj(*args)


def dont_publish_class(klass, request):
    # Publishing a bare class is forbidden: respond 403.
    request.response.forbiddenError("class %s" % klass.__name__)


def missing_name(name, request):
    # 'self' resolves to the innermost parent object; any other missing
    # argument name is a bad request.
    if name == 'self':
        return request['PARENTS'][0]
    request.response.badRequestError(name)
def __init__(self): self._lock = _thread.allocate_lock() self._hostmap = {} # map hosts to a list of connections self._connmap = {} # map connections to host self._readymap = {} # map connection to ready state
from Acquisition import Explicit from App.special_dtml import DTMLFile from OFS.SimpleItem import Item from Persistence import Overridable from six import exec_ from six.moves._thread import allocate_lock from ZODB.broken import Broken as ZODB_Broken from ZODB.broken import persistentBroken try: from html import escape except ImportError: # PY2 from cgi import escape broken_klasses = {} broken_klasses_lock = allocate_lock() LOG = getLogger('OFS.Uninstalled') class BrokenClass(ZODB_Broken, Explicit, Item, Overridable): _p_changed = 0 meta_type = 'Broken Because Product is Gone' product_name = 'unknown' id = 'broken' manage_page_header = Acquired manage_page_footer = Acquired def __getattr__(self, name): if name[:3] == '_p_':
def _access_default(self): return allocate_lock()
def _access_default ( self ): return allocate_lock()