Example 1
    def save_data(self, environ, start, stop, collector):
        data = {}
        data['hostname'] = self.hostname
        data['environ'] = dict((k, v) for k, v in six.iteritems(environ)
                               if isinstance(v, six.string_types))
        data['start_time'] = start
        data['stop_time'] = stop
        data['thread_ident'] = thread.get_ident()
        # Only take the 25 most frequent stack frames
        collector.filter(25)

        samples = []
        for stack, frequency in six.iteritems(collector.stack_counts):
            frames = []
            for elm in stack:
                frame = {}
                frame['file'] = elm[0]
                frame['line_no'] = elm[1]
                frame['function'] = elm[2]
                frames.append(frame)

            sample = {}
            sample['frames'] = frames
            sample['frequency'] = frequency
            samples.append(sample)

        data['samples'] = samples

        filename = '%s-%s' % (time.time(), thread.get_ident())

        if not os.path.exists(self.outpath):
            os.makedirs(self.outpath)

        with open(os.path.join(self.outpath, filename + '.json'), 'w') as fp:
            json.dump(data, fp, indent=2)
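
The dump written above is plain JSON, so it can be inspected with nothing but the standard library. A minimal sketch that loads one such file (the file name below is hypothetical; real names follow the '<time>-<thread ident>.json' pattern used by save_data) and prints the most frequently sampled stack:

import json

# hypothetical dump produced by save_data() above
with open('1700000000.0-140213456789.json') as fp:
    data = json.load(fp)

# pick the stack that was observed most often
top = max(data['samples'], key=lambda s: s['frequency'])
print('hottest stack (seen %d times):' % top['frequency'])
for frame in top['frames']:
    print('  %(file)s:%(line_no)s in %(function)s' % frame)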
Example 2
    def test_closeConnection(self):
        pool = self._makeOne()

        pool._db_pool[get_ident()] = 'foo'
        self.assertEqual(pool._db_pool, {get_ident(): 'foo'})
        pool.closeConnection()
        self.assertEqual(pool._db_pool, {})

        # This should not raise an error
        pool.closeConnection()
Example 3
    def T22():
        tell, wait = c21_2.tell, c12_2.wait

        # make sure we are not the same thread which ran T11
        # (this should hold because we deliberately have not stopped T11 yet)
        assert _thread.get_ident() != t11_ident

        conn22 = db.open()
        assert conn22 is conn01
        tell('T2-conn22-opened')

        # modify zfile and arrange timings so that T1 does abort after we
        # modify, but before we commit
        wait('T1-conn12-opened')
        root22 = conn22.root()
        a22 = root22['zarray2']

        fh22  = a22._v_fileh
        vma22 = fh22.mmap(0, 1)

        Blk(vma22, 0)[0] = 22

        tell('T2-zfile2-modified')
        wait('T1-txn-aborted')

        # commit - changes should propagate to zfile
        transaction.commit()

        tell('T2-txn-committed')

        conn22.close()
Example 4
    def T22():
        tell, wait = c21_2.tell, c12_2.wait

        # make sure we are not the same thread which ran T11
        # (this should hold because we deliberately have not stopped T11 yet)
        assert _thread.get_ident() != t11_ident

        conn22 = db.open()
        assert conn22 is conn01
        tell('T2-conn22-opened')

        # modify zarray and arrange timings so that T1 does abort after we
        # modify, but before we commit
        wait('T1-conn12-opened')
        root22 = conn22.root()
        a22 = root22['zarray2']

        a22[0:1] = [22]     # XXX -> [0] = 22   after BigArray can

        tell('T2-zarray2-modified')
        wait('T1-txn-aborted')

        # commit - changes should propagate to zarray
        transaction.commit()

        tell('T2-txn-committed')

        conn22.close()
Example 5
    def __getattr__(self, name):
        """
        Looks for the name in an object with wrappers that only reach
        up to the root skins folder.

        This should be fast, flexible, and predictable.
        """
        if not name:
            raise AttributeError(name)
        if name[0] not in ('_', '@', '+') and not name.startswith('aq_'):
            sd = SKINDATA.get(get_ident())
            if sd is not None:
                ob, _skinname, ignore, resolve = sd
                if name not in ignore:
                    if name in resolve:
                        return resolve[name]
                    subob = getattr(ob, name, _MARKER)
                    if subob is not _MARKER:
                        # Return it in context of self, forgetting
                        # its location and acting as if it were located
                        # in self.
                        retval = aq_base(subob)
                        resolve[name] = retval
                        return retval
                    else:
                        ignore[name] = 1
        raise AttributeError(name)
Example 6
    def _checkId(self, id, allow_dup=0):
        """
        Override of ObjectManager._checkId().

        Allows the user to create objects with IDs that match the ID of
        a skin object.
        """
        superCheckId = SkinnableObjectManager.inheritedAttribute('_checkId')
        if not allow_dup:
            # Temporarily disable skindata.
            # Note that this depends heavily on Zope's current thread
            # behavior.
            tid = get_ident()
            sd = SKINDATA.get(tid)
            if sd is not None:
                del SKINDATA[tid]
            try:
                base = getattr(self, 'aq_base', self)
                if not hasattr(base, id):
                    # Cause _checkId to not check for duplication.
                    return superCheckId(self, id, allow_dup=1)
            finally:
                if sd is not None:
                    SKINDATA[tid] = sd
        return superCheckId(self, id, allow_dup)
Example 7
    def current(self):
        ident = thread.get_ident()

        try:
            return self._threads[ident]
        except KeyError:
            return self.build(ident)
Example 8
    def clear(self):
        ident = thread.get_ident()

        if ident not in self._threads:
            return

        del self._threads[ident]
Example 9
    def consumer_callback(self, msg):
        """callback function called by the ConsumerBase class of
        qpid driver.
        Message will be received in the format x-y
        where x is the sender id and y is the msg number of the sender
        extract the sender id 'x' and store the msg 'x-y' with 'x' as
        the key
        """

        if isinstance(msg, dict):
            msgcontent = msg['content']
        else:
            msgcontent = msg

        splitmsg = msgcontent.split('-')
        key = _thread.get_ident()

        if key not in self._messages:
            self._messages[key] = dict()

        tdict = self._messages[key]

        if splitmsg[0] not in tdict:
            tdict[splitmsg[0]] = []

        tdict[splitmsg[0]].append(msgcontent)
Example 10
def do_compute(call_no, mode='sleep', runtime=None, n=None):
    started = utcnow()
    process_id = os.getpid()
    thread_id = _thread.get_ident()

    if mode == 'fib':
        res = fib(n)
    elif mode == 'sleep':
        # yes, we do the evil blocking thing here!
        # this is to simulate CPU intensive stuff
        time.sleep(runtime)
        res = None
    else:
        res = random.random()

    ended = utcnow()

    result = {
        u'call_no': call_no,
        u'started': started,
        u'ended': ended,
        u'process': process_id,
        u'thread': thread_id,
        u'result': res
    }
    return result
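
Functions like do_compute() are typically dispatched to a thread pool so that each call reports a different 'thread' value. A small self-contained sketch of that usage, with a simplified stand-in for the work function (utcnow() and fib() are not shown in the example above):

import os
import time
import _thread
from concurrent.futures import ThreadPoolExecutor

def work(call_no, runtime=0.1):
    # simplified stand-in for do_compute(): block, then report identifiers
    time.sleep(runtime)
    return {'call_no': call_no,
            'process': os.getpid(),
            'thread': _thread.get_ident()}

with ThreadPoolExecutor(max_workers=4) as pool:
    for res in pool.map(work, range(8)):
        print(res)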
Example 12
    def __checkThreadID(self):
        """
        .. warning:: just guessing...

        This seems to check that we are not creating a client and then using
        it in a multithreaded environment. However, it is triggered only if
        self.__enableThreadCheck is set to True, but that flag is hardcoded
        to False and does not seem to be modified anywhere in the code.
        """
        if not self.__initStatus["OK"]:
            return self.__initStatus
        cThID = thread.get_ident()
        if not self.__allowedThreadID:
            self.__allowedThreadID = cThID
        elif cThID != self.__allowedThreadID:
            msgTxt = """
=======DISET client thread safety error========================
Client %s
can only run on thread %s
and this is thread %s
===============================================================""" % (
                str(self),
                self.__allowedThreadID,
                cThID,
            )
            gLogger.error("DISET client thread safety error", msgTxt)
Example 13
    def current(self):
        ident = thread.get_ident()

        try:
            return self._threads[ident]
        except KeyError:
            return self.build(ident)
Example 14
    def _checkId(self, id, allow_dup=0):
        '''
        Override of ObjectManager._checkId().

        Allows the user to create objects with IDs that match the ID of
        a skin object.
        '''
        superCheckId = SkinnableObjectManager.inheritedAttribute('_checkId')
        if not allow_dup:
            # Temporarily disable skindata.
            # Note that this depends heavily on Zope's current thread
            # behavior.
            tid = get_ident()
            sd = SKINDATA.get(tid)
            if sd is not None:
                del SKINDATA[tid]
            try:
                base = getattr(self, 'aq_base', self)
                if not hasattr(base, id):
                    # Cause _checkId to not check for duplication.
                    return superCheckId(self, id, allow_dup=1)
            finally:
                if sd is not None:
                    SKINDATA[tid] = sd
        return superCheckId(self, id, allow_dup)
Example 15
    def test_concat_files(self):
        """Test the _concat_files() func."""
        ident = _thread.get_ident()
        file_lst = []
        for i in six.moves.range(3):
            file_lst.append(os.path.join(tempfile.gettempdir(),
                                         '{}.{}'.format(ident, i)))
            with io.open(file_lst[-1], 'wb') as logs:
                logs.write(bytearray('{}\n'.format(i), 'ascii'))

        result = local._concat_files(file_lst)
        self.assertTrue(isinstance(result, io.TextIOWrapper))
        self.assertEqual(result.read(), u'0\n1\n2\n')

        # check that _concat_files() catches IOError for non existing file
        file_lst.append('no_such_file')
        local._concat_files(file_lst)

        for f in file_lst[:-1]:
            os.remove(f)

        # make sure that things don't break if the log file contains some
        # binary data with ord num > 128 (e.g. \xc5 below), i.e. not ascii
        # decodable
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp:
            temp.write(b'\x42\x00\x01\xc5\x45\x0a')
            temp.seek(0)

        self.assertTrue(''.join(local._concat_files([temp.name])))
Example 16
    def __getattr__(self, name):
        '''
        Looks for the name in an object with wrappers that only reach
        up to the root skins folder.

        This should be fast, flexible, and predictable.
        '''
        if not name:
            raise AttributeError(name)
        if name[0] not in ('_', '@', '+') and not name.startswith('aq_'):
            sd = SKINDATA.get(get_ident())
            if sd is not None:
                ob, _skinname, ignore, resolve = sd
                if name not in ignore:
                    if name in resolve:
                        return resolve[name]
                    subob = getattr(ob, name, _MARKER)
                    if subob is not _MARKER:
                        # Return it in context of self, forgetting
                        # its location and acting as if it were located
                        # in self.
                        retval = aq_base(subob)
                        resolve[name] = retval
                        return retval
                    else:
                        ignore[name] = 1
        raise AttributeError(name)
Example 17
    def call(self, *args, **kwargs):
        """
        Create a job, execute it in the worker thread, and deliver the result.
        If the job raises an exception, re-raise that same exception.
        Meant to be called by non-worker threads, but calls from the worker
        thread are also accepted. Blocks until the job is done.
        """

        _job = job(*args, **kwargs)

        if self._threadID == _thread.get_ident():
            # if caller is worker thread execute immediately
            _job.do()
        else:
            # otherwise notify and wait for completion
            self.mutex.acquire()

            while self.job is not None:
                self.free.wait()

            self.job = _job
            self.todo.notify()
            self.done.wait()
            self.job = None
            self.free.notify()
            self.mutex.release()

        if _job.success:
            return _job.result
        else:
            self.reraise(_job)
Example 18
    def clear(self):
        ident = thread.get_ident()

        if ident not in self._threads:
            return

        del self._threads[ident]
Example 19
def newSecurityManager(request, user):
    """Set up a new security context for a request for a user
    """
    thread_id = get_ident()
    _managers[thread_id] = SecurityManager(  # NOQA: F821
        thread_id,
        SecurityContext(user),
    )
Example 20
def TLOG(*args):
    sargs = []
    sargs.append(str(thread.get_ident()))
    sargs.append(str(time.time()))
    for arg in args:
        sargs.append(str(arg))
    msg = ' '.join(sargs)
    LOG.info(msg)
Example 21
def _temp_dir():
    """Construct an empty temporary dir for each thread and return the path."""
    dirname = os.path.join(tempfile.gettempdir(),
                           'local-{}.temp'.format(_thread.get_ident()))

    shutil.rmtree(dirname, True)
    os.mkdir(dirname, 0o755)
    return dirname
Example 22
def newSecurityManager(request, user):
    """Set up a new security context for a request for a user
    """
    thread_id = get_ident()
    _managers[thread_id] = SecurityManager(
        thread_id,
        SecurityContext(user),
    )
Example 23
    def __new__(cls, statsfile=None):
        current_thread = _thread.get_ident()

        def make_database(statsfile):
            def connect(cache):
                # sqlite needs to get the name in utf-8 on all platforms
                cache.con = dbapi2.connect(
                    statsfile.encode('utf-8') if six.PY2 else statsfile)
                cache.cur = cache.con.cursor()

            def clear_old_data(cache):
                try:
                    cache.cur.execute(
                        """SELECT min(toolkitbuild) FROM files""")
                    val = cache.cur.fetchone()
                    # If the database is empty, we have no idea whether its layout
                    # is correct, so we might as well delete it.
                    if val is None or val[0] < toolkitversion.build:
                        cache.con.close()
                        del cache
                        os.unlink(statsfile)
                        return True
                    return False
                except dbapi2.OperationalError:
                    return False

            cache = cls._caches.setdefault(current_thread,
                                           {})[statsfile] = object.__new__(cls)
            connect(cache)
            if clear_old_data(cache):
                connect(cache)
            cache.create()
            return cache

        if not statsfile:
            if not cls.defaultfile:
                userdir = os.path.expanduser("~")
                cachedir = None
                if os.name == "nt":
                    cachedir = os.path.join(userdir, "Translate Toolkit")
                else:
                    cachedir = os.path.join(userdir, ".translate_toolkit")
                if not os.path.exists(cachedir):
                    os.mkdir(cachedir)
                if isinstance(cachedir, bytes):
                    cachedir = six.text_type(cachedir,
                                             sys.getfilesystemencoding())
                cls.defaultfile = os.path.realpath(
                    os.path.join(cachedir, u"stats.db"))
            statsfile = cls.defaultfile
        else:
            statsfile = os.path.realpath(statsfile)
        # First see if a cache for this file already exists:
        if current_thread in cls._caches and statsfile in cls._caches[
                current_thread]:
            return cls._caches[current_thread][statsfile]
        # No existing cache. Let's build a new one and keep a copy
        return make_database(statsfile)
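
The per-thread layer of cls._caches exists because a sqlite connection created in one thread cannot, by default, be used from another. A minimal sketch of the same idea, reduced to a plain helper and assuming the standard sqlite3 module rather than the dbapi2 alias used above:

import sqlite3
import _thread

_connections = {}  # {thread ident: {db path: connection}}

def get_connection(path):
    # reuse a connection only within the thread that created it;
    # sqlite3 refuses cross-thread use of a connection by default
    ident = _thread.get_ident()
    per_thread = _connections.setdefault(ident, {})
    if path not in per_thread:
        per_thread[path] = sqlite3.connect(path)
    return per_thread[path]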
Example 24
 def _profile(self, profiler, frame, event, arg):
     t = time.clock()
     if t - self.sampled_at < self.interval:
         return
     self.sampled_at = t
     frames = self.current_frames()
     frames[_thread.get_ident()] = frame
     for frame in frames.values():
         profiler.sample(frame)
Example 26
    def test_sql_quote__no_unicode(self):
        self.conn = self._simpleMakeOne()

        self.conn.sql_quote__('foo')

        db_pool = self.conn._v_database_connection._db_pool
        internal_conn = db_pool.get(get_ident()).db
        self.assertEqual(internal_conn.string_literal_called, 'foo')
        self.assertFalse(internal_conn.unicode_literal_called)
Example 27
def test_showFormatOptionsgLogger(
    header, threadID, timeStamp, context, msg, expectedLog, isThreadIDAvailable, isTimeStampAvailable
):
    """
    Set gLogger options, check that options are inherited in log and sublog
    """
    capturedBackend, log, sublog = gLoggerReset()

    # setting these values should modify the way the log record is displayed
    gLogger.showHeaders(header)
    gLogger.showThreadIDs(threadID)
    gLogger.showTimeStamps(timeStamp)
    gLogger.showContexts(context)

    # log and sublog should inherit from the changes
    assert gLogger._options["headerIsShown"] == header
    assert gLogger._options["threadIDIsShown"] == threadID
    assert log._options["headerIsShown"] == gLogger._options["headerIsShown"]
    assert log._options["timeStampIsShown"] == gLogger._options["timeStampIsShown"]
    assert log._options["contextIsShown"] == gLogger._options["contextIsShown"]
    assert log._options["threadIDIsShown"] == gLogger._options["threadIDIsShown"]
    assert sublog._options["headerIsShown"] == log._options["headerIsShown"]
    assert sublog._options["timeStampIsShown"] == log._options["timeStampIsShown"]
    assert sublog._options["contextIsShown"] == log._options["contextIsShown"]
    assert sublog._options["threadIDIsShown"] == log._options["threadIDIsShown"]

    # create log records and check the format is correct
    gLogger.notice(msg)
    logValue = capturedBackend.getvalue()
    # check that timestamp is available if it has to be available
    assert ("UTC" in logValue) == isTimeStampAvailable
    logstring = cleaningLog(logValue)
    capturedBackend.truncate(0)
    capturedBackend.seek(0)

    log.notice(msg)
    logValue = capturedBackend.getvalue()
    assert ("UTC" in logValue) == isTimeStampAvailable
    logstring += cleaningLog(logValue)
    capturedBackend.truncate(0)
    capturedBackend.seek(0)

    sublog.notice(msg)
    logValue = capturedBackend.getvalue()
    assert ("UTC" in logValue) == isTimeStampAvailable
    logstring += cleaningLog(logValue)
    capturedBackend.truncate(0)
    capturedBackend.seek(0)

    # check that threadID is present in the log when it should be present
    threadIDValue = str(thread.get_ident())
    assert (threadIDValue in logstring) == isThreadIDAvailable
    # as thread ID depends on the execution, we have to add it to the expected results
    if isThreadIDAvailable:
        expectedLog = expectedLog % (threadIDValue, threadIDValue, threadIDValue)
    assert expectedLog == logstring
Example 28
 def __init__(self, systemName, level, time, msgText, variableText, frameInfo, subSystemName=''):
   from six.moves import _thread as thread
   self.systemName = systemName
   self.level = level
   self.time = time
   self.msgText = str(msgText)
   self.variableText = str(variableText)
   self.frameInfo = frameInfo
   self.subSystemName = subSystemName
   self.threadId = thread.get_ident()
Example 29
 def ZCacheManager_getCache(self):
     key = (get_ident(), self.__cacheid)
     try:
         return caches[key]
     except KeyError:
         cache = Memcached()
         settings = self.getSettings()
         cache.initSettings(settings)
         caches[key] = cache
         return cache
Example 30
def dump_threads():
    """Dump running threads

    Returns a string with the tracebacks.
    """
    this_thread_id = _thread.get_ident()
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    res = ["Threads traceback dump at %s\n" % now]
    for thread_id, frame in six.iteritems(_current_frames()):
        if thread_id == this_thread_id:
            continue

        # Find request in frame
        reqinfo = ''
        f = frame
        while f is not None:
            co = f.f_code
            if (co.co_name == 'publish'
                    and co.co_filename.endswith('/ZPublisher/Publish.py')):
                request = f.f_locals.get('request')
                if request is not None:
                    reqinfo = (request.get('REQUEST_METHOD', '') + ' ' +
                               request.get('PATH_INFO', ''))
                    qs = request.get('QUERY_STRING')
                    if qs:
                        reqinfo += '?' + qs
                break
            f = f.f_back
        if reqinfo:
            reqinfo = " (%s)" % reqinfo

        mysql_info = ''
        f = frame
        try:
            from Products.ZMySQLDA.db import DB
            while f is not None:
                code = f.f_code
                if code is DB._query.__code__:
                    mysql_info = "\nMySQL query:\n%s\n" % f.f_locals['query']
                    break
                f = f.f_back
        except ImportError:
            pass

        output = StringIO()
        traceback.print_stack(frame, file=output)
        res.append("Thread %s%s:\n%s%s" %
                   (thread_id, reqinfo, output.getvalue(), mysql_info))

    res.append("End of dump\n")
    result = '\n'.join(res)
    if isinstance(result, six.text_type):
        result = result.encode('utf-8')
    return result
Example 31
def _current_thread():
    # This is a custom version of `threading.current_thread`
    # that does not try to create a `DummyThread` on `KeyError`.
    ident = _thread.get_ident()
    try:
        thread = threading._active[ident]
    except KeyError:
        name = None
    else:
        name = thread.name
    return ident, name
Example 32
 def _profile(self, profiler, frame, event, arg):
     t = thread_clock()
     thread_id = _thread.get_ident()
     sampled_at = self.sampled_times.get(thread_id, 0)
     if t - sampled_at < self.interval:
         return
     self.sampled_times[thread_id] = t
     profiler.sample(frame)
     self.counter += 1
     if self.counter % 10000 == 0:
         self._clear_for_dead_threads()
Example 33
 def closeConnection(self):
     """ Close this threads connection. Used when DA is being reused
         but the connection string has changed. Need to close the db_cls
         instances and recreate to the new database. Only have to worry
         about this thread as when each thread hits the new connection
         string in the DA this method will be called.
     """
     ident = get_ident()
     try:
         self._pool_del(ident)
     except KeyError:
         pass
Example 34
 def _access_db(self, method_id, args, kw):
     """
       Generic method to call pooled objects' methods.
        If the current thread has never issued any call, create a db_cls
        instance.
     """
     ident = get_ident()
     db = self._pool_get(ident)
     if db is None:
         db = self._db_cls(**self._db_flags)
         self._pool_set(ident, db)
     return getattr(db, method_id)(*args, **kw)
Example 35
    def changeSkin(self, skinname, REQUEST=None):
        '''Change the current skin.

        Can be called manually, allowing the user to change
        skins in the middle of a request.
        '''
        skinobj = self.getSkin(skinname)
        if skinobj is not None:
            tid = get_ident()
            SKINDATA[tid] = (skinobj, skinname, {}, {})
            if REQUEST is not None:
                REQUEST._hold(SkinDataCleanup(tid))
Example 36
    def __new__(cls, statsfile=None):
        current_thread = _thread.get_ident()

        def make_database(statsfile):

            def connect(cache):
                # sqlite needs to get the name in utf-8 on all platforms
                cache.con = dbapi2.connect(statsfile.encode('utf-8') if six.PY2 else statsfile)
                cache.cur = cache.con.cursor()

            def clear_old_data(cache):
                try:
                    cache.cur.execute("""SELECT min(toolkitbuild) FROM files""")
                    val = cache.cur.fetchone()
                    # If the database is empty, we have no idea whether its layout
                    # is correct, so we might as well delete it.
                    if val is None or val[0] < toolkitversion.build:
                        cache.con.close()
                        del cache
                        os.unlink(statsfile)
                        return True
                    return False
                except dbapi2.OperationalError:
                    return False

            cache = cls._caches.setdefault(current_thread, {})[statsfile] = object.__new__(cls)
            connect(cache)
            if clear_old_data(cache):
                connect(cache)
            cache.create()
            return cache

        if not statsfile:
            if not cls.defaultfile:
                userdir = os.path.expanduser("~")
                cachedir = None
                if os.name == "nt":
                    cachedir = os.path.join(userdir, "Translate Toolkit")
                else:
                    cachedir = os.path.join(userdir, ".translate_toolkit")
                if not os.path.exists(cachedir):
                    os.mkdir(cachedir)
                if isinstance(cachedir, bytes):
                    cachedir = six.text_type(cachedir, sys.getfilesystemencoding())
                cls.defaultfile = os.path.realpath(os.path.join(cachedir, u"stats.db"))
            statsfile = cls.defaultfile
        else:
            statsfile = os.path.realpath(statsfile)
        # First see if a cache for this file already exists:
        if current_thread in cls._caches and statsfile in cls._caches[current_thread]:
            return cls._caches[current_thread][statsfile]
        # No existing cache. Let's build a new one and keep a copy
        return make_database(statsfile)
Example 37
def account_request(request, end):
    ticket = ITicket(request)
    id = ticket.id
    info = str(IInfo(request))
    _lock.acquire()
    try:
        if end:
            del _state[id]
        else:
            _state[id] = Request(id, info, request, ticket.time, get_ident())
    finally:
        _lock.release()
Example 38
    def changeSkin(self, skinname, REQUEST=None):
        """Change the current skin.

        Can be called manually, allowing the user to change
        skins in the middle of a request.
        """
        skinobj = self.getSkin(skinname)
        if skinobj is not None:
            tid = get_ident()
            SKINDATA[tid] = (skinobj, skinname, {}, {})
            if REQUEST is not None:
                REQUEST._hold(SkinDataCleanup(tid))
Example 39
 def __repr__(self, _repr_running={}):
     # Based on OrderedDict/defaultdict
     call_key = id(self), get_ident()
     if call_key in _repr_running:
         return '...'
     _repr_running[call_key] = 1
     try:
         if not self:
             return '%s(%r)' % (self.__class__.__name__, self._sort_key)
         return '%s(%r, %r)' % (self.__class__.__name__, self._sort_key,
                                list(self.items()))
     finally:
         del _repr_running[call_key]
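
The _repr_running dictionary above is a recursion guard in the style of the pure-Python OrderedDict: keying on (id(self), get_ident()) means a re-entrant repr() of the same object in the same thread returns '...' instead of recursing forever, while other threads are unaffected. A standalone sketch of the pattern:

from _thread import get_ident

class Node:
    def __init__(self):
        self.peer = None  # may end up pointing back at self

    def __repr__(self, _repr_running={}):
        key = id(self), get_ident()
        if key in _repr_running:   # already printing this object in this
            return '...'           # thread: cut the cycle short
        _repr_running[key] = 1
        try:
            return 'Node(peer=%r)' % (self.peer,)
        finally:
            del _repr_running[key]

a = Node()
a.peer = a
print(repr(a))   # Node(peer=...)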
Example 40
    def acquire_thread(self):
        """Run 'start_thread' listeners for the current thread.

        If the current thread has already been seen, any 'start_thread'
        listeners will not be run again.
        """
        thread_ident = _thread.get_ident()
        if thread_ident not in self.threads:
            # We can't just use get_ident as the thread ID
            # because some platforms reuse thread IDs.
            i = len(self.threads) + 1
            self.threads[thread_ident] = i
            self.bus.publish('start_thread', i)
Example 43
 def getCurrentSkinName(self):
     '''Return the current skin name.
     '''
     sd = SKINDATA.get(get_ident())
     if sd is not None:
         _ob, skinname, _ignore, _resolve = sd
         if skinname is not None:
             return skinname
     # nothing here, so assume the default skin
     stool = queryUtility(ISkinsTool)
     if stool is not None:
         return stool.getDefaultSkin()
     # and if that fails...
     return None
Example 44
 def getCurrentSkinName(self):
     """Return the current skin name.
     """
     sd = SKINDATA.get(get_ident())
     if sd is not None:
         _ob, skinname, _ignore, _resolve = sd
         if skinname is not None:
             return skinname
     # nothing here, so assume the default skin
     stool = queryUtility(ISkinsTool)
     if stool is not None:
         return stool.getDefaultSkin()
     # and if that fails...
     return None
Example 45
 def initSettings(self, kw):
     # Note that we lazily allow MemcachedManager
     # to verify the correctness of the internal settings.
     self.__dict__.update(kw)
     servers = kw.get("servers", ("127.0.0.1:11211",))
     self.mirrors = kw.get("mirrors", ())
     debug = kw.get("debug", 1)
     if self.cache is not None:
         self.cache.disconnect_all()
     self.cache = Client(servers, debug=debug, pickleProtocol=-1)
     self.cache.debuglog(
         "(%s) initialized client "
         "with servers: %s" % (get_ident(), ", ".join(servers))
     )
Example 46
def getSecurityManager():
    """Get a security manager, for the current thread.
    """
    thread_id = get_ident()
    manager = _managers.get(thread_id, None)
    if manager is None:
        nobody = getattr(SpecialUsers, 'nobody', None)
        if nobody is None:
            # Initialize SpecialUsers by importing User.py.
            from AccessControl import User  # NOQA: F401
            nobody = SpecialUsers.nobody
        manager = SecurityManager(thread_id, SecurityContext(nobody))  # NOQA
        _managers[thread_id] = manager

    return manager
Example 47
def getSecurityManager():
    """Get a security manager, for the current thread.
    """
    thread_id = get_ident()
    manager = _managers.get(thread_id, None)
    if manager is None:
        nobody = getattr(SpecialUsers, 'nobody', None)
        if nobody is None:
            # Initialize SpecialUsers by importing User.py.
            from AccessControl import User
            nobody = SpecialUsers.nobody
        manager = SecurityManager(thread_id, SecurityContext(nobody))
        _managers[thread_id] = manager

    return manager
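
newSecurityManager and getSecurityManager implement a thread-local registry by hand: a module-level dict keyed by get_ident(). On modern Python the same effect is usually achieved with threading.local, which also avoids stale entries for threads that have exited. A rough sketch of that alternative (not the AccessControl API, just the underlying pattern):

import threading

_local = threading.local()

def new_security_manager(user):
    # one manager per thread, with no explicit thread-id bookkeeping
    _local.manager = {'user': user}

def get_security_manager():
    # fall back to an anonymous manager if this thread has none yet
    return getattr(_local, 'manager', {'user': 'nobody'})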
Example 48
def _safe_lock_release_py2(rlock):
  """Ensure that a threading.RLock is fully released for Python 2.

  The RLock release code is:
    https://github.com/python/cpython/blob/2.7/Lib/threading.py#L187

  The RLock object's release method does not release all of its state if an
  exception is raised in the middle of its operation.  There are three pieces of
  internal state that must be cleaned up:
  - owning thread ident, an integer.
  - entry count, an integer that counts how many times the current owner has
      locked the RLock.
  - internal lock, a threading.Lock instance that handles blocking.

  Args:
    rlock: threading.RLock, lock to fully release.

  Yields:
    None.
  """
  assert isinstance(rlock, threading._RLock)
  ident = _thread.get_ident()
  expected_count = 0
  if rlock._RLock__owner == ident:
    expected_count = rlock._RLock__count
  try:
    yield
  except ThreadTerminationError:
    # Check if the current thread still owns the lock by checking if we can
    # acquire the underlying lock.
    if rlock._RLock__block.acquire(0):
      # Lock is clean, so unlock and we are done.
      rlock._RLock__block.release()
    elif rlock._RLock__owner == ident and expected_count > 0:
      # The lock is still held up the stack, so make sure the count is accurate.
      if rlock._RLock__count != expected_count:
        rlock._RLock__count = expected_count
    elif rlock._RLock__owner == ident or rlock._RLock__owner is None:
      # The internal lock is still acquired, but either this thread or no thread
      # owns it, which means it needs to be hard reset.
      rlock._RLock__owner = None
      rlock._RLock__count = 0
      rlock._RLock__block.release()
    raise
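
The three pieces of internal state named in the docstring (owner ident, entry count, blocking lock) are easiest to see in a toy re-entrant lock. The sketch below only illustrates how an RLock ties ownership to _thread.get_ident(); it is not the CPython implementation:

import threading
import _thread

class ToyRLock:
    def __init__(self):
        self._block = threading.Lock()  # the internal blocking lock
        self._owner = None              # ident of the owning thread
        self._count = 0                 # times the owner has acquired it

    def acquire(self):
        me = _thread.get_ident()
        if self._owner == me:       # re-entrant acquire: just bump the count
            self._count += 1
            return
        self._block.acquire()       # block until no other thread owns it
        self._owner = me
        self._count = 1

    def release(self):
        if self._owner != _thread.get_ident():
            raise RuntimeError('cannot release un-acquired lock')
        self._count -= 1
        if self._count == 0:        # outermost release frees the real lock
            self._owner = None
            self._block.release()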
Example 49
def do_compute(call_no, delay):
    started = utcnow()
    process_id = os.getpid()
    thread_id = _thread.get_ident()

    # yes, we do the evil blocking thing here!
    # this is to simulate CPU intensive stuff
    time.sleep(delay)

    ended = utcnow()

    result = {
        u'call_no': call_no,
        u'started': started,
        u'ended': ended,
        u'process': process_id,
        u'thread': thread_id
    }
    return result
Example 50
 def handler(self, sig, current_frame):
     start = time.time()
     self.samples_remaining -= 1
     if self.samples_remaining <= 0 or self.stopping:
         plop.platform.setitimer(Collector.MODES[self.mode][0], 0, 0)
         self.stopped = True
         return
     current_tid = _thread.get_ident()
     for tid, frame in six.iteritems(sys._current_frames()):
         if tid == current_tid:
             frame = current_frame
         frames = []
         while frame is not None:
             code = frame.f_code
             frames.append((code.co_filename, code.co_firstlineno, code.co_name))
             frame = frame.f_back
         self.stacks.append(frames)
     end = time.time()
     self.samples_taken += 1
     self.sample_time += (end - start)
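
A handler like the one above is normally installed as a signal handler and driven by an interval timer. A minimal Unix-only sketch of that wiring; the Sampler class and its attributes are hypothetical stand-ins, not the plop API:

import signal
import sys
import _thread

class Sampler:
    def __init__(self, interval=0.01):
        self.interval = interval
        self.stacks = []

    def handler(self, sig, current_frame):
        # record the stack of every running thread; for the thread that
        # received the signal, prefer the frame handed to the handler
        current_tid = _thread.get_ident()
        for tid, frame in sys._current_frames().items():
            if tid == current_tid:
                frame = current_frame
            frames = []
            while frame is not None:
                code = frame.f_code
                frames.append((code.co_filename, code.co_firstlineno,
                               code.co_name))
                frame = frame.f_back
            self.stacks.append(frames)

    def start(self):
        signal.signal(signal.SIGPROF, self.handler)
        signal.setitimer(signal.ITIMER_PROF, self.interval, self.interval)

    def stop(self):
        signal.setitimer(signal.ITIMER_PROF, 0, 0)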
Example 51
    def setupCurrentSkin(self, REQUEST=None):
        """
        Sets up skindata so that __getattr__ can find it.

        Can NOT be called manually to change skins in the middle of a
        request! Use changeSkin for that.
        """
        if REQUEST is None:
            return
        if get_ident() in SKINDATA:
            # Already set up for this request.
            return
        skinname = self.getSkinNameFromRequest(REQUEST)
        try:
            self.changeSkin(skinname, REQUEST)
        except ConflictError:
            raise
        except Exception:
            # This shouldn't happen, even if the requested skin
            # does not exist.
            logger.exception('Unable to setupCurrentSkin()')
Example 52
 def get(self):
     return self._toolbars.get(thread.get_ident(), None)
Example 53
 def create(self, request):
     toolbar = DebugToolbar(request)
     self._toolbars[thread.get_ident()] = toolbar
     return toolbar
Example 54
 def T2():
     channels[_thread.get_ident()] = (NotifyChannel(), NotifyChannel())
     m[1*PS]
Example 55
 def release_thread(self):
     """Release the current thread and run 'stop_thread' listeners."""
     thread_ident = _thread.get_ident()
     i = self.threads.pop(thread_ident, None)
     if i is not None:
         self.bus.publish('stop_thread', i)
Example 56
 def loadblk(self, blk, buf):
     # tell driver we are in loadblk and wait until it tells us to go
     cin, cout = channels[_thread.get_ident()]
     cout.tell('ready')
     cin.wait('go')
Example 57
    def wsgi_setup(self, environ=None):
        """
        Set up the member variables used by this WSGI mixin, including
        the ``environ`` and status member variables.

        After the basic environment is created, the optional ``environ``
        argument can be used to override any settings.
        """

        dummy_url = 'http://dummy%s' % (self.path,)
        (scheme, netloc, path, query, fragment) = urlsplit(dummy_url)
        path = unquote(path)
        endslash = path.endswith('/')
        path = posixpath.normpath(path)
        if endslash and path != '/':
            # Put the slash back...
            path += '/'
        (server_name, server_port) = self.server.server_address[:2]

        rfile = self.rfile
        # We can put in the protection to keep from over-reading the
        # file
        try:
            content_length = int(self.headers.get('Content-Length', '0'))
        except ValueError:
            content_length = 0
        if '100-continue' == self.headers.get('Expect','').lower():
            rfile = LimitedLengthFile(ContinueHook(rfile, self.wfile.write), content_length)
        else:
            if not hasattr(self.connection, 'get_context'):
                # @@: LimitedLengthFile is currently broken in connection
                # with SSL (sporadic errors that are difficult to trace, but
                # ones that go away when you don't use LimitedLengthFile)
                rfile = LimitedLengthFile(rfile, content_length)

        remote_address = self.client_address[0]
        self.wsgi_environ = {
                'wsgi.version': (1,0)
               ,'wsgi.url_scheme': 'http'
               ,'wsgi.input': rfile
               ,'wsgi.errors': sys.stderr
               ,'wsgi.multithread': True
               ,'wsgi.multiprocess': False
               ,'wsgi.run_once': False
               # CGI variables required by PEP-333
               ,'REQUEST_METHOD': self.command
               ,'SCRIPT_NAME': '' # application is root of server
               ,'PATH_INFO': path
               ,'QUERY_STRING': query
               ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
               ,'CONTENT_LENGTH': self.headers.get('Content-Length', '0')
               ,'SERVER_NAME': server_name
               ,'SERVER_PORT': str(server_port)
               ,'SERVER_PROTOCOL': self.request_version
               # CGI not required by PEP-333
               ,'REMOTE_ADDR': remote_address
               }
        if scheme:
            self.wsgi_environ['paste.httpserver.proxy.scheme'] = scheme
        if netloc:
            self.wsgi_environ['paste.httpserver.proxy.host'] = netloc

        if self.lookup_addresses:
            # @@: make lookup_addresses actually work; at this point
            #     address_string() is overridden further down in the
            #     file and hence this is a noop
            if remote_address.startswith("192.168.") \
            or remote_address.startswith("10.") \
            or remote_address.startswith("172.16."):
                pass
            else:
                address_string = None # self.address_string()
                if address_string:
                    self.wsgi_environ['REMOTE_HOST'] = address_string

        if hasattr(self.server, 'thread_pool'):
            # Now that we know what the request was for, we should
            # tell the thread pool what its worker is working on
            self.server.thread_pool.worker_tracker[_thread.get_ident()][1] = self.wsgi_environ
            self.wsgi_environ['paste.httpserver.thread_pool'] = self.server.thread_pool

        for k, v in self.headers.items():
            key = 'HTTP_' + k.replace("-","_").upper()
            if key in ('HTTP_CONTENT_TYPE','HTTP_CONTENT_LENGTH'):
                continue
            self.wsgi_environ[key] = ','.join(self.headers.get(k))

        if hasattr(self.connection,'get_context'):
            self.wsgi_environ['wsgi.url_scheme'] = 'https'
            # @@: extract other SSL parameters from pyOpenSSL at...
            # http://www.modssl.org/docs/2.8/ssl_reference.html#ToC25

        if environ:
            assert isinstance(environ, dict)
            self.wsgi_environ.update(environ)
            if 'on' == environ.get('HTTPS'):
                self.wsgi_environ['wsgi.url_scheme'] = 'https'

        self.wsgi_curr_headers = None
        self.wsgi_headers_sent = False
Example 58
 def worker_thread_callback(self, message=None):
     """
     Worker thread should call this method to get and process queued
     callables.
     """
     thread_obj = threading.currentThread()
     thread_id = thread_obj.thread_id = _thread.get_ident()
     self.workers.append(thread_obj)
     self.idle_workers.append(thread_id)
     requests_processed = 0
     add_replacement_worker = False
     self.logger.debug('Started new worker %s: %s', thread_id, message)
     try:
         while True:
             if self.max_requests and self.max_requests < requests_processed:
                 # Replace this thread then die
                 self.logger.debug('Thread %s processed %i requests (limit %s); stopping thread'
                                   % (thread_id, requests_processed, self.max_requests))
                 add_replacement_worker = True
                 break
             runnable = self.queue.get()
             if runnable is ThreadPool.SHUTDOWN:
                 self.logger.debug('Worker %s asked to SHUTDOWN', thread_id)
                 break
             try:
                 self.idle_workers.remove(thread_id)
             except ValueError:
                 pass
             self.worker_tracker[thread_id] = [time.time(), None]
             requests_processed += 1
             try:
                 try:
                     runnable()
                 except:
                     # We are later going to call sys.exc_clear(),
                     # removing all remnants of any exception, so
                     # we should log it now.  But ideally no
                     # exception should reach this level
                     print('Unexpected exception in worker %r' % runnable,
                           file=sys.stderr)
                     traceback.print_exc()
                 if thread_id in self.dying_threads:
                     # That last exception was intended to kill me
                     break
             finally:
                 try:
                     del self.worker_tracker[thread_id]
                 except KeyError:
                     pass
                 if six.PY2:
                     sys.exc_clear()
             self.idle_workers.append(thread_id)
     finally:
         try:
             del self.worker_tracker[thread_id]
         except KeyError:
             pass
         try:
             self.idle_workers.remove(thread_id)
         except ValueError:
             pass
         try:
             self.workers.remove(thread_obj)
         except ValueError:
             pass
         try:
             del self.dying_threads[thread_id]
         except KeyError:
             pass
         if add_replacement_worker:
             self.add_worker_thread(message='Voluntary replacement for thread %s' % thread_id)
Example 59
 def pop(self):
     return self._toolbars.pop(thread.get_ident(), None)
Example 60
def get_ident():
    """Return the 'thread identifier' of the current thread."""
    return _thread.get_ident()
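
Wrappers like this last one exist mainly for Python 2/3 compatibility: the low-level module was renamed from thread to _thread in Python 3, and threading.get_ident() only became available in Python 3.3. A common import shim, as a sketch:

try:
    # Python 3.3+: public and preferred
    from threading import get_ident
except ImportError:
    try:
        from _thread import get_ident    # Python 3 low-level module
    except ImportError:
        from thread import get_ident     # Python 2

print('current thread ident:', get_ident())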