Example no. 1
def getOldHiddenService(desc = 0):
    db = MySQLdb.connect(host=ConfigLoader.host, user=ConfigLoader.user, passwd=ConfigLoader.password,
                         db=ConfigLoader.db, use_unicode=True,
                         charset="utf8")
    cursor = db.cursor()


    if desc == 0:
        cursor.execute(
            "UPDATE HiddenServices SET Status=1, LatestScan=%s, ResponsibleThread=%s WHERE Status=2 OR Status=3 ORDER BY LatestScan LIMIT 1",
            (datetime.datetime.now(), threading._get_ident()))
    else:
        cursor.execute(
            "UPDATE HiddenServices SET Status=1, LatestScan=%s, ResponsibleThread=%s WHERE Status=2 OR Status=3 ORDER BY LatestScan DESC LIMIT 1",
            (datetime.datetime.now(), threading._get_ident()))
    db.commit()
    threadNumber = threading._get_ident()

    #Retrieves the hidden service
    cursor.execute("SELECT Id, Url FROM HiddenServices WHERE Status=1 AND ResponsibleThread=%s", (threadNumber,))
    if cursor.rowcount == 0:
        db.close()
        time.sleep(1)
        return None
    res = cursor.fetchall()
    db.close()
    return res[0]
Example no. 2
def get_cache_con():
	if not cache_con_map.has_key(threading._get_ident()):
		cache_con = sqlite.connect(":memory:")
		cur = cache_con.cursor()
		cur.execute("create table plugin_result_cache(url string, timestamp long, content string)")
		cur.close()
		cache_con_map[threading._get_ident()] = cache_con
	con = cache_con_map[threading._get_ident()]
	return con
Example no. 3
def get_cache_con():
	if not cache_con_map.has_key(threading._get_ident()):
		cache_con = sqlite.connect(":memory:")
		cur = cache_con.cursor()
		cur.execute("create table plugin_result_cache(url string, timestamp long, content string)")
		cur.close()
		cache_con_map[threading._get_ident()] = cache_con
	con = cache_con_map[threading._get_ident()]
	return con
Example no. 4
 def __insertion(self, link_prev, key_value):
     self.__insertions_running[_get_ident()] = 1
     self.__setitem__(*key_value)
     key, value = self.__insertions_running.pop(_get_ident())
     if link_prev[2] != key:
         if key in self:
             del self[key]
         link_next = link_prev[1]
         self._OrderedDict__map[key] = link_prev[1] = link_next[0] = [
             link_prev, link_next, key
         ]
     dict.__setitem__(self, key, value)
Example no. 5
 def speichern(self):
     self._speichernd.append((self, threading._get_ident()))
     try:
         with open(self.datei_name(), 'wb') as f:
             pickle.dump(self, f)
     except:
         if not self.speichern_fehlgeschlagen:
             self.speichern_fehlgeschlagen = True
             traceback.print_exc()
             print self.name, "kann nicht gespeichert werden."
     finally:
         self._speichernd.remove((self, threading._get_ident()))
Example no. 6
    def startRequest(self):
        """Initialise the DB and starts a new transaction.
        """

        conn = self._getConnObject()
        if conn is None:
            self._conn[threading._get_ident()] = self._db.open()
            Logger.get('dbmgr').debug(
                'Allocated connection for thread %s - table size is %s' %
                (threading._get_ident(), len(self._conn)))
        else:
            Logger.get('dbmgr').debug(
                'Reused connection for thread %s - table size is %s' %
                (threading._get_ident(), len(self._conn)))
Example no. 7
def findphoto(bs,url):
    global x
    jieguo=bs.findAll(name ="img",attrs={"src":re.compile(r"^http://")})  #re.compile(r"^http://")
    for temp in jieguo:
        print "find picture %s"% temp["src"]
        print threading._get_ident()
        if(re.compile(r"http://").findall(temp["src"])):
            download(temp["src"])                            # download method 1
        else:
            print "\n\n\n\n\n\n\n"
            b=urlparse.urlparse(url)
            tempurl=b[0]+r"://"+b[1]+r"/"+temp["src"]
            print tempurl
            download(tempurl)
Example no. 8
 def get_repository(self, authname):
     if not self._connector:
         candidates = []
         for connector in self.connectors:
             for repos_type_, prio in connector.get_supported_types():
                 if self.repository_type != repos_type_:
                     continue
                 heappush(candidates, (-prio, connector))
         if not candidates:
             raise TracError('Unsupported version control system "%s". '
                             'Check that the Python bindings for "%s" are '
                             'correctly installed.' %
                             ((self.repository_type,)*2))
         self._connector = heappop(candidates)[1]
     db = self.env.get_db_cnx() # prevent possible deadlock, see #4465
     try:
         self._lock.acquire()
         tid = threading._get_ident()
         if tid in self._cache:
             repos = self._cache[tid]
         else:
             rtype, rdir = self.repository_type, self.repository_dir
             repos = self._connector.get_repository(rtype, rdir, authname)
             self._cache[tid] = repos
         return repos
     finally:
         self._lock.release()
Example no. 9
 def patched_show(*args, **kw):
     tid = threading._get_ident() if six.PY2 else threading.get_ident()
     PatchedMatplotlib._recursion_guard[tid] = True
     # noinspection PyBroadException
     try:
         figures = PatchedMatplotlib._get_output_figures(None,
                                                         all_figures=True)
         for figure in figures:
             # if this is a stale figure (just updated) we should send it, the rest will not be stale
             if figure.canvas.figure.stale or (
                     hasattr(figure, '_trains_is_imshow')
                     and figure._trains_is_imshow):
                 PatchedMatplotlib._report_figure(stored_figure=figure)
     except Exception:
         pass
     ret = PatchedMatplotlib._patched_original_plot(*args, **kw)
     if PatchedMatplotlib._current_task and sys.modules[
             'matplotlib'].rcParams['backend'] == 'agg':
         # clear the current plot, because no one else will
         # noinspection PyBroadException
         try:
             if sys.modules['matplotlib'].rcParams['backend'] == 'agg':
                 import matplotlib.pyplot as plt
                 plt.clf()
         except Exception:
             pass
     PatchedMatplotlib._recursion_guard[tid] = False
     return ret
Example no. 10
    def request(self, clientAddress, remoteHost, scheme="http"):
        """Obtain an HTTP Request object.
        
        clientAddress: the (IP address, port) of the client
        remoteHost: the IP address of the client
        scheme: either "http" or "https"; defaults to "http"
        """
        if self.state == STOPPED:
            raise cherrypy.NotReady("The CherryPy server has stopped.")
        elif self.state == STARTING:
            raise cherrypy.NotReady("The CherryPy server could not start.")

        threadID = threading._get_ident()
        if threadID not in self.seen_threads:

            if cherrypy.codecoverage:
                from cherrypy.lib import covercp

                covercp.start()

            i = len(self.seen_threads) + 1
            self.seen_threads[threadID] = i

            for func in self.on_start_thread_list:
                func(i)

        r = self.request_class(clientAddress[0], clientAddress[1], remoteHost, scheme)
        cherrypy.serving.request = r
        cherrypy.serving.response = self.response_class()
        return r
Example no. 11
def currentThread():
    from threading import _get_ident, _active, _DummyThread

    try:
        return _active[_get_ident()]
    except KeyError:
        return _DummyThread()
Example no. 12
    def request(self,
                local_host,
                remote_host,
                scheme="http",
                server_protocol="HTTP/1.1"):
        """Obtain and return an HTTP Request object. (Core)
        
        local_host should be an http.Host object with the server info.
        remote_host should be an http.Host object with the client info.
        scheme: either "http" or "https"; defaults to "http"
        """
        if self.state == STOPPED:
            req = NotReadyRequest("The CherryPy engine has stopped.")
        elif self.state == STARTING:
            req = NotReadyRequest("The CherryPy engine could not start.")
        else:
            # Only run on_start_thread_list if the engine is running.
            threadID = threading._get_ident()
            if threadID not in self.seen_threads:
                i = len(self.seen_threads) + 1
                self.seen_threads[threadID] = i

                for func in self.on_start_thread_list:
                    func(i)
            req = self.request_class(local_host, remote_host, scheme,
                                     server_protocol)
        resp = self.response_class()
        cherrypy.serving.load(req, resp)
        self.servings.append((req, resp))
        return req
Example no. 13
 def release(self):
     if self.__owner != _get_ident():
         raise RuntimeError("cannot release un-acquired lock")
     self.__count -= 1
     if not self.__count:
         self.__owner = None
         self.__block.release()
Example no. 14
  def __bootstrap(self):
    try:
      self._set_ident()
      self._Thread__started.set()
      threading._active_limbo_lock.acquire()
      threading._active[self._Thread__ident] = self
      del threading._limbo[self]
      threading._active_limbo_lock.release()

      if threading._trace_hook:
        sys.settrace(threading._trace_hook)
      if threading._profile_hook:
        sys.setprofile(threading._profile_hook)

      try:
        self.run()
      finally:
        self._Thread__exc_clear()
    finally:
      with threading._active_limbo_lock:
        self._Thread__stop()
        try:
          del threading._active[threading._get_ident()]
        except:
          pass
Example no. 15
 def get_repository(self, authname):
     if not self._connector:
         candidates = []
         for connector in self.connectors:
             for repos_type_, prio in connector.get_supported_types():
                 if self.repository_type != repos_type_:
                     continue
                 heappush(candidates, (-prio, connector))
         if not candidates:
             raise TracError(
                 u'Système de contrôle de version non supporté "%s"' %
                 self.repository_type)
         self._connector = heappop(candidates)[1]
     try:
         self._lock.acquire()
         tid = threading._get_ident()
         if tid in self._cache:
             repos = self._cache[tid]
         else:
             rtype, rdir = self.repository_type, self.repository_dir
             repos = self._connector.get_repository(rtype, rdir, authname)
             self._cache[tid] = repos
         return repos
     finally:
         self._lock.release()
Example no. 16
    def do_test_stop_others(self):
        self.build()
        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
                                                                            "Set a breakpoint here",
                                                                            self.main_source_file)
        # First run with stop others false and see that we got that.
        thread_id = ""
        if sys.version_info.major == 2:
            thread_id = str(threading._get_ident())
        else:
            thread_id = str(threading.get_ident())

        # all-threads should set stop others to False.
        self.run_step(False, "all-threads", thread_id)

        # this-thread should set stop others to True
        self.run_step(True, "this-thread", thread_id)

        # The default value should be stop others:
        self.run_step(True, None, thread_id)

        # The target.process.run-all-threads should override this:
        interp = self.dbg.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        
        interp.HandleCommand("settings set target.process.run-all-threads true", result)
        self.assertTrue(result.Succeeded(), "setting run-all-threads works.")

        self.run_step(False, None, thread_id)
Example no. 17
 def get_cnx(self, timeout=None):
     start = time.time()
     self._available.acquire()
     try:
         tid = threading._get_ident()
         if tid in self._active:
             self._active[tid][0] += 1
             return PooledConnection(self, self._active[tid][1])
         while True:
             if self._dormant:
                 cnx = self._dormant.pop()
                 break
             elif self._maxsize and self._cursize < self._maxsize:
                 cnx = self._cnx_class(**self._args)
                 self._cursize += 1
                 break
             else:
                 if timeout:
                     self._available.wait(timeout)
                     if (time.time() - start) >= timeout:
                         raise TimeoutError, 'Unable to get database ' \
                                             'connection within %d seconds' \
                                             % timeout
                 else:
                     self._available.wait()
         self._active[tid] = [1, cnx]
         return PooledConnection(self, cnx)
     finally:
         self._available.release()
Example no. 18
def t_func(queue, lock, output_path):
    mynum = threading._get_ident()
    while 1:
        try:
            row = queue.get_nowait()
        except Queue.Empty:
            lock.acquire()
            #Handle empty queue here
            print "END"
            lock.release()
            return 0
        else:
            data = {}
            sz = queue.qsize()
            if sz % 50 == 0:
                lock.acquire()
                print "Remaining: " + str(queue.qsize())
                now = datetime.now()
                s = '%d:%d:%d' % (now.hour, now.minute, now.second)
                print "Time: " + s
                lock.release()
            uri_doremus = row[1]
            names_doremus = row[2].split('|||')
            storage_file = output_path + row[5]
            composers_doremus = row[6]
            try:
                data = getItems(names_doremus, composers_doremus)
                with open(storage_file, 'w+') as outfile:
                    json.dump(data, outfile)
            except:
                lock.acquire()
                print 'Request Error'
                lock.release()
Example no. 19
  def __bootstrap(self):
    try:
      self._set_ident()
      self._Thread__started.set()
      threading._active_limbo_lock.acquire()
      threading._active[self._Thread__ident] = self
      del threading._limbo[self]
      threading._active_limbo_lock.release()

      if threading._trace_hook:
        sys.settrace(threading._trace_hook)
      if threading._profile_hook:
        sys.setprofile(threading._profile_hook)

      try:
        self.run()
      finally:
        self._Thread__exc_clear()
    finally:
      with threading._active_limbo_lock:
        self._Thread__stop()
        try:
          del threading._active[threading._get_ident()]
        except:
          pass
Example no. 20
 def request(self, local_host, remote_host, scheme="http",
             server_protocol="HTTP/1.1"):
     """Obtain and return an HTTP Request object. (Core)
     
     local_host should be an http.Host object with the server info.
     remote_host should be an http.Host object with the client info.
     scheme: either "http" or "https"; defaults to "http"
     """
     if self.state == STOPPED:
         req = NotReadyRequest("The CherryPy engine has stopped.")
     elif self.state == STARTING:
         req = NotReadyRequest("The CherryPy engine could not start.")
     else:
         # Only run on_start_thread_list if the engine is running.
         threadID = threading._get_ident()
         if threadID not in self.seen_threads:
             i = len(self.seen_threads) + 1
             self.seen_threads[threadID] = i
             
             for func in self.on_start_thread_list:
                 func(i)
         req = self.request_class(local_host, remote_host, scheme,
                                  server_protocol)
     resp = self.response_class()
     cherrypy.serving.load(req, resp)
     self.servings.append((req, resp))
     return req
Example no. 21
def getNewHiddenService():
    db = MySQLdb.connect(host=ConfigLoader.host, user=ConfigLoader.user, passwd=ConfigLoader.password, db=ConfigLoader.db, use_unicode=True,
                         charset="utf8")
    cursor = db.cursor()

    #Marks the hidden service as 'under analysis'
    cursor.execute("UPDATE HiddenServices SET Status=1, FirstScan=%s, LatestScan=%s, ResponsibleThread=%s WHERE Status=0 ORDER BY Id LIMIT 1", (datetime.datetime.now(), datetime.datetime.now(), threading._get_ident()))
    db.commit()

    threadNumber = threading._get_ident()

    #Retrieves it.
    cursor.execute("SELECT Id, Url FROM HiddenServices WHERE Status=1 AND ResponsibleThread=%s", (threadNumber, ))
    result = cursor.fetchall()

    #In case no hidden service was returned, it returns None.
    if cursor.rowcount == 0:
        db.close()
        time.sleep(1)
        return None

    #Returns the hidden service's Id.
    id = int(result[0][0])
    db.close()
    return id
Example no. 22
 def get_repository(self, authname):
     if not self._connector:
         candidates = []
         for connector in self.connectors:
             for repos_type_, prio in connector.get_supported_types():
                 if self.repository_type != repos_type_:
                     continue
                 heappush(candidates, (-prio, connector))
         if not candidates:
             raise TracError('Unsupported version control system "%s". '
                             'Check that the Python bindings for "%s" are '
                             'correctly installed.' %
                             ((self.repository_type, ) * 2))
         self._connector = heappop(candidates)[1]
     db = self.env.get_db_cnx()  # prevent possible deadlock, see #4465
     try:
         self._lock.acquire()
         tid = threading._get_ident()
         if tid in self._cache:
             repos = self._cache[tid]
         else:
             rtype, rdir = self.repository_type, self.repository_dir
             repos = self._connector.get_repository(rtype, rdir, authname)
             self._cache[tid] = repos
         return repos
     finally:
         self._lock.release()
Example no. 23
    def patched_savefig(self, *args, **kw):
        ret = PatchedMatplotlib._patched_original_savefig(self, *args, **kw)
        # noinspection PyBroadException
        try:
            fname = kw.get('fname') or args[0]
            from pathlib2 import Path
            if six.PY3:
                from pathlib import Path as Path3
            else:
                Path3 = Path

            # if we are not storing into a file (str/Path) do not log the matplotlib
            if not isinstance(fname, (str, Path, Path3)):
                return ret
        except Exception:
            pass

        tid = threading._get_ident() if six.PY2 else threading.get_ident()
        if not PatchedMatplotlib._recursion_guard.get(tid):
            PatchedMatplotlib._recursion_guard[tid] = True
            # noinspection PyBroadException
            try:
                PatchedMatplotlib._report_figure(specific_fig=self,
                                                 set_active=False)
            except Exception:
                pass
            PatchedMatplotlib._recursion_guard[tid] = False

        return ret
Example no. 24
 def release(self):
     if self.__owner != _get_ident():
         raise RuntimeError("cannot release un-acquired lock")
     self.__count -= 1
     if not self.__count:
         self.__owner = None
         self.__block.release()
Example no. 25
 def shutdown(self, tid=None):
     if tid:
         assert tid == threading._get_ident()
         try:
             self._lock.acquire()
             self._cache.pop(tid, None)
         finally:
             self._lock.release()
Example no. 26
 def get_connection(self):
     key = self.dbpath + str(threading._get_ident())
     if key in _MAP_OF_CONNECTIONS:
         return _MAP_OF_CONNECTIONS[key]
     conn = sqlite3.connect(self.dbpath)
     print "Trying to open", self.dbpath
     _MAP_OF_CONNECTIONS[key] = conn
     return conn
Example no. 27
 def shutdown(self, tid=None):
     if tid:
         assert tid == threading._get_ident()
         try:
             self._lock.acquire()
             self._cache.pop(tid, None)
         finally:
             self._lock.release()
Example no. 28
    def getInstance(create=True):
        tid = threading._get_ident()
        instance = DALManager._instances.get(tid)

        if not instance and create:
            minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
            instance = DBConnection(minfo)
            DALManager._instances[tid] = instance
        return instance
Example no. 29
    def getInstance(create=True):
        tid = threading._get_ident()
        instance = DALManager._instances.get(tid)

        if not instance and create:
            minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
            instance = DBConnection(minfo)
            DALManager._instances[tid] = instance
        return instance
Example no. 30
 def __init__(self, namespace):
     if hasattr(threading, "get_ident"):
         id = threading.get_ident()
     else:
         id = threading._get_ident()
     logging.debug("NamespacedCache.__init__, thread=%s", id)
     self.namespace = namespace
     self.stop_cache = False
     return
Example no. 31
def _thread_bootstrap_2_7(self):
    """This is a replacement "method" for the Thread class in Python 2.7,
    designed to let an exception fall through to the debugger."""

    # noinspection PyProtectedMember
    # noinspection PyUnresolvedReferences
    # noinspection PyProtectedMember
    # noinspection PyUnresolvedReferences
    from threading import (
        _active,
        _active_limbo_lock,
        _get_ident,
        _limbo,
        _profile_hook,
        _sys,
        _trace_hook,
    )

    try:
        self._set_ident()
        self._Thread__started.set()
        with _active_limbo_lock:
            _active[self._Thread__ident] = self
            del _limbo[self]
        if __debug__:
            self._note("%s.__bootstrap(): thread started", self)

        # if _trace_hook:
        #     self._note("%s.__bootstrap(): registering trace hook", self)
        #     _sys.settrace(_trace_hook)
        if _profile_hook:
            self._note("%s.__bootstrap(): registering profile hook", self)
            _sys.setprofile(_profile_hook)

        try:
            self.run()
        except SystemExit:
            if __debug__:
                self._note("%s.__bootstrap(): raised SystemExit", self)
        else:
            if __debug__:
                self._note("%s.__bootstrap(): normal return", self)
        finally:
            # Prevent a race in
            # test_threading.test_no_refcycle_through_target when
            # the exception keeps the target alive past when we
            # assert that it's dead.
            self._Thread__exc_clear()
    finally:
        with _active_limbo_lock:
            self._Thread__stop()
            try:
                # We don't call self.__delete() because it also
                # grabs _active_limbo_lock.
                del _active[_get_ident()]
            except:
                pass
Example no. 32
def fix_main_thread_id(on_warn=lambda msg:None, on_exception=lambda msg:None, on_critical=lambda msg:None):
    # This means that we weren't able to import threading in the main thread (which most
    # likely means that the main thread is paused or in some very long operation).
    # In this case we'll import threading here and hotfix what may be wrong in the threading
    # module (if we're on Windows where we create a thread to do the attach and on Linux
    # we are not certain on which thread we're executing this code).
    #
    # The code below is a workaround for https://bugs.python.org/issue37416
    import sys
    import threading

    try:
        with threading._active_limbo_lock:
            main_thread_instance = get_main_thread_instance(threading)

            if sys.platform == 'win32':
                # On windows this code would be called in a secondary thread, so,
                # the current thread is unlikely to be the main thread.
                if hasattr(threading, '_get_ident'):
                    unlikely_thread_id = threading._get_ident()  # py2
                else:
                    unlikely_thread_id = threading.get_ident()  # py3
            else:
                unlikely_thread_id = None

            main_thread_id, critical_warning = get_main_thread_id(unlikely_thread_id)

            if main_thread_id is not None:
                main_thread_id_attr = '_ident'
                if not hasattr(main_thread_instance, main_thread_id_attr):
                    main_thread_id_attr = '_Thread__ident'
                    assert hasattr(main_thread_instance, main_thread_id_attr)

                if main_thread_id != getattr(main_thread_instance, main_thread_id_attr):
                    # Note that we also have to reset the '_tstack_lock' for a regular lock.
                    # This is needed to avoid an error on shutdown because this lock is bound
                    # to the thread state and will be released when the secondary thread
                    # that initialized the lock is finished -- making an assert fail during
                    # process shutdown.
                    main_thread_instance._tstate_lock = threading._allocate_lock()
                    main_thread_instance._tstate_lock.acquire()

                    # Actually patch the thread ident as well as the threading._active dict
                    # (we should have the _active_limbo_lock to do that).
                    threading._active.pop(getattr(main_thread_instance, main_thread_id_attr), None)
                    setattr(main_thread_instance, main_thread_id_attr, main_thread_id)
                    threading._active[getattr(main_thread_instance, main_thread_id_attr)] = main_thread_instance

        # Note: only import from pydevd after the patching is done (we want to do the minimum
        # possible when doing that patching).
        on_warn('The threading module was not imported by user code in the main thread. The debugger will attempt to work around https://bugs.python.org/issue37416.')

        if critical_warning:
            on_critical('Issue found when debugger was trying to work around https://bugs.python.org/issue37416:\n%s' % (critical_warning,))
    except:
        on_exception('Error patching main thread id.')
Example no. 33
    def record_stop(self,
                    uriset=None,
                    slow_queries=1.0,
                    slow_queries_count=100,
                    debug=False,
                    **kwargs):
        """Record the end of a request."""
        resp = cherrypy.serving.response
        w = appstats['Requests'][threading._get_ident()]

        r = cherrypy.request.rfile.bytes_read
        w['Bytes Read'] = r
        appstats['Total Bytes Read'] += r

        if resp.stream:
            w['Bytes Written'] = 'chunked'
        else:
            cl = int(resp.headers.get('Content-Length', 0))
            w['Bytes Written'] = cl
            appstats['Total Bytes Written'] += cl

        w['Response Status'] = getattr(resp, 'output_status',
                                       None) or resp.status

        w['End Time'] = time.time()
        p = w['End Time'] - w['Start Time']
        w['Processing Time'] = p
        appstats['Total Time'] += p

        appstats['Current Requests'] -= 1

        if debug:
            cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')

        if uriset:
            rs = appstats.setdefault('URI Set Tracking', {})
            r = rs.setdefault(
                uriset, {
                    'Min': None,
                    'Max': None,
                    'Count': 0,
                    'Sum': 0,
                    'Avg': average_uriset_time
                })
            if r['Min'] is None or p < r['Min']:
                r['Min'] = p
            if r['Max'] is None or p > r['Max']:
                r['Max'] = p
            r['Count'] += 1
            r['Sum'] += p

        if slow_queries and p > slow_queries:
            sq = appstats.setdefault('Slow Queries', [])
            sq.append(w.copy())
            if len(sq) > slow_queries_count:
                sq.pop(0)
Example no. 34
 def acquire(self,blocking=True,timeout=None):
     me = _get_ident()
     if self.__owner == me:
         self.__count += 1
         return True
     if self.__block.acquire(blocking,timeout):
         self.__owner = me
         self.__count = 1
         return True
     return False
Example no. 35
 def acquire(self,blocking=True,timeout=None):
     me = _get_ident()
     if self.__owner == me:
         self.__count += 1
         return True
     if self.__block.acquire(blocking,timeout):
         self.__owner = me
         self.__count = 1
         return True
     return False
Example no. 36
    def startRequest( self ):
        """Initialise the DB and starts a new transaction.
        """

        conn = self._getConnObject()
        if conn is None:
            self._conn[threading._get_ident()]=self._db.open()
            Logger.get('dbmgr').debug('Allocated connection for thread %s - table size is %s' % (threading._get_ident(), len(self._conn)))
        else:
            Logger.get('dbmgr').debug('Reused connection for thread %s - table size is %s' % (threading._get_ident(), len(self._conn)))
Example no. 37
    def end_suitesetup(self, name, attributes):

        end_test_attributes = {
            'critical': 'yes',
            'doc': 'Test Suite Setup section',
            'starttime': attributes['starttime'],
            'endtime': attributes['endtime'],
            'status': 'PASS',
            'tags': [],
            'id': 's1-s1-t0',
            'longname': BuiltIn().get_variable_value('${SUITE_NAME}'),
            'template': ''
        }

        test = self.stack.pop()
        BuiltIn().run_keyword(name)

        if end_test_attributes.get('status') == Robot.PASS:
            test.status = Status.PASSED
        elif end_test_attributes.get('status') == Robot.FAIL:
            test.status = Status.FAILED
            test.failure = Failure(message=end_test_attributes.get('message'),
                                   trace='')
        elif end_test_attributes.get('doc') != '':
            test.description = attributes.get('doc')

        if end_test_attributes['tags']:
            for tag in end_test_attributes['tags']:
                if re.search(self.AllureIssueIdRegEx, tag):
                    test.labels.append(TestLabel(name=Label.ISSUE, value=tag))
                if tag.startswith('feature'):
                    test.labels.append(
                        TestLabel(name='feature', value=tag.split(':')[-1]))
                if tag.startswith('story'):
                    test.labels.append(
                        TestLabel(name='story', value=tag.split(':')[-1]))
                elif tag in SEVERITIES:
                    test.labels.append(TestLabel(name='severity', value=tag))
                elif tag in STATUSSES:
                    test.status = tag  # overwrites the actual test status with this value.

        self.PabotPoolId = BuiltIn().get_variable_value(
            '${PABOTEXECUTIONPOOLID}')

        if (self.PabotPoolId is not None):
            self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
        else:
            self.threadId = threading._get_ident()

        test.labels.append(TestLabel(name='thread', value=str(self.threadId)))

        self.testsuite.tests.append(test)
        test.stop = now()
        return test
Example no. 38
	def gotThreadMsg(self, msg=None):
		
		from ctypes import CDLL
		SYS_gettid = 4222
		libc = CDLL("libc.so.6")
		tid = libc.syscall(SYS_gettid)
		splog('SP: Worker got message: ', currentThread(), _get_ident(), self.ident, os.getpid(), tid )
		
		data = self.__messages.pop()
		if callable(self.callback):
			self.callback(data)
Example no. 39
    def end_test(self, name, attributes):
#         logger.console('\nend_test: ['+name+']')
#         logger.console(attributes)
#         logger.console('   [stack lenght] ['+str(len(self.stack))+'] [testsuite lenght] ['+ str(len(self.testsuite.tests))+']')

        test = self.stack.pop()
        
        if attributes.get('status') == Robot.PASS:
            test.status = Status.PASSED
        elif attributes.get('status')==Robot.FAIL:
            test.status = Status.FAILED
            test.failure = Failure(message=attributes.get('message'), trace='')
        elif attributes.get('doc') != '':
            test.description = attributes.get('doc')

        if attributes['tags']:
            for tag in attributes['tags']:
                if re.search(self.AllureIssueIdRegEx, tag):
                    test.labels.append(TestLabel(
                        name=Label.ISSUE,
                        value=tag))
                elif tag.startswith('feature'):
                    test.labels.append(TestLabel(
                        name='feature',
                        value=tag.split(':')[-1]))
                elif tag.startswith('story'):
                    test.labels.append(TestLabel(
                        name='story',
                        value=tag.split(':')[-1]))
                elif tag in SEVERITIES:
                    test.labels.append(TestLabel(
                        name='severity',
                        value=tag))                    
                    test.severity = tag
                elif tag in STATUSSES:
                    test.status = tag  # overwrites the actual test status with this value.
                else:
                    test.labels.append(TestLabel(
                        name='tag',
                        value=tag))

        self.PabotPoolId =  BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
        if(self.PabotPoolId is not None):
            self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
        else:
            self.threadId = threading._get_ident()
                
        test.labels.append(TestLabel(
            name='thread',
            value=str(self.threadId)))

        self.testsuite.tests.append(test)
        test.stop = now()        
        return test
Example no. 40
    def patched_figure_show(self, *args, **kw):
        tid = threading._get_ident() if six.PY2 else threading.get_ident()
        if PatchedMatplotlib._recursion_guard.get(tid):
            # we are inside a guard, do nothing
            return PatchedMatplotlib._patched_original_figure(self, *args, **kw)

        PatchedMatplotlib._recursion_guard[tid] = True
        PatchedMatplotlib._report_figure(set_active=False, specific_fig=self)
        ret = PatchedMatplotlib._patched_original_figure(self, *args, **kw)
        PatchedMatplotlib._recursion_guard[tid] = False
        return ret
Example no. 41
	def gotThreadMsg(self, msg=None):
		
		from ctypes import CDLL
		SYS_gettid = 4222
		libc = CDLL("libc.so.6")
		tid = libc.syscall(SYS_gettid)
		splog('SP: Worker got message: ', currentThread(), _get_ident(), self.ident, os.getpid(), tid )
		
		data = self.__messages.pop()
		if callable(self.callback):
			self.callback(data)
Example no. 42
 def release(self):
     if not self.is_locked:
         return False
     ident = threading._get_ident()
     ot = self.owner_thread
     if ident != self.current_owner:
         return False
     self.lock_count -= 1
     if self.lock_count == 0:
         self.current_owner = None
         self.__lock.release()
     return True
Example no. 43
 def __repr__(self, _repr_running={}):
     'od.__repr__() <==> repr(od)'
     call_key = id(self), _get_ident()
     if call_key in _repr_running:
         return '...'
     _repr_running[call_key] = 1
     try:
         if not self:
             return '%s()' % (self.__class__.__name__, )
         return '%s(%r)' % (self.__class__.__name__, self.items())
     finally:
         del _repr_running[call_key]
Example no. 44
 def __repr__(self, _repr_running={}):
     'od.__repr__() <==> repr(od)'
     call_key = id(self), _get_ident()
     if call_key in _repr_running:
         return '...'
     _repr_running[call_key] = 1
     try:
         if not self:
             return '%s()' % (self.__class__.__name__,)
         return '%s(%r)' % (self.__class__.__name__, self.items())
     finally:
         del _repr_running[call_key]
Example no. 45
 def release(self):
     if not self.is_locked:
         return False
     ident = threading._get_ident()
     ot = self.owner_thread
     if ident != self.current_owner:
         return False
     self.lock_count -= 1
     if self.lock_count == 0:
         self.current_owner = None
         self.__lock.release()
     return True
Example no. 46
def __ThreadIDPrint():
    msg = ''
    if not Debug_Setting.Thread_ID_Show:
        return msg
    thread_name = threading.currentThread().getName()
    if thread_name is None:
        thread_name = ''
    msg += '[Thread:%d\t' % threading._get_ident()
    if Debug_Setting.Thread_Name_Show:
        msg += '%s\t' % thread_name
    msg += ']\t'
    return msg
Example no. 47
	def run(self):
		
		from ctypes import CDLL
		SYS_gettid = 4222
		libc = CDLL("libc.so.6")
		tid = libc.syscall(SYS_gettid)
		splog('SP: Worker got message: ', currentThread(), _get_ident(), self.ident, os.getpid(), tid )
		
		while not self.__queue.empty():
			
			# NOTE: we have to check this here and not in the while condition, to prevent the parser from being started on shutdown
			if not self.__running: break
			
			item = self.__queue.pop()
			
			splog('SP: Worker is processing')
			
			result = None
			
			try:
				result = item.identifier.getEpisode(
					item.name, item.begin, item.end, item.service
				)
			except Exception, e:
				splog("SP: Worker: Exception:", str(e))
				
				# Exception finish job with error
				result = str(e)
			
			config.plugins.seriesplugin.lookup_counter.value += 1
			
			if result and len(result) == 4:
				splog("SP: Worker: result callback")
				season, episode, title, series = result
				season = int(CompiledRegexpNonDecimal.sub('', season))
				episode = int(CompiledRegexpNonDecimal.sub('', episode))
				title = title.strip()
				if config.plugins.seriesplugin.replace_chars.value:
					repl = re.compile('['+config.plugins.seriesplugin.replace_chars.value.replace("\\", "\\\\\\\\")+']')
					
					splog("SP: refactor title", title)
					title = repl.sub('', title)
					splog("SP: refactor title", title)
					
					splog("SP: refactor series", series)
					series = repl.sub('', series)
					splog("SP: refactor series", series)
				self.__messages.push( (item.callback, (season, episode, title, series)) )
			else:
				splog("SP: Worker: result failed")
				self.__messages.push( (item.callback, result) )
			self.__pump.send(0)
Example no. 48
	def add(self, item):
		
		from ctypes import CDLL
		SYS_gettid = 4222
		libc = CDLL("libc.so.6")
		tid = libc.syscall(SYS_gettid)
		splog('SP: Worker add from thread: ', currentThread(), _get_ident(), self.ident, os.getpid(), tid )
		
		self.__queue.push(item)
		
		if not self.__running:
			self.__running = True
			self.start() # Start blocking code in Thread
Example no. 49
 def acquire_thread(self):
     """Run 'start_thread' listeners for the current thread.
     
     If the current thread has already been seen, any 'start_thread'
     listeners will not be run again.
     """
     thread_ident = threading._get_ident()
     if thread_ident not in self.threads:
         # We can't just use _get_ident as the thread ID
         # because some platforms reuse thread ID's.
         i = len(self.threads) + 1
         self.threads[thread_ident] = i
         self.bus.publish('start_thread', i)
Example no. 50
 def wrlock(self, blocking=True):
     """
     Get a Write lock
     """
     me = _get_ident()
     with self.lock:
         while not self._wrlock(me):
             if not blocking:
                 return False
             self.nw += 1
             self.wcond.wait()
             self.nw -= 1
     return True
Example no. 51
 def get_cnx(self, connector, kwargs, timeout=None):
     num = 1
     cnx = None
     log = kwargs.get('log')
     key = unicode(kwargs)
     start = time.time()
     tid = threading._get_ident()
     self._available.acquire()
     try:
         while True:
             # First choice: Return the same cnx already used by the thread
             if (tid, key) in self._active:
                 cnx, num = self._active[(tid, key)]
                 num += 1
             # Second best option: Reuse a live pooled connection
             elif key in self._pool_key:
                 idx = self._pool_key.index(key)
                 self._pool_key.pop(idx)
                 self._pool_time.pop(idx)
                 cnx = self._pool.pop(idx)
                 # If possible, verify that the pooled connection is
                 # still available and working.
                 if hasattr(cnx, 'ping'):
                     try:
                         cnx.ping()
                     except:
                         continue
             # Third best option: Create a new connection
             elif len(self._active) + len(self._pool) < self._maxsize:
                 cnx = connector.get_connection(**kwargs)
             # Fourth best option: Replace a pooled connection with a new one
             elif len(self._active)  < self._maxsize:
                 # Remove the LRU connection in the pool
                 self._pool.pop(0).close()
                 self._pool_key.pop(0)
                 self._pool_time.pop(0)
                 cnx = connector.get_connection(**kwargs)
             if cnx:
                 self._active[(tid, key)] = (cnx, num)
                 return PooledConnection(self, cnx, key, tid, log)
             # Worst option: wait until a connection pool slot is available
             if timeout and (time.time() - start) > timeout:
                 raise TimeoutError(_('Unable to get database '
                                      'connection within %(time)d '
                                      'seconds', time=timeout))
             elif timeout:
                 self._available.wait(timeout)
             else:
                 self._available.wait()
     finally:
         self._available.release()
Example no. 52
def _thread_bootstrap_2_7(self):
    """This is a replacement "method" for the Thread class in Python 2.7,
    designed to let an exception fall through to the debugger."""

    # noinspection PyProtectedMember
    # noinspection PyUnresolvedReferences
    from threading import _active_limbo_lock, _active, _limbo, _trace_hook

    # noinspection PyProtectedMember
    # noinspection PyUnresolvedReferences
    from threading import _profile_hook, _sys, _get_ident

    try:
        self._set_ident()
        self._Thread__started.set()
        with _active_limbo_lock:
            _active[self._Thread__ident] = self
            del _limbo[self]
        if __debug__:
            self._note("%s.__bootstrap(): thread started", self)

        # if _trace_hook:
        #     self._note("%s.__bootstrap(): registering trace hook", self)
        #     _sys.settrace(_trace_hook)
        if _profile_hook:
            self._note("%s.__bootstrap(): registering profile hook", self)
            _sys.setprofile(_profile_hook)

        try:
            self.run()
        except SystemExit:
            if __debug__:
                self._note("%s.__bootstrap(): raised SystemExit", self)
        else:
            if __debug__:
                self._note("%s.__bootstrap(): normal return", self)
        finally:
            # Prevent a race in
            # test_threading.test_no_refcycle_through_target when
            # the exception keeps the target alive past when we
            # assert that it's dead.
            self._Thread__exc_clear()
    finally:
        with _active_limbo_lock:
            self._Thread__stop()
            try:
                # We don't call self.__delete() because it also
                # grabs _active_limbo_lock.
                del _active[_get_ident()]
            except:
                pass
Example no. 53
 def __init__(self, level, syslog = False):
     levels = {'critical': logging.CRITICAL,
               'error': logging.ERROR,
               'warning': logging.WARNING,
               'info': logging.INFO,
               'debug': logging.DEBUG}
     self.logger = logging.getLogger()
     self.logger.setLevel(levels.get(level))
     if syslog:
         for handler in self.logger.handlers:
             if isinstance(handler, logging.StreamHandler):
                 self.logger.removeHandler(handler)
         self.logger.addHandler(SysLogHandler(address='/dev/log'))
     self.id = hex(threading._get_ident())[2:]
Example no. 54
    def _getThreadContext(cls, forceCleanup=False):
        """
        * forceCleanup - forces the context to be reset
        """

        tid = threading._get_ident()
        contextDict = cls._getContextDict()

        if forceCleanup:
            contextDict[tid] = {}

        if tid in contextDict:
            return contextDict[tid]
        else:
            raise cls.NoContextException(tid)
Example no. 55
 def rdlock(self, blocking=True):
     """
     Read lock the lock
     """
     me = _get_ident()
     with self.lock:
         while not self._rdlock(me):
             if not blocking:
                 return False
             # keep track of the number of readers waiting to limit
             # the number of notify_all() calls required.
             self.nr += 1
             self.rcond.wait()
             self.nr -= 1
     return True
Example no. 56
    def record_stop(
            self, uriset=None, slow_queries=1.0, slow_queries_count=100,
            debug=False, **kwargs):
        """Record the end of a request."""
        resp = cherrypy.serving.response
        w = appstats['Requests'][threading._get_ident()]

        r = cherrypy.request.rfile.bytes_read
        w['Bytes Read'] = r
        appstats['Total Bytes Read'] += r

        if resp.stream:
            w['Bytes Written'] = 'chunked'
        else:
            cl = int(resp.headers.get('Content-Length', 0))
            w['Bytes Written'] = cl
            appstats['Total Bytes Written'] += cl

        w['Response Status'] = getattr(
            resp, 'output_status', None) or resp.status

        w['End Time'] = time.time()
        p = w['End Time'] - w['Start Time']
        w['Processing Time'] = p
        appstats['Total Time'] += p

        appstats['Current Requests'] -= 1

        if debug:
            cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')

        if uriset:
            rs = appstats.setdefault('URI Set Tracking', {})
            r = rs.setdefault(uriset, {
                'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
                'Avg': average_uriset_time})
            if r['Min'] is None or p < r['Min']:
                r['Min'] = p
            if r['Max'] is None or p > r['Max']:
                r['Max'] = p
            r['Count'] += 1
            r['Sum'] += p

        if slow_queries and p > slow_queries:
            sq = appstats.setdefault('Slow Queries', [])
            sq.append(w.copy())
            if len(sq) > slow_queries_count:
                sq.pop(0)
Example no. 57
 def record_start(self):
     request = cherrypy.serving.request
     if not hasattr(request.rfile, 'bytes_read'):
         request.rfile = ByteCountWrapper(request.rfile)
         request.body.fp = request.rfile
     r = request.remote
     appstats['Current Requests'] += 1
     appstats['Total Requests'] += 1
     appstats['Requests'][threading._get_ident()] = {'Bytes Read': None,
      'Bytes Written': None,
      'Client': lambda s: '%s:%s' % (r.ip, r.port),
      'End Time': None,
      'Processing Time': proc_time,
      'Request-Line': request.request_line,
      'Response Status': None,
      'Start Time': time.time()}
Example no. 58
 def _cleanup(self, tid):
     """Note: self._available *must* be acquired when calling this one."""
     if tid in self._active:
         cnx = self._active.pop(tid)[1]
         assert tid not in self._dormant # hm, how could that happen?
         if cnx.poolable: # i.e. we can manipulate it from other threads
             if try_rollback(cnx):
                 self._dormant[tid] = cnx
             else:
                 self._cursize -= 1
         elif tid == threading._get_ident():
             if try_rollback(cnx): # non-poolable but same thread: close
                 cnx.close()
             self._cursize -= 1
         else: # non-poolable, different thread: push it back
             self._active[tid] = [0, cnx]
         self._available.notify()
Example no. 59
 def acquire(self, **kwargs):
     blocking = kwargs.get('blocking', True)
     ident = threading._get_ident()
     ot = self.owner_thread
     if self.is_locked:
         if ident == self.current_owner:
             self.lock_count += 1
             return True
         else:
             if ot is not None and ot.ident != ident:
                 return False
     else:
         if ot is not None and ot.ident != ident:
             return False
         self.__lock.acquire(blocking)
         self.current_owner = ident
         self.lock_count += 1