Example #1
        def getValue(key):
            """Get the dynamic value of `key`"""
            try:
                value = values[key]

            except KeyError:
                this_thread = get_ident()
                rule = getRule(key)     # this ensures distances[key] is known

                stack = set_stack(this_thread, [])
                stack.append(distances[key])

                try:
                    value = key.__apply__(key, rule)
                finally:
                    distance = stack.pop()
                    if not stack:
                        del computing[this_thread]
                    else:
                        stack[-1] = min(stack[-1], distance)

                value = publish(distance, key, value)

            else:
                if computing:
                    stack = get_stack(get_ident())
                    if stack:
                        stack[-1] = min(stack[-1], distances[key])

            return value
Example #2
 def testDoubleCallbackToPython(self):
     """Test a call to managed code that then calls back into Python
        that then calls managed code that then calls Python again."""
     dprint("thread %s DoubleCallBack" % thread.get_ident())
     result = ThreadTest.CallEchoString2("spam")
     self.assertTrue(result == "spam")
     dprint("thread %s DoubleCallBack ret" % thread.get_ident())
Example #3
    def acquire(self, flag=1):
        """Acquire the lock.

        If the optional flag argument is false, returns immediately
        when it cannot acquire the __wait lock without blocking (it
        may still block for a little while in order to acquire the
        __mutex lock).

        The return value is only relevant when the flag argument is
        false; it is 1 if the lock is acquired, 0 if not.

        """
        self.__mutex.acquire()
        try:
            if self.__tid == thread.get_ident():
                self.__count = self.__count + 1
                return 1
        finally:
            self.__mutex.release()
        locked = self.__wait.acquire(flag)
        if not flag and not locked:
            return 0
        try:
            self.__mutex.acquire()
            assert self.__tid is None
            assert self.__count == 0
            self.__tid = thread.get_ident()
            self.__count = 1
            return 1
        finally:
            self.__mutex.release()
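The acquire half above pairs with the release half shown later in Example #50. As a compact illustration of the same ownership-by-thread-id idea, the following is a minimal sketch of a counted re-entrant lock; the TinyRLock name and layout are illustrative only, not taken from the excerpt (in practice threading.RLock already provides this behaviour).

import _thread
import threading

class TinyRLock:
    """Illustrative counted lock owned by the thread id that acquired it."""

    def __init__(self):
        self._lock = threading.Lock()
        self._owner = None      # thread id of the current holder
        self._count = 0         # nested acquire count

    def acquire(self):
        me = _thread.get_ident()
        if self._owner == me:   # re-entrant acquire by the owning thread
            self._count += 1
            return True
        self._lock.acquire()    # block until the lock is free
        self._owner = me
        self._count = 1
        return True

    def release(self):
        assert self._owner == _thread.get_ident(), "not the owning thread"
        self._count -= 1
        if self._count == 0:
            self._owner = None
            self._lock.release()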
Example #4
def work(a, b, c, d, lock):
	lock.acquire()
	print(time.strftime('%H:%M:%S',time.localtime()), '----', a, '----', _thread.get_ident())
	print(time.strftime('%H:%M:%S',time.localtime()), '----', b, '----', _thread.get_ident())
	print(time.strftime('%H:%M:%S',time.localtime()), '----', c, '----', _thread.get_ident())
	print(time.strftime('%H:%M:%S',time.localtime()), '----', d, '----', _thread.get_ident())
	lock.release()
Example #5
    def test_signals(self):
        with support.wait_threads_exit():
            # Test signal handling semantics of threads.
            # We spawn a thread, have the thread send two signals, and
            # wait for it to finish. Check that we got both signals
            # and that they were run by the main thread.
            signalled_all.acquire()
            self.spawnSignallingThread()
            signalled_all.acquire()

        # the signals that we asked the kernel to send
        # will come back, but we don't know when.
        # (it might even be after the thread exits
        # and might be out of order.)  If we haven't seen
        # the signals yet, send yet another signal and
        # wait for it return.
        if signal_blackboard[signal.SIGUSR1]['tripped'] == 0 \
           or signal_blackboard[signal.SIGUSR2]['tripped'] == 0:
            try:
                signal.alarm(1)
                signal.pause()
            finally:
                signal.alarm(0)

        self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped'], 1)
        self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped_by'],
                           thread.get_ident())
        self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped'], 1)
        self.assertEqual( signal_blackboard[signal.SIGUSR2]['tripped_by'],
                           thread.get_ident())
        signalled_all.release()
Example #6
 def do_get(self, url, locator, settings=None, headers=None, https=True):
     '''
         use for all get methods
         returns the response data
     '''
     # avoid mutable default arguments; the headers dict is modified below
     settings = settings if settings is not None else {}
     headers = headers if headers is not None else {}
     api_qa_cert = os.getenv("PYTAF_HOME") + "/resources/cert.pem"
     certificate_file = settings.get("cert_file", api_qa_cert)
     host = settings.get('host', url)
     cookie = settings.get('cookie', None)
     content_type = settings.get('content_type', "text/xml")
     print("*** GET *** (thr: %s, t: %s) %s" %
           (_thread.get_ident(), time.time(), url + locator))
     if https:
         conn = httplib.HTTPSConnection(url, cert_file=certificate_file)
     else:
         conn = httplib.HTTPConnection(url)
     headers["Content-type"] = content_type
     if host is not None:
         headers['host'] = host
     if cookie is not None:
         headers['Set-Cookie'] = cookie
     start_time = time.time()
     conn.request("GET", locator, '', headers)
     response = conn.getresponse()
     end_time = time.time()
     if DEBUG:
         print("http response time: (thr: %s, t: %s)" %
             (_thread.get_ident(), round(float(end_time - start_time), 2)))
     if DEBUG:
         print(response.status, response.reason)
     data = response.read().decode()  # read() returns a bytes object
     if DEBUG:
         print(data)
     return {"data": data, "status": response.status,
              "reason": response.reason}
Example #7
 def run_thread():
     for i in range(10):
         time.sleep(0.1)
         dprint("thread %s %d" % (thread.get_ident(), i))
         mstr = String("thread %s %d" % (thread.get_ident(), i))
         pstr = mstr.ToString()
         done.append(None)
         dprint("thread %s %d done" % (thread.get_ident(), i))
Example #8
 def lookup(key):
     """Return the value of `key` in the current state"""
     try:
         state, getRule, lookup, child = states[get_ident()]
     except KeyError:
         empty().swap()
         state, getRule, lookup, child = states[get_ident()]
     return lookup(key)
Example #9
def service_thread(conn, addr):
    (caddr, cport) = addr
    print("Thread %s has connection from %s.\n" % (str(_thread.get_ident()),
                                                   caddr), end=' ')
    stdin = conn.makefile("r")
    stdout = conn.makefile("w", 0)
    run_interpreter(stdin, stdout)
    print("Thread %s is done.\n" % str(_thread.get_ident()), end=' ')
Example #10
 def get(key=None):
     try:
         state, getRule, lookup, child = states[get_ident()]
     except KeyError:
         empty().swap()
         state, getRule, lookup, child = states[get_ident()]
     if key is None:
         return state
     return getRule(key)
Example #11
    def test_get_ident(self):
        import _thread

        ident = _thread.get_ident()
        feedback = []

        def f():
            feedback.append(_thread.get_ident())

        ident2 = _thread.start_new_thread(f, ())
        assert ident2 != ident
        assert ident == _thread.get_ident()
        self.waitfor(lambda: feedback)
        assert feedback == [ident2]
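As a side note, the self-contained sketch below (not part of the test above, and assuming Python 3.3+ where threading.get_ident() exists) shows that threading.get_ident() agrees with _thread.get_ident() within a thread while differing between threads.

import _thread
import threading

def record_ident(results):
    results.append((_thread.get_ident(), threading.get_ident()))

results = []
worker = threading.Thread(target=record_ident, args=(results,))
worker.start()
worker.join()

low_level, high_level = results[0]
assert low_level == high_level                # both APIs agree inside the worker
assert low_level != threading.get_ident()     # and differ from the main thread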
Example #12
 def validate_thread_sharing(self):
     """
     Validate that the connection isn't accessed by another thread than the
     one which originally created it, unless the connection was explicitly
     authorized to be shared between threads (via the `allow_thread_sharing`
     property). Raise an exception if the validation fails.
     """
     if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
         raise DatabaseError(
             "DatabaseWrapper objects created in a "
             "thread can only be used in that same thread. The object "
             "with alias '%s' was created in thread id %s and this is "
             "thread id %s."
             % (self.alias, self._thread_ident, _thread.get_ident())
         )
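Stripped of the Django specifics, the guard boils down to remembering the creating thread's id and comparing it on every access. The ThreadBoundResource name below is illustrative, not taken from the excerpt.

import _thread

class ThreadBoundResource:
    def __init__(self, allow_thread_sharing=False):
        self.allow_thread_sharing = allow_thread_sharing
        self._thread_ident = _thread.get_ident()     # id of the creating thread

    def validate_thread_sharing(self):
        if not (self.allow_thread_sharing
                or self._thread_ident == _thread.get_ident()):
            raise RuntimeError(
                "created in thread %s, used from thread %s"
                % (self._thread_ident, _thread.get_ident()))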
Example #13
def _proceed_stack_method(self):
    stackname = '__proceed_stack' + str(_thread.get_ident())
    try:
        return getattr(self, stackname)
    except AttributeError:
        setattr(self, stackname, [])
        return getattr(self, stackname)
Example #14
File: common.py  Project: Scalr/kombu
def oid_from(instance, threads=True):
    return generate_oid(
        get_node_id(),
        os.getpid(),
        get_ident() if threads else 0,
        instance,
    )
Example #15
File: ioloop.py  Project: pitrou/tornado
 def add_callback(self, callback, *args, **kwargs):
     if thread.get_ident() != self._thread_ident:
         # If we're not on the IOLoop's thread, we need to synchronize
         # with other threads, or waking logic will induce a race.
         with self._callback_lock:
             if self._closing:
                 return
             list_empty = not self._callbacks
             self._callbacks.append(functools.partial(
                 stack_context.wrap(callback), *args, **kwargs))
             if list_empty:
                 # If we're not in the IOLoop's thread, and we added the
                 # first callback to an empty list, we may need to wake it
                 # up (it may wake up on its own, but an occasional extra
                 # wake is harmless).  Waking up a polling IOLoop is
                 # relatively expensive, so we try to avoid it when we can.
                 self._waker.wake()
     else:
         if self._closing:
             return
         # If we're on the IOLoop's thread, we don't need the lock,
         # since we don't need to wake anyone, just add the
         # callback. Blindly insert into self._callbacks. This is
         # safe even from signal handlers because the GIL makes
         # list.append atomic. One subtlety is that if the signal
         # is interrupting another thread holding the
         # _callback_lock block in IOLoop.start, we may modify
         # either the old or new version of self._callbacks, but
         # either way will work.
         self._callbacks.append(functools.partial(
             stack_context.wrap(callback), *args, **kwargs))
Example #16
 def __init__(self):
     self.requested = False
     self.returned = False
     self.func = None
     self.args = None
     self.result = None
     self.main_thread_id = _thread.get_ident()
Example #17
def child():
    global exitstat                               # process global names
    exitstat += 1                                 # shared by all threads
    threadid = thread.get_ident()
    print('Hello from child', threadid, exitstat)
    thread.exit()
    print('never reached')
Example #18
def savepoint_rollback(sid):
    """
    Rolls back the most recent savepoint (if one exists). Does nothing if
    savepoints are not supported.
    """
    if _thread.get_ident() in savepoint_state:
        connection._savepoint_rollback(sid)
Example #19
    def test_callbacks(self):
        if os.environ.get('NONETWORK'):
            return
        self.queue = []
        req = UrlRequest('http://google.com',
                         on_success=self._on_success,
                         on_progress=self._on_progress,
                         on_error=self._on_error,
                         on_redirect=self._on_redirect,
                         debug=True)

        # don't use wait, but maximum 10s timeout
        for i in range(50):
            Clock.tick()
            sleep(.5)
            if req.is_finished:
                break

        self.assertTrue(req.is_finished)

        # we should have 2 progress minimum and one success
        self.assertTrue(len(self.queue) >= 3)

        # ensure the callback is called from this thread (main).
        tid = _thread.get_ident()
        self.assertEqual(self.queue[0][0], tid)
        self.assertEqual(self.queue[-2][0], tid)
        self.assertEqual(self.queue[-1][0], tid)

        self.assertEqual(self.queue[0][1], 'progress')
        self.assertEqual(self.queue[-2][1], 'progress')
        self.assertIn(self.queue[-1][1], ('success', 'redirect'))

        self.assertEqual(self.queue[0][2][0], 0)
        self.assertEqual(self.queue[-2][2][0], self.queue[-2][2][1])
Example #20
def child():
	global exitstat
	exitstat += 1
	threadid = _thread.get_ident()
	print('Hello from child', threadid, exitstat)
	_thread.exit()
	print('never reached')
Example #21
 def attachThread(self, target=None, args=None, kwargs=None,
                  mainThread=False):
     """
     Public method to set up a thread for DebugClient to debug.
     
     If mainThread is non-zero, then we are attaching to the already
     started mainthread of the app and the rest of the args are ignored.
     
     @param target the start function of the target thread (i.e. the
         user code)
     @param args arguments to pass to target
     @param kwargs keyword arguments to pass to target
     @param mainThread True, if we are attaching to the already
           started mainthread of the app
     @return identifier of the created thread
     """
     try:
         self.lockClient()
         newThread = DebugThread(self, target, args, kwargs, mainThread)
         ident = -1
         if mainThread:
             ident = _thread.get_ident()
             self.mainThread = newThread
             if self.debugging:
                 sys.setprofile(newThread.profile)
         else:
             ident = _original_start_thread(newThread.bootstrap, ())
         newThread.set_ident(ident)
         self.threads[newThread.get_ident()] = newThread
     finally:
         self.unlockClient()
     return ident
Example #22
 def getConnection(self):
     # SQLite can't share connections between threads, and so can't
     # pool connections.  Since we are isolating threads here, we
     # don't have to worry about locking as much.
     if self._memory:
         conn = self.makeConnection()
         self._connectionNumbers[id(conn)] = self._connectionCount
         self._connectionCount += 1
         return conn
     threadid = get_ident()
     if (self._pool is not None and threadid in self._threadPool):
         conn = self._threadPool[threadid]
         del self._threadPool[threadid]
         if conn in self._pool:
             self._pool.remove(conn)
     else:
         conn = self.makeConnection()
         if self._pool is not None:
             self._threadOrigination[id(conn)] = threadid
         self._connectionNumbers[id(conn)] = self._connectionCount
         self._connectionCount += 1
     if self.debug:
         s = 'ACQUIRE'
         if self._pool is not None:
             s += ' pool=[%s]' % ', '.join(
                 [str(self._connectionNumbers[id(v)]) for v in self._pool])
         self.printDebug(conn, s, 'Pool')
     return conn
Example #23
    def current_frames_with_threads(self):
        import threading, _thread
        import traceback

        # Spawn a thread that blocks at a known place.  Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id

        def f123():
            g456()

        def g456():
            thread_info.append(_thread.get_ident())
            entered_g.set()
            leave_g.wait()

        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()

        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]

        d = sys._current_frames()

        main_id = _thread.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)

        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())

        # Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")

        self.assertEqual(sourceline, "g456()")

        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])

        # Reap the spawned thread.
        leave_g.set()
        t.join()
Example #24
def savepoint_commit(sid):
    """
    Commits the most recent savepoint (if one exists). Does nothing if
    savepoints are not supported.
    """
    if _thread.get_ident() in savepoint_state:
        connection._savepoint_commit(sid)
Example #25
File: debug.py  Project: bobfrank/pydebug
 def __setup_thread_tracer(self, setting):
     empty_obj = ctypes.py_object()
     if setting:
         func = Py_tracefunc(breakpoint_tracer)
         arg = self
     else:
         func = ctypes.cast(None, Py_tracefunc)
         arg = empty_obj
     global debug__has_loaded_thread
     interp = ctypes.pythonapi.PyInterpreterState_Head()
     t      = ctypes.pythonapi.PyInterpreterState_ThreadHead(interp)
     while t != 0:
         t_p = ctypes.cast(t,ctypes.POINTER(PyThreadState))
         if t_p[0].thread_id != _thread.get_ident() and t_p[0].thread_id != debug__has_loaded_thread:
             try:
                 temp = t_p[0].c_traceobj
             except ValueError:
                 temp = None
             if arg != empty_obj: #Py_XINCREF
                 #ctypes.pythonapi._Total
                 refcount = ctypes.c_long.from_address(id(arg))
                 refcount.value += 1
             t_p[0].c_tracefunc = ctypes.cast(None, Py_tracefunc)
             t_p[0].c_traceobj  = empty_obj
             t_p[0].use_tracing = int(t_p[0].c_profilefunc is not None)
             if temp is not None: #Py_XDECREF
                 refcount = ctypes.c_long.from_address(id(temp))
                 refcount.value -= 1 #don't need to dealloc since we have a ref in here and it'll always be >0
             t_p[0].c_tracefunc = func
             t_p[0].c_traceobj  = arg
             t_p[0].use_tracing = int((func is not None) or (t_p[0].c_profilefunc is not None))
         t = ctypes.pythonapi.PyThreadState_Next(t)
Example #26
def _query_pageant(msg):
    """
    Communication with the Pageant process is done through a shared
    memory-mapped file.
    """
    hwnd = _get_pageant_window_object()
    if not hwnd:
        # Raise a failure to connect exception, pageant isn't running anymore!
        return None

    # create a name for the mmap
    map_name = 'PageantRequest%08x' % thread.get_ident()

    pymap = _winapi.MemoryMap(map_name, _AGENT_MAX_MSGLEN,
        _winapi.get_security_attributes_for_user(),
        )
    with pymap:
        pymap.write(msg)
        # Create an array buffer containing the mapped filename
        char_buffer = array.array("b", b(map_name) + zero_byte)  # noqa
        char_buffer_address, char_buffer_size = char_buffer.buffer_info()
        # Create a string to use for the SendMessage function call
        cds = COPYDATASTRUCT(_AGENT_COPYDATA_ID, char_buffer_size,
            char_buffer_address)

        response = ctypes.windll.user32.SendMessageA(hwnd,
            win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds))

        if response > 0:
            pymap.seek(0)
            datalen = pymap.read(4)
            retlen = struct.unpack('>I', datalen)[0]
            return datalen + pymap.read(retlen)
        return None
Example #27
def query_by_name(cls, name):
    """returns a lazy load function capable of caching object

    Use this alternative for classes with dynamic attributes (names
    not hardcoded in class definition), as property decorators
    (i.e. @lazyprop) don't function properly.

    As the SQLAlchemy session is NOT thread safe, we include the thread
    identifier in the key

    NB - attribute instances must be unique over (cls.__name__, name)
    within the containing class to avoid collisions.

    @param cls: ORM class to query
    @param name: name field in ORM class to uniquely define object

    """
    attr_name = '_lazy_{}_{}.{}'.format(
        cls.__name__, name, _thread.get_ident())

    def lookup(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, cls.query.filter_by(name=name).one())
        attr = getattr(self, attr_name)

        # ORM objects (especially in testing) are occasionally detached
        if attr not in db.session:
            # reload - merge fails on collected objs
            attr = cls.query.filter_by(name=name).one()
            setattr(self, attr_name, attr)
        return attr

    return lookup
Example #28
def lazyprop(fn):
    """Property decorator for lazy intialization (load on first request)

    Useful on any expensive to load attribute on any class.  Simply
    decorate the 'getter' with @lazyprop, where the function definition
    loads the object to be assigned to the given attribute.

    As the SQLAlchemy session is NOT thread safe and this tends to be
    the primary use of the lazyprop decorator, we include the thread
    identifier in the key

    """
    attr_name = '_lazy_{}.{}'.format(fn.__name__, _thread.get_ident())

    @property
    def _lazyprop(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))

        attr = getattr(self, attr_name)

        # ORM objects (especially in testing) are occasionally detached
        if _is_sql_wrapper(attr):
            if attr not in db.session:
                attr = fn(self)  # reload - merge fails on collected objs
                setattr(self, attr_name, attr)
        return attr

    return _lazyprop
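A dependency-free sketch of the same idea, with the SQLAlchemy re-attachment logic omitted: computing the attribute name at access time (rather than at decoration time, as above) makes the cache genuinely per-thread. The per_thread_lazyprop and Report names are illustrative only.

import _thread

def per_thread_lazyprop(fn):
    @property
    def _lazyprop(self):
        attr_name = '_lazy_{}.{}'.format(fn.__name__, _thread.get_ident())
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))     # load once per thread
        return getattr(self, attr_name)
    return _lazyprop

class Report:
    @per_thread_lazyprop
    def rows(self):
        # stand-in for an expensive query; evaluated at most once per thread
        return [1, 2, 3]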
Example #29
File: mount.py  Project: ambled/main
def setup_exchook():
    '''Send SIGTERM if any other thread terminates with an exception

    The exc_info will be saved in the list object returned
    by this function.
    '''

    main_thread = _thread.get_ident()
    old_exchook = sys.excepthook
    exc_info = []

    def exchook(exc_type, exc_inst, tb):
        reporting_thread = _thread.get_ident()
        if reporting_thread != main_thread:
            if exc_info:
                log.warning("Unhandled top-level exception during shutdown "
                            "(will not be re-raised)")
            else:
                log.debug('recording exception %s', exc_inst)
                os.kill(os.getpid(), signal.SIGTERM)
                exc_info.append(exc_inst)
                exc_info.append(tb)
            old_exchook(exc_type, exc_inst, tb)

        # If the main thread re-raised exception, there is no need to call
        # excepthook again
        elif exc_info and exc_info[0] is exc_inst:
            log.debug('Suppressing exception hook for re-raised %s', exc_inst)
        else:
            old_exchook(exc_type, exc_inst, tb)

    sys.excepthook = exchook

    return exc_info
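A reduced sketch of the hook-installation pattern above: remember the main thread's id at setup time, wrap the previous sys.excepthook, and branch on _thread.get_ident() inside the replacement. The exc_info bookkeeping and SIGTERM delivery from the original are deliberately omitted here.

import sys
import _thread

def install_thread_aware_excepthook():
    main_thread = _thread.get_ident()
    old_hook = sys.excepthook

    def hook(exc_type, exc_inst, tb):
        if _thread.get_ident() != main_thread:
            # illustrative handling; the original records exc_info and signals the main thread
            print('exception raised outside the main thread:', exc_inst)
        old_hook(exc_type, exc_inst, tb)

    sys.excepthook = hook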
Example #30
File: db.py  Project: jjdelc/Photolog
 def _get_conn(self):
     _id = get_ident()
     if _id not in self._connection_cache:
         conn = sqlite3.Connection(self.path, timeout=60)
         conn.row_factory = dict_factory
         self._connection_cache[_id] = conn
     return self._connection_cache[_id]
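The same per-thread caching can also be written with threading.local(), which keys the storage on the current thread implicitly; a sketch follows (the dict_factory row factory from the excerpt is left out). Unlike a get_ident()-keyed dict, entries in a threading.local disappear when their thread exits, so the cache cannot grow without bound, but a connection can then only be closed from its own thread.

import sqlite3
import threading

class ConnectionCache:
    def __init__(self, path):
        self.path = path
        self._local = threading.local()    # one slot per thread

    def get_conn(self):
        conn = getattr(self._local, 'conn', None)
        if conn is None:
            conn = sqlite3.connect(self.path, timeout=60)
            self._local.conn = conn
        return conn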
Example #31
  def WaitForCallable(self):
    """Sets the thread ID and returns the callable and args for this request.

    This will block until the request details have been set.

    Returns:
      A tuple (target, args, kwargs) where
        target: A callable for the background thread to execute.
        args: A tuple of positional args to be passed to target.
        kwargs: A dict of keyword args to be passed to target.
    """
    with self._ready_condition:
      self._thread_id = _thread.get_ident()
      self._thread_id_ready = True
      self._ready_condition.notify()
      while not self._callable_ready:
        self._ready_condition.wait()
      return self._target, self._args, self._kwargs
Example #32
def _processRootEvents( controller ):
   import time
   gSystemProcessEvents = _root.gSystem.ProcessEvents

   if sys.platform == 'win32':
      import _thread
      _root.gROOT.ProcessLineSync('((TGWin32 *)gVirtualX)->SetUserThreadId(%ld)' % (_thread.get_ident()))

   while controller.keeppolling:
      try:
         gSystemProcessEvents()
         if PyConfig.GUIThreadScheduleOnce:
            for guicall in PyConfig.GUIThreadScheduleOnce:
               guicall()
            PyConfig.GUIThreadScheduleOnce = []
         time.sleep( 0.01 )
      except: # in case gSystem gets destroyed early on exit
         pass
Example #33
    def copy(self, src, dest, metadata=None):
        if not (metadata is None or isinstance(metadata, dict)):
            raise TypeError('*metadata*: expected dict or None, got %s' %
                            type(metadata))
        elif metadata is not None:
            buf = freeze_basic_mapping(metadata)
            if len(buf).bit_length() > 16:
                raise ValueError('Metadata too large')

        path_src = self._key_to_path(src)
        path_dest = self._key_to_path(dest)

        try:
            src = open(path_src, 'rb')
        except FileNotFoundError:
            raise NoSuchObject(src)

        dest = None
        try:
            # By renaming, we make sure that there are no conflicts between
            # parallel writes, the last one wins
            tmpname = '%s#%d-%d.tmp' % (path_dest, os.getpid(),
                                        _thread.get_ident())
            dest = ObjectW(tmpname)

            if metadata is not None:
                try:
                    _read_meta(src)
                except ThawError:
                    raise CorruptedObjectError('Invalid metadata')
                dest.write(b's3ql_1\n')
                dest.write(struct.pack('<H', len(buf)))
                dest.write(buf)
            shutil.copyfileobj(src, dest, BUFSIZE)
        except:
            if dest:
                os.unlink(tmpname)
            raise

        finally:
            src.close()
            dest.close()

        os.rename(tmpname, path_dest)
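The tmpname convention above, process id plus thread id, guarantees that concurrent writers never pick the same temporary file; the result is then published with a single atomic rename. A minimal, illustrative version of that pattern:

import os
import _thread

def write_atomically(path, data):
    # unique per process and per thread, so parallel writers cannot collide
    tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident())
    with open(tmpname, 'wb') as fh:
        fh.write(data)
    os.rename(tmpname, path)   # atomic within one filesystem on POSIX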
Example #34
 def run_annealing(self,size,kmax): 
     k=0
     id=_thread.get_ident()
     file = open("report_SA"+str(int(id/3))+"_KMAX_"+str(self.kmax)+".csv","w") #uses the thread id for creating unique filenames
     start_time=time.time()*1000
     town=[]
     interactions=0
     temp=self.temp
     tcost=0
     for i in range(size):
         town.append(i)
     random.shuffle(town)
     tcost=self.D[size-1][0]
     for i in range(size-1):
         tcost = tcost + self.D[i][i+1] 
     firstcost = tcost
     while (k<kmax): #Max iteractions without change
         newtown=town
         swap = random.randint(0,size-1)
         if swap<size-1:
             aux=newtown[swap]
             newtown[swap]=newtown[swap+1]
             newtown[swap+1]=aux
         else:
             aux=newtown[swap]
             newtown[swap]=newtown[0]
             newtown[0]=aux
         tnewcost=0
         for x in range(0,size-1,1):
             tnewcost=tnewcost+self.D[newtown[x]][newtown[x+1]]
         delta=tnewcost-tcost
         if delta < 0 or math.exp(-delta/temp) >= random.random():
             tcost = tnewcost
             k=0
         temp=temp*self.tempdecratio
         interactions=interactions+1
         file.write(str(interactions)+" "+str(tcost)+" "+str(temp)+"\n")
         k=k+1
     end_time=time.time()*1000
     costreduction=((firstcost-tcost)/firstcost)*100
     self.result.append([kmax,tnewcost,costreduction,int(end_time-start_time),interactions,"SA"])
     print("Thread finished")
     self.towns.append([kmax,town])
     file.close()
Example #35
    def exchook(exc_type, exc_inst, tb):
        reporting_thread = _thread.get_ident()
        if reporting_thread != main_thread:
            if exc_info:
                log.warning("Unhandled top-level exception during shutdown "
                            "(will not be re-raised)")
            else:
                log.debug('recording exception %s', exc_inst)
                os.kill(os.getpid(), signal.SIGTERM)
                exc_info.append(exc_inst)
                exc_info.append(tb)
            old_exchook(exc_type, exc_inst, tb)

        # If the main thread re-raised exception, there is no need to call
        # excepthook again
        elif exc_info and exc_info[0] is exc_inst:
            log.debug('Suppressing exception hook for re-raised %s', exc_inst)
        else:
            old_exchook(exc_type, exc_inst, tb)
Example #36
    def release(self):
        """
        If the current thread holds this lock, calling this method will
        decrement the lock count. When the lock count reaches zero (released as
        many times as it was acquired), the lock is released completely.

        A thread cannot release a lock that it does not hold.
        """

        ident = _thread.get_ident()
        if ident != self.__owner:
            raise RuntimeError("Attempting to release lock without ownership")

        # By now, we know that we must be holding the lock, so updating
        # internal state is safe
        self.__lock_count -= 1
        if self.__lock_count == 0:
            self.__owner = None
            self.__internal_lock.release()
Example #37
 def run_annealing(self,size,originalpath):
     k=0
     interactions=0
     temp=self.temp
     tdistance=0
     path=originalpath[:]
     id=_thread.get_ident()
     #file = open("report_SA"+str(int(id/3))+"_KMAX_"+str(self.kmax)+".csv","w") #uses the thread id for creating unique filenames
     start_time=time.time()*1000
     tdistance=self.D[size-1][0]
     for i in range(size-1):
         tdistance = tdistance + self.D[path[i]][path[i+1]] 
     firstdistance = tdistance
     while (k<self.kmax): #Max iteractions without change
         newpath=path
         #Swaps one position and calculate delta
         swap = random.randint(0,size-1)
         if swap<size-1:
             aux=newpath[swap]
             newpath[swap]=newpath[swap+1]
             newpath[swap+1]=aux
         else:
             aux=newpath[swap]
             newpath[swap]=newpath[0]
             newpath[0]=aux
         tnewdistance=0
         for x in range(0,size-1,1):
             tnewdistance=tnewdistance+self.D[newpath[x]][newpath[x+1]]
         delta=tnewdistance-tdistance
         if (delta < 0) or (math.exp(-delta/temp) >= random.random()):
             tdistance = tnewdistance
             k=0
             path=newpath
         else:
             k=k+1
         temp=temp*self.tempdecratio
         interactions=interactions+1
         #file.write(str(interactions)+" "+str(tdistance)+" "+str(temp)+"\n")
     end_time=time.time()*1000
     distancereduction=((firstdistance-tdistance)/firstdistance)*100
     self.result.append([id,tdistance,distancereduction,int(end_time-start_time),interactions,"SA"])
     #print("Thread finished")
     self.paths.append([id,path])
Example #38
 def solve(self):
     '''
     Uses toulbar2 inference. Returns the best solution, i.e. a tuple
     of variable assignments.
     '''
     if not is_executable(_tb2path):
         raise Exception('toulbar2 cannot be found.')
     # append the process id to the filename to make it "process safe"
     tmpfile = tempfile.NamedTemporaryFile(prefix='{}-{}'.format(
         os.getpid(), _thread.get_ident()),
                                           suffix='.wcsp',
                                           delete=False)
     wcspfilename = tmpfile.name
     self.write(stream=tmpfile)
     tmpfile.close()
     cmd = '"%s" -s %s' % (_tb2path, wcspfilename)
     logger.debug('solving WCSP...')
     p = Popen(cmd, shell=True, stderr=PIPE, stdout=PIPE)
     solution = None
     nextLineIsSolution = False
     cost = None
     while True:
         l = p.stdout.readline()
         if not l: break
         if l.startswith(b'New solution'):
             cost = int(l.split()[2])
             nextLineIsSolution = True
             continue
         if nextLineIsSolution:
             solution = list(map(int, l.split()))
             nextLineIsSolution = False
     p.wait()
     logger.debug('toulbar2 process returned {}'.format(str(p.returncode)))
     try:
         os.remove(wcspfilename)
     except OSError:
         logger.warning(
             'could not remove temporary file {}'.format(wcspfilename))
     if p.returncode != 0:
         raise Exception(
             'toulbar2 returned a non-zero exit code: {}'.format(
                 p.returncode))
     return solution, cost
Example #39
    def __init__(self, networkId=None, userName=None, progName=None):
        self.networkId = networkId
        if networkId is None:
            self.networkId = str(struct.unpack('<L', socket.inet_aton(socket.gethostbyname(socket.gethostname())))[0])

        self.userName = userName
        if userName is None:
            if not pwd_error:
                self.userName = pwd.getpwuid(os.getuid()).pw_name
            else:
                self.userName = "******"

        self.progName = progName
        if progName is None:
            self.progName = "unknown"

        self.pid = os.getpid()

        self.threadId = int(_thread.get_ident())
Example #40
def do_compute(call_no, delay):
    started = utcnow()
    process_id = os.getpid()
    thread_id = _thread.get_ident()

    # yes, we do the evil blocking thing here!
    # this is to simulate CPU intensive stuff
    time.sleep(delay)

    ended = utcnow()

    result = {
        'call_no': call_no,
        'started': started,
        'ended': ended,
        'process': process_id,
        'thread': thread_id
    }
    return result
Example #41
    def savepoint(self):
        """
        Create a savepoint inside the current transaction. Return an
        identifier for the savepoint that will be used for the subsequent
        rollback or commit. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return

        thread_ident = _thread.get_ident()
        tid = str(thread_ident).replace('-', '')

        self.savepoint_state += 1
        sid = "s%s_x%d" % (tid, self.savepoint_state)

        self.validate_thread_sharing()
        self._savepoint(sid)

        return sid
Example #42
def blink_colors():
    tid = _thread.get_ident()
    with binsem:
        pycom.heartbeat(False)
    while(True):
        with binsem:
            pycom.rgbled(0xff0000)
        time.sleep(1)
        with binsem:
            pycom.rgbled(0x00ff00)
        time.sleep(1)
        with binsem:
            pycom.rgbled(0x0000ff)
        time.sleep(1)
        print("tid {}: iteration done".format(tid))


        
#testcomment
Example #43
    def __init__(self,
                 controller=None,
                 filename=None,
                 message=None,
                 display_unlikely=True,
                 parameters=None,
                 importerclass=None,
                 source_type=None):
        super(AnnotationImporter, self).__init__(controller=controller)
        self.controller = controller
        self.parameters = parameters
        self.message = message

        self.close_on_package_load = False
        self.contextual_actions = ()
        self.options = {
            'display-unlikely': display_unlikely,
        }

        # Flag used to cancel import
        self.should_continue = True

        # Assume that the view is initialized in the current
        # thread. Store its id, so that we detect if calls
        # (esp. progress_callback) are made from another thread and
        # act accordingly.
        self.main_thread_id = _thread.get_ident()
        self.importer = None
        if importerclass is not None:
            self.importer = importerclass(controller=self.controller,
                                          callback=self.progress_callback,
                                          source_type=source_type)
        self.filename = filename
        self.widget = self.build_widget()

        if filename:
            self.fb.set_filename(filename)
            self.update_importers(filename=filename)
        else:
            # Open file selection dialog
            button = self.fb.get_children()[0]
            GObject.timeout_add(300, lambda: button.activate() and False)
Example #44
    def _Dynamic_Put(self, put_request, put_response):
        if put_request.has_transaction():
            entities = put_request.entity_list()

            requires_id = lambda x: x.id() == 0 and not x.has_name()
            new_ents = [
                e for e in entities
                if requires_id(e.key().path().element_list()[-1])
            ]
            id_request = datastore_pb.PutRequest()

            txid = put_request.transaction().handle()
            txdata = self.__transactions[txid]
            assert (txdata.thread_id == _thread.get_ident()
                    ), "Transactions are single-threaded."
            if new_ents:
                for ent in new_ents:
                    e = id_request.add_entity()
                    e.mutable_key().CopyFrom(ent.key())
                    e.mutable_entity_group()
                id_response = datastore_pb.PutResponse()

                if txdata.is_xg:
                    rpc_name = 'GetIDsXG'
                else:
                    rpc_name = 'GetIDs'
                super(RemoteDatastoreStub,
                      self).MakeSyncCall('remote_datastore', rpc_name,
                                         id_request, id_response)
                assert id_request.entity_size() == id_response.key_size()
                for key, ent in zip(id_response.key_list(), new_ents):
                    ent.mutable_key().CopyFrom(key)
                    ent.mutable_entity_group().add_element().CopyFrom(
                        key.path().element(0))

            for entity in entities:
                txdata.entities[entity.key().Encode()] = (entity.key(), entity)
                put_response.add_key().CopyFrom(entity.key())
        else:
            super(RemoteDatastoreStub,
                  self).MakeSyncCall('datastore_v3', 'Put', put_request,
                                     put_response)
Example #45
    def meta_save(self, xmlname, x, y, z, size, tiles):
        #print "Saving %d tiles" % (size * size)
        meta_path = self.xyz_to_meta(xmlname, x, y, z)
        d = os.path.dirname(meta_path)
        if not os.path.exists(d):
            try:
                os.makedirs(d)
            except OSError:
                # Multiple threads can race when creating directories,
                # ignore exception if the directory now exists
                if not os.path.exists(d):
                    raise

        tmp = "%s.tmp.%d" % (meta_path, _thread.get_ident())
        f = open(tmp, "w")

        f.write(struct.pack("4s4i", META_MAGIC, METATILE * METATILE, x, y, z))
        offset = len(META_MAGIC) + 4 * 4
        # Need to pre-compensate the offsets for the size of the offset/size table we are about to write
        offset += (2 * 4) * (METATILE * METATILE)
        # Collect all the tile sizes
        sizes = {}
        offsets = {}
        for xx in range(0, size):
            for yy in range(0, size):
                mt = self.xyz_to_meta_offset(xmlname, x + xx, y + yy, z)
                sizes[mt] = len(tiles[(xx, yy)])
                offsets[mt] = offset
                offset += sizes[mt]
        # Write out the offset/size table
        for mt in range(0, METATILE * METATILE):
            if mt in sizes:
                f.write(struct.pack("2i", offsets[mt], sizes[mt]))
            else:
                f.write(struct.pack("2i", 0, 0))
        # Write out the tiles
        for xx in range(0, size):
            for yy in range(0, size):
                f.write(tiles[(xx, yy)])

        f.close()
        os.rename(tmp, meta_path)
Example #46
def CMFCoreSkinnableSkinnableObjectManager___getattr__(self, name):
    '''
    Looks for the name in an object with wrappers that only reach
    up to the root skins folder.
    This should be fast, flexible, and predictable.
    '''
    if name[:1] != '_' and name[:3] != 'aq_':
        skin_info = SKINDATA.get(get_ident())
        if skin_info is not None:
            _, skin_selection_name, ignore, resolve = skin_info
            try:
                return resolve[name]
            except KeyError:
                if name not in ignore:
                    object = skinResolve(self, skin_selection_name, name)
                    if object is not None:
                        resolve[name] = object
                        return object
                    ignore[name] = None
    raise AttributeError(name)
Example #47
    def open_write(self, key, metadata=None, is_compressed=False):
        path = self._key_to_path(key)

        # By renaming, we make sure that there are no
        # conflicts between parallel reads, the last one wins
        tmpname = '%s#%d-%d' % (path, os.getpid(), _thread.get_ident())

        try:
            dest = ObjectW(tmpname)
        except FileNotFoundError:
            try:
                os.makedirs(os.path.dirname(path))
            except FileExistsError:
                # Another thread may have created the directory already
                pass
            dest = ObjectW(tmpname)

        os.rename(tmpname, path)
        pickle.dump(metadata, dest, PICKLE_PROTOCOL)
        return dest
Example #48
    def __init__(self, backing_transport, commands, root_client_path,
                 jail_root=None):
        """Constructor.

        :param backing_transport: a Transport to handle requests for.
        :param commands: a registry mapping command names to SmartServerRequest
            subclasses. e.g. breezy.transport.smart.vfs.vfs_commands.
        """
        self._backing_transport = backing_transport
        self._root_client_path = root_client_path
        self._commands = commands
        if jail_root is None:
            jail_root = backing_transport
        self._jail_root = jail_root
        self.response = None
        self.finished_reading = False
        self._command = None
        if 'hpss' in debug.debug_flags:
            self._request_start_time = osutils.perf_counter()
            self._thread_id = get_ident()
Example #49
    def add_callback(self, callback):
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time.
        Note that this is the *only* method in IOLoop that makes this
        guarantee; all other interaction with the IOLoop must be done
        from that IOLoop's thread.  add_callback() may be used to transfer
        control from other threads to the IOLoop's thread.
        """
        with self._callback_lock:
            list_empty = not self._callbacks
            self._callbacks.append(stack_context.wrap(callback))
        if list_empty and _thread.get_ident() != self._thread_ident:
            # If we're in the IOLoop's thread, we know it's not currently
            # polling.  If we're not, and we added the first callback to an
            # empty list, we may need to wake it up (it may wake up on its
            # own, but an occasional extra wake is harmless).  Waking
            # up a polling IOLoop is relatively expensive, so we try to
            # avoid it when we can.
            self._waker.wake()
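Condensed to its essence, the dispatch decision compares the current thread id with the id recorded when the loop was created, and only wakes the loop when posting from a foreign thread onto an empty queue. The EventLoop class below is an illustrative sketch under those assumptions, not Tornado's API.

import _thread
import threading
from collections import deque

class EventLoop:
    def __init__(self):
        self._thread_ident = _thread.get_ident()    # thread that runs the loop
        self._callbacks = deque()
        self._lock = threading.Lock()
        self._waker = threading.Event()

    def add_callback(self, callback):
        if _thread.get_ident() == self._thread_ident:
            self._callbacks.append(callback)        # already on the loop thread
            return
        with self._lock:
            was_empty = not self._callbacks
            self._callbacks.append(callback)
        if was_empty:
            self._waker.set()                       # poke the sleeping loop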
Example #50
    def release(self):
        """Release the lock.

        If this thread doesn't currently have the lock, an assertion
        error is raised.

        Only allow another thread to acquire the lock when the count
        reaches zero after decrementing it.

        """
        self.__mutex.acquire()
        try:
            assert self.__tid == thread.get_ident()
            assert self.__count > 0
            self.__count = self.__count - 1
            if self.__count == 0:
                self.__tid = None
                self.__wait.release()
        finally:
            self.__mutex.release()
Example #51
    def shutdown(self):
        if not self._is_live:
            return

        self._is_live = False

        if self._ident != get_ident():
            self._thread.join()

        for connection in list(self._map.values()):
            try:
                connection.close(None,
                                 HazelcastError("Client is shutting down"))
            except OSError as connection:
                if connection.args[0] == socket.EBADF:
                    pass
                else:
                    raise

        self._map.clear()
Example #52
def print_debug(_str):
    #if not g_fDebug:
    #    return

    t = time.time()
    l = time.localtime(t)
    s = time.strftime('%H:%M:%S', l) + '.%03d' % ((t - int(t)) * 1000)

    f = sys._getframe(1)

    tid = thread.get_ident()
    tname = thread_get_name( current_thread() )
    threadinfo = '%s/%d' % ( tname, tid )
    filename = os.path.basename(f.f_code.co_filename)
    lineno = f.f_lineno
    name = f.f_code.co_name

    str = '%s %s:%d %s %s(): %s' % (s, filename, lineno, threadinfo, name, _str)

    _print(str, sys.__stderr__)
Example #53
    def test_notification_on_separate_thread(self):
        notifications = []

        def on_foo_notifications(obj, name, old, new):
            thread_id = _thread.get_ident()
            event = (thread_id, obj, name, old, new)
            notifications.append(event)

        obj = Foo()
        obj.on_trait_change(on_foo_notifications, 'foo', dispatch='new')

        obj.foo = 3
        # Wait for a while to make sure the notification has finished.
        time.sleep(0.1)

        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0][1:], (obj, 'foo', 0, 3))

        this_thread_id = _thread.get_ident()
        self.assertNotEqual(this_thread_id, notifications[0][0])
Example #54
def alertable_wait(lock, timeout=None):
    jobs = []
    tid = thread.get_ident()
    g_alertable_waiters[tid] = (lock, jobs)

    try:
        safe_wait(lock, timeout)

        while len(jobs) != 0:
            job = jobs.pop(0)
            try:
                job()
            except:
                pass

            if len(jobs) == 0:
                time.sleep(0.1)

    finally:
        del g_alertable_waiters[tid]
Example #55
 def start_threads(self):
     """Spawn threads for URL checking and status printing."""
     if self.config["status"]:
         t = status.Status(self, self.config["status_wait_seconds"])
         t.start()
         self.threads.append(t)
     if self.config["maxrunseconds"]:
         t = interrupt.Interrupt(self.config["maxrunseconds"])
         t.start()
         self.threads.append(t)
     num = self.config["threads"]
     if num > 0:
         for dummy in range(num):
             t = checker.Checker(self.urlqueue, self.logger,
                                 self.add_request_session)
             self.threads.append(t)
             t.start()
     else:
         self.request_sessions[_thread.get_ident()] = new_request_session(
             self.config, self.cookies)
         checker.check_urls(self.urlqueue, self.logger)
Example #56
 def __init__(self,
              global_config,
              call_id=None,
              from_tag=None,
              to_tag=None,
              notify_socket=None,
              notify_tag=None):
     self.global_config = global_config
     self.my_ident = get_ident()
     if '_rtp_proxy_clients' in global_config:
         rtp_proxy_clients = [
             x for x in global_config['_rtp_proxy_clients'] if x.online
         ]
         n = len(rtp_proxy_clients)
         if n == 0:
             raise Exception('No online RTP proxy client has been found')
         self.rtp_proxy_client = rtp_proxy_clients[int(random() * n)]
     else:
         self.rtp_proxy_client = global_config['rtp_proxy_client']
         if not self.rtp_proxy_client.online:
             raise Exception('No online RTP proxy client has been found')
      if call_id is not None:
          self.call_id = call_id
      else:
          salt = str(random()) + str(time())
          self.call_id = md5(salt.encode()).hexdigest()
      if from_tag is not None:
          self.from_tag = from_tag
      else:
          salt = str(random()) + str(time())
          self.from_tag = md5(salt.encode()).hexdigest()
      if to_tag is not None:
          self.to_tag = to_tag
      else:
          salt = str(random()) + str(time())
          self.to_tag = md5(salt.encode()).hexdigest()
     self.notify_socket = notify_socket
     self.notify_tag = notify_tag
     self.caller = _rtpps_side()
     self.callee = _rtpps_side()
Example #57
    def test_auth_auto(self):
        if os.environ.get('NONETWORK'):
            return
        self.queue = []
        req = UrlRequest(
            'http://*****:*****@httpbin.org/basic-auth/user/passwd',
            on_success=self._on_success,
            on_progress=self._on_progress,
            on_error=self._on_error,
            on_redirect=self._on_redirect,
            debug=True)

        # don't use wait, but maximum 10s timeout
        for i in range(50):
            Clock.tick()
            sleep(.5)
            if req.is_finished:
                break

        self.assertTrue(req.is_finished)

        # we should have 2 progress minimum and one success
        self.assertTrue(len(self.queue) >= 3)

        # ensure the callback is called from this thread (main).
        tid = _thread.get_ident()
        self.assertEqual(self.queue[0][0], tid)
        self.assertEqual(self.queue[-2][0], tid)
        self.assertEqual(self.queue[-1][0], tid)

        self.assertEqual(self.queue[0][1], 'progress')
        self.assertEqual(self.queue[-2][1], 'progress')
        self.assertIn(self.queue[-1][1], ('success', 'redirect'))
        self.assertEqual(self.queue[-1][2], ({
            'authenticated': True,
            'user': '******'
        }, ))

        self.assertEqual(self.queue[0][2][0], 0)
        self.assertEqual(self.queue[-2][2][0], self.queue[-2][2][1])
Example #58
File: sh.py  Project: vsolina/mipyshell
    def _run_thread(args, command, _func):
        global _active_threads
        global _active_threads_ksignal

        tid = _thread.get_ident()
        _active_threads_ksignal[tid] = -1

        def _thread_watchdog(timer):
            if _active_threads_ksignal[tid] != -1:
                timer.deinit()

                def _quit(msg):
                    print("Quitting from {}".format(_thread.get_ident()))
                    _thread.exit()

    #                raise Exception(msg)
    #             micropython.schedule(_quit, ("[{}] Kill thread signal {}".format(tid, _active_threads_ksignal[tid])))

                print("Quitting from {}".format(_thread.get_ident()))
                _thread.exit()
                _thread.exit()

        _active_threads[tid] = (command, time.ticks_us())
        print("[1] {}".format(tid))
        try:
            timer = machine.Timer(-1)
            #timer.init(period=100, mode=machine.Timer.PERIODIC, callback=_thread_watchdog)

            _func(args)
            timer.deinit()
            del _active_threads[tid]
            print("Exited [{}] {}".format(tid, command))
            gc.collect()
        except Exception as e:
            timer.deinit()
            del _active_threads[tid]
            print("Exited [{}] {}".format(tid, command))
            gc.collect()
            raise e
Example #59
    def progress_callback(self, value=None, label=None):
        if _thread.get_ident() != self.main_thread_id:
            self.do_gui_operation(self.progress_callback,
                                  value=value,
                                  label=label)
            return self.should_continue

        if value is None:
            self.progressbar.pulse()
        else:
            self.progressbar.set_fraction(value)
        if label is not None:
            self.progressbar.set_text(label)
        # We could do a "while Gtk.events_pending()" but we want to
        # avoid process lock because of too many pending events
        # processing.
        for i in range(8):
            if Gtk.events_pending():
                Gtk.main_iteration()
            else:
                break
        return self.should_continue
Example #60
File: mount.py  Project: r0ps3c/s3ql
    def exchook(exc_type, exc_inst, tb):
        reporting_thread = _thread.get_ident()
        if reporting_thread != main_thread:
            if exc_info:
                log.warning("Unhandled top-level exception during shutdown "
                            "(will not be re-raised)")
            else:
                log.error("Unhandled exception in thread, terminating", exc_info=True)
                exc_info.append(exc_inst)
                exc_info.append(tb)
                trio.from_thread.run_sync(
                    pyfuse3.terminate,
                    trio_token=pyfuse3.trio_token)

            old_exchook(exc_type, exc_inst, tb)

        # If the main thread re-raised exception, there is no need to call
        # excepthook again
        elif exc_info and exc_info[0] is exc_inst:
            log.debug('Suppressing exception hook for re-raised %s', exc_inst)
        else:
            old_exchook(exc_type, exc_inst, tb)