Example #1
 def __setattr__(self, name, value):
     if name in ('_lock', '_root', '_proxies'):
         super(ThreadProxy, self).__setattr__(name, value)
     elif threading.get_ident() in self._proxies:
         setattr(self._proxies[threading.get_ident()], name, value)
     else:
         raise AttributeError("no proxy for current thread")
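
A minimal, runnable sketch of the same per-thread registry idea, assuming nothing beyond the standard library (the class and method names are illustrative, not taken from the example above):

import threading

class PerThreadValue:
    """Holds one value per thread, keyed by threading.get_ident()."""

    def __init__(self):
        self._values = {}

    def set(self, value):
        self._values[threading.get_ident()] = value

    def get(self):
        try:
            return self._values[threading.get_ident()]
        except KeyError:
            raise AttributeError("no value for current thread") from None

store = PerThreadValue()

def worker():
    store.set(threading.get_ident())
    assert store.get() == threading.get_ident()

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()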
Example #2
 def execute(self, raw_req):
     try:
         self._lock.acquire()
         print('=====================>>>>>enter', threading.get_ident())
         return self._execute(raw_req)
     finally:
         self._lock.release()
         print('<<<<<<=================leave', threading.get_ident())
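
The explicit acquire/release in try/finally is exactly what a with statement does; a sketch of the same method rewritten with the idiom (_execute here is a hypothetical stand-in for the real work):

import threading

class Executor:
    def __init__(self):
        self._lock = threading.Lock()

    def execute(self, raw_req):
        # The with statement acquires the lock and guarantees its release,
        # even if _execute raises.
        with self._lock:
            print('enter', threading.get_ident())
            try:
                return self._execute(raw_req)
            finally:
                print('leave', threading.get_ident())

    def _execute(self, raw_req):
        return raw_req  # hypothetical stand-in

print(Executor().execute('req'))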
Example #3
 def waitResult(self):
   """ Wait for the execution of the last enqueued job to be done, and return the result or raise an exception. """
   self.thread.execute_queue.join()
   try:
     e = self.thread.exception_queue[threading.get_ident()].get_nowait()
   except queue.Empty:
     return self.thread.result_queue[threading.get_ident()].get_nowait()
   else:
     raise e
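
A self-contained sketch of that per-thread result/exception queue pattern; the worker plumbing around the original waitResult is assumed, so a throwaway thread stands in for it here:

import queue
import threading

result_queue = {}
exception_queue = {}

def run_job(job, caller_tid):
    # Worker side: deliver either the result or the exception to the
    # queues owned by the calling thread.
    try:
        result_queue[caller_tid].put(job())
    except Exception as e:
        exception_queue[caller_tid].put(e)

def wait_result(job):
    tid = threading.get_ident()
    result_queue.setdefault(tid, queue.Queue())
    exception_queue.setdefault(tid, queue.Queue())
    worker = threading.Thread(target=run_job, args=(job, tid))
    worker.start()
    worker.join()
    try:
        raise exception_queue[tid].get_nowait()
    except queue.Empty:
        return result_queue[tid].get_nowait()

print(wait_result(lambda: 42))  # 42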
Example #4
 def trywrapper(self, f, msginfo):
     msginfo["_function_id"] = f.__name__
     thread_details[threading.get_ident()] = msginfo.copy()
     try:
         f(msg=msginfo)
     except Exception as e:
         traceback.print_exc()
         if msginfo.get("msg"):
             send("Error executing {}: {}".format(f, e))
     del thread_details[threading.get_ident()]
Example #5
 def _check_thread(self):
     try:
         if self.__thread_ident == threading.get_ident():
             return
     except AttributeError:
         pass
     else:
         raise ProgrammingError(
             "SQLite objects created in a thread can only be used in that "
             "same thread. The object was created in thread id %d and this "
             "is thread id %d" % (self.__thread_ident, threading.get_ident()))
Example #6
    def db_conn(self):
        """
        Refers to a database connection via thread identifier
        :return: database connection handle
        """

        # Does a connection exist for this thread
        if threading.get_ident() not in self._db.keys():
            self.connect()

        return self._db[threading.get_ident()]
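
The standard-library alternative to a dict keyed on threading.get_ident() is threading.local, which cleans up automatically when threads exit; a sketch with a hypothetical _connect in place of a real database call:

import threading

class ConnectionPool:
    def __init__(self):
        self._local = threading.local()

    def _connect(self):
        return object()  # hypothetical stand-in for a real DB connection

    def db_conn(self):
        # Each thread sees its own attribute namespace on self._local.
        if not hasattr(self._local, 'conn'):
            self._local.conn = self._connect()
        return self._local.conn

pool = ConnectionPool()
print(pool.db_conn() is pool.db_conn())  # True: cached for this thread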
Example #7
    def __apply_gradients(self):
        logger.info('{}: training - apply gradients begin'.format(
            threading.get_ident()))

        gradients_begin = time.time()
        # zero gradient accumulators
        ModelBuilder.zero_model_gradient_accumulators()
        if args.debug:
            ModelBuilder.model_gradient_accumulators_debug_info()

        # calculate move rate gradients
        move_rate_values = []
        ucb_move_rate_values = []
        ugtsa_move_rate_values = []
        for move_rate, (
                move_rate_value,
                oracle_ucb_move_rate_value,
                oracle_ugtsa_move_rate_value) in sorted(
                    self.shared_state.move_rate_dict.items()):
            move_rate_values += [move_rate_value]
            ucb_move_rate_values += [oracle_ucb_move_rate_value]
            ugtsa_move_rate_values += [oracle_ugtsa_move_rate_value]

        loss, move_rates_gradient = ModelBuilder.cost_function(
            move_rate_values, ucb_move_rate_values, ugtsa_move_rate_values)

        logger.info('loss {}'.format(loss))
        if args.debug:
            print(move_rates_gradient)

        # accumulate gradients
        self.ugtsa_algorithm.computation_graph.model_gradients(
            first_node=self.shared_state.first_node,
            y_grads={
                move_rate: gradient
                for (move_rate, _), gradient in zip(
                    sorted(self.shared_state.move_rate_dict.items()),
                    move_rates_gradient)})

        # apply gradients
        ModelBuilder.apply_gradients()
        if args.debug:
            ModelBuilder.model_gradient_accumulators_debug_info()
        gradients_end = time.time()
        logger.info('gradients took {}'.format(
            gradients_end - gradients_begin))

        self.shared_state.first_node = len(
            self.ugtsa_algorithm.computation_graph.computation_graph.nodes)
        self.shared_state.move_rate_dict = {}

        logger.info('{}: training - apply gradients end'.format(
            threading.get_ident()))
Example #8
    def connect(self):
        logger.info('Attempting to connect to redis with ident={}, thread={}'.format(
            self.ident, threading.get_ident()))
        if self.redis_url is None:
            raise RedisConnectionError('self.redis_url not set')

        self.redis = redis.StrictRedis.from_url(self.redis_url,
                                                decode_responses=True)

        self.register_scripts()
        logger.info('Redis connection successful with ident={}, thread={}'.format(
            self.ident, threading.get_ident()))
Example #9
 def trywrapper(self, f, msginfo):
     msginfo["_function_id"] = f.__name__
     thread_details[threading.get_ident()] = msginfo.copy()
     try:
         if len(inspect.signature(f).parameters) > 0:
             f(msginfo)
         else:
             f()
     except Exception as e:
         traceback.print_exc()
         if msginfo.get("msg"):
             send("Error executing {}: {}".format(command, e))
     del thread_details[threading.get_ident()]
Example #10
def run_fork():
    lock = threading.Lock()
    child_t = threading.Thread(target=child_thread, args=(lock,))
    child_t.start()
    print('main thread identity', threading.get_ident())
    pid = os.fork()
    if pid == -1:
        raise RuntimeError('fork failed.')

    if pid == 0:
        print('main thread identity', threading.get_ident())
        child_worker(lock)
    else:
        time.sleep(120)
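
Mixing fork() with threads is hazardous: only the thread calling fork() survives in the child, so a lock held by any other thread at fork time stays locked there forever. On Python 3.7+ (POSIX only) os.register_at_fork can reset such state; a minimal sketch:

import os
import threading

lock = threading.Lock()

def _reinit_lock():
    # Give the child a fresh, unlocked lock, whatever state the
    # parent's lock was in at fork time.
    global lock
    lock = threading.Lock()

os.register_at_fork(after_in_child=_reinit_lock)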
Example #11
 def func_assig_thread(self, O, all_threads):
     if threading.get_ident() in all_threads:
         th = all_threads[threading.get_ident()]
     else:
         all_threads[threading.get_ident()] = all_threads['count']
         th = all_threads['count']
         all_threads['count'] += 1
     x = skimming_single_origin(O, self.graph, self.results, self.aux_res, th)
     self.cumulative += 1
     if x != O:
         self.report.append(x)
     if pyqt:
         self.skimming.emit(['zones finalized', self.cumulative])
         txt = str(self.cumulative) + ' / ' + str(self.matrix.zones)
         self.skimming.emit(['text skimming', txt])
Example #12
 def run_task(self, **kwargs):
   Session.merge(self.task)
   self.task.start_time = datetime.utcnow()
   self.task.ident = threading.get_ident()
   self.task.status = TaskStatus.running.value
   Session.merge(self.task)
   Session.commit()
   try:
     self.run_function(**kwargs)
     self.task.log = self.log.messages
     self.task.end_time = datetime.utcnow()
     self.task.status = TaskStatus.finished.value
     self.task.result = TaskResult.success.value
     self.task = Session.merge(self.task)
     Session.commit()
   except Exception as e:
     self.task.log = self.log.messages
     self.task.tb = traceback.format_exc()
     self.task.end_time = datetime.utcnow()
     self.task.status = TaskStatus.finished.value
     self.task.result = TaskResult.fail.value
     self.task = Session.merge(self.task)
     Session.commit()
     defect = jira.defect_for_exception(
       "Background Task Error: {}".format(
         self.task.name),
       e, tb=traceback.format_exc(),
       username=self.task.username)
     self.task.defect_ticket = defect.key
     self.task = Session.merge(self.task)
     Session.commit()
   finally:
     Session.remove()
Example #13
 def register_event_pipe(self, pipe, desired_stop_message):
     """Same as register_event_queue, but the thread takes work from
        one or more OS-level pipes.  PIPE is the write fd of one of
        these pipes, and the stop message must be acceptable to os.write.
     """
     self._worker_event_queues[threading.get_ident()] = \
         ('p', pipe, desired_stop_message)
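
A runnable POSIX sketch of the pipe half of that registry: the worker blocks on the read end, and a controller writes the stop message to the registered write fd:

import os
import select
import threading

rfd, wfd = os.pipe()

def worker():
    # Block until something arrives on the pipe, then treat it as a
    # stop message.
    select.select([rfd], [], [])
    print('worker', threading.get_ident(), 'got', os.read(rfd, 1))

t = threading.Thread(target=worker)
t.start()
os.write(wfd, b'q')  # the stop message must be acceptable to os.write
t.join()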
Example #14
    def async_start(self):
        """Finalize startup from inside the event loop.

        This method is a coroutine.
        """
        _LOGGER.info("Starting Home Assistant")
        self.state = CoreState.starting

        # pylint: disable=protected-access
        self.loop._thread_ident = threading.get_ident()
        self.bus.async_fire(EVENT_HOMEASSISTANT_START)

        try:
            # Only block for EVENT_HOMEASSISTANT_START listener
            self.async_stop_track_tasks()
            with timeout(TIMEOUT_EVENT_START, loop=self.loop):
                yield from self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                'Something is blocking Home Assistant from wrapping up the '
                'start up phase. We\'re going to continue anyway. Please '
                'report the following info at http://bit.ly/2ogP58T : %s',
                ', '.join(self.config.components))

        # Allow automations to set up the start triggers before changing state
        yield from asyncio.sleep(0, loop=self.loop)
        self.state = CoreState.running
        _async_create_timer(self)
Example #15
    def current_frames_with_threads(self):
        import threading
        import traceback

        # Spawn a thread that blocks at a known place.  Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id

        def f123():
            g456()

        def g456():
            thread_info.append(threading.get_ident())
            entered_g.set()
            leave_g.wait()

        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()

        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]

        d = sys._current_frames()

        main_id = threading.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)

        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())

        # Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")

        self.assertEqual(sourceline, "g456()")

        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])

        # Reap the spawned thread.
        leave_g.set()
        t.join()
Example #16
    def process_request(self, request):

        # Todo: remove when IE9 support expires.
        request.ie_ajax_iframe = request.method == 'POST' and \
                 'HTTP_X_REQUESTED_WITH' not in request.META and \
                'HTTP_X_REQUESTED_WITH' in request.POST
        if request.ie_ajax_iframe:
            # Fix IE9 not being able to post $.ajaxForm() with proper HTTP headers due to iframe emulation.
            request.META['HTTP_X_REQUESTED_WITH'] = request.POST['HTTP_X_REQUESTED_WITH']

        # Get local timezone from browser and activate it.
        if getattr(settings, 'USE_JS_TIMEZONE', False):
            tz_name = self.__class__.get_request_timezone(request)
            if tz_name is not None:
                timezone.activate(pytz.timezone(tz_name))

        self.__class__._threadmap[threading.get_ident()] = request

        # Optional server-side injected JSON.
        request.client_data = {}
        """
            request.client_routes = [
                'logout',
                'users_list',
            ]
        """
        request.client_routes = []
        vm_list = to_vm_list(request.client_data)
        if has_vm_list(request.session):
            vm_session = to_vm_list(request.session)
            vm_list.extend(vm_session)
Example #17
def do_work(num):
	#Pretend to do work
	#time.sleep(0.1)
	with lock:
		print(num)
		print("Current thread:", str(threading.current_thread()).split(",")[0].split("-")[1]) #Prints just the thread number, not identifier. The order the threads were created in is the number starting at 1
		print("Thread ident:", threading.get_ident())
Example #18
    async def async_start(self) -> None:
        """Finalize startup from inside the event loop.

        This method is a coroutine.
        """
        _LOGGER.info("Starting Home Assistant")
        self.state = CoreState.starting

        setattr(self.loop, '_thread_ident', threading.get_ident())
        self.bus.async_fire(EVENT_HOMEASSISTANT_START)

        try:
            # Only block for EVENT_HOMEASSISTANT_START listener
            self.async_stop_track_tasks()
            with timeout(TIMEOUT_EVENT_START):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                'Something is blocking Home Assistant from wrapping up the '
                'start up phase. We\'re going to continue anyway. Please '
                'report the following info at http://bit.ly/2ogP58T : %s',
                ', '.join(self.config.components))

        # Allow automations to set up the start triggers before changing state
        await asyncio.sleep(0)

        if self.state != CoreState.starting:
            _LOGGER.warning(
                'Home Assistant startup has been interrupted. '
                'Its state may be inconsistent.')
            return

        self.state = CoreState.running
        _async_create_timer(self)
Example #19
 def pr(self, name, a=None, kw=None):
     f = sys._getframe(2)
     if f.f_code.co_filename.endswith('ZODB/utils.py'):
         f = sys._getframe(3)
     f = '%s:%s' % (f.f_code.co_filename, f.f_lineno)
     print(id(self), self._lock, threading.get_ident(), f, name,
           a if a else '', kw if kw else '')
Example #20
 def __enter__(self):
     id = threading.get_ident()
     if id not in self.calls:
         self.calls[id] = 0
         self.prec[id] = float('-inf')
     self.calls[id] += 1
     return self
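
A completed version of that per-thread re-entrancy counter, with the matching __exit__ (the prec table from the original is left out):

import threading

class CallDepth:
    def __init__(self):
        self.calls = {}

    def __enter__(self):
        tid = threading.get_ident()
        self.calls[tid] = self.calls.get(tid, 0) + 1
        return self

    def __exit__(self, exc_type, exc, tb):
        self.calls[threading.get_ident()] -= 1
        return False

    def depth(self):
        return self.calls.get(threading.get_ident(), 0)

d = CallDepth()
with d:
    with d:
        print(d.depth())  # 2
print(d.depth())  # 0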
Example #21
 def _test_factory(fifo, start):
     start.wait()
     factory = warehouse.http.ThreadLocalSessionFactory()
     # the actual session instance is stuck into the queue here so as to
     # maintain a reference so it's not gc'd (which can result in id
     # reuse)
     fifo.put((threading.get_ident(), factory(_REQUEST)))
Example #22
def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir."""
    loop._thread_ident = threading.get_ident()

    hass = ha.HomeAssistant(loop)
    hass.async_track_tasks()

    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True

    if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS:
        yield from loop.run_in_executor(None, loader.prepare, hass)

    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    @asyncio.coroutine
    def mock_async_start():
        """Start the mocking."""
        with patch('homeassistant.core._async_create_timer'):
            yield from orig_start()

    hass.async_start = mock_async_start

    return hass
Example #23
    def async_start(self):
        """Finalize startup from inside the event loop.

        This method is a coroutine.
        """
        _LOGGER.info("Starting Home Assistant")

        self.state = CoreState.starting

        # Register the restart/stop event
        self.services.async_register(DOMAIN, SERVICE_HOMEASSISTANT_STOP, self._async_stop_handler)
        self.services.async_register(DOMAIN, SERVICE_HOMEASSISTANT_RESTART, self._async_restart_handler)

        # Setup signal handling
        if sys.platform != "win32":
            try:
                self.loop.add_signal_handler(signal.SIGTERM, self._async_stop_handler)
            except ValueError:
                _LOGGER.warning("Could not bind to SIGTERM.")

            try:
                self.loop.add_signal_handler(signal.SIGHUP, self._async_restart_handler)
            except ValueError:
                _LOGGER.warning("Could not bind to SIGHUP.")

        # pylint: disable=protected-access
        self.loop._thread_ident = threading.get_ident()
        _async_create_timer(self)
        self.bus.async_fire(EVENT_HOMEASSISTANT_START)
        self.state = CoreState.running
Example #24
    def call_sync(self, name, *params):
        """
        Synchronous method call to be used from another thread.
        """
        if threading.get_ident() == self.__thread_id:
            raise RuntimeError('You cannot call_sync from main thread')

        serviceobj, methodobj = self._method_lookup(name)
        # This method is already being called from a thread so we can't use the same
        # thread pool or we may get into a deadlock situation if all threads in the default
        # pool are waiting.
        # Instead we launch a new thread just for that call (io_thread).
        fut = asyncio.run_coroutine_threadsafe(self._call(name, serviceobj, methodobj, params, io_thread=True), self.__loop)
        event = threading.Event()

        def done(_):
            event.set()

        fut.add_done_callback(done)

        # In case middleware dies while we are waiting for a `call_sync` result
        while not event.wait(1):
            if not self.__loop.is_running():
                raise RuntimeError('Middleware is terminating')
        return fut.result()
Example #25
    def wrapper(self, *args, **kw):
        """Wraps instance method to be called on loop thread"""

        # Just return when already on the event thread
        if self.thread.ident == get_ident():
            return func(self, *args, **kw)

        barrier = Barrier(2)
        result = None
        ex = None

        def call():
            """Calls function on loop thread"""
            nonlocal result, ex
            try:
                result = func(self, *args, **kw)
            except Exception as exc:
                ex = exc
            finally:
                barrier.wait()

        self.loop.call_soon_threadsafe(call)
        barrier.wait()
        if ex:
            raise ex or Exception("Unknown error")
        return result
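
asyncio ships this marshaling pattern ready-made: loop.call_soon_threadsafe for fire-and-forget callbacks and asyncio.run_coroutine_threadsafe when the caller needs the result back. A sketch:

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def on_loop_thread():
    return threading.get_ident()

# Blocks the calling thread until the loop thread produces the result.
future = asyncio.run_coroutine_threadsafe(on_loop_thread(), loop)
print(future.result(timeout=5) != threading.get_ident())  # True
loop.call_soon_threadsafe(loop.stop)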
Example #26
 def run_loop():
     """Run event loop."""
     # pylint: disable=protected-access
     loop._thread_ident = threading.get_ident()
     loop.run_forever()
     loop.close()
     stop_event.set()
Example #27
 def close(self):
     """
     Close the connection owned by this thread.
     """
     conn = self._db.pop(threading.get_ident(), None)
     if conn:
         conn.close()
Example #28
    def connect(self):
        try:
            import pymysql
            pymysql.install_as_MySQLdb()
            info("Using pure python SQL client")
        except ImportError:
            info("Using other SQL client")

        try:
            import MySQLdb
        except ImportError:
            critical("ERROR: missing a mysql python module")
            critical("Install either 'PyMySQL' or 'mysqlclient' from your OS software repository or from PyPI")
            raise

        try:
            args = {
                'host': self.config.db_host,
                'port': self.config.db_port,
                'user': self.config.db_user,
                'passwd': self.config.db_pass,
                'db': self.config.db_name
            }
            if self.config.db_socket:
                args['unix_socket'] = self.config.db_socket

            conn = MySQLdb.connect(**args)
            conn.autocommit(True)
            conn.ping(True)
            self._db[threading.get_ident()] = conn
        except Exception as e:
            critical("ERROR: Could not connect to MySQL database! {}".format(e))
            raise
Example #29
def inform_deleted_data(
    deleted_elements: Iterable[Tuple[str, int]],
    information: List[str] = None,
    user_id: Optional[int] = None,
    restricted: bool = False,
) -> None:
    """
    Informs the autoupdate system and the caching system about the deletion of
    elements.

    History creation is enabled.
    """
    if information is None:
        information = []
    elements: Dict[str, Element] = {}
    for deleted_element in deleted_elements:
        key = deleted_element[0] + str(deleted_element[1])
        elements[key] = Element(
            id=deleted_element[1],
            collection_string=deleted_element[0],
            full_data=None,
            information=information,
            restricted=restricted,
            user_id=user_id,
        )

    bundle = autoupdate_bundle.get(threading.get_ident())
    if bundle is not None:
        # Put all elements into the autoupdate_bundle.
        bundle.update(elements)
    else:
        # Send autoupdate directly
        handle_changed_elements(elements.values())
Example #30
 async def test_thread_io(self):
     inp = os.path.join(PATH, 'example', 'randomtext.txt')
     tasks = self.api.tasks
     task = await tasks.queue('read.text', input=inp)
     self.assertEqual(task.status_string, 'SUCCESS')
     self.assertNotEqual(task.result['thread'], threading.get_ident())
     self.assertTrue(task.result['text'])
Example #31
 def cache_get(self):
     return self.dbcache[(os.getpid(), threading.get_ident())]
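
Keying the cache on the (process id, thread id) pair makes it safe under both threading and fork(): a forked child gets a different pid, so it never sees the parent thread's handle even though thread idents can repeat across processes. A sketch of the full store:

import os
import threading

class DbCache:
    def __init__(self):
        self.dbcache = {}

    def _key(self):
        return (os.getpid(), threading.get_ident())

    def cache_store(self, db):
        self.dbcache[self._key()] = db

    def cache_get(self):
        return self.dbcache[self._key()]

cache = DbCache()
cache.cache_store('handle')
print(cache.cache_get())  # handle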
Example #32
    def thread_protocol(self):

        return self.protocols[get_ident()]
Example #33
 def switch_to(self, new_filename_or_stream):
     self.alt_threadid = get_ident()
     self.alt_handler = logging.FileHandler(new_filename_or_stream)
     self.alt_handler.setLevel(self.level)
     self.alt_handler.setFormatter(logging.Formatter(ALT_LOGGING_FORMAT))
Example #34
 def cache_store(self, db):
     self.dbcache[(os.getpid(), threading.get_ident())] = db
Example #35
 def _thread_id(self):
     return threading.get_ident()
Example #36
 def callback():
     idents.append(threading.get_ident())
Example #37
import time
import threading
import ctypes
import logHandler
import globalVars
from logHandler import log
import addonHandler
import extensionPoints
import garbageHandler  # noqa: E402

# inform those who want to know that NVDA has finished starting up.
postNvdaStartup = extensionPoints.Action()

PUMP_MAX_DELAY = 10

#: The thread identifier of the main thread.
mainThreadId = threading.get_ident()

#: Notifies when a window message has been received by NVDA.
#: This allows components to perform an action when several system events occur,
#: such as power, screen orientation and hardware changes.
#: Handlers are called with three arguments.
#: @param msg: The window message.
#: @type msg: int
#: @param wParam: Additional message information.
#: @type wParam: int
#: @param lParam: Additional message information.
#: @type lParam: int
post_windowMessageReceipt = extensionPoints.Action()

_pump = None
_isPumpPending = False
Example #38
    def per_netname(net):
        # Get a unique prefix from the thread ID
        prefix = "thread{}_".format(threading.get_ident())
        assoc_arcs = net_arcs[net]
        # Obtain the set of databases
        tile_dbs = {
            tile: pytrellis.get_tile_bitdata(
                pytrellis.TileLocator(config.family, config.device,
                                      tiles.type_from_fullname(tile)))
            for tile in config.tiles
        }
        # First filter using netname predicate
        if netname_filter_union:
            assoc_arcs = filter(
                lambda x: netname_predicate(x[0], netnames) and
                netname_predicate(x[1], netnames), assoc_arcs)
        else:
            assoc_arcs = filter(
                lambda x: netname_predicate(x[0], netnames) or
                netname_predicate(x[1], netnames), assoc_arcs)
        # Then filter using the arc predicate
        fuzz_arcs = list(
            filter(lambda x: arc_predicate(x, netnames), assoc_arcs))

        # For full-mux mode only
        changed_bits = set()
        arc_tiles = {}
        tiles_changed = set()

        for arc in fuzz_arcs:
            # Route statement containing arc for NCL file
            arc_route = "route\n\t\t\t" + arc[0] + "." + arc[1] + ";"
            # Build a bitstream and load it using libtrellis
            arc_bitf = config.build_design(config.ncl, {"route": arc_route},
                                           prefix)
            arc_chip = pytrellis.Bitstream.read_bit(
                arc_bitf).deserialise_chip()
            # Compare the bitstream with the arc to the baseline bitstream
            diff = arc_chip - baseline_chip
            if (not full_mux_style) or len(fuzz_arcs) == 1:
                if len(diff) == 0:
                    # No difference means fixed interconnect
                    # We consider this to be in the first tile if multiple tiles are being analysed
                    if fc_predicate(arc, netnames):
                        norm_arc = normalise_arc_in_tile(config.tiles[0], arc)
                        fc = pytrellis.FixedConnection()
                        norm_arc = [
                            fc_prefix + _ if not _.startswith("G_") else _
                            for _ in norm_arc
                        ]
                        norm_arc = [add_nonlocal_prefix(_) for _ in norm_arc]
                        fc.source, fc.sink = norm_arc
                        tile_dbs[config.tiles[0]].add_fixed_conn(fc)
                else:
                    for tile in config.tiles:
                        if tile in diff:
                            # Configurable interconnect in <tile>
                            norm_arc = normalise_arc_in_tile(tile, arc)
                            norm_arc = [
                                add_nonlocal_prefix(_) for _ in norm_arc
                            ]
                            ad = pytrellis.ArcData()
                            ad.source, ad.sink = norm_arc
                            ad.bits = pytrellis.BitGroup(diff[tile])
                            tile_dbs[tile].add_mux_arc(ad)
            else:
                arc_tiles[arc] = {}
                for tile in config.tiles:
                    if tile in diff:
                        tiles_changed.add(tile)
                        for bit in diff[tile]:
                            changed_bits.add((tile, bit.frame, bit.bit))
                    arc_tiles[arc][tile] = arc_chip.tiles[tile]

        if full_mux_style and len(fuzz_arcs) > 1:
            for tile in tiles_changed:
                for arc in arc_tiles:
                    bg = pytrellis.BitGroup()
                    for (btile, bframe, bbit) in changed_bits:
                        if btile == tile:
                            state = arc_tiles[arc][tile].cram.bit(bframe, bbit)
                            cb = pytrellis.ConfigBit()
                            cb.frame = bframe
                            cb.bit = bbit
                            cb.inv = (state == 0)
                            bg.bits.add(cb)
                    ad = pytrellis.ArcData()
                    ad.source, ad.sink = normalise_arc_in_tile(tile, arc)
                    ad.bits = bg
                    tile_dbs[tile].add_mux_arc(ad)
        # Flush database to disk
        for tile, db in tile_dbs.items():
            db.save()
Example #39
def set_external_logger(logger):
    loggers[threading.get_ident()] = logger
Example #40
 async def afunc():
     tid = threading.get_ident()
     return tid
Example #41
 def func(*args, **kwargs):
     tid = threading.get_ident()
     return tid, args, kwargs
Example #42
 def run_loop():
     """Run event loop."""
     # pylint: disable=protected-access
     loop._thread_ident = threading.get_ident()
     loop.run_forever()
     loop_stop_event.set()
Example #43
 def handler():
     result.append(threading.get_ident())
Example #44
 def set_thread_ident():
     self.thread_id = threading.get_ident()
Example #45
 def close(self):
     super().close()
     if (self._reader and self._reader.is_alive()
             and threading.get_ident() != self._reader.ident):
         self._reader.join(timeout=5.0)
         self._reader = None
Example #46
    def run_forever(self, app_context):
        """Set up the asyncio event loop, integrate it with the Winforms
        event loop, and start the application.

        This largely duplicates the setup behavior of the default Proactor
        run_forever implementation.

        :param app_context: The WinForms.ApplicationContext instance
            controlling the lifecycle of the app.
        """
        # Python 3.8 added an implementation of run_forever() in
        # ProactorEventLoop. The only part that actually matters is the
        # refactoring that moved the initial call to stage _loop_self_reading;
        # it now needs to be created as part of run_forever; otherwise the
        # event loop locks up, because there won't be anything for the
        # select call to process.
        if sys.version_info >= (3, 8):
            self.call_soon(self._loop_self_reading)

        # Remember the application context.
        self.app_context = app_context

        # Register a custom user window message.
        self.msg_id = user32.RegisterWindowMessageA("Python asyncio tick")
        # Add a message filter to listen for the asyncio tick message
        # FIXME: Actually install the message filter.
        # msg_filter = AsyncIOTickMessageFilter(self, self.msg_id)
        # WinForms.Application.AddMessageFilter(msg_filter)

        # Setup the Proactor.
        # The code between the following markers should be exactly the same as
        # the official CPython implementation, up to the start of the
        # `while True:` part of run_forever() (see BaseEventLoop.run_forever()
        # in Lib/asyncio/base_events.py)
        # === START BaseEventLoop.run_forever() setup ===
        self._check_closed()
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        try:
            self._old_agen_hooks = sys.get_asyncgen_hooks()
            sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                                   finalizer=self._asyncgen_finalizer_hook)
        except AttributeError:
            # Python < 3.6 didn't have sys.get_asyncgen_hooks();
            # No action required for those versions.
            pass

        events._set_running_loop(self)
        # === END BaseEventLoop.run_forever() setup ===

        # Rather than going into a `while True:` loop, we're going to use the
        # Winforms event loop to queue a tick() message that will cause a
        # single iteration of the asyncio event loop to be executed. Each time
        # we do this, we queue *another* tick() message in 5ms time. In this
        # way, we'll get a continuous stream of tick() calls, without blocking
        # the Winforms event loop.

        # Queue the first asyncio tick.
        self.enqueue_tick()

        # Start the Winforms event loop.
        WinForms.Application.Run(self.app_context)
Example #47
 def register(self, stream: TextIO):
     """Start output redirection for current thread."""
     self._redirected_streams[threading.get_ident()] = stream
Example #48
    def test_PyThreadState_SetAsyncExc(self):
        ctypes = import_module("ctypes")

        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)

        class AsyncExc(Exception):
            pass

        exception = ctypes.py_object(AsyncExc)

        # First check it works when setting the exception from the same thread.
        tid = threading.get_ident()
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)

        try:
            result = set_async_exc(tid, exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1)  # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass

        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()

        class Worker(threading.Thread):
            def run(self):
                self.id = threading.get_ident()
                self.finished = False

                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()

        t = Worker()
        t.daemon = True  # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print("    started worker thread")

        # Try a thread id that doesn't make sense.
        if verbose:
            print("    trying nonsensical thread id")
        result = set_async_exc(-1, exception)
        self.assertEqual(result, 0)  # no thread states modified

        # Now raise an exception in the worker thread.
        if verbose:
            print("    waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print("    verifying worker hasn't exited")
        self.assertFalse(t.finished)
        if verbose:
            print("    attempting to raise asynch exception in worker")
        result = set_async_exc(t.id, exception)
        self.assertEqual(result, 1)  # one thread state modified
        if verbose:
            print("    waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=10)
        self.assertTrue(t.finished)
        if verbose:
            print("    all OK -- joining worker")
        if t.finished:
            t.join()
Example #49
def log(*msgs):
    with open(LOG_FILE, "a") as fo:
        fo.write("{}.{}: {}\n".format(os.getpid(), threading.get_ident(), " - ".join(msgs)))
Example #50
 def unregister(self):
     """Stop output redirection for current thread."""
     self._redirected_streams.pop(threading.get_ident(), None)
Example #51
 def clear(self):
     """Invoked from each client's thread after a frame was processed."""
     self.events[threading.get_ident()].clear()
Example #52
 def _target_stream(self):
     return self._redirected_streams.get(threading.get_ident(),
                                         self._original_stream)
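
Examples #47, #50 and #52 are three methods of one object; assembled here into a runnable whole, with a write method added to show the routing (the class name is illustrative):

import io
import sys
import threading

class ThreadRedirector:
    def __init__(self, original=sys.stdout):
        self._original_stream = original
        self._redirected_streams = {}

    def register(self, stream):
        self._redirected_streams[threading.get_ident()] = stream

    def unregister(self):
        self._redirected_streams.pop(threading.get_ident(), None)

    def write(self, text):
        # Route to the current thread's stream, or fall back to the
        # original one.
        self._redirected_streams.get(
            threading.get_ident(), self._original_stream).write(text)

redirector = ThreadRedirector()

def worker():
    buf = io.StringIO()
    redirector.register(buf)
    redirector.write('captured')
    redirector.unregister()
    print(buf.getvalue())  # captured

t = threading.Thread(target=worker)
t.start()
t.join()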
Example #53
 def __init__(self):
     self._event_loop = asyncio.get_event_loop()
     self._thread_ident = threading.get_ident()
Example #54
def download_task(filename):
    print('Starting download thread, process id [%d] - thread id [%d].' % (os.getpid(), threading.get_ident()))
    print('Starting to download %s...' % filename)
    t = random.randint(5, 10)
    time.sleep(t)
    print('%s finished downloading! Took %d seconds' % (filename, t))
Example #55
 def run_on_handle_events_thread(self, callback):
     """Run a function on the thread that handles the events."""
     if threading.get_ident() == self._handle_event_thread_ident:
         callback()
     else:
         self.set_timer(0, callback)
Example #56
def is_gui_thread():
    return (threading.get_ident() == GlobalInfo.gui_thread
            or GlobalInfo.gui_thread is None)
Example #57
    def run(self, *args):
        # This controls the main symbolic execution loop of one of the workers
        logger.debug(
            "Starting Manticore Symbolic Emulator Worker %d. Pid %d Tid %d).",
            self.id,
            os.getpid(),
            threading.get_ident(),
        )

        m = self.manticore
        current_state = None
        m._publish("will_start_worker", self.id)

        # If CTRL+C is received at any worker, abort exploration via m.kill().
        # kill will set the m._killed flag to true and then each worker will slowly
        # get out of its mainloop and quit.
        with WithKeyboardInterruptAs(m.kill):

            # The worker runs until the manticore is killed
            while not m._killed.value:

                # STARTED - Will try to consume states until a STOP event is received
                # Outer loop, Keep getting states until someone request us to STOP
                try:  # handle fatal errors even exceptions in the exception handlers
                    try:  # handle Concretize and TerminateState

                        # At RUNNING
                        # The START has been requested; we operate under the
                        # assumption that manticore will let us stay at this
                        # phase for a _while_. Requests to STOP will be
                        # honored ASAP (i.e. not immediately)

                        # Select a single state
                        # wait for other workers to add states to the READY list
                        # This momentarily gets the main lock and then releases
                        # it while waiting for changes
                        # Raises an Exception if manticore gets cancelled
                        # while waiting or if there are no more potential states
                        logger.debug("[%r] Waiting for states", self.id)
                        # If at STANDBY wait for any change
                        current_state = m._get_state(wait=True)

                        # there are no more states to process
                        # states can come from the ready list or by forking
                        # states currently being analyzed in the busy list
                        if current_state is None:
                            logger.debug("[%r] No more states", self.id)
                            break

                        # assert current_state is not None
                        # Allows terminating the manticore worker on user request
                        # even in the middle of an execution
                        logger.debug("[%r] Running", self.id)
                        assert (current_state.id in m._busy_states
                                and current_state.id not in m._ready_states)

                        # This does not hold the lock so we may lose some event
                        # flickering
                        while not m._killed.value:
                            current_state.execute()
                        else:
                            logger.debug("[%r] Stopped and/or Killed", self.id)
                            # On going execution was stopped or killed. Lets
                            # save any progress on the current state using the
                            # same id. No other worker will use this state in
                            # this run
                            m._save(current_state, state_id=current_state.id)
                            m._revive_state(current_state.id)
                            current_state = None

                        assert current_state is None
                    # Handling Forking and terminating exceptions
                    except Concretize as exc:
                        logger.debug("[%r] Performing %r", self.id,
                                     exc.message)
                        # The fork() method can decide which state to keep
                        # exploring. For example, when the fork results in a
                        # single state it is better to just keep going.
                        # Though, normally fork() saves the spawned children,
                        # returns None and lets _get_state choose what to
                        # explore next
                        m._fork(current_state, exc.expression, exc.policy,
                                exc.setstate)
                        current_state = None

                    except TerminateState as exc:
                        logger.debug("[%r] Debug State %r %r", self.id,
                                     current_state, exc)
                        # Notify this state is done
                        m._publish("will_terminate_state", current_state, exc)
                        # Update the stored version of the current state

                        current_state._terminated_by = exc

                        m._save(current_state, state_id=current_state.id)
                        # Add the state to the terminated state list re-using
                        # the same id. No other worker will use this state in
                        # this run
                        m._terminate_state(current_state.id)

                        m._publish("did_terminate_state", current_state, exc)
                        current_state = None

                except (Exception, AssertionError) as exc:
                    import traceback

                    formatted = traceback.format_exc()
                    logger.error("Exception in state %r: %r\n%s ", self.id,
                                 exc, formatted)
                    # Internal Exception
                    # Add the state to the terminated state list
                    if current_state is not None:
                        # Drop any work on this state in case it is inconsistent

                        # Update the stored version of the current state
                        # Saved to a fresh id in case other workers have an old
                        # version of this state cached over the old id
                        m._publish("will_kill_state", current_state, exc)
                        m._save(current_state, state_id=current_state.id)
                        m._kill_state(current_state.id)
                        m._publish("did_kill_state", current_state, exc)
                        current_state = None
                    break

            # Getting out.
            # At KILLED
            logger.debug("[%r] Getting out of the mainloop", self.id)
            m._publish("did_terminate_worker", self.id)
Example #58
 def cache_clear(self):
     try:
         del self.dbcache[(os.getpid(), threading.get_ident())]
     except KeyError:
         pass
Example #59
def set_logger(module):
    if threading.get_ident() in loggers:
        return loggers[threading.get_ident()]
    logger = logging.getLogger(module)
    return logger
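
This pairs with set_external_logger from Example #39: a thread first registers an override, and set_logger then prefers it over the module logger. A condensed sketch of both ends together:

import logging
import threading

loggers = {}

def set_external_logger(logger):
    loggers[threading.get_ident()] = logger

def set_logger(module):
    # Prefer the logger registered for this thread, if any.
    return loggers.get(threading.get_ident()) or logging.getLogger(module)

set_external_logger(logging.getLogger('override'))
print(set_logger('mymodule').name)  # override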
Example #60
 def emit(self, record):
     if self.alt_handler and get_ident() == self.alt_threadid:
         self.alt_handler.emit(record)
     else:
         self.handler.emit(record)