Example #1
class ManticoreMultiprocessing(ManticoreBase):
    _worker_type = WorkerProcess

    def __init__(self, *args, **kwargs):
        def raise_signal():
            # The manager process must ignore CTRL+C; the parent handles interrupts.
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        # This is the global manager that will handle all shared memory access
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
        self._manager = SyncManager()
        self._manager.start(raise_signal)
        # The main manticore lock. Acquire this for accessing shared objects
        # THINKME: we use the same lock to access states lists and shared contexts
        self._lock = self._manager.Condition()
        self._killed = self._manager.Value(bool, False)
        self._running = self._manager.Value(bool, False)

        # List of state ids of States on storage
        self._ready_states = self._manager.list()
        self._terminated_states = self._manager.list()
        self._busy_states = self._manager.list()
        self._killed_states = self._manager.list()
        self._shared_context = self._manager.dict()
        self._context_value_types = {
            list: self._manager.list,
            dict: self._manager.dict
        }

        super().__init__(*args, **kwargs)
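
A minimal, self-contained sketch of the same SyncManager setup outside of
Manticore (all names below are illustrative, not part of the library):

import signal
from multiprocessing.managers import SyncManager

def ignore_sigint():
    # The manager process must ignore CTRL+C; the parent handles interrupts.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

if __name__ == '__main__':
    manager = SyncManager()
    manager.start(ignore_sigint)
    lock = manager.Condition()  # guards every shared object below
    ready = manager.list()      # state ids shared across worker processes
    shared = manager.dict()

    with lock:
        ready.append(0)
        lock.notify_all()

    manager.shutdown()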
Example #2
class StateTest(unittest.TestCase):
    _multiprocess_can_split_ = True

    def setUp(self):
        if not hasattr(self, 'manager'):
            self.manager = SyncManager()
            self.manager.start(
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
        dirname = os.path.dirname(__file__)
        platform = linux.Linux(os.path.join(dirname, 'binaries', 'basic_linux_amd64'))
        self.state = State(ConstraintSet(), platform)
        self.lock = self.manager.Condition()

    def test_workspace_save_load(self):
        self.state.constraints.add(True)
        workspace = Workspace(self.lock, 'mem:')
        id_ = workspace.save_state(self.state)
        state = workspace.load_state(id_)

        # Make sure our memory maps come back through serialization
        for left, right in zip(sorted(self.state.mem._maps),
                               sorted(state.mem._maps)):
            self.assertEqual(left.start, right.start)
            self.assertEqual(left.end, right.end)
            self.assertEqual(left.name, right.name)

        # Check constraints
        self.assertEqual(str(state.constraints), str(self.state.constraints))

    def test_workspace_id_start_with_zero(self):
        workspace = Workspace(self.lock, 'mem:')
        id_ = workspace.save_state(self.state)
        self.assertEqual(id_, 0)

    def test_output(self):
        out = ManticoreOutput('mem:')
        name = 'mytest'
        message = 'custom message'
        out.save_testcase(self.state, name, message)
        workspace = out._store._data

        # Make sure names are constructed correctly
        for entry, data in workspace.items():
            self.assertTrue(entry.startswith(name))
            if 'messages' in entry:
                self.assertTrue(message in data)

        keys = [x.split('.')[1] for x in workspace.keys()]

        for key in self.state.platform.generate_workspace_files():
            self.assertIn(key, keys)

        # Make sure we log everything we should be logging
        self.assertIn('smt', keys)
        self.assertIn('trace', keys)
        self.assertIn('messages', keys)
        self.assertIn('input', keys)
        self.assertIn('pkl', keys)
Example #3
class ManticoreBase(Eventful):
    def _manticore_single(self):
        self._worker_type = WorkerSingle

        class FakeLock:
            def _nothing(self, *args, **kwargs):
                pass

            acquire = _nothing
            release = _nothing
            __enter__ = _nothing
            __exit__ = _nothing
            notify_all = _nothing
            wait = _nothing

            def wait_for(self, condition, *args, **kwargs):
                if not condition():
                    raise Exception("Deadlock: Waiting for CTRL+C")

        self._lock = FakeLock()
        self._killed = ctypes.c_bool(False)
        self._running = ctypes.c_bool(False)
        self._ready_states = []
        self._terminated_states = []
        self._busy_states = []
        self._killed_states = []
        self._shared_context = {}

    def _manticore_threading(self):
        self._worker_type = WorkerThread
        self._lock = threading.Condition()
        self._killed = ctypes.c_bool(False)
        self._running = ctypes.c_bool(False)
        self._ready_states = []
        self._terminated_states = []
        self._busy_states = []
        self._killed_states = []
        self._shared_context = {}

    def _manticore_multiprocessing(self):
        def raise_signal():
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        self._worker_type = WorkerProcess
        # This is the global manager that will handle all shared memory access
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
        self._manager = SyncManager()
        self._manager.start(raise_signal)
        # The main manticore lock. Acquire this for accessing shared objects
        # THINKME: we use the same lock to access states lists and shared contexts
        self._lock = self._manager.Condition()
        self._killed = self._manager.Value(bool, False)
        self._running = self._manager.Value(bool, False)
        # List of state ids of States on storage
        self._ready_states = self._manager.list()
        self._terminated_states = self._manager.list()
        self._busy_states = self._manager.list()
        self._killed_states = self._manager.list()
        self._shared_context = self._manager.dict()
        self._context_value_types = {
            list: self._manager.list,
            dict: self._manager.dict
        }

    # Decorators added first for convenience.
    def sync(func: Callable) -> Callable:  # type: ignore
        """Synchronization decorator"""
        @functools.wraps(func)
        def newFunction(self, *args, **kw):
            with self._lock:
                return func(self, *args, **kw)

        return newFunction

    def at_running(func: Callable) -> Callable:  # type: ignore
        """Allows the decorated method to run only when manticore is actively
           exploring states
        """
        @functools.wraps(func)
        def newFunction(self, *args, **kw):
            if not self.is_running():
                raise ManticoreError(
                    f"{func.__name__} only allowed while exploring states")
            return func(self, *args, **kw)

        return newFunction

    def at_not_running(func: Callable) -> Callable:  # type: ignore
        """Allows the decorated method to run only when manticore is NOT
           exploring states
        """
        @functools.wraps(func)
        def newFunction(self, *args, **kw):
            if self.is_running():
                logger.error("Calling at running not allowed")
                raise ManticoreError(
                    f"{func.__name__} only allowed while NOT exploring states")
            return func(self, *args, **kw)

        return newFunction

    def only_from_main_script(func: Callable) -> Callable:  # type: ignore
        """Allows the decorated method to run only from the main manticore script
        """
        @functools.wraps(func)
        def newFunction(self, *args, **kw):
            if not self.is_main() or self.is_running():
                logger.error(
                    "Calling from worker or forked process not allowed")
                raise ManticoreError(f"{func.__name__} only allowed from main")
            return func(self, *args, **kw)

        return newFunction

    _published_events = {
        "run",
        "start_worker",
        "terminate_worker",
        "enqueue_state",
        "fork_state",
        "load_state",
        "terminate_state",
        "kill_state",
        "execute_instruction",
        "terminate_execution",
    }

    def __init__(self,
                 initial_state,
                 workspace_url=None,
                 outputspace_url=None,
                 **kwargs):
        """
        Manticore symbolically explores program states.


        **Manticore phases**

        Manticore has multiprocessing capabilities. Several worker processes
        could be registered to do concurrent exploration of the READY states.
        Manticore can be itself at different phases: STANDBY, RUNNING.

        .. code-block:: none

                      +---------+               +---------+
                ----->| STANDBY +<------------->+ RUNNING |
                      +---------+               +----+----+

        *Phase STANDBY*

        Manticore starts at STANDBY with a single initial state. Here the user
        can inspect, modify and generate testcases for the different states. The
        workers are paused and not doing any work. Actions: run()


        *Phase RUNNING*

        At RUNNING the workers consume states from the READY state list and
        potentially fork new states or terminate states. A RUNNING manticore can
        be stopped back to STANDBY. Actions: stop()


        **States and state lists**

        A state contains all the information of the running program at a given
        moment. State snapshots are saved to the workspace often. Internally
        Manticore associates a fresh id with each saved state. The memory copy
        of the state is then changed by the emulation of the specific arch.
        Stored snapshots are periodically updated using: _save() and _load().

        .. code-block:: none

                      _save     +-------------+  _load
            State  +----------> |  WORKSPACE  +----------> State
                                +-------------+

        During exploration Manticore spawns a number of temporary states that are
        maintained in different lists:

        .. code-block:: none

                Initial
                State
                  |   +-+---{fork}-----+
                  |   | |              |
                  V   V V              |
                +---------+        +---+----+      +------------+
                |  READY  +------->|  BUSY  +----->| TERMINATED |
                +---------+        +---+----+      +------------+
                     |
                     |                             +--------+
                     +---------------------------->| KILLED |
                                                   +--------+

        At any given time a state must be at the READY, BUSY, TERMINATED or
        KILLED list.

        *State list: READY*

        The READY list holds all the runnable states. Internally a state is
        added to the READY list via method `_put_state(state)`. Workers take
        states from the READY list via the `_get_state(wait=True|False)` method.
        A worker mainloop will consume states from the READY list and mark them
        as BUSY while working on them. States in the READY list can go to BUSY
        or KILLED.


        *State list: BUSY*

        When a state is selected for exploration from the READY list it is
        marked as busy and put in the BUSY list. States being explored will be
        constantly modified and only saved back to storage when moved out of
        the BUSY list. Hence, when at BUSY the stored copy of the state will be
        potentially outdated. States in the BUSY list can go to TERMINATED,
        KILLED or they can be {forked} back to READY. The forking process
        could involve generating new child states and removing the parent
        from all the lists.


        *State list: TERMINATED*

        TERMINATED contains states that have reached a final condition and raised
        TerminateState. Worker's mainloop simply moves the states that requested
        termination to the TERMINATED list. This is a final list.

        Note: An inherited Manticore class like ManticoreEVM could internally
        revive the states in TERMINATED that pass some condition and move them
        back to READY so the user can apply a following transaction.

        *State list: KILLED*

        KILLED contains all the READY and BUSY states found at a cancel event.
        Manticore supports interactive analysis and has a prominent event system.
        A user can stop or cancel the exploration at any time. The unfinished
        states caught in this situation are simply moved to their own list for
        further user action. This is a final list.

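        **Typical usage**

        A hedged sketch (``make_initial_state()`` is a stand-in for whatever
        builds the root state of your target)::

            m = SomeManticoreSubclass(make_initial_state())
            m.run()                        # STANDBY -> RUNNING -> STANDBY
            for state in m.terminated_states:
                m.generate_testcase(state)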

        :param initial_state: the initial root `State` object to start from
        :param workspace_url: workspace folder name
        :param outputspace_url: Folder to place final output. Defaults to workspace
        :param kwargs: other keyword arguments
        """
        super().__init__()
        random.seed(consts.seed)
        {
            consts.mprocessing.single: self._manticore_single,
            consts.mprocessing.threading: self._manticore_threading,
            consts.mprocessing.multiprocessing:
            self._manticore_multiprocessing,
        }[consts.mprocessing]()

        if any(not hasattr(self, x) for x in (
                "_worker_type",
                "_lock",
                "_running",
                "_killed",
                "_ready_states",
                "_terminated_states",
                "_killed_states",
                "_busy_states",
                "_shared_context",
        )):
            raise ManticoreError(
                "Need to instantiate one of: ManticoreNative, ManticoreThreads.."
            )

        # The workspace and the output
        # Manticore will use the workspace to save and share temporary states.
        # Manticore will use the output to save the final reports.
        # By default the output folder and the workspace folder are the same.
        # Check type, default to fs:
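        # e.g. "fs:/tmp/mcore_workspace" (filesystem) or "mem:" (in-memory, as in the tests above)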
        if isinstance(workspace_url, str):
            if ":" not in workspace_url:
                workspace_url = f"fs:{workspace_url}"
        else:
            if workspace_url is not None:
                raise TypeError(
                    f"Invalid workspace type: {type(workspace_url).__name__}")
        self._workspace = Workspace(workspace_url)
        # reuse the same workspace if not specified
        if outputspace_url is None:
            outputspace_url = workspace_url
        if outputspace_url is None:
            outputspace_url = f"fs:{self._workspace.uri}"
        self._output = ManticoreOutput(outputspace_url)

        # The set of registered plugins
        # The callback methods defined in the plugin object will be called when
        # the different type of events occur over an exploration.
        # Note that each callback will run in a worker process and that some
        # careful use of the shared context is needed.
        self.plugins = set()

        # Set initial root state
        if not isinstance(initial_state, StateBase):
            raise TypeError(
                f"Invalid initial_state type: {type(initial_state).__name__}")
        self._put_state(initial_state)

        # Workers will use manticore __dict__ So lets spawn them last
        self._workers = [
            self._worker_type(id=i, manticore=self)
            for i in range(consts.procs)
        ]
        self._snapshot = None
        self._main_id = os.getpid(), threading.current_thread().ident

    def is_main(self):
        """ True if called from the main process/script
        Note: in "single" mode this is _most likely_ True """
        return self._main_id == (os.getpid(), threading.current_thread().ident)

    @sync
    @only_from_main_script
    def take_snapshot(self):
        """ Copy/Duplicate/backup all ready states and save it in a snapshot.
        If there is a snapshot already saved it will be overrwritten
        """
        if self._snapshot is not None:
            logger.info("Overwriting a snapshot of the ready states")
        snapshot = []
        for state_id in self._ready_states:
            state = self._load(state_id)
            # Re-save the state in case the user changed its data
            snapshot.append(self._save(state))
        self._snapshot = snapshot

    @sync
    @only_from_main_script
    def goto_snapshot(self):
        """ REMOVE current ready states and replace them with the saved states
        in a snapshot """
        if not self._snapshot:
            raise ManticoreError("No snapshot to go to")
        self.clear_ready_states()
        for state_id in self._snapshot:
            self._ready_states.append(state_id)
        self._snapshot = None

    @sync
    @only_from_main_script
    def clear_snapshot(self):
        """ Remove any saved states """
        if self._snapshot:
            for state_id in self._snapshot:
                self._remove(state_id)
        self._snapshot = None

    @sync
    @at_not_running
    def clear_terminated_states(self):
        """ Remove all states from the terminated list """
        terminated_states_ids = tuple(self._terminated_states)
        for state_id in terminated_states_ids:
            self._terminated_states.remove(state_id)
            self._remove(state_id)
        assert self.count_terminated_states() == 0

    @sync
    @at_not_running
    def clear_ready_states(self):
        """ Remove all states from the ready list """
        ready_states_ids = tuple(self._ready_states)
        for state_id in ready_states_ids:
            self._ready_states.remove(state_id)
            self._remove(state_id)
        assert self.count_ready_states() == 0

    def __str__(self):
        return f"<{str(type(self))[8:-2]}| Alive States: {self.count_ready_states()}; Running States: {self.count_busy_states()} Terminated States: {self.count_terminated_states()} Killed States: {self.count_killed_states()} Started: {self._running.value} Killed: {self._killed.value}>"

    @classmethod
    def from_saved_state(cls, filename: str, *args, **kwargs):
        """
        Creates a Manticore object starting from a serialized state on the disk.

        :param filename: File to load the state from
        :param args: Arguments forwarded to the Manticore object
        :param kwargs: Keyword args forwarded to the Manticore object
        :return: An instance of a subclass of ManticoreBase with the given initial state
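
        Example (the path is illustrative)::

            m = Manticore.from_saved_state("saved_state.pkl")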
        """
        from ..utils.helpers import PickleSerializer

        with open(filename, "rb") as fd:
            deserialized = PickleSerializer().deserialize(fd)

        return cls(deserialized, *args, **kwargs)

    def _fork(self, state, expression, policy="ALL", setstate=None):
        """
        Fork state on expression concretizations.
        Using the given policy, build a list of solutions for the expression,
        then fork the state once per solution, applying setstate to each child.

        For example if expression is a Bool it may have 2 solutions. True or False.

                                 Parent
                            (expression = ??)

                   Child1                         Child2
            (expression = True)             (expression = False)
               setstate(True)                   setstate(False)

        The optional setstate() function is supposed to set the concrete value
        in the child state.

        Parent state is removed from the busy list and the child states are added
        to the ready list.

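        A sketch (``cond`` stands for a symbolic Bool held by the state)::

            self._fork(state, cond, policy="ALL")
            # the parent leaves BUSY; one child per solution is enqueued in READY
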
        """
        assert isinstance(
            expression, Expression), f"{type(expression)} is not an Expression"

        if setstate is None:

            def setstate(x, y):
                pass

        # Find a set of solutions for expression
        solutions = state.concretize(expression, policy)

        if not solutions:
            raise ManticoreError("Forking on unfeasible constraint set")

        logger.debug("Forking. Policy: %s. Values: %s", policy,
                     ", ".join(f"0x{sol:x}" for sol in solutions))

        self._publish("will_fork_state", state, expression, solutions, policy)

        # Build and enqueue a state for each solution
        children = []
        for new_value in solutions:
            with state as new_state:
                new_state.constrain(expression == new_value)

                # and set the PC of the new state to the concrete pc-dest
                # (or other register or memory address to concrete)
                setstate(new_state, new_value)

                # enqueue new_state, assign new state id
                new_state_id = self._put_state(new_state)

                # maintain a list of children for logging purpose
                children.append(new_state_id)

        with self._lock:
            self._busy_states.remove(state.id)
            self._remove(state.id)
            state._id = None
            self._lock.notify_all()

        self._publish("did_fork_state", new_state, expression, new_value,
                      policy)

        logger.debug("Forking current state %r into states %r", state.id,
                     children)

    @staticmethod
    @deprecated("Use utils.log.set_verbosity instead.")
    def verbosity(level):
        """ Sets global verbosity level.
            This will activate different logging profiles globally depending
            on the provided numeric value
        """
        set_verbosity(level)

    # State storage
    @Eventful.will_did("save_state", can_raise=False)
    def _save(self, state, state_id=None):
        """ Store or update a state in secondary storage under state_id.
            Use a fresh id if None is provided.

            :param state: A manticore State
            :param state_id: if not None force state_id (overwrite)
            :type state_id: int or None
            :returns: the state id used
        """
        state._id = self._workspace.save_state(state, state_id=state_id)
        return state.id

    @Eventful.will_did("load_state", can_raise=False)
    def _load(self, state_id):
        """ Load the state from the secondary storage

            :param state_id: a state id
            :type state_id: int
            :returns: the loaded state
        """
        if not hasattr(self, "stcache"):
            self.stcache = weakref.WeakValueDictionary()
        if state_id in self.stcache:
            return self.stcache[state_id]
        state = self._workspace.load_state(state_id, delete=False)
        state._id = state_id
        self.forward_events_from(state, True)
        state.manticore = self
        self.stcache[state_id] = state
        return state

    @Eventful.will_did("remove_state", can_raise=False)
    def _remove(self, state_id):
        """ Remove a state from secondary storage

            :param state_id: a state id
            :type state_id: int
        """
        if not hasattr(self, "stcache"):
            self.stcache = weakref.WeakValueDictionary()
        if state_id in self.stcache:
            del self.stcache[state_id]

        self._workspace.rm_state(state_id)

    # Internal support for state lists
    def _put_state(self, state):
        """ This enqueues the state for exploration.

            Serialize and store the state with a fresh state_id. Then add it to
            the shared READY states list

                          +-------+
            State +------>+ READY |
                          +-------+

        """
        state_id = self._save(state, state_id=state.id)
        with self._lock:
            # Enqueue it in the ready state list for processing
            self._ready_states.append(state_id)
            self._lock.notify_all()
        return state_id

    def _get_state(self, wait=False):
        """ Dequeue a state form the READY list and add it to the BUSY list """
        with self._lock:
            # If wait is true do the conditional wait for states
            if wait:
                # if no more states in the queue, let's wait for some forks
                while not self._ready_states and not self._killed.value:
                    # if a shutdown has been requested then bail
                    if self.is_killed():
                        return None  # Cancelled operation
                    # If there are no more READY states and no more BUSY states
                    # there is no chance we will get any new state, so return
                    if not self._busy_states:
                        return None  # There are no states

                    # if there are actually some busy workers, wait for state forks
                    logger.debug("Waiting for available states")
                    self._lock.wait()

            if self._killed.value:
                return None

            # at this point we know there is at least one element
            # and we have exclusive access
            assert self._ready_states

            # make the choice under exclusive access to the shared ready list
            # state_id = self._policy.choice(list(self._ready_states)[0])
            state_id = random.choice(list(self._ready_states))

            # Move from READY to BUSY
            self._ready_states.remove(state_id)
            self._busy_states.append(state_id)
            self._lock.notify_all()

        return self._load(state_id)

    @sync
    def _revive_state(self, state_id):
        """ Send a BUSY state back to READY list

            +--------+        +------+
            | READY  +<-------+ BUSY |
            +---+----+        +------+

        """
        # Move from BUSY to READY
        self._busy_states.remove(state_id)
        self._ready_states.append(state_id)
        self._lock.notify_all()

    @sync
    def _terminate_state(self, state_id, delete=False):
        """ Send a BUSY state to the TERMINATED list or trash it if delete is True

            +------+        +------------+
            | BUSY +------->+ TERMINATED |
            +---+--+        +------------+
                |
                v
               ###
               ###

        """
        # remove the state from the BUSY list; it must be under analysis
        if state_id not in self._busy_states:
            raise ManticoreError(
                "Can not terminate. State is not being analyzed")
        self._busy_states.remove(state_id)

        if delete:
            self._remove(state_id)
        else:
            # add the state_id to the terminated list
            self._terminated_states.append(state_id)

        # wake up everyone waiting for a change in the state lists
        self._lock.notify_all()

    @sync
    def _kill_state(self, state_id, delete=False):
        """ Send a BUSY state to the KILLED list or trash it if delete is True

            +------+        +--------+
            | BUSY +------->+ KILLED |
            +---+--+        +--------+
                |
                v
               ###
               ###

        """
        # remove the state from the BUSY list; it must be under analysis
        if state_id not in self._busy_states:
            raise ManticoreError(
                "Can not even kill it. State is not being analyzed")
        self._busy_states.remove(state_id)

        if delete:
            self._remove(state_id)
        else:
            # add the state_id to the killed list
            self._killed_states.append(state_id)

        # wake up everyone waiting for a change in the state lists
        self._lock.notify_all()

    @sync
    def kill_state(self, state, delete=False):
        """ Kill a state.
             A state is moved from any list to the kill list or fully
             removed from secondary storage

            :param state: the state to kill
            :param delete: if true remove the state from the secondary storage
            :type delete: bool
        """
        state_id = state.id
        if state_id in self._busy_states:
            self._busy_states.remove(state_id)
        if state_id in self._terminated_states:
            self._terminated_states.remove(state_id)
        if state_id in self._ready_states:
            self._ready_states.remove(state_id)

        if delete:
            self._remove(state_id)
        else:
            # add the state_id to the killed list
            self._killed_states.append(state_id)

    @property  # type: ignore
    @sync
    def ready_states(self):
        """
        Iterator over ready states.
        It supports state changes. State changes will be saved back at each iteration.

        The state data change must be done in a loop, e.g. `for state in ready_states: ...`
        as we re-save the state when the generator comes back to the function.

        This means it is not possible to change the state used by Manticore with `states = list(m.ready_states)`.
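
        A sketch of the supported pattern::

            for state in m.ready_states:
                ...  # mutate the state here; it is re-saved as iteration advances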
        """
        _ready_states = self._ready_states
        for state_id in _ready_states:
            state = self._load(state_id)
            yield state
            # Re-save the state in case the user changed its data
            self._save(state, state_id=state_id)

    @property
    def running_states(self):
        logger.warning(
            "manticore.running_states is deprecated! (You probably want manticore.ready_states)"
        )
        return self.ready_states

    @property  # type: ignore
    @sync
    def terminated_states(self):
        """
        Iterates over the terminated states.

        See also `ready_states`.
        """
        for state_id in self._terminated_states:
            state = self._load(state_id)
            yield state
            # Re-save the state in case the user changed its data
            self._save(state, state_id=state_id)

    @property  # type: ignore
    @sync
    @at_not_running
    def killed_states(self):
        """
        Iterates over the cancelled/killed states.

        See also `ready_states`.
        """
        for state_id in self._killed_states:
            state = self._load(state_id)
            yield state
            # Re-save the state in case the user changed its data
            self._save(state, state_id=state_id)

    @property  # type: ignore
    @sync
    @at_not_running
    def _all_states(self):
        """ Only allowed at not running.
            (At running we can have states at busy)
            Returns a tuple with all active state ids.
            Notably the "killed" states are not included here.
        """
        return tuple(self._ready_states) + tuple(self._terminated_states)

    @property  # type: ignore
    @sync
    def all_states(self):
        """
        Iterates over all states (ready and terminated).
        It holds a lock, so no changes to the state lists are allowed.

        Notably the cancelled states are not included here.

        See also `ready_states`.
        """
        for state_id in self._all_states:
            state = self._load(state_id)
            yield state
            # Re-save the state in case the user changed its data
            self._save(state, state_id=state_id)

    @sync
    def count_states(self):
        """ Total states count """
        return len(self._all_states)

    @sync
    def count_all_states(self):
        """ Total states count """
        return self.count_states()

    @sync
    def count_ready_states(self):
        """ Ready states count """
        return len(self._ready_states)

    @sync
    def count_busy_states(self):
        """ Busy states count """
        return len(self._busy_states)

    @sync
    def count_killed_states(self):
        """ Cancelled states count """
        return len(self._killed_states)

    @sync
    def count_terminated_states(self):
        """ Terminated states count """
        return len(self._terminated_states)

    def generate_testcase(self,
                          state,
                          message: str = "test",
                          name: str = "test") -> Testcase:
        if message == "test" and hasattr(
                state, "_terminated_by") and state._terminated_by:
            message = str(state._terminated_by)
        testcase = self._output.testcase(prefix=name)
        with testcase.open_stream("pkl", binary=True) as statef:
            PickleSerializer().serialize(state, statef)

        # Let the plugins generate a state based report
        for p in self.plugins:
            p.generate_testcase(state, testcase, message)

        logger.info("Generated testcase No. %d - %s", testcase.num, message)
        return testcase

    @at_not_running
    def register_plugin(self, plugin: Plugin):
        # Global enumeration of valid events
        assert isinstance(plugin, Plugin)
        assert plugin not in self.plugins, "Plugin instance already registered"
        assert getattr(plugin, "manticore",
                       None) is None, "Plugin instance already owned"

        plugin.manticore = self
        self.plugins.add(plugin)

        events = Eventful.all_events()
        prefix = Eventful.prefixes
        all_events = [x + y for x, y in itertools.product(prefix, events)]
        for event_name in all_events:
            callback_name = f"{event_name}_callback"
            callback = getattr(plugin, callback_name, None)
            if callback is not None:
                self.subscribe(event_name, callback)

        # Safety checks
        for callback_name in dir(plugin):
            if callback_name.endswith("_callback"):
                event_name = callback_name[:-9]
                if event_name not in all_events:
                    logger.warning(
                        "There is no event named %s for callback on plugin %s",
                        event_name,
                        type(plugin).__name__,
                    )

        for event_name in all_events:
            for plugin_method_name in dir(plugin):
                if event_name in plugin_method_name:
                    if not plugin_method_name.endswith("_callback"):
                        if (plugin_method_name.startswith("on_")
                                or plugin_method_name.startswith("will_")
                                or plugin_method_name.startswith("did_")):
                            logger.warning(
                                "Plugin methods named '%s()' should end with '_callback' on plugin %s",
                                plugin_method_name,
                                type(plugin).__name__,
                            )
                    if (plugin_method_name.endswith("_callback")
                            and not plugin_method_name.startswith("on_")
                            and not plugin_method_name.startswith("will_")
                            and not plugin_method_name.startswith("did_")):
                        logger.warning(
                            "Plugin methods named '%s()' should start with 'on_', 'will_' or 'did_' on plugin %s",
                            plugin_method_name,
                            type(plugin).__name__,
                        )

        plugin.on_register()
        return plugin

    @at_not_running
    def unregister_plugin(self, plugin):
        """ Removes a plugin from manticore.
            No events should be sent to it after
        """
        assert plugin in self.plugins, "Plugin instance not registered"
        plugin.on_unregister()
        self.plugins.remove(plugin)
        plugin.manticore = None

    def subscribe(self, name, callback):
        """ Register a callback to an event"""
        from types import MethodType

        if not isinstance(callback, MethodType):
            callback = MethodType(callback, self)
        super().subscribe(name, callback)

    @property  # type: ignore
    @at_not_running
    def context(self):
        """ Convenient access to shared context. We maintain a local copy of the
            shared context during the time manticore is not running.
            This local context is copied to the shared context when a run starts
            and copied back when a run finishes
        """
        return self._shared_context

    @contextmanager
    def locked_context(self, key=None, value_type=list):
        """
        A context manager that provides safe parallel access to the global
        Manticore context. This should be used to access the global Manticore
        context when parallel analysis is activated. Code within the `with` block
        is executed atomically, so access to shared variables should occur within it.

        Example use::

            with m.locked_context() as context:
                visited = context['visited']
                visited.append(state.cpu.PC)
                context['visited'] = visited

        Optionally, parameters can specify a key and type for the object paired to this key.::

            with m.locked_context('feature_list', list) as feature_list:
                feature_list.append(1)

        Note: If standard (non-proxy) list or dict objects are contained in a
        referent, modifications to those mutable values will not be propagated
        through the manager because the proxy has no way of knowing when the
        values contained within are modified. However, storing a value in a
        container proxy (which triggers a __setitem__ on the proxy object) does
        propagate through the manager and so to effectively modify such an item,
        one could re-assign the modified value to the container proxy, e.g.
        (the ``stats`` and ``hist`` keys below are illustrative)::

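            with m.locked_context('stats', dict) as stats:
                hist = stats.get('hist', [])   # a plain list inside the proxy
                hist.append(state.cpu.PC)
                stats['hist'] = hist           # re-assignment propagates the change
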
        :param object key: Storage key
        :param value_type: type of value associated with key
        :type value_type: list or dict or set
        """
        with self._lock:
            if key is None:
                # If no key is provided we yield the raw shared context under a lock
                yield self._shared_context
            else:
                # if a key is provided we yield the specific value or a fresh one
                if value_type not in (list, dict):
                    raise TypeError("Type must be list or dict")
                if hasattr(self, "_context_value_types"):
                    value_type = self._context_value_types[value_type]
                context = self._shared_context
                if key not in context:
                    context[key] = value_type()
                yield context[key]

    ############################################################################
    # Public API

    @sync
    def wait(self, condition):
        """ Waits for the condition callable to return True """
        self._lock.wait_for(condition)

    @sync
    def kill(self):
        """ Attempt to cancel and kill all the workers.
            Workers must terminate
            RUNNING, STANDBY -> KILLED
        """
        self._publish("will_terminate_execution", self._output)
        self._killed.value = True
        self._lock.notify_all()
        self._publish("did_terminate_execution", self._output)

    def terminate(self):
        logger.warning(
            "manticore.terminate is deprecated (Use manticore.kill)")
        self.kill()

    @sync
    def is_running(self):
        """ True if workers are exploring BUSY states or waiting for READY states """
        # If there are still states in the BUSY list then the STOP/KILL event
        # was not yet answered
        # We know that BUSY states can only decrease after a stop is requested
        return self._running.value

    @sync
    def is_killed(self):
        """ True if workers are killed. It is safe to join them """
        # If there are still states in the BUSY list then the STOP/KILL event
        # was not yet answered
        # We know that BUSY states can only decrease after a kill is requested
        return self._killed.value

    @property
    def workspace(self):
        return self._output.store.uri

    @contextmanager
    def kill_timeout(self, timeout=None):
        """ A convenient context manager that will kill a manticore run after
            timeout seconds
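
            A minimal sketch (``m`` is a Manticore instance)::

                with m.kill_timeout(timeout=20):
                    m.run()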
        """
        if timeout is None:
            timeout = consts.timeout

        # Run forever if timeout is not positive
        if timeout <= 0:
            try:
                yield
            finally:
                return

        # THINKME kill grabs the lock. Isn't this a deadlock hazard?
        timer = threading.Timer(timeout, self.kill)
        timer.start()

        try:
            yield
        finally:
            timer.cancel()

    @at_not_running
    def run(self):
        """
        Runs analysis.
        """
        # Delete state cache
        # The cached version of a state may get out of sync if a worker in a
        # different process modifies the state
        self.stcache = weakref.WeakValueDictionary()

        # Lazy process start. At the first run() the workers are not forked.
        # This actually starts the worker procs/threads
        if self.subscribe:
            # User subscription to events is disabled from now on
            self.subscribe = None

        self._publish("will_run", self.ready_states)
        self._running.value = True
        # start all the workers!
        for w in self._workers:
            w.start()

        # Main process. Lets just wait and capture CTRL+C at main
        with WithKeyboardInterruptAs(self.kill):
            with self._lock:
                while (self._busy_states
                       or self._ready_states) and not self._killed.value:
                    self._lock.wait()

        # Join all the workers!
        for w in self._workers:
            w.join()

        with self._lock:
            assert (not self._busy_states and not self._ready_states) or self._killed.value

            if self.is_killed():
                logger.debug(
                    "Killed. Moving all remaining ready states to killed list")
                # move all READY to KILLED:
                while self._ready_states:
                    self._killed_states.append(self._ready_states.pop())

        self._running.value = False
        self._publish("did_run")
        assert not self.is_running()

    @sync
    @at_not_running
    def remove_all(self):
        """
            Deletes all streams from storage and cleans the state lists
        """
        for state_id in self._all_states:
            self._remove(state_id)

        del self._ready_states[:]
        del self._busy_states[:]
        del self._terminated_states[:]
        del self._killed_states[:]

    def finalize(self):
        """
        Generate a report testcase for every state in the system and remove
        all temporary files/streams from the workspace
        """
        self.kill()
        for state in self.all_states:
            self.generate_testcase(state)
        self.remove_all()

    ############################################################################

    def save_run_data(self):
        with self._output.save_stream("command.sh") as f:
            f.write(" ".join(map(shlex.quote, sys.argv)))

        with self._output.save_stream("manticore.yml") as f:
            config.save(f)

        logger.info("Results in %s", self._output.store.uri)
Example #4
    def run(self, shuffle=False):
        """
        Runs all experiments. Blocks until all experiments are finished.
        """

        # Setup multiprocessing logging
        manager = SyncManager()
        manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
        result_log_queue = manager.Queue()
        self.cancel_experiments = manager.Value('b', 0)
        self.interrupt_condition = manager.Condition()
        listener = multiprocessing.Process(target=result_log_listener,
                                           args=(result_log_queue, setup_result_logger, self.result_log_name,))
        listener.start()

        # Setup callback throttling
        result_lock = Lock()
        callback_lock = Lock()
        self.callback_disabled = False

        def call_callback(experiment_id=None, pause=0):
            with callback_lock:
                with result_lock:
                    if pause:
                        output_status(prefix='C')
                    self.last_callback = datetime.now()
                    self.save_results()
                    sys.stdout.write('Results saved. ')
                    sys.stdout.flush()
                    if not self.callback_disabled:
                        sys.stdout.write('Digesting results ... ')
                        sys.stdout.flush()

                        try:
                            self.update_callback(experiment_id)
                        except Exception as ex:  # pylint: disable=W
                            sys.stdout.write('errored with {}\n\n\n'.format(ex.__class__))
                            self.callback_disabled = True
                        else:
                            sys.stdout.write('done\n')
                    else:
                        sys.stdout.write('Digestion disabled, due to previous exception.\n')

        self.last_callback = datetime.now()

        # setup process pool
        self.jobs_total = len(self.experiments)
        start_time = datetime.now()
        print("Using up to %i CPUs %s" %
              (self.cpu_limit,
               'with numpy multi-threading disabled' if os.environ.get('OMP_NUM_THREADS', None) == '1' else ''))
        with multiprocessing.Pool(self.cpu_limit) as pool:

            # print status function
            def output_status(prefix='F'):
                progress = self.jobs_finished / self.jobs_total
                elapsed_time = datetime.now() - start_time
                errors = "" if self.jobs_errored == 0 else " %i ERRORED, " % self.jobs_errored
                sys.stdout.write(
                    ("%s %s: %i jobs, %i finished, %i queued," + errors + " %.0f%%, ~remaining: %s\n") %
                    (
                        prefix,
                        datetime.now().strftime('%c'),
                        self.jobs_total,
                        self.jobs_finished,
                        self.jobs_total - self.jobs_finished,
                        progress * 100,
                        timedelta(seconds=(elapsed_time * (1 - progress) / progress).total_seconds() // 15 * 15)
                        if progress > 0 else '???',
                    )
                )

            # define callbacks, they are run within the main process, but in separate threads
            def update_status(result=None):
                from pandas import DataFrame
                with result_lock:
                    self.jobs_finished += 1
                    output_status()
                    if not result:
                        return

                    row = {}
                    experiment = self.experiments[result.experiment_id]
                    row.update({
                        'experiment_id': result.experiment_id,
                        'experiment_hash': experiment.hash,
                        'experiment': experiment.__class__.__name__,
                    })
                    row.update(experiment.parameters._asdict())
                    row.update(result._asdict())
                    self.results = self.results.append(DataFrame([row]), sort=True)

                # If there is already a callback waiting, we will replace it and therefore cancel it
                if self.next_callback and self.next_callback.is_alive():
                    sleep(0)  # let other threads run first
                    if callback_lock.acquire(blocking=False):
                        self.next_callback.cancel()
                        callback_lock.release()
                    else:
                        # the callback is currently waiting for the result_lock
                        return

                # Schedule callback either immediately (0) or after the pause expired
                pause = max(0.0, self.update_callback_min_pause - (datetime.now() - self.last_callback).total_seconds())
                self.next_callback = Timer(
                    pause,
                    call_callback,
                    args=[result.experiment_id, pause],
                )
                self.next_callback.start()

            def update_status_error(exception):
                if isinstance(exception, ExperimentCanceledException):
                    return
                print('Experiment exception: ', exception, file=sys.stderr)
                traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
                self.jobs_errored += 1
                self.exceptions.append(exception)
                update_status()

            # randomize order
            experiments = list(self.experiments.values())
            if shuffle:
                random.seed(0xdeadbeef)
                random.shuffle(experiments)

            # filter loaded experiments
            if not self.results.empty:
                known_hashes = [ex.hash for ex in experiments]
                len_before = len(experiments)
                loaded_experiment_hashes = self.results.loc[:, ['experiment_hash']].values[:, 0]
                experiments = [ex for ex in experiments if ex.hash not in loaded_experiment_hashes]
                if loaded_experiment_hashes.size:
                    print('Continuing from %s' % self.results_file)
                    self.jobs_finished = len_before - len(experiments)

                # check for experiments with results that we don't know
                unknown_experiments = self.results.loc[~self.results['experiment_hash'].isin(known_hashes)]
                if not unknown_experiments.empty:
                    print('@' * 80)
                    print('Results file %s contains %i results that are not in the study\'s' %
                          (self.results_file, len(unknown_experiments)))
                    print('experiment definition. Did you delete experiments from your study?')
                    print('@' * 80)

            # experiment execution
            for experiment in experiments:
                # Assign experiment to GPU (if used) : might be replaced by more sophisticated load balancer
                if self.gpu_limit > 0:
                    gpu_num = self.gpu_counter % self.gpu_limit
                    experiment.assign_to_gpu(gpu_num)
                    self.gpu_counter += 1
                # Add experiment to execution queue
                pool.apply_async(
                    experiment.execute,
                    (result_log_queue, self.result_log_name, self.cancel_experiments, self.interrupt_condition),
                    callback=update_status,
                    error_callback=update_status_error,
                )

            def signal_handler(_sig, _frame):
                self.num_int += 1
                if self.num_int > 1:
                    print("Killing all processes.")
                    sys.exit(1)
                print(
                    "\rPerforming graceful shutdown... (Press CTRL-C again to force. This might result in data loss.)")
                with self.interrupt_condition:
                    self.cancel_experiments.value = 1
                    self.interrupt_condition.notify_all()

            signal.signal(signal.SIGINT, signal_handler)

            # show status, then block until all jobs are done
            output_status()
            pool.close()

            pool.join()

            if self.next_callback and self.next_callback.is_alive():
                with callback_lock:
                    self.next_callback.cancel()

            call_callback()

            # quit logger
            result_log_queue.put(None)  # trigger listener to quit
            listener.join()

            # check if we got any exceptions as results
            if self.exceptions:
                raise FailedExperimentsException(self.exceptions)
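
A minimal, self-contained sketch of the cooperative-cancellation pattern used
above, i.e. a manager-backed flag plus a condition variable shared with pool
workers (the worker body and timings are illustrative):

import multiprocessing
import signal
from multiprocessing.managers import SyncManager

def worker(cancel, interrupt_condition, seconds):
    # A real experiment would poll `cancel` between computation steps.
    with interrupt_condition:
        interrupt_condition.wait_for(lambda: cancel.value == 1, timeout=seconds)
    return seconds

if __name__ == '__main__':
    manager = SyncManager()
    manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    cancel = manager.Value('b', 0)
    interrupt_condition = manager.Condition()
    with multiprocessing.Pool(2) as pool:
        jobs = [pool.apply_async(worker, (cancel, interrupt_condition, s))
                for s in (5, 10)]
        with interrupt_condition:
            cancel.value = 1                  # request graceful shutdown
            interrupt_condition.notify_all()  # wake any waiting workers
        pool.close()
        pool.join()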
Example #5
class Executor(Eventful):
    '''
    The executor guides the execution of a single state, handles state forking
    and selection, maintains run statistics and handles all exceptional
    conditions (system calls, memory faults, concretization, etc.)
    '''

    _published_events = {'enqueue_state', 'generate_testcase', 'fork_state', 'load_state', 'terminate_state'}

    def __init__(self, initial=None, store=None, policy='random', context=None, **kwargs):
        super().__init__(**kwargs)

        # Signals / Callbacks handlers will be invoked potentially at different
        # worker processes. State provides a local context to save data.

        self.subscribe('did_load_state', self._register_state_callbacks)

        # This is the global manager that will handle all shared memory access among workers
        self.manager = SyncManager()
        self.manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))

        # The main executor lock. Acquire this for accessing shared objects
        self._lock = self.manager.Condition()

        # Shutdown Event
        self._shutdown = self.manager.Event()

        # States on storage. Shared dict state name ->  state stats
        self._states = self.manager.list()

        # Number of currently running workers. Initially no running workers
        self._running = self.manager.Value('i', 0)

        self._workspace = Workspace(self._lock, store)

        # Executor wide shared context
        if context is None:
            context = {}
        self._shared_context = self.manager.dict(context)

        # scheduling priority policy (wip)
        # Set policy
        policies = {'random': Random,
                    'uncovered': Uncovered,
                    'branchlimited': BranchLimited,
                    }
        self._policy = policies[policy](self)
        assert isinstance(self._policy, Policy)

        if self.load_workspace():
            if initial is not None:
                logger.error("Ignoring initial state")
        else:
            if initial is not None:
                self.add(initial)

    def __del__(self):
        self.manager.shutdown()

    @contextmanager
    def locked_context(self, key=None, default=dict):
        ''' Executor context is a shared memory object. All workers share this.
            It needs a lock. It's used like this:

            with executor.context() as context:
                visited = context['visited']
                visited.append(state.cpu.PC)
                context['visited'] = visited
        '''
        assert default in (list, dict, set)
        with self._lock:
            if key is None:
                yield self._shared_context
            else:
                sub_context = self._shared_context.get(key, None)
                if sub_context is None:
                    sub_context = default()
                yield sub_context
                self._shared_context[key] = sub_context

    def _register_state_callbacks(self, state, state_id):
        '''
            Install forwarding callbacks in state so the events can go up.
            Going up, we prepend state in the arguments.
        '''
        # Forward all state signals
        self.forward_events_from(state, True)

    def enqueue(self, state):
        '''
            Enqueue state.
            Save state on storage, assigns an id to it, then add it to the
            priority queue
        '''
        # save the state to secondary storage
        state_id = self._workspace.save_state(state)
        self.put(state_id)
        self._publish('did_enqueue_state', state_id, state)
        return state_id

    def load_workspace(self):
        # Browse and load states in a workspace in case we are trying to
        # continue from paused run
        loaded_state_ids = self._workspace.try_loading_workspace()
        if not loaded_state_ids:
            return False

        for id in loaded_state_ids:
            self._states.append(id)

        return True

    ###############################################
    # Synchronization helpers
    @sync
    def _notify_start_run(self):
        # notify siblings we are about to start a run()
        self._running.value += 1

    @sync
    def _notify_stop_run(self):
        # notify siblings we are about to stop this run()
        self._running.value -= 1
        if self._running is None or self._running.value < 0:
            raise SystemExit
        self._lock.notify_all()

    ################################################
    # Public API
    @property
    def running(self):
        ''' Report an estimate of how many workers are currently running '''
        return self._running.value

    def shutdown(self):
        ''' This will stop all workers '''
        self._shutdown.set()

    def is_shutdown(self):
        ''' Returns True if shutdown was requested '''
        return self._shutdown.is_set()

    ###############################################
    # Priority queue
    @sync
    def put(self, state_id):
        ''' Enqueue it for processing '''
        self._states.append(state_id)
        self._lock.notify_all()
        return state_id

    @sync
    def get(self):
        ''' Dequeue a state with the max priority '''

        # A shutdown has been requested
        if self.is_shutdown():
            return None

        # if no more states are in the queue, wait for some forks
        while len(self._states) == 0:
            # if no worker is running, bail out
            if self.running == 0:
                return None
            # if a shutdown has been requested, bail out
            if self.is_shutdown():
                return None
            # some workers are still running, so wait for them to fork new states
            logger.debug("Waiting for available states")
            self._lock.wait()

        state_id = self._policy.choice(list(self._states))
        if state_id is None:
            return None
        self._states.remove(state_id)
        return state_id

    def list(self):
        ''' Return the list of state ids currently queued '''
        return list(self._states)

    def generate_testcase(self, state, message='Testcase generated'):
        '''
        Simply announce that we're going to generate a testcase. Actual generation
        should be handled by the driver class (such as :class:`~manticore.Manticore`)

        :param state: The state to generate information about
        :param message: Accompanying message
        '''

        # broadcast test generation. This is the time for other modules
        # to output whatever helps to understand this testcase
        self._publish('will_generate_testcase', state, 'test', message)

    def fork(self, state, expression, policy='ALL', setstate=None):
        '''
        Fork the state on the concretizations of an expression.
        Using the given policy, build a list of solutions for the expression,
        then create one child state per solution and call setstate on it.

        For example, if expression is a Bool it may have two solutions,
        True and False:

                                 Parent
                            (expression = ??)

                   Child1                         Child2
            (expression = True)             (expression = False)
               setstate(True)                   setstate(False)

        The optional setstate() function is supposed to set the concrete value
        in the child state.

        '''
        assert isinstance(expression, Expression)

        if setstate is None:
            setstate = lambda x, y: None

        # Find a set of solutions for expression
        solutions = state.concretize(expression, policy)

        if not solutions:
            raise ExecutorError("Forking on an infeasible constraint set")

        if len(solutions) == 1:
            setstate(state, solutions[0])
            return state

        logger.info("Forking. Policy: %s. Values: %s",
                    policy,
                    ', '.join(f'0x{sol:x}' for sol in solutions))

        self._publish('will_fork_state', state, expression, solutions, policy)

        # Build and enqueue a state for each solution
        children = []
        for new_value in solutions:
            with state as new_state:
                new_state.constrain(expression == new_value)

                # and set the PC of the new state to the concrete pc-dest
                # (or set some other register or memory address to a concrete value)
                setstate(new_state, new_value)

                self._publish('did_fork_state', new_state, expression, new_value, policy)

                # enqueue new_state
                state_id = self.enqueue(new_state)
                # maintain a list of children for logging purpose
                children.append(state_id)

        logger.info("Forking current state into states %r", children)
        return None
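
    # Sketch of a typical call path (illustrative, not from this module): an
    # execution hook raises Concretize, and run() below routes it here, e.g.
    # to explore every feasible value of a symbolic program counter:
    #
    #   raise Concretize("Symbolic PC",
    #                    expression=state.cpu.PC,
    #                    policy='ALL',
    #                    setstate=lambda st, value: setattr(st.cpu, 'PC', value))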

    def run(self):
        '''
        Entry point of the Executor; called by workers to start analysis.
        '''
        # policy_order=self.policy_order
        # policy=self.policy
        current_state = None
        current_state_id = None

        with WithKeyboardInterruptAs(self.shutdown):
            # notify siblings we are about to start a run
            self._notify_start_run()

            logger.debug("Starting Manticore Symbolic Emulator Worker (pid %d).", os.getpid())
            solver = Z3Solver()
            while not self.is_shutdown():
                try:  # handle fatal errors: exceptions in Manticore
                    try:  # handle external (e.g. solver) errors, and executor control exceptions
                        # select a suitable state to analyze
                        if current_state is None:
                            with self._lock:
                                # notify siblings we are about to stop this run
                                self._notify_stop_run()
                                try:
                                    # Select a single state_id
                                    current_state_id = self.get()
                                    # load selected state from secondary storage
                                    if current_state_id is not None:
                                        self._publish('will_load_state', current_state_id)
                                        current_state = self._workspace.load_state(current_state_id)
                                        self.forward_events_from(current_state, True)
                                        self._publish('did_load_state', current_state, current_state_id)
                                        logger.info("load state %r", current_state_id)
                                    # notify siblings we have a state to play with
                                finally:
                                    self._notify_start_run()

                        # If current_state is still None, we are done
                        if current_state is None:
                            logger.debug("No more states in the queue, byte bye!")
                            break

                        assert current_state is not None
                        assert current_state.constraints is current_state.platform.constraints

                        # Allow the user to terminate the manticore worker mid-execution
                        while not self.is_shutdown():
                            if not current_state.execute():
                                break
                        else:
                            # shutdown was requested mid-execution: terminate the current state
                            self._publish('will_terminate_state', current_state, current_state_id, TerminateState('Shutdown'))
                            current_state = None

                    # Handling Forking and terminating exceptions
                    except Concretize as e:
                        # e carries the expression to concretize, the policy,
                        # and the setstate() callback
                        logger.debug("Generic state fork on condition")
                        current_state = self.fork(current_state, e.expression, e.policy, e.setstate)

                    except TerminateState as e:
                        # Notify this worker is done
                        self._publish('will_terminate_state', current_state, current_state_id, e)

                        logger.debug("Generic terminate state")
                        if e.testcase:
                            self.generate_testcase(current_state, str(e))
                        current_state = None

                    except SolverException as e:
                        # raise
                        import traceback
                        trace = traceback.format_exc()
                        logger.error("Exception: %s\n%s", str(e), trace)

                        # Notify this state is done
                        self._publish('will_terminate_state', current_state, current_state_id, e)

                        if solver.check(current_state.constraints):
                            self.generate_testcase(current_state, "Solver failed: " + str(e))
                        current_state = None

                except (Exception, AssertionError) as e:
                    # raise
                    import traceback
                    trace = traceback.format_exc()
                    logger.error("Exception: %s\n%s", str(e), trace)
                    # Notify this worker is done
                    self._publish('will_terminate_state', current_state, current_state_id, e)
                    current_state = None
                    logger.setState(None)

            assert current_state is None or self.is_shutdown()

            # notify siblings we are about to stop this run
            self._notify_stop_run()
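
One plausible way to drive this Executor (a sketch; `start_workers`, `executor`
and `nworkers` are illustrative names, not from the source): each worker
process simply calls run(), and the manager-backed lock, shared list and
counter coordinate the hand-off of states between workers.

from multiprocessing import Process

def start_workers(executor, nworkers=4):
    # Every worker runs the same loop; run() returns once the state queue
    # drains with no other worker running, or after shutdown() is called.
    workers = [Process(target=executor.run) for _ in range(nworkers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()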
Example #6
def main():
    # global server
    global ticker_condition, platforms
    global get_dict, get_condition

    # Only works with Python 3.6+; check the interpreter version up front
    if sys.version_info < (3, 6):
        print('Invalid version of Python. Requires Python 3.6 or newer')
        return

    read_config()

    manager = SyncManager()
    manager.start()

    running = manager.Event()
    running.set()

    platforms = manager.dict()
    ticker_condition = manager.Condition()

    r_queue = manager.Queue()
    r_condition = manager.Condition()

    # used for HTTP GET requests and their responses
    get_dict = manager.dict()
    get_condition = manager.Condition()

    # used for HTTP PUT and POST to issue their responses
    reply_queue = manager.Queue()
    reply_condition = manager.Condition()

    # Deliberately not using multiprocessing.Pool: a Pool would close its
    # workers, and these reply processes must stay alive with the server.
    reply_list = []
    reply_args = (running, reply_queue, reply_condition)
    for i in range(config['threads']['return_threads']):
        r = Process(target=outbound, args=reply_args, name='Reply_%s' % i)
        r.daemon = True
        reply_list.append(r)
        r.start()

    # Same reasoning as above: long-lived processes instead of a Pool.
    r_list = []
    r_args = (running, api_key, platforms, r_queue, r_condition, get_dict,
              get_condition, reply_queue, reply_condition, ticker_condition,
              platform_lock)
    for i in range(config['threads']['api_threads']):
        r = Process(target=retriever, args=r_args, name='Retriever_%s' % i)
        r.daemon = True
        r_list.append(r)
        r.start()

    ticking_args = (running, platforms, ticker_condition, r_queue, r_condition)
    ticking = Process(target=ticker, args=ticking_args, name='Ticker')
    ticking.daemon = True
    ticking.start()

    poking_args = (running, platforms, ticker_condition)
    poking = Process(target=poker, args=poking_args, name='Poker')
    poking.daemon = True
    poking.start()

    handler = CreateSyncHTTPHandler(manager)
    # HTTPServer has no hook for passing extra state to its handler, so the
    # handler class is created by CreateSyncHTTPHandler with the manager
    # already bound in.
    server = http.server.HTTPServer(
        (config['server']['host'], config['server']['port']), handler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('\nStopping server...')
        running.clear()  # signal the worker processes to exit their loops
        manager.shutdown()
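
The worker bodies are not shown here; a minimal sketch of what a reply worker
such as `outbound` might look like, assuming only the arguments passed above
(everything inside is an illustration, not the real implementation):

import queue

def outbound(running, reply_queue, reply_condition):
    # Wait on the shared Condition, drain the Queue, and stop once the
    # `running` Event is cleared by the main process.
    while running.is_set():
        with reply_condition:
            reply_condition.wait(timeout=1.0)  # woken when a reply is queued
        try:
            while True:
                reply = reply_queue.get_nowait()
                # ... deliver the HTTP response for `reply` here ...
        except queue.Empty:
            pass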
Example #7
def mgr_init():
    '''
    Initializer for the SyncManager process: ignore SIGINT there so a
    KeyboardInterrupt is delivered to, and handled by, the parent only.

    :return: None
    '''
    Signal.signal(Signal.SIGINT, Signal.SIG_IGN)


# Create and explicitly start the manager, telling it to ignore the interrupt
# signal so that Ctrl-C propagates to, and is handled by, this parent process.
manager = SyncManager()
manager.start(mgr_init)

CLIENT_STATUS = manager.dict()
CLIENT_STATUS["Sync_RX_status"] = False
CLIENT_STATUS["keyboard_disconnect"] = False
CLIENT_STATUS["keyboard_disconnect_attemp"] = 0
CLIENT_STATUS["measure_running_now"] = False

# threading condition variables for controlling Sync RX thread activity
Sync_RX_condition = manager.Condition()

@contextlib.contextmanager
def nostdout():
    """
    Silence the stdout of code running within the context.

    Example:
    >>> with nostdout():
    >>>     f0,Qi,Qr,zfit,modelwise = do_fit(...)
    """
    save_stdout = sys.stdout
    sys.stdout = io.StringIO()  # text buffer; BytesIO would make print() fail on Python 3
    try:
        yield
    finally:
        sys.stdout = save_stdout  # restore even if the body raises
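
Since Python 3.4 the standard library can do the same job; a minimal
equivalent built on contextlib.redirect_stdout (`do_quietly` and the wrapped
function are illustrative names):

import contextlib
import io

def do_quietly(fn, *args, **kwargs):
    # Capture and discard all print() output produced by fn.
    with contextlib.redirect_stdout(io.StringIO()):
        return fn(*args, **kwargs)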