Example #1
    def execute(self, state, log_errors=True):
        """
        Execute the ``Actions`` in order. If an Action raises
        :class:`CancelWalk`, we will stop executing immediately.
        :return: True if the Walk was run successfully or `CancelWalk` was raised, False otherwise.
        """
        try:
            for op in self:
                # I will leave it up to the user to decide to log or not, and
                # the appropriate verbosity.
                op(state=state)
        except CancelWalk:
            # Likewise here: let the user decide to log or not in their layer
            return True
        except Exception as e:
            msg_state = "Walk was: %s\nstate: %s" % (repr(self),
                                                     encode.encode(state))
            if log_errors:
                logger.exception(e)
                logger.error(msg_state)

            new_msg = "Walk failed:\n"
            new_msg += traceback.format_exc()
            new_msg += "\n" + msg_state
            wfe = WalkFailedError(new_msg)
            if hasattr(e, 'errno'):
                wfe.errno = e.errno
            raise wfe

        return False
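
A minimal sketch of driving ``execute``, assuming a hypothetical ``Walk`` built from two plain-callable Actions (the constructor and action names are assumptions, not combtest's documented API):

    # Hypothetical driver: CancelWalk -> True, clean run -> False,
    # anything else -> WalkFailedError
    walk = Walk([make_dir_action, write_file_action])
    state = {"path": "/tmp/example"}
    try:
        canceled = walk.execute(state, log_errors=True)
    except WalkFailedError as e:
        print("walk failed: %s" % e)
    else:
        print("canceled early" if canceled else "ran to completion")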
Example #2
    def exposed_gather_full_state(self, worker_id):
        try:
            runner = self._runners[worker_id]
        except KeyError:
            # Unknown worker_id; signal "no state" to the caller
            return None

        states = runner.get_full_states(full=False)
        return encode.encode(states)
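
Since the method is ``exposed_``-prefixed, it is presumably served over RPyC; a sketch of the calling side (the connection object ``conn`` and its setup are assumptions):

    # Hypothetical caller: decode the JSON payload, or get None back
    encoded = conn.root.gather_full_state(worker_id)
    states = encode.decode(encoded) if encoded is not None else None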
Example #3
    def _flush_queue(self,
                     client_key,
                     client,
                     queue,
                     max_thread_count=None,
                     state=None,
                     serial_action=None):
        """
        Send the queued, already-encoded work items to ``client`` and clear
        the queue in place.

        :return: the worker_id the client assigned to this batch
        """
        logger.debug("Sending %d items to %s", len(queue), str(client_key))

        # Shallow-copy so each dispatch gets its own top-level state object
        state = copy.copy(state)
        if serial_action is not None:
            serial_action = encode.encode(serial_action)
            repack_kwargs = {'serial_action': serial_action}
        else:
            repack_kwargs = None

        worker_id = client.start_work(queue,
                                      state,
                                      max_thread_count=max_thread_count,
                                      repack_kwargs=repack_kwargs)
        # Clear in place so the caller's queue reflects the flush
        del queue[:]
        return worker_id
Example #4
def replay_walk(walk_to_run, step=False, log_errors=True, state=None):
    """
    Run a single :class:`combtest.walk.Walk`

    :param Walk walk_to_run: the Walk to execute
    :param bool step: if True, step Action-by-Action through the Walk; the user
                      hits a key to proceed to the next Action.
    :param bool log_errors: log exceptions to the logger if True
    :param object state: state passed to the Walk for execution.
    """
    try:
        for op in walk_to_run:
            op(state=state)

            if step:
                print(str(type(op)))
                # raw_input under Python 2; input is its Python 3 equivalent
                input("Press key to continue...")

    except CancelWalk:
        return True
    except Exception as e:
        msg_state = "Walk was: %s\nstate: %s" % (repr(walk_to_run),
                                                 encode.encode(state))

        if log_errors:
            logger.exception(e)
            logger.error(msg_state)

        new_msg = "Walk failed:\n"
        new_msg += traceback.format_exc()
        new_msg += "\n" + msg_state
        wfe = WalkFailedError(new_msg)
        if hasattr(e, 'errno'):
            wfe.errno = e.errno
        raise wfe

    return False
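
A sketch of replaying one Walk interactively, assuming the Walk was recovered from a trace (the file name and decode step are assumptions):

    # Hypothetical replay of a previously logged walk
    with open("walk_00042.json") as f:
        walk = encode.decode(f.read())
    try:
        canceled = replay_walk(walk, step=True, state={"run_id": 42})
    except WalkFailedError:
        logger.exception("replay failed")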
Example #5
    def next(inner_self):
        # Python 2 iterator protocol: return the next item, JSON-encoded
        return encode.encode(inner_self.sc.next())
Example #6
    def as_json(self):
        return encode.encode(self)
Example #7
    def trace(self, **op_info):
        # One encoded record per line, newline-delimited
        info = encode.encode(op_info) + "\n"
        self.write(info)
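
Each trace record lands as one JSON object per line, so a log can be read back record by record; a sketch (the file name and per-line decode are assumptions):

    # Decode newline-delimited trace records
    with open("trace.log") as f:
        for line in f:
            op_info = encode.decode(line)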
Example #8
    def scatter_work(self, work, max_thread_count=None, state=None):
        """
        Partition the provided iterable of work into roughly even-sized
        portions and send them to each of the remote services. ``state`` will
        be copied to each node independently. The user must handle the logic of
        retrieving results and stitching them together. See
        :func:`gather_state`.

        :param iterable work: iterable of work items
        :param int max_thread_count: override of how many threads each remote
                                     executor should have
        :param object state: make sure it is picklable
        :return: a dict mapping (hostname/ip, port) -> worker_id
        """
        if self._give_up:
            return {}

        out_queues = []
        clients = self.clients
        service_count = len(clients)
        worker_ids = [None] * service_count
        keys = list(self.clients.keys())
        for _ in range(service_count):
            out_queues.append([])

        # NOTE: do we want to track the worker_ids of all the work we
        # started, i.e. keep them on this instance?
        client_idx = 0
        work_item_counts = {}
        for work_item in work:
            if self._give_up:
                break

            current_q = out_queues[client_idx]
            current_q.append(encode.encode(work_item))

            current_key = keys[client_idx]
            if len(current_q) == self.WORK_QUANTUM_SIZE:

                if worker_ids[client_idx] is None:
                    logger.debug("Sending %d items to %s", len(current_q),
                                 str(current_key))
                    worker_id = clients[current_key].start_work(
                        current_q,
                        max_thread_count=max_thread_count,
                        state=state)
                    worker_ids[client_idx] = worker_id
                else:
                    logger.debug("Sending %d items to %s", len(current_q),
                                 str(current_key))
                    worker_id = worker_ids[client_idx]
                    clients[current_key].add_work(worker_id, current_q)

                out_queues[client_idx] = []

            client_idx += 1
            client_idx %= service_count

            if current_key not in work_item_counts:
                work_item_counts[current_key] = 0
            work_item_counts[current_key] += 1

        # Tail dump any work not yet flushed
        for client_idx, current_q in enumerate(out_queues):
            if self._give_up:
                break

            if current_q:
                current_key = keys[client_idx]
                if worker_ids[client_idx] is None:
                    logger.debug("Sending %d items to %s", len(current_q),
                                 str(current_key))
                    worker_id = clients[current_key].start_work(
                        current_q,
                        max_thread_count=max_thread_count,
                        state=state)
                    worker_ids[client_idx] = worker_id
                else:
                    logger.debug("Sending %d items to %s", len(current_q),
                                 str(current_key))
                    worker_id = worker_ids[client_idx]
                    clients[current_key].add_work(worker_id, current_q)

        worker_ids_out = {}
        for client_idx, current_key in enumerate(keys):
            worker_ids_out[current_key] = worker_ids[client_idx]

        logger.info("Started %s work items", str(work_item_counts))
        return worker_ids_out
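
A sketch of the caller's side, assuming a coordinator instance already holds connected clients (the instance name, thread count, and state contents are assumptions):

    # Scatter encodable work items across all remote services
    worker_ids = coordinator.scatter_work(work_items,
                                          max_thread_count=8,
                                          state={"seed": 1234})
    for client_key, worker_id in worker_ids.items():
        print("%s -> worker %s" % (str(client_key), worker_id))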
Example #9
                        type=str,
                        help="Qualname for function used for replay; see %s" %
                        DEFAULT_REPLAY_FUNC_NAME,
                        default=DEFAULT_REPLAY_FUNC_NAME)
    parser.add_argument('--state',
                        type=str,
                        help="state provided as a "
                        "JSON string, decodable by %s" %
                        utils.get_class_qualname(encode.decode))
    parser.add_argument('walk_id', type=int)
    parser.add_argument('--print_state', action='store_true')
    args = parser.parse_args()

    command = args.command

    step = command == 'step'

    if args.state:
        state = encode.decode(args.state)
    else:
        state = None

    state = replay_walk_by_id(args.log_file,
                              args.walk_id,
                              step=step,
                              replay_func_qualname=args.replay_func,
                              state=state)

    if args.print_state:
        print(encode.encode(state))
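
The --state string must be JSON that encode.decode can turn back into the state object; a round-trip sketch (the dict contents are made up):

    # What gets passed on the command line is just encode.encode output
    original = {"mount_point": "/mnt/test", "attempt": 1}
    assert encode.decode(encode.encode(original)) == original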
Example #10
    def scatter_work(self,
                     epoch,
                     id_map=None,
                     max_thread_count=None,
                     state=None):
        """
        Scatter some ``Walk`` segments out to the remote workers.

        :param iterable epoch: iterable of (walk_id, branch_id, Walk)
        :param dict id_map: optional map walk_id->(ip, port) of service
                            currently holding that Walk's state.
        :return: an updated ``id_map``
        """
        # Holds queued work for each IP
        # Maps client_key->work queue
        # Flushed when quantum size is hit, and at tail flush
        out_queues = {}

        total_count = 0

        clients = self.clients

        id_map = id_map or self.id_map
        key_idx = 0
        keys = list(self.clients.keys())
        key_count = len(keys)

        # Maps (ip, port)->worker ids
        worker_ids = {}
        for key in keys:
            worker_ids[key] = []

        # NOTE: 'walk_to_run' is in fact (walk_id, branch_id, Walk)
        for walk_to_run in epoch:
            walk_id, branch_id, actual_walk = walk_to_run

            if walk_id in id_map:
                target_key = id_map[walk_id]
            else:
                # Assign the next client
                target_key = keys[key_idx]
                key_idx += 1
                key_idx %= key_count
                id_map[walk_id] = target_key

            if target_key not in out_queues:
                out_queues[target_key] = []
            current_queue = out_queues[target_key]

            jsonified = encode.encode((walk_id, branch_id, actual_walk))
            current_queue.append(jsonified)
            if len(current_queue) == self.WORK_QUANTUM_SIZE:
                total_count += len(current_queue)
                worker_id = self._flush_queue(
                    target_key,
                    clients[target_key],
                    current_queue,
                    max_thread_count=max_thread_count,
                    state=state,
                    serial_action=epoch.serial_action)
                worker_ids[target_key].append(worker_id)

        # Tail flush: send anything not yet dispatched. Queues that hit the
        # quantum size above were already emptied in place, so skip them.
        for target_key, queue in out_queues.items():
            if not queue:
                continue

            client = clients[target_key]
            total_count += len(queue)
            worker_id = self._flush_queue(target_key,
                                          client,
                                          queue,
                                          max_thread_count=max_thread_count,
                                          state=state,
                                          serial_action=epoch.serial_action)
            worker_ids[target_key].append(worker_id)

        self.id_map = id_map
        return id_map, total_count, worker_ids
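
A sketch of driving an epoch through this method (the coordinator instance and epoch object are assumptions; per the code above, ``epoch`` yields (walk_id, branch_id, Walk) triples and carries a ``serial_action`` attribute):

    # Scatter one epoch, then reuse the returned id_map for the next one
    id_map, total, worker_ids = coordinator.scatter_work(epoch, id_map=id_map)
    logger.info("sent %d segments to %d clients", total, len(worker_ids))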