Example #1
def _wrap(
    local_rank: int,
    fn: Callable,
    args: Dict[int, Tuple],
    envs: Dict[int, Dict[str, str]],
    stdout_redirects: Dict[int, str],  # redirect file for stdout (to console if None)
    stderr_redirects: Dict[int, str],  # redirect file for stderr (to console if None)
    ret_vals: Dict[int, mp.SimpleQueue],
    queue_finished_reading_event: synchronize.Event,
) -> None:
    # get the per-rank params up front so we fail fast if no mapping is found
    args_ = args[local_rank]
    env_ = envs[local_rank]
    ret_val_ = ret_vals[local_rank]

    stdout_rd = stdout_redirects[local_rank]
    stderr_rd = stderr_redirects[local_rank]

    stdout_cm = redirect_stdout(stdout_rd) if stdout_rd else _nullcontext()
    stderr_cm = redirect_stderr(stderr_rd) if stderr_rd else _nullcontext()

    # export the per-rank environment before invoking the function
    for k, v in env_.items():
        os.environ[k] = v

    with stdout_cm, stderr_cm:
        ret = record(fn)(*args_)
    ret_val_.put(ret)
    # keep this process alive until the parent has read the return value
    queue_finished_reading_event.wait()
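A parent using _wrap would typically spawn one child per local rank, hand each child the per-rank argument/env/redirect maps, drain the per-rank result queues, and only then set queue_finished_reading_event so the children may exit. A minimal sketch of such a driver using standard multiprocessing; launch_all, world_size, and the per-rank maps are illustrative assumptions, not part of the original API:

import multiprocessing as mp

def launch_all(fn, world_size):
    # Hypothetical driver: one result queue and one child process per local rank.
    ctx = mp.get_context("spawn")
    done_reading = ctx.Event()
    ret_vals = {rank: ctx.SimpleQueue() for rank in range(world_size)}
    args = {rank: (rank,) for rank in range(world_size)}
    envs = {rank: {"LOCAL_RANK": str(rank)} for rank in range(world_size)}
    redirects = {rank: None for rank in range(world_size)}  # None -> keep console output

    procs = [
        ctx.Process(
            target=_wrap,
            args=(rank, fn, args, envs, redirects, redirects, ret_vals, done_reading),
        )
        for rank in range(world_size)
    ]
    for p in procs:
        p.start()

    # Read every per-rank result before releasing the children from their final wait().
    results = {rank: ret_vals[rank].get() for rank in range(world_size)}
    done_reading.set()
    for p in procs:
        p.join()
    return results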
Example #2
def execute_worker(
    task_queue: Queue,
    continue_execution_flag: Value,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define workers work loop."""
    while True:
        if not continue_execution_flag.value:
            i_am_done_event.set()
            worker_wait_for_exit_event.wait()
        try:
            _ = task_queue.get(block=False)
            sleep(0.001)
        except Empty:
            continue
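A coordinator for this worker shares the queue, the flag, and the two events with it; flipping the flag pauses the worker, and i_am_done_event confirms that it has parked on worker_wait_for_exit_event. A rough driver sketch; every name other than execute_worker is an assumption:

from multiprocessing import Event, Process, Queue, Value

def drive_worker_briefly() -> None:
    # Hypothetical coordinator for execute_worker.
    task_queue: Queue = Queue()
    continue_execution_flag = Value("b", True)  # shared boolean-ish flag
    i_am_done_event = Event()
    worker_wait_for_exit_event = Event()

    worker = Process(
        target=execute_worker,
        args=(task_queue, continue_execution_flag, i_am_done_event, worker_wait_for_exit_event),
    )
    worker.start()

    for i in range(10):
        task_queue.put(f"task-{i}")

    # Pause the worker, wait for its acknowledgement, then tear it down while it is parked.
    continue_execution_flag.value = False
    i_am_done_event.wait()
    worker.terminate()
    worker.join()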
Example #3
def spin(msg: str, done: synchronize.Event) -> None:
    for char in itertools.cycle("|/-\\"):
        status = f"\r{char} {msg}"
        print(status, end="", flush=True)
        if done.wait(0.1):
            break
    blanks = " " * len(status)
    print(f"\r{blanks}\r", end="")
Example #4
def spin(msg: str, done: synchronize.Event) -> None:  # <1>
    for char in itertools.cycle(r'\|/-'):  # <2>
        status = f'\r{char} {msg}'  # <3>
        print(status, end='', flush=True)
        if done.wait(.1):  # <4>
            break  # <5>
    blanks = ' ' * len(status)
    print(f'\r{blanks}\r', end='')  # <6>
Example #5
def spin(msg: str, done: synchronize.Event) -> None:
    for char in itertools.cycle(r'\|/-'):
        status = f'\r{char} {msg}'
        print(status, end='', flush=True)
        if done.wait(.1):
            break
    blanks = ' ' * len(status)
    print(f'\r{blanks}\r', end='')
Example #6
def enqueue_worker(
    workload_publisher_url: str,
    task_queue: Queue,
    continue_execution_flag: Value,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Fill the queue."""
    context = Context()
    sub_socket = context.socket(SUB)
    sub_socket.connect(workload_publisher_url)
    sub_socket.setsockopt_string(SUBSCRIBE, "")

    while True:
        published_data: Dict = sub_socket.recv_json()
        if not continue_execution_flag.value:
            worker_wait_for_exit_event.wait()
        else:
            handle_published_data(published_data, task_queue)
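The publishing side that feeds this worker would bind a PUB socket to the same URL and send JSON messages. A minimal counterpart might look like the sketch below; publish_workload and the message shape are assumptions, since handle_published_data is not shown:

from time import sleep

from zmq import PUB, Context

def publish_workload(workload_publisher_url: str) -> None:
    # Hypothetical publisher feeding enqueue_worker's SUB socket.
    context = Context()
    pub_socket = context.socket(PUB)
    pub_socket.bind(workload_publisher_url)
    sleep(0.5)  # give subscribers a moment to connect before publishing
    pub_socket.send_json({"queries": ["SELECT 1;", "SELECT 2;"]})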
Example #7
def execute_queries(
    worker_id: str,
    task_queue: Queue,
    connection_pool: pool,
    continue_execution_flag: Value,
    database_id: str,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define workers work loop."""
    with PoolCursor(connection_pool) as cur:
        with StorageCursor(STORAGE_HOST, STORAGE_PORT, STORAGE_USER,
                           STORAGE_PASSWORD, database_id) as log:
            successful_queries: List[Tuple[int, int, str, str, str]] = []
            failed_queries: List[Tuple[int, str, str, str]] = []
            last_batched = time_ns()
            while True:
                if not continue_execution_flag.value:
                    i_am_done_event.set()
                    worker_wait_for_exit_event.wait()

                try:
                    task: Tuple[
                        str,
                        Tuple[Tuple[Union[str, int], Optional[str]], ...],
                        str,
                        str,
                    ] = task_queue.get(block=False)
                    query, not_formatted_parameters, workload_type, query_type = task
                    query = query.replace("[STREAM_ID]", str(worker_id))
                    formatted_parameters = get_formatted_parameters(
                        not_formatted_parameters)
                    endts, latency = execute_task(cur, query,
                                                  formatted_parameters)
                    successful_queries.append(
                        (endts, latency, workload_type, query_type, worker_id))
                except Empty:
                    continue
                except (ValueError, Error) as e:
                    failed_queries.append(
                        (time_ns(), worker_id, str(task), str(e)))

                last_batched = log_results(log, last_batched,
                                           successful_queries, failed_queries)
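log_results is not shown, but from the call site it presumably flushes the two result lists once enough time has passed since last_batched and returns the new timestamp. A plausible sketch of that batching helper; the flush interval and the StorageCursor method names are assumptions:

from time import time_ns

def log_results(log, last_batched, successful_queries, failed_queries,
                batch_interval_ns: int = 1_000_000_000):
    # Hypothetical batching helper: flush collected results roughly once per second.
    now = time_ns()
    if now - last_batched < batch_interval_ns:
        return last_batched
    if successful_queries:
        log.log_queries(successful_queries)  # assumed StorageCursor method
        successful_queries.clear()
    if failed_queries:
        log.log_failed_queries(failed_queries)  # assumed StorageCursor method
        failed_queries.clear()
    return now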
Example #8
def _handle_lock(event_in: synchronize.Event, event_out: synchronize.Event,
                 path: str) -> None:
    """
    Acquire a file lock on given path, then wait to release it. This worker is coordinated
    using events to signal when the lock should be acquired and released.
    :param multiprocessing.Event event_in: event object to signal when to release the lock
    :param multiprocessing.Event event_out: event object to signal when the lock is acquired
    :param path: the path to lock
    """
    if os.path.isdir(path):
        my_lock = lock.lock_dir(path)
    else:
        my_lock = lock.LockFile(path)
    try:
        event_out.set()
        assert event_in.wait(timeout=20), 'Timeout while waiting to release the lock.'
    finally:
        my_lock.release()
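A parent using _handle_lock would typically start it in a subprocess, wait on event_out to know the lock is held, run whatever should now contend for the lock, and finally set event_in so the worker releases it. A rough sketch; hold_lock_on is an assumed helper name:

from contextlib import contextmanager
from multiprocessing import Event, Process

@contextmanager
def hold_lock_on(path: str):
    # Hypothetical helper: hold a lock on `path` from a child process for the
    # duration of the with-block.
    event_in, event_out = Event(), Event()
    worker = Process(target=_handle_lock, args=(event_in, event_out, path))
    worker.start()
    try:
        assert event_out.wait(timeout=20), 'Timeout while waiting to acquire the lock.'
        yield  # the child holds the lock while this block runs
    finally:
        event_in.set()  # let the child release the lock
        worker.join()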