Example #1
def test_is_cache_enabled():
    # is_cache_enabled and NOT_PYTHON_3_8 are provided by the PyCOMPSs piper
    # worker cache helpers; their imports (and "import sys") are omitted in
    # this snippet.
    if sys.version_info >= (3, 8):
        case1 = is_cache_enabled("true")
        assert case1, "Unexpected return. Expected: <bool> True"
        case2 = is_cache_enabled("True")
        assert not case2, "Unexpected return. Expected: <bool> False"
        case3 = is_cache_enabled("true:1000")
        assert case3, "Unexpected return. Expected: <bool> True"
        case4 = is_cache_enabled("True:1000")
        assert not case4, "Unexpected return. Expected: <bool> False"
    else:
        print(NOT_PYTHON_3_8)
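
The assertions above only pin down observable behaviour: the check is case-sensitive and tolerates an optional ":<size>" suffix. A minimal sketch of a function with that behaviour (an illustration, not the actual PyCOMPSs implementation) could look like this:

def is_cache_enabled_sketch(cache_config):
    # type: (str) -> bool
    # Accepts "true" or "true:<size>" (case-sensitive), e.g. "true:1000".
    return cache_config.split(":")[0] == "true"

assert is_cache_enabled_sketch("true")
assert not is_cache_enabled_sketch("True")
assert is_cache_enabled_sketch("true:1000")
assert not is_cache_enabled_sketch("True:1000")
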
Example #2
def main():
    # type: () -> None
    """ Main mpi piper worker

    :return: None
    """
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE

    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if performed
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    persistent_storage = (WORKER_CONF.storage_conf != 'null')
    _, _, _, log_dir = load_loggers(WORKER_CONF.debug, persistent_storage)

    cache_profiler = False
    if WORKER_CONF.cache_profiler.lower() == 'true':
        cache_profiler = True

    if is_worker():
        # Setup cache
        if is_cache_enabled(str(WORKER_CONF.cache)):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, str(WORKER_CONF.cache),
                                       cache_profiler, log_dir)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)  # type: ignore
    else:
        # Otherwise it is an executor
        cache = False  # the cache is stopped only from the main worker process
        cache_params = (None, None, None, None)  # type: ignore
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_profiler, cache_process)  # noqa
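
Both branches of the worker/executor selection above use the same idiom: a real tracing context manager when TRACING is set, otherwise a no-op context. A minimal sketch of that pattern (tracing_region is a hypothetical stand-in for trace_mpi_worker()/trace_mpi_executor(); contextlib.nullcontext plays the role of dummy_context()):

import contextlib

@contextlib.contextmanager
def tracing_region(name):
    # Hypothetical tracing helper; emits start/stop markers around the block.
    print("start tracing:", name)
    try:
        yield
    finally:
        print("stop tracing:", name)

TRACING = False  # derived from sys.argv[4] in the real worker

with tracing_region("worker") if TRACING else contextlib.nullcontext():
    pass  # the (possibly traced) work goes here
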
Example #3
def main():
    # type: () -> None
    """ Main MPI piper worker.

    :return: None
    """
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE

    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if performed
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    if is_worker():
        # Setup cache
        if is_cache_enabled(WORKER_CONF.cache):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, WORKER_CONF.cache)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)
    else:
        # Otherwise it is an executor
        cache = False  # the cache is stopped only from the main worker process
        cache_params = (None, None, None, None)
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_process)  # noqa
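
In both variants above, start_cache returns a 4-tuple that is unpacked into smm (presumably a shared memory manager), the cache process, the cache queue, and the cache ids. A self-contained sketch of a start/stop pair with that shape, built only on the standard library (the helper names and the tracker body are assumptions, not the PyCOMPSs implementation):

from multiprocessing import Process, Queue
from multiprocessing.managers import SharedMemoryManager  # Python 3.8+

def _cache_tracker(queue):
    # Hypothetical tracker loop: consume messages until a None sentinel.
    while True:
        if queue.get() is None:
            break

def start_cache_sketch():
    # Returns a 4-tuple shaped like cache_params in the examples above.
    smm = SharedMemoryManager()
    smm.start()
    cache_queue = Queue()
    cache_ids = {}  # placeholder for whatever registry the real cache keeps
    cache_process = Process(target=_cache_tracker, args=(cache_queue,))
    cache_process.start()
    return smm, cache_process, cache_queue, cache_ids

def stop_cache_sketch(smm, cache_queue, cache_process):
    cache_queue.put(None)  # ask the tracker to finish
    cache_process.join()
    cache_queue.close()
    smm.shutdown()

if __name__ == "__main__":
    smm, cache_process, cache_queue, cache_ids = start_cache_sketch()
    stop_cache_sketch(smm, cache_queue, cache_process)
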
Example #4
def compss_persistent_worker(config):
    # type: (PiperWorkerConfiguration) -> None
    """ Persistent worker main function.

    Retrieves the initial configuration and spawns the worker processes.

    :param config: Piper Worker Configuration description.
    :return: None
    """
    global CACHE
    global CACHE_PROCESS

    # Catch SIGTERM sent by bindings_piper
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Set the binding in worker mode
    context.set_pycompss_context(context.WORKER)

    persistent_storage = (config.storage_conf != 'null')

    logger, logger_cfg, storage_loggers, log_dir = load_loggers(
        config.debug, persistent_storage)

    if __debug__:
        logger.debug(HEADER + "piper_worker.py wake up")
        config.print_on_logger(logger)

    if persistent_storage:
        # Initialize storage
        logger.debug(HEADER + "Starting persistent storage")
        with event_worker(INIT_STORAGE_AT_WORKER_EVENT):
            from storage.api import initWorker as initStorageAtWorker  # noqa
            initStorageAtWorker(config_file_path=config.storage_conf)

    # Create new processes
    queues = []

    cache_profiler = False
    if config.cache_profiler.lower() == 'true':
        cache_profiler = True

    # Setup cache
    if is_cache_enabled(str(config.cache)):
        # Deploy the necessary processes
        CACHE = True
        cache_params = start_cache(logger, str(config.cache), cache_profiler, log_dir)
    else:
        # No cache
        CACHE = False
        cache_params = (None, None, None, None)  # type: ignore
    smm, CACHE_PROCESS, cache_queue, cache_ids = cache_params

    # Create new executor processes
    conf = ExecutorConf(config.debug,
                        get_temporary_directory(),
                        TRACING,
                        config.storage_conf,
                        logger,
                        logger_cfg,
                        persistent_storage,
                        storage_loggers,
                        config.stream_backend,
                        config.stream_master_name,
                        config.stream_master_port,
                        cache_ids,
                        cache_queue,
                        cache_profiler)

    for i in range(0, config.tasks_x_node):
        if __debug__:
            logger.debug(HEADER + "Launching process " + str(i))
        process_name = "".join(("Process-", str(i)))
        pid, queue = create_executor_process(process_name, conf, config.pipes[i])
        queues.append(queue)

    # Read command from control pipe
    alive = True
    process_counter = config.tasks_x_node
    control_pipe = config.control_pipe  # type: typing.Any
    while alive:
        command = control_pipe.read_command(retry_period=1)
        if command != "":
            line = command.split()

            if line[0] == ADD_EXECUTOR_TAG:
                process_name = "".join(("Process-", str(process_counter)))
                process_counter = process_counter + 1
                in_pipe = line[1]
                out_pipe = line[2]
                pipe = Pipe(in_pipe, out_pipe)
                pid, queue = create_executor_process(process_name, conf, pipe)
                queues.append(queue)
                control_pipe.write(" ".join((ADDED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == QUERY_EXECUTOR_ID_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.get(in_pipe)  # type: typing.Any
                pid = proc.pid
                control_pipe.write(" ".join((REPLY_EXECUTOR_ID_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == CANCEL_TASK_TAG:
                in_pipe = line[1]
                cancel_proc = PROCESSES.get(in_pipe)  # type: typing.Any
                cancel_pid = cancel_proc.pid
                if __debug__:
                    logger.debug(HEADER + "Signaling process with PID " +
                                 str(cancel_pid) + " to cancel a task")
                os.kill(cancel_pid, signal.SIGUSR2)  # NOSONAR cancellation produced by COMPSs

            elif line[0] == REMOVE_EXECUTOR_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.pop(in_pipe, None)
                if proc:
                    if proc.is_alive():
                        logger.warning(HEADER + "Forcing terminate on: " +
                                       proc.name)
                        proc.terminate()
                    proc.join()
                control_pipe.write(" ".join((REMOVED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe)))

            elif line[0] == PING_TAG:
                control_pipe.write(PONG_TAG)

            elif line[0] == QUIT_TAG:
                alive = False

    # Wait for all threads
    for proc in PROCESSES.values():
        proc.join()

    # Check if there is any exception message from the threads
    for i in range(0, config.tasks_x_node):
        if not queues[i].empty():
            logger.error(HEADER + "Exception in threads queue: " +
                         str(queues[i].get()))

    for queue in queues:
        queue.close()
        queue.join_thread()

    if CACHE:
        stop_cache(smm, cache_queue, cache_profiler, CACHE_PROCESS)  # noqa

    if persistent_storage:
        # Finish storage
        if __debug__:
            logger.debug(HEADER + "Stopping persistent storage")
        with event_worker(FINISH_STORAGE_AT_WORKER_EVENT):
            from storage.api import finishWorker as finishStorageAtWorker  # noqa
            finishStorageAtWorker()

    if __debug__:
        logger.debug(HEADER + "Finished")

    control_pipe.write(QUIT_TAG)
    control_pipe.close()
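
The while loop above implements a small line-oriented control protocol: each command is a space-separated line whose first token selects the action (ADD_EXECUTOR_TAG, QUERY_EXECUTOR_ID_TAG, CANCEL_TASK_TAG, REMOVE_EXECUTOR_TAG, PING_TAG, QUIT_TAG). A condensed sketch of that dispatch shape, with made-up tag strings and no real pipe handling:

def handle_command(command, handlers):
    # Dispatch one space-separated control command; return False to stop the loop.
    line = command.split()
    tag, args = line[0], line[1:]
    handler = handlers.get(tag)
    if handler is None:
        raise ValueError("unknown control tag: " + tag)
    return handler(args)

# Hypothetical tag strings; the real worker uses the *_TAG constants above.
handlers = {
    "PING": lambda args: True,          # the real worker replies with PONG_TAG
    "ADD_EXECUTOR": lambda args: True,  # args would be [in_pipe, out_pipe]
    "QUIT": lambda args: False,         # ends the read loop
}

alive = True
for cmd in ("PING", "ADD_EXECUTOR /tmp/in /tmp/out", "QUIT"):
    if alive:
        alive = handle_command(cmd, handlers)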