Example 1
def test_piper_worker_cache_reuse():
    if sys.version_info >= (3, 8):
        # Initialize the cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "true:100000", False, "")
        load_shared_memory_manager()
        # Create multiple objects and store with the same name:
        amount = 10
        np_objs = [np.random.rand(4) for _ in range(amount)]
        obj_name = "name"
        np_objs_names = [obj_name for _ in range(amount)]
        # Check insertions
        for i in range(amount):
            insert_object_into_cache_wrapper(logging, cache_queue, np_objs[i],
                                             np_objs_names[i],
                                             np_objs_names[i], None)
        if obj_name not in cache_ids:
            raise Exception("Object " + obj_name + " not found in cache_ids.")
        elif cache_ids[obj_name][4] != 9:
            # Ten insertions under one name: the first registers the entry,
            # only the remaining nine count as hits.
            raise Exception("Wrong number of hits!!!")
        # Stop cache
        stop_cache(smm, cache_queue, False, cache_process)
    else:
        print(NOT_PYTHON_3_8)
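The hit check above depends on the layout of a cache_ids entry: after ten insertions under one name, the counter at index 4 must be 9, because the first insertion registers the object and only the nine repeats count as hits. A minimal sketch of that bookkeeping with a plain dict follows; the entry layout is an assumption inferred from cache_ids[obj_name][4], not the actual PyCOMPSs structure.

def insert(cache_ids, name):
    # Sketch only: hypothetical entry layout [shm_name, size, dtype,
    # shape, hits], with the hit counter at index 4.
    if name in cache_ids:
        cache_ids[name][4] += 1  # repeated name counts as a cache hit
    else:
        cache_ids[name] = ["shm_0", 0, None, None, 0]

cache_ids = {}
for _ in range(10):
    insert(cache_ids, "name")
assert cache_ids["name"][4] == 9  # the first insertion is not a hit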
Example 2
def main():
    # type: () -> None
    """ Main mpi piper worker

    :return: None
    """
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE

    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if requested
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    persistent_storage = (WORKER_CONF.storage_conf != 'null')
    _, _, _, log_dir = load_loggers(WORKER_CONF.debug, persistent_storage)

    cache_profiler = False
    if WORKER_CONF.cache_profiler.lower() == 'true':
        cache_profiler = True

    if is_worker():
        # Setup cache
        if is_cache_enabled(str(WORKER_CONF.cache)):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, str(WORKER_CONF.cache),
                                       cache_profiler, log_dir)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)  # type: ignore
    else:
        # Otherwise it is an executor
        cache = False  # only the main worker process stops the cache
        cache_params = (None, None, None, None)  # type: ignore
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_profiler, cache_process)  # noqa
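Both branches of the tracing conditionals above must yield a context manager, so the non-tracing path supplies a no-op one. A minimal sketch of that pattern; trace_context here is a stand-in for trace_mpi_worker()/trace_mpi_executor(), and since Python 3.7 the standard library's contextlib.nullcontext provides the same no-op behavior as a dummy_context helper.

from contextlib import contextmanager, nullcontext

@contextmanager
def trace_context():
    # Stand-in for the real tracing context managers.
    print("tracing: start")
    yield
    print("tracing: stop")

tracing = False
# nullcontext() is a ready-made no-op context manager, equivalent to
# the dummy_context pattern used in main() above.
with trace_context() if tracing else nullcontext():
    print("worker body runs the same in both modes")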
Example 3
def test_piper_worker_cache_reuse():
    if sys.version_info >= (3, 8):
        # Initialize the cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "true:100")
        load_shared_memory_manager()
        # Create multiple objects and store with the same name:
        amount = 10
        np_objs = [np.random.rand(4) for _ in range(amount)]
        np_objs_names = ["name" for _ in range(amount)]
        # Check insertions
        for i in range(amount):
            insert_object_into_cache_wrapper(logging, cache_queue, np_objs[i],
                                             np_objs_names[i])
        # Stop cache
        stop_cache(smm, cache_queue, cache_process)
    else:
        print(NOT_PYTHON_3_8)
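The strings handed to start_cache across these examples ("true:100", "true:100000", "Default") suggest an "<enabled>:<size>" cache setting. A hypothetical parser for that format follows; the exact semantics, including the fallback size, are assumptions inferred from the test strings rather than the real is_cache_enabled/start_cache logic.

def parse_cache_setting(setting, default_size=100):
    # Hypothetical: "Default"/"true" enable the cache with a fallback
    # size, "true:<n>" sets an explicit size, anything else disables it.
    if setting.lower() in ("default", "true"):
        return True, default_size
    if ":" in setting:
        enabled, size = setting.split(":", 1)
        return enabled.lower() == "true", int(size)
    return False, 0

assert parse_cache_setting("true:100") == (True, 100)
assert parse_cache_setting("Default") == (True, 100)
assert parse_cache_setting("false:0") == (False, 0)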
Example 4
def main():
    """ Main MPI piper worker.

    :return: None
    """
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE

    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if requested
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    if is_worker():
        # Setup cache
        if is_cache_enabled(WORKER_CONF.cache):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, WORKER_CONF.cache)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)
    else:
        # Otherwise it is an executor
        cache = False  # only the main worker process stops the cache
        cache_params = (None, None, None, None)
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_process)  # noqa
Example 5
def test_piper_worker_cache():
    if sys.version_info >= (3, 8):
        # Initialize the cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "Default")
        load_shared_memory_manager()
        # Supported types:
        np_obj = np.random.rand(4)
        np_obj_name = "np_obj"
        list_obj = [1, 2, 3, 4, 5, "hi"]
        list_obj_name = "list_obj_name"
        tuple_obj = ("1", 2, 3, "4", "hi")
        tuple_obj_name = "tuple_obj_name"
        # Check insertions
        insert_object_into_cache_wrapper(logging, cache_queue, np_obj,
                                         np_obj_name)  # noqa: E501
        insert_object_into_cache_wrapper(logging, cache_queue, list_obj,
                                         list_obj_name)  # noqa: E501
        insert_object_into_cache_wrapper(logging, cache_queue, tuple_obj,
                                         tuple_obj_name)  # noqa: E501
        # Check retrieves
        np_obj_new, np_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, np_obj_name)  # noqa: E501
        list_obj_new, list_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, list_obj_name)  # noqa: E501
        tuple_obj_new, tuple_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, tuple_obj_name)  # noqa: E501
        assert (
            set(np_obj_new).intersection(np_obj)
        ), "ERROR: Numpy object retrieved from cache differs from inserted"
        assert (set(list_obj_new).intersection(list_obj)
                ), "ERROR: List retrieved from cache differs from inserted"
        assert (set(tuple_obj_new).intersection(tuple_obj)
                ), "ERROR: Tuple retrieved from cache differs from inserted"
        # Check replace
        new_list_obj = ["hello", "world", 6]
        replace_object_into_cache(logging, cache_queue, new_list_obj,
                                  list_obj_name)  # noqa: E501
        time.sleep(0.5)
        list_obj_new2, list_obj_shm2 = retrieve_object_from_cache(
            logging, cache_ids, list_obj_name)  # noqa: E501
        assert (set(list_obj_new2).intersection(new_list_obj)
                ), "ERROR: List retrieved from cache differs from inserted"
        # Remove object
        remove_object_from_cache(logging, cache_queue, list_obj_name)
        is_ok = False
        try:
            _, _ = retrieve_object_from_cache(logging, cache_ids,
                                              list_obj_name)  # noqa: E501
        except Exception:  # NOSONAR
            is_ok = True
        assert (is_ok), "ERROR: List has not been removed."
        # Check if in cache
        assert (in_cache(
            np_obj_name,
            cache_ids)), "ERROR: numpy object not in cache. And it should be."
        assert (not in_cache(list_obj_name, cache_ids)
                ), "ERROR: list object should not be in cache."
        assert (not in_cache(list_obj_name, {})
                ), "ERROR: in cache should return False if dict is empty."
        # Stop cache
        stop_cache(smm, cache_queue, cache_process)
    else:
        print(NOT_PYTHON_3_8)
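These tests guard on sys.version_info >= (3, 8) because the cache sits on top of the multiprocessing.shared_memory module introduced in Python 3.8, and start_cache returns the SharedMemoryManager (smm) that stop_cache later shuts down. A minimal sketch of that underlying standard-library mechanism, round-tripping a NumPy array through a shared block; this shows the stdlib API only, not the PyCOMPSs cache internals.

from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
import numpy as np

smm = SharedMemoryManager()
smm.start()

src = np.random.rand(4)
shm = smm.SharedMemory(size=src.nbytes)

# Copy the array into the shared block...
dst = np.ndarray(src.shape, dtype=src.dtype, buffer=shm.buf)
dst[:] = src

# ...and rebuild a view from the block name, as another process would.
existing = SharedMemory(name=shm.name)
view = np.ndarray(src.shape, dtype=src.dtype, buffer=existing.buf)
assert np.array_equal(view, src)

# Drop the NumPy views before closing; they hold buffer exports.
del dst, view
existing.close()
smm.shutdown()  # releases every block the manager created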
Example 6
def compss_persistent_worker(config):
    # type: (PiperWorkerConfiguration) -> None
    """ Persistent worker main function.

    Retrieves the initial configuration and spawns the worker processes.

    :param config: Piper Worker Configuration description.
    :return: None
    """
    global CACHE
    global CACHE_PROCESS

    # Catch SIGTERM sent by bindings_piper
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Set the binding in worker mode
    context.set_pycompss_context(context.WORKER)

    persistent_storage = (config.storage_conf != 'null')

    logger, logger_cfg, storage_loggers, log_dir = load_loggers(
        config.debug, persistent_storage)

    if __debug__:
        logger.debug(HEADER + "piper_worker.py wake up")
        config.print_on_logger(logger)

    if persistent_storage:
        # Initialize storage
        logger.debug(HEADER + "Starting persistent storage")
        with event_worker(INIT_STORAGE_AT_WORKER_EVENT):
            from storage.api import initWorker as initStorageAtWorker  # noqa
            initStorageAtWorker(config_file_path=config.storage_conf)

    # Create new processes
    queues = []

    cache_profiler = False
    if config.cache_profiler.lower() == 'true':
        cache_profiler = True

    # Setup cache
    if is_cache_enabled(str(config.cache)):
        # Deploy the necessary processes
        CACHE = True
        cache_params = start_cache(logger, str(config.cache),
                                   cache_profiler, log_dir)
    else:
        # No cache
        CACHE = False
        cache_params = (None, None, None, None)  # type: ignore
    smm, CACHE_PROCESS, cache_queue, cache_ids = cache_params

    # Create new executor processes
    conf = ExecutorConf(config.debug,
                        get_temporary_directory(),
                        TRACING,
                        config.storage_conf,
                        logger,
                        logger_cfg,
                        persistent_storage,
                        storage_loggers,
                        config.stream_backend,
                        config.stream_master_name,
                        config.stream_master_port,
                        cache_ids,
                        cache_queue,
                        cache_profiler)

    for i in range(0, config.tasks_x_node):
        if __debug__:
            logger.debug(HEADER + "Launching process " + str(i))
        process_name = "".join(("Process-", str(i)))
        pid, queue = create_executor_process(process_name, conf, config.pipes[i])
        queues.append(queue)

    # Read command from control pipe
    alive = True
    process_counter = config.tasks_x_node
    control_pipe = config.control_pipe  # type: typing.Any
    while alive:
        command = control_pipe.read_command(retry_period=1)
        if command != "":
            line = command.split()

            if line[0] == ADD_EXECUTOR_TAG:
                process_name = "".join(("Process-", str(process_counter)))
                process_counter = process_counter + 1
                in_pipe = line[1]
                out_pipe = line[2]
                pipe = Pipe(in_pipe, out_pipe)
                pid, queue = create_executor_process(process_name, conf, pipe)
                queues.append(queue)
                control_pipe.write(" ".join((ADDED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == QUERY_EXECUTOR_ID_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.get(in_pipe)  # type: typing.Any
                pid = proc.pid
                control_pipe.write(" ".join((REPLY_EXECUTOR_ID_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == CANCEL_TASK_TAG:
                in_pipe = line[1]
                cancel_proc = PROCESSES.get(in_pipe)  # type: typing.Any
                cancel_pid = cancel_proc.pid
                if __debug__:
                    logger.debug(HEADER + "Signaling process with PID " +
                                 str(cancel_pid) + " to cancel a task")
                os.kill(cancel_pid, signal.SIGUSR2)  # NOSONAR cancellation produced by COMPSs

            elif line[0] == REMOVE_EXECUTOR_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.pop(in_pipe, None)
                if proc:
                    if proc.is_alive():
                        logger.warning(HEADER + "Forcing terminate on: " +
                                       proc.name)
                        proc.terminate()
                    proc.join()
                control_pipe.write(" ".join((REMOVED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe)))

            elif line[0] == PING_TAG:
                control_pipe.write(PONG_TAG)

            elif line[0] == QUIT_TAG:
                alive = False

    # Wait for all executor processes
    for proc in PROCESSES.values():
        proc.join()

    # Check if there is any exception message from the executor processes
    for i in range(0, config.tasks_x_node):
        if not queues[i].empty():
            logger.error(HEADER + "Exception in threads queue: " +
                         str(queues[i].get()))

    for queue in queues:
        queue.close()
        queue.join_thread()

    if CACHE:
        stop_cache(smm, cache_queue, cache_profiler, CACHE_PROCESS)  # noqa

    if persistent_storage:
        # Finish storage
        if __debug__:
            logger.debug(HEADER + "Stopping persistent storage")
        with event_worker(FINISH_STORAGE_AT_WORKER_EVENT):
            from storage.api import finishWorker as finishStorageAtWorker  # noqa
            finishStorageAtWorker()

    if __debug__:
        logger.debug(HEADER + "Finished")

    control_pipe.write(QUIT_TAG)
    control_pipe.close()
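The while loop above is a plain textual dispatcher: it reads a whitespace-separated command from the control pipe, branches on the leading tag, and leaves the loop on QUIT_TAG. A minimal, self-contained sketch of that pattern; the tags and the read/write callables here are stand-ins, not the real Pipe API.

PING_TAG, PONG_TAG, QUIT_TAG = "PING", "PONG", "QUIT"

def control_loop(read_command, write):
    # Dispatch loop mirroring compss_persistent_worker's pipe handling.
    alive = True
    while alive:
        command = read_command()
        if command == "":
            continue  # nothing arrived within the retry period
        line = command.split()
        if line[0] == PING_TAG:
            write(PONG_TAG)  # liveness probe
        elif line[0] == QUIT_TAG:
            alive = False  # exit; the caller joins processes and cleans up

# Usage with an in-memory stand-in for the control pipe:
commands = iter(["PING", "QUIT"])
control_loop(lambda: next(commands), print)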