Example #1
def test_piper_worker_cache_reuse():
    if sys.version_info >= (3, 8):
        # Initiate cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "true:100000", False, "")
        load_shared_memory_manager()
        # Create multiple objects and store with the same name:
        amount = 10
        np_objs = [np.random.rand(4) for _ in range(amount)]
        obj_name = "name"
        np_objs_names = [obj_name for _ in range(amount)]
        # Check insertions
        for i in range(amount):
            insert_object_into_cache_wrapper(logging, cache_queue, np_objs[i],
                                             np_objs_names[i],
                                             np_objs_names[i], None)
        if obj_name not in cache_ids:
            raise Exception("Object " + obj_name + " not found in cache_ids.")
        else:
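            # Each cache_ids entry appears to track reuse hits at index 4:
            # ten insertions under the same name mean one insert plus
            # nine cache hits, hence the expected value of 9.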
            if cache_ids[obj_name][4] != 9:
                raise Exception("Wrong number of hits!!!")
        # Stop cache
        stop_cache(smm, cache_queue, False, cache_process)
    else:
        print(NOT_PYTHON_3_8)
Example #2
def test_piper_worker_cache_reuse():
    if sys.version_info >= (3, 8):
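        # Same reuse scenario as Example #1, but written against a reduced
        # API: start_cache, insert_object_into_cache_wrapper and stop_cache
        # take fewer arguments here, and no hit count is asserted.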
        # Initiate cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "true:100")
        load_shared_memory_manager()
        # Create multiple objects and store with the same name:
        amount = 10
        np_objs = [np.random.rand(4) for _ in range(amount)]
        np_objs_names = ["name" for _ in range(amount)]
        # Check insertions
        for i in range(amount):
            insert_object_into_cache_wrapper(logging, cache_queue, np_objs[i],
                                             np_objs_names[i])
        # Stop cache
        stop_cache(smm, cache_queue, cache_process)
    else:
        print(NOT_PYTHON_3_8)
Example #3
def test_piper_worker_cache():
    if sys.version_info >= (3, 8):
        # Initiate cache
        smm, cache_process, cache_queue, cache_ids = start_cache(
            logging, "Default")
        load_shared_memory_manager()
        # Supported types:
        np_obj = np.random.rand(4)
        np_obj_name = "np_obj"
        list_obj = [1, 2, 3, 4, 5, "hi"]
        list_obj_name = "list_obj_name"
        tuple_obj = ("1", 2, 3, "4", "hi")
        tuple_obj_name = "tuple_obj_name"
        # Check insertions
        insert_object_into_cache_wrapper(logging, cache_queue, np_obj,
                                         np_obj_name)  # noqa: E501
        insert_object_into_cache_wrapper(logging, cache_queue, list_obj,
                                         list_obj_name)  # noqa: E501
        insert_object_into_cache_wrapper(logging, cache_queue, tuple_obj,
                                         tuple_obj_name)  # noqa: E501
        # Check retrieves
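        # retrieve_object_from_cache appears to return a pair: the
        # reconstructed object and its shared-memory block handle.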
        np_obj_new, np_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, np_obj_name)  # noqa: E501
        list_obj_new, list_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, list_obj_name)  # noqa: E501
        tuple_obj_new, tuple_obj_shm = retrieve_object_from_cache(
            logging, cache_ids, tuple_obj_name)  # noqa: E501
        assert (
            set(np_obj_new).intersection(np_obj)
        ), "ERROR: Numpy object retrieved from cache differs from inserted"
        assert (set(list_obj_new).intersection(list_obj)
                ), "ERROR: List retrieved from cache differs from inserted"
        assert (set(tuple_obj_new).intersection(tuple_obj)
                ), "ERROR: Tuple retrieved from cache differs from inserted"
        # Check replace
        new_list_obj = ["hello", "world", 6]
        replace_object_into_cache(logging, cache_queue, new_list_obj,
                                  list_obj_name)  # noqa: E501
        time.sleep(0.5)
        list_obj_new2, list_obj_shm2 = retrieve_object_from_cache(
            logging, cache_ids, list_obj_name)  # noqa: E501
        assert (set(list_obj_new2).intersection(new_list_obj)
                ), "ERROR: List retrieved from cache differs from inserted"
        # Remove object
        remove_object_from_cache(logging, cache_queue, list_obj_name)
        is_ok = False
        try:
            _, _ = retrieve_object_from_cache(logging, cache_ids,
                                              list_obj_name)  # noqa: E501
        except Exception:  # NOSONAR
            is_ok = True
        assert (is_ok), "ERROR: List has not been removed."
        # Check if in cache
        assert (in_cache(
            np_obj_name,
            cache_ids)), "ERROR: numpy object not in cache. And it should be."
        assert (not in_cache(list_obj_name, cache_ids)
                ), "ERROR: list object should not be in cache."
        assert (not in_cache(list_obj_name, {})
                ), "ERROR: in cache should return False if dict is empty."
        # Stop cache
        stop_cache(smm, cache_queue, cache_process)
    else:
        print(NOT_PYTHON_3_8)
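The three tests above exercise a small API surface. The sketch below strings the demonstrated calls into a minimal round trip; it assumes Python >= 3.8 and the same cache-helper imports used (but not shown) by the tests, and names such as cache_round_trip and my_array are illustrative only:

def cache_round_trip():
    # Minimal sketch built only from the calls demonstrated above.
    smm, cache_process, cache_queue, cache_ids = start_cache(
        logging, "Default")
    load_shared_memory_manager()
    my_array = np.random.rand(4)
    insert_object_into_cache_wrapper(logging, cache_queue, my_array,
                                     "my_array")
    if in_cache("my_array", cache_ids):
        cached, _shm = retrieve_object_from_cache(logging, cache_ids,
                                                  "my_array")
    # Shut down the shared memory manager and the cache process.
    stop_cache(smm, cache_queue, cache_process)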
Example #4
def executor(queue, process_name, pipe, conf):
    # type: (typing.Union[None, Queue], str, Pipe, typing.Any) -> None
    """Thread main body - Overrides Threading run method.

    Iterates over the input pipe in order to receive tasks (with their
    parameters) and process them.
    Notifies the runtime when each task has finished with the
    corresponding output value.
    Finishes when the "quit" message is received.

    :param queue: Queue where to put exception messages.
    :param process_name: Process name (Thread-X, where X is the thread id).
    :param pipe: Pipe to receive and send messages from/to the runtime.
    :param conf: configuration of the executor.
    :return: None
    """
    try:
        # Replace Python Worker's SIGTERM handler.
        signal.signal(signal.SIGTERM, shutdown_handler)

        if len(conf.logger.handlers) == 0:
            # Logger has not been inherited correctly. Happens on macOS.
            set_temporary_directory(conf.tmp_dir, create_tmpdir=False)
            # Reload logger
            conf.logger, conf.logger_cfg, conf.storage_loggers, _ = \
                load_loggers(conf.debug, conf.persistent_storage)
            # Set the binding in worker mode too
            context.set_pycompss_context(context.WORKER)
        logger = conf.logger

        tracing = conf.tracing
        storage_conf = conf.storage_conf
        storage_loggers = conf.storage_loggers

        # Get a copy of the necessary information from the logger to
        # re-establish after each task
        logger_handlers = copy.copy(logger.handlers)
        logger_level = logger.getEffectiveLevel()
        logger_formatter = logging.Formatter(
            logger_handlers[0].formatter._fmt)  # noqa
        storage_loggers_handlers = []
        for storage_logger in storage_loggers:
            storage_loggers_handlers.append(copy.copy(storage_logger.handlers))

        # Establish link with the binding-commons to enable task nesting
        if __debug__:
            logger.debug(HEADER +
                         "Establishing link with runtime in process " +
                         str(process_name))  # noqa: E501
        COMPSs.load_runtime(external_process=False, _logger=logger)
        COMPSs.set_pipes(pipe.output_pipe, pipe.input_pipe)

        if storage_conf != "null":
            try:
                from storage.api import initWorkerPostFork  # noqa
                with event_worker(INIT_WORKER_POSTFORK_EVENT):
                    initWorkerPostFork()
            except (ImportError, AttributeError):
                if __debug__:
                    logger.info(
                        HEADER +
                        "[%s] Could not find initWorkerPostFork storage call. Ignoring it."
                        %  # noqa: E501
                        str(process_name))

        # Start the streaming backend if necessary
        streaming = False
        if conf.stream_backend not in [None, "null", "NONE"]:
            streaming = True

        if streaming:
            # Initialize streaming
            logger.debug(HEADER + "Starting streaming for process " +
                         str(process_name))
            try:
                DistroStreamClientHandler.init_and_start(
                    master_ip=conf.stream_master_ip,
                    master_port=conf.stream_master_port)
            except Exception as e:
                logger.error(e)
                raise e

        # Connect to Shared memory manager
        if conf.cache_queue:
            load_shared_memory_manager()

        # Process properties
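        # "alive" is flipped to False by process_message when the runtime
        # sends the quit command, which ends the main loop below.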
        alive = True

        if __debug__:
            logger.debug(HEADER + "[%s] Starting process" % str(process_name))

        # MAIN EXECUTOR LOOP
        while alive:
            # Runtime -> pipe - Read command from pipe
            command = COMPSs.read_pipes()
            if command != "":
                if __debug__:
                    logger.debug(HEADER + "[%s] Received command %s" %
                                 (str(process_name), str(command)))
                # Process the command
                alive = process_message(
                    command, process_name, pipe, queue, tracing, logger,
                    conf.logger_cfg, logger_handlers, logger_level,
                    logger_formatter, storage_conf, storage_loggers,
                    storage_loggers_handlers, conf.cache_queue, conf.cache_ids,
                    conf.cache_profiler)
        # Stop storage
        if storage_conf != "null":
            try:
                from storage.api import finishWorkerPostFork  # noqa
                with event_worker(FINISH_WORKER_POSTFORK_EVENT):
                    finishWorkerPostFork()
            except (ImportError, AttributeError):
                if __debug__:
                    logger.info(
                        HEADER +
                        "[%s] Could not find finishWorkerPostFork storage call. Ignoring it."
                        %  # noqa: E501
                        str(process_name))

        # Stop streaming
        if streaming:
            logger.debug(HEADER + "Stopping streaming for process " +
                         str(process_name))
            DistroStreamClientHandler.set_stop()

        sys.stdout.flush()
        sys.stderr.flush()
        if __debug__:
            logger.debug(HEADER + "[%s] Exiting process " % str(process_name))
        pipe.write(QUIT_TAG)
        pipe.close()
    except Exception as e:
        logger.error(e)
        raise e
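The shutdown_handler installed for SIGTERM at the start of executor is not part of this listing. As a purely illustrative sketch of the shape such a handler takes (not the actual PyCOMPSs implementation):

def shutdown_handler(signum, frame):
    # Illustrative sketch only: flush pending output and terminate the
    # worker process when the runtime sends SIGTERM.
    sys.stdout.flush()
    sys.stderr.flush()
    sys.exit(signum)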