Example #1
    def track(self, obj, collection=False):
        # type: (object, bool) -> typing.Tuple[str, typing.Optional[str]]
        """ Start tracking an object.

        Collections are not stored in a file; we only register them to keep
        track of their identifier. Their elements, however, are stored in
        files.

        :param obj: Object to track.
        :param collection: If the object to be tracked is a collection.
        :return: Object identifier and its corresponding file name.
        """
        if collection:
            obj_id = self._register_object(obj, True)
            file_name = None
            if __debug__:
                logger.debug("Tracking collection %s" % obj_id)
        else:
            obj_id = self._register_object(obj, True)
            file_name = "%s/%s" % (get_temporary_directory(), str(obj_id))
            self._set_file_name(obj_id, file_name)
            self.set_pending_to_synchronize(obj_id)
            if __debug__:
                logger.debug("Tracking object %s to file %s" %
                             (obj_id, file_name))
        address = self._get_object_address(obj)
        self.address_to_obj_id[address] = obj_id
        if self.reporting:
            self.report_now()

        return obj_id, file_name
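
Stripped of the runtime details, the pattern above pairs an id()-based address map with a per-object temporary file name. Below is a minimal, self-contained sketch of that pattern; MiniTracker, its counter scheme, and the tempfile-based directory are illustrative assumptions, not the PyCOMPSs implementation.

import tempfile


class MiniTracker(object):
    """Sketch: map object addresses to identifiers and file names."""

    def __init__(self):
        self.runtime_id = "r1"       # hypothetical runtime prefix
        self.current_id = 0
        self.address_to_obj_id = {}  # id(obj) -> object identifier
        self.file_names = {}         # object identifier -> file name

    def track(self, obj, collection=False):
        obj_id = "%s-%d" % (self.runtime_id, self.current_id)
        self.current_id += 1
        if collection:
            # Collections get no backing file, only an identifier.
            file_name = None
        else:
            file_name = "%s/%s" % (tempfile.gettempdir(), obj_id)
            self.file_names[obj_id] = file_name
        self.address_to_obj_id[id(obj)] = obj_id
        return obj_id, file_name


tracker = MiniTracker()
data = [1, 2, 3]
print(tracker.track(data))                   # e.g. ('r1-0', '/tmp/r1-0')
print(tracker.track(data, collection=True))  # ('r1-1', None)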
Example #2
def not_track(self, collection=False):
    obj_id = "%s-%d" % (self.runtime_id, self.current_id)
    if collection:
        file_name = None
    else:
        file_name = "%s/%s" % (get_temporary_directory(), str(obj_id))
    self.current_id += 1
    return obj_id, file_name
Example #3
def not_track(self, collection=False):
    # type: (bool) -> typing.Tuple[str, str]
    obj_id = "%s-%d" % (self.runtime_id, self.current_id)
    if collection:
        # The string "None" keeps the declared Tuple[str, str] return type.
        file_name = "None"
    else:
        file_name = "%s/%s" % (get_temporary_directory(), str(obj_id))
    self.current_id += 1
    return obj_id, file_name
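
Examples #2 and #3 are two revisions of the same helper; both build the identifier from a per-runtime prefix plus a monotonically increasing counter, and they differ only in how an absent file name is represented (None vs. the string "None"). A standalone sketch of that identifier scheme, using a hypothetical uuid-based prefix:

import uuid


class IdGenerator(object):
    """Sketch of the runtime_id + current_id identifier scheme."""

    def __init__(self):
        self.runtime_id = str(uuid.uuid4())  # assumed per-run prefix
        self.current_id = 0

    def next_id(self):
        obj_id = "%s-%d" % (self.runtime_id, self.current_id)
        self.current_id += 1
        return obj_id


gen = IdGenerator()
print(gen.next_id())  # e.g. '1b9d...-0'
print(gen.next_id())  # e.g. '1b9d...-1'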
Example #4
def compss_persistent_executor(config):
    # type: (PiperWorkerConfiguration) -> None
    """ Persistent executor main function.

    Retrieves the initial configuration and spawns the worker processes.

    :param config: Piper Worker Configuration description.
    :return: None
    """
    COMM.gather(str(os.getpid()), root=0)

    # Catch SIGTERM sent by bindings_piper
    signal.signal(signal.SIGTERM, shutdown_handler)
    # Catch SIGUSR2 to work around strange behaviour with mpi4py
    signal.signal(signal.SIGUSR2, user_signal_handler)

    # Set the binding in worker mode
    import pycompss.util.context as context
    context.set_pycompss_context(context.WORKER)

    persistent_storage = (config.storage_conf != "null")

    logger, logger_cfg, storage_loggers, _ = load_loggers(
        config.debug, persistent_storage)

    cache_profiler = (config.cache_profiler.lower() == 'true')

    if persistent_storage:
        # Initialize storage
        with event_worker(INIT_STORAGE_AT_WORKER_EVENT):
            from storage.api import initWorker as initStorageAtWorker  # noqa
            initStorageAtWorker(config_file_path=config.storage_conf)

    process_name = "".join(("Rank-", str(RANK)))
    conf = ExecutorConf(config.debug, get_temporary_directory(), TRACING,
                        config.storage_conf, logger, logger_cfg,
                        persistent_storage, storage_loggers,
                        config.stream_backend, config.stream_master_name,
                        config.stream_master_port, CACHE_IDS, CACHE_QUEUE,
                        cache_profiler)
    executor(None, process_name, config.pipes[RANK - 1], conf)

    if persistent_storage:
        # Finish storage
        if __debug__:
            logger.debug(HEADER + "Stopping persistent storage")
        with event_worker(FINISH_STORAGE_AT_WORKER_EVENT):
            from storage.api import finishWorker as finishStorageAtWorker  # noqa
            finishStorageAtWorker()
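
Before entering its loop, the executor installs handlers for SIGTERM (shutdown requested by bindings_piper) and SIGUSR2 (used for task cancellation under mpi4py). A self-contained sketch of that registration pattern; the handler bodies here are placeholders, not the real PyCOMPSs handlers:

import os
import signal
import sys


def shutdown_handler(signum, frame):
    # Placeholder: the real handler shuts the worker down cleanly.
    sys.exit(0)


def user_signal_handler(signum, frame):
    # Placeholder: the real handler reacts to task cancellation.
    print("SIGUSR2 received")


signal.signal(signal.SIGTERM, shutdown_handler)
signal.signal(signal.SIGUSR2, user_signal_handler)

# Deliver SIGUSR2 to ourselves; user_signal_handler runs instead of the
# default action (which would terminate the process).
os.kill(os.getpid(), signal.SIGUSR2)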
Example #5
def _clean_temps():
    # type: () -> None
    """ Clean temporary files.

    The temporary files end with the IT extension.

    :return: None
    """
    temp_directory = get_temporary_directory()
    rmtree(temp_directory, True)
    cwd = os.getcwd()
    for f in os.listdir(cwd):
        # Remove temporary files named like d<digits>v<digits>_<digits>.IT
        if re.search(r'd\d+v\d+_\d+\.IT', f):  # NOSONAR
            os.remove(os.path.join(cwd, f))
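
The second half of the cleanup hinges entirely on the regular expression: it removes files in the current directory named d<digits>v<digits>_<digits>.IT. A quick check of which names it matches (the sample names are made up):

import re

PATTERN = re.compile(r'd\d+v\d+_\d+\.IT')

for name in ('d1v2_3.IT', 'd10v20_30.IT', 'd1v2_3.txt', 'data.IT'):
    print(name, '->', bool(PATTERN.search(name)))
# d1v2_3.IT -> True
# d10v20_30.IT -> True
# d1v2_3.txt -> False
# data.IT -> False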
Example #6
File: utils.py Project: giulange/compss
def load_loggers(debug, persistent_storage):
    # type: (bool, bool) -> typing.Tuple[logging.Logger, str, list]
    """ Load all loggers.

    :param debug: Whether debug is enabled.
    :param persistent_storage: Whether persistent storage is enabled.
    :return: main logger of the application, the log config file (json) and
             a list of loggers for the persistent data framework.
    """
    # Load log level configuration file
    worker_path = os.path.dirname(os.path.realpath(__file__))
    log_cfg_path = "".join((worker_path, '/../../../../log'))
    if not os.path.isdir(log_cfg_path):
        # If it does not exist, we are running from the sources (unit testing)
        log_cfg_path = "".join((worker_path, '/../../../../../log'))
    if debug:
        # Debug
        log_json = "/".join((log_cfg_path, 'logging_worker_debug.json'))
    else:
        # Default
        log_json = "/".join((log_cfg_path, 'logging_worker_off.json'))
    log_dir = get_temporary_directory()
    # log_dir has one of two forms:
    #    Agents or worker in master:      /path/to/working_directory/tmpFiles/pycompssID/
    #    Normal master-worker execution:  /path/to/working_directory/machine_name/pycompssID/
    # A normal master-worker execution transfers the err and out files from the
    # expected folder to the master; with agents or a worker in the master it
    # does not, so in that case keep the logs two folders up:
    if context.is_nesting_enabled() or "tmpFiles" in log_dir:
        log_dir = os.path.join(log_dir, "..", "..")
    else:
        log_dir = os.path.join(log_dir, "..")
    init_logging_worker_piper(log_json, log_dir)

    # Define logger facilities
    logger = logging.getLogger('pycompss.worker.piper.piper_worker')
    storage_loggers = []
    if persistent_storage:
        storage_loggers.append(logging.getLogger('dataclay'))
        storage_loggers.append(logging.getLogger('hecuba'))
        storage_loggers.append(logging.getLogger('redis'))
        storage_loggers.append(logging.getLogger('storage'))
    return logger, log_json, storage_loggers
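
The only subtle branch in load_loggers is the log_dir derivation described in the comment above. A standalone sketch of that decision, with nesting folded into a plain boolean parameter (an assumption; the real code queries context.is_nesting_enabled()):

import os


def derive_log_dir(temp_dir, nesting_enabled):
    # Agents or a worker in the master keep logs two folders up;
    # a normal master-worker execution keeps them one folder up.
    if nesting_enabled or "tmpFiles" in temp_dir:
        return os.path.normpath(os.path.join(temp_dir, "..", ".."))
    return os.path.normpath(os.path.join(temp_dir, ".."))


print(derive_log_dir("/wd/tmpFiles/pycompssID", False))      # /wd
print(derive_log_dir("/wd/machine_name/pycompssID", False))  # /wd/machine_name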
Example #7
def compss_persistent_worker(config):
    # type: (PiperWorkerConfiguration) -> None
    """ Persistent worker main function.

    Retrieves the initial configuration and spawns the worker processes.

    :param config: Piper Worker Configuration description.
    :return: None
    """
    global CACHE
    global CACHE_PROCESS

    # Catch SIGTERM sent by bindings_piper
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Set the binding in worker mode
    context.set_pycompss_context(context.WORKER)

    persistent_storage = (config.storage_conf != 'null')

    logger, logger_cfg, storage_loggers, log_dir = load_loggers(
        config.debug, persistent_storage)

    if __debug__:
        logger.debug(HEADER + "piper_worker.py wake up")
        config.print_on_logger(logger)

    if persistent_storage:
        # Initialize storage
        logger.debug(HEADER + "Starting persistent storage")
        with event_worker(INIT_STORAGE_AT_WORKER_EVENT):
            from storage.api import initWorker as initStorageAtWorker  # noqa
            initStorageAtWorker(config_file_path=config.storage_conf)

    # Create new processes
    queues = []

    cache_profiler = (config.cache_profiler.lower() == 'true')

    # Setup cache
    if is_cache_enabled(str(config.cache)):
        # Deploy the necessary processes
        CACHE = True
        cache_params = start_cache(logger, str(config.cache), cache_profiler, log_dir)
    else:
        # No cache
        CACHE = False
        cache_params = (None, None, None, None)  # type: ignore
    smm, CACHE_PROCESS, cache_queue, cache_ids = cache_params

    # Create new executor processes
    conf = ExecutorConf(config.debug,
                        get_temporary_directory(),
                        TRACING,
                        config.storage_conf,
                        logger,
                        logger_cfg,
                        persistent_storage,
                        storage_loggers,
                        config.stream_backend,
                        config.stream_master_name,
                        config.stream_master_port,
                        cache_ids,
                        cache_queue,
                        cache_profiler)

    for i in range(0, config.tasks_x_node):
        if __debug__:
            logger.debug(HEADER + "Launching process " + str(i))
        process_name = "".join(("Process-", str(i)))
        pid, queue = create_executor_process(process_name, conf, config.pipes[i])
        queues.append(queue)

    # Read command from control pipe
    alive = True
    process_counter = config.tasks_x_node
    control_pipe = config.control_pipe  # type: typing.Any
    while alive:
        command = control_pipe.read_command(retry_period=1)
        if command != "":
            line = command.split()

            if line[0] == ADD_EXECUTOR_TAG:
                process_name = "".join(("Process-", str(process_counter)))
                process_counter = process_counter + 1
                in_pipe = line[1]
                out_pipe = line[2]
                pipe = Pipe(in_pipe, out_pipe)
                pid, queue = create_executor_process(process_name, conf, pipe)
                queues.append(queue)
                control_pipe.write(" ".join((ADDED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == QUERY_EXECUTOR_ID_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.get(in_pipe)  # type: typing.Any
                pid = proc.pid
                control_pipe.write(" ".join((REPLY_EXECUTOR_ID_TAG,
                                             out_pipe,
                                             in_pipe,
                                             str(pid))))

            elif line[0] == CANCEL_TASK_TAG:
                in_pipe = line[1]
                cancel_proc = PROCESSES.get(in_pipe)  # type: typing.Any
                cancel_pid = cancel_proc.pid
                if __debug__:
                    logger.debug(HEADER + "Signaling process with PID " +
                                 str(cancel_pid) + " to cancel a task")
                os.kill(cancel_pid, signal.SIGUSR2)  # NOSONAR cancellation produced by COMPSs

            elif line[0] == REMOVE_EXECUTOR_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.pop(in_pipe, None)
                if proc:
                    if proc.is_alive():
                        logger.warning(HEADER + "Forcing terminate on: " +
                                       proc.name)
                        proc.terminate()
                    proc.join()
                control_pipe.write(" ".join((REMOVED_EXECUTOR_TAG,
                                             out_pipe,
                                             in_pipe)))

            elif line[0] == PING_TAG:
                control_pipe.write(PONG_TAG)

            elif line[0] == QUIT_TAG:
                alive = False

    # Wait for all executor processes to finish
    for proc in PROCESSES.values():
        proc.join()

    # Check if there is any exception message from the executor processes
    for i in range(0, config.tasks_x_node):
        if not queues[i].empty():
            logger.error(HEADER + "Exception in executor queue: " +
                         str(queues[i].get()))

    for queue in queues:
        queue.close()
        queue.join_thread()

    if CACHE:
        stop_cache(smm, cache_queue, cache_profiler, CACHE_PROCESS)  # noqa

    if persistent_storage:
        # Finish storage
        if __debug__:
            logger.debug(HEADER + "Stopping persistent storage")
        with event_worker(FINISH_STORAGE_AT_WORKER_EVENT):
            from storage.api import finishWorker as finishStorageAtWorker  # noqa
            finishStorageAtWorker()

    if __debug__:
        logger.debug(HEADER + "Finished")

    control_pipe.write(QUIT_TAG)
    control_pipe.close()
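
The control loop above implements a whitespace-delimited text protocol: the first token of each command selects the action and the remaining tokens are pipe names or PIDs. A minimal sketch of that dispatch shape, with made-up tag values and a plain list standing in for the control pipe:

ADD_EXECUTOR_TAG = "addExecutor"      # illustrative values, not the
ADDED_EXECUTOR_TAG = "addedExecutor"  # real PyCOMPSs tag constants
PING_TAG = "ping"
PONG_TAG = "pong"
QUIT_TAG = "quit"


def dispatch(commands):
    replies = []
    alive = True
    for command in commands:
        if not alive or command == "":
            continue
        line = command.split()
        if line[0] == ADD_EXECUTOR_TAG:
            in_pipe, out_pipe = line[1], line[2]
            # A real worker would spawn an executor here and reply its PID.
            replies.append(" ".join((ADDED_EXECUTOR_TAG, out_pipe, in_pipe)))
        elif line[0] == PING_TAG:
            replies.append(PONG_TAG)
        elif line[0] == QUIT_TAG:
            alive = False
    return replies


print(dispatch(["ping", "addExecutor inPipe0 outPipe0", "quit", "ping"]))
# ['pong', 'addedExecutor outPipe0 inPipe0']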