Example no. 1
def genFragment(numv, dim):
    import pyextrae.multiprocessing as pyextrae
    # if mode == "gauss":
    #    return init_board_gauss(numv, dim, k)
    # else:
    pyextrae.eventandcounters(USER_EVENTS, GENERATE)
    frag = init_board_random(numv, dim)
    pyextrae.eventandcounters(USER_EVENTS, 0)
    return frag
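
All of these examples follow the same Extrae convention: open a user event with a non-zero value, run the traced region, then emit the same event type with value 0 to close it. A minimal sketch of that bracket as a reusable context manager (USER_EVENTS, GENERATE and the pyextrae module are assumed to be available, as in the examples):

from contextlib import contextmanager

@contextmanager
def extrae_event(event_type, event_value):
    """Bracket a code region between an Extrae event and its closing 0."""
    import pyextrae.multiprocessing as pyextrae
    pyextrae.eventandcounters(event_type, event_value)
    try:
        yield
    finally:
        # Emitting value 0 marks the end of the region in the trace
        pyextrae.eventandcounters(event_type, 0)

# Hypothetical usage, mirroring genFragment above:
# with extrae_event(USER_EVENTS, GENERATE):
#     frag = init_board_random(numv, dim)
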
Example no. 2
def partial_sum(XP, clusters, ind):
    import pyextrae.multiprocessing as pyextrae
    import numpy as np
    XP = np.array(XP)
    pyextrae.eventandcounters(USER_EVENTS, PARTIAL_SUM)
    p = [(i, [XP[j - ind] for j in clusters[i]]) for i in clusters]
    pyextrae.eventandcounters(USER_EVENTS, 0)
    dic = {}
    for i, points in p:
        # Per-cluster partial result: (point count, vector sum)
        dic[i] = (len(points), np.sum(points, axis=0))
    return dic
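
A worked illustration of what partial_sum returns, on hypothetical toy data (the pyextrae calls inside it assume an Extrae-instrumented run; ind is the global index of the fragment's first row):

import numpy as np

XP = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]]   # fragment holding global rows 5..7
clusters = {0: [5, 7], 1: [6]}              # cluster id -> global point indices
ind = 5
# partial_sum(XP, clusters, ind) would yield:
# {0: (2, array([2., 2.])), 1: (1, array([1., 1.]))}
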
Example no. 3
def cluster_points_partial(XP, mu, ind):
    import pyextrae.multiprocessing as pyextrae
    import numpy as np
    dic = {}
    XP = np.array(XP)
    pyextrae.eventandcounters(USER_EVENTS, CLUSTER_MAIN_LOOP)
    for idx, point in enumerate(XP):
        # Index of the closest centre in mu (Euclidean distance)
        bestmukey = min(((k, np.linalg.norm(point - centre))
                         for k, centre in enumerate(mu)),
                        key=lambda t: t[1])[0]
        if bestmukey not in dic:
            dic[bestmukey] = [idx + ind]
        else:
            dic[bestmukey].append(idx + ind)
    pyextrae.eventandcounters(USER_EVENTS, 0)
    return dic
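
A hypothetical call, to make the return shape concrete (again assuming an Extrae run for the pyextrae events):

import numpy as np

XP = [[0.0, 0.0], [9.0, 9.0]]                        # global rows 4 and 5
mu = [np.array([0.0, 0.0]), np.array([10.0, 10.0])]  # current centres
# cluster_points_partial(XP, mu, ind=4) -> {0: [4], 1: [5]}
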
Example no. 4
def reduceCentersTask(a, b):
    import pyextrae.multiprocessing as pyextrae
    # Merge b into a; values are (point count, vector sum) pairs per cluster
    for key in b:
        if key not in a:
            pyextrae.eventandcounters(USER_EVENTS, REDUCE_NO_KEY)
            a[key] = b[key]
            pyextrae.eventandcounters(USER_EVENTS, 0)
        else:
            pyextrae.eventandcounters(USER_EVENTS, REDUCE_KEY)
            a[key] = (a[key][0] + b[key][0], a[key][1] + b[key][1])
            pyextrae.eventandcounters(USER_EVENTS, 0)
    return a
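
To show the reduce step end to end, a hypothetical merge of two partial dictionaries followed by the centre update (the final division is not part of reduceCentersTask itself, and the function again assumes an Extrae-instrumented run):

import numpy as np

a = {0: (2, np.array([2.0, 2.0]))}
b = {0: (1, np.array([4.0, 4.0])), 1: (3, np.array([3.0, 3.0]))}
merged = reduceCentersTask(a, b)
# merged == {0: (3, array([6., 6.])), 1: (3, array([3., 3.]))}
new_centres = {k: s / n for k, (n, s) in merged.items()}
# {0: array([2., 2.]), 1: array([1., 1.])}
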
Example no. 5
def compss_persistent_worker():

    # Get args
    debug = (sys.argv[1] == 'true')
    tracing = (sys.argv[2] == 'true')
    storage_conf = sys.argv[3]
    tasks_x_node = int(sys.argv[4])
    in_pipes = sys.argv[5:5 + tasks_x_node]
    out_pipes = sys.argv[5 + tasks_x_node:]

    if tracing:
        import pyextrae.multiprocessing as pyextrae
        pyextrae.eventandcounters(SYNC_EVENTS, 1)
        pyextrae.eventandcounters(TASK_EVENTS, WORKER_RUNNING)

    if debug:
        assert tasks_x_node == len(in_pipes)
        assert tasks_x_node == len(out_pipes)

    persistent_storage = False
    if storage_conf != 'null':
        persistent_storage = True
        from storage.api import initWorker as initStorageAtWorker
        from storage.api import finishWorker as finishStorageAtWorker

    # Load log level configuration file
    worker_path = os.path.dirname(os.path.realpath(__file__))
    if debug:
        # Debug
        init_logging_worker(worker_path + '/../../log/logging.json.debug')
    else:
        # Default
        init_logging_worker(worker_path + '/../../log/logging.json.off')

    if __debug__:
        logger = logging.getLogger('pycompss.worker.worker')
        logger.debug("[PYTHON WORKER] piper_worker.py wake up")
        logger.debug("[PYTHON WORKER] -----------------------------")
        logger.debug("[PYTHON WORKER] Persistent worker parameters:")
        logger.debug("[PYTHON WORKER] -----------------------------")
        logger.debug("[PYTHON WORKER] Debug          : " + str(debug))
        logger.debug("[PYTHON WORKER] Tracing        : " + str(tracing))
        logger.debug("[PYTHON WORKER] Tasks per node : " + str(tasks_x_node))
        logger.debug("[PYTHON WORKER] In Pipes       : " + str(in_pipes))
        logger.debug("[PYTHON WORKER] Out Pipes      : " + str(out_pipes))
        logger.debug("[PYTHON WORKER] Storage conf.  : " + str(storage_conf))
        logger.debug("[PYTHON WORKER] -----------------------------")

    if persistent_storage:
        # Initialize storage
        initStorageAtWorker(config_file_path=storage_conf)

    # Create new executor processes; the module-level 'processes' list is
    # assumed to be shared with shutdown_handler
    queues = []
    for i in range(0, tasks_x_node):
        if __debug__:
            logger.debug("[PYTHON WORKER] Launching process " + str(i))
        process_name = 'Process-' + str(i)
        queues.append(Queue())

        def create_threads():
            processes.append(Process(target=worker, args=(queues[i],
                                                          process_name,
                                                          in_pipes[i],
                                                          out_pipes[i],
                                                          storage_conf)))
            processes[i].start()
        create_threads()

    # Catch SIGTERM sent by bindings_piper to exit all processes
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Wait for all processes to finish
    for i in range(0, tasks_x_node):
        processes[i].join()

    # Check if there is any exception message from the processes
    for i in range(0, tasks_x_node):
        if not queues[i].empty():
            print(queues[i].get())

    for q in queues:
        q.close()
        q.join_thread()

    if persistent_storage:
        # Finish storage
        finishStorageAtWorker()

    if __debug__:
        logger.debug("[PYTHON WORKER] Finished")

    if tracing:
        pyextrae.eventandcounters(TASK_EVENTS, 0)
        pyextrae.eventandcounters(SYNC_EVENTS, 0)
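
For reference, the positional argument layout this entry point parses, with illustrative placeholder values only (the pipe lists are split at index 5 + tasks_x_node):

# sys.argv[1:], illustrative placeholders:
# ['true',                      # debug
#  'false',                     # tracing
#  'null',                      # storage_conf ('null' disables persistent storage)
#  '2',                         # tasks_x_node
#  'in_pipe_0', 'in_pipe_1',    # tasks_x_node input pipes
#  'out_pipe_0', 'out_pipe_1']  # tasks_x_node output pipes
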
Example no. 6
    # slaves = sys.argv[9..i]
    # numCus = sys.argv[i+1]
    # has_target = sys.argv[i+2] == 'true'
    # num_params = int(sys.argv[i+3])
    # params = sys.argv[i+4..]

    persistent_storage = False
    if storage_conf != 'null':
        persistent_storage = True
        from storage.api import initWorker as initStorageAtWorker
        from storage.api import finishWorker as finishStorageAtWorker

    if tracing:
        import pyextrae.multiprocessing as pyextrae

        pyextrae.eventandcounters(SYNC_EVENTS, taskId)
        # pyextrae.eventandcounters(TASK_EVENTS, 0)
        pyextrae.eventandcounters(TASK_EVENTS, WORKER_INITIALIZATION)

    # Load log level configuration file
    worker_path = os.path.dirname(os.path.realpath(__file__))
    if log_level == 'true' or log_level == "debug":
        # Debug
        init_logging_worker(worker_path + '/../../log/logging.json.debug')
    elif log_level == "info" or log_level == "off":
        # Info or no debug
        init_logging_worker(worker_path + '/../../log/logging.json.off')
    else:
        # Default
        init_logging_worker(worker_path + '/../../log/logging.json')
Example no. 7
@contextmanager  # contextlib.contextmanager; callers wrap the worker run in 'with'
def trace_mpi_worker():
    # type: () -> typing.Iterator[None]
    """ Sets up the tracing for the mpi worker.

    :return: None
    """
    global PYEXTRAE
    global TRACING
    import pyextrae.mpi as pyextrae  # noqa
    PYEXTRAE = pyextrae
    TRACING = True
    pyextrae.eventandcounters(SYNC_EVENTS, 1)
    pyextrae.eventandcounters(INSIDE_WORKER_TYPE, WORKER_RUNNING_EVENT)
    yield  # here the worker runs
    pyextrae.eventandcounters(INSIDE_WORKER_TYPE, 0)
    pyextrae.eventandcounters(SYNC_EVENTS, 0)
    # Timestamped sync event (presumably to align traces across processes)
    pyextrae.eventandcounters(SYNC_EVENTS, int(time.time()))
    pyextrae.eventandcounters(SYNC_EVENTS, 0)
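
Used through contextlib, the generator brackets the whole worker run; a hypothetical call site:

with trace_mpi_worker():
    run_worker_main_loop()  # hypothetical: whatever the MPI worker executes
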
Example no. 8
def main():
    # Emit sync event if tracing is enabled
    tracing = sys.argv[1] == 'true'
    task_id = int(sys.argv[2])
    log_level = sys.argv[3]
    storage_conf = sys.argv[4]
    stream_backend = sys.argv[5]
    stream_master_name = sys.argv[6]
    stream_master_port = sys.argv[7]
    # method_type = sys.argv[8]
    params = sys.argv[9:]
    # class_name = sys.argv[9]
    # method_name = sys.argv[10]
    # num_slaves = sys.argv[11]
    # i = 11 + num_slaves
    # slaves = sys.argv[11..i]
    # numCus = sys.argv[i+1]
    # has_target = sys.argv[i+2] == 'true'
    # num_params = int(sys.argv[i+3])
    # params = sys.argv[i+4..]

    print("tracing = " + str(tracing))
    print("task_id = " + str(task_id))
    print("log_level = " + str(log_level))
    print("storage_conf = " + str(storage_conf))

    persistent_storage = False
    if storage_conf != 'null':
        persistent_storage = True
        from storage.api import initWorker as initStorageAtWorker
        from storage.api import finishWorker as finishStorageAtWorker

    streaming = False
    if stream_backend not in [None, 'null', 'NONE']:
        streaming = True

    if tracing:
        # Start tracing
        import pyextrae.multiprocessing as pyextrae
        pyextrae.eventandcounters(SYNC_EVENTS, task_id)
        # pyextrae.eventandcounters(TASK_EVENTS, 0)
        pyextrae.eventandcounters(TASK_EVENTS, WORKER_RUNNING_EVENT)

    if streaming:
        # Start streaming
        DistroStreamClientHandler.init_and_start(
            master_ip=stream_master_name, master_port=stream_master_port)

    # Load log level configuration file
    worker_path = os.path.dirname(os.path.realpath(__file__))
    if log_level == 'true' or log_level == "debug":
        # Debug
        init_logging_worker(worker_path + '/../../../log/logging_debug.json')
    elif log_level == "info" or log_level == "off":
        # Info or no debug
        init_logging_worker(worker_path + '/../../../log/logging_off.json')
    else:
        # Default
        init_logging_worker(worker_path + '/../../../log/logging.json')

    if persistent_storage:
        # Initialize storage
        initStorageAtWorker(config_file_path=storage_conf)

    # Init worker
    exit_code = compss_worker(tracing, str(task_id), storage_conf, params)

    if tracing:
        # Finish tracing
        pyextrae.eventandcounters(TASK_EVENTS, 0)
        # pyextrae.eventandcounters(TASK_EVENTS, PROCESS_DESTRUCTION)
        pyextrae.eventandcounters(SYNC_EVENTS, task_id)

    if streaming:
        # Finish streaming
        DistroStreamClientHandler.set_stop()

    if persistent_storage:
        # Finish storage
        finishStorageAtWorker()

    if exit_code == 1:
        sys.exit(1)
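
For reference, the first positional arguments parsed above, with illustrative placeholder values only:

# sys.argv[1:8], illustrative placeholders:
# ['true',         # tracing
#  '1',            # task_id
#  'debug',        # log_level
#  'null',         # storage_conf ('null' disables persistent storage)
#  'null',         # stream_backend ('null'/'NONE' disables streaming)
#  'localhost',    # stream_master_name
#  '49049']        # stream_master_port
# sys.argv[8] is the (commented-out) method_type; the task parameters
# follow from sys.argv[9:].
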
Example no. 9
def compss_persistent_worker(config):
    """
    Persistent worker main function.
    Retrieves the initial configuration and spawns the worker processes.

    :param config: Piper Worker Configuration description

    :return: None
    """
    # Catch SIGTERM sent by bindings_piper
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Set the binding in worker mode
    context.set_pycompss_context(context.WORKER)

    if TRACING:
        try:
            user_paths = os.environ['PYTHONPATH']
        except KeyError:
            user_paths = ""
        print("PYTHON PATH = " + user_paths)
        import pyextrae.multiprocessing as pyextrae
        pyextrae.eventandcounters(SYNC_EVENTS, 1)
        pyextrae.eventandcounters(TASK_EVENTS, WORKER_RUNNING_EVENT)

    persistent_storage = (config.storage_conf != 'null')

    logger, storage_loggers = load_loggers(config.debug, persistent_storage)

    if __debug__:
        logger.debug(HEADER + "piper_worker.py wake up")
        config.print_on_logger(logger)

    if persistent_storage:
        # Initialize storage
        logger.debug(HEADER + "Starting persitent storage")
        from storage.api import initWorker as initStorageAtWorker
        initStorageAtWorker(config_file_path=config.storage_conf)

    # Create new executor processes
    queues = []
    for i in range(0, config.tasks_x_node):
        if __debug__:
            logger.debug(HEADER + "Launching process " + str(i))
        process_name = 'Process-' + str(i)
        queue = Queue()
        queues.append(queue)
        conf = ExecutorConf(TRACING,
                            config.storage_conf,
                            logger,
                            storage_loggers,
                            config.stream_backend,
                            config.stream_master_name,
                            config.stream_master_port)
        process = Process(target=executor, args=(queue,
                                                 process_name,
                                                 config.pipes[i],
                                                 conf))
        PROCESSES[config.pipes[i].input_pipe] = process
        process.start()

    # Read command from control pipe
    alive = True
    process_counter = config.tasks_x_node
    control_pipe = config.control_pipe
    while alive:
        command = control_pipe.read_command(retry_period=1)
        if command != "":
            line = command.split()

            if line[0] == ADD_EXECUTOR_TAG:

                process_name = 'Process-' + str(process_counter)
                process_counter = process_counter + 1
                in_pipe = line[1]
                out_pipe = line[2]
                pipe = Pipe(in_pipe, out_pipe)
                pid = create_threads(process_name, pipe)
                control_pipe.write(ADDED_EXECUTOR_TAG + " " +
                                   out_pipe + " " +
                                   in_pipe + " " +
                                   str(pid))

            elif line[0] == QUERY_EXECUTOR_ID_TAG:
                in_pipe = line[1]
                out_pipe = line[2]
                proc = PROCESSES.get(in_pipe)
                pid = proc.pid
                control_pipe.write(REPLY_EXECUTOR_ID_TAG + " " +
                                   out_pipe + " " +
                                   in_pipe + " " +
                                   str(pid))

            elif line[0] == CANCEL_TASK_TAG:
                in_pipe = line[1]
                proc = PROCESSES.get(in_pipe)
                pid = proc.pid
                logger.debug("[PYTHON WORKER] Signaling process with PID " + str(pid) + " to cancel a task")
                kill(pid, signal.SIGUSR2)

            elif line[0] == REMOVE_EXECUTOR_TAG:

                in_pipe = line[1]
                out_pipe = line[2]

                proc = PROCESSES.pop(in_pipe, None)

                if proc:
                    if proc.is_alive():
                        logger.warning(HEADER + "Forcing terminate on : " +
                                       proc.name)
                        proc.terminate()
                    proc.join()
                control_pipe.write(REMOVED_EXECUTOR_TAG + " " +
                                   out_pipe + " " +
                                   in_pipe)

            elif line[0] == PING_TAG:
                control_pipe.write(PONG_TAG)

            elif line[0] == QUIT_TAG:
                alive = False

    # Wait for all executor processes to finish
    for proc in PROCESSES.values():
        proc.join()

    # Check if there is any exception message from the executor processes
    for i in range(0, config.tasks_x_node):
        if not queues[i].empty():
            logger.error(HEADER + "Exception in executor queue: " +
                         str(queues[i].get()))

    for queue in queues:
        queue.close()
        queue.join_thread()

    if persistent_storage:
        # Finish storage
        logger.debug(HEADER + "Stopping persistent storage")
        from storage.api import finishWorker as finishStorageAtWorker
        finishStorageAtWorker()

    if __debug__:
        logger.debug(HEADER + "Finished")

    if TRACING:
        pyextrae.eventandcounters(TASK_EVENTS, 0)
        pyextrae.eventandcounters(SYNC_EVENTS, 0)

    control_pipe.write(QUIT_TAG)
    control_pipe.close()
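
The control-pipe loop above implements a small line-oriented command protocol. An illustrative exchange (tag spellings are the constants referenced above; 'in7'/'out7' are hypothetical pipe names):

# runtime -> worker                  worker -> runtime
# ADD_EXECUTOR_TAG in7 out7          ADDED_EXECUTOR_TAG out7 in7 <pid>
# QUERY_EXECUTOR_ID_TAG in7 out7     REPLY_EXECUTOR_ID_TAG out7 in7 <pid>
# CANCEL_TASK_TAG in7                (worker sends SIGUSR2 to that executor)
# REMOVE_EXECUTOR_TAG in7 out7       REMOVED_EXECUTOR_TAG out7 in7
# PING_TAG                           PONG_TAG
# QUIT_TAG                           QUIT_TAG (after joining all executors)
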
Example no. 10
@contextmanager  # contextlib.contextmanager; callers wrap the worker run in 'with'
def trace_multiprocessing_worker():
    # type: () -> typing.Iterator[None]
    """ Sets up the tracing for the multiprocessing worker.

    :return: None
    """
    global PYEXTRAE
    global TRACING
    import pyextrae.multiprocessing as pyextrae  # noqa
    PYEXTRAE = pyextrae
    TRACING = True
    pyextrae.eventandcounters(SYNC_EVENTS, 1)
    pyextrae.eventandcounters(WORKER_EVENTS, WORKER_RUNNING_EVENT)
    yield  # here the worker runs
    pyextrae.eventandcounters(WORKER_EVENTS, 0)
    pyextrae.eventandcounters(SYNC_EVENTS, 0)
    pyextrae.eventandcounters(SYNC_EVENTS, int(time.time()))
    pyextrae.eventandcounters(SYNC_EVENTS, 0)