Example #1
import os.path as osp
from truckms.inference.analytics import filter_pred_detections, get_important_frames
from flask import Flask, make_response
from flask_bootstrap import Bootstrap
import subprocess as sps
from flaskwebgui import FlaskUI  # get the FlaskUI class
from threading import Thread
from functools import partial
from p2prpc.p2p_client import create_p2p_client_app
import signal
from p2prpc import monitoring
import datetime

password = "******"
root_dir = '/home/achellaris/projects_data/main_dir'
path = osp.join(root_dir, 'clientdb')
client_app = create_p2p_client_app(osp.join(root_dir,
                                            "network_discovery_client.txt"),
                                   password=password,
                                   cache_path=path)
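# register_p2p_func wraps analyze_movie (assumed to be imported elsewhere in the
# original module) so that calls to it can be dispatched over the p2p network;
# can_do_locally_func=lambda: True presumably also allows running it locally.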
dec_analyze_movie = client_app.register_p2p_func(
    can_do_locally_func=lambda: True)(analyze_movie)
app = Flask(__name__,
            template_folder=osp.join(osp.dirname(__file__), 'templates'),
            static_folder=osp.join(osp.dirname(__file__), 'templates',
                                   'assets'))
Bootstrap(app)
#/home/achellaris/projects/TruckMonitoringSystem/tests/truckms/service/data/cut.mkv


def shutdown_clientapp():
    client_app.background_server.shutdown()

Example #2
from p2prpc.p2p_client import create_p2p_client_app
from function import p2prpc_analyze_large_file
import os.path as osp
import time
import logging

logger = logging.getLogger(__name__)

client_app = create_p2p_client_app("discovery.txt",
                                   password="******",
                                   cache_path=osp.join(
                                       osp.abspath(osp.dirname(__file__)),
                                       'clientdb'))

p2prpc_analyze_large_file = client_app.register_p2p_func()(
    p2prpc_analyze_large_file)

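# Calling the registered function returns a future-like handle; terminate()
# below cancels the remote call instead of waiting for its result.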
kwargs = dict(video_handle=open(__file__, 'rb'), arg2=10)
res = p2prpc_analyze_large_file(**kwargs)

res.terminate()

client_app.background_server.shutdown()
Example #3
from p2prpc.p2p_client import create_p2p_client_app
from examples.function import analyze_large_file
import os.path as osp
import time
import logging
logger = logging.getLogger(__name__)

password = "******"
path = osp.join(osp.abspath(osp.dirname(__file__)), 'clientdb')

client_app = create_p2p_client_app("network_discovery_client.txt",
                                   password=password,
                                   cache_path=path)

analyze_large_file = client_app.register_p2p_func()(analyze_large_file)

print(__file__)
# res = analyze_large_file(video_handle=open(__file__, 'rb'), arg2=100)
# print(res.get())
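# Exercise the lifecycle of a remote call: submit, terminate, restart, then
# block on get() for the final result.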
res = analyze_large_file(video_handle=open(__file__, 'rb'), arg2=160)
logger.error("Going to sleep")
time.sleep(5)
logger.error("Going to terminate")
res.terminate()
logger.error("Going to restart")
res.restart()

logger.error("Going to get")
print(res.get())

client_app.background_server.shutdown()
Example #4
def monitor_functions(tmpdir, port_offset):
    file = __file__

    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_large_file_function = client_app.register_p2p_func()(
        large_file_function)
    client_do_nothing_function = client_app.register_p2p_func()(
        do_nothing_function)
    client_long_runningdo_nothing_function = client_app.register_p2p_func()(
        long_runningdo_nothing_function)

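    # The broker/worker app runs on its own port and registers the same three
    # functions so it can serve the client's requests.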
    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir)
    broker_worker_app.register_p2p_func()(large_file_function)
    broker_worker_app.register_p2p_func()(do_nothing_function)
    broker_worker_app.register_p2p_func()(long_runningdo_nothing_function)
    broker_worker_thread = ServerThread(broker_worker_app, 10)
    broker_worker_thread.start()
    for func_ in [
            large_file_function, do_nothing_function,
            long_runningdo_nothing_function
    ]:
        while select_lru_worker(
                client_app.registry_functions[func_.__name__]) == (None, None):
            time.sleep(3)
            print("Waiting for client to know about broker")

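    # A client-worker app also connects to the broker and declares, via
    # can_do_work_func=lambda: True, that it is willing to execute each function.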
    ndcw_path = os.path.join(tmpdir, "ndcw.txt")
    client_worker_port = 5005 + port_offset
    cache_cw_dir = os.path.join(tmpdir, "cw")
    with open(ndcw_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    clientworker_app = P2PClientworkerApp(ndcw_path,
                                          local_port=client_worker_port,
                                          mongod_port=client_worker_port + 100,
                                          cache_path=cache_cw_dir)
    clientworker_app.register_p2p_func(
        can_do_work_func=lambda: True)(large_file_function)
    clientworker_app.register_p2p_func(
        can_do_work_func=lambda: True)(do_nothing_function)
    clientworker_app.register_p2p_func(
        can_do_work_func=lambda: True)(long_runningdo_nothing_function)
    clientworker_thread = ServerThread(clientworker_app)
    clientworker_thread.start()
    for func_ in [
            large_file_function, do_nothing_function,
            long_runningdo_nothing_function
    ]:
        while select_lru_worker(clientworker_app.registry_functions[
                func_.__name__].p2pfunction) == (None, None):
            time.sleep(3)
            print("Waiting for worker to know about broker")

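    # Submit one call per registered function through a thread pool; each pool
    # future resolves to a p2p future for the corresponding remote call.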
    with ThreadPoolExecutor(max_workers=10) as executor:
        pool_future1 = executor.submit(client_large_file_function,
                                       video_handle=open(file, 'rb'),
                                       random_arg=10)
        time.sleep(5)
        pool_future2 = executor.submit(client_do_nothing_function,
                                       random_arg=20)
        time.sleep(5)
        pool_future3 = executor.submit(client_long_runningdo_nothing_function,
                                       random_arg=30)
        time.sleep(5)
        p2p_future1 = pool_future1.result()
        p2p_future2 = pool_future2.result()
        p2p_future3 = pool_future3.result()

    from pprint import pprint
    print("\n\n\n Client app function stats")
    pprint(client_app.function_stats())
    from p2prpc.p2p_brokerworker import check_function_stats
    print("\n\n\n Broker app function stats")
    pprint(
        check_function_stats("localhost", broker_port,
                             broker_worker_app.crypt_pass))
    print("\n\n\n Worker app function stats")
    pprint(
        check_function_stats("localhost", client_worker_port,
                             clientworker_app.crypt_pass))

    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    clientworker_thread.shutdown()
    print("Shutdown worker")
    time.sleep(3)
Example #5
from p2prpc.p2p_client import create_p2p_client_app
from function import p2prpc_analyze_large_file
import os.path as osp
import time
import logging
from pymongo import MongoClient
import os
logger = logging.getLogger(__name__)

password = "******"
path = osp.join(osp.abspath(osp.dirname(__file__)), 'clientdb')

client_app = create_p2p_client_app("discovery.txt",
                                   password=password,
                                   cache_path=path)

p2prpc_analyze_large_file = client_app.register_p2p_func()(
    p2prpc_analyze_large_file)

kwargs = dict(video_handle=open(__file__, 'rb'), arg2=160)
res = p2prpc_analyze_large_file(**kwargs)
print(res.get())

# MONGO_PORT = int(os.environ['MONGO_PORT'])
# MONGO_HOST = os.environ['MONGO_HOST']
#
# db_name, db_collection = 'p2p', p2prpc_analyze_large_file.__name__
# client = MongoClient(host=MONGO_HOST, port=MONGO_PORT)[db_name][db_collection]
# client.remove()
#
# kwargs = dict(video_handle=open(__file__, 'rb'), arg2=160)
Example #6
def function_delete_on_clientworker(tmpdir, port_offset, func):
    file = __file__

    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_func = client_app.register_p2p_func()(func)

    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir)
    broker_worker_app.register_p2p_func()(func)
    broker_worker_thread = ServerThread(broker_worker_app, 10)
    broker_worker_thread.start()
    while select_lru_worker(
            client_app.registry_functions[func.__name__]) == (None, None):
        time.sleep(3)
        print("Waiting for client to know about broker")

    ndcw_path = os.path.join(tmpdir, "ndcw.txt")
    client_worker_port = 5005 + port_offset
    cache_cw_dir = os.path.join(tmpdir, "cw")
    with open(ndcw_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    clientworker_app = P2PClientworkerApp(ndcw_path,
                                          local_port=client_worker_port,
                                          mongod_port=client_worker_port + 100,
                                          cache_path=cache_cw_dir)
    clientworker_app.register_p2p_func(can_do_work_func=lambda: True)(func)
    clientworker_thread = ServerThread(clientworker_app)
    clientworker_thread.start()
    while select_lru_worker(clientworker_app.registry_functions[
            func.__name__].p2pfunction) == (None, None):
        time.sleep(3)
        print("Waiting for worker to know about broker")

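    # Submit a single call, delete its result, then verify below that the
    # corresponding MongoDB collections on both broker and worker are empty.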
    with ThreadPoolExecutor(max_workers=10) as executor:
        pool_future = executor.submit(client_func,
                                      file_handle=open(file, 'rb'))
        p2p_future = pool_future.result()
        time.sleep(5)
        p2p_future.delete()
    col = list(
        MongoClient(port=broker_port + 100)["p2p"][func.__name__].find({}))
    if len(col) != 0:
        print("broker", col)
        assert False
    col = list(
        MongoClient(port=client_worker_port + 100)["p2p"][func.__name__].find(
            {}))
    if len(col) != 0:
        print("worker", col)
        assert False
    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    clientworker_thread.shutdown()
    print("Shutdown worker")
    time.sleep(3)
Example #7
def multiple_client_calls_client_worker(tmpdir, port_offset, func, file=None):
    if file is None:
        file = __file__
    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset
    client_worker_port = 5005 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    ndcw_path = os.path.join(tmpdir, "ndcw.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    cache_cw_dir = os.path.join(tmpdir, "cw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    with open(ndcw_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_func = client_app.register_p2p_func()(func)

    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir)
    broker_worker_app.register_p2p_func()(func)
    broker_worker_thread = ServerThread(broker_worker_app)
    broker_worker_thread.start()
    clientworker_app = P2PClientworkerApp(ndcw_path,
                                          local_port=client_worker_port,
                                          mongod_port=client_worker_port + 100,
                                          cache_path=cache_cw_dir)
    clientworker_app.register_p2p_func(can_do_work_func=lambda: True)(func)
    clientworker_thread = ServerThread(clientworker_app)
    clientworker_thread.start()
    while select_lru_worker(
            client_app.registry_functions[func.__name__]) == (None, None):
        time.sleep(3)
        print("Waiting for client to know about broker")
    while select_lru_worker(clientworker_app.registry_functions[
            func.__name__].p2pfunction) == (None, None):
        time.sleep(3)
        print("Waiting for worker to know about broker")

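    # Fire num_calls concurrent calls and check that every one returns a dict.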
    with ThreadPoolExecutor(max_workers=10) as executor:
        num_calls = 1
        list_futures_of_futures = []
        for i in range(num_calls):
            future = executor.submit(client_func,
                                     video_handle=open(file, 'rb'),
                                     random_arg=i)
            list_futures_of_futures.append(future)
        list_futures = [f.result() for f in list_futures_of_futures]
        assert len(list_futures) == num_calls
        list_results = [f.get() for f in list_futures]
        assert len(list_results) == num_calls and all(
            isinstance(r, dict) for r in list_results)

    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    clientworker_thread.shutdown()
    print("Shutdown worker")
    time.sleep(3)
Example #8
def function_restart_unfinished_upload_on_broker(tmpdir, port_offset, func):
    file = r'/home/achellaris/big_data/torrent/torrents/The.Sopranos.S06.720p.BluRay.DD5.1.x264-DON/The.Sopranos.S06E15.Remember.When.720p.BluRay.DD5.1.x264-DON.mkv'

    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_func = client_app.register_p2p_func()(func)

    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir)
    broker_worker_app.register_p2p_func()(func)
    broker_worker_thread = ServerThread(broker_worker_app, 10)
    broker_worker_thread.start()
    while select_lru_worker(
            client_app.registry_functions[func.__name__]) == (None, None):
        time.sleep(3)
        print("Waiting for client to know about broker")

    ndcw_path = os.path.join(tmpdir, "ndcw.txt")
    client_worker_port = 5005 + port_offset
    cache_cw_dir = os.path.join(tmpdir, "cw")
    with open(ndcw_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    clientworker_app = P2PClientworkerApp(ndcw_path,
                                          local_port=client_worker_port,
                                          mongod_port=client_worker_port + 100,
                                          cache_path=cache_cw_dir)
    clientworker_app.register_p2p_func(can_do_work_func=lambda: True)(func)
    clientworker_thread = ServerThread(clientworker_app)
    clientworker_thread.start()
    while select_lru_worker(clientworker_app.registry_functions[
            func.__name__].p2pfunction) == (None, None):
        time.sleep(3)
        print("Waiting for worker to know about broker")

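    # Restarting a call whose very large upload has not finished on the broker
    # is expected to fail with an "identifier not found" error.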
    with ThreadPoolExecutor(max_workers=10) as executor:
        pool_future = executor.submit(client_func,
                                      file_handle=open(file, 'rb'))
        p2p_future = pool_future.result()
        try:
            p2p_future.restart()
        except Exception as e:
            print(str(e))
            assert "identifier not found" in str(e)
    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    clientworker_thread.shutdown()
    print("Shutdown worker")
    time.sleep(3)
Example #9
def upload_only_no_execution_multiple_large_files(tmpdir, port_offset, func,
                                                  file):
    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_func = client_app.register_p2p_func()(func)

    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir,
                                           old_requests_time_limit=(1 / 3600) *
                                           10)
    broker_worker_app.register_p2p_func()(func)
    broker_worker_thread = ServerThread(broker_worker_app, 10)
    broker_worker_thread.start()
    while select_lru_worker(
            client_app.registry_functions[func.__name__]) == (None, None):
        time.sleep(3)
        print("Waiting for client to know about broker")

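    # Upload several large files without waiting for execution; with the very
    # short old_requests_time_limit above, the items expire on the broker
    # before results can be fetched.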
    with ThreadPoolExecutor(max_workers=10) as executor:
        num_calls = 5
        list_futures_of_futures = []
        for i in range(num_calls):
            future = executor.submit(client_func,
                                     video_handle=open(file, 'rb'),
                                     random_arg=i)
            time.sleep(1)
            list_futures_of_futures.append(future)

        list_futures = []
        for f in list_futures_of_futures:
            try:
                list_futures.append(f.result().get())
            except:
                list_futures.append(None)
        # Expected to be None: although the uploads did finish, the items were
        # quickly deleted on the broker due to expiration.
        print(list_futures)
        assert len(list_futures) <= num_calls

    while True:
        col = list(
            MongoClient(port=broker_port + 100)["p2p"][func.__name__].find({}))
        if len(col) != 0:
            print("Waiting to delete the following items")
            pprint.pprint(col)
            time.sleep(3)
        else:
            break

    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    time.sleep(3)
Example #10
def delete_old_requests(tmpdir, port_offset, func, file=None):
    """
    In this situation. It is ok for check_brokerworker_deletion or check_brokerworker_termination
    to show error messages that originate from the broker. What happens is that on the broker side, the
    arguments expired and were deleted. And the worker is trying to pull arguments that no longer exist.
    """
    if file is None:
        file = __file__
    client_port = 5000 + port_offset
    broker_port = 5004 + port_offset
    client_worker_port = 5005 + port_offset

    ndclient_path = os.path.join(tmpdir, "ndclient.txt")
    ndcw_path = os.path.join(tmpdir, "ndcw.txt")
    cache_client_dir = os.path.join(tmpdir, "client")
    cache_bw_dir = os.path.join(tmpdir, "bw")
    cache_cw_dir = os.path.join(tmpdir, "cw")
    with open(ndclient_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    with open(ndcw_path, "w") as f:
        f.write("localhost:{}\n".format(broker_port))
    client_app = create_p2p_client_app(ndclient_path,
                                       local_port=client_port,
                                       mongod_port=client_port + 100,
                                       cache_path=cache_client_dir)
    client_func = client_app.register_p2p_func()(func)

    broker_worker_app = P2PBrokerworkerApp(None,
                                           local_port=broker_port,
                                           mongod_port=broker_port + 100,
                                           cache_path=cache_bw_dir,
                                           old_requests_time_limit=(1 / 3600) *
                                           40)
    broker_worker_app.register_p2p_func()(func)
    broker_worker_thread = ServerThread(broker_worker_app, processes=10)
    broker_worker_thread.start()
    clientworker_app = P2PClientworkerApp(ndcw_path,
                                          local_port=client_worker_port,
                                          mongod_port=client_worker_port + 100,
                                          cache_path=cache_cw_dir)
    clientworker_app.register_p2p_func(can_do_work_func=lambda: True)(func)
    clientworker_thread = ServerThread(clientworker_app)
    clientworker_thread.start()
    while select_lru_worker(
            client_app.registry_functions[func.__name__]) == (None, None):
        time.sleep(3)
        print("Waiting for client to know about broker")
    while select_lru_worker(clientworker_app.registry_functions[
            func.__name__].p2pfunction) == (None, None):
        time.sleep(3)
        print("Waiting for worker to know about broker")

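    # Make a couple of calls, collect the results, then wait below until the
    # broker has deleted the expired request documents from MongoDB.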
    with ThreadPoolExecutor(max_workers=10) as executor:
        num_calls = 2
        list_futures_of_futures = []
        for i in range(num_calls):
            future = executor.submit(client_func,
                                     video_handle=open(file, 'rb'),
                                     random_arg=i)
            list_futures_of_futures.append(future)
            time.sleep(2)
        list_futures = [f.result() for f in list_futures_of_futures]
        assert len(list_futures) <= num_calls
        list_results = [f.get() for f in list_futures]
        assert len(list_results) == num_calls and all(
            isinstance(r, dict) for r in list_results)

    from pymongo import MongoClient
    while True:
        col = list(
            MongoClient(port=broker_port + 100)["p2p"][func.__name__].find({}))
        if len(col) != 0:
            print("Waiting to delete the following items", col)
            time.sleep(3)
        else:
            break

    client_app.background_server.shutdown()
    print("Shutdown client")
    broker_worker_thread.shutdown()
    print("Shutdown brokerworker")
    clientworker_thread.shutdown()
    print("Shutdown worker")
    time.sleep(3)