Example 1
    def __init__(self, log_color=None):
        if log_color:
            self.log = get_logger("Node", bg_color=log_color)
        else:
            self.log = get_logger("Node")

        self.log.info("A Node has appeared")
        self.reactor = ReactorInterface(router=self)
        self.reactor.notify_ready()
Example 2
def do_something_else(data):
    log = get_logger("Main")
    log.info("do_something_ELSE with data: {}".format(data))

# if __name__ == "__main__":
#     log = get_logger("Main")
#     log.debug("\n\n-- MAIN THREAD --")
#     # q = aioprocessing.AioQueue()
#     # reactor = NetworkReactor(queue=q)
#     # reactor.start()
#     #
#     # q.coro_put(Command(Command.SUB, url=URL, callback=do_something))
#     #
#     # q.coro_put(Command(Command.PUB, url=URL2, data=b'oh boy i hope this gets through'))
#
#     nr = NetworkReactor()
#     nr.execute(Command.SUB, url=URL, callback=do_something)
#     nr.execute(Command.PUB, url=URL2, data=b'oh boy i hope this gets through')
#
#     log.critical("Will stop subbing in 6 seconds...")
#     time.sleep(6)
#     nr.execute(Command.UNSUB, url=URL)
#
#     log.critical("Rescribing in 2 seconds...")
#     time.sleep(2)
#     nr.execute(Command.SUB, url=URL, callback=do_something_else)
Example 3
def publisher():
    from cilantro.logger import get_logger
    from cilantro.utils.test import MPComposer
    from cilantro.messages.transaction.standard import StandardTransactionBuilder
    import time, os, sys

    log = get_logger("Publisher")
    sub_info = delegates[1]
    sub_info['ip'] = os.getenv('HOST_IP')

    d_info = delegates[0]
    d_info['ip'] = os.getenv('HOST_IP')

    pub = MPComposer(sk=d_info['sk'])

    # Publish on this node's own IP
    pub.add_pub(os.getenv('HOST_IP'))

    for i in range(100):
        log.critical("Sending pub")
        msg = StandardTransactionBuilder.random_tx()
        time.sleep(0.1)
        pub.send_pub_msg(filter='0', message=msg)

    log.critical("Pub Done")
    sys.exit(0)
Example 4
    def __init__(self, ip: str, signing_key: str, name='', *args, **kwargs):
        """
        IMPORTANT: This should not be overridden by subclasses. Instead, override the setup() method.

        Creates a Worker instance and starts the event loop. Instantiating this class blocks indefinitely, thus any
        setup must be done by overriding the setup() method (see comments below for explanation)
        :param args: This should never be set, as only kwargs are supported.
        :param kwargs: Named arguments that will be set as instance attributes.
        """
        assert len(args) == 0, "Worker cannot be constructed with args. Only keyword args are supported."

        self.name = name or type(self).__name__
        self.signing_key = signing_key
        self.ip = ip
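        # Derive the verifying (public) key from the signing key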
        self.verifying_key = wallet.get_vk(self.signing_key)
        self.log = get_logger(name)

        # We set all kwargs to instance variables so they are accessible in the setup() function. Setup cannot be done
        # in subclasses by overriding __init__ because instantiating this instance involves running an event loop
        # forever (which would block the setup code upon calling 'super().__init__(..)' in the subclass)
        # TODO this pattern is questionable. Perhaps args/kwargs should be passed into setup(...)?  --davis
        for k, v in kwargs.items():
            self.log.important("setting k {} to v {}".format(k, v))
            setattr(self, k, v)

        self._router = Router(get_handler_func=lambda: self, name=name)
        self._manager = ExecutorManager(signing_key=signing_key, router=self._router, name=name)
        self.composer = Composer(manager=self._manager, signing_key=signing_key, ip=ip, name=name)
        self._router.composer = self.composer

        self.setup()

        self.log.notice("Starting Worker named {}".format(name))
        self._manager.start()  # This starts the event loop and blocks this process indefinitely
Example 5
def do1():
    log = get_logger("do1")
    with DB() as db:
        log.debug("[1] hey i got db: {}".format(db))
        log.debug("sleeping for 2 seconds")
        time.sleep(2)
        log.debug("done sleeping for 2")
Example 6
def do2():
    log = get_logger("***do2")
    with DB() as db:
        log.debug("[2] hey i got db: {}".format(db))
        log.debug("sleeping for another 2 seconds")
        time.sleep(2)
        log.debug("done sleeping for 2")
Example 7
def run_node():
    import asyncio, os, zmq.auth
    from cilantro.protocol.overlay.dht import DHT
    from cilantro.logger import get_logger
    log = get_logger(__name__)

    signing_keys = {
        'node_1':
        '06391888e37a48cef1ded85a375490df4f9b2c74f7723e88c954a055f3d2685a',
        'node_2':
        '91f7021a9e8c65ca873747ae24de08e0a7acf58159a8aa6548910fe152dab3d8',
        'node_3':
        'f9489f880ef1a8b2ccdecfcad073e630ede1dd190c3b436421f665f767704c55',
        'node_4':
        '8ddaf072b9108444e189773e2ddcb4cbd2a76bbf3db448e55d0bfc131409a197',
        'node_5':
        '5664ec7306cc22e56820ae988b983bdc8ebec8246cdd771cfee9671299e98e3c',
        'node_6':
        '20b577e71e0c3bddd3ae78c0df8f7bb42b29b0c0ce9ca42a44e6afea2912d17b'
    }
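    # Pick this container's signing key based on its HOSTNAME env var (None if it is not one of node_1..node_6)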
    sk = signing_keys.get(os.getenv('HOSTNAME'))

    dht = DHT(mode='test', sk=sk, wipe_certs=True, block=False)

    dht.loop.run_forever()
Example 8
def start_delegate(url, port, delegates, sk):
    log = get_logger("DelegateFactory")
    log.debug("Instantiating a new delegate")
    d = Delegate(url=url,
                 port=port,
                 delegates=delegates,
                 signing_key=sk)
Example 9
    def __init__(self, loop, context, inproc_socket, ironhouse):
        self.loop = loop
        asyncio.set_event_loop(self.loop)
        self.context = context
        self.inproc_socket = inproc_socket
        self.ironhouse = ironhouse
        self.log = get_logger("{}.{}".format(Executor._parent_name, type(self).__name__))
Example 10
async def something_sketch():
    log = get_logger("SomethingSketch")
    log.debug("something sketch starting")
    # log.debug("about to blow up")
    # i = 10 / 0
    # log.debug('dead')
    log.debug("something sketch over")
Example 11
    def __init__(self, router, loop, signing_key, name='Node'):
        self.log = get_logger("{}.ReactorInterface".format(name))
        self.url = "ipc://{}-ReactorIPC-".format(name) + str(
            random.randint(0, pow(2, 16)))

        # Set instance vars
        self.router = router
        self.loop = loop
        asyncio.set_event_loop(self.loop)  # not sure if we need this (we shouldn't tbh)

        # Create zmq context and pair socket to communicate with reactor sub process
        self.context = zmq.asyncio.Context()
        self.socket = self.context.socket(zmq.PAIR)

        self.socket.bind(self.url)

        # Start reactor sub process
        self.proc = LProcess(target=self._start_daemon,
                             args=(self.url, signing_key, name))
        # self.proc.daemon = True
        self.proc.start()

        # Register signal handler to teardown
        signal.signal(signal.SIGTERM, self._signal_teardown)

        # Block execution of this proc until reactor proc is ready
        self.loop.run_until_complete(self._wait_child_rdy())
Example 12
    def __init__(self, ip, signing_key, loop, name='Node'):
        super().__init__()

        self.log = get_logger(name)
        self.ip = ip
        self.name = name

        self.signing_key = signing_key
        self.verifying_key = wallet.get_vk(self.signing_key)

        # DEBUG
        import os
        self.log.important3("Node with vk {} has ip {}".format(
            self.verifying_key, os.getenv("HOST_IP")))
        # END DEBUG

        self.loop = loop
        asyncio.set_event_loop(loop)

        self.log.notice("Starting overlay service")
        self.overlay_proc = Process(target=OverlayInterface.start_service,
                                    args=(signing_key, ))
        self.overlay_proc.start()

        self._composer = None

        self.tasks = []
Example 13
def run_delegate(slot_num):
    TEST_DUR = 170
    from cilantro.logger import get_logger, overwrite_logger_level
    from cilantro.utils.test.god import countdown
    from cilantro.constants.testnet import TESTNET_DELEGATES
    from cilantro.utils.test.mp_testables import MPDelegate
    import os, time
    import logging

    log = get_logger("DelegateRunner")
    log.important3("Test starting")
    # overwrite_logger_level(logging.WARNING)
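    # 21 sits just above logging.INFO (20), so DEBUG/INFO output is silenced while the higher custom levels used below presumably still print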
    overwrite_logger_level(21)
    # overwrite_logger_level(10)

    d_info = TESTNET_DELEGATES[slot_num]
    d_info['ip'] = os.getenv('HOST_IP')

    delegate = MPDelegate(signing_key=d_info['sk'])

    log.important3(
        "Sleeping for {} seconds before tearing down".format(TEST_DUR))
    countdown(TEST_DUR,
              "Tearing down in {} seconds...",
              log,
              status_update_freq=10)
    # time.sleep(TEST_DUR)
    delegate.teardown()

    log.success("EXPERIMENT OVER!!!")
Example 14
def start_server(xx=None):
    import os
    import asyncio
    import zmq.asyncio
    import time
    from cilantro.logger import get_logger

    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(loop)

    log = get_logger("ZMQ Server")
    log.critical(xx)
    log.info("server host ip is {}".format(os.getenv('HOST_IP')))
    assert os.getenv('HOST_IP') == '172.29.5.1', "what the heck host IP is not what we expected for node_1"
    ctx = zmq.asyncio.Context()
    socket = ctx.socket(socket_type=zmq.PAIR)
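    # zmq.PAIR is an exclusive one-to-one socket type; a single peer is expected to connect to the URL bound below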

    url = "tcp://172.29.5.1:10200"
    log.info("SERVER BINDING TO {}".format(url))
    socket.bind(url)

    log.info("sending first msg")
    socket.send_pyobj("hello for the first time")

    t = 0
    while t < 5:
        msg = "sup" # b'sup'
        log.debug("sending msg {}".format(msg))
        socket.send_pyobj(msg)
        time.sleep(1)
        t += 1

    socket.close()
Example 15
def run_mn():
    TEST_DUR = 170
    from cilantro.logger import get_logger, overwrite_logger_level
    from cilantro.utils.test.god import countdown
    from cilantro.utils.test.mp_testables import MPMasternode
    from cilantro.constants.testnet import TESTNET_MASTERNODES
    import os, time
    import logging

    log = get_logger("MasternodeRunner")
    log.important3("Test starting")

    # overwrite_logger_level(logging.WARNING)
    # overwrite_logger_level(logging.DEBUG)
    overwrite_logger_level(21)
    # overwrite_logger_level(10)

    sk = TESTNET_MASTERNODES[0]['sk']
    mn = MPMasternode(signing_key=sk)

    log.important3(
        "Sleeping for {} seconds before tearing down".format(TEST_DUR))
    countdown(TEST_DUR,
              "Tearing down in {} seconds...",
              log,
              status_update_freq=10)
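    # NOTE: countdown() above presumably already waits out TEST_DUR, so the sleep below doubles the wait (the delegate runner leaves it commented out)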
    time.sleep(TEST_DUR)
    mn.teardown()

    log.success("EXPERIMENT OVER!!!")
Example 16
def publisher():
    SLEEP_TIME = 1
    MAX_TIME = 10
    from cilantro.logger import get_logger, overwrite_logger_level
    from cilantro.utils.test import MPComposer
    from cilantro.messages.transaction.standard import StandardTransactionBuilder
    import time, os

    log = get_logger("Publisher")
    sub_info = delegates[1]
    sub_info['ip'] = os.getenv('HOST_IP')

    d_info = delegates[0]
    d_info['ip'] = os.getenv('HOST_IP')

    pub = MPComposer(sk=d_info['sk'])

    # Publish on this node's own IP
    pub.add_pub(os.getenv('HOST_IP'))

    log.critical(
        "Starting experiment, sending messages every {} seconds for a total of {} seconds"
        .format(SLEEP_TIME, MAX_TIME))
    elapsed_time = 0

    while elapsed_time < MAX_TIME:
        log.info("Sending pub")
        msg = StandardTransactionBuilder.random_tx()
        pub.send_pub_msg(filter='0', message=msg)

        time.sleep(SLEEP_TIME)
        elapsed_time += SLEEP_TIME

    pub.teardown()
    log.critical("Done with experiment!")
Example 17
    def __init__(self, port=0):
        super().__init__()
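        # NOTE: port is sliced as a string below, so callers presumably pass it as a str (the int default of 0 would raise a TypeError)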
        self.db = DriverManager(db=port[-1:])
        self.log = get_logger("Delegate.Interpreter:{}".format(port))

        self.log.debug("Interpreter flushing scratch...")
        self.db.scratch.flush()
Example 18
    def __init__(self):
        self.log = get_logger("ReactorThread")
        # self.ctx = context
        self.ctx = zmq.asyncio.Context()

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.recv_socket = self.ctx.socket(zmq.PAIR)
        self.send_socket = self.ctx.socket(zmq.PAIR)

        self.recv_socket.connect(URL)
        self.send_socket.connect(URL2)

        # self.log.debug("sleeping 1 second...")
        time.sleep(0.25)
        self.log.debug("zsending msg to main thread")
        self.send_socket.send(b'hi this is ur good fren the reactor')

        # self.log.debug("sleeping another 1 second...")
        # time.sleep(1)
        # self.log.debug("sending ANOTHER msg to main thread")
        # self.main_socket.send(b'ANOTHER -- hihihi this is ur good fren the reactor')

        self.loop.run_until_complete(self.start_recv())
Example 19
    def __init__(self,
                 config_fn=None,
                 assert_fn=None,
                 name='TestableProcess',
                 always_run_as_subproc=False,
                 *args,
                 **kwargs):
        super().__init__()
        self.log = get_logger(name)
        self.name = name

        self.config_fn = config_fn  # Function to configure object with mocks
        self.assert_fn = assert_fn  # Function to run assertions on said mocks

        self.test_proc = None
        self.container_name = None  # Name of the docker container this object is proxying to (if run on VM)

        # Create a wrapper around the build_obj with args and kwargs. We do this b/c this function will actually be
        # invoked in a separate process/machine, thus we need to capture the function call to serialize it and send
        # it across a socket
        build_fn = wrap_func(type(self).build_obj, *args, **kwargs)

        self._config_url_and_test_proc(build_fn, always_run_as_subproc)

        # 'socket' is used to proxy commands to blocking object running in a child process (or possibly on a VM)
        self.ctx = zmq.Context()
        self.socket = self.ctx.socket(socket_type=zmq.PAIR)
        self.log.debug("Test Orchestrator connecting to url {}".format(
            self.url))
        self.socket.connect(self.url)

        # Block this process until we get a ready signal from the subprocess/VM
        self.wait_for_test_object()
Example 20
    def __init__(self, config_fn=None, assert_fn=None, name='TestableProcess', *args, **kwargs):
        super().__init__()
        self.log = get_logger(name)
        self.name = name
        self.url = _gen_url(name)

        self.config_fn = config_fn  # Function to configure object with mocks
        self.assert_fn = assert_fn  # Function to run assertions on said mocks

        # 'socket' is used to proxy commands to blocking object running in a child process (possibly on a VM)
        self.ctx = zmq.Context()
        self.socket = self.ctx.socket(socket_type=zmq.PAIR)
        self.socket.bind(self.url)

        # Add this object to the registry of testers
        MPTesterBase.testers.append(self)

        # Create a wrapper around the build_obj with args and kwargs. We do this b/c this function will actually be
        # invoked in a separate process/machine, thus we need to capture the function call to serialize it and send
        # it across a socket
        build_fn = wrap_func(type(self).build_obj, *args, **kwargs)

        # Create and start the subprocess that will run the blocking object
        self.test_proc = LProcess(target=self._run_test_proc,
                                  args=(self.name, self.url, build_fn, self.config_fn, self.assert_fn))
        self.start_test()
Example 21
def something_terrible():
    def nested_horror():
        async def async_death():
            log = get_logger("async_death")
            log.debug("async death start")

            log.debug("starting startup nap")
            await asyncio.sleep(4)
            log.debug("startup nap finished")

            # while True:
            #     log.debug('sleeping...')
            #     await asyncio.sleep(1.5)
            #     log.debug('yawn')
            # log.critical("gunna die")
            # i = 10 / 0
            # log.debug("about to die")
            # i = 10 / 0
            log.debug("async death over")

        async def something_boring():
            log = get_logger("SomethingBoring")
            log.debug("starting something boring")
            await asyncio.sleep(2)
            log.debug("something boring done")

        async def something_sketch():
            log = get_logger("SomethingSketch")
            log.debug("something sketch starting")
            # log.debug("about to blow up")
            # i = 10 / 0
            # log.debug('dead')
            log.debug("something sketch over")

        log = get_logger("NestedHorror")
        log.critical("nested horror commence")

        log.critical("nesthorror start sleep")
        time.sleep(4)
        log.critical("nesthorror done sleep")

        # loop = asyncio.new_event_loop()
        # asyncio.set_event_loop(loop)
        # asyncio.ensure_future(something_boring())
        # asyncio.ensure_future(something_sketch())
        #
        # loop.run_until_complete(async_death())

        # THIS WILL NEVER PRINT (it blocks ofc)
        # log.critical("about to destruct")
        # i = 10 / 0

    log = get_logger("SomethingTerrible")
    log.debug("something terrible spinning up nested_horror")
    p = Process(target=nested_horror)
    p.start()

    log.debug("\n\nPYTHON EQUIVALENT OF JUMPING OFF GOLDEN GATE BRIDGE\n\n")
    pickle.dumps(log)
Example 22
    def __init__(self, should_reset):
        self.log = get_logger("DB")
        self.log.info("Creating DB instance with should_reset={}".format(should_reset))

        self.lock = Lock()

        self.ex = Executer(**DB_SETTINGS)
        self.tables = build_tables(self.ex, should_drop=should_reset)
Example 23
    def __init__(self):
        self.log = get_logger("PubSubNode")
        self.log.info("-- PubSubNode Init-ing --")

        self.reactor = ReactorInterface(self)
        self.reactor.execute(Command.ADD_SUB, url=URL, callback='do_something')
        self.reactor.execute(Command.ADD_PUB, url=URL)

        self.reactor.notify_ready()
Example 24
def seed_wallets(amount=10000, i=0):
    log = get_logger("WalletSeeder")
    log.critical("Seeding wallets with amount {}".format(amount))
    with DB('{}_{}'.format(DB_NAME, i)) as db:
        log.critical("GOT DB WITH NAME: {}".format(db.db_name))
        for wallet in KNOWN_ADRS:
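            # Each KNOWN_ADRS entry presumably holds a (signing_key, verifying_key) pair; the verifying key is credited with the starting amount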
            q = insert(db.tables.balances).values(wallet=wallet[1].encode(),
                                                  amount=amount)
            db.execute(q)
Example 25
    def __new__(cls, clsname, bases, clsdict):
        print("LogMeta NEW for class {}".format(clsname))
        clsobj = super().__new__(cls, clsname, bases, clsdict)
        clsobj.log = get_logger(clsname)

        # Collect methods tagged with a '_recv' attribute (presumably set by a receiver decorator), keyed by that attribute
        clsobj._receivers = {r._recv: r for r in clsdict.values() if hasattr(r, '_recv')}
        print("_receivers: ", clsobj._receivers)

        return clsobj
Example 26
def run_pusher():
    log = get_logger("Pusher")
    log.notice("Starting pusher...")
    ctx = zmq.Context()

    sock = ctx.socket(socket_type=zmq.PUSH)
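    # As written, this PUSH socket is never bound or connected, so send() will block until a downstream PULL peer is attached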

    for _ in range(NUM_MSG):
        sock.send(b'sup')
Example 27
    def __init__(self, statemachine: StateMachine, name='Node'):
        super().__init__()
        self.log = get_logger("{}.Router".format(name))
        self.sm = statemachine

        # Define mapping between callback names and router functions
        self.routes = {StateInput.INPUT: self._route,
                       StateInput.REQUEST: self._route_request,
                       StateInput.TIMEOUT: self._route}
Example 28
    def __init__(self, db_name, should_reset):
        self.db_name = db_name
        self.log = get_logger("DB-{}".format(db_name))
        self.log.info(
            "Creating DB instance for {} with should_reset={}".format(
                db_name, should_reset))
        self.lock = Lock()

        self.db, self.tables = create_db(db_name, should_reset)
Example 29
    def __init__(self, should_reset):
        self.log = get_logger("DB")
        self.log.info("Creating DB instance with should_reset={}".format(should_reset))

        self.lock = Lock()

        # self.ex = Executer.init_local_noauth_dev()
        self.ex = Executer('root', '', '', '127.0.0.1')
        self.tables = build_tables(self.ex, should_drop=should_reset)
Example 30
    def __init__(self, *args, **kwargs):
        self.log = get_logger("BaseNode")
        self.log.info("-- BaseNode Initiating --")
        self.port = int(os.getenv('PORT', 31337))
        self.host = os.getenv('HOST_IP', '127.0.0.1')
        self.loop = asyncio.get_event_loop()
        self.wallet = ED25519Wallet()
        self.router = Router(statemachine=self)
        self.reactor = ReactorInterface(self.router, self.loop, self.wallet.s)
        self.composer = Composer(self.reactor, self.wallet.s)