Example No. 1
@pytest.fixture
def responder(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return responder
Example No. 2

def test_monitor_chain_single_update(block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    generate_blocks(1)

    assert len(chain_monitor.receiving_queues) == 2

    queue0_block = chain_monitor.receiving_queues[0].get()
    queue1_block = chain_monitor.receiving_queues[1].get()
    assert queue0_block == queue1_block
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.enqueue(queue0_block) is False

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
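The `enqueue(...) is False` assertion above relies on the ChainMonitor deduplicating block hashes across its two feeds (zmq and polling). A minimal sketch of that idea, assuming a list of seen tips; `dedup_enqueue` and its arguments are illustrative names, not the teos API:

def dedup_enqueue(internal_queue, last_tips, block_hash):
    # Reject hashes we have already seen so that the concurrent feeds
    # (zmq and polling) cannot deliver the same block twice.
    if block_hash in last_tips:
        return False
    last_tips.append(block_hash)
    internal_queue.put(block_hash)
    return True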
Example No. 3

def test_monitor_chain_single_update(db_manager, block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)

    chain_monitor.best_tip = None
    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain()
    generate_block()

    watcher_block = chain_monitor.watcher_queue.get()
    responder_block = chain_monitor.responder_queue.get()
    assert watcher_block == responder_block
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.update_state(watcher_block) is False
Example No. 4
def test_check_confirmations(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # check_confirmations checks, given the list of transactions in a block, which of the known penalty transactions
    # have been confirmed. To test this we need to create a list of transactions and set up the responder's state
    txs = [get_random_value_hex(32) for _ in range(20)]

    # The responder has a list of unconfirmed transactions; let's make some of them the ones we've received
    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
    txs_subset = random.sample(txs, k=10)
    responder.unconfirmed_txs.extend(txs_subset)

    # We also need to add them to the tx_tracker_map since they would be there in normal conditions
    responder.tx_tracker_map = {
        txid: TransactionTracker(txid[:LOCATOR_LEN_HEX], txid, None, None,
                                 None)
        for txid in responder.unconfirmed_txs
    }

    # Let's make sure that there are no txs with missed confirmations yet
    assert len(responder.missed_confirmations) == 0

    responder.check_confirmations(txs)

    # After checking confirmations the txs in txs_subset should be confirmed (not part of unconfirmed_txs anymore)
    # and the rest should have a missing confirmation
    for tx in txs_subset:
        assert tx not in responder.unconfirmed_txs

    for tx in responder.unconfirmed_txs:
        assert responder.missed_confirmations[tx] == 1
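For reference, a hedged sketch of the bookkeeping this test asserts (the helper below is ours, not the Responder API): transactions included in the block leave the unconfirmed list, while every transaction that stays unconfirmed accrues one missed confirmation.

def check_confirmations_sketch(unconfirmed_txs, missed_confirmations, block_txs):
    # Txs included in the block are confirmed and leave the list; the rest
    # accumulate one more missed confirmation each.
    block_txs = set(block_txs)
    still_unconfirmed = []
    for txid in unconfirmed_txs:
        if txid in block_txs:
            continue
        missed_confirmations[txid] = missed_confirmations.get(txid, 0) + 1
        still_unconfirmed.append(txid)
    return still_unconfirmed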
Example No. 5
def test_monitor_chain_wrong_status_raises(block_processor_mock, monkeypatch):
    # Calling monitor_chain when not idle should raise
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)

    for status in ChainMonitorStatus:
        if status != ChainMonitorStatus.IDLE:
            monkeypatch.setattr(chain_monitor, "status", status)
            with pytest.raises(RuntimeError, match="can only be called in IDLE status"):
                chain_monitor.monitor_chain()
Example No. 6

def test_monitor_chain_wrong_status_raises(block_processor):
    # Calling monitor_chain when not idle should raise
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    for status in ChainMonitorStatus:
        if status != ChainMonitorStatus.IDLE:
            chain_monitor.status = status  # mock the status
            with pytest.raises(RuntimeError, match="can only be called in IDLE status"):
                chain_monitor.monitor_chain()
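Both versions expect the same guard: monitor_chain must refuse to run unless the monitor is idle. A self-contained sketch of such a state check (the enum values appear throughout these tests; the class body is an assumption):

from enum import Enum, auto

class ChainMonitorStatus(Enum):
    IDLE = auto()
    LISTENING = auto()
    ACTIVE = auto()
    TERMINATED = auto()

class MiniChainMonitor:
    def __init__(self):
        self.status = ChainMonitorStatus.IDLE

    def monitor_chain(self):
        # Refuse to start listening twice, or after termination
        if self.status != ChainMonitorStatus.IDLE:
            raise RuntimeError("monitor_chain can only be called in IDLE status")
        self.status = ChainMonitorStatus.LISTENING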
Example No. 7
@pytest.fixture
def responder(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor([Queue(), responder.block_queue],
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    responder_thread = responder.awake()
    chain_monitor.activate()

    yield responder

    chain_monitor.terminate()
    responder_thread.join()
Example No. 8
def test_manage_subscription_expiry(gatekeeper):
    # Subscriptions expire at the expiry height, but their data is only deleted once outdated (expiry_delta blocks after)
    current_height = gatekeeper.block_processor.get_block_count()
    expiring_users = {
        get_random_value_hex(32):
        UserInfo(available_slots=10, subscription_expiry=current_height + 1)
        for _ in range(10)
    }
    gatekeeper.registered_users.update(expiring_users)

    # We will need a ChainMonitor instance for this so data can be fed to us
    bitcoind_feed_params = {
        k: v
        for k, v in config.items() if k.startswith("BTC_FEED")
    }
    chain_monitor = ChainMonitor([gatekeeper.block_queue],
                                 gatekeeper.block_processor,
                                 bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Users expire after this block. Check that they are currently not expired
    for user_id in expiring_users.keys():
        has_subscription_expired, _ = gatekeeper.has_subscription_expired(
            user_id)
        assert not has_subscription_expired

    # Generate a block and users must have expired
    generate_blocks_with_delay(1)
    for user_id in expiring_users.keys():
        has_subscription_expired, _ = gatekeeper.has_subscription_expired(
            user_id)
        assert has_subscription_expired

    # Users will remain in the registered_users dictionary until expiry_delta blocks later.
    generate_blocks_with_delay(gatekeeper.expiry_delta - 1)
    # Users will be deleted in the next block
    assert set(expiring_users).issubset(gatekeeper.registered_users)

    generate_blocks_with_delay(1)
    # Data has just been deleted but should still be present on the cache
    block_height_deletion = gatekeeper.block_processor.get_block_count()
    assert not set(expiring_users).issubset(gatekeeper.registered_users)
    for user_id, _ in expiring_users.items():
        assert not gatekeeper.user_db.load_user(user_id)
    assert gatekeeper.outdated_users_cache[block_height_deletion].keys(
    ) == expiring_users.keys()

    # After OUTDATED_USERS_CACHE_SIZE_BLOCKS blocks the data should not be there anymore (check the last block where it
    # should still be cached and the first block where it shouldn't)
    generate_blocks_with_delay(OUTDATED_USERS_CACHE_SIZE_BLOCKS - 1)
    assert block_height_deletion in gatekeeper.outdated_users_cache
    generate_blocks_with_delay(1)
    assert block_height_deletion not in gatekeeper.outdated_users_cache
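The last four assertions describe a fixed-size cache keyed by block height. A hedged sketch of that structure (the class and the default size below are illustrative, not the Gatekeeper implementation):

OUTDATED_USERS_CACHE_SIZE_BLOCKS = 10  # assumed value, for illustration only

class OutdatedUsersCache:
    def __init__(self, size=OUTDATED_USERS_CACHE_SIZE_BLOCKS):
        self.size = size
        self.entries = {}  # block_height -> {user_id: user_info}

    def add(self, block_height, outdated_users):
        self.entries[block_height] = outdated_users
        if len(self.entries) > self.size:
            # Evict the oldest height, so an entry survives exactly `size`
            # blocks after deletion, as the test asserts.
            del self.entries[min(self.entries)]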
Example No. 9
def test_rebroadcast(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    txs_to_rebroadcast = []

    # Rebroadcast calls add_response with retry=True. The tracker data is already in trackers.
    for i in range(20):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            penalty_rawtx=create_dummy_transaction().hex())

        tracker = TransactionTracker(locator, dispute_txid, penalty_txid,
                                     penalty_rawtx, appointment_end)

        responder.trackers[uuid] = {
            "locator": locator,
            "penalty_txid": penalty_txid,
            "appointment_end": appointment_end,
        }

        # We need to add it to the db too
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

        responder.tx_tracker_map[penalty_txid] = [uuid]
        responder.unconfirmed_txs.append(penalty_txid)

        # Let's add some of the txs in the rebroadcast list
        if (i % 2) == 0:
            txs_to_rebroadcast.append(penalty_txid)

    # The block_hash passed to rebroadcast does not matter much now. It will matter in the future, to deal with errors
    receipts = responder.rebroadcast(txs_to_rebroadcast)

    # All txs should have been delivered and the missed confirmation reset
    for txid, receipt in receipts:
        # Sanity check
        assert txid in txs_to_rebroadcast

        assert receipt.delivered is True
        assert responder.missed_confirmations[txid] == 0
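A hedged sketch of what the receipts loop checks: rebroadcast re-sends each penalty transaction through the carrier and resets its missed-confirmation counter. Carrier.send_transaction and the rawtx lookup are assumptions based on how the test reads:

def rebroadcast_sketch(carrier, missed_confirmations, txs_to_rebroadcast, rawtx_by_txid):
    receipts = []
    for txid in txs_to_rebroadcast:
        # Re-send the penalty tx and start counting confirmations afresh
        receipt = carrier.send_transaction(rawtx_by_txid[txid], txid)
        missed_confirmations[txid] = 0
        receipts.append((txid, receipt))
    return receipts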
Example No. 10
@pytest.fixture
def watcher(db_manager):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        block_processor,
        responder,
        signing_key.to_der(),
        config.get("MAX_APPOINTMENTS"),
        config.get("EXPIRY_DELTA"),
    )

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return watcher
Example No. 11
def test_terminate(block_processor):
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    chain_monitor.activate()

    chain_monitor.terminate()

    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # generate a new block
    generate_blocks(1)
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
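The two assertions at the end rely on a sentinel pattern: once terminated, the monitor stops feeding blocks and pushes ChainMonitor.END_MESSAGE to every receiving queue so consumers can unblock. A minimal sketch, reusing the ChainMonitorStatus enum sketched earlier and a stand-in sentinel:

END_MESSAGE = "END"  # stand-in for ChainMonitor.END_MESSAGE

def terminate_sketch(monitor):
    # Flip the status first so the feed threads stop enqueuing new blocks,
    # then wake every consumer with the end-of-stream sentinel.
    monitor.status = ChainMonitorStatus.TERMINATED
    for queue in monitor.receiving_queues:
        queue.put(END_MESSAGE)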
Example No. 12
@pytest.fixture
def watcher(db_manager, gatekeeper):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        signing_key.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    chain_monitor = ChainMonitor(
        watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
    )
    chain_monitor.monitor_chain()

    return watcher
Example No. 13
def test_monitor_chain_and_activate(block_processor):
    # In this test, we generate some blocks after `monitor_chain`, then `activate` and generate a few more blocks.
    # We verify that all the generated blocks are indeed sent to the queues in the right order.

    queue1 = Queue()
    queue2 = Queue()

    # We add some initial blocks to the receiving queues, to simulate a bootstrap with previous information
    pre_blocks = [get_random_value_hex(32) for _ in range(5)]
    for block in pre_blocks:
        queue1.put(block)
        queue2.put(block)

    # We don't activate the ChainMonitor but we start listening; therefore received blocks should accumulate in the
    # internal queue
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # we generate some blocks while the monitor is listening but not active
    init_blocks = generate_blocks_with_delay(3, 0.15)

    time.sleep(0.11)  # higher than the polling interval

    chain_monitor.activate()

    # generate some more blocks after activating
    after_blocks = generate_blocks_with_delay(3, 0.15)

    # we now check that all the blocks are in the receiving queues in the correct order
    all_blocks = pre_blocks + init_blocks + after_blocks
    for block in all_blocks:
        assert queue1.get(timeout=0.1) == block
        assert queue2.get(timeout=0.1) == block

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
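The ordering guarantee this test verifies comes from buffering: while LISTENING, block hashes pile up in the monitor's internal queue, and activate() starts draining them, in arrival order, to every receiving queue. A hedged sketch of that dispatch loop (the structure is assumed, not the teos implementation):

END_MESSAGE = "END"  # stand-in sentinel

def dispatch_blocks(internal_queue, receiving_queues):
    # Deliver buffered block hashes to every consumer, in arrival order,
    # until the monitor is terminated.
    while True:
        block_hash = internal_queue.get()
        if block_hash == END_MESSAGE:
            break
        for queue in receiving_queues:
            queue.put(block_hash)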
Example No. 14
def test_activate(block_processor):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    assert chain_monitor.status == ChainMonitorStatus.ACTIVE

    # last_tips is updated before starting the threads, so it should not be empty now.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received
    for _ in range(5):
        generate_blocks(1)
        watcher_block = chain_monitor.receiving_queues[0].get()
        responder_block = chain_monitor.receiving_queues[1].get()
        assert watcher_block == responder_block
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
Example No. 15
def test_terminate(block_processor_mock, monkeypatch):
    # Test that the ChainMonitor is stopped on a terminate signal
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor_mock, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    # Activate the monitor
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Ask it to terminate
    chain_monitor.terminate()
    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # Mock the generation of a new block
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
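The monkeypatched get_best_block_hash above stands in for the polling feed. A sketch of the polling loop it would be driving, reusing the ChainMonitorStatus enum sketched earlier (the loop structure is an assumption; the real monitor also runs a zmq feed in parallel):

import time

def monitor_chain_polling_sketch(monitor):
    while monitor.status != ChainMonitorStatus.TERMINATED:
        # Ask bitcoind for the current tip and enqueue it if it is new
        current_tip = monitor.block_processor.get_best_block_hash(blocking=True)
        if current_tip and current_tip not in monitor.last_tips:
            monitor.enqueue(current_tip)
        time.sleep(monitor.polling_delta)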
Example No. 16
@pytest.fixture
def run_api(db_manager, carrier, block_processor):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(db_manager, block_processor, responder, sk.to_der(),
                      config.get("MAX_APPOINTMENTS"),
                      config.get("EXPIRY_DELTA"))

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    watcher.awake()
    chain_monitor.monitor_chain()

    api_thread = Thread(
        target=API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                   watcher).start)
    api_thread.daemon = True
    api_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
Example No. 17
@pytest.fixture
def watcher(run_bitcoind, db_manager, gatekeeper):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        signing_key,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    watcher.last_known_block = block_processor.get_best_block_hash()

    chain_monitor = ChainMonitor(
        [watcher.block_queue, watcher.responder.block_queue], block_processor,
        bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    return watcher
Example No. 18
def test_monitor_chain(block_processor):
    # We don't activate it but we start listening; therefore received blocks should accumulate in the internal queue
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # The tip is updated before starting the threads, so it should have been added to last_tips.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received and added to the queue
    count = 0
    for _ in range(5):
        generate_blocks(1)
        count += 1
        time.sleep(0.11)  # higher than the polling interval
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()
        assert chain_monitor.queue.qsize() == count

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
Example No. 19

def test_monitor_chain(db_manager, block_processor):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)

    chain_monitor.best_tip = None
    chain_monitor.monitor_chain()

    # The tip is updated before starting the threads, so it should have changed.
    assert chain_monitor.best_tip is not None

    # Blocks should be received
    for _ in range(5):
        generate_block()
        watcher_block = chain_monitor.watcher_queue.get()
        responder_block = chain_monitor.responder_queue.get()
        assert watcher_block == responder_block
        assert chain_monitor.watcher_queue.empty()
        assert chain_monitor.responder_queue.empty()

    # And the threads should be terminated on terminate
    chain_monitor.terminate = True
    # The zmq thread needs a block generation to release from the recv method.
    generate_block()
Example No. 20
def test_get_completed_trackers(db_manager, gatekeeper, carrier,
                                block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A complete tracker is one whose penalty transaction has been irrevocably resolved (i.e. has reached 100
    # confirmations)
    # We'll create 3 types of txs: irrevocably resolved, confirmed but not irrevocably resolved, and unconfirmed
    trackers_ir_resolved = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_confirmed = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_unconfirmed = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_unconfirmed[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_ir_resolved)
    all_trackers.update(trackers_confirmed)
    all_trackers.update(trackers_unconfirmed)

    # Let's add all to the Responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = tracker.get_summary()

    for uuid, tracker in trackers_ir_resolved.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    generate_block_w_delay()

    for uuid, tracker in trackers_confirmed.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    # ir_resolved have 100 confirmations and confirmed have 99
    generate_blocks_w_delay(99)

    # Let's check
    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys = list(trackers_ir_resolved.keys())
    assert set(completed_trackers) == set(ended_trackers_keys)

    # Generating 1 additional block should also include the confirmed ones
    generate_block_w_delay()

    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys.extend(list(trackers_confirmed.keys()))
    assert set(completed_trackers) == set(ended_trackers_keys)
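The two-phase assertion works because completion is a pure confirmation-count rule: a tracker completes once its penalty transaction reaches 100 confirmations. A hedged sketch (the constant comes from the comment at the top of the test; get_confirmations is an assumed helper):

IRREVOCABLY_RESOLVED = 100

def get_completed_trackers_sketch(trackers, get_confirmations):
    # trackers maps uuid -> penalty_txid; get_confirmations returns the
    # current confirmation count for a txid (0 if unconfirmed).
    return [
        uuid for uuid, penalty_txid in trackers.items()
        if get_confirmations(penalty_txid) >= IRREVOCABLY_RESOLVED
    ]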
Example No. 21
class TeosDaemon:
    """
    The :class:`TeosDaemon` organizes the code to initialize all the components of teos, start the service, and stop
    and tear it down.

    Args:
        config (:obj:`dict`): the configuration object.
        sk (:obj:`PrivateKey`): the :obj:`PrivateKey` of the tower.
        logger (:obj:`Logger <teos.logger.Logger>`): the logger instance.
        logging_port (:obj:`int`): the port where the logging server can be reached (localhost:logging_port)
        stop_log_event (:obj:`multiprocessing.Event`): the event to signal a stop to the logging server
        logging_process (:obj:`multiprocessing.Process`): the logging server process

    Attributes:
        stop_command_event (:obj:`threading.Event`): The event that will be set to initiate a graceful shutdown.
        stop_event (:obj:`multiprocessing.Event`): The event that services running on different processes will monitor
            in order to be informed that they should shutdown.
        block_processor (:obj:`teos.block_processor.BlockProcessor`): The block processor instance.
        db_manager (:obj:`teos.appointments_dbm.AppointmentsDBM`): The db manager for appointments.
        watcher (:obj:`teos.watcher.Watcher`): The watcher instance.
        watcher_thread (:obj:`threading.Thread`): After ``bootstrap_components``, the thread that
            runs the Watcher monitoring (set to :obj:`None` beforehand).
        responder_thread (:obj:`threading.Thread`): After ``bootstrap_components``, the thread that
            runs the Responder monitoring (set to :obj:`None` beforehand).
        chain_monitor (:obj:`teos.chain_monitor.ChainMonitor`): The ``ChainMonitor`` instance.
        internal_api_endpoint (:obj:`str`): The full host name and port of the internal api.
        internal_api (:obj:`teos.internal_api.InternalAPI`): The InternalAPI instance.
        api_proc (:obj:`subprocess.Popen` or :obj:`multiprocessing.Process`): Once the API process
            is created, the instance of either ``Popen`` or ``Process`` that is serving the public API (set to
            :obj:`None` beforehand).
        rpc_process (:obj:`multiprocessing.Process`): The instance of the internal RPC server; only set if running.
    """
    def __init__(self, config, sk, logger, logging_port, stop_log_event,
                 logging_process):
        self.config = config
        self.logger = logger
        self.logging_port = logging_port
        self.stop_log_event = stop_log_event
        self.logging_process = logging_process

        # event triggered when a ``stop`` command is issued
        # Using multiprocessing.Event seems to cause a deadlock if event.set() is called in a signal handler that
        # interrupted event.wait(). This does not happen with threading.Event.
        # See https://bugs.python.org/issue41606
        self.stop_command_event = threading.Event()

        # event triggered when the public API is halted, hence teosd is ready to stop
        self.stop_event = multiprocessing.Event()

        bitcoind_connect_params = {
            k: v
            for k, v in config.items() if k.startswith("BTC_RPC")
        }
        bitcoind_feed_params = {
            k: v
            for k, v in config.items() if k.startswith("BTC_FEED")
        }

        bitcoind_reachable = threading.Event()
        if not can_connect_to_bitcoind(bitcoind_connect_params):
            raise RuntimeError("Cannot connect to bitcoind")
        elif not in_correct_network(bitcoind_connect_params,
                                    config.get("BTC_NETWORK")):
            raise RuntimeError(
                "bitcoind is running on a different network, check teos.conf and bitcoin.conf"
            )
        else:
            bitcoind_reachable.set()

        self.logger.info("tower_id = {}".format(
            Cryptographer.get_compressed_pk(sk.public_key)))
        self.block_processor = BlockProcessor(bitcoind_connect_params,
                                              bitcoind_reachable)
        carrier = Carrier(bitcoind_connect_params, bitcoind_reachable)

        gatekeeper = Gatekeeper(
            UsersDBM(self.config.get("USERS_DB_PATH")),
            self.block_processor,
            self.config.get("SUBSCRIPTION_SLOTS"),
            self.config.get("SUBSCRIPTION_DURATION"),
            self.config.get("EXPIRY_DELTA"),
        )
        self.db_manager = AppointmentsDBM(
            self.config.get("APPOINTMENTS_DB_PATH"))
        responder = Responder(self.db_manager, gatekeeper, carrier,
                              self.block_processor)
        self.watcher = Watcher(
            self.db_manager,
            gatekeeper,
            self.block_processor,
            responder,
            sk,
            self.config.get("MAX_APPOINTMENTS"),
            self.config.get("LOCATOR_CACHE_SIZE"),
        )

        self.watcher_thread = None
        self.responder_thread = None

        # Create the chain monitor
        self.chain_monitor = ChainMonitor(
            [
                self.watcher.block_queue, responder.block_queue,
                gatekeeper.block_queue
            ],
            self.block_processor,
            bitcoind_feed_params,
        )

        # Set up the internal API
        self.internal_api_endpoint = f'{self.config.get("INTERNAL_API_HOST")}:{self.config.get("INTERNAL_API_PORT")}'
        self.internal_api = InternalAPI(
            self.watcher, self.internal_api_endpoint,
            self.config.get("INTERNAL_API_WORKERS"), self.stop_command_event)

        # Create the rpc, without starting it
        self.rpc_process = multiprocessing.Process(
            target=rpc.serve,
            args=(
                self.config.get("RPC_BIND"),
                self.config.get("RPC_PORT"),
                self.internal_api_endpoint,
                self.logging_port,
                self.stop_event,
            ),
            daemon=True,
        )

        # This variable will contain the handle of the process running the API, once the service is started.
        # It will be an instance of either Popen or Process, depending on the WSGI config setting.
        self.api_proc = None

    def bootstrap_components(self):
        """
        Performs the initial setup of the components. It loads the appointments and trackers for the watcher and the
        responder (if any), and awakes the components. It also populates the block queues with any missing data, in
        case the tower has been offline for some time. Finally, it starts the chain monitor.
        """

        # Make sure that the ChainMonitor starts listening to new blocks while we bootstrap
        self.chain_monitor.monitor_chain()

        watcher_appointments_data = self.db_manager.load_watcher_appointments()
        responder_trackers_data = self.db_manager.load_responder_trackers()

        if len(watcher_appointments_data) == 0 and len(
                responder_trackers_data) == 0:
            self.logger.info("Fresh bootstrap")

            self.watcher_thread = self.watcher.awake()
            self.responder_thread = self.watcher.responder.awake()

        else:
            self.logger.info("Bootstrapping from backed up data")

            # Update the Watcher backed up data if found.
            if len(watcher_appointments_data) != 0:
                self.watcher.appointments, self.watcher.locator_uuid_map = Builder.build_appointments(
                    watcher_appointments_data)

            # Update the Responder with backed up data if found.
            if len(responder_trackers_data) != 0:
                self.watcher.responder.trackers, self.watcher.responder.tx_tracker_map = Builder.build_trackers(
                    responder_trackers_data)

            # Awaking components so the states can be updated.
            self.watcher_thread = self.watcher.awake()
            self.responder_thread = self.watcher.responder.awake()

            last_block_watcher = self.db_manager.load_last_block_hash_watcher()
            last_block_responder = self.db_manager.load_last_block_hash_responder(
            )

            # Populate the block queues with data if they've missed some while offline. If the blocks of both match
            # we don't perform the search twice.

            # FIXME: 32-reorgs-offline dropped txs are not used at this point.
            last_common_ancestor_watcher, dropped_txs_watcher = self.block_processor.find_last_common_ancestor(
                last_block_watcher)
            missed_blocks_watcher = self.block_processor.get_missed_blocks(
                last_common_ancestor_watcher)

            if last_block_watcher == last_block_responder:
                dropped_txs_responder = dropped_txs_watcher
                missed_blocks_responder = missed_blocks_watcher

            else:
                last_common_ancestor_responder, dropped_txs_responder = self.block_processor.find_last_common_ancestor(
                    last_block_responder)
                missed_blocks_responder = self.block_processor.get_missed_blocks(
                    last_common_ancestor_responder)

            # If only one of the instances needs to be updated, it can be done separately.
            if len(missed_blocks_watcher
                   ) == 0 and len(missed_blocks_responder) != 0:
                Builder.populate_block_queue(
                    self.watcher.responder.block_queue,
                    missed_blocks_responder)
                self.watcher.responder.block_queue.join()

            elif len(missed_blocks_responder
                     ) == 0 and len(missed_blocks_watcher) != 0:
                Builder.populate_block_queue(self.watcher.block_queue,
                                             missed_blocks_watcher)
                self.watcher.block_queue.join()

            # Otherwise they need to be updated at the same time, block by block
            elif len(missed_blocks_responder) != 0 and len(
                    missed_blocks_watcher) != 0:
                Builder.update_states(
                    self.watcher.block_queue,
                    self.watcher.responder.block_queue,
                    missed_blocks_watcher,
                    missed_blocks_responder,
                )

        # Activate ChainMonitor
        self.chain_monitor.activate()

    def start_services(self, logging_port):
        """
        Readies the tower by setting up signal handling and starting all the services.

        Args:
            logging_port (:obj:`int`): the port where the logging server can be reached (localhost:logging_port)
        """

        signal(SIGINT, self.handle_signals)
        signal(SIGTERM, self.handle_signals)
        signal(SIGQUIT, self.handle_signals)

        # Start the rpc process
        self.rpc_process.start()

        # Start the internal API
        # This MUST be done after rpc_process.start to avoid the issue that was solved in
        # https://github.com/talaia-labs/python-teos/pull/198
        self.internal_api.rpc_server.start()
        self.logger.info(
            f"Internal API initialized. Serving at {self.internal_api_endpoint}"
        )

        # Start the public API server
        api_endpoint = f"{self.config.get('API_BIND')}:{self.config.get('API_PORT')}"
        if self.config.get("WSGI") == "gunicorn":
            # FIXME: We may like to add workers depending on a config value
            teos_folder = os.path.dirname(os.path.realpath(__file__))
            self.api_proc = subprocess.Popen(
                [
                    "gunicorn",
                    f"--config={os.path.join(teos_folder, 'gunicorn_config.py')}",
                    f"--bind={api_endpoint}",
                    f"teos.api:serve(internal_api_endpoint='{self.internal_api_endpoint}', "
                    f"endpoint='{api_endpoint}', logging_port='{logging_port}', "
                    f"min_to_self_delay='{self.config.get('MIN_TO_SELF_DELAY')}')",
                ],
                env={
                    **os.environ,
                    **{
                        "LOG_SERVER_PORT": str(logging_port)
                    }
                },
            )
        else:
            self.api_proc = multiprocessing.Process(
                target=api.serve,
                kwargs={
                    "internal_api_endpoint": self.internal_api_endpoint,
                    "endpoint": api_endpoint,
                    "logging_port": logging_port,
                    "min_to_self_delay": self.config.get("MIN_TO_SELF_DELAY"),
                    "auto_run": True,
                },
            )
            self.api_proc.start()

    def handle_signals(self, signum, frame):
        """Handles signals by initiating a graceful shutdown."""
        self.logger.debug(f"Signal {signum} received. Stopping")

        self.stop_command_event.set()

    def teardown(self):
        """Shuts down all services and closes the DB, then exits. This method does not return."""
        self.logger.info("Terminating public API")

        # Stop the public API first
        if isinstance(self.api_proc, subprocess.Popen):
            self.api_proc.terminate()
            self.api_proc.wait()
        elif isinstance(self.api_proc, multiprocessing.Process):
            # FIXME: using SIGKILL for now, adapt it to use SIGTERM so the shutdown can be graceful
            self.api_proc.kill()
            self.api_proc.join()

        self.logger.info("Public API terminated")

        # Signals readiness to shutdown to the other processes
        self.stop_event.set()

        # wait for RPC process to shutdown
        self.rpc_process.join()

        # Stops the internal API, after waiting for some grace time
        self.logger.info("Stopping internal API")
        self.internal_api.rpc_server.stop(SHUTDOWN_GRACE_TIME).wait()
        self.logger.info("Internal API stopped")

        # terminate the ChainMonitor
        self.chain_monitor.terminate()

        # wait for watcher and responder to finish processing their queues
        self.watcher_thread.join()
        self.responder_thread.join()

        self.logger.info("Closing connection with appointments db")
        self.db_manager.close()
        self.logger.info("Closing connection with users db")
        self.watcher.gatekeeper.user_db.close()

        self.logger.info("Shutting down TEOS")
        self.stop_log_event.set()
        self.logging_process.join()
        exit(0)

    def start(self):
        """This method implements the whole lifetime cycle of the the TEOS tower. This method does not return."""
        self.logger.info("Starting TEOS")
        self.bootstrap_components()
        self.start_services(self.logging_port)

        self.stop_command_event.wait()
        self.teardown()
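bootstrap_components leans on Builder.populate_block_queue to replay blocks missed while offline. Judging by how it is called above, the helper is essentially a loop feeding hashes into a component's queue; a hedged sketch:

def populate_block_queue(block_queue, missed_blocks):
    # Replay every missed block hash so the component can catch up before
    # the ChainMonitor is activated and live blocks start flowing.
    for block_hash in missed_blocks:
        block_queue.put(block_hash)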
Example No. 22
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, gatekeeper, carrier,
                          block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]
    subscription_expiry = responder.block_processor.get_block_count() + 110

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        # Simulate user registration so trackers can properly expire
        responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
            available_slots=10, subscription_expiry=subscription_expiry)

        # Add data to the Responder
        responder.trackers[uuid] = tracker.get_summary()
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        # Assuming the appointment only took a single slot
        responder.gatekeeper.registered_users[
            tracker.user_id].appointments[uuid] = 1

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())

    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_block_w_delay()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # CONFIRMATIONS_BEFORE_RETRY+1 blocks after, the responder should rebroadcast the unconfirmed txs (15 remaining)
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 20

    # Generating 100 - CONFIRMATIONS_BEFORE_RETRY - 2 additional blocks should complete the first 5 trackers
    generate_blocks_w_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 15
    # Check they are not in the Gatekeeper either
    for tracker in trackers[:5]:
        assert len(responder.gatekeeper.registered_users[
            tracker.user_id].appointments) == 0

    # CONFIRMATIONS_BEFORE_RETRY additional blocks should complete the rest
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 0
    # Check they are not in the Gatekeeper either
    for tracker in trackers[5:]:
        assert len(responder.gatekeeper.registered_users[
            tracker.user_id].appointments) == 0
Example No. 23
def test_do_watch(temp_db_manager, carrier, block_processor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_block()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Generating 5 additional blocks should complete the 5 trackers
    generate_blocks(5)

    assert not set(broadcast_txs).issubset(responder.tx_tracker_map)

    # Do the rest
    broadcast_txs = []
    for tracker in trackers[5:]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_blocks(6)

    assert len(responder.tx_tracker_map) == 0
Example No. 24
def main(command_line_conf):
    global db_manager, chain_monitor

    signal(SIGINT, handle_signals)
    signal(SIGTERM, handle_signals)
    signal(SIGQUIT, handle_signals)

    # Loads config and sets up the data folder and log file
    config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF,
                                 command_line_conf)
    config = config_loader.build_config()
    setup_data_folder(DATA_DIR)
    setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

    logger.info("Starting TEOS")
    db_manager = DBManager(config.get("DB_PATH"))

    bitcoind_connect_params = {
        k: v
        for k, v in config.items() if k.startswith("BTC")
    }
    bitcoind_feed_params = {
        k: v
        for k, v in config.items() if k.startswith("FEED")
    }

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        logger.error("Can't connect to bitcoind. Shutting down")

    elif not in_correct_network(bitcoind_connect_params,
                                config.get("BTC_NETWORK")):
        logger.error(
            "bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down"
        )

    else:
        try:
            secret_key_der = Cryptographer.load_key_file(
                config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key can't be loaded")

            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            responder = Responder(db_manager, carrier, block_processor)
            watcher = Watcher(
                db_manager,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("EXPIRY_DELTA"),
            )

            # Create the chain monitor and start monitoring the chain
            chain_monitor = ChainMonitor(watcher.block_queue,
                                         watcher.responder.block_queue,
                                         block_processor, bitcoind_feed_params)

            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(
                    responder_trackers_data) == 0:
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data)

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data)

                # Awaking components so the states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder(
                )

                # Populate the block queues with data if they've missed some while offline. If the blocks of both match
                # we don't perform the search twice.

                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher)
                missed_blocks_watcher = block_processor.get_missed_blocks(
                    last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder)
                    missed_blocks_responder = block_processor.get_missed_blocks(
                        last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher
                       ) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue,
                                                 missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder
                         ) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue,
                                                 missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(
                        missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher,
                                          missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                watcher).start()
        except Exception as e:
            logger.error("An error occurred: {}. Shutting down".format(e))
            exit(1)
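Both bootstrap paths end in Builder.update_states when the watcher and the responder each missed blocks, so the blocks must be applied in lock-step and neither component's state runs ahead of the other. A hedged sketch using the queue-based signature from Example No. 21 (the real helper also validates that the two block lists are consistent):

def update_states_sketch(watcher_queue, responder_queue, missed_watcher, missed_responder):
    # The responder's missed blocks can only be a suffix of the watcher's, so
    # replay the watcher-only prefix first, then advance both block by block.
    prefix = len(missed_watcher) - len(missed_responder)
    for block_hash in missed_watcher[:prefix]:
        watcher_queue.put(block_hash)
        watcher_queue.join()
    for w_block, r_block in zip(missed_watcher[prefix:], missed_responder):
        watcher_queue.put(w_block)
        responder_queue.put(r_block)
        watcher_queue.join()
        responder_queue.join()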
Example No. 25
def test_get_completed_trackers(db_manager, carrier, block_processor):
    initial_height = bitcoin_cli(bitcoind_connect_params).getblockcount()

    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS)
    # We'll create three types of transactions: end reached + enough confs, end reached + not enough confs, end not reached
    trackers_end_conf = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_end_no_conf = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_end_no_conf[uuid4().hex] = tracker

    trackers_no_end = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        tracker.appointment_end += 10
        trackers_no_end[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_end_conf)
    all_trackers.update(trackers_end_no_conf)
    all_trackers.update(trackers_no_end)

    # Let's add all to the responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }

    for uuid, tracker in all_trackers.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    # The dummy appointments have an end_appointment time of current + 2, but trackers need at least 6 confs by default
    generate_blocks(6)

    # And now let's check
    completed_trackers = responder.get_completed_trackers(initial_height + 6)
    completed_trackers_ids = [
        tracker_id for tracker_id, confirmations in completed_trackers.items()
    ]
    ended_trackers_keys = list(trackers_end_conf.keys())
    assert set(completed_trackers_ids) == set(ended_trackers_keys)

    # Generating 6 additional blocks should also confirm trackers_no_end
    generate_blocks(6)

    completed_trackers = responder.get_completed_trackers(initial_height + 12)
    completed_trackers_ids = [
        tracker_id for tracker_id, confirmations in completed_trackers.items()
    ]
    ended_trackers_keys.extend(list(trackers_no_end.keys()))

    assert set(completed_trackers_ids) == set(ended_trackers_keys)