Example #1
def responder(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor([Queue(), responder.block_queue],
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    responder_thread = responder.awake()
    chain_monitor.activate()

    yield responder

    chain_monitor.terminate()
    responder_thread.join()
def responder(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return responder
def test_update_states_responder_misses_more(run_bitcoind, db_manager,
                                             gatekeeper, carrier,
                                             block_processor):
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # Updating the states should bring both to the same last known block.
    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks, blocks[1:])

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert w.responder.last_known_block == blocks[-1]
def test_handle_breach(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)

    uuid = uuid4().hex
    tracker = create_dummy_tracker()

    # The block_hash passed to add_response does not matter much for now. It will once error handling is in place
    receipt = responder.handle_breach(
        tracker.locator,
        uuid,
        tracker.dispute_txid,
        tracker.penalty_txid,
        tracker.penalty_rawtx,
        tracker.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert receipt.delivered is True
def test_rebroadcast(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    txs_to_rebroadcast = []

    # Rebroadcast calls add_response with retry=True. The tracker data is already in trackers.
    for i in range(20):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            penalty_rawtx=create_dummy_transaction().hex())

        tracker = TransactionTracker(locator, dispute_txid, penalty_txid,
                                     penalty_rawtx, appointment_end)

        responder.trackers[uuid] = {
            "locator": locator,
            "penalty_txid": penalty_txid,
            "appointment_end": appointment_end,
        }

        # We need to add it to the db too
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

        responder.tx_tracker_map[penalty_txid] = [uuid]
        responder.unconfirmed_txs.append(penalty_txid)

        # Let's add some of the txs to the rebroadcast list
        if (i % 2) == 0:
            txs_to_rebroadcast.append(penalty_txid)

    # The block_hash passed to rebroadcast does not matter much for now. It will once error handling is in place
    receipts = responder.rebroadcast(txs_to_rebroadcast)

    # All txs should have been delivered and their missed confirmation counters reset
    for txid, receipt in receipts:
        # Sanity check
        assert txid in txs_to_rebroadcast

        assert receipt.delivered is True
        assert responder.missed_confirmations[txid] == 0
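# The comment above notes that rebroadcast re-sends the penalty transactions through the carrier (add_response with
# retry=True) and that the missed confirmation counters end up at 0. The helper below is only a hedged sketch of that
# contract, not the teos implementation; carrier_send_transaction and penalty_rawtx_by_txid are hypothetical stand-ins.
def rebroadcast_sketch(txs_to_rebroadcast, penalty_rawtx_by_txid, missed_confirmations, carrier_send_transaction):
    receipts = []
    for txid in txs_to_rebroadcast:
        # Reset the missed confirmation counter before retrying the broadcast
        missed_confirmations[txid] = 0
        receipts.append((txid, carrier_send_transaction(penalty_rawtx_by_txid[txid])))

    # Same (txid, receipt) shape that test_rebroadcast iterates over
    return receipts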
def test_init_responder(temp_db_manager, carrier, block_processor):
    responder = Responder(temp_db_manager, carrier, block_processor)
    assert isinstance(responder.trackers, dict) and len(
        responder.trackers) == 0
    assert isinstance(responder.tx_tracker_map, dict) and len(
        responder.tx_tracker_map) == 0
    assert isinstance(responder.unconfirmed_txs, list) and len(
        responder.unconfirmed_txs) == 0
    assert isinstance(responder.missed_confirmations, dict) and len(
        responder.missed_confirmations) == 0
    assert responder.block_queue.empty()
def test_check_confirmations(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # check_confirmations checks, given the list of transactions in a block, which of the known penalty transactions
    # have been confirmed. To test this we need to create a list of transactions and set up the responder's state
    txs = [get_random_value_hex(32) for _ in range(20)]

    # The responder keeps a list of unconfirmed transactions; let's make some of them the ones we've just received
    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
    txs_subset = random.sample(txs, k=10)
    responder.unconfirmed_txs.extend(txs_subset)

    # We also need to add them to the tx_tracker_map since they would be there under normal conditions
    responder.tx_tracker_map = {
        txid: TransactionTracker(txid[:LOCATOR_LEN_HEX], txid, None, None,
                                 None)
        for txid in responder.unconfirmed_txs
    }

    # Let's make sure that there are no txs with missed confirmations yet
    assert len(responder.missed_confirmations) == 0

    responder.check_confirmations(txs)

    # After checking confirmations, the txs in txs_subset should be confirmed (no longer part of unconfirmed_txs)
    # and the rest should have one missed confirmation
    for tx in txs_subset:
        assert tx not in responder.unconfirmed_txs

    for tx in responder.unconfirmed_txs:
        assert responder.missed_confirmations[tx] == 1
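# Hedged sketch of the behaviour asserted in test_check_confirmations (an illustration, not the Responder code):
# given the txids included in a new block, tracked txs found in the block are treated as confirmed, and every tracked
# tx that is missing gets one extra missed confirmation. All names below are hypothetical.
def check_confirmations_sketch(block_txs, unconfirmed_txs, missed_confirmations):
    block_txs = set(block_txs)
    still_unconfirmed = []

    for txid in unconfirmed_txs:
        if txid in block_txs:
            # Confirmed in this block: drop it from the unconfirmed list
            continue
        # Not seen in this block: count one more missed confirmation
        missed_confirmations[txid] = missed_confirmations.get(txid, 0) + 1
        still_unconfirmed.append(txid)

    return still_unconfirmed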
def internal_api(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(db_manager, gatekeeper, block_processor, responder,
                      teos_sk, MAX_APPOINTMENTS,
                      config.get("LOCATOR_CACHE_SIZE"))
    watcher.last_known_block = block_processor.get_best_block_hash()
    i_api = InternalAPI(watcher, internal_api_endpoint,
                        config.get("INTERNAL_API_WORKERS"), Event())
    i_api.rpc_server.start()

    yield i_api

    i_api.rpc_server.stop(None)
def test_handle_breach_bad_response(db_manager, block_processor):
    # We need a new carrier here, otherwise the transaction will be flagged as previously sent and receipt.delivered
    # will be True
    responder = Responder(db_manager, Carrier(bitcoind_connect_params),
                          block_processor)

    uuid = uuid4().hex
    tracker = create_dummy_tracker()

    # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, though better tests are needed.
    tracker.penalty_rawtx = tracker.penalty_txid

    # The block_hash passed to add_response does not matter much for now. It will once error handling is in place
    receipt = responder.handle_breach(
        tracker.locator,
        uuid,
        tracker.dispute_txid,
        tracker.penalty_txid,
        tracker.penalty_rawtx,
        tracker.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert receipt.delivered is False
Example #10
def api(db_manager, carrier, block_processor, gatekeeper, run_bitcoind):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        sk.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    inspector = Inspector(block_processor, config.get("MIN_TO_SELF_DELAY"))
    api = API(config.get("API_HOST"), config.get("API_PORT"), inspector, watcher)

    return api
def test_init_responder(temp_db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(temp_db_manager, gatekeeper, carrier,
                          block_processor)
    assert isinstance(responder.trackers, dict) and len(
        responder.trackers) == 0
    assert isinstance(responder.tx_tracker_map, dict) and len(
        responder.tx_tracker_map) == 0
    assert isinstance(responder.unconfirmed_txs, list) and len(
        responder.unconfirmed_txs) == 0
    assert isinstance(responder.missed_confirmations, dict) and len(
        responder.missed_confirmations) == 0
    assert isinstance(responder.block_queue,
                      Queue) and responder.block_queue.empty()
    assert isinstance(responder.db_manager, AppointmentsDBM)
    assert isinstance(responder.gatekeeper, Gatekeeper)
    assert isinstance(responder.carrier, Carrier)
    assert isinstance(responder.block_processor, BlockProcessor)
    assert responder.last_known_block is None or isinstance(
        responder.last_known_block, str)
Example #12
def watcher(db_manager):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        block_processor,
        responder,
        signing_key.to_der(),
        config.get("MAX_APPOINTMENTS"),
        config.get("EXPIRY_DELTA"),
    )

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return watcher
def watcher(db_manager, gatekeeper):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        signing_key.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    chain_monitor = ChainMonitor(
        watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
    )
    chain_monitor.monitor_chain()

    return watcher
def internal_api(gatekeeper_mock, carrier_mock):
    db_manager = DBManagerMock()
    responder = Responder(db_manager, gatekeeper_mock, carrier_mock,
                          gatekeeper_mock.block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper_mock,
        gatekeeper_mock.block_processor,
        responder,
        teos_sk,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    i_api = InternalAPI(watcher, internal_api_endpoint,
                        config.get("INTERNAL_API_WORKERS"), Event())
    i_api.rpc_server.start()

    yield i_api

    i_api.rpc_server.stop(None)
Example #15
def test_update_states_empty_list(db_manager, carrier, block_processor):
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    missed_blocks_watcher = []
    missed_blocks_responder = [get_random_value_hex(32)]

    # Any combination involving an empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(w, missed_blocks_watcher,
                              missed_blocks_responder)

    with pytest.raises(ValueError):
        Builder.update_states(w, missed_blocks_responder,
                              missed_blocks_watcher)
Example #16
def run_api(db_manager, carrier, block_processor):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(db_manager, block_processor, responder, sk.to_der(),
                      config.get("MAX_APPOINTMENTS"),
                      config.get("EXPIRY_DELTA"))

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    watcher.awake()
    chain_monitor.monitor_chain()

    api_thread = Thread(
        target=API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                   watcher).start)
    api_thread.daemon = True
    api_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
Example #17
def test_update_states_responder_misses_more(run_bitcoind, db_manager, carrier,
                                             block_processor):
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # Updating the states should bring both to the same last known block.
    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks, blocks[1:])

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert w.responder.last_known_block == blocks[-1]
def test_update_states_empty_list(db_manager, gatekeeper, carrier,
                                  block_processor):
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    missed_blocks_watcher = []
    missed_blocks_responder = [get_random_value_hex(32)]

    # Any combination involving an empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(w, missed_blocks_watcher,
                              missed_blocks_responder)

    with pytest.raises(ValueError):
        Builder.update_states(w, missed_blocks_responder,
                              missed_blocks_watcher)
Example #19
def test_update_states_watcher_misses_more(db_manager, carrier,
                                           block_processor):
    # Same as before, but data is now in the Responder
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks[1:], blocks)

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert db_manager.load_last_block_hash_responder() == blocks[-1]
Example #20
def watcher(run_bitcoind, db_manager, gatekeeper):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        signing_key,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    watcher.last_known_block = block_processor.get_best_block_hash()

    chain_monitor = ChainMonitor(
        [watcher.block_queue, watcher.responder.block_queue], block_processor,
        bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    return watcher
def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier,
                                           block_processor):
    # Same as before, but data is now in the Responder
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks[1:], blocks)

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert db_manager.load_last_block_hash_responder() == blocks[-1]
Example #22
def main(command_line_conf):
    global db_manager, chain_monitor

    signal(SIGINT, handle_signals)
    signal(SIGTERM, handle_signals)
    signal(SIGQUIT, handle_signals)

    # Loads config and sets up the data folder and log file
    config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF,
                                 command_line_conf)
    config = config_loader.build_config()
    setup_data_folder(DATA_DIR)
    setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

    logger.info("Starting TEOS")
    db_manager = DBManager(config.get("DB_PATH"))

    bitcoind_connect_params = {
        k: v
        for k, v in config.items() if k.startswith("BTC")
    }
    bitcoind_feed_params = {
        k: v
        for k, v in config.items() if k.startswith("FEED")
    }

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        logger.error("Can't connect to bitcoind. Shutting down")

    elif not in_correct_network(bitcoind_connect_params,
                                config.get("BTC_NETWORK")):
        logger.error(
            "bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down"
        )

    else:
        try:
            secret_key_der = Cryptographer.load_key_file(
                config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key can't be loaded")

            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            responder = Responder(db_manager, carrier, block_processor)
            watcher = Watcher(
                db_manager,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("EXPIRY_DELTA"),
            )

            # Create the chain monitor (monitoring starts later, once bootstrapping is done)
            chain_monitor = ChainMonitor(watcher.block_queue,
                                         watcher.responder.block_queue,
                                         block_processor, bitcoind_feed_params)

            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(
                    responder_trackers_data) == 0:
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher with backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data)

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data)

                # Awaking components so the states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder(
                )

                # Populate the block queues with data if blocks were missed while offline. If both last known blocks
                # match, we don't perform the search twice.

                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher)
                missed_blocks_watcher = block_processor.get_missed_blocks(
                    last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder)
                    missed_blocks_responder = block_processor.get_missed_blocks(
                        last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher
                       ) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue,
                                                 missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder
                         ) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue,
                                                 missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(
                        missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher,
                                          missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                watcher).start()
        except Exception as e:
            logger.error("An error occurred: {}. Shutting down".format(e))
            exit(1)
Example #23
    def __init__(self, config, sk, logger, logging_port, stop_log_event,
                 logging_process):
        self.config = config
        self.logger = logger
        self.logging_port = logging_port
        self.stop_log_event = stop_log_event
        self.logging_process = logging_process

        # event triggered when a ``stop`` command is issued
        # Using multiprocessing.Event seems to cause a deadlock if event.set() is called in a signal handler that
        # interrupted event.wait(). This does not happen with threading.Event.
        # See https://bugs.python.org/issue41606
        self.stop_command_event = threading.Event()

        # event triggered when the public API is halted, hence teosd is ready to stop
        self.stop_event = multiprocessing.Event()

        bitcoind_connect_params = {
            k: v
            for k, v in config.items() if k.startswith("BTC_RPC")
        }
        bitcoind_feed_params = {
            k: v
            for k, v in config.items() if k.startswith("BTC_FEED")
        }

        bitcoind_reachable = threading.Event()
        if not can_connect_to_bitcoind(bitcoind_connect_params):
            raise RuntimeError("Cannot connect to bitcoind")
        elif not in_correct_network(bitcoind_connect_params,
                                    config.get("BTC_NETWORK")):
            raise RuntimeError(
                "bitcoind is running on a different network, check teos.conf and bitcoin.conf"
            )
        else:
            bitcoind_reachable.set()

        self.logger.info("tower_id = {}".format(
            Cryptographer.get_compressed_pk(sk.public_key)))
        self.block_processor = BlockProcessor(bitcoind_connect_params,
                                              bitcoind_reachable)
        carrier = Carrier(bitcoind_connect_params, bitcoind_reachable)

        gatekeeper = Gatekeeper(
            UsersDBM(self.config.get("USERS_DB_PATH")),
            self.block_processor,
            self.config.get("SUBSCRIPTION_SLOTS"),
            self.config.get("SUBSCRIPTION_DURATION"),
            self.config.get("EXPIRY_DELTA"),
        )
        self.db_manager = AppointmentsDBM(
            self.config.get("APPOINTMENTS_DB_PATH"))
        responder = Responder(self.db_manager, gatekeeper, carrier,
                              self.block_processor)
        self.watcher = Watcher(
            self.db_manager,
            gatekeeper,
            self.block_processor,
            responder,
            sk,
            self.config.get("MAX_APPOINTMENTS"),
            self.config.get("LOCATOR_CACHE_SIZE"),
        )

        self.watcher_thread = None
        self.responder_thread = None

        # Create the chain monitor
        self.chain_monitor = ChainMonitor(
            [
                self.watcher.block_queue, responder.block_queue,
                gatekeeper.block_queue
            ],
            self.block_processor,
            bitcoind_feed_params,
        )

        # Set up the internal API
        self.internal_api_endpoint = f'{self.config.get("INTERNAL_API_HOST")}:{self.config.get("INTERNAL_API_PORT")}'
        self.internal_api = InternalAPI(
            self.watcher, self.internal_api_endpoint,
            self.config.get("INTERNAL_API_WORKERS"), self.stop_command_event)

        # Create the rpc, without starting it
        self.rpc_process = multiprocessing.Process(
            target=rpc.serve,
            args=(
                self.config.get("RPC_BIND"),
                self.config.get("RPC_PORT"),
                self.internal_api_endpoint,
                self.logging_port,
                self.stop_event,
            ),
            daemon=True,
        )

        # This variable will contain the handle of the process running the API once the service is started.
        # It will be an instance of either Popen or Process, depending on the WSGI config setting.
        self.api_proc = None
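# The __init__ above picks threading.Event over multiprocessing.Event for the stop command because, as its comment
# notes (https://bugs.python.org/issue41606), multiprocessing.Event may deadlock when set() runs inside a signal
# handler that interrupted wait(). The snippet below is a standalone, hedged illustration of that pattern, not teosd
# code: a SIGINT handler sets a threading.Event that the main thread would otherwise be blocked waiting on.
import signal
import threading

stop_command_event = threading.Event()

def _handle_stop_signal(signum, frame):
    # Safe with threading.Event; with multiprocessing.Event this set() could deadlock the interrupted wait()
    stop_command_event.set()

signal.signal(signal.SIGINT, _handle_stop_signal)
# The main thread would block here until a stop command or SIGINT arrives:
# stop_command_event.wait()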
def responder(dbm_mock, gatekeeper_mock, carrier_mock, block_processor_mock):
    responder = Responder(dbm_mock, gatekeeper_mock, carrier_mock,
                          block_processor_mock)

    return responder
def test_get_completed_trackers(db_manager, carrier, block_processor):
    initial_height = bitcoin_cli(bitcoind_connect_params).getblockcount()

    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS).
    # We'll create three types of transactions: end reached + enough confs, end reached + not enough confs, end not reached
    trackers_end_conf = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_end_no_conf = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_end_no_conf[uuid4().hex] = tracker

    trackers_no_end = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        tracker.appointment_end += 10
        trackers_no_end[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_end_conf)
    all_trackers.update(trackers_end_no_conf)
    all_trackers.update(trackers_no_end)

    # Let's add them all to the responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }

    for uuid, tracker in all_trackers.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    # The dummy appointments have an end_appointment time of current + 2, but trackers need at least 6 confs by default
    generate_blocks(6)

    # And now let's check
    completed_trackers = responder.get_completed_trackers(initial_height + 6)
    completed_trackers_ids = [
        tracker_id for tracker_id, confirmations in completed_trackers.items()
    ]
    ended_trackers_keys = list(trackers_end_conf.keys())
    assert set(completed_trackers_ids) == set(ended_trackers_keys)

    # Generating 6 additional blocks should also confirm trackers_no_end
    generate_blocks(6)

    completed_trackers = responder.get_completed_trackers(initial_height + 12)
    completed_trackers_ids = [
        tracker_id for tracker_id, confirmations in completed_trackers.items()
    ]
    ended_trackers_keys.extend(list(trackers_no_end.keys()))

    assert set(completed_trackers_ids) == set(ended_trackers_keys)
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, gatekeeper, carrier,
                          block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]
    subscription_expiry = responder.block_processor.get_block_count() + 110

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        # Simulate user registration so trackers can properly expire
        responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
            available_slots=10, subscription_expiry=subscription_expiry)

        # Add data to the Responder
        responder.trackers[uuid] = tracker.get_summary()
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        # Assuming the appointment only took a single slot
        responder.gatekeeper.registered_users[
            tracker.user_id].appointments[uuid] = 1

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())

    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_block_w_delay()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # CONFIRMATIONS_BEFORE_RETRY + 1 blocks later, the responder should rebroadcast the unconfirmed txs (15 remaining)
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 20

    # Generating 100 - CONFIRMATIONS_BEFORE_RETRY - 2 additional blocks should complete the first 5 trackers
    generate_blocks_w_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 15
    # Check they are not in the Gatekeeper either
    for tracker in trackers[:5]:
        assert len(responder.gatekeeper.registered_users[
            tracker.user_id].appointments) == 0

    # CONFIRMATIONS_BEFORE_RETRY additional blocks should complete the rest
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 0
    # Check they are not in the Gatekeeper either
    for tracker in trackers[5:]:
        assert len(responder.gatekeeper.registered_users[
            tracker.user_id].appointments) == 0
def test_do_watch(temp_db_manager, carrier, block_processor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_block()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Generating 5 additional blocks should complete the 5 trackers
    generate_blocks(5)

    assert not set(broadcast_txs).issubset(responder.tx_tracker_map)

    # Do the rest
    broadcast_txs = []
    for tracker in trackers[5:]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine 6 blocks
    generate_blocks(6)

    assert len(responder.tx_tracker_map) == 0
def test_get_completed_trackers(db_manager, gatekeeper, carrier,
                                block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A complete tracker is a tracker whose penalty transaction has been irrevocably resolved (i.e. has reached 100
    # confirmations).
    # We'll create 3 types of txs: irrevocably resolved, confirmed but not irrevocably resolved, and unconfirmed
    trackers_ir_resolved = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_confirmed = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_unconfirmed = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_unconfirmed[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_ir_resolved)
    all_trackers.update(trackers_confirmed)
    all_trackers.update(trackers_unconfirmed)

    # Let's add them all to the Responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = tracker.get_summary()

    for uuid, tracker in trackers_ir_resolved.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    generate_block_w_delay()

    for uuid, tracker in trackers_confirmed.items():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    # ir_resolved have 100 confirmations and confirmed have 99
    generate_blocks_w_delay(99)

    # Let's check
    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys = list(trackers_ir_resolved.keys())
    assert set(completed_trackers) == set(ended_trackers_keys)

    # Generating 1 additional block should also include the confirmed ones
    generate_block_w_delay()

    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys.extend(list(trackers_confirmed.keys()))
    assert set(completed_trackers) == set(ended_trackers_keys)
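# Hedged sketch of the completion criterion described in the comments of the test above (an assumption about the
# behaviour, not the actual get_completed_trackers code): a tracker is complete once its penalty transaction has been
# irrevocably resolved, i.e. has at least 100 confirmations. get_tx_confirmations is a hypothetical stand-in for a
# bitcoind query.
IRREVOCABLY_RESOLVED = 100

def get_completed_trackers_sketch(trackers, get_tx_confirmations):
    completed = []
    for uuid, summary in trackers.items():
        confirmations = get_tx_confirmations(summary["penalty_txid"])
        if confirmations is not None and confirmations >= IRREVOCABLY_RESOLVED:
            completed.append(uuid)

    # The test compares this as a set against the uuids of the irrevocably resolved trackers
    return completed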