def test_check_start_time():
    # Time is defined in block height
    current_time = 100

    # Right format and right value (start time in the future)
    start_time = 101
    assert Inspector.check_start_time(start_time,
                                      current_time) == APPOINTMENT_OK

    # Start time too small (either same block or block in the past)
    start_times = [100, 99, 98, -1]
    for start_time in start_times:
        assert Inspector.check_start_time(
            start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # Empty field
    start_time = None
    assert Inspector.check_start_time(
        start_time, current_time)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong data type
    start_times = WRONG_TYPES
    for start_time in start_times:
        assert Inspector.check_start_time(
            start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE
Example #2
def serve(internal_api_endpoint,
          endpoint,
          logging_port,
          min_to_self_delay,
          auto_run=False):
    """
    Starts the API.

    This method can be run either from an external WSGI server (like gunicorn) or by the Flask development server.

    Args:
        internal_api_endpoint (:obj:`str`): endpoint where the internal api is running (``host:port``).
        endpoint (:obj:`str`): endpoint where the http api will be running (``host:port``).
        logging_port (:obj:`int`): the port where the logging server can be reached (``localhost:logging_port``).
        min_to_self_delay (:obj:`str`): the minimum to_self_delay accepted by the :obj:`Inspector`.
        auto_run (:obj:`bool`): whether the server should be started by this process. False if run with an external
            WSGI server, True if run by Flask.

    Returns:
        The application object needed by the WSGI server to run if ``auto_run`` is False, :obj:`None` otherwise.
    """

    setup_logging(logging_port)
    inspector = Inspector(int(min_to_self_delay))
    api = API(inspector, internal_api_endpoint)

    api.logger.info(f"Initialized. Serving at {endpoint}")

    if auto_run:
        # Waitress will serve on both IPv4 and IPv6 if localhost is passed as the endpoint. Defaulting to IPv4 only in
        # that case.
        wsgi_serve(api.app, listen=endpoint.replace("localhost", "127.0.0.1"))
    else:
        return api.app
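
A minimal usage sketch of running ``serve`` behind an external WSGI server (a hedged illustration; the endpoint values, ports and the ``wsgi.py`` module name are assumptions, not part of the source): with ``auto_run=False`` the function returns the Flask application object, which gunicorn can pick up from a small entry-point module.

# wsgi.py -- hypothetical entry point for an external WSGI server such as gunicorn.
# Assumes serve() from the module above is importable; adjust the import path to the actual project layout.
app = serve(
    internal_api_endpoint="localhost:50051",  # assumed internal API endpoint (host:port)
    endpoint="localhost:9814",                # assumed public HTTP endpoint (host:port)
    logging_port=9000,                        # assumed logging server port
    min_to_self_delay="20",                   # assumed minimum to_self_delay; serve() casts it to int
    auto_run=False,                           # return the app instead of blocking on waitress
)

# Run with, for example:  gunicorn wsgi:app
# With auto_run=True, serve() would instead block and serve through waitress itself.
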
def test_check_appointment_signature():
    # The inspector receives the public key as hex
    client_sk, client_pk = generate_keypair()
    client_pk_hex = client_pk.format().hex()

    dummy_appointment_data, _ = generate_dummy_appointment_data(
        real_height=False)
    assert Inspector.check_appointment_signature(
        dummy_appointment_data["appointment"],
        dummy_appointment_data["signature"],
        dummy_appointment_data["public_key"])

    fake_sk, _ = generate_keypair()

    # Create a bad signature to make sure inspector rejects it
    bad_signature = Cryptographer.sign(
        Appointment.from_dict(
            dummy_appointment_data["appointment"]).serialize(), fake_sk)
    assert (Inspector.check_appointment_signature(
        dummy_appointment_data["appointment"], bad_signature,
        client_pk_hex)[0] == APPOINTMENT_INVALID_SIGNATURE)
def test_check_locator():
    # Right appointment type, size and format
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    assert Inspector.check_locator(locator) == APPOINTMENT_OK

    # Wrong size (too big)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES + 1)
    assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE

    # Wrong size (too small)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES - 1)
    assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE

    # Empty
    locator = None
    assert Inspector.check_locator(locator)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong type (several types tested; the same should hold for anything that is not a string)
    locators = [[], -1, 3.2, 0, 4, (), object, {}, object()]

    for locator in locators:
        assert Inspector.check_locator(
            locator)[0] == APPOINTMENT_WRONG_FIELD_TYPE

    # Wrong format (no hex)
    locators = NO_HEX_STRINGS
    for locator in locators:
        assert Inspector.check_locator(
            locator)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
def test_check_blob():
    # Right format and length
    encrypted_blob = get_random_value_hex(120)
    assert Inspector.check_blob(encrypted_blob) == APPOINTMENT_OK

    # # Wrong content
    # # FIXME: There is no properly defined format for this yet. It should at least be restricted by size, and checked
    # #        to be a multiple of the block size defined by the encryption function.

    # Wrong type
    encrypted_blobs = WRONG_TYPES_NO_STR
    for encrypted_blob in encrypted_blobs:
        assert Inspector.check_blob(
            encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_TYPE

    # Empty field
    encrypted_blob = None
    assert Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong format (no hex)
    encrypted_blobs = NO_HEX_STRINGS
    for encrypted_blob in encrypted_blobs:
        assert Inspector.check_blob(
            encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
def test_check_end_time():
    # Time is defined in block height
    current_time = 100
    start_time = 120

    # Right format and right value (start time before end and end in the future)
    end_time = 121
    assert Inspector.check_end_time(end_time, start_time,
                                    current_time) == APPOINTMENT_OK

    # End time too small (same as or before the start time)
    end_times = [120, 119, 118, -1]
    for end_time in end_times:
        assert Inspector.check_end_time(
            end_time, start_time,
            current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # End time too small (either same height as current block or in the past)
    current_time = 130
    end_times = [130, 129, 128, -1]
    for end_time in end_times:
        assert Inspector.check_end_time(
            end_time, start_time,
            current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # Empty field
    end_time = None
    assert Inspector.check_end_time(end_time, start_time,
                                    current_time)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong data type
    end_times = WRONG_TYPES
    for end_time in end_times:
        assert Inspector.check_end_time(
            end_time, start_time,
            current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE
Example #7
def api(db_manager, carrier, block_processor, gatekeeper, run_bitcoind):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        sk.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    inspector = Inspector(block_processor, config.get("MIN_TO_SELF_DELAY"))
    api = API(config.get("API_HOST"), config.get("API_PORT"), inspector, watcher)

    return api
Example #8
def run_api(db_manager, carrier, block_processor):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(db_manager, block_processor, responder, sk.to_der(),
                      config.get("MAX_APPOINTMENTS"),
                      config.get("EXPIRY_DELTA"))

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    watcher.awake()
    chain_monitor.monitor_chain()

    api_thread = Thread(
        target=API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                   watcher).start)
    api_thread.daemon = True
    api_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
Example #9
def main(command_line_conf):
    global db_manager, chain_monitor

    signal(SIGINT, handle_signals)
    signal(SIGTERM, handle_signals)
    signal(SIGQUIT, handle_signals)

    # Loads config and sets up the data folder and log file
    config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF,
                                 command_line_conf)
    config = config_loader.build_config()
    setup_data_folder(DATA_DIR)
    setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

    logger.info("Starting TEOS")
    db_manager = DBManager(config.get("DB_PATH"))

    bitcoind_connect_params = {
        k: v
        for k, v in config.items() if k.startswith("BTC")
    }
    bitcoind_feed_params = {
        k: v
        for k, v in config.items() if k.startswith("FEED")
    }

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        logger.error("Can't connect to bitcoind. Shutting down")

    elif not in_correct_network(bitcoind_connect_params,
                                config.get("BTC_NETWORK")):
        logger.error(
            "bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down"
        )

    else:
        try:
            secret_key_der = Cryptographer.load_key_file(
                config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key can't be loaded")

            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            responder = Responder(db_manager, carrier, block_processor)
            watcher = Watcher(
                db_manager,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("EXPIRY_DELTA"),
            )

            # Create the chain monitor and start monitoring the chain
            chain_monitor = ChainMonitor(watcher.block_queue,
                                         watcher.responder.block_queue,
                                         block_processor, bitcoind_feed_params)

            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(
                    responder_trackers_data) == 0:
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data)

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data)

                # Awaking components so the states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder()

                # Populate the block queues with data if they've missed some while offline. If the blocks of both match
                # we don't perform the search twice.

                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher)
                missed_blocks_watcher = block_processor.get_missed_blocks(
                    last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder)
                    missed_blocks_responder = block_processor.get_missed_blocks(
                        last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue,
                                                 missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue,
                                                 missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(
                        missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher,
                                          missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                watcher).start()
        except Exception as e:
            logger.error("An error occurred: {}. Shutting down".format(e))
            exit(1)
Example #10
    2.0,
    (),
    object,
    {},
    " " * LOCATOR_LEN_HEX,
    object(),
]
WRONG_TYPES_NO_STR = [[],
                      unhexlify(get_random_value_hex(LOCATOR_LEN_BYTES)), 3.2,
                      2.0, (), object, {},
                      object()]

config = get_config()
MIN_TO_SELF_DELAY = config.get("MIN_TO_SELF_DELAY")
block_processor = BlockProcessor(bitcoind_connect_params)
inspector = Inspector(block_processor, MIN_TO_SELF_DELAY)


def test_check_locator():
    # Right appointment type, size and format
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    assert inspector.check_locator(locator) is None

    # Wrong size (too big)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES + 1)
    with pytest.raises(InspectionFailed):
        try:
            inspector.check_locator(locator)

        except InspectionFailed as e:
            assert e.erno == errors.APPOINTMENT_WRONG_FIELD_SIZE
            # Re-raise so pytest.raises actually sees the exception after the error code has been checked
            raise e
Example #11
def api():
    inspector = Inspector(config.get("MIN_TO_SELF_DELAY"))
    api = API(inspector, internal_api_endpoint)

    return api
    get_random_value_hex(LOCATOR_LEN_BYTES),
    3.2,
    2.0,
    (),
    object,
    {},
    " " * LOCATOR_LEN_HEX,
    object(),
]
WRONG_TYPES_NO_STR = [[],
                      bytes.fromhex(get_random_value_hex(LOCATOR_LEN_BYTES)),
                      3.2, 2.0, (), object, {},
                      object()]

MIN_TO_SELF_DELAY = config.get("MIN_TO_SELF_DELAY")
inspector = Inspector(MIN_TO_SELF_DELAY)


def test_check_locator():
    # Right appointment type, size and format
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    assert inspector.check_locator(locator) is None

    # Wrong size (too big)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES + 1)
    with pytest.raises(InspectionFailed):
        try:
            inspector.check_locator(locator)

        except InspectionFailed as e:
            assert e.erno == errors.APPOINTMENT_WRONG_FIELD_SIZE
            # Re-raise so pytest.raises actually sees the exception after the error code has been checked
            raise e
Example #13
def inspector():
    return Inspector(MIN_TO_SELF_DELAY)