Example #1
def user_db_manager(db_name="test_user_db"):
    manager = UsersDBM(db_name)

    yield manager

    manager.db.close()
    shutil.rmtree(db_name)
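
This generator follows the pytest setup/teardown pattern: build a UsersDBM, yield it to the test, then close the database and delete its directory. A minimal sketch of how it could be wired up and consumed, assuming pytest is available and the generator above is importable as user_db_manager; the fixture and test names below are illustrative:

import pytest

@pytest.fixture
def users_db():
    # Delegate to the generator above so setup and teardown run around each test.
    yield from user_db_manager("test_user_db")

def test_fixture_yields_a_manager(users_db):
    # The fixture yields a live UsersDBM; its backing directory is removed afterwards.
    assert users_db is not None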
Example #2
def user_db_manager():
    manager = UsersDBM("test_user_db")
    # Add last known block for the Responder in the db

    yield manager

    manager.db.close()
    rmtree("test_user_db")
Example #3
def open_create_db(db_path):

    try:
        db_manager = UsersDBM(db_path)

        return db_manager

    except ValueError:
        return False
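
Because open_create_db reports failure by returning False instead of propagating the ValueError, callers need an explicit check before using the result. A small usage sketch; the path is illustrative:

db_manager = open_create_db("user_db")
if db_manager is False:
    # UsersDBM raised ValueError (e.g. an invalid path), so there is no handle to use.
    print("Could not open or create the users database")
else:
    # db_manager is a ready-to-use UsersDBM instance.
    print("Users database opened at 'user_db'")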
Example #4
    def __init__(self, config, sk, logger, logging_port, stop_log_event,
                 logging_process):
        self.config = config
        self.logger = logger
        self.logging_port = logging_port
        self.stop_log_event = stop_log_event
        self.logging_process = logging_process

        # event triggered when a ``stop`` command is issued
        # Using multiprocessing.Event seems to cause a deadlock if event.set() is called in a signal handler that
        # interrupted event.wait(). This does not happen with threading.Event.
        # See https://bugs.python.org/issue41606
        self.stop_command_event = threading.Event()

        # event triggered when the public API is halted, hence teosd is ready to stop
        self.stop_event = multiprocessing.Event()

        bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC_RPC")}
        bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("BTC_FEED")}

        bitcoind_reachable = threading.Event()
        if not can_connect_to_bitcoind(bitcoind_connect_params):
            raise RuntimeError("Cannot connect to bitcoind")
        elif not in_correct_network(bitcoind_connect_params,
                                    config.get("BTC_NETWORK")):
            raise RuntimeError(
                "bitcoind is running on a different network, check teos.conf and bitcoin.conf"
            )
        else:
            bitcoind_reachable.set()

        self.logger.info("tower_id = {}".format(
            Cryptographer.get_compressed_pk(sk.public_key)))
        self.block_processor = BlockProcessor(bitcoind_connect_params,
                                              bitcoind_reachable)
        carrier = Carrier(bitcoind_connect_params, bitcoind_reachable)

        gatekeeper = Gatekeeper(
            UsersDBM(self.config.get("USERS_DB_PATH")),
            self.block_processor,
            self.config.get("SUBSCRIPTION_SLOTS"),
            self.config.get("SUBSCRIPTION_DURATION"),
            self.config.get("EXPIRY_DELTA"),
        )
        self.db_manager = AppointmentsDBM(
            self.config.get("APPOINTMENTS_DB_PATH"))
        responder = Responder(self.db_manager, gatekeeper, carrier,
                              self.block_processor)
        self.watcher = Watcher(
            self.db_manager,
            gatekeeper,
            self.block_processor,
            responder,
            sk,
            self.config.get("MAX_APPOINTMENTS"),
            self.config.get("LOCATOR_CACHE_SIZE"),
        )

        self.watcher_thread = None
        self.responder_thread = None

        # Create the chain monitor
        self.chain_monitor = ChainMonitor(
            [
                self.watcher.block_queue, responder.block_queue,
                gatekeeper.block_queue
            ],
            self.block_processor,
            bitcoind_feed_params,
        )

        # Set up the internal API
        self.internal_api_endpoint = f'{self.config.get("INTERNAL_API_HOST")}:{self.config.get("INTERNAL_API_PORT")}'
        self.internal_api = InternalAPI(
            self.watcher, self.internal_api_endpoint,
            self.config.get("INTERNAL_API_WORKERS"), self.stop_command_event)

        # Create the rpc, without starting it
        self.rpc_process = multiprocessing.Process(
            target=rpc.serve,
            args=(
                self.config.get("RPC_BIND"),
                self.config.get("RPC_PORT"),
                self.internal_api_endpoint,
                self.logging_port,
                self.stop_event,
            ),
            daemon=True,
        )

        # This variable will contain the handle of the process running the API once the service is started.
        # It will be an instance of either Popen or Process, depending on the WSGI config setting.
        self.api_proc = None
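
The constructor's own comment explains why stop_command_event is a threading.Event rather than a multiprocessing.Event: calling set() on a multiprocessing.Event from a signal handler that interrupted wait() can deadlock (https://bugs.python.org/issue41606). A self-contained sketch of that signal-plus-event pattern, with illustrative names unrelated to the TEOS classes:

import signal
import threading

stop_command_event = threading.Event()

def _handle_stop(signum, frame):
    # Safe with threading.Event; per the comment above, multiprocessing.Event
    # may deadlock when set() runs in a handler that interrupted wait().
    stop_command_event.set()

signal.signal(signal.SIGINT, _handle_stop)
signal.signal(signal.SIGTERM, _handle_stop)

print("Waiting for SIGINT/SIGTERM...")
stop_command_event.wait()
print("Stop command received, shutting down.")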
Example #5
def main(command_line_conf):
    global db_manager, chain_monitor

    try:
        signal(SIGINT, handle_signals)
        signal(SIGTERM, handle_signals)
        signal(SIGQUIT, handle_signals)

        # Loads config and sets up the data folder and log file
        data_dir = command_line_conf.pop("DATA_DIR") if "DATA_DIR" in command_line_conf else DATA_DIR
        config_loader = ConfigLoader(data_dir, CONF_FILE_NAME, DEFAULT_CONF,
                                     command_line_conf)
        config = config_loader.build_config()

        # Set default RPC port if not overwritten by the user.
        if "BTC_RPC_PORT" not in config_loader.overwritten_fields:
            config["BTC_RPC_PORT"] = get_default_rpc_port(
                config.get("BTC_NETWORK"))

        setup_data_folder(data_dir)
        setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

        logger.info("Starting TEOS")

        bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC")}
        bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("BTC_FEED")}

        if not can_connect_to_bitcoind(bitcoind_connect_params):
            logger.error("Cannot connect to bitcoind. Shutting down")

        elif not in_correct_network(bitcoind_connect_params,
                                    config.get("BTC_NETWORK")):
            logger.error(
                "bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down"
            )

        else:
            secret_key_der = Cryptographer.load_key_file(
                config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key cannot be loaded")

            logger.info("tower_id = {}".format(
                Cryptographer.get_compressed_pk(
                    Cryptographer.load_private_key_der(
                        secret_key_der).public_key)))
            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            gatekeeper = Gatekeeper(
                UsersDBM(config.get("USERS_DB_PATH")),
                block_processor,
                config.get("SUBSCRIPTION_SLOTS"),
                config.get("SUBSCRIPTION_DURATION"),
                config.get("EXPIRY_DELTA"),
            )
            db_manager = AppointmentsDBM(config.get("APPOINTMENTS_DB_PATH"))
            responder = Responder(db_manager, gatekeeper, carrier,
                                  block_processor)
            watcher = Watcher(
                db_manager,
                gatekeeper,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("LOCATOR_CACHE_SIZE"),
            )

            # Create the chain monitor and start monitoring the chain
            chain_monitor = ChainMonitor(watcher.block_queue,
                                         watcher.responder.block_queue,
                                         block_processor, bitcoind_feed_params)

            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data)

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data)

                # Wake up the components so their states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder()

                # Populate the block queues with data if they've missed some while offline. If the blocks of both match
                # we don't perform the search twice.

                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher)
                missed_blocks_watcher = block_processor.get_missed_blocks(
                    last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder)
                    missed_blocks_responder = block_processor.get_missed_blocks(
                        last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue,
                                                 missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue,
                                                 missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher,
                                          missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            inspector = Inspector(block_processor,
                                  config.get("MIN_TO_SELF_DELAY"))
            API(config.get("API_BIND"), config.get("API_PORT"), inspector,
                watcher).start()
    except Exception as e:
        logger.error("An error occurred: {}. Shutting down".format(e))
        exit(1)
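
Both constructors carve the loaded configuration into bitcoind connection and feed parameters with prefix-filtered dict comprehensions (Example #4 filters connection keys on "BTC_RPC", Example #5 on the broader "BTC" prefix). A self-contained sketch of that filtering with made-up values; in the examples above the real values come from ConfigLoader:

# Illustrative config values; in the examples above they come from ConfigLoader.
config = {
    "BTC_RPC_USER": "user",
    "BTC_RPC_PASSWORD": "passwd",
    "BTC_RPC_PORT": 18443,
    "BTC_FEED_CONNECT": "127.0.0.1",
    "BTC_FEED_PORT": 28332,
    "API_PORT": 9814,
}

bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC_RPC")}
bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("BTC_FEED")}

print(bitcoind_connect_params)  # only the BTC_RPC_* entries
print(bitcoind_feed_params)     # only the BTC_FEED_* entries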