Esempio n. 1
0
    def __next__(self):
        """Pop one pending transaction, validate it and, if valid, move it
        into the main transaction pool and broadcast it.

        Raises StopIteration when nothing is pending or when the main pool
        is already full.  Returns True when the transaction was accepted,
        False when any validation step rejected it.
        """
        if not self.transaction_pool_obj.pending_tx_pool:
            raise StopIteration

        if len(self.transaction_pool_obj.transaction_pool) >= config.dev.transaction_pool_size:
            raise StopIteration

        # Pending-pool entries are tuples; the transaction itself is element 0.
        tx = self.transaction_pool_obj.pending_tx_pool.pop(0)
        tx = tx[0]

        if not tx.validate():
            return False

        addr_from_state = self.state.get_address(address=tx.txfrom)
        addr_from_pk_state = addr_from_state

        # If the tx was signed by a slave key, validate against that key's address state.
        addr_from_pk = Transaction.get_slave(tx)
        if addr_from_pk:
            addr_from_pk_state = self.state.get_address(address=addr_from_pk)

        is_valid_state = tx.validate_extended(addr_from_state=addr_from_state,
                                              addr_from_pk_state=addr_from_pk_state,
                                              transaction_pool=self.transaction_pool_obj.transaction_pool)

        is_valid_pool_state = tx.validate_transaction_pool(self.transaction_pool_obj.transaction_pool)

        if not (is_valid_state and is_valid_pool_state):
            logger.info('>>>TX %s failed state_validate', tx.txhash)
            return False

        logger.info('A TXN has been Processed %s', bin2hstr(tx.txhash))
        self.transaction_pool_obj.add_tx_to_pool(tx)
        self.broadcast_tx(tx)

        return True
Esempio n. 2
0
File: node.py Progetto: fanff/QRL
    def restart_unsynced_logic(self, delay=0):
        """Reschedule the unsynced-logic callback to run after *delay* seconds,
        cancelling any previously scheduled invocation first."""
        logger.info('Restarting unsynced logic in %s seconds', delay)
        try:
            reactor.unsynced_logic.cancel()
        except Exception:  # nothing scheduled (or already fired); safe to ignore
            pass

        reactor.unsynced_logic = reactor.callLater(delay, self.unsynced_logic)
Esempio n. 3
0
File: db.py Progetto: fanff/QRL
    def __init__(self):
        """Open (creating the directory if needed) the LevelDB database under
        the configured data directory."""
        path = os.path.join(config.user.data_dir, config.dev.db_name)
        self.db_dir = path
        logger.info('DB path: %s', path)

        os.makedirs(path, exist_ok=True)

        # TODO: leveldb python module is not very active. Decouple and replace
        self.db = leveldb.LevelDB(path)
Esempio n. 4
0
 def send_fetch_block(self, block_idx):
     """Send a legacy FB (fetch-block) request for block number *block_idx*
     to this connection's peer."""
     logger.info('<<<Fetching block: %s from %s', block_idx, self.connection_id)
     request = qrllegacy_pb2.LegacyMessage(
         func_name=qrllegacy_pb2.LegacyMessage.FB,
         fbData=qrllegacy_pb2.FBData(index=block_idx))
     self.send(request)
Esempio n. 5
0
File: node.py Progetto: fanff/QRL
    def update_node_state(self, new_sync_state: ESyncState):
        """Switch the node to *new_sync_state* and invoke the matching
        state-transition handler."""
        self.sync_state.state = new_sync_state
        logger.info('Status changed to %s', self.sync_state.state)

        handlers = {
            ESyncState.unsynced: self._handler_state_unsynced,
            ESyncState.syncing: self._handler_state_syncing,
            ESyncState.synced: self._handler_state_synced,
            ESyncState.forked: self._handler_state_forked,
        }
        handlers[self.sync_state.state]()
Esempio n. 6
0
    def test_hashchain_verify(self):
        """Each hashchain element, hashed (SIZE - i) more times, must equal the terminator."""
        seed = sha256(b'test_seed')

        HASHCHAIN_SIZE = 100
        hcb = hashchain(seed, 1, HASHCHAIN_SIZE)
        self.assertIsNotNone(hcb)
        self.assertEqual(HASHCHAIN_SIZE + 1, len(hcb.hashchain))

        for idx, link in enumerate(hcb.hashchain):
            expected = sha256_n(link, HASHCHAIN_SIZE - idx)
            logger.info("{:-4} {} {}".format(idx, bin2hstr(link), bin2hstr(expected)))
            self.assertEqual(hcb.hc_terminator, expected)
Esempio n. 7
0
    def _pre_check(self, block, ignore_duplicate):
        """Cheap acceptance checks before trying to add *block* to the chain:
        positive block number, internal validity, and (optionally) no duplicate."""
        if block.block_number < 1:
            return False

        if not block.validate():
            return False

        if not ignore_duplicate:
            # A block we already store is a duplicate delivery.
            if self.state.get_block(block.headerhash):
                logger.info('Duplicate block %s %s', block.block_number, bin2hstr(block.headerhash))
                return False

        return True
Esempio n. 8
0
File: Miner.py Progetto: fanff/QRL
 def solutionEvent(self, nonce):
     # NOTE: This function usually runs in the context of a C++ thread
     # Stamp the found nonce onto the candidate block and submit a copy.
     try:
         logger.debug('Solution Found %s', nonce)
         self._mining_block.set_mining_nonce(nonce)
         logger.info('Block #%s nonce: %s', self._mining_block.block_number, StringToUInt256(str(nonce))[-4:])
         logger.info('Hash Rate: %s H/s', self.hashRate())
         self.pre_block_logic(copy.deepcopy(self._mining_block))
     except Exception as e:
         logger.warning("Exception in solutionEvent")
         logger.exception(e)
Esempio n. 9
0
    def handle_peer_list(self, source, message: qrllegacy_pb2.LegacyMessage):
        """Merge a received PL (peer list) message into our known-peer set."""
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.PL)

        if not config.user.enable_peer_discovery:
            return

        if message.plData.peer_ips is None:
            return

        peers = {ip for ip in message.plData.peer_ips}
        peers.discard(source.host_ip)  # never track our own address
        logger.info('%s peers data received: %s', source.peer_ip, peers)
        self.update_peer_addresses(peers)
Esempio n. 10
0
File: Miner.py Progetto: grx7/QRL
 def solutionEvent(self, nonce):
     # NOTE: This function usually runs in the context of a C++ thread
     # Record the winning nonce and hand a copy of the block to chain logic.
     try:
         logger.debug('Solution Found %s', nonce)
         self._mining_block.set_nonces(nonce, 0)
         logger.info('Block #%s nonce: %s', self._mining_block.block_number,
                     StringToUInt256(str(nonce))[-4:])
         logger.info('Hash Rate: %s H/s', self.hashRate())
         self.pre_block_logic(copy.deepcopy(self._mining_block))
     except Exception as e:
         logger.warning("Exception in solutionEvent")
         logger.exception(e)
Esempio n. 11
0
    def block_received(self, source, block: Block):
        """Handle a block arriving during sync: verify its origin, sequence
        number, expected headerhash and validity, add it to the chain, then
        request the next block unless syncing has finished."""
        self.pow.last_pb_time = ntp.getTime()
        logger.info('>>> Received Block #%d %s', block.block_number,
                    bin2hstr(block.headerhash))

        # Only the channel we are actively syncing from may deliver blocks.
        if source != self._target_channel:
            if self._target_channel is None:
                logger.warning('Received block and target channel is None')
            else:
                logger.warning('Received block from unexpected peer')
                logger.warning('Expected peer: %s', self._target_channel.peer)
                logger.warning('Found peer: %s', source.peer)
            return

        # Drop out-of-order responses.
        if block.block_number != self._last_requested_block_number:
            logger.warning('Did not match %s',
                           self._last_requested_block_number)
            return

        # The headerhash must match the one announced by the target node.
        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[
            block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        if not block.validate(self._chain_manager, self.pow.future_blocks):
            logger.warning('Syncing Failed: Block Validation Failed')
            return

        if self._chain_manager.add_block(block, check_stale=False):
            # Block extended the main chain: postpone mining while we sync.
            if self._chain_manager.last_block.headerhash == block.headerhash:
                self.pow.suspend_mining_timestamp = ntp.getTime(
                ) + config.dev.sync_delay_mining
        else:
            logger.warning('Failed to Add Block')
            return

        # Reset the download watchdog; it may not be scheduled yet.
        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_number += 1

        self.peer_fetch_block()
Esempio n. 12
0
    def validate_extended(self, addr_from_state, addr_from_pk_state, transaction_pool):
        """Stateful validation for a token transaction: slave relationship,
        address validity, sufficient balance and OTS key freshness."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        balance = addr_from_state.balance

        if self.fee < 0:
            logger.info('State validation failed for %s because: Negative send', self.txhash)
            return False

        if not AddressState.address_is_valid(self.addr_from):
            logger.warning('Invalid address addr_from: %s', self.addr_from)
            return False

        if not AddressState.address_is_valid(self.owner):
            logger.warning('Invalid address owner_addr: %s', self.owner)
            return False

        # Every initial token holder must have a well-formed address.
        for entry in self.initial_balances:
            if not AddressState.address_is_valid(entry.address):
                logger.warning('Invalid address address in initial_balances: %s', entry.address)
                return False

        if balance < self.fee:
            logger.info('TokenTxn State validation failed for %s because: Insufficient funds', self.txhash)
            logger.info('balance: %s, Fee: %s', balance, self.fee)
            return False

        if self.ots_key_reuse(addr_from_pk_state, self.ots_key):
            logger.info('TokenTxn State validation failed for %s because: OTS Public key re-use detected', self.txhash)
            return False

        return True
    def validate_transaction_pool(self, transaction_pool):
        """Reject this transaction when another pool entry signed with the
        same public key already uses the same OTS key index."""
        for other in transaction_pool:
            if other.txhash == self.txhash:
                continue  # skip our own entry
            if self.PK != other.PK:
                continue  # different signer: no OTS conflict possible
            if other.ots_key == self.ots_key:
                logger.info('State validation failed for %s because: OTS Public key re-use detected', self.txhash)
                logger.info('Subtype %s', type(self))
                return False

        return True
Esempio n. 14
0
    def ots_key_reuse(state_addr, ots_key):
        """Return True when the given OTS key index is already marked as used
        in the address state's OTS bitfield."""
        if state_addr is None:
            logger.info('-->> state_addr None not possible')
            return False

        byte_index = ots_key >> 3   # which byte of the bitfield
        bit_index = ots_key % 8     # which bit within that byte
        used_byte = bytearray(state_addr.ots_bitfield[byte_index])[0]
        return bool((used_byte >> bit_index) & 1)
Esempio n. 15
0
    def validate_extended(self, addr_from_state: AddressState,
                          addr_from_pk_state: AddressState):
        """Stateful checks for a lattice transaction: slave relation,
        non-negative fee, sufficient balance and fresh OTS key."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        balance = addr_from_state.balance

        if self.fee < 0:
            logger.info(
                'Lattice Txn: State validation failed %s : Negative fee %s',
                bin2hstr(self.txhash), self.fee)
            return False

        if balance < self.fee:
            logger.info(
                'Lattice Txn: State validation failed %s : Insufficient funds',
                bin2hstr(self.txhash))
            logger.info('balance: %s, fee: %s', balance, self.fee)
            return False

        if addr_from_pk_state.ots_key_reuse(self.ots_key):
            logger.info('Lattice Txn: OTS Public key re-use detected %s',
                        bin2hstr(self.txhash))
            return False

        return True
Esempio n. 16
0
    def _validate_custom(self) -> bool:
        if len(self.message_hash) == 0:
            logger.warning('Message cannot be empty')
            return False

        if len(self.addr_to) > 0 and not (OptimizedAddressState.address_is_valid(self.addr_to)):
            logger.warning('[MessageTransaction] Invalid address addr_to: %s', bin2hstr(self.addr_to))
            return False

        if self.fee < 0:
            logger.info('State validation failed for %s because: Negative send', bin2hstr(self.txhash))
            return False

        return True
Esempio n. 17
0
 def mine_next(self, parent_block):
     """Start mining the block after *parent_block*, unless mining is
     disabled or temporarily suspended (e.g. while syncing)."""
     if ntp.getTime() < self.suspend_mining_timestamp:
         return
     if not config.user.mining_enabled:
         return

     parent_metadata = self.chain_manager.state.get_block_metadata(
         parent_block.headerhash)
     self.miner.prepare_next_unmined_block_template(
         mining_address=self.mining_address,
         tx_pool=self.chain_manager.tx_pool,
         parent_block=parent_block,
         parent_difficulty=parent_metadata.block_difficulty)
     logger.info('Mining Block #%s', parent_block.block_number + 1)
     self.miner.start_mining(parent_block, parent_metadata.block_difficulty)
Esempio n. 18
0
    def _pre_check(self, block, ignore_duplicate):
        """Basic acceptance checks for *block* before any chain mutation."""
        if block.block_number < 1:
            return False

        if not block.validate():
            return False

        if not ignore_duplicate and self.state.get_block(block.headerhash):
            # Already stored: duplicate delivery.
            logger.info('Duplicate block %s %s', block.block_number,
                        bin2hstr(block.headerhash))
            return False

        return True
Esempio n. 19
0
    def validate_extended(self, addr_from_state: AddressState,
                          addr_from_pk_state: AddressState) -> bool:
        """Stateful checks for a slave transaction: slave relation, fee sign,
        sufficient balance and OTS key freshness."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        balance = addr_from_state.balance

        if self.fee < 0:
            logger.info(
                'Slave: State validation failed for %s because: Negative send',
                bin2hstr(self.txhash))
            return False

        if balance < self.fee:
            logger.info(
                'Slave: State validation failed for %s because: Insufficient funds',
                bin2hstr(self.txhash))
            logger.info('balance: %s, amount: %s', balance, self.fee)
            return False

        if addr_from_pk_state.ots_key_reuse(self.ots_key):
            logger.info(
                'Slave: State validation failed for %s because: OTS Public key re-use detected',
                bin2hstr(self.txhash))
            return False

        return True
Esempio n. 20
0
    def validate_transaction_pool(self, transaction_pool):
        """Detect OTS key re-use against other pool transactions signed with
        the same public key."""
        for pooled in transaction_pool:
            if pooled.txhash == self.txhash:
                continue  # our own entry
            if self.PK != pooled.PK:
                continue  # different signer
            if pooled.ots_key == self.ots_key:
                logger.info('State validation failed for %s because: OTS Public key re-use detected', self.txhash)
                logger.info('Subtype %s', self.subtype)
                return False

        return True
Esempio n. 21
0
def get_mining_address(mining_address: str):
    """Parse and validate a mining address, falling back to the configured
    one when none is supplied. Returns the address bytes, or None on any
    parsing/validation failure."""
    try:
        source = mining_address if mining_address else config.user.mining_address
        parsed = bytes(hstr2bin(source[1:]))  # drop the leading address prefix character

        if not AddressState.address_is_valid(parsed):
            raise ValueError('Mining Address Validation Failed')

        return parsed
    except Exception as e:
        logger.info('Failed Parsing Mining Address %s', e)

    return None
Esempio n. 22
0
 def handleEvent(self, event):
     # NOTE: This function usually runs in the context of a C++ thread
     # Only SOLUTION events are acted on; everything else is ignored.
     try:
         if event.type != SOLUTION:
             return
         nonce = event.nonce
         self._mining_block.set_nonces(nonce, 0)
         logger.debug('Solution Found %s', nonce)
         logger.info('Block #%s nonce: %s',
                     self._mining_block.block_number, nonce)
         logger.info('Hash Rate: %s H/s', self.hashRate())
         self.pre_block_logic(copy.deepcopy(self._mining_block))
     except Exception as e:
         logger.warning("Exception in solutionEvent")
         logger.exception(e)
Esempio n. 23
0
def main():
    """Entry point: parse CLI arguments, initialize state/chain/node, start
    the gRPC services and P2P networking, then hand control to the Twisted
    reactor (which blocks until shutdown)."""
    args = parse_arguments()

    logger.debug(
        "====================================================================================="
    )
    logger.info("QRL Path: %s", args.qrl_dir)
    config.user.qrl_dir = expanduser(args.qrl_dir)
    config.create_path(config.user.qrl_dir)
    logger.debug(
        "====================================================================================="
    )

    config.create_path(config.user.wallet_dir)
    # Mining requires a valid credit address; bail out early when it is bad.
    mining_address = None
    if config.user.mining_enabled:
        mining_address = get_mining_address(args.mining_address)

        if not mining_address:
            logger.warning('Invalid Mining Credit Wallet Address')
            logger.warning('%s', args.mining_address)
            return False

    ntp.setDrift()

    if args.debug:
        logger.warning("FAULT HANDLER ENABLED")
        faulthandler.enable()

    logger.info('Initializing chain..')
    persistent_state = State()

    # Test/benchmark hook: force a fixed measurement value from the CLI.
    if args.measurement > -1:
        persistent_state.get_measurement = MagicMock(
            return_value=args.measurement)

    chain_manager = ChainManager(state=persistent_state)
    chain_manager.load(Block.from_json(GenesisBlock().to_json()))

    qrlnode = QRLNode(db_state=persistent_state, mining_address=mining_address)
    qrlnode.set_chain_manager(chain_manager)

    set_logger(args, qrlnode.sync_state)

    #######
    # NOTE: Keep assigned to a variable or might get collected
    admin_service, grpc_service, mining_service = start_services(qrlnode)

    qrlnode.start_listening()
    qrlnode.connect_peers()

    qrlnode.start_pow(args.mining_thread_count)

    logger.info('QRL blockchain ledger %s', config.dev.version)
    logger.info('mining/staking address %s', args.mining_address)

    # FIXME: This will be removed once we move away from Twisted
    reactor.run()
Esempio n. 24
0
    def handle_fetch_block(self, source, message: qrllegacy_pb2.LegacyMessage):  # Fetch Request for block
        """Reply to an FB (fetch-block) request with the requested block,
        provided the index lies within our current chain height."""
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.FB)

        idx = message.fbData.index

        logger.info(' Request for %s by %s', idx, source.connection_id)
        if not (0 < idx <= source.factory.chain_height):
            return

        requested_block = source.factory.get_block(idx)
        reply = qrllegacy_pb2.LegacyMessage(
            func_name=qrllegacy_pb2.LegacyMessage.PB,
            pbData=qrllegacy_pb2.PBData(block=requested_block.pbdata))
        source.send(reply)
Esempio n. 25
0
    def _add_block(self, block, batch=None):
        """Try to attach *block* (as an orphan or onto a branch) after
        enforcing the block-size limit; resets the miner trigger first."""
        self.trigger_miner = False

        size_limit = self.state.get_block_size_limit(block)
        if size_limit and block.size > size_limit:
            logger.info('Block Size greater than threshold limit %s > %s',
                        block.size, size_limit)
            return False

        return (self._try_orphan_add_block(block, batch) or
                self._try_branch_add_block(block, batch))
Esempio n. 26
0
    def __init__(self, db_dir=None):
        """Open the state database, creating a fresh one (stamped with a
        state-version key) when it does not exist yet."""
        self.db_dir = db_dir if db_dir else os.path.join(config.user.data_dir, config.dev.db_name)
        logger.info('DB path: %s', self.db_dir)

        os.makedirs(self.db_dir, exist_ok=True)

        try:
            self.db = plyvel.DB(self.db_dir, max_open_files=1000, lru_cache_size=5 * 1024)
        except Exception:
            # First run: create the DB and stamp the state schema version.
            self.db = plyvel.DB(self.db_dir,
                                max_open_files=1000,
                                lru_cache_size=5 * 1024,
                                create_if_missing=True,
                                compression='snappy')
            self.db.put(b'state_version', str(1).encode())
Esempio n. 27
0
File: node.py Progetto: kstuart/QRL
    def start_download(self):
        """Begin downloading blocks unless already synced; with no connected
        peers we move straight to the synced state."""
        # FIXME: Why PoW is downloading blocks?
        # add peers and their identity to requested list
        # FMBH
        if self.sync_state.state == ESyncState.synced:
            return

        logger.info('Checking Download..')

        if self.p2p_factory.connections == 0:
            logger.warning('No connected peers. Moving to synced state')
            self.update_node_state(ESyncState.synced)
            return

        self.update_node_state(ESyncState.syncing)
        logger.info('Initializing download from %s', self.chain_manager.height + 1)
        self.p2p_factory.randomize_block_fetch()
Esempio n. 28
0
    def _add_block(self, block, ignore_duplicate=False, batch=None):
        """Attempt to add *block*: size limit, pre-checks, then orphan or
        branch insertion."""
        size_limit = self.state.get_block_size_limit(block)
        if size_limit and block.size > size_limit:
            logger.info('Block Size greater than threshold limit %s > %s', block.size, size_limit)
            return False

        if not self._pre_check(block, ignore_duplicate):
            logger.debug('Failed pre_check')
            return False

        return (self._try_orphan_add_block(block, batch) or
                self._try_branch_add_block(block, batch))
Esempio n. 29
0
File: node.py Progetto: fanff/QRL
    def start_download(self):
        # FIXME: Why PoW is downloading blocks?
        # add peers and their identity to requested list
        # FMBH
        if self.sync_state.state == ESyncState.synced:
            return

        logger.info('Checking Download..')

        if self.p2p_factory.connections == 0:
            logger.warning('No connected peers. Moving to synced state')
            self.update_node_state(ESyncState.synced)
            return

        self.update_node_state(ESyncState.syncing)
        logger.info('Initializing download from %s', self.chain_manager.height + 1)
        self.p2p_factory.randomize_block_fetch()
Esempio n. 30
0
 def validate(self) -> bool:
     """
     Run validate_or_raise, converting any failure into a logged False.
     Exists so legacy callers can use a boolean API instead of exceptions.
     :return: True if the transaction is valid
     :rtype: bool
     """
     try:
         self.validate_or_raise()
     except ValueError as e:
         logger.info('[%s] failed validate_tx', bin2hstr(self.txhash))
         logger.warning(str(e))
         return False
     except Exception as e:
         logger.exception(e)
         return False
     return True
Esempio n. 31
0
    def add_block(self, block: Block) -> bool:
        """Add *block* if it lies within the re-org window; on success persist
        the write batch and refresh child metadata."""
        if block.block_number < self.height - config.dev.reorg_limit:
            logger.debug('Skipping block #%s as beyond re-org limit',
                         block.block_number)
            return False

        batch = self.state.get_batch()
        if not self._add_block(block, batch=batch):
            return False

        self.state.write_batch(batch)
        self.update_child_metadata(
            block.headerhash
        )  # TODO: Not needed to execute when an orphan block is added
        logger.info('Added Block #%s %s', block.block_number,
                    bin2hstr(block.headerhash))
        return True
Esempio n. 32
0
 def validate(self, verify_signature=True) -> bool:
     """
     Run validate_or_raise, converting any failure into a logged False.
     Exists so legacy callers get a boolean API instead of exceptions.
     :return: True if the transaction is valid
     :rtype: bool
     """
     try:
         self.validate_or_raise(verify_signature)
     except ValueError as e:
         logger.info('[%s] failed validate_tx', bin2hstr(self.txhash))
         logger.warning(str(e))
         return False
     except Exception as e:
         logger.exception(e)
         return False
     return True
Esempio n. 33
0
    def broadcast_tx(self, tx: TransferTransaction):
        """Map the transaction's concrete type to its legacy wire type, then
        register and broadcast it to peers."""
        logger.info('<<<Transmitting TX: %s', bin2hstr(tx.txhash))

        # Order matters: first matching isinstance wins.
        type_map = (
            (MessageTransaction, qrllegacy_pb2.LegacyMessage.MT),
            (TransferTransaction, qrllegacy_pb2.LegacyMessage.TX),
            (TokenTransaction, qrllegacy_pb2.LegacyMessage.TK),
            (TransferTokenTransaction, qrllegacy_pb2.LegacyMessage.TT),
            (SlaveTransaction, qrllegacy_pb2.LegacyMessage.SL),
        )
        for tx_cls, legacy_type in type_map:
            if isinstance(tx, tx_cls):
                break
        else:
            raise ValueError('Invalid Transaction Type')
        self.register_and_broadcast(legacy_type, tx.get_message_hash(),
                                    tx.pbdata)
    def _add_block(self, block, ignore_duplicate=False, batch=None):
        """Validate size and pre-conditions, then try orphan/branch insertion."""
        limit = self.state.get_block_size_limit(block)
        if limit and block.size > limit:
            logger.info('Block Size greater than threshold limit %s > %s', block.size, limit)
            return False

        if not self._pre_check(block, ignore_duplicate):
            logger.debug('Failed pre_check')
            return False

        if self._try_orphan_add_block(block, batch):
            return True

        return self._try_branch_add_block(block, batch)
Esempio n. 35
0
    def broadcast_tx(self, tx: TransferTransaction):
        """Register and broadcast *tx*, mapping its subtype to the matching
        legacy wire type."""
        logger.info('<<<Transmitting TX: %s', tx.txhash)

        subtype_map = {
            qrl_pb2.Transaction.MESSAGE: qrllegacy_pb2.LegacyMessage.MT,
            qrl_pb2.Transaction.TRANSFER: qrllegacy_pb2.LegacyMessage.TX,
            qrl_pb2.Transaction.TOKEN: qrllegacy_pb2.LegacyMessage.TK,
            qrl_pb2.Transaction.TRANSFERTOKEN: qrllegacy_pb2.LegacyMessage.TT,
            qrl_pb2.Transaction.LATTICE: qrllegacy_pb2.LegacyMessage.LT,
            qrl_pb2.Transaction.SLAVE: qrllegacy_pb2.LegacyMessage.SL,
        }
        if tx.subtype not in subtype_map:
            raise ValueError('Invalid Transaction Type')
        self.register_and_broadcast(subtype_map[tx.subtype], tx.get_message_hash(), tx.pbdata)
Esempio n. 36
0
    def block_received(self, source, block: Block):
        """Handle a block received while syncing: check the sending peer, the
        expected block number and headerhash, add it to the chain, then
        request the next block unless syncing has finished."""
        self.pow.last_pb_time = time.time()
        logger.info('>>> Received Block #%d %s', block.block_number,
                    bin2hstr(block.headerhash))

        # Only the peer we are syncing from may deliver blocks.
        if source != self._target_peer:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s',
                           self._target_peer.connection_id)
            logger.warning('Found peer: %s', source.connection_id)
            return

        # Drop out-of-order responses.
        if block.block_number != self._last_requested_block_idx:
            logger.warning('Did not match %s', self._last_requested_block_idx)
            return

        # Headerhash must agree with the target node's announced chain.
        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[
            block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        # FIXME: This check should not be necessary
        if not self._chain_manager.add_block(block):
            logger.warning('Failed to Add Block')
            return

        # Reset the download watchdog; it may not be scheduled yet.
        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_idx += 1
        # Re-check: the increment above may have reached the sync target.
        if self.is_syncing_finished():
            return

        self.peer_fetch_block()
Esempio n. 37
0
File: node.py Progetto: fanff/QRL
    def monitor_bk(self):
        """Watchdog: re-activate the PoW cycle after a stall and fall back to
        unsynced when block downloads stop. Reschedules itself every 60s."""
        # FIXME: Too many magic numbers / timing constants
        # FIXME: This is obsolete
        since_pow_cycle = time.time() - self.last_pow_cycle
        if since_pow_cycle > 90:
            if self.sync_state.state == ESyncState.unsynced:
                if time.time() - self.last_bk_time > 120:
                    self.last_pow_cycle = time.time()
                    logger.info(' POW cycle activated by monitor_bk() ')
                    self.update_node_state(ESyncState.synced)
                reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
                return

        since_last_pb = time.time() - self.last_pb_time
        if self.sync_state.state == ESyncState.syncing and since_last_pb > 60:
            self.update_node_state(ESyncState.unsynced)
            self.epoch_diff = -1

        reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
Esempio n. 38
0
    def handle_peer_list(self, source, message: qrllegacy_pb2.LegacyMessage):
        """Validate an incoming PL message and fold the advertised peers
        (plus the sender itself) into our known-peer set."""
        P2PBaseObserver._validate_message(message,
                                          qrllegacy_pb2.LegacyMessage.PL)

        if not config.user.enable_peer_discovery:
            return

        if not message.plData.peer_ips:
            return

        sender = IPMetadata(source.peer.ip, message.plData.public_port)

        peers = self.combine_peer_lists(message.plData.peer_ips,
                                        [sender.full_address],
                                        check_global=True)
        peers.discard(source.host.full_address)  # never track our own address

        logger.info('%s peers data received: %s', source.peer.ip, peers)
        self.extend_known_peers(peers)
Esempio n. 39
0
    def _validate_extended(self, state_container: StateContainer) -> bool:
        """Stateful checks: hard-fork gating for addr_to, message length limit
        and sufficient balance for the fee."""
        dev_config = state_container.current_dev_config

        if len(self.addr_to) != 0:
            # Non-empty addr_to is only allowed after the first hard fork height.
            if state_container.block_number < dev_config.hard_fork_heights[0]:
                logger.warning("[MessageTransaction] Hard Fork Feature not yet activated")
                return False

        if len(self.message_hash) > dev_config.message_max_length:  # TODO: Move to dev config
            logger.warning('Message length cannot be more than %s', dev_config.message_max_length)
            logger.warning('Found message length %s', len(self.message_hash))
            return False

        balance = state_container.addresses_state[self.addr_from].balance

        if balance < self.fee:
            logger.info('State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
            logger.info('balance: %s, amount: %s', balance, self.fee)
            return False

        return True
Esempio n. 40
0
File: node.py Progetto: kprimice/QRL
    def monitor_bk(self):
        """Periodic (60s) watchdog: kick the PoW loop if it stalled and drop
        back to unsynced when syncing makes no progress."""
        # FIXME: Too many magic numbers / timing constants
        # FIXME: This is obsolete
        if time.time() - self.last_pow_cycle > 90:
            if self.sync_state.state == ESyncState.unsynced:
                if time.time() - self.last_bk_time > 120:
                    self.last_pow_cycle = time.time()
                    logger.info(' POW cycle activated by monitor_bk() ')
                    self.update_node_state(ESyncState.synced)
                reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
                return

        if self.sync_state.state == ESyncState.syncing and time.time() - self.last_pb_time > 60:
            self.update_node_state(ESyncState.unsynced)
            self.epoch_diff = -1

        reactor.monitor_bk = reactor.callLater(60, self.monitor_bk)
Esempio n. 41
0
    def block_received(self, source, block: Block):
        """Handle a block received while syncing: check the peer, expected
        block number and headerhash, add it to the chain (suspending mining
        briefly on success), then request the next block unless done."""
        self.pow.last_pb_time = time.time()
        logger.info('>>> Received Block #%d %s', block.block_number, bin2hstr(block.headerhash))

        # Only the peer we are syncing from may deliver blocks.
        if source != self._target_peer:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s', self._target_peer.addr_remote)
            logger.warning('Found peer: %s', source.addr_remote)
            return

        # Drop out-of-order responses.
        if block.block_number != self._last_requested_block_idx:
            logger.warning('Did not match %s', self._last_requested_block_idx)
            return

        # Headerhash must agree with the target node's announced chain.
        target_start_blocknumber = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[block.block_number - target_start_blocknumber]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        if self._chain_manager.add_block(block):
            # Block extended the main chain: postpone mining while we sync.
            if self._chain_manager.last_block.headerhash == block.headerhash:
                self.pow.suspend_mining_timestamp = ntp.getTime() + config.dev.sync_delay_mining
        else:
            logger.warning('Failed to Add Block')
            return

        # Reset the download watchdog; it may not be scheduled yet.
        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_idx += 1
        # Re-check: the increment above may have reached the sync target.
        if self.is_syncing_finished():
            return

        self.peer_fetch_block()
Esempio n. 42
0
    def add_block(self, block: Block) -> bool:
        """Thread-safe block addition: enforce the re-org limit, skip
        duplicates, then insert and (unless a fork occurred) persist the batch."""
        with self.lock:
            if block.block_number < self.height - config.dev.reorg_limit:
                logger.debug('Skipping block #%s as beyond re-org limit',
                             block.block_number)
                return False

            if self.get_block_is_duplicate(block):
                return False

            batch = self._state.batch
            added, forked = self._add_block(block, batch=batch)
            if not added:
                return False

            if not forked:
                # NOTE(review): batch intentionally not committed on a fork —
                # presumably the fork path persists state itself; confirm.
                self._state.write_batch(batch)
            logger.info('Added Block #%s %s', block.block_number,
                        bin2hstr(block.headerhash))
            return True
Esempio n. 43
0
def _launch_grpc_server(name, registrations, workers, max_rpcs, enabled, host, port):
    """Create one gRPC server, attach its servicers, and start it when enabled.

    name: short label used in the startup log line.
    registrations: callables that take the server and register servicers on it.
    workers / max_rpcs: thread-pool size and concurrent-RPC cap.
    enabled: when False the server is built but never bound or started.
    host / port: insecure listen address, used only when enabled.
    Returns the (possibly unstarted) grpc server object.
    """
    server = grpc.server(
        ThreadPoolExecutor(max_workers=workers),
        maximum_concurrent_rpcs=max_rpcs)

    for register in registrations:
        register(server)

    if enabled:
        server.add_insecure_port("{0}:{1}".format(host, port))
        server.start()
        logger.info("grpc %s service - started !", name)

    return server


def start_services(node: QRLNode):
    """Start the public, admin, mining and debug gRPC services for *node*.

    Every server object is always constructed; each is only bound and
    started when its config.user.*_enabled flag is set.  Returns
    (admin_server, public_server, mining_server, debug_server); callers
    must keep the references alive.
    """
    cfg = config.user

    public_server = _launch_grpc_server(
        "public",
        [lambda srv: add_BaseServicer_to_server(BaseService(node), srv),
         lambda srv: add_PublicAPIServicer_to_server(PublicAPIService(node), srv)],
        cfg.public_api_threads, cfg.public_api_max_concurrent_rpc,
        cfg.public_api_enabled, cfg.public_api_host, cfg.public_api_port)

    admin_server = _launch_grpc_server(
        "admin",
        [lambda srv: add_AdminAPIServicer_to_server(AdminAPIService(node), srv)],
        cfg.admin_api_threads, cfg.admin_api_max_concurrent_rpc,
        cfg.admin_api_enabled, cfg.admin_api_host, cfg.admin_api_port)

    mining_server = _launch_grpc_server(
        "mining",
        [lambda srv: add_MiningAPIServicer_to_server(MiningAPIService(node), srv)],
        cfg.mining_api_threads, cfg.mining_api_max_concurrent_rpc,
        cfg.mining_api_enabled, cfg.mining_api_host, cfg.mining_api_port)

    debug_server = _launch_grpc_server(
        "debug",
        [lambda srv: add_DebugAPIServicer_to_server(DebugAPIService(node), srv)],
        cfg.debug_api_threads, cfg.debug_api_max_concurrent_rpc,
        cfg.debug_api_enabled, cfg.debug_api_host, cfg.debug_api_port)

    return admin_server, public_server, mining_server, debug_server
    def handle_version(self, source, message: qrllegacy_pb2.LegacyMessage):
        """
        Version
        Replies with our own version/genesis data when the peer sent an
        empty version; otherwise validates the peer's version, negotiates
        the rate limit, and checks the genesis hash.  On any mismatch the
        peer is disconnected and banned.
        """
        self._validate_message(message, qrllegacy_pb2.LegacyMessage.VE)

        ve_data = message.veData

        # An empty version means the peer is asking for ours: answer and stop.
        if not ve_data.version:
            reply = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.VE,
                veData=qrllegacy_pb2.VEData(
                    version=config.dev.version,
                    genesis_prev_hash=config.user.genesis_prev_headerhash,
                    rate_limit=config.user.peer_rate_limit))
            source.send(reply)
            return

        logger.info('%s version: %s | genesis prev_headerhash %s',
                    source.peer.ip, ve_data.version, ve_data.genesis_prev_hash)

        if not self._get_version_compatibility(ve_data.version):
            logger.warning(
                "Disconnecting from Peer %s running incompatible node version %s",
                source.peer.ip, ve_data.version)
            source.loseConnection()
            self.ban_channel(source)
            return

        # Never exceed our own configured rate limit.
        source.rate_limit = min(config.user.peer_rate_limit, ve_data.rate_limit)

        if ve_data.genesis_prev_hash != config.user.genesis_prev_headerhash:
            logger.warning('%s genesis_prev_headerhash mismatch', source.peer)
            logger.warning('Expected: %s', config.user.genesis_prev_headerhash)
            logger.warning('Found: %s', ve_data.genesis_prev_hash)
            source.loseConnection()
            self.ban_channel(source)
    def __next__(self):
        """Process one pending transaction.

        Returns True when the transaction is accepted (or already present)
        in the pool, False when validation fails.  Raises StopIteration
        when nothing is pending or the pool is full.
        """
        if not self.transaction_pool_obj.pending_tx_pool:
            raise StopIteration

        if len(self.transaction_pool_obj.transaction_pool
               ) >= config.dev.transaction_pool_size:
            raise StopIteration

        # Pending entries are tuples; the transaction itself is item 0.
        tx = self.transaction_pool_obj.pending_tx_pool.pop(0)[0]

        if not tx.validate():
            return False

        addr_from_state = self.state.get_address(address=tx.addr_from)
        addr_from_pk_state = addr_from_state

        # When signed via a slave key, OTS state lives on the slave address.
        addr_from_pk = Transaction.get_slave(tx)
        if addr_from_pk:
            addr_from_pk_state = self.state.get_address(address=addr_from_pk)

        is_valid_state = tx.validate_extended(
            addr_from_state=addr_from_state,
            addr_from_pk_state=addr_from_pk_state)

        is_valid_pool_state = tx.validate_transaction_pool(
            self.transaction_pool_obj.transaction_pool)

        if not (is_valid_state and is_valid_pool_state):
            # Fix: render the hash with bin2hstr for consistency with the
            # success log below (raw bytes are unreadable in logs).
            logger.info('>>>TX %s failed state_validate', bin2hstr(tx.txhash))
            return False

        # Already pooled: report success but do not re-add or re-broadcast.
        for old_tx in self.transaction_pool_obj.transaction_pool:
            if old_tx.txhash == tx.txhash:
                return True

        logger.info('A TXN has been Processed %s', bin2hstr(tx.txhash))
        self.transaction_pool_obj.add_tx_to_pool(tx)
        self.broadcast_tx(tx)

        return True
Esempio n. 46
0
    def add_connection(self, conn_protocol) -> bool:
        """Register a new peer connection; return False when it is rejected."""
        # TODO: Most of this can go the peer manager
        peer_ip = conn_protocol.peer_ip

        if self._qrl_node.is_banned(peer_ip):
            conn_protocol.loseConnection()
            return False

        # FIXME: (For AWS) This could be problematic for other users
        # FIXME: identify nodes by an GUID?
        if config.dev.public_ip and peer_ip == config.dev.public_ip:
            conn_protocol.loseConnection()
            return False

        if self.reached_conn_limit:
            # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
            logger.info('Peer limit hit. Disconnecting client %s', peer_ip)
            conn_protocol.loseConnection()
            return False

        peer_list = self._qrl_node.peer_addresses

        # A connection back to ourselves: scrub us from the peer list and drop.
        if peer_ip == conn_protocol.host_ip:
            if peer_ip in peer_list:
                logger.info('Self in peer_list, removing..')
                peer_list.remove(peer_ip)
                self._qrl_node.peer_manager.update_peer_addresses(peer_list)
            conn_protocol.loseConnection()
            return False

        self._peer_connections.append(conn_protocol)

        if peer_ip not in peer_list:
            logger.debug('Adding to peer_list')
            peer_list.add(peer_ip)
            self._qrl_node.peer_manager.update_peer_addresses(peer_list)

        logger.debug('>>> new peer connection : %s:%s ', peer_ip,
                     str(conn_protocol.peer_port))

        return True
Esempio n. 47
0
def start_services(node: QRLNode):
    """Spin up the public and admin gRPC endpoints for *node*.

    Returns (admin_server, public_server); the caller must keep the
    references alive or the servers may be garbage collected.
    """
    rpc_cap = config.user.max_peers_limit

    public_server = grpc.server(ThreadPoolExecutor(max_workers=1),
                                maximum_concurrent_rpcs=rpc_cap)
    add_BaseServicer_to_server(BaseService(node), public_server)
    add_PublicAPIServicer_to_server(PublicAPIService(node), public_server)
    # Public API listens on all interfaces (IPv6 wildcard).
    public_server.add_insecure_port("[::]:9009")
    public_server.start()
    logger.info("grpc public service - started !")

    admin_server = grpc.server(ThreadPoolExecutor(max_workers=1),
                               maximum_concurrent_rpcs=rpc_cap)
    add_AdminAPIServicer_to_server(AdminAPIService(node), admin_server)
    # Admin API binds to loopback only.
    admin_server.add_insecure_port("127.0.0.1:9008")
    admin_server.start()
    logger.info("grpc admin service - started !")

    return admin_server, public_server
Esempio n. 48
0
    def block_received(self, source, block: Block):
        """Handle a block arriving during sync.

        Verifies the sending peer, the expected block index, and the
        advertised header hash before adding the block and requesting
        the next one (unless syncing has finished).
        """
        self.pow.last_pb_time = time.time()
        logger.info('>>> Received Block #%d %s', block.block_number, bin2hstr(block.headerhash))

        # Only the peer we are actively syncing from may feed us blocks.
        if source != self._target_peer:
            logger.warning('Received block from unexpected peer')
            logger.warning('Expected peer: %s', self._target_peer.connection_id)
            logger.warning('Found peer: %s', source.connection_id)
            return

        if block.block_number != self._last_requested_block_idx:
            logger.warning('Did not match %s', self._last_requested_block_idx)
            return

        # The header hash must agree with what the target node advertised.
        window_start = self._target_node_header_hash.block_number
        expected_headerhash = self._target_node_header_hash.headerhashes[block.block_number - window_start]
        if block.headerhash != expected_headerhash:
            logger.warning('Did not match headerhash')
            logger.warning('Expected headerhash %s', expected_headerhash)
            logger.warning('Found headerhash %s', block.headerhash)
            return

        # FIXME: This check should not be necessary
        if not self._chain_manager.add_block(block):
            logger.warning('Failed to Add Block')
            return

        try:
            reactor.download_monitor.cancel()
        except Exception as e:
            logger.warning("PB: %s", e)

        if self.is_syncing_finished():
            return

        self._last_requested_block_idx += 1
        if self.is_syncing_finished():
            return

        self.peer_fetch_block()
Esempio n. 49
0
    def add_connection(self, conn_protocol) -> bool:
        """Accept or reject a new peer connection; True when it is kept."""
        # TODO: Most of this can go the peer manager

        def drop() -> bool:
            # Single rejection path: close the connection and report failure.
            conn_protocol.loseConnection()
            return False

        if self._qrl_node.is_banned(conn_protocol.peer_ip):
            return drop()

        # FIXME: (For AWS) This could be problematic for other users
        # FIXME: identify nodes by an GUID?
        if config.dev.public_ip and conn_protocol.peer_ip == config.dev.public_ip:
            return drop()

        if self.reached_conn_limit:
            # FIXME: Should we stop listening to avoid unnecessary load due to many connections?
            logger.info('Peer limit hit. Disconnecting client %s', conn_protocol.peer_ip)
            return drop()

        known_peers = self._qrl_node.peer_addresses
        if conn_protocol.peer_ip == conn_protocol.host_ip:
            # Connected to ourselves: make sure we are not in our own list.
            if conn_protocol.peer_ip in known_peers:
                logger.info('Self in peer_list, removing..')
                known_peers.remove(conn_protocol.peer_ip)
                self._qrl_node.peer_manager.update_peer_addresses(known_peers)
            return drop()

        self._peer_connections.append(conn_protocol)

        if conn_protocol.peer_ip not in known_peers:
            logger.debug('Adding to peer_list')
            known_peers.add(conn_protocol.peer_ip)
            self._qrl_node.peer_manager.update_peer_addresses(known_peers)

        logger.debug('>>> new peer connection : %s:%s ', conn_protocol.peer_ip, str(conn_protocol.peer_port))

        return True
Esempio n. 50
0
    def validate_extended(self, addr_from_state, addr_from_pk_state, transaction_pool):
        """Stateful validation: slave relationship, non-negative amount/fee,
        sufficient balance for the fee, and OTS key freshness.
        Returns True only when every check passes."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        balance = addr_from_state.balance
        txid = self.txhash

        # Negative values are never valid.
        if self.fee < 0 or self.amount < 0:
            logger.info('TransferTokenTransaction State validation failed for %s because: ', txid)
            logger.info('Txn amount: %s, Fee: %s', self.amount, self.fee)
            return False

        # The sender's balance must at least cover the fee.
        if balance < self.fee:
            logger.info('TransferTokenTransaction State validation failed for %s because: Insufficient funds',
                        txid)
            logger.info('balance: %s, Fee: %s', balance, self.fee)
            return False

        # Each OTS key index may sign only once.
        if self.ots_key_reuse(addr_from_pk_state, self.ots_key):
            logger.info('TransferTokenTransaction State validation failed for %s because: OTS Public key re-use detected',
                        txid)
            return False

        return True
Esempio n. 51
0
    def handle_block(self, source, message: qrllegacy_pb2.LegacyMessage):  # block received
        """
        Block
        Decode an incoming block message, confirm it was actually requested,
        then hand it to the PoW logic and re-register it for relay.
        :return:
        """
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.BK)

        try:
            block = Block(message.block)
        except Exception as e:
            # Malformed payload: log and drop, never crash the channel.
            logger.error('block rejected - unable to decode serialised data %s', source.peer_ip)
            logger.exception(e)
            return

        logger.info('>>>Received block from %s %s %s',
                    source.connection_id,
                    block.block_number,
                    bin2hstr(block.headerhash))

        # Drop unsolicited blocks (not tracked by the message-request registry).
        if not source.factory.master_mr.isRequested(block.headerhash, source, block):
            return

        source.factory.pow.pre_block_logic(block)  # FIXME: Ignores return value
        source.factory.master_mr.register(qrllegacy_pb2.LegacyMessage.BK, block.headerhash, message.block)
Esempio n. 52
0
    def compare_and_sync(self, peer, node_header_hash: qrl_pb2.NodeHeaderHash):
        """Compare our chain against a peer's advertised header hashes and
        begin syncing from the first divergent block, if any.

        node_header_hash carries a starting block_number plus the peer's
        consecutive headerhashes from that height upward.
        """
        if self._syncing_enabled:
            logger.info('>> Ignoring compare_and_sync Syncing Enabled')
            return
        last_block = self._chain_manager.get_last_block()
        # Highest block number covered by the peer's advertised hash window.
        node_last_block_number = node_header_hash.block_number + len(node_header_hash.headerhashes) - 1
        # Compare only up to the height both chains cover.
        last_block_number = min(last_block.block_number, node_last_block_number)
        # Peer's window starts above our tip: nothing to compare against.
        if last_block_number < node_header_hash.block_number:
            return
        fork_block_number = last_block.block_number + 1
        fork_found = False
        # Walk backwards until a headerhash matches; every height above the
        # match point is considered forked.
        for i in range(last_block_number, node_header_hash.block_number - 1, -1):
            block = self._chain_manager.get_block_by_number(i)
            if block.headerhash == node_header_hash.headerhashes[i-node_header_hash.block_number]:
                break
            fork_block_number = i
            fork_found = True

        # Sync when we diverged, or when the peer simply has more blocks.
        if fork_found or (last_block.block_number < node_last_block_number):
            self._target_peer = peer
            self._target_node_header_hash = node_header_hash
            self._last_requested_block_idx = fork_block_number
            self._syncing_enabled = True
            self.peer_fetch_block()
Esempio n. 53
0
    def validate_extended(self, addr_from_state, addr_from_pk_state, transaction_pool):
        """Stateful checks for a lattice transaction: slave relationship,
        non-negative fee, sufficient balance, and OTS key freshness."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        balance = addr_from_state.balance

        # A negative fee is never valid.
        if self.fee < 0:
            logger.info('Lattice Txn: State validation failed %s : Negative fee %s', self.txhash, self.fee)
            return False

        # The sender must be able to pay the fee.
        if balance < self.fee:
            logger.info('Lattice Txn: State validation failed %s : Insufficient funds', self.txhash)
            logger.info('balance: %s, fee: %s', balance, self.fee)
            return False

        # Each OTS key index may sign only once.
        if self.ots_key_reuse(addr_from_pk_state, self.ots_key):
            logger.info('Lattice Txn: OTS Public key re-use detected %s', self.txhash)
            return False

        return True
Esempio n. 54
0
File: main.py Progetto: fanff/QRL
def main():
    """Node entry point: parse args, load wallet slaves, build the state /
    chain / node stack, start gRPC services, and run the Twisted reactor."""
    args = parse_arguments()

    config.create_path(config.user.wallet_dir)

    slaves = mining_wallet_checks(args)

    logger.debug("=====================================================================================")
    logger.info("Data Path: %s", args.data_dir)

    # Data dir comes from the CLI and must be in place before State() is
    # constructed (the DB path is derived from config.user.data_dir).
    config.user.data_dir = args.data_dir
    config.create_path(config.user.data_dir)

    ntp.setDrift()

    logger.info('Initializing chain..')
    persistent_state = State()
    chain_manager = ChainManager(state=persistent_state)
    # Seed the chain with the genesis block (round-tripped through JSON).
    chain_manager.load(Block.from_json(GenesisBlock().to_json()))

    qrlnode = QRLNode(db_state=persistent_state, slaves=slaves)
    qrlnode.set_chain(chain_manager)

    set_logger(args, qrlnode.sync_state)

    #######
    # NOTE: Keep assigned to a variable or might get collected
    admin_service, grpc_service = start_services(qrlnode)

    qrlnode.start_listening()
    qrlnode.connect_peers()

    qrlnode.start_pow()

    logger.info('QRL blockchain ledger %s', config.dev.version)
    # First slave address is used for mining/staking rewards.
    logger.info('mining/staking address %s', slaves[0])

    # FIXME: This will be removed once we move away from Twisted
    reactor.run()
Esempio n. 55
0
 def test_remaining_emission(self):
     """Check remaining_emission(100, 2) against a known reference value."""
     # TODO: Verify value and required precision
     # Logged so the observed value is visible in test output while the
     # expected precision is still unconfirmed.
     logger.info(remaining_emission(100, 2))
     self.assertEqual(remaining_emission(100, 2), Decimal('99.99999122'))
Esempio n. 56
0
def score(stake_address: bytes,
          reveal_one: bytes,
          balance: int = 0,
          seed: bytes = None,
          verbose: bool = False):
    """Compute the stake score for *stake_address* from its reveal and balance.

    Returns a Decimal score (lower-level math mixes the reveal with the
    seed hash and scales by balance), or None when balance is falsy.
    Raises ValueError when no seed is supplied.
    """
    if not seed:
        logger.info('Exception Raised due to seed none in score fn')
        # Fix: raise ValueError instead of a bare Exception — more specific,
        # and still caught by any existing `except Exception` handler.
        raise ValueError('score() requires a non-empty seed')

    if not balance:
        logger.info(' balance 0 so score none ')
        logger.info(' stake_address %s', stake_address)
        return None

    # FIXME: Review this
    reveal_seed = bin2hstr(sha256(str(reveal_one).encode() + str(seed).encode()))
    score = (Decimal(config.dev.N) - (Decimal(int(reveal_seed, 16)).log10() / Decimal(2).log10())) / Decimal(balance)

    if verbose:
        logger.info('=' * 10)
        logger.info('Score - %s', score)
        logger.info('reveal_one - %s', reveal_one)
        logger.info('seed - %s', seed)
        logger.info('balance - %s', balance)

    return score
Esempio n. 57
0
File: node.py Progetto: fanff/QRL
 def mine_next(self, parent_block):
     """Kick off mining of the block following *parent_block*, if enabled."""
     if not config.user.mining_enabled:
         return

     parent_metadata = self.chain_manager.state.get_block_metadata(parent_block.headerhash)
     logger.info('Mining Block #%s', parent_block.block_number + 1)
     self.miner.start_mining(self.chain_manager.tx_pool, parent_block, parent_metadata.block_difficulty)
Esempio n. 58
0
 def broadcast_ephemeral_message(self, encrypted_ephemeral):
     """Store an encrypted ephemeral message locally, then relay it to peers."""
     logger.info('<<<Broadcasting Encrypted Ephemeral Message')
     self._chain_manager.add_ephemeral_message(encrypted_ephemeral)
     message_hash = encrypted_ephemeral.get_message_hash()
     payload = encrypted_ephemeral.to_json()
     self.register_and_broadcast('EPH', message_hash, payload)